author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-04-05 11:28:46 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-04-05 11:28:46 +0200
commit    46f05b92b20bd5d38bb952f6aaad6a7783ce36c9
tree      baeecf308721459eb76970794b2935aab8a7affe
parent    spcache01: Instruction cache invalidate workaround
download  rtems-46f05b92b20bd5d38bb952f6aaad6a7783ce36c9.tar.bz2
SMP: Simplify SMP multicast actions
-rw-r--r--  cpukit/score/include/rtems/score/processormask.h    31
-rw-r--r--  cpukit/score/include/rtems/score/smpimpl.h            8
-rw-r--r--  cpukit/score/src/smp.c                                7
-rw-r--r--  cpukit/score/src/smpmulticastaction.c               102
4 files changed, 85 insertions, 63 deletions
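
In short, the commit drops the POSIX cpu_set_t/setsize pairs from the SMP
multicast code in favor of the fixed-size score type Processor_mask. The
effect on the internal send call, as a before/after sketch (identifiers
taken from the hunks below; surrounding code omitted):

    /* Before: dynamically sized POSIX CPU set plus an explicit size */
    _SMP_Send_message_multicast( set_size, recipients, SMP_MESSAGE_MULTICAST_ACTION );

    /* After: fixed-size Processor_mask, no size argument needed */
    _SMP_Send_message_multicast( targets, SMP_MESSAGE_MULTICAST_ACTION );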
diff --git a/cpukit/score/include/rtems/score/processormask.h b/cpukit/score/include/rtems/score/processormask.h
index 5a78dd33c6..8ee869a895 100644
--- a/cpukit/score/include/rtems/score/processormask.h
+++ b/cpukit/score/include/rtems/score/processormask.h
@@ -62,6 +62,37 @@ extern "C" {
*/
typedef uint32_t Processor_mask[ PROCESSOR_MASK_FIELD_COUNT ];
+RTEMS_INLINE_ROUTINE void _Processor_mask_Zero( Processor_mask mask )
+{
+ size_t i;
+
+ for ( i = 0; i < PROCESSOR_MASK_FIELD_COUNT; ++i ) {
+ mask[ i ] = 0;
+ }
+}
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_zero( const Processor_mask mask )
+{
+ size_t i;
+
+ for ( i = 0; i < PROCESSOR_MASK_FIELD_COUNT; ++i ) {
+ if ( mask[ i ] != 0 ) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Assign( Processor_mask dst, const Processor_mask src )
+{
+ size_t i;
+
+ for ( i = 0; i < PROCESSOR_MASK_FIELD_COUNT; ++i ) {
+ dst[ i ] = src[ i ];
+ }
+}
+
RTEMS_INLINE_ROUTINE void _Processor_mask_Set( Processor_mask mask, uint32_t index )
{
mask[ PROCESSOR_MASK_FIELD( index ) ] |= PROCESSOR_MASK_BIT( index );
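
The new inline routines complete the basic set operations on
Processor_mask; the patch also relies on _Processor_mask_Is_set() and
_Processor_mask_Clear(), which are defined elsewhere in this header
alongside _Processor_mask_Set() above. A minimal illustrative fragment,
not part of the commit:

    Processor_mask targets;
    Processor_mask copy;

    _Processor_mask_Zero( targets );    /* clear every field */
    _Processor_mask_Set( targets, 0 );  /* add processor 0 */
    _Processor_mask_Set( targets, 2 );  /* add processor 2 */

    _Processor_mask_Assign( copy, targets );

    if ( _Processor_mask_Is_zero( copy ) ) {
      /* not reached: processors 0 and 2 are set in the copy */
    }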
diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h
index 1651a5ecc6..f85251e8b0 100644
--- a/cpukit/score/include/rtems/score/smpimpl.h
+++ b/cpukit/score/include/rtems/score/smpimpl.h
@@ -237,14 +237,12 @@ void _SMP_Send_message_broadcast(
*
* The sending processor may be part of the set.
*
- * @param[in] setsize The size of the set of target processors of the message.
- * @param[in] cpus The set of target processors of the message.
+ * @param[in] targets The set of processors to send the message to.
* @param[in] message The message.
*/
void _SMP_Send_message_multicast(
- const size_t setsize,
- const cpu_set_t *cpus,
- unsigned long message
+ const Processor_mask targets,
+ unsigned long message
);
typedef void ( *SMP_Action_handler )( void *arg );
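
Under the new signature a caller builds a Processor_mask and passes it
directly; since the mask is a fixed-size array type, no size parameter
travels with it. An illustrative call that targets all online processors
(_SMP_Online_processors and the message constant appear in the hunks
below):

    Processor_mask targets;

    _Processor_mask_Assign( targets, _SMP_Online_processors );
    _SMP_Send_message_multicast( targets, SMP_MESSAGE_MULTICAST_ACTION );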
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index ab9c7a6115..434483c5ad 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -225,16 +225,15 @@ void _SMP_Send_message_broadcast( unsigned long message )
}
void _SMP_Send_message_multicast(
- const size_t setsize,
- const cpu_set_t *cpus,
- unsigned long message
+ const Processor_mask targets,
+ unsigned long message
)
{
uint32_t cpu_count = _SMP_Get_processor_count();
uint32_t cpu_index;
for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
- if ( CPU_ISSET_S( cpu_index, setsize, cpus ) ) {
+ if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
_SMP_Send_message( cpu_index, message );
}
}
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index c0343bf117..a7c63498a3 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -16,59 +16,56 @@
#include <rtems/score/sysstate.h>
typedef struct {
- Chain_Node Node;
- SMP_Action_handler handler;
- void *arg;
- cpu_set_t *recipients;
- size_t setsize;
- Atomic_Ulong done;
+ Chain_Node Node;
+ SMP_Action_handler handler;
+ void *arg;
+ Processor_mask targets;
+ Atomic_Ulong done;
} SMP_Multicast_action;
typedef struct {
SMP_lock_Control Lock;
- Chain_Control List;
-} SMP_Multicast_action_context;
+ Chain_Control Actions;
+} SMP_Multicast_context;
-static SMP_Multicast_action_context _SMP_Multicast_action_context = {
- .Lock = SMP_LOCK_INITIALIZER("SMP Multicast Action"),
- .List = CHAIN_INITIALIZER_EMPTY(_SMP_Multicast_action_context.List)
+static SMP_Multicast_context _SMP_Multicast = {
+ .Lock = SMP_LOCK_INITIALIZER( "SMP Multicast Action" ),
+ .Actions = CHAIN_INITIALIZER_EMPTY( _SMP_Multicast.Actions )
};
-void
-_SMP_Multicast_actions_process(void)
+void _SMP_Multicast_actions_process( void )
{
- SMP_lock_Context lock_context;
+ SMP_lock_Context lock_context;
+ uint32_t cpu_self_index;
SMP_Multicast_action *node;
SMP_Multicast_action *next;
- uint32_t cpu_self_idx;
- _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast_action_context.Lock,
- &lock_context );
- cpu_self_idx = _SMP_Get_current_processor();
+ _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
+ cpu_self_index = _SMP_Get_current_processor();
+ node = (SMP_Multicast_action *) _Chain_First( &_SMP_Multicast.Actions );
- node = (SMP_Multicast_action*)_Chain_First(
- &_SMP_Multicast_action_context.List );
- while ( !_Chain_Is_tail( &_SMP_Multicast_action_context.List, &node->Node ) ) {
- next = (SMP_Multicast_action*)_Chain_Next( &node->Node );
- if ( CPU_ISSET_S ( cpu_self_idx, node->setsize, node->recipients ) ) {
- CPU_CLR_S ( cpu_self_idx, node->setsize, node->recipients );
+ while ( !_Chain_Is_tail( &_SMP_Multicast.Actions, &node->Node ) ) {
+ next = (SMP_Multicast_action *) _Chain_Next( &node->Node );
- node->handler( node->arg );
+ if ( _Processor_mask_Is_set( node->targets, cpu_self_index ) ) {
+ _Processor_mask_Clear( node->targets, cpu_self_index );
- if ( CPU_COUNT_S( node->setsize, node->recipients ) == 0 ) {
+ ( *node->handler )( node->arg );
+
+ if ( _Processor_mask_Is_zero( node->targets ) ) {
_Chain_Extract_unprotected( &node->Node );
_Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
}
}
+
node = next;
}
- _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast_action_context.Lock,
- &lock_context );
+ _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
}
static void
-_SMP_Multicast_actions_try_process( void )
+_SMP_Multicasts_try_process( void )
{
unsigned long message;
Per_CPU_Control *cpu_self;
@@ -98,45 +95,42 @@ void _SMP_Multicast_action(
void *arg
)
{
- uint32_t i;
SMP_Multicast_action node;
- size_t set_size = CPU_ALLOC_SIZE( _SMP_Get_processor_count() );
- char cpu_set_copy[set_size];
- SMP_lock_Context lock_context;
+ Processor_mask targets;
+ SMP_lock_Context lock_context;
+ uint32_t i;
if ( ! _System_state_Is_up( _System_state_Get() ) ) {
- handler( arg );
+ ( *handler )( arg );
return;
}
- memset( cpu_set_copy, 0, set_size );
if( cpus == NULL ) {
- for( i=0; i<_SMP_Get_processor_count(); ++i )
- CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+ _Processor_mask_Assign( targets, _SMP_Online_processors );
} else {
- for( i=0; i<_SMP_Get_processor_count(); ++i )
- if( CPU_ISSET_S( i, set_size, cpus ) )
- CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+ _Processor_mask_Zero( targets );
+
+ for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
+ if ( CPU_ISSET_S( i, setsize, cpus ) ) {
+ _Processor_mask_Set( targets, i );
+ }
+ }
}
+ _Chain_Initialize_node( &node.Node );
node.handler = handler;
node.arg = arg;
- node.setsize = set_size;
- node.recipients = (cpu_set_t *)cpu_set_copy;
+ _Processor_mask_Assign( node.targets, targets );
_Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
+ _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
+ _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
+ _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
- _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast_action_context.Lock,
- &lock_context );
- _Chain_Initialize_node( &node.Node );
- _Chain_Prepend_unprotected( &_SMP_Multicast_action_context.List, &node.Node );
- _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast_action_context.Lock,
- &lock_context );
-
- _SMP_Send_message_multicast( set_size, node.recipients,
- SMP_MESSAGE_MULTICAST_ACTION );
-
- _SMP_Multicast_actions_try_process();
+ _SMP_Send_message_multicast( targets, SMP_MESSAGE_MULTICAST_ACTION );
+ _SMP_Multicasts_try_process();
- while ( !_Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) );
+ while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
+ /* Wait */
+ };
}
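
For reference, the public entry point _SMP_Multicast_action() keeps its
POSIX-style cpu_set_t interface; only the internals now use
Processor_mask. A hypothetical use that runs a handler on every online
processor (the full parameter list is not visible in the hunk; the order
below follows the identifiers used in the body, and the handler is
illustrative only):

    static void my_action( void *arg )
    {
      (void) arg;
      /* per-processor work, e.g. cache maintenance as in the parent commit */
    }

    /* cpus == NULL selects all online processors, see the code above */
    _SMP_Multicast_action( 0, NULL, my_action, NULL );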