authorSebastian Huber <sebastian.huber@embedded-brains.de>2019-04-11 15:16:40 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2019-04-12 09:44:48 +0200
commite90486ab41a4edf045a6153675b6be9dcd422b71 (patch)
treeea0ad25fd9d9324ca02f9a26556d6cc5e2e9a592
parentscore: Use processor mask in _SMP_Multicast_action (diff)
downloadrtems-e90486ab41a4edf045a6153675b6be9dcd422b71.tar.bz2
score: Rework SMP multicast action
Use a FIFO list of jobs per processor to carry out the SMP multicast action. Use a done indicator per job to reduce the bus traffic a bit.
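For reference, a minimal usage sketch of the reworked score-internal API follows; the handler name and body are illustrative assumptions, not part of this change. Passing NULL as the target mask selects all online processors; the call enqueues one job per targeted processor, sends SMP_MESSAGE_PERFORM_JOBS, and busy-waits on each job's done indicator.

#include <rtems/score/smpimpl.h>

/* Illustrative handler; name and body are hypothetical. */
static void do_per_cpu_work( void *arg )
{
  (void) arg;
  /* Work to be carried out on each targeted processor. */
}

static void run_on_all_online_processors( void )
{
  /* NULL selects all online processors as targets. */
  _SMP_Multicast_action( NULL, do_per_cpu_work, NULL );
}

While waiting, the caller also tries to perform its own pending jobs, so the action completes even if the calling processor is among the targets.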
Diffstat
-rw-r--r--cpukit/include/rtems/score/percpu.h40
-rw-r--r--cpukit/include/rtems/score/smpimpl.h13
-rw-r--r--cpukit/score/src/smp.c1
-rw-r--r--cpukit/score/src/smpmulticastaction.c249
-rw-r--r--testsuites/smptests/smpcache01/init.c5
-rw-r--r--testsuites/smptests/smpmulticast01/init.c3
6 files changed, 225 insertions, 86 deletions
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 27f4e93a46..cd7713db6d 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -74,6 +74,8 @@ struct _Thread_Control;
struct Scheduler_Context;
+struct Per_CPU_Job;
+
/**
* @defgroup PerCPU RTEMS Per CPU Information
*
@@ -495,6 +497,37 @@ typedef struct Per_CPU_Control {
Atomic_Uintptr before_multitasking_action;
/**
+ * @brief FIFO list of jobs to be performed by this processor.
+ *
+ * @see _SMP_Multicast_action().
+ */
+ struct {
+ /**
+ * @brief Lock to protect the FIFO list of jobs to be performed by this
+ * processor.
+ */
+ ISR_lock_Control Lock;
+
+ /**
+ * @brief Head of the FIFO list of jobs to be performed by this
+ * processor.
+ *
+ * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
+ */
+ struct Per_CPU_Job *head;
+
+ /**
+ * @brief Tail of the FIFO list of jobs to be performed by this
+ * processor.
+ *
+ * This member is only valid if the head is not @c NULL.
+ *
+ * This member is protected by the Per_CPU_Control::Jobs::Lock lock.
+ */
+ struct Per_CPU_Job **tail;
+ } Jobs;
+
+ /**
* @brief Indicates if the processor has been successfully started via
* _CPU_SMP_Start_processor().
*/
@@ -710,6 +743,13 @@ bool _Per_CPU_State_wait_for_non_initial_state(
uint32_t timeout_in_ns
);
+/**
+ * @brief Performs the jobs of the specified processor.
+ *
+ * @param[in, out] cpu The jobs of this processor will be performed.
+ */
+void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
+
#endif /* defined( RTEMS_SMP ) */
/*
diff --git a/cpukit/include/rtems/score/smpimpl.h b/cpukit/include/rtems/score/smpimpl.h
index 6b59b9497d..d6fdef5ca1 100644
--- a/cpukit/include/rtems/score/smpimpl.h
+++ b/cpukit/include/rtems/score/smpimpl.h
@@ -51,11 +51,11 @@ extern "C" {
#define SMP_MESSAGE_TEST 0x2UL
/**
- * @brief SMP message to request a multicast action.
+ * @brief SMP message to perform per-processor jobs.
*
* @see _SMP_Send_message().
*/
-#define SMP_MESSAGE_MULTICAST_ACTION 0x4UL
+#define SMP_MESSAGE_PERFORM_JOBS 0x4UL
/**
* @brief SMP message to request a clock tick.
@@ -158,11 +158,6 @@ static inline void _SMP_Set_test_message_handler(
}
/**
- * @brief Processes all pending multicast actions.
- */
-void _SMP_Multicast_actions_process( void );
-
-/**
* @brief Interrupt handler for inter-processor interrupts.
*
* @return The received message.
@@ -195,8 +190,8 @@ static inline long unsigned _SMP_Inter_processor_interrupt_handler(
( *_SMP_Test_message_handler )( cpu_self );
}
- if ( ( message & SMP_MESSAGE_MULTICAST_ACTION ) != 0 ) {
- _SMP_Multicast_actions_process();
+ if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
+ _Per_CPU_Perform_jobs( cpu_self );
}
}
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 873682962d..027c2e81d8 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -118,6 +118,7 @@ void _SMP_Handler_initialize( void )
cpu = _Per_CPU_Get_by_index( cpu_index );
_ISR_lock_Set_name( &cpu->Lock, "Per-CPU" );
+ _ISR_lock_Set_name( &cpu->Jobs.Lock, "Per-CPU Jobs" );
_ISR_lock_Set_name( &cpu->Watchdog.Lock, "Per-CPU Watchdog" );
_Chain_Initialize_empty( &cpu->Threads_in_need_for_help );
}
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index 0b9641c3db..a1df0269e1 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -1,91 +1,194 @@
/*
- * Copyright (c) 2014 Aeroflex Gaisler AB. All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * Copyright (C) 2019 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
- #include "config.h"
+#include "config.h"
#endif
#include <rtems/score/smpimpl.h>
-#include <rtems/score/isrlock.h>
-#include <rtems/score/chainimpl.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/threaddispatch.h>
#include <rtems/score/sysstate.h>
-typedef struct {
- Chain_Node Node;
- SMP_Action_handler handler;
- void *arg;
- Processor_mask targets;
- Atomic_Ulong done;
-} SMP_Multicast_action;
-
-typedef struct {
- ISR_lock_Control Lock;
- Chain_Control Actions;
-} SMP_Multicast_context;
-
-static SMP_Multicast_context _SMP_Multicast = {
- .Lock = ISR_LOCK_INITIALIZER( "SMP Multicast Action" ),
- .Actions = CHAIN_INITIALIZER_EMPTY( _SMP_Multicast.Actions )
+typedef struct Per_CPU_Job Per_CPU_Job;
+
+typedef struct Per_CPU_Jobs Per_CPU_Jobs;
+
+/**
+ * @brief A per-processor job.
+ */
+struct Per_CPU_Job {
+ union {
+ /**
+ * @brief The next job in the corresponding per-processor job list.
+ */
+ Per_CPU_Job *next;
+
+ /**
+ * @brief Indication if the job is done.
+ *
+ * A job is done if this member has the value one. This assumes that one
+ * is not a valid pointer value.
+ */
+ Atomic_Ulong done;
+ };
+
+ /**
+ * @brief Back pointer to the jobs to get the handler and argument.
+ */
+ Per_CPU_Jobs *jobs;
};
-void _SMP_Multicast_actions_process( void )
+/**
+ * @brief A collection of jobs, one for each processor.
+ */
+struct Per_CPU_Jobs {
+ /**
+ * @brief The job handler.
+ */
+ SMP_Action_handler handler;
+
+ /**
+ * @brief The job handler argument.
+ */
+ void *arg;
+
+ /**
+ * @brief One job for each potential processor.
+ */
+ Per_CPU_Job Jobs[ CPU_MAXIMUM_PROCESSORS ];
+};
+
+void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
{
- ISR_lock_Context lock_context;
- uint32_t cpu_self_index;
- SMP_Multicast_action *node;
- SMP_Multicast_action *next;
+ ISR_lock_Context lock_context;
+ Per_CPU_Job *job;
- _ISR_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
- cpu_self_index = _SMP_Get_current_processor();
- node = (SMP_Multicast_action *) _Chain_First( &_SMP_Multicast.Actions );
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
- while ( !_Chain_Is_tail( &_SMP_Multicast.Actions, &node->Node ) ) {
- next = (SMP_Multicast_action *) _Chain_Next( &node->Node );
+ while ( ( job = cpu->Jobs.head ) != NULL ) {
+ Per_CPU_Jobs *jobs;
- if ( _Processor_mask_Is_set( &node->targets, cpu_self_index ) ) {
- _Processor_mask_Clear( &node->targets, cpu_self_index );
+ cpu->Jobs.head = job->next;
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
- ( *node->handler )( node->arg );
+ jobs = job->jobs;
+ ( *jobs->handler )( jobs->arg );
+ _Atomic_Store_ulong( &job->done, 1, ATOMIC_ORDER_RELEASE );
- if ( _Processor_mask_Is_zero( &node->targets ) ) {
- _Chain_Extract_unprotected( &node->Node );
- _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
- }
- }
-
- node = next;
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
}
- _ISR_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
}
-static void
-_SMP_Multicasts_try_process( void )
+static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self )
{
unsigned long message;
- Per_CPU_Control *cpu_self;
- ISR_Level isr_level;
- _ISR_Local_disable( isr_level );
+ message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
- cpu_self = _Per_CPU_Get();
+ if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
+ bool success;
- message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
+ success = _Atomic_Compare_exchange_ulong(
+ &cpu_self->message, &message,
+ message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED,
+ ATOMIC_ORDER_RELAXED
+ );
- if ( message & SMP_MESSAGE_MULTICAST_ACTION ) {
- if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
- message & ~SMP_MESSAGE_MULTICAST_ACTION, ATOMIC_ORDER_RELAXED,
- ATOMIC_ORDER_RELAXED ) ) {
- _SMP_Multicast_actions_process();
+ if ( success ) {
+ _Per_CPU_Perform_jobs( cpu_self );
}
}
+}
- _ISR_Local_enable( isr_level );
+static void _SMP_Issue_action_jobs(
+ const Processor_mask *targets,
+ Per_CPU_Jobs *jobs,
+ uint32_t cpu_max
+)
+{
+ uint32_t cpu_index;
+
+ for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
+ if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
+ ISR_lock_Context lock_context;
+ Per_CPU_Job *job;
+ Per_CPU_Control *cpu;
+
+ job = &jobs->Jobs[ cpu_index ];
+ _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
+ _Assert( job->next == NULL );
+ job->jobs = jobs;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
+
+ if ( cpu->Jobs.head == NULL ) {
+ cpu->Jobs.head = job;
+ } else {
+ *cpu->Jobs.tail = job;
+ }
+
+ cpu->Jobs.tail = &job->next;
+
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+ _SMP_Send_message( cpu_index, SMP_MESSAGE_PERFORM_JOBS );
+ }
+ }
+}
+
+static void _SMP_Wait_for_action_jobs(
+ const Processor_mask *targets,
+ const Per_CPU_Jobs *jobs,
+ uint32_t cpu_max,
+ Per_CPU_Control *cpu_self
+)
+{
+ uint32_t cpu_index;
+
+ for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
+ if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
+ const Per_CPU_Job *job;
+
+ job = &jobs->Jobs[ cpu_index ];
+
+ while ( _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
+ _Per_CPU_Try_perform_jobs( cpu_self );
+ }
+ }
+ }
}
void _SMP_Multicast_action(
@@ -94,33 +197,27 @@ void _SMP_Multicast_action(
void *arg
)
{
- SMP_Multicast_action node;
- ISR_lock_Context lock_context;
- uint32_t i;
+ Per_CPU_Jobs jobs;
+ uint32_t cpu_max;
+ Per_CPU_Control *cpu_self;
+
+ cpu_max = _SMP_Get_processor_maximum();
+ _Assert( cpu_max <= CPU_MAXIMUM_PROCESSORS );
if ( ! _System_state_Is_up( _System_state_Get() ) ) {
( *handler )( arg );
return;
}
- if( targets == NULL ) {
+ if ( targets == NULL ) {
targets = _SMP_Get_online_processors();
}
- _Chain_Initialize_node( &node.Node );
- node.handler = handler;
- node.arg = arg;
- _Processor_mask_Assign( &node.targets, targets );
- _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
-
- _ISR_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
- _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
- _ISR_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
+ jobs.handler = handler;
+ jobs.arg = arg;
- _SMP_Send_message_multicast( targets, SMP_MESSAGE_MULTICAST_ACTION );
- _SMP_Multicasts_try_process();
-
- while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
- /* Wait */
- };
+ cpu_self = _Thread_Dispatch_disable();
+ _SMP_Issue_action_jobs( targets, &jobs, cpu_max );
+ _SMP_Wait_for_action_jobs( targets, &jobs, cpu_max, cpu_self );
+ _Thread_Dispatch_enable( cpu_self );
}
diff --git a/testsuites/smptests/smpcache01/init.c b/testsuites/smptests/smpcache01/init.c
index 878a015bf1..e9cee1eec5 100644
--- a/testsuites/smptests/smpcache01/init.c
+++ b/testsuites/smptests/smpcache01/init.c
@@ -126,12 +126,15 @@ static void call_tests_isr_disabled( SMP_barrier_State *bs )
broadcast_test_init();
for (i = 0; i < RTEMS_ARRAY_SIZE( test_cases ); ++i) {
- ISR_Level isr_level;
+ Per_CPU_Control *cpu_self;
+ ISR_Level isr_level;
+ cpu_self = _Thread_Dispatch_disable();
_ISR_Local_disable( isr_level );
barrier( bs );
( *test_cases[ i ] )();
_ISR_Local_enable( isr_level );
+ _Thread_Dispatch_enable( cpu_self );
barrier( bs );
}
diff --git a/testsuites/smptests/smpmulticast01/init.c b/testsuites/smptests/smpmulticast01/init.c
index 2319582ab6..e599a78bde 100644
--- a/testsuites/smptests/smpmulticast01/init.c
+++ b/testsuites/smptests/smpmulticast01/init.c
@@ -59,11 +59,14 @@ static void multicast_action_irq_disabled(
void *arg
)
{
+ Per_CPU_Control *cpu_self;
rtems_interrupt_level level;
+ cpu_self = _Thread_Dispatch_disable();
rtems_interrupt_local_disable(level);
_SMP_Multicast_action(targets, handler, arg);
rtems_interrupt_local_enable(level);
+ _Thread_Dispatch_enable(cpu_self);
}
static void multicast_action_dispatch_disabled(