summary refs log tree commit diff stats
path: root/cpukit/score
diff options
context:
space:
mode:
author Sebastian Huber <sebastian.huber@embedded-brains.de> 2021-07-27 13:40:51 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de> 2021-07-28 19:32:24 +0200
commit cbb1103a3ca9d2f42501ba373b5d559b981fb1c7 (patch)
tree 8e20c863237d95390b49c82d7ee45791389b5a2a /cpukit/score
parent score: Remove processor event broadcast/receive (diff)
download rtems-cbb1103a3ca9d2f42501ba373b5d559b981fb1c7.tar.bz2
score: Simplify SMP processor state handling
The per-CPU states which control the SMP system initialization were added quite early during the SMP support development. Replace this initial implementation with a simplified one. There is no longer a global SMP lock required which serialized the state changes of all processors. The new implementation better integrates with the per-CPU jobs.
Diffstat (limited to 'cpukit/score')
-rw-r--r--cpukit/score/src/percpu.c151
-rw-r--r--cpukit/score/src/smp.c142
-rw-r--r--cpukit/score/src/smpmulticastaction.c36
-rw-r--r--cpukit/score/src/threadstartmultitasking.c4
4 files changed, 143 insertions, 190 deletions
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 7fbc1c8637..3bacbd7f55 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -3,8 +3,8 @@
*
* @ingroup RTEMSScorePerCPU
*
- * @brief This source file contains a definition of ::_Per_CPU_Information and
- * the implementation of _Per_CPU_State_change().
+ * @brief This source file contains the uniprocessor definition of
+ * ::_Per_CPU_Information and some static assertions.
*/
/*
@@ -21,10 +21,6 @@
#endif
#include <rtems/score/percpu.h>
-#include <rtems/score/assert.h>
-#include <rtems/score/isrlock.h>
-#include <rtems/score/smpimpl.h>
-#include <rtems/config.h>
RTEMS_STATIC_ASSERT(
sizeof( CPU_Uint32ptr ) >= sizeof( uintptr_t ),
@@ -36,148 +32,7 @@ RTEMS_STATIC_ASSERT(
CPU_Uint32ptr_greater_equal_uint32_t
);
-#if defined(RTEMS_SMP)
-
-ISR_LOCK_DEFINE( static, _Per_CPU_State_lock, "Per-CPU State" )
-
-static void _Per_CPU_State_acquire( ISR_lock_Context *lock_context )
-{
- _ISR_lock_ISR_disable_and_acquire( &_Per_CPU_State_lock, lock_context );
-}
-
-static void _Per_CPU_State_release( ISR_lock_Context *lock_context )
-{
- _ISR_lock_Release_and_ISR_enable( &_Per_CPU_State_lock, lock_context );
-}
-
-static void _Per_CPU_State_busy_wait(
- Per_CPU_Control *cpu,
- Per_CPU_State new_state
-)
-{
- Per_CPU_State state;
-
- state = _Per_CPU_Get_state( cpu );
-
- switch ( new_state ) {
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- while (
- state != PER_CPU_STATE_READY_TO_START_MULTITASKING
- && state != PER_CPU_STATE_SHUTDOWN
- ) {
- _Per_CPU_Perform_jobs( cpu );
- state = _Per_CPU_Get_state( cpu );
- }
- break;
- case PER_CPU_STATE_UP:
- while (
- state != PER_CPU_STATE_REQUEST_START_MULTITASKING
- && state != PER_CPU_STATE_SHUTDOWN
- ) {
- _Per_CPU_Perform_jobs( cpu );
- state = _Per_CPU_Get_state( cpu );
- }
- break;
- default:
- /* No need to wait */
- break;
- }
-}
-
-static Per_CPU_State _Per_CPU_State_get_next(
- Per_CPU_State current_state,
- Per_CPU_State new_state
-)
-{
- switch ( current_state ) {
- case PER_CPU_STATE_INITIAL:
- switch ( new_state ) {
- case PER_CPU_STATE_READY_TO_START_MULTITASKING:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- case PER_CPU_STATE_READY_TO_START_MULTITASKING:
- switch ( new_state ) {
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- switch ( new_state ) {
- case PER_CPU_STATE_UP:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
-
- return new_state;
-}
-
-void _Per_CPU_State_change(
- Per_CPU_Control *cpu,
- Per_CPU_State new_state
-)
-{
- ISR_lock_Context lock_context;
- Per_CPU_State next_state;
-
- _Per_CPU_State_busy_wait( cpu, new_state );
-
- _Per_CPU_State_acquire( &lock_context );
-
- next_state = _Per_CPU_State_get_next( _Per_CPU_Get_state( cpu ), new_state );
- _Per_CPU_Set_state( cpu, next_state );
-
- if ( next_state == PER_CPU_STATE_SHUTDOWN ) {
- uint32_t cpu_max = rtems_configuration_get_maximum_processors();
- uint32_t cpu_index;
-
- for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
- Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index( cpu_index );
-
- if ( cpu_other != cpu ) {
- switch ( _Per_CPU_Get_state( cpu_other ) ) {
- case PER_CPU_STATE_UP:
- _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN );
- break;
- default:
- /* Nothing to do */
- break;
- }
-
- _Per_CPU_Set_state( cpu_other, PER_CPU_STATE_SHUTDOWN );
- }
- }
- }
-
- _Per_CPU_State_release( &lock_context );
-
- if (
- next_state == PER_CPU_STATE_SHUTDOWN
- && new_state != PER_CPU_STATE_SHUTDOWN
- ) {
- _SMP_Fatal( SMP_FATAL_SHUTDOWN );
- }
-}
-#else
+#if !defined(RTEMS_SMP)
/*
* On single core systems, we can efficiently directly access a single
* statically allocated per cpu structure. And the fields are initialized
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 0488464da0..2c37cd6884 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -5,11 +5,12 @@
*
* @brief This source file contains the definition of ::_SMP_Online_processors
* and ::_SMP_Processor_maximum and the implementation of
- * _SMP_Handler_initialize(), _SMP_Request_shutdown(),
- * _SMP_Request_start_multitasking(), _SMP_Send_message(),
- * _SMP_Send_message_broadcast(), _SMP_Send_message_multicast(),
- * _SMP_Should_start_processor(), and
- * _SMP_Start_multitasking_on_secondary_processor().
+ * _SMP_Handler_initialize(), _SMP_Process_message(),
+ * _SMP_Request_shutdown(), _SMP_Request_start_multitasking(),
+ * _SMP_Send_message(), _SMP_Send_message_broadcast(),
+ * _SMP_Send_message_multicast(), _SMP_Should_start_processor(),
+ * _SMP_Start_multitasking_on_secondary_processor(), and
+ * _SMP_Try_to_process_message().
*/
/*
@@ -35,6 +36,15 @@
#error "deferred FP switch not implemented for SMP"
#endif
+/**
+ * @brief Indicates if the system is ready to start multitasking.
+ *
+ * Only the boot processor is allowed to change this object. If the object has
+ * a non-zero value and no fatal error occurred, then secondary processors
+ * should call _Thread_Start_multitasking() to start multiprocessing.
+ */
+static Atomic_Uint _SMP_Ready_to_start_multitasking;
+
Processor_mask _SMP_Online_processors;
uint32_t _SMP_Processor_maximum;
@@ -159,20 +169,38 @@ void _SMP_Request_start_multitasking( void )
uint32_t cpu_max;
uint32_t cpu_index;
- cpu_self = _Per_CPU_Get();
- _Per_CPU_State_change( cpu_self, PER_CPU_STATE_READY_TO_START_MULTITASKING );
-
cpu_max = _SMP_Get_processor_maximum();
+ cpu_self = _Per_CPU_Get();
+ /*
+ * Wait until all other online processors reached the
+ * PER_CPU_STATE_READY_TO_START_MULTITASKING state. The waiting is done
+ * without a timeout. If secondary processors cannot reach this state, then
+ * it is expected that they indicate this failure with an
+ * ::SMP_MESSAGE_SHUTDOWN message or reset the system.
+ */
for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
Per_CPU_Control *cpu;
cpu = _Per_CPU_Get_by_index( cpu_index );
- if ( _Per_CPU_Is_processor_online( cpu ) ) {
- _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
+ if ( cpu != cpu_self && _Per_CPU_Is_processor_online( cpu ) ) {
+ while (
+ _Per_CPU_Get_state( cpu ) != PER_CPU_STATE_READY_TO_START_MULTITASKING
+ ) {
+ _SMP_Try_to_process_message(
+ cpu_self,
+ _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
+ );
+ }
}
}
+
+ _Atomic_Store_uint(
+ &_SMP_Ready_to_start_multitasking,
+ 1U,
+ ATOMIC_ORDER_RELEASE
+ );
}
bool _SMP_Should_start_processor( uint32_t cpu_index )
@@ -183,6 +211,22 @@ bool _SMP_Should_start_processor( uint32_t cpu_index )
return _Scheduler_Should_start_processor( assignment );
}
+/**
+ * @brief Busy waits until the boot processor indicates that the system is
+ *   ready to start multitasking.
+ *
+ * While waiting, pending messages directed at this processor are processed
+ * via _SMP_Try_to_process_message(), presumably because inter-processor
+ * interrupts may not be fully operational at this point — TODO confirm.
+ * The acquire load of ::_SMP_Ready_to_start_multitasking pairs with the
+ * release store done by the boot processor.
+ *
+ * @param[in, out] cpu_self is the processor control of the current processor.
+ */
+static void _SMP_Wait_for_start_multitasking( Per_CPU_Control *cpu_self )
+{
+ unsigned int ready;
+
+ do {
+ _SMP_Try_to_process_message(
+ cpu_self,
+ _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
+ );
+ ready = _Atomic_Load_uint(
+ &_SMP_Ready_to_start_multitasking,
+ ATOMIC_ORDER_ACQUIRE
+ );
+ } while ( ready == 0U );
+}
+
void _SMP_Start_multitasking_on_secondary_processor(
Per_CPU_Control *cpu_self
)
@@ -199,28 +243,96 @@ void _SMP_Start_multitasking_on_secondary_processor(
_SMP_Fatal( SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR );
}
- _Per_CPU_State_change( cpu_self, PER_CPU_STATE_READY_TO_START_MULTITASKING );
-
+ _Per_CPU_Set_state( cpu_self, PER_CPU_STATE_READY_TO_START_MULTITASKING );
+ _SMP_Wait_for_start_multitasking( cpu_self );
_Thread_Start_multitasking();
}
void _SMP_Request_shutdown( void )
{
ISR_Level level;
+ uint32_t cpu_max;
+ uint32_t cpu_index_self;
+ uint32_t cpu_index;
_ISR_Local_disable( level );
(void) level;
- _Per_CPU_State_change( _Per_CPU_Get(), PER_CPU_STATE_SHUTDOWN );
+ cpu_max = _SMP_Processor_configured_maximum;
+ cpu_index_self = _SMP_Get_current_processor();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ if ( cpu_index == cpu_index_self ) {
+ _Per_CPU_Set_state( cpu, PER_CPU_STATE_SHUTDOWN );
+ } else {
+ _Atomic_Fetch_or_ulong(
+ &cpu->message,
+ SMP_MESSAGE_SHUTDOWN,
+ ATOMIC_ORDER_RELEASE
+ );
+
+ if ( _Per_CPU_Get_state( cpu ) == PER_CPU_STATE_UP ) {
+ _CPU_SMP_Send_interrupt( cpu_index );
+ }
+ }
+ }
+}
+
+/**
+ * @brief Processes the SMP message.
+ *
+ * A pending ::SMP_MESSAGE_SHUTDOWN is answered with the fatal error
+ * ::SMP_FATAL_SHUTDOWN_RESPONSE on this processor; the per-CPU state check
+ * prevents recursive shutdowns.  A pending ::SMP_MESSAGE_PERFORM_JOBS
+ * triggers the per-CPU job processing.
+ *
+ * @param[in, out] cpu_self is the processor control of the current processor.
+ *
+ * @param message is the message bit set to process.
+ */
+void _SMP_Process_message(
+  Per_CPU_Control *cpu_self,
+  unsigned long    message
+)
+{
+  if ( ( message & SMP_MESSAGE_SHUTDOWN ) != 0 ) {
+    /* Check the state to prevent recursive shutdowns */
+    if ( _Per_CPU_Get_state( cpu_self ) != PER_CPU_STATE_SHUTDOWN ) {
+      _Per_CPU_Set_state( cpu_self, PER_CPU_STATE_SHUTDOWN );
+      _SMP_Fatal( SMP_FATAL_SHUTDOWN_RESPONSE );
+    }
+  }
+
+  if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
+    _Per_CPU_Perform_jobs( cpu_self );
+  }
+}
+
+/**
+ * @brief Processes the pending message if the caller-supplied hint indicates
+ *   that a message may be pending.
+ *
+ * The hint is typically a relaxed load of cpu_self->message or the special
+ * value SMP_MESSAGE_FORCE_PROCESSING.  Only if the hint is non-zero, the
+ * pending message is atomically fetched and cleared (acquire ordering) and
+ * then handed to _SMP_Process_message().
+ *
+ * @param[in, out] cpu_self is the processor control of the current processor.
+ *
+ * @param message is a hint which may indicate that a message is pending.
+ */
+void _SMP_Try_to_process_message(
+ Per_CPU_Control *cpu_self,
+ unsigned long message
+)
+{
+ if ( message != 0 ) {
+ /*
+ * Fetch the current message. Only a read-modify-write operation
+ * guarantees that we get an up to date message. This is especially
+ * important if the function was called using SMP_MESSAGE_FORCE_PROCESSING.
+ */
+ message = _Atomic_Exchange_ulong(
+ &cpu_self->message,
+ 0,
+ ATOMIC_ORDER_ACQUIRE
+ );
+
+ _SMP_Process_message( cpu_self, message );
+ }
}
void _SMP_Send_message( uint32_t cpu_index, unsigned long message )
{
Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
- _Atomic_Fetch_or_ulong( &cpu->message, message, ATOMIC_ORDER_RELEASE );
+ (void) _Atomic_Fetch_or_ulong(
+ &cpu->message, message,
+ ATOMIC_ORDER_RELEASE
+ );
- _CPU_SMP_Send_interrupt( cpu_index );
+ if ( _Per_CPU_Get_state( cpu ) == PER_CPU_STATE_UP ) {
+ _CPU_SMP_Send_interrupt( cpu_index );
+ }
}
void _SMP_Send_message_broadcast( unsigned long message )
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index 5d65ef14ca..8dbdef80c7 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -92,27 +92,6 @@ void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
_Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
}
-static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self )
-{
- unsigned long message;
-
- message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
-
- if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
- bool success;
-
- success = _Atomic_Compare_exchange_ulong(
- &cpu_self->message, &message,
- message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED,
- ATOMIC_ORDER_RELAXED
- );
-
- if ( success ) {
- _Per_CPU_Perform_jobs( cpu_self );
- }
- }
-}
-
void _Per_CPU_Wait_for_job(
const Per_CPU_Control *cpu,
const Per_CPU_Job *job
@@ -122,17 +101,22 @@ void _Per_CPU_Wait_for_job(
_Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
!= PER_CPU_JOB_DONE
) {
+ Per_CPU_Control *cpu_self;
+
switch ( _Per_CPU_Get_state( cpu ) ) {
case PER_CPU_STATE_INITIAL:
case PER_CPU_STATE_READY_TO_START_MULTITASKING:
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
case PER_CPU_STATE_UP:
/*
- * Calling this function with the current processor is intentional.
- * We have to perform our own jobs here in case inter-processor
- * interrupts are not working.
+ * Calling this function with the current processor is intentional. We
+ * have to perform our own jobs here in case inter-processor interrupts
+ * are not working.
*/
- _Per_CPU_Try_perform_jobs( _Per_CPU_Get() );
+ cpu_self = _Per_CPU_Get();
+ _SMP_Try_to_process_message(
+ cpu_self,
+ _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED )
+ );
break;
default:
_SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
diff --git a/cpukit/score/src/threadstartmultitasking.c b/cpukit/score/src/threadstartmultitasking.c
index 094a535394..9fa52a58ac 100644
--- a/cpukit/score/src/threadstartmultitasking.c
+++ b/cpukit/score/src/threadstartmultitasking.c
@@ -22,6 +22,7 @@
#include <rtems/score/threadimpl.h>
#include <rtems/score/assert.h>
+#include <rtems/score/smpimpl.h>
void _Thread_Start_multitasking( void )
{
@@ -29,7 +30,8 @@ void _Thread_Start_multitasking( void )
Thread_Control *heir;
#if defined(RTEMS_SMP)
- _Per_CPU_State_change( cpu_self, PER_CPU_STATE_UP );
+ _Per_CPU_Set_state( cpu_self, PER_CPU_STATE_UP );
+ _SMP_Try_to_process_message( cpu_self, SMP_MESSAGE_FORCE_PROCESSING );
/*
* Threads begin execution in the _Thread_Handler() function. This