diff options
author | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2021-07-27 13:40:51 +0200 |
---|---|---|
committer | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2021-07-28 19:32:24 +0200 |
commit | cbb1103a3ca9d2f42501ba373b5d559b981fb1c7 (patch) | |
tree | 8e20c863237d95390b49c82d7ee45791389b5a2a /cpukit/score/src/smpmulticastaction.c | |
parent | score: Remove processor event broadcast/receive (diff) | |
download | rtems-cbb1103a3ca9d2f42501ba373b5d559b981fb1c7.tar.bz2 |
score: Simplify SMP processor state handling
The per-CPU states which control the SMP system initialization were added quite
early during the SMP support development. Replace this initial implementation
with a simplified one. A global SMP lock which serialized the state changes of
all processors is no longer required. The new implementation integrates better
with the per-CPU jobs.
Diffstat (limited to 'cpukit/score/src/smpmulticastaction.c')
-rw-r--r-- | cpukit/score/src/smpmulticastaction.c | 36 |
1 file changed, 10 insertions, 26 deletions
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c index 5d65ef14ca..8dbdef80c7 100644 --- a/cpukit/score/src/smpmulticastaction.c +++ b/cpukit/score/src/smpmulticastaction.c @@ -92,27 +92,6 @@ void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job ) _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context ); } -static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self ) -{ - unsigned long message; - - message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ); - - if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) { - bool success; - - success = _Atomic_Compare_exchange_ulong( - &cpu_self->message, &message, - message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED, - ATOMIC_ORDER_RELAXED - ); - - if ( success ) { - _Per_CPU_Perform_jobs( cpu_self ); - } - } -} - void _Per_CPU_Wait_for_job( const Per_CPU_Control *cpu, const Per_CPU_Job *job @@ -122,17 +101,22 @@ void _Per_CPU_Wait_for_job( _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE ) != PER_CPU_JOB_DONE ) { + Per_CPU_Control *cpu_self; + switch ( _Per_CPU_Get_state( cpu ) ) { case PER_CPU_STATE_INITIAL: case PER_CPU_STATE_READY_TO_START_MULTITASKING: - case PER_CPU_STATE_REQUEST_START_MULTITASKING: case PER_CPU_STATE_UP: /* - * Calling this function with the current processor is intentional. - * We have to perform our own jobs here in case inter-processor - * interrupts are not working. + * Calling this function with the current processor is intentional. We + * have to perform our own jobs here in case inter-processor interrupts + * are not working. */ - _Per_CPU_Try_perform_jobs( _Per_CPU_Get() ); + cpu_self = _Per_CPU_Get(); + _SMP_Try_to_process_message( + cpu_self, + _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ) + ); break; default: _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS ); |