summary | refs | log | tree | commit | diff | stats
path: root/cpukit/score/src/percpu.c
diff options
context:
space:
mode:
author:    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-07-27 13:40:51 +0200
committer: Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-07-28 19:32:24 +0200
commit:    cbb1103a3ca9d2f42501ba373b5d559b981fb1c7 (patch)
tree:      8e20c863237d95390b49c82d7ee45791389b5a2a /cpukit/score/src/percpu.c
parent:    score: Remove processor event broadcast/receive (diff)
download:  rtems-cbb1103a3ca9d2f42501ba373b5d559b981fb1c7.tar.bz2
score: Simplify SMP processor state handling
The per-CPU states which control the SMP system initialization were added quite early during the SMP support development. Replace this initial implementation with a simplified one. There is no longer a global SMP lock required which serialized the state changes of all processors. The new implementation better integrates with the per-CPU jobs.
Diffstat (limited to 'cpukit/score/src/percpu.c')
-rw-r--r--  cpukit/score/src/percpu.c | 151
1 file changed, 3 insertions(+), 148 deletions(-)
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 7fbc1c8637..3bacbd7f55 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -3,8 +3,8 @@
*
* @ingroup RTEMSScorePerCPU
*
- * @brief This source file contains a definition of ::_Per_CPU_Information and
- * the implementation of _Per_CPU_State_change().
+ * @brief This source file contains the uniprocessor definition of
+ * ::_Per_CPU_Information and some static assertions.
*/
/*
@@ -21,10 +21,6 @@
#endif
#include <rtems/score/percpu.h>
-#include <rtems/score/assert.h>
-#include <rtems/score/isrlock.h>
-#include <rtems/score/smpimpl.h>
-#include <rtems/config.h>
RTEMS_STATIC_ASSERT(
sizeof( CPU_Uint32ptr ) >= sizeof( uintptr_t ),
@@ -36,148 +32,7 @@ RTEMS_STATIC_ASSERT(
CPU_Uint32ptr_greater_equal_uint32_t
);
-#if defined(RTEMS_SMP)
-
-ISR_LOCK_DEFINE( static, _Per_CPU_State_lock, "Per-CPU State" )
-
-static void _Per_CPU_State_acquire( ISR_lock_Context *lock_context )
-{
- _ISR_lock_ISR_disable_and_acquire( &_Per_CPU_State_lock, lock_context );
-}
-
-static void _Per_CPU_State_release( ISR_lock_Context *lock_context )
-{
- _ISR_lock_Release_and_ISR_enable( &_Per_CPU_State_lock, lock_context );
-}
-
-static void _Per_CPU_State_busy_wait(
- Per_CPU_Control *cpu,
- Per_CPU_State new_state
-)
-{
- Per_CPU_State state;
-
- state = _Per_CPU_Get_state( cpu );
-
- switch ( new_state ) {
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- while (
- state != PER_CPU_STATE_READY_TO_START_MULTITASKING
- && state != PER_CPU_STATE_SHUTDOWN
- ) {
- _Per_CPU_Perform_jobs( cpu );
- state = _Per_CPU_Get_state( cpu );
- }
- break;
- case PER_CPU_STATE_UP:
- while (
- state != PER_CPU_STATE_REQUEST_START_MULTITASKING
- && state != PER_CPU_STATE_SHUTDOWN
- ) {
- _Per_CPU_Perform_jobs( cpu );
- state = _Per_CPU_Get_state( cpu );
- }
- break;
- default:
- /* No need to wait */
- break;
- }
-}
-
-static Per_CPU_State _Per_CPU_State_get_next(
- Per_CPU_State current_state,
- Per_CPU_State new_state
-)
-{
- switch ( current_state ) {
- case PER_CPU_STATE_INITIAL:
- switch ( new_state ) {
- case PER_CPU_STATE_READY_TO_START_MULTITASKING:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- case PER_CPU_STATE_READY_TO_START_MULTITASKING:
- switch ( new_state ) {
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- case PER_CPU_STATE_REQUEST_START_MULTITASKING:
- switch ( new_state ) {
- case PER_CPU_STATE_UP:
- case PER_CPU_STATE_SHUTDOWN:
- /* Change is acceptable */
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
- break;
- default:
- new_state = PER_CPU_STATE_SHUTDOWN;
- break;
- }
-
- return new_state;
-}
-
-void _Per_CPU_State_change(
- Per_CPU_Control *cpu,
- Per_CPU_State new_state
-)
-{
- ISR_lock_Context lock_context;
- Per_CPU_State next_state;
-
- _Per_CPU_State_busy_wait( cpu, new_state );
-
- _Per_CPU_State_acquire( &lock_context );
-
- next_state = _Per_CPU_State_get_next( _Per_CPU_Get_state( cpu ), new_state );
- _Per_CPU_Set_state( cpu, next_state );
-
- if ( next_state == PER_CPU_STATE_SHUTDOWN ) {
- uint32_t cpu_max = rtems_configuration_get_maximum_processors();
- uint32_t cpu_index;
-
- for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
- Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index( cpu_index );
-
- if ( cpu_other != cpu ) {
- switch ( _Per_CPU_Get_state( cpu_other ) ) {
- case PER_CPU_STATE_UP:
- _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN );
- break;
- default:
- /* Nothing to do */
- break;
- }
-
- _Per_CPU_Set_state( cpu_other, PER_CPU_STATE_SHUTDOWN );
- }
- }
- }
-
- _Per_CPU_State_release( &lock_context );
-
- if (
- next_state == PER_CPU_STATE_SHUTDOWN
- && new_state != PER_CPU_STATE_SHUTDOWN
- ) {
- _SMP_Fatal( SMP_FATAL_SHUTDOWN );
- }
-}
-#else
+#if !defined(RTEMS_SMP)
/*
* On single core systems, we can efficiently directly access a single
* statically allocated per cpu structure. And the fields are initialized