From 3380ee8194ec35506b88257f369e88d1d26350f1 Mon Sep 17 00:00:00 2001
From: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date: Tue, 22 Apr 2014 07:46:53 +0200
Subject: score: Use common names for per-CPU variables

Use "cpu" for an arbitrary Per_CPU_Control variable.

Use "cpu_self" for the Per_CPU_Control of the current processor.

Use "cpu_index" for an arbitrary processor index.

Use "cpu_index_self" for the processor index of the current processor.

Use "cpu_count" for the processor count obtained via
_SMP_Get_processor_count().

Use "cpu_max" for the processor maximum obtained via
rtems_configuration_get_maximum_processors().
---
 cpukit/libmisc/cpuuse/cpuusagereset.c              | 12 ++---
 cpukit/score/include/rtems/score/percpu.h          | 58 +++++++++++-----------
 cpukit/score/include/rtems/score/profiling.h       | 20 ++++----
 cpukit/score/include/rtems/score/smpimpl.h         | 16 +++---
 cpukit/score/src/cpuset.c                          | 12 ++---
 cpukit/score/src/debugisthreaddispatchingallowed.c |  6 +--
 cpukit/score/src/isr.c                             | 16 +++---
 cpukit/score/src/percpu.c                          | 32 ++++++------
 cpukit/score/src/profilingisrentryexit.c           |  6 +--
 cpukit/score/src/smp.c                             | 46 ++++++++---------
 cpukit/score/src/threadcreateidle.c                | 22 ++++----
 cpukit/score/src/threaddispatch.c                  | 48 +++++++++---------
 cpukit/score/src/threaddispatchdisablelevel.c      | 44 ++++++++--------
 cpukit/score/src/threadhandler.c                   | 10 ++--
 cpukit/score/src/threadstartmultitasking.c         | 18 +++----
 15 files changed, 183 insertions(+), 183 deletions(-)
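As a reader's sketch of how these conventions combine (illustrative only, not
part of the change; it reuses names that appear in the hunks below, such as
_SMP_Get_processor_count(), _Per_CPU_Get_by_index() and _SMP_Send_message()):

    uint32_t         cpu_count = _SMP_Get_processor_count();
    uint32_t         cpu_index;
    Per_CPU_Control *cpu_self  = _Per_CPU_Get();

    for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

      /* "cpu" names an arbitrary processor, "cpu_self" the current one */
      if ( cpu != cpu_self ) {
        _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN );
      }
    }

This fragment mirrors the shutdown loop in percpu.c below, which sends
SMP_MESSAGE_SHUTDOWN to every other processor.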
diff --git a/cpukit/libmisc/cpuuse/cpuusagereset.c b/cpukit/libmisc/cpuuse/cpuusagereset.c
index e77ac2c006..a167e859f6 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereset.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereset.c
@@ -40,16 +40,16 @@ static void CPU_usage_Per_thread_handler(
 void rtems_cpu_usage_reset( void )
 {
 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-  uint32_t processor_count;
-  uint32_t processor;
+  uint32_t cpu_count;
+  uint32_t cpu_index;
 
   _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );
 
-  processor_count = rtems_get_processor_count();
-  for ( processor = 0 ; processor < processor_count ; ++processor ) {
-    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
+  cpu_count = rtems_get_processor_count();
+  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
+    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
 
-    per_cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
+    cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
   }
 #else
   CPU_usage_Ticks_at_last_reset = _Watchdog_Ticks_since_boot;
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index e491ffa002..afc38031df 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -347,55 +347,55 @@ typedef struct {
 extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
 
 #if defined( RTEMS_SMP )
-#define _Per_CPU_Acquire( per_cpu ) \
+#define _Per_CPU_Acquire( cpu ) \
   _SMP_ticket_lock_Acquire( \
-    &( per_cpu )->Lock, \
-    &( per_cpu )->Lock_stats_context \
+    &( cpu )->Lock, \
+    &( cpu )->Lock_stats_context \
   )
 #else
-#define _Per_CPU_Acquire( per_cpu ) \
+#define _Per_CPU_Acquire( cpu ) \
   do { \
-    (void) ( per_cpu ); \
+    (void) ( cpu ); \
   } while ( 0 )
 #endif
 
 #if defined( RTEMS_SMP )
-#define _Per_CPU_Release( per_cpu ) \
+#define _Per_CPU_Release( cpu ) \
   _SMP_ticket_lock_Release( \
-    &( per_cpu )->Lock, \
-    &( per_cpu )->Lock_stats_context \
+    &( cpu )->Lock, \
+    &( cpu )->Lock_stats_context \
   )
 #else
-#define _Per_CPU_Release( per_cpu ) \
+#define _Per_CPU_Release( cpu ) \
   do { \
-    (void) ( per_cpu ); \
+    (void) ( cpu ); \
  } while ( 0 )
 #endif
 
 #if defined( RTEMS_SMP )
-#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
+#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
   do { \
     _ISR_Disable_without_giant( isr_cookie ); \
-    _Per_CPU_Acquire( per_cpu ); \
+    _Per_CPU_Acquire( cpu ); \
   } while ( 0 )
 #else
-#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
+#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \
   do { \
     _ISR_Disable( isr_cookie ); \
-    (void) ( per_cpu ); \
+    (void) ( cpu ); \
   } while ( 0 )
 #endif
 
 #if defined( RTEMS_SMP )
-#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
+#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
   do { \
-    _Per_CPU_Release( per_cpu ); \
+    _Per_CPU_Release( cpu ); \
     _ISR_Enable_without_giant( isr_cookie ); \
   } while ( 0 )
 #else
-#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
+#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \
   do { \
-    (void) ( per_cpu ); \
+    (void) ( cpu ); \
     _ISR_Enable( isr_cookie ); \
   } while ( 0 )
 #endif
@@ -443,13 +443,13 @@ extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
 #if defined( RTEMS_SMP )
 static inline Per_CPU_Control *_Per_CPU_Get( void )
 {
-  Per_CPU_Control *per_cpu = _Per_CPU_Get_snapshot();
+  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
 
   _Assert(
-    per_cpu->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
+    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
   );
 
-  return per_cpu;
+  return cpu_self;
 }
 #else
 #define _Per_CPU_Get() _Per_CPU_Get_snapshot()
@@ -460,22 +460,22 @@ static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
   return &_Per_CPU_Information[ index ].per_cpu;
 }
 
-static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *per_cpu )
+static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
 {
   const Per_CPU_Control_envelope *per_cpu_envelope =
-    ( const Per_CPU_Control_envelope * ) per_cpu;
+    ( const Per_CPU_Control_envelope * ) cpu;
 
   return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
 }
 
 static inline bool _Per_CPU_Is_processor_started(
-  const Per_CPU_Control *per_cpu
+  const Per_CPU_Control *cpu
 )
 {
 #if defined( RTEMS_SMP )
-  return per_cpu->started;
+  return cpu->started;
 #else
-  (void) per_cpu;
+  (void) cpu;
 
   return true;
 #endif
@@ -483,9 +483,9 @@ static inline bool _Per_CPU_Is_processor_started(
 
 #if defined( RTEMS_SMP )
 
-static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *per_cpu )
+static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu )
 {
-  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( per_cpu ) );
+  _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) );
 }
 
 /**
@@ -496,7 +496,7 @@ static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *per_cpu )
 void _Per_CPU_Initialize(void);
 
 void _Per_CPU_State_change(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   Per_CPU_State new_state
 );
 
diff --git a/cpukit/score/include/rtems/score/profiling.h b/cpukit/score/include/rtems/score/profiling.h
index 62ac3c325e..f5fa6cc241 100644
--- a/cpukit/score/include/rtems/score/profiling.h
+++ b/cpukit/score/include/rtems/score/profiling.h
@@ -38,31 +38,31 @@ extern "C" {
  */
 
 static inline void _Profiling_Thread_dispatch_disable(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   uint32_t previous_thread_dispatch_disable_level
 )
 {
 #if defined( RTEMS_PROFILING )
   if ( previous_thread_dispatch_disable_level == 0 ) {
-    Per_CPU_Stats *stats = &per_cpu->Stats;
+    Per_CPU_Stats *stats = &cpu->Stats;
 
     stats->thread_dispatch_disabled_instant = _CPU_Counter_read();
     ++stats->thread_dispatch_disabled_count;
   }
 #else
-  (void) per_cpu;
+  (void) cpu;
   (void) previous_thread_dispatch_disable_level;
 #endif
 }
 
 static inline void _Profiling_Thread_dispatch_enable(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   uint32_t new_thread_dispatch_disable_level
 )
 {
 #if defined( RTEMS_PROFILING )
   if ( new_thread_dispatch_disable_level == 0 ) {
-    Per_CPU_Stats *stats = &per_cpu->Stats;
+    Per_CPU_Stats *stats = &cpu->Stats;
     CPU_Counter_ticks now = _CPU_Counter_read();
     CPU_Counter_ticks delta = _CPU_Counter_difference(
       now,
@@ -76,30 +76,30 @@ static inline void _Profiling_Thread_dispatch_enable(
     }
   }
 #else
-  (void) per_cpu;
+  (void) cpu;
   (void) new_thread_dispatch_disable_level;
 #endif
 }
 
 static inline void _Profiling_Update_max_interrupt_delay(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   CPU_Counter_ticks interrupt_delay
 )
 {
 #if defined( RTEMS_PROFILING )
-  Per_CPU_Stats *stats = &per_cpu->Stats;
+  Per_CPU_Stats *stats = &cpu->Stats;
 
   if ( stats->max_interrupt_delay < interrupt_delay ) {
     stats->max_interrupt_delay = interrupt_delay;
   }
 #else
-  (void) per_cpu;
+  (void) cpu;
   (void) interrupt_delay;
 #endif
 }
 
 void _Profiling_Outer_most_interrupt_entry_and_exit(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   CPU_Counter_ticks interrupt_entry_instant,
   CPU_Counter_ticks interrupt_exit_instant
 );
diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h
index 225da0fde4..c281592bdc 100644
--- a/cpukit/score/include/rtems/score/smpimpl.h
+++ b/cpukit/score/include/rtems/score/smpimpl.h
@@ -107,16 +107,16 @@ void _SMP_Start_multitasking_on_secondary_processor( void )
  */
 static inline void _SMP_Inter_processor_interrupt_handler( void )
 {
-  Per_CPU_Control *self_cpu = _Per_CPU_Get();
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
-  if ( self_cpu->message != 0 ) {
+  if ( cpu_self->message != 0 ) {
     uint32_t message;
     ISR_Level level;
 
-    _Per_CPU_ISR_disable_and_acquire( self_cpu, level );
-    message = self_cpu->message;
-    self_cpu->message = 0;
-    _Per_CPU_Release_and_ISR_enable( self_cpu, level );
+    _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
+    message = cpu_self->message;
+    cpu_self->message = 0;
+    _Per_CPU_Release_and_ISR_enable( cpu_self, level );
 
     if ( ( message & SMP_MESSAGE_SHUTDOWN ) != 0 ) {
       rtems_fatal( RTEMS_FATAL_SOURCE_SMP, SMP_FATAL_SHUTDOWN );
@@ -130,10 +130,10 @@ static inline void _SMP_Inter_processor_interrupt_handler( void )
  *
  * The target processor may be the sending processor.
  *
- * @param[in] cpu The target processor of the message.
+ * @param[in] cpu_index The target processor of the message.
  * @param[in] message The message.
  */
-void _SMP_Send_message( uint32_t cpu, uint32_t message );
+void _SMP_Send_message( uint32_t cpu_index, uint32_t message );
 
 /**
 * @brief Request of others CPUs.
diff --git a/cpukit/score/src/cpuset.c b/cpukit/score/src/cpuset.c
index 7addb0dba3..95fbd45ec4 100644
--- a/cpukit/score/src/cpuset.c
+++ b/cpukit/score/src/cpuset.c
@@ -32,22 +32,22 @@ static CPU_set_Control cpuset_default;
  */
 void _CPU_set_Handler_initialization()
 {
-  int i;
-  int max_cpus;
+  uint32_t cpu_count;
+  uint32_t cpu_index;
 
   /* We do not support a cpu count over CPU_SETSIZE */
-  max_cpus = _SMP_Get_processor_count();
+  cpu_count = _SMP_Get_processor_count();
 
   /* This should never happen */
-  _Assert( max_cpus <= CPU_SETSIZE );
+  _Assert( cpu_count <= CPU_SETSIZE );
 
   /* Initialize the affinity to be the set of all available CPU's */
   cpuset_default.set = &cpuset_default.preallocated;
   cpuset_default.setsize = sizeof( *cpuset_default.set );
   CPU_ZERO_S( cpuset_default.setsize, &cpuset_default.preallocated );
-  for (i=0; i<max_cpus; i++)
-    CPU_SET_S(i, cpuset_default.setsize, &cpuset_default.preallocated );
+  for (cpu_index=0; cpu_index<cpu_count; cpu_index++)
+    CPU_SET_S(cpu_index, cpuset_default.setsize, &cpuset_default.preallocated );
 }
diff --git a/cpukit/score/src/debugisthreaddispatchingallowed.c b/cpukit/score/src/debugisthreaddispatchingallowed.c
--- a/cpukit/score/src/debugisthreaddispatchingallowed.c
+++ b/cpukit/score/src/debugisthreaddispatchingallowed.c
@@ -25,10 +25,10 @@
     bool dispatch_allowed;
     ISR_Level level;
-    Per_CPU_Control *per_cpu;
+    Per_CPU_Control *cpu_self;
 
     _ISR_Disable_without_giant( level );
-    per_cpu = _Per_CPU_Get_snapshot();
-    dispatch_allowed = per_cpu->thread_dispatch_disable_level == 0;
+    cpu_self = _Per_CPU_Get_snapshot();
+    dispatch_allowed = cpu_self->thread_dispatch_disable_level == 0;
     _ISR_Enable_without_giant( level );
 
     return dispatch_allowed;
diff --git a/cpukit/score/src/isr.c b/cpukit/score/src/isr.c
index f3907a73b8..07f3e61f5a 100644
--- a/cpukit/score/src/isr.c
+++ b/cpukit/score/src/isr.c
@@ -41,8 +41,8 @@ void _ISR_Handler_initialization( void )
 #if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
   {
     size_t stack_size = rtems_configuration_get_interrupt_stack_size();
-    uint32_t max_cpus = rtems_configuration_get_maximum_processors();
-    uint32_t cpu;
+    uint32_t cpu_max = rtems_configuration_get_maximum_processors();
+    uint32_t cpu_index;
 
     if ( !_Stack_Is_enough( stack_size ) )
       _Terminate(
@@ -51,8 +51,8 @@ void _ISR_Handler_initialization( void )
         INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
       );
 
-    for ( cpu = 0 ; cpu < max_cpus; ++cpu ) {
-      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
+    for ( cpu_index = 0 ; cpu_index < cpu_max; ++cpu_index ) {
+      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
       void *low = _Workspace_Allocate_or_fatal_error( stack_size );
       void *high = _Addresses_Add_offset( low, stack_size );
 
@@ -60,8 +60,8 @@ void _ISR_Handler_initialization( void )
       high = _Addresses_Align_down( high, CPU_STACK_ALIGNMENT );
 #endif
 
-      per_cpu->interrupt_stack_low = low;
-      per_cpu->interrupt_stack_high = high;
+      cpu->interrupt_stack_low = low;
+      cpu->interrupt_stack_high = high;
 
       /*
        * Interrupt stack might have to be aligned and/or setup in a specific
@@ -71,8 +71,8 @@ void _ISR_Handler_initialization( void )
        */
 #if defined(_CPU_Interrupt_stack_setup)
       _CPU_Interrupt_stack_setup(
-        per_cpu->interrupt_stack_low,
-        per_cpu->interrupt_stack_high
+        cpu->interrupt_stack_low,
+        cpu->interrupt_stack_high
       );
 #endif
     }
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index c396ace6c9..91e92bead0 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -29,11 +29,11 @@ static SMP_lock_Control _Per_CPU_State_lock =
   SMP_LOCK_INITIALIZER("per-CPU state");
 
 static void _Per_CPU_State_busy_wait(
-  const Per_CPU_Control *per_cpu,
+  const Per_CPU_Control *cpu,
   Per_CPU_State new_state
 )
 {
-  Per_CPU_State state = per_cpu->state;
+  Per_CPU_State state = cpu->state;
 
   switch ( new_state ) {
     case PER_CPU_STATE_REQUEST_START_MULTITASKING:
@@ -42,7 +42,7 @@ static void _Per_CPU_State_busy_wait(
           && state != PER_CPU_STATE_SHUTDOWN
       ) {
         _CPU_SMP_Processor_event_receive();
-        state = per_cpu->state;
+        state = cpu->state;
       }
       break;
     case PER_CPU_STATE_UP:
@@ -51,7 +51,7 @@ static void _Per_CPU_State_busy_wait(
           && state != PER_CPU_STATE_SHUTDOWN
      ) {
        _CPU_SMP_Processor_event_receive();
-        state = per_cpu->state;
+        state = cpu->state;
      }
      break;
    default:
@@ -108,7 +108,7 @@ static Per_CPU_State _Per_CPU_State_get_next(
 }
 
 void _Per_CPU_State_change(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   Per_CPU_State new_state
 )
 {
@@ -116,31 +116,31 @@ void _Per_CPU_State_change(
   SMP_lock_Context lock_context;
   Per_CPU_State next_state;
 
-  _Per_CPU_State_busy_wait( per_cpu, new_state );
+  _Per_CPU_State_busy_wait( cpu, new_state );
 
   _SMP_lock_ISR_disable_and_acquire( lock, &lock_context );
 
-  next_state = _Per_CPU_State_get_next( per_cpu->state, new_state );
-  per_cpu->state = next_state;
+  next_state = _Per_CPU_State_get_next( cpu->state, new_state );
+  cpu->state = next_state;
 
   if ( next_state == PER_CPU_STATE_SHUTDOWN ) {
-    uint32_t ncpus = rtems_configuration_get_maximum_processors();
-    uint32_t cpu;
+    uint32_t cpu_max = rtems_configuration_get_maximum_processors();
+    uint32_t cpu_index;
 
-    for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
-      Per_CPU_Control *other_cpu = _Per_CPU_Get_by_index( cpu );
+    for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+      Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index( cpu_index );
 
-      if ( per_cpu != other_cpu ) {
-        switch ( other_cpu->state ) {
+      if ( cpu_other != cpu ) {
+        switch ( cpu_other->state ) {
          case PER_CPU_STATE_UP:
-            _SMP_Send_message( cpu, SMP_MESSAGE_SHUTDOWN );
+            _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN );
            break;
          default:
            /* Nothing to do */
            break;
         }
 
-        other_cpu->state = PER_CPU_STATE_SHUTDOWN;
+        cpu_other->state = PER_CPU_STATE_SHUTDOWN;
       }
     }
   }
diff --git a/cpukit/score/src/profilingisrentryexit.c b/cpukit/score/src/profilingisrentryexit.c
index d06e06d251..c7842a5dcb 100644
--- a/cpukit/score/src/profilingisrentryexit.c
+++ b/cpukit/score/src/profilingisrentryexit.c
@@ -19,13 +19,13 @@
 #include <rtems/score/profiling.h>
 
 void _Profiling_Outer_most_interrupt_entry_and_exit(
-  Per_CPU_Control *per_cpu,
+  Per_CPU_Control *cpu,
   CPU_Counter_ticks interrupt_entry_instant,
   CPU_Counter_ticks interrupt_exit_instant
 )
 {
 #if defined( RTEMS_PROFILING )
-  Per_CPU_Stats *stats = &per_cpu->Stats;
+  Per_CPU_Stats *stats = &cpu->Stats;
   CPU_Counter_ticks delta = _CPU_Counter_difference(
     interrupt_exit_instant,
     interrupt_entry_instant
@@ -38,7 +38,7 @@ void _Profiling_Outer_most_interrupt_entry_and_exit(
     stats->max_interrupt_time = delta;
   }
 #else
-  (void) per_cpu;
+  (void) cpu;
   (void) interrupt_entry_instant;
   (void) interrupt_exit_instant;
 #endif
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 6df72070ec..f03a4c05e4 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -41,17 +41,17 @@ static void _SMP_Check_scheduler_configuration( void )
 
 static void _SMP_Start_processors( uint32_t cpu_count )
 {
-  uint32_t cpu_self = _SMP_Get_current_processor();
+  uint32_t cpu_index_self = _SMP_Get_current_processor();
   uint32_t cpu_index;
 
   for ( cpu_index = 0 ; cpu_index < cpu_count; ++cpu_index ) {
     const Scheduler_Assignment *assignment =
       _Scheduler_Get_assignment( cpu_index );
-    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu_index );
+    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
     bool started;
 
-    if ( cpu_index != cpu_self ) {
+    if ( cpu_index != cpu_index_self ) {
       if ( _Scheduler_Should_start_processor( assignment ) ) {
         started = _CPU_SMP_Start_processor( cpu_index );
 
@@ -69,7 +69,7 @@ static void _SMP_Start_processors( uint32_t cpu_count )
       }
     }
 
-    per_cpu->started = started;
+    cpu->started = started;
 
     if ( started ) {
       ++assignment->scheduler->context->processor_count;
@@ -86,9 +86,9 @@ void _SMP_Handler_initialize( void )
   uint32_t cpu_index;
 
   for ( cpu_index = 0 ; cpu_index < cpu_max; ++cpu_index ) {
-    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu_index );
+    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
 
-    _SMP_ticket_lock_Initialize( &per_cpu->Lock, "per-CPU" );
+    _SMP_ticket_lock_Initialize( &cpu->Lock, "per-CPU" );
   }
 
   /*
@@ -116,15 +116,15 @@ void _SMP_Handler_initialize( void )
 void _SMP_Request_start_multitasking( void )
 {
   Per_CPU_Control *self_cpu = _Per_CPU_Get();
-  uint32_t ncpus = _SMP_Get_processor_count();
-  uint32_t cpu;
+  uint32_t cpu_count = _SMP_Get_processor_count();
+  uint32_t cpu_index;
 
   _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );
 
-  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
-    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
+  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
+    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
 
-    _Per_CPU_State_change( per_cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
+    _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
   }
 }
 
@@ -156,29 +156,29 @@ void _SMP_Request_shutdown( void )
   _Giant_Drop( self_cpu );
 }
 
-void _SMP_Send_message( uint32_t cpu, uint32_t message )
+void _SMP_Send_message( uint32_t cpu_index, uint32_t message )
 {
-  Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
+  Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
   ISR_Level level;
 
-  _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
-  per_cpu->message |= message;
-  _Per_CPU_Release_and_ISR_enable( per_cpu, level );
+  _Per_CPU_ISR_disable_and_acquire( cpu, level );
+  cpu->message |= message;
+  _Per_CPU_Release_and_ISR_enable( cpu, level );
 
-  _CPU_SMP_Send_interrupt( cpu );
+  _CPU_SMP_Send_interrupt( cpu_index );
 }
 
 void _SMP_Broadcast_message( uint32_t message )
 {
-  uint32_t self = _SMP_Get_current_processor();
-  uint32_t ncpus = _SMP_Get_processor_count();
-  uint32_t cpu;
+  uint32_t cpu_count = _SMP_Get_processor_count();
+  uint32_t cpu_index_self = _SMP_Get_current_processor();
+  uint32_t cpu_index;
 
   _Assert( _Debug_Is_thread_dispatching_allowed() );
 
-  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
-    if ( cpu != self ) {
-      _SMP_Send_message( cpu, message );
+  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
+    if ( cpu_index != cpu_index_self ) {
+      _SMP_Send_message( cpu_index, message );
     }
   }
 }
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 2a242658a5..8a5812f1a5 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -23,7 +23,7 @@
 #include <rtems/score/stackimpl.h>
 #include <rtems/config.h>
 
-static void _Thread_Create_idle_for_cpu( Per_CPU_Control *per_cpu )
+static void _Thread_Create_idle_for_cpu( Per_CPU_Control *cpu )
 {
   Objects_Name name;
   Thread_Control *idle;
@@ -40,7 +40,7 @@ static void _Thread_Create_idle_for_cpu( Per_CPU_Control *per_cpu )
   _Thread_Initialize(
     &_Thread_Internal_information,
     idle,
-    _Scheduler_Get_by_CPU( per_cpu ),
+    _Scheduler_Get_by_CPU( cpu ),
     NULL,        /* allocate the stack */
     _Stack_Ensure_minimum( rtems_configuration_get_idle_task_stack_size() ),
     CPU_IDLE_TASK_IS_FP,
@@ -56,8 +56,8 @@ static void _Thread_Create_idle_for_cpu( Per_CPU_Control *per_cpu )
    *  WARNING!!! This is necessary to "kick" start the system and
    *             MUST be done before _Thread_Start is invoked.
    */
-  per_cpu->heir      =
-  per_cpu->executing = idle;
+  cpu->heir      =
+  cpu->executing = idle;
 
   _Thread_Start(
     idle,
@@ -65,20 +65,20 @@ static void _Thread_Create_idle_for_cpu( Per_CPU_Control *per_cpu )
     rtems_configuration_get_idle_task(),
     NULL,
     0,
-    per_cpu
+    cpu
   );
 }
 
 void _Thread_Create_idle( void )
 {
-  uint32_t processor_count = _SMP_Get_processor_count();
-  uint32_t processor;
+  uint32_t cpu_count = _SMP_Get_processor_count();
+  uint32_t cpu_index;
 
-  for ( processor = 0 ; processor < processor_count ; ++processor ) {
-    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
+  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
+    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
 
-    if ( _Per_CPU_Is_processor_started( per_cpu ) ) {
-      _Thread_Create_idle_for_cpu( per_cpu );
+    if ( _Per_CPU_Is_processor_started( cpu ) ) {
+      _Thread_Create_idle_for_cpu( cpu );
     }
   }
 }
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index 9e3f86e23b..f1c6cfd103 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -42,27 +42,27 @@ static Thread_Action *_Thread_Get_post_switch_action(
 static void _Thread_Run_post_switch_actions( Thread_Control *executing )
 {
   ISR_Level        level;
-  Per_CPU_Control *cpu;
+  Per_CPU_Control *cpu_self;
   Thread_Action   *action;
 
-  cpu = _Thread_Action_ISR_disable_and_acquire( executing, &level );
+  cpu_self = _Thread_Action_ISR_disable_and_acquire( executing, &level );
   action = _Thread_Get_post_switch_action( executing );
 
   while ( action != NULL ) {
     _Chain_Set_off_chain( &action->Node );
 
-    ( *action->handler )( executing, action, cpu, level );
+    ( *action->handler )( executing, action, cpu_self, level );
 
-    cpu = _Thread_Action_ISR_disable_and_acquire( executing, &level );
+    cpu_self = _Thread_Action_ISR_disable_and_acquire( executing, &level );
     action = _Thread_Get_post_switch_action( executing );
   }
 
-  _Thread_Action_release_and_ISR_enable( cpu, level );
+  _Thread_Action_release_and_ISR_enable( cpu_self, level );
 }
 
 void _Thread_Dispatch( void )
 {
-  Per_CPU_Control *per_cpu;
+  Per_CPU_Control *cpu_self;
   Thread_Control  *executing;
   Thread_Control  *heir;
   ISR_Level        level;
@@ -71,10 +71,10 @@ void _Thread_Dispatch( void )
   _ISR_Disable_without_giant( level );
 #endif
 
-  per_cpu = _Per_CPU_Get();
-  _Assert( per_cpu->thread_dispatch_disable_level == 0 );
-  _Profiling_Thread_dispatch_disable( per_cpu, 0 );
-  per_cpu->thread_dispatch_disable_level = 1;
+  cpu_self = _Per_CPU_Get();
+  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
+  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
+  cpu_self->thread_dispatch_disable_level = 1;
 
 #if defined( RTEMS_SMP )
   _ISR_Enable_without_giant( level );
@@ -83,8 +83,8 @@ void _Thread_Dispatch( void )
   /*
    * Now determine if we need to perform a dispatch on the current CPU.
    */
-  executing = per_cpu->executing;
-  _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
+  executing = cpu_self->executing;
+  _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
 #if defined( RTEMS_SMP )
   /*
    * On SMP the complete context switch must be atomic with respect to one
@@ -93,11 +93,11 @@ void _Thread_Dispatch( void )
    * cannot execute on more than one processor at a time.  See also
    * _Thread_Handler() since _Context_switch() may branch to this function.
   */
-  if ( per_cpu->dispatch_necessary ) {
+  if ( cpu_self->dispatch_necessary ) {
 #else
-  while ( per_cpu->dispatch_necessary ) {
+  while ( cpu_self->dispatch_necessary ) {
 #endif
-    per_cpu->dispatch_necessary = false;
+    cpu_self->dispatch_necessary = false;
 
 #if defined( RTEMS_SMP )
     /*
@@ -107,12 +107,12 @@ void _Thread_Dispatch( void )
      */
     _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
 #endif
 
-    heir = per_cpu->heir;
-    per_cpu->executing = heir;
+    heir = cpu_self->heir;
+    cpu_self->executing = heir;
 
 #if defined( RTEMS_SMP )
     executing->is_executing = false;
     heir->is_executing = true;
 #endif
@@ -142,11 +142,11 @@ void _Thread_Dispatch( void )
 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
       _Thread_Update_cpu_time_used(
         executing,
-        &per_cpu->time_of_last_context_switch
+        &cpu_self->time_of_last_context_switch
       );
 #else
       {
-        _TOD_Get_uptime( &per_cpu->time_of_last_context_switch );
+        _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
         heir->cpu_time_used++;
       }
 #endif
@@ -205,7 +205,7 @@ void _Thread_Dispatch( void )
      * heir thread may have migrated from another processor.  Values from the
      * stack or non-volatile registers reflect the old execution environment.
      */
-    per_cpu = _Per_CPU_Get();
+    cpu_self = _Per_CPU_Get();
 
 #if !defined( RTEMS_SMP )
     _ISR_Disable( level );
 #endif
   }
 
@@ -213,11 +213,11 @@ void _Thread_Dispatch( void )
 post_switch:
-  _Assert( per_cpu->thread_dispatch_disable_level == 1 );
-  per_cpu->thread_dispatch_disable_level = 0;
-  _Profiling_Thread_dispatch_enable( per_cpu, 0 );
+  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
+  cpu_self->thread_dispatch_disable_level = 0;
+  _Profiling_Thread_dispatch_enable( cpu_self, 0 );
 
-  _Per_CPU_Release_and_ISR_enable( per_cpu, level );
+  _Per_CPU_Release_and_ISR_enable( cpu_self, level );
 
   _Thread_Run_post_switch_actions( executing );
 }
diff --git a/cpukit/score/src/threaddispatchdisablelevel.c b/cpukit/score/src/threaddispatchdisablelevel.c
index 1a17aeeaf9..3b7837c1cb 100644
--- a/cpukit/score/src/threaddispatchdisablelevel.c
+++ b/cpukit/score/src/threaddispatchdisablelevel.c
@@ -34,40 +34,40 @@ static Giant_Control _Giant = {
   .nest_level = 0
 };
 
-static void _Giant_Do_acquire( Per_CPU_Control *self_cpu )
+static void _Giant_Do_acquire( Per_CPU_Control *cpu_self )
 {
   Giant_Control *giant = &_Giant;
 
-  if ( giant->owner_cpu != self_cpu ) {
-    _SMP_lock_Acquire( &giant->lock, &self_cpu->Giant_lock_context );
-    giant->owner_cpu = self_cpu;
+  if ( giant->owner_cpu != cpu_self ) {
+    _SMP_lock_Acquire( &giant->lock, &cpu_self->Giant_lock_context );
+    giant->owner_cpu = cpu_self;
     giant->nest_level = 1;
   } else {
     ++giant->nest_level;
   }
 }
 
-static void _Giant_Do_release( Per_CPU_Control *self_cpu )
+static void _Giant_Do_release( Per_CPU_Control *cpu_self )
 {
   Giant_Control *giant = &_Giant;
 
   --giant->nest_level;
   if ( giant->nest_level == 0 ) {
     giant->owner_cpu = NO_OWNER_CPU;
-    _SMP_lock_Release( &giant->lock, &self_cpu->Giant_lock_context );
+    _SMP_lock_Release( &giant->lock, &cpu_self->Giant_lock_context );
   }
 }
 
-void _Giant_Drop( Per_CPU_Control *self_cpu )
+void _Giant_Drop( Per_CPU_Control *cpu_self )
 {
   Giant_Control *giant = &_Giant;
 
   _Assert( _ISR_Get_level() != 0 );
 
-  if ( giant->owner_cpu == self_cpu ) {
+  if ( giant->owner_cpu == cpu_self ) {
     giant->nest_level = 0;
     giant->owner_cpu = NO_OWNER_CPU;
-    _SMP_lock_Release( &giant->lock, &self_cpu->Giant_lock_context );
+    _SMP_lock_Release( &giant->lock, &cpu_self->Giant_lock_context );
   }
 }
 
@@ -75,7 +75,7 @@ uint32_t _Thread_Dispatch_increment_disable_level( void )
 {
   ISR_Level isr_level;
   uint32_t disable_level;
-  Per_CPU_Control *self_cpu;
+  Per_CPU_Control *cpu_self;
 
   _ISR_Disable_without_giant( isr_level );
 
@@ -83,14 +83,14 @@ uint32_t _Thread_Dispatch_increment_disable_level( void )
    * We must obtain the processor after interrupts are disabled to prevent
    * thread migration.
    */
-  self_cpu = _Per_CPU_Get();
+  cpu_self = _Per_CPU_Get();
 
-  _Giant_Do_acquire( self_cpu );
+  _Giant_Do_acquire( cpu_self );
 
-  disable_level = self_cpu->thread_dispatch_disable_level;
-  _Profiling_Thread_dispatch_disable( self_cpu, disable_level );
+  disable_level = cpu_self->thread_dispatch_disable_level;
+  _Profiling_Thread_dispatch_disable( cpu_self, disable_level );
   ++disable_level;
-  self_cpu->thread_dispatch_disable_level = disable_level;
+  cpu_self->thread_dispatch_disable_level = disable_level;
 
   _ISR_Enable_without_giant( isr_level );
 
@@ -101,19 +101,19 @@ uint32_t _Thread_Dispatch_decrement_disable_level( void )
 {
   ISR_Level isr_level;
   uint32_t disable_level;
-  Per_CPU_Control *self_cpu;
+  Per_CPU_Control *cpu_self;
 
   _ISR_Disable_without_giant( isr_level );
 
-  self_cpu = _Per_CPU_Get();
-  disable_level = self_cpu->thread_dispatch_disable_level;
+  cpu_self = _Per_CPU_Get();
+  disable_level = cpu_self->thread_dispatch_disable_level;
   --disable_level;
-  self_cpu->thread_dispatch_disable_level = disable_level;
+  cpu_self->thread_dispatch_disable_level = disable_level;
 
-  _Giant_Do_release( self_cpu );
-  _Assert( disable_level != 0 || _Giant.owner_cpu != self_cpu );
+  _Giant_Do_release( cpu_self );
+  _Assert( disable_level != 0 || _Giant.owner_cpu != cpu_self );
 
-  _Profiling_Thread_dispatch_enable( self_cpu, disable_level );
+  _Profiling_Thread_dispatch_enable( cpu_self, disable_level );
   _ISR_Enable_without_giant( isr_level );
 
   return disable_level;
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 3d4d9b29fb..229e74f937 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -148,15 +148,15 @@ void _Thread_Handler( void )
      * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
      * release it here and set the desired interrupt level of the thread.
      */
-    Per_CPU_Control *per_cpu = _Per_CPU_Get();
+    Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
-    _Assert( per_cpu->thread_dispatch_disable_level == 1 );
+    _Assert( cpu_self->thread_dispatch_disable_level == 1 );
     _Assert( _ISR_Get_level() != 0 );
 
-    per_cpu->thread_dispatch_disable_level = 0;
-    _Profiling_Thread_dispatch_enable( per_cpu, 0 );
+    cpu_self->thread_dispatch_disable_level = 0;
+    _Profiling_Thread_dispatch_enable( cpu_self, 0 );
 
-    _Per_CPU_Release( per_cpu );
+    _Per_CPU_Release( cpu_self );
 
     level = executing->Start.isr_level;
     _ISR_Set_level( level);
diff --git a/cpukit/score/src/threadstartmultitasking.c b/cpukit/score/src/threadstartmultitasking.c
index e170a0d71a..78a438f6d8 100644
--- a/cpukit/score/src/threadstartmultitasking.c
+++ b/cpukit/score/src/threadstartmultitasking.c
@@ -22,30 +22,30 @@
 
 void _Thread_Start_multitasking( void )
 {
-  Per_CPU_Control *self_cpu = _Per_CPU_Get();
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control  *heir;
 
 #if defined(RTEMS_SMP)
-  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_UP );
+  _Per_CPU_State_change( cpu_self, PER_CPU_STATE_UP );
 
   /*
    * Threads begin execution in the _Thread_Handler() function.  This
    * function will set the thread dispatch disable level to zero and calls
   * _Per_CPU_Release().
   */
-  _Per_CPU_Acquire( self_cpu );
-  self_cpu->thread_dispatch_disable_level = 1;
+  _Per_CPU_Acquire( cpu_self );
+  cpu_self->thread_dispatch_disable_level = 1;
 #endif
 
-  heir = self_cpu->heir;
+  heir = cpu_self->heir;
 
 #if defined(RTEMS_SMP)
-  self_cpu->executing->is_executing = false;
+  cpu_self->executing->is_executing = false;
   heir->is_executing = true;
 #endif
 
-  self_cpu->dispatch_necessary = false;
-  self_cpu->executing = heir;
+  cpu_self->dispatch_necessary = false;
+  cpu_self->executing = heir;
 
   /*
    * Get the init task(s) running.
@@ -69,7 +69,7 @@ void _Thread_Start_multitasking( void )
   _Context_Restore_fp( &heir->fp_context );
 #endif
 
-  _Profiling_Thread_dispatch_disable( self_cpu, 0 );
+  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
 
 #if defined(_CPU_Start_multitasking)
   _CPU_Start_multitasking( &heir->Registers );
--
cgit v1.2.3