From 5d6b21198140f406a71599a2d388b6ec47ee3337 Mon Sep 17 00:00:00 2001
From: Sebastian Huber
Date: Wed, 7 Sep 2016 09:04:45 +0200
Subject: score: Add scheduler node table for each thread

Update #2556.
---
 cpukit/sapi/include/confdefs.h                    | 64 +++++++++++----------
 cpukit/score/include/rtems/score/schedulerimpl.h  | 41 +++++++++-----
 cpukit/score/include/rtems/score/schedulernode.h  |  9 +++
 cpukit/score/include/rtems/score/thread.h         |  9 ++-
 cpukit/score/include/rtems/score/threadimpl.h     | 18 +++++-
 cpukit/score/src/threadinitialize.c               | 71 ++++++++++++++++++++----
 cpukit/score/src/threadmp.c                       |  2 +-
 7 files changed, 156 insertions(+), 58 deletions(-)

diff --git a/cpukit/sapi/include/confdefs.h b/cpukit/sapi/include/confdefs.h
index b75839e433..3eac92c636 100644
--- a/cpukit/sapi/include/confdefs.h
+++ b/cpukit/sapi/include/confdefs.h
@@ -3224,38 +3224,44 @@ extern rtems_initialization_tasks_table Initialization_tasks[];
   )
 
 #ifdef CONFIGURE_INIT
+  typedef union {
+    Scheduler_Node Base;
+    #ifdef CONFIGURE_SCHEDULER_CBS
+      Scheduler_CBS_Node CBS;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_EDF
+      Scheduler_EDF_Node EDF;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_PRIORITY
+      Scheduler_priority_Node Priority;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_SIMPLE_SMP
+      Scheduler_SMP_Node Simple_SMP;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_PRIORITY_SMP
+      Scheduler_priority_SMP_Node Priority_SMP;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP
+      Scheduler_priority_affinity_SMP_Node Priority_affinity_SMP;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_STRONG_APA
+      Scheduler_strong_APA_Node Strong_APA;
+    #endif
+    #ifdef CONFIGURE_SCHEDULER_USER_PER_THREAD
+      CONFIGURE_SCHEDULER_USER_PER_THREAD User;
+    #endif
+  } Configuration_Scheduler_node;
+
+  #ifdef RTEMS_SMP
+    const size_t _Scheduler_Node_size = sizeof( Configuration_Scheduler_node );
+  #endif
+
   typedef struct {
     Thread_Control Control;
     #if CONFIGURE_MAXIMUM_USER_EXTENSIONS > 0
       void *extensions[ CONFIGURE_MAXIMUM_USER_EXTENSIONS + 1 ];
     #endif
-    union {
-      Scheduler_Node Base;
-      #ifdef CONFIGURE_SCHEDULER_CBS
-        Scheduler_CBS_Node CBS;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_EDF
-        Scheduler_EDF_Node EDF;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_PRIORITY
-        Scheduler_priority_Node Priority;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_SIMPLE_SMP
-        Scheduler_SMP_Node Simple_SMP;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_PRIORITY_SMP
-        Scheduler_priority_SMP_Node Priority_SMP;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP
-        Scheduler_priority_affinity_SMP_Node Priority_affinity_SMP;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_STRONG_APA
-        Scheduler_strong_APA_Node Strong_APA;
-      #endif
-      #ifdef CONFIGURE_SCHEDULER_USER_PER_THREAD
-        CONFIGURE_SCHEDULER_USER_PER_THREAD User;
-      #endif
-    } Scheduler;
+    Configuration_Scheduler_node Scheduler_nodes[ CONFIGURE_SCHEDULER_COUNT ];
     RTEMS_API_Control API_RTEMS;
     #ifdef RTEMS_POSIX_API
       POSIX_API_Control API_POSIX;
@@ -3273,8 +3279,8 @@ extern rtems_initialization_tasks_table Initialization_tasks[];
 
   const Thread_Control_add_on _Thread_Control_add_ons[] = {
     {
-      offsetof( Configuration_Thread_control, Control.Scheduler.node ),
-      offsetof( Configuration_Thread_control, Scheduler )
+      offsetof( Configuration_Thread_control, Control.Scheduler.nodes ),
+      offsetof( Configuration_Thread_control, Scheduler_nodes )
     }, {
      offsetof(
        Configuration_Thread_control,
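The hunk above turns the per-thread scheduler data from an anonymous union embedded in Configuration_Thread_control into a Configuration_Scheduler_node table with one element per configured scheduler. Each element is a union of every node type the configuration can enable, so one element size covers whichever scheduler owns a given slot, and on SMP that size is exported to the score as _Scheduler_Node_size. A minimal standalone sketch of the sizing idea, using made-up node types in place of the real scheduler headers:

  #include <stdio.h>

  /* Hypothetical stand-ins for Scheduler_Node, Scheduler_EDF_Node, and so on. */
  typedef struct { int owner; int state; } Node_base;
  typedef struct { Node_base Base; long deadline; } Node_EDF;
  typedef struct { Node_base Base; unsigned char queue_index; char pad[ 15 ]; } Node_priority;

  /* One table element per configured scheduler: the union makes every
   * slot large enough for whichever node type its scheduler needs. */
  typedef union {
    Node_base     Base;
    Node_EDF      EDF;
    Node_priority Priority;
  } Config_node;

  int main( void )
  {
    Config_node nodes[ 2 ];   /* e.g. two configured scheduler instances */

    printf( "element size: %zu\n", sizeof( Config_node ) );
    printf( "table size:   %zu\n", sizeof( nodes ) );
    return 0;
  }
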
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index dea1888a51..2fdc01a695 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -772,7 +772,11 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
   const Thread_Control *the_thread
 )
 {
+#if defined(RTEMS_SMP)
   return the_thread->Scheduler.node;
+#else
+  return the_thread->Scheduler.nodes;
+#endif
 }
 
 RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
@@ -1377,7 +1381,8 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
   Priority_Control         priority
 )
 {
-  Scheduler_Node *own_node;
+  Scheduler_Node *new_scheduler_node;
+  Scheduler_Node *old_scheduler_node;
 
   if (
     _Thread_Owns_resources( the_thread )
@@ -1386,22 +1391,34 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
     return STATUS_RESOURCE_IN_USE;
   }
 
-  own_node = _Thread_Scheduler_get_own_node( the_thread );
-  _Priority_Plain_extract( &own_node->Wait.Priority, &the_thread->Real_priority );
+  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+  _Priority_Plain_extract(
+    &old_scheduler_node->Wait.Priority,
+    &the_thread->Real_priority
+  );
 
-  if ( !_Priority_Is_empty( &own_node->Wait.Priority ) ) {
+  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
     _Priority_Plain_insert(
-      &own_node->Wait.Priority,
+      &old_scheduler_node->Wait.Priority,
       &the_thread->Real_priority,
       the_thread->Real_priority.priority
     );
     return STATUS_RESOURCE_IN_USE;
   }
 
+#if defined(RTEMS_SMP)
+  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
+    the_thread,
+    _Scheduler_Get_index( new_scheduler )
+  );
+#else
+  new_scheduler_node = old_scheduler_node;
+#endif
+
   the_thread->Start.initial_priority = priority;
   _Priority_Node_set_priority( &the_thread->Real_priority, priority );
   _Priority_Initialize_one(
-    &own_node->Wait.Priority,
+    &new_scheduler_node->Wait.Priority,
     &the_thread->Real_priority
   );
 
@@ -1420,15 +1437,11 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
       _Scheduler_Block( the_thread );
     }
 
-    _Scheduler_Node_destroy( old_scheduler, own_node );
     the_thread->Scheduler.own_control = new_scheduler;
     the_thread->Scheduler.control = new_scheduler;
-    _Scheduler_Node_initialize(
-      new_scheduler,
-      own_node,
-      the_thread,
-      priority
-    );
+    the_thread->Scheduler.own_node = new_scheduler_node;
+    the_thread->Scheduler.node = new_scheduler_node;
+    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
 
     if ( _States_Is_ready( current_state ) ) {
       _Scheduler_Unblock( the_thread );
@@ -1439,7 +1452,7 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
   }
 #endif
 
-  _Scheduler_Node_set_priority( own_node, priority, false );
+  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
   _Scheduler_Update_priority( the_thread );
   return STATUS_SUCCESSFUL;
 }
diff --git a/cpukit/score/include/rtems/score/schedulernode.h b/cpukit/score/include/rtems/score/schedulernode.h
index 9827d21e64..2954db5350 100644
--- a/cpukit/score/include/rtems/score/schedulernode.h
+++ b/cpukit/score/include/rtems/score/schedulernode.h
@@ -188,6 +188,15 @@ typedef struct Scheduler_Node {
   } Priority;
 } Scheduler_Node;
 
+#if defined(RTEMS_SMP)
+/**
+ * @brief The size of a scheduler node.
+ *
+ * This value is provided via <rtems/confdefs.h>.
+ */
+extern const size_t _Scheduler_Node_size;
+#endif
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
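With one pre-allocated node per scheduler instance, _Scheduler_Set above no longer destroys the old node and re-initializes it for the new scheduler; on SMP it fetches the destination node by scheduler index, repoints the thread at it, and updates that node's priority. A rough sketch of the pointer switch with hypothetical miniature types (not the real score structures):

  #include <stddef.h>
  #include <stdio.h>

  typedef struct {
    int priority;
  } Node;

  enum { SCHEDULER_COUNT = 3 };

  typedef struct {
    Node   nodes[ SCHEDULER_COUNT ]; /* one node per scheduler instance */
    Node  *node;                     /* node of the currently set scheduler */
    size_t scheduler_index;
  } Thread;

  /* Mirrors the SMP branch of _Scheduler_Set after the patch: look the
   * node up by index and repoint, instead of destroy and re-initialize. */
  static void thread_set_scheduler( Thread *t, size_t index, int priority )
  {
    t->node = &t->nodes[ index ];
    t->node->priority = priority;
    t->scheduler_index = index;
  }

  int main( void )
  {
    Thread t;

    thread_set_scheduler( &t, 0, 5 );   /* start on scheduler 0 */
    thread_set_scheduler( &t, 2, 10 );  /* move to scheduler 2: pointer switch only */
    printf( "scheduler %zu, priority %d\n", t.scheduler_index, t.node->priority );
    return 0;
  }
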
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 393d431e0e..17aeacde29 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -271,7 +271,6 @@ typedef struct {
    * priority and ask for help operations.
    */
   Scheduler_Node *own_node;
-#endif
 
   /**
    * @brief The scheduler node of this thread.
@@ -284,12 +283,18 @@ typedef struct {
    */
   Scheduler_Node *node;
 
-#if defined(RTEMS_SMP)
   /**
    * @brief The processor assigned by the current scheduler.
    */
   struct Per_CPU_Control *cpu;
 #endif
+
+  /**
+   * @brief The scheduler nodes of this thread.
+   *
+   * Each thread has a scheduler node for each scheduler instance.
+   */
+  Scheduler_Node *nodes;
 } Thread_Scheduler_control;
 
 /**
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 7f9dccf5e2..09af9c15dd 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -993,7 +993,23 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
 #if defined(RTEMS_SMP)
   return the_thread->Scheduler.own_node;
 #else
-  return the_thread->Scheduler.node;
+  return the_thread->Scheduler.nodes;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
+  const Thread_Control *the_thread,
+  size_t                scheduler_index
+)
+{
+#if defined(RTEMS_SMP)
+  return (Scheduler_Node *)
+    ( (uintptr_t) the_thread->Scheduler.nodes
+      + scheduler_index * _Scheduler_Node_size );
+#else
+  _Assert( scheduler_index == 0 );
+  (void) scheduler_index;
+  return the_thread->Scheduler.nodes;
 #endif
 }
 
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 76b1dff6e4..590d7be995 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -53,7 +53,11 @@ bool _Thread_Initialize(
   bool                     extension_status;
   size_t                   i;
   Scheduler_Node          *scheduler_node;
-  bool                     scheduler_node_initialized = false;
+#if defined(RTEMS_SMP)
+  Scheduler_Node          *scheduler_node_for_index;
+  const Scheduler_Control *scheduler_for_index;
+#endif
+  size_t                   scheduler_index;
   Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );
 
 #if defined( RTEMS_SMP )
@@ -105,6 +109,8 @@ bool _Thread_Initialize(
     actual_stack_size
   );
 
+  scheduler_index = 0;
+
   /* Thread-local storage (TLS) area allocation */
   if ( tls_size > 0 ) {
     uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
@@ -174,13 +180,54 @@ bool _Thread_Initialize(
 #endif
   }
 
-  scheduler_node = the_thread->Scheduler.node;
+#if defined(RTEMS_SMP)
+  scheduler_node_for_index = the_thread->Scheduler.nodes;
+  scheduler_for_index = &_Scheduler_Table[ 0 ];
+
+  while ( scheduler_index < _Scheduler_Count ) {
+    Priority_Control priority_for_index;
+
+    if ( scheduler_for_index == scheduler ) {
+      priority_for_index = priority;
+      scheduler_node = scheduler_node_for_index;
+    } else {
+      priority_for_index = 0;
+    }
+
+    _Scheduler_Node_initialize(
+      scheduler_for_index,
+      scheduler_node_for_index,
+      the_thread,
+      priority_for_index
+    );
+    scheduler_node_for_index = (Scheduler_Node *)
+      ( (uintptr_t) scheduler_node_for_index + _Scheduler_Node_size );
+    ++scheduler_for_index;
+    ++scheduler_index;
+  }
+#else
+  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+  _Scheduler_Node_initialize(
+    scheduler,
+    scheduler_node,
+    the_thread,
+    priority
+  );
+  scheduler_index = 1;
+#endif
+
+  _Priority_Node_initialize( &the_thread->Real_priority, priority );
+  _Priority_Initialize_one(
+    &scheduler_node->Wait.Priority,
+    &the_thread->Real_priority
+  );
 
 #if defined(RTEMS_SMP)
   RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
   the_thread->Scheduler.own_control = scheduler;
   the_thread->Scheduler.control = scheduler;
   the_thread->Scheduler.own_node = scheduler_node;
+  the_thread->Scheduler.node = scheduler_node;
   _Resource_Node_initialize( &the_thread->Resource_node );
   _ISR_lock_Initialize(
     &the_thread->Wait.Lock.Default,
@@ -202,14 +249,6 @@ bool _Thread_Initialize(
 
   RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );
 
-  _Priority_Node_initialize( &the_thread->Real_priority, priority );
-  _Priority_Initialize_one(
-    &scheduler_node->Wait.Priority,
-    &the_thread->Real_priority
-  );
-  _Scheduler_Node_initialize( scheduler, scheduler_node, the_thread, priority );
-  scheduler_node_initialized = true;
-
   /* POSIX Keys */
   _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
   _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );
@@ -234,9 +273,19 @@ bool _Thread_Initialize(
 
 failed:
 
-  if ( scheduler_node_initialized ) {
+#if defined(RTEMS_SMP)
+  while ( scheduler_index > 0 ) {
+    scheduler_node_for_index = (Scheduler_Node *)
+      ( (uintptr_t) scheduler_node_for_index - _Scheduler_Node_size );
+    --scheduler_for_index;
+    --scheduler_index;
+    _Scheduler_Node_destroy( scheduler_for_index, scheduler_node_for_index );
+  }
+#else
+  if ( scheduler_index > 0 ) {
     _Scheduler_Node_destroy( scheduler, scheduler_node );
   }
+#endif
 
   _Workspace_Free( the_thread->Start.tls_area );
 
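Only confdefs.h knows the element type of the node table, so _Thread_Scheduler_get_node_by_index above walks it with byte arithmetic on _Scheduler_Node_size, and the new loop in _Thread_Initialize uses the same stride to initialize one node per entry of _Scheduler_Table. A self-contained sketch of that indexing scheme, with hypothetical types and an arbitrary element size:

  #include <stdint.h>
  #include <stddef.h>
  #include <stdio.h>

  typedef struct {
    int priority;
  } Base_node;

  typedef struct {
    Base_node Base;
    char      extra[ 24 ];   /* stands in for the largest configured node */
  } Config_node;

  /* The walking code sees only the base type and the configured size. */
  static const size_t node_size = sizeof( Config_node );

  static Base_node *node_by_index( void *nodes, size_t index )
  {
    return (Base_node *) ( (uintptr_t) nodes + index * node_size );
  }

  int main( void )
  {
    Config_node table[ 3 ];
    size_t      i;

    for ( i = 0; i < 3; ++i ) {
      node_by_index( table, i )->priority = (int) i;
    }

    printf( "node 2 priority: %d\n", node_by_index( table, 2 )->priority );
    return 0;
  }
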
diff --git a/cpukit/score/src/threadmp.c b/cpukit/score/src/threadmp.c
index 49e1c27979..6ff5065e1d 100644
--- a/cpukit/score/src/threadmp.c
+++ b/cpukit/score/src/threadmp.c
@@ -78,7 +78,7 @@ void _Thread_MP_Handler_initialization (
 #if defined(RTEMS_SMP)
     proxy->Scheduler.own_node = &proxy->Scheduler_node;
 #endif
-    proxy->Scheduler.node = &proxy->Scheduler_node;
+    proxy->Scheduler.nodes = &proxy->Scheduler_node;
     _Scheduler_Node_do_initialize(
       _Scheduler_Get_by_CPU_index( 0 ),
       &proxy->Scheduler_node,
-- 
cgit v1.2.3
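The reworked failed: path in _Thread_Initialize keeps scheduler_index as a count of initialized nodes and destroys exactly that many in reverse order. A compilable sketch of that initialize-forward, roll-back-in-reverse pattern, with hypothetical helpers and an artificial failure:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  enum { SCHEDULER_COUNT = 4 };

  static bool node_initialize( size_t index )
  {
    if ( index >= 2 ) {
      return false;   /* pretend the third initialization fails */
    }
    printf( "init node %zu\n", index );
    return true;
  }

  static void node_destroy( size_t index )
  {
    printf( "destroy node %zu\n", index );
  }

  int main( void )
  {
    size_t scheduler_index = 0;

    while ( scheduler_index < SCHEDULER_COUNT ) {
      if ( !node_initialize( scheduler_index ) ) {
        break;
      }
      ++scheduler_index;
    }

    if ( scheduler_index < SCHEDULER_COUNT ) {
      /* Failure: undo only what was initialized, newest first. */
      while ( scheduler_index > 0 ) {
        --scheduler_index;
        node_destroy( scheduler_index );
      }
      return 1;
    }

    return 0;
  }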