From 34487537ceb62ee2e2fabc0667e65c43a1319855 Mon Sep 17 00:00:00 2001
From: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date: Tue, 4 Jul 2017 09:57:30 +0200
Subject: score: Add simple affinity support to EDF SMP

Update #3059.
---
 cpukit/score/include/rtems/score/scheduleredfsmp.h |  77 +++-
 .../score/include/rtems/score/schedulersmpimpl.h   | 140 ++++++-
 cpukit/score/src/scheduleredfsmp.c                 | 445 +++++++++++++++++----
 cpukit/score/src/schedulerpriorityaffinitysmp.c    |   3 +-
 cpukit/score/src/schedulerprioritysmp.c            |   3 +-
 cpukit/score/src/schedulersimplesmp.c              |   3 +-
 cpukit/score/src/schedulersmpstartidle.c           |  22 +-
 cpukit/score/src/schedulerstrongapa.c              |   3 +-
 8 files changed, 592 insertions(+), 104 deletions(-)

diff --git a/cpukit/score/include/rtems/score/scheduleredfsmp.h b/cpukit/score/include/rtems/score/scheduleredfsmp.h
index 8f6e85777a..68f01d2c4b 100644
--- a/cpukit/score/include/rtems/score/scheduleredfsmp.h
+++ b/cpukit/score/include/rtems/score/scheduleredfsmp.h
@@ -33,15 +33,65 @@ extern "C" {
  * @{
  */
 
-typedef struct {
-  Scheduler_SMP_Context Base;
-  RBTree_Control        Ready;
-} Scheduler_EDF_SMP_Context;
-
 typedef struct {
   Scheduler_SMP_Node Base;
+
+  /**
+   * @brief Generation number to ensure FIFO/LIFO order for threads of the
+   * same priority across different ready queues.
+   */
+  int64_t generation;
+
+  /**
+   * @brief The ready queue index, which depends on the processor affinity
+   * of the thread.
+   *
+   * The ready queue index zero is used for threads with a one-to-all
+   * processor affinity.  Threads with a one-to-one processor affinity use
+   * the processor index plus one as the ready queue index.
+   */
+  uint32_t ready_queue_index;
 } Scheduler_EDF_SMP_Node;
 
+typedef struct {
+  /**
+   * @brief Chain node for Scheduler_SMP_Context::Affine_queues.
+   */
+  Chain_Node Node;
+
+  /**
+   * @brief The ready threads of the corresponding affinity.
+   */
+  RBTree_Control Queue;
+
+  /**
+   * @brief The scheduled thread of the corresponding processor.
+   */
+  Scheduler_EDF_SMP_Node *scheduled;
+} Scheduler_EDF_SMP_Ready_queue;
+
+typedef struct {
+  Scheduler_SMP_Context Base;
+
+  /**
+   * @brief Current generation for FIFO/LIFO ordering.
+   */
+  int64_t generations[ 2 ];
+
+  /**
+   * @brief Chain of ready queues with affine threads to determine the
+   * highest priority ready thread.
+   */
+  Chain_Control Affine_queues;
+
+  /**
+   * @brief A table with ready queues.
+   *
+   * The index zero queue is used for threads with a one-to-all processor
+   * affinity.  Index one corresponds to processor index zero, and so on.
+   */
+  Scheduler_EDF_SMP_Ready_queue Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_EDF_SMP_Context;
+
 #define SCHEDULER_EDF_SMP_ENTRY_POINTS \
   { \
     _Scheduler_EDF_SMP_Initialize, \
@@ -62,8 +112,8 @@ typedef struct {
     _Scheduler_EDF_Release_job, \
     _Scheduler_EDF_Cancel_job, \
     _Scheduler_default_Tick, \
-    _Scheduler_SMP_Start_idle \
-    SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+    _Scheduler_EDF_SMP_Start_idle, \
+    _Scheduler_EDF_SMP_Set_affinity \
   }
 
 void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler );
@@ -128,6 +178,19 @@ void _Scheduler_EDF_SMP_Yield(
   Scheduler_Node          *node
 );
 
+void _Scheduler_EDF_SMP_Start_idle(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *idle,
+  struct Per_CPU_Control  *cpu
+);
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node,
+  const Processor_mask    *affinity
+);
+
 /** @} */
 
 #ifdef __cplusplus
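
Note: as a reading aid between the file diffs, the Ready[] table indexing
described above can be pictured with a small standalone sketch.  It is not
part of the commit and all toy_* names are local to it: queue zero serves
threads that may run on every processor, queue i + 1 serves threads pinned
to processor i.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Toy model of the ready queue index convention described above */
    static uint32_t toy_ready_queue_index( bool pinned, uint32_t cpu_index )
    {
      return pinned ? cpu_index + 1 : 0;
    }

    int main( void )
    {
      assert( toy_ready_queue_index( false, 3 ) == 0 ); /* one-to-all */
      assert( toy_ready_queue_index( true, 0 ) == 1 );  /* pinned to CPU 0 */
      assert( toy_ready_queue_index( true, 3 ) == 4 );  /* pinned to CPU 3 */
      return 0;
    }
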
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 620a42f863..3afa6b2a0c 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -317,6 +317,12 @@ typedef void ( *Scheduler_SMP_Update )(
   Priority_Control  new_priority
 );
 
+typedef void ( *Scheduler_SMP_Set_affinity )(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  void              *arg
+);
+
 typedef bool ( *Scheduler_SMP_Enqueue )(
   Scheduler_Context *context,
   Scheduler_Node    *node_to_enqueue
@@ -329,6 +335,23 @@ typedef void ( *Scheduler_SMP_Allocate_processor )(
   Per_CPU_Control   *victim_cpu
 );
 
+typedef void ( *Scheduler_SMP_Register_idle )(
+  Scheduler_Context *context,
+  Scheduler_Node    *idle,
+  Per_CPU_Control   *cpu
+);
+
+static inline void _Scheduler_SMP_Do_nothing_register_idle(
+  Scheduler_Context *context,
+  Scheduler_Node    *idle,
+  Per_CPU_Control   *cpu
+)
+{
+  (void) context;
+  (void) idle;
+  (void) cpu;
+}
+
 static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
   const Chain_Node *to_insert,
   const Chain_Node *next
@@ -903,6 +926,50 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
   } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
 }
 
+static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+  Scheduler_Context                *context,
+  Scheduler_Node                   *victim,
+  Per_CPU_Control                  *victim_cpu,
+  Scheduler_SMP_Extract             extract_from_ready,
+  Scheduler_SMP_Get_highest_ready   get_highest_ready,
+  Scheduler_SMP_Move                move_from_ready_to_scheduled,
+  Scheduler_SMP_Allocate_processor  allocate_processor
+)
+{
+  Scheduler_Try_to_schedule_action action;
+
+  do {
+    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+
+    action = _Scheduler_Try_to_schedule_node(
+      context,
+      highest_ready,
+      NULL,
+      _Scheduler_SMP_Get_idle_thread
+    );
+
+    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+      _Scheduler_SMP_Preempt(
+        context,
+        highest_ready,
+        victim,
+        allocate_processor
+      );
+
+      ( *move_from_ready_to_scheduled )( context, highest_ready );
+    } else {
+      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+      _Scheduler_SMP_Node_change_state(
+        highest_ready,
+        SCHEDULER_SMP_NODE_BLOCKED
+      );
+
+      ( *extract_from_ready )( context, highest_ready );
+    }
+  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+}
+
 /**
  * @brief Blocks a thread.
  *
@@ -1274,11 +1341,34 @@ static inline void _Scheduler_SMP_Withdraw_node(
   }
 }
 
+static inline void _Scheduler_SMP_Do_start_idle(
+  Scheduler_Context           *context,
+  Thread_Control              *idle,
+  Per_CPU_Control             *cpu,
+  Scheduler_SMP_Register_idle  register_idle
+)
+{
+  Scheduler_SMP_Context *self;
+  Scheduler_SMP_Node    *node;
+
+  self = _Scheduler_SMP_Get_self( context );
+  node = _Scheduler_SMP_Thread_get_node( idle );
+
+  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
+  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
+
+  _Thread_Set_CPU( idle, cpu );
+  ( *register_idle )( context, &node->Base, cpu );
+  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
+  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
+}
+
 static inline void _Scheduler_SMP_Add_processor(
-  Scheduler_Context *context,
-  Thread_Control    *idle,
-  Scheduler_SMP_Has_ready has_ready,
-  Scheduler_SMP_Enqueue enqueue_scheduled_fifo
+  Scheduler_Context           *context,
+  Thread_Control              *idle,
+  Scheduler_SMP_Has_ready      has_ready,
+  Scheduler_SMP_Enqueue        enqueue_scheduled_fifo,
+  Scheduler_SMP_Register_idle  register_idle
 )
 {
   Scheduler_SMP_Context *self;
@@ -1289,6 +1379,7 @@ static inline void _Scheduler_SMP_Add_processor(
   _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
   node = _Thread_Scheduler_get_home_node( idle );
   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
+  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
 
   if ( ( *has_ready )( &self->Base ) ) {
     ( *enqueue_scheduled_fifo )( &self->Base, node );
@@ -1355,6 +1446,45 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
   return idle;
 }
 
+static inline void _Scheduler_SMP_Set_affinity(
+  Scheduler_Context                *context,
+  Thread_Control                   *thread,
+  Scheduler_Node                   *node,
+  void                             *arg,
+  Scheduler_SMP_Set_affinity        set_affinity,
+  Scheduler_SMP_Extract             extract_from_ready,
+  Scheduler_SMP_Get_highest_ready   get_highest_ready,
+  Scheduler_SMP_Move                move_from_ready_to_scheduled,
+  Scheduler_SMP_Enqueue             enqueue_fifo,
+  Scheduler_SMP_Allocate_processor  allocate_processor
+)
+{
+  Scheduler_SMP_Node_state node_state;
+
+  node_state = _Scheduler_SMP_Node_state( node );
+
+  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+    _Scheduler_SMP_Extract_from_scheduled( node );
+    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+      context,
+      node,
+      _Thread_Get_CPU( thread ),
+      extract_from_ready,
+      get_highest_ready,
+      move_from_ready_to_scheduled,
+      allocate_processor
+    );
+    ( *set_affinity )( context, node, arg );
+    ( *enqueue_fifo )( context, node );
+  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+    ( *extract_from_ready )( context, node );
+    ( *set_affinity )( context, node, arg );
+    ( *enqueue_fifo )( context, node );
+  } else {
+    ( *set_affinity )( context, node, arg );
+  }
+}
+
 /** @} */
 
 #ifdef __cplusplus
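
Note: the new Scheduler_SMP_Register_idle hook defaults to
_Scheduler_SMP_Do_nothing_register_idle for schedulers without per-processor
bookkeeping.  Purely for illustration, a backend that tracks which node
occupies each processor could supply a callback along the following lines;
My_context, MY_CPU_MAX and _My_Register_idle are hypothetical, while the
Scheduler_* types and _Per_CPU_Get_index() are the real score interfaces:

    #define MY_CPU_MAX 32 /* hypothetical fixed processor maximum */

    typedef struct {
      Scheduler_SMP_Context Base;
      Scheduler_Node       *scheduled[ MY_CPU_MAX ];
    } My_context;

    static void _My_Register_idle(
      Scheduler_Context *context,
      Scheduler_Node    *idle,
      Per_CPU_Control   *cpu
    )
    {
      My_context *self = (My_context *) context;

      /* Record that this processor starts out executing its idle node */
      self->scheduled[ _Per_CPU_Get_index( cpu ) ] = idle;
    }

This is essentially what the EDF SMP scheduler does below via the scheduled
member of its Scheduler_EDF_SMP_Ready_queue entries.
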
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 644bf2f347..bd6ce82068 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -21,13 +21,13 @@
 #include <rtems/score/scheduleredfsmp.h>
 #include <rtems/score/schedulersmpimpl.h>
 
-static Scheduler_EDF_SMP_Context *
+static inline Scheduler_EDF_SMP_Context *
 _Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
 {
   return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
 }
 
-static Scheduler_EDF_SMP_Context *
+static inline Scheduler_EDF_SMP_Context *
 _Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
 {
   return (Scheduler_EDF_SMP_Context *) context;
 }
 
@@ -83,7 +83,8 @@ void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
     _Scheduler_EDF_SMP_Get_context( scheduler );
 
   _Scheduler_SMP_Initialize( &self->Base );
-  _RBTree_Initialize_empty( &self->Ready );
+  _Chain_Initialize_empty( &self->Affine_queues );
+  /* The ready queues are zero initialized and thus empty */
 }
 
 void _Scheduler_EDF_SMP_Node_initialize(
@@ -99,7 +100,7 @@
   _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
 }
 
-static void _Scheduler_EDF_SMP_Do_update(
+static inline void _Scheduler_EDF_SMP_Do_update(
   Scheduler_Context *context,
   Scheduler_Node    *node,
   Priority_Control   new_priority
@@ -113,112 +114,318 @@
   _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
 }
 
-static bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
+static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
 {
   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
 
-  return !_RBTree_Is_empty( &self->Ready );
+  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
 }
 
+static inline bool _Scheduler_EDF_SMP_Overall_less(
+  const Scheduler_EDF_SMP_Node *left,
+  const Scheduler_EDF_SMP_Node *right
+)
+{
+  Priority_Control lp;
+  Priority_Control rp;
+
+  lp = left->Base.priority;
+  rp = right->Base.priority;
+
+  return lp < rp || ( lp == rp && left->generation < right->generation );
+}
+
+static inline Scheduler_EDF_SMP_Node *
+_Scheduler_EDF_SMP_Challenge_highest_ready(
+  Scheduler_EDF_SMP_Context *self,
+  Scheduler_EDF_SMP_Node    *highest_ready,
+  RBTree_Control            *ready_queue
+)
+{
+  Scheduler_EDF_SMP_Node *other;
+
+  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
+  _Assert( other != NULL );
+
+  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
+    return other;
+  }
+
+  return highest_ready;
+}
+
-static Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
   Scheduler_Context *context,
-  Scheduler_Node *node
+  Scheduler_Node    *filter
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_Node *first = (Scheduler_Node *) _RBTree_Minimum( &self->Ready );
+  Scheduler_EDF_SMP_Context *self;
+  Scheduler_EDF_SMP_Node    *highest_ready;
+  Scheduler_EDF_SMP_Node    *node;
+  uint32_t                   rqi;
+  const Chain_Node          *tail;
+  Chain_Node                *next;
+
+  self = _Scheduler_EDF_SMP_Get_self( context );
+  highest_ready = (Scheduler_EDF_SMP_Node *)
+    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
+  _Assert( highest_ready != NULL );
+
+  /*
+   * The filter node is a scheduled node which is no longer on the scheduled
+   * chain.  If this is an affine thread, we have to check the corresponding
+   * affine ready queue.
+   */
+
+  node = (Scheduler_EDF_SMP_Node *) filter;
+  rqi = node->ready_queue_index;
+
+  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
+    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
+      self,
+      highest_ready,
+      &self->Ready[ rqi ].Queue
+    );
+  }
+
+  tail = _Chain_Immutable_tail( &self->Affine_queues );
+  next = _Chain_First( &self->Affine_queues );
+
+  while ( next != tail ) {
+    Scheduler_EDF_SMP_Ready_queue *ready_queue;
+
+    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
+    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
+      self,
+      highest_ready,
+      &ready_queue->Queue
+    );
+
+    next = _Chain_Next( next );
+  }
+
+  return &highest_ready->Base.Base;
+}
+
+static inline void _Scheduler_EDF_SMP_Set_scheduled(
+  Scheduler_EDF_SMP_Context *self,
+  Scheduler_EDF_SMP_Node    *scheduled,
+  const Per_CPU_Control     *cpu
+)
+{
+  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
+}
+
+static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
+  const Scheduler_EDF_SMP_Context *self,
+  uint32_t                         rqi
+)
+{
+  return self->Ready[ rqi ].scheduled;
+}
+
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *filter_base,
+  Chain_Node_order   order
+)
+{
+  Scheduler_EDF_SMP_Node *filter;
+  uint32_t                rqi;
+
+  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
+  rqi = filter->ready_queue_index;
 
-  (void) node;
+  if ( rqi != 0 ) {
+    Scheduler_EDF_SMP_Context *self;
+    Scheduler_EDF_SMP_Node    *node;
 
-  _Assert( &first->Node != NULL );
+    self = _Scheduler_EDF_SMP_Get_self( context );
+    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
 
-  return first;
+    if ( node->ready_queue_index > 0 ) {
+      _Assert( node->ready_queue_index == rqi );
+      return &node->Base.Base;
+    }
+  }
+
+  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base, order );
 }
 
-static void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
+static inline void _Scheduler_EDF_SMP_Insert_ready(
   Scheduler_Context *context,
-  Scheduler_Node *scheduled_to_ready
+  Scheduler_Node    *node_base,
+  size_t             generation_index,
+  int                increment,
+  bool            ( *less )( const void *, const RBTree_Node * )
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_EDF_SMP_Node *node =
-    _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
+  Scheduler_EDF_SMP_Context     *self;
+  Scheduler_EDF_SMP_Node        *node;
+  uint32_t                       rqi;
+  Scheduler_EDF_SMP_Ready_queue *ready_queue;
+  int64_t                        generation;
+
+  self = _Scheduler_EDF_SMP_Get_self( context );
+  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+  rqi = node->ready_queue_index;
+  ready_queue = &self->Ready[ rqi ];
+
+  generation = self->generations[ generation_index ];
+  node->generation = generation;
+  self->generations[ generation_index ] = generation + increment;
 
-  _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
   _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
   _RBTree_Insert_inline(
-    &self->Ready,
+    &ready_queue->Queue,
     &node->Base.Base.Node.RBTree,
     &node->Base.priority,
-    _Scheduler_EDF_SMP_Less
+    less
   );
+
+  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
+    Scheduler_EDF_SMP_Node *scheduled;
+
+    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
+
+    if ( scheduled->ready_queue_index == 0 ) {
+      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
+    }
+  }
 }
 
-static void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
+static inline void _Scheduler_EDF_SMP_Extract_from_ready(
   Scheduler_Context *context,
-  Scheduler_Node *ready_to_scheduled
+  Scheduler_Node    *node_to_extract
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_EDF_SMP_Node *node =
-    _Scheduler_EDF_SMP_Node_downcast( ready_to_scheduled );
+  Scheduler_EDF_SMP_Context     *self;
+  Scheduler_EDF_SMP_Node        *node;
+  uint32_t                       rqi;
+  Scheduler_EDF_SMP_Ready_queue *ready_queue;
 
-  _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
+  self = _Scheduler_EDF_SMP_Get_self( context );
+  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
+  rqi = node->ready_queue_index;
+  ready_queue = &self->Ready[ rqi ];
+
+  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
   _Chain_Initialize_node( &node->Base.Base.Node.Chain );
-  _Chain_Insert_ordered_unprotected(
-    &self->Base.Scheduled,
-    &node->Base.Base.Node.Chain,
-    _Scheduler_SMP_Insert_priority_fifo_order
+
+  if (
+    rqi != 0
+      && _RBTree_Is_empty( &ready_queue->Queue )
+      && !_Chain_Is_node_off_chain( &ready_queue->Node )
+  ) {
+    _Chain_Extract_unprotected( &ready_queue->Node );
+    _Chain_Set_off_chain( &ready_queue->Node );
+  }
+}
+
+static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled_to_ready
+)
+{
+  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
+  _Scheduler_EDF_SMP_Insert_ready(
+    context,
+    scheduled_to_ready,
+    0,
+    1,
+    _Scheduler_EDF_SMP_Less
   );
 }
 
-static void _Scheduler_EDF_SMP_Insert_ready_lifo(
+static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
   Scheduler_Context *context,
-  Scheduler_Node *node_to_insert
+  Scheduler_Node    *ready_to_scheduled
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_EDF_SMP_Node *node =
-    _Scheduler_EDF_SMP_Node_downcast( node_to_insert );
+  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
+  _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
+}
 
-  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
-  _RBTree_Insert_inline(
-    &self->Ready,
-    &node->Base.Base.Node.RBTree,
-    &node->Base.priority,
+static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
+  Scheduler_Context *context,
+  Scheduler_Node    *node_to_insert
+)
+{
+  _Scheduler_EDF_SMP_Insert_ready(
+    context,
+    node_to_insert,
+    1,
+    -1,
     _Scheduler_EDF_SMP_Less_or_equal
   );
 }
 
-static void _Scheduler_EDF_SMP_Insert_ready_fifo(
+static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
   Scheduler_Context *context,
   Scheduler_Node    *node_to_insert
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_EDF_SMP_Node *node =
-    _Scheduler_EDF_SMP_Node_downcast( node_to_insert );
-
-  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
-  _RBTree_Insert_inline(
-    &self->Ready,
-    &node->Base.Base.Node.RBTree,
-    &node->Base.priority,
+  _Scheduler_EDF_SMP_Insert_ready(
+    context,
+    node_to_insert,
+    0,
+    1,
     _Scheduler_EDF_SMP_Less
   );
 }
 
-static void _Scheduler_EDF_SMP_Extract_from_ready(
+static inline void _Scheduler_EDF_SMP_Allocate_processor(
   Scheduler_Context *context,
-  Scheduler_Node *node_to_extract
+  Scheduler_Node    *scheduled_base,
+  Scheduler_Node    *victim_base,
+  Per_CPU_Control   *victim_cpu
 )
 {
-  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
-  Scheduler_EDF_SMP_Node *node =
-    _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
-
-  _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
-  _Chain_Initialize_node( &node->Base.Base.Node.Chain );
+  Scheduler_EDF_SMP_Context *self;
+  Scheduler_EDF_SMP_Node    *scheduled;
+  uint32_t                   rqi;
+
+  (void) victim_base;
+  self = _Scheduler_EDF_SMP_Get_self( context );
+  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
+  rqi = scheduled->ready_queue_index;
+
+  if ( rqi != 0 ) {
+    Scheduler_EDF_SMP_Ready_queue *ready_queue;
+    Per_CPU_Control               *desired_cpu;
+
+    ready_queue = &self->Ready[ rqi ];
+
+    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
+      _Chain_Extract_unprotected( &ready_queue->Node );
+      _Chain_Set_off_chain( &ready_queue->Node );
+    }
+
+    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );
+
+    if ( victim_cpu != desired_cpu ) {
+      Scheduler_EDF_SMP_Node *node;
+
+      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
+      _Assert( node->ready_queue_index == 0 );
+      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
+      _Scheduler_SMP_Allocate_processor_exact(
+        context,
+        &node->Base.Base,
+        NULL,
+        victim_cpu
+      );
+      victim_cpu = desired_cpu;
+    }
+  }
+
+  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
+  _Scheduler_SMP_Allocate_processor_exact(
+    context,
+    &scheduled->Base.Base,
+    NULL,
+    victim_cpu
+  );
 }
 
 void _Scheduler_EDF_SMP_Block(
@@ -236,11 +443,11 @@
     _Scheduler_EDF_SMP_Extract_from_ready,
     _Scheduler_EDF_SMP_Get_highest_ready,
     _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_EDF_SMP_Allocate_processor
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_ordered(
+static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
   Scheduler_Context *context,
   Scheduler_Node    *node,
   Chain_Node_order   order,
@@ -255,12 +462,12 @@
     insert_ready,
     insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_EDF_SMP_Get_lowest_scheduled,
+    _Scheduler_EDF_SMP_Allocate_processor
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_lifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
   Scheduler_Context *context,
   Scheduler_Node    *node
 )
@@ -274,7 +481,7 @@
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_fifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
   Scheduler_Context *context,
   Scheduler_Node    *node
 )
@@ -288,7 +495,7 @@
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
   Scheduler_Context *context,
   Scheduler_Node    *node,
   Chain_Node_order   order,
@@ -305,11 +512,11 @@
     insert_ready,
     insert_scheduled,
     _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_EDF_SMP_Allocate_processor
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
   Scheduler_Context *context,
   Scheduler_Node    *node
 )
@@ -323,7 +530,7 @@
   );
 }
 
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
   Scheduler_Context *context,
   Scheduler_Node    *node
 )
@@ -354,7 +561,7 @@ void _Scheduler_EDF_SMP_Unblock(
   );
 }
 
-static bool _Scheduler_EDF_SMP_Do_ask_for_help(
+static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
   Scheduler_Context *context,
   Thread_Control    *the_thread,
   Scheduler_Node    *node
@@ -368,8 +575,8 @@
     _Scheduler_EDF_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo,
     _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_EDF_SMP_Get_lowest_scheduled,
+    _Scheduler_EDF_SMP_Allocate_processor
   );
 }
 
@@ -439,10 +646,24 @@ void _Scheduler_EDF_SMP_Withdraw_node(
     _Scheduler_EDF_SMP_Extract_from_ready,
     _Scheduler_EDF_SMP_Get_highest_ready,
     _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_EDF_SMP_Allocate_processor
   );
 }
 
+static inline void _Scheduler_EDF_SMP_Register_idle(
+  Scheduler_Context *context,
+  Scheduler_Node    *idle_base,
+  Per_CPU_Control   *cpu
+)
+{
+  Scheduler_EDF_SMP_Context *self;
+  Scheduler_EDF_SMP_Node    *idle;
+
+  self = _Scheduler_EDF_SMP_Get_self( context );
+  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
+  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
+}
+
 void _Scheduler_EDF_SMP_Add_processor(
   const Scheduler_Control *scheduler,
   Thread_Control          *idle
@@ -454,7 +675,8 @@ void _Scheduler_EDF_SMP_Add_processor(
     context,
     idle,
     _Scheduler_EDF_SMP_Has_ready,
-    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
+    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
+    _Scheduler_EDF_SMP_Register_idle
   );
 }
 
@@ -490,3 +712,78 @@ void _Scheduler_EDF_SMP_Yield(
     _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
   );
 }
+
+static inline void _Scheduler_EDF_SMP_Do_set_affinity(
+  Scheduler_Context *context,
+  Scheduler_Node    *node_base,
+  void              *arg
+)
+{
+  Scheduler_EDF_SMP_Node *node;
+  const uint32_t         *rqi;
+
+  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+  rqi = arg;
+  node->ready_queue_index = *rqi;
+}
+
+void _Scheduler_EDF_SMP_Start_idle(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *idle,
+  Per_CPU_Control         *cpu
+)
+{
+  Scheduler_Context *context;
+
+  context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Do_start_idle(
+    context,
+    idle,
+    cpu,
+    _Scheduler_EDF_SMP_Register_idle
+  );
+}
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node,
+  const Processor_mask    *affinity
+)
+{
+  Scheduler_Context *context;
+  Processor_mask     a;
+  Processor_mask     b;
+  uint32_t           rqi;
+
+  context = _Scheduler_Get_context( scheduler );
+  _Processor_mask_And( &a, &context->Processors, affinity );
+
+  if ( _Processor_mask_Count( &a ) == 0 ) {
+    return false;
+  }
+
+  _Processor_mask_And( &b, &_SMP_Online_processors, affinity );
+
+  if ( _Processor_mask_Count( &b ) == _SMP_Processor_count ) {
+    rqi = 0;
+  } else {
+    rqi = _Processor_mask_Find_last_set( &a );
+  }
+
+  _Scheduler_SMP_Set_affinity(
+    context,
+    thread,
+    node,
+    &rqi,
+    _Scheduler_EDF_SMP_Do_set_affinity,
+    _Scheduler_EDF_SMP_Extract_from_ready,
+    _Scheduler_EDF_SMP_Get_highest_ready,
+    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+    _Scheduler_EDF_SMP_Enqueue_fifo,
+    _Scheduler_EDF_SMP_Allocate_processor
+  );
+
+  return true;
+}
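
Note: a standalone sketch of the generation tie-break implemented by
_Scheduler_EDF_SMP_Overall_less() and _Scheduler_EDF_SMP_Insert_ready()
above, not part of the commit.  FIFO insertions draw from a counter that
grows (generations[ 0 ], increment +1) and LIFO insertions from one that
shrinks (generations[ 1 ], increment -1), so among equal priorities the node
with the smallest generation, i.e. the oldest FIFO or newest LIFO arrival,
wins.  The toy_* names are local to this sketch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
      uint64_t priority;
      int64_t  generation;
    } toy_node;

    /* Mirrors the generation update in _Scheduler_EDF_SMP_Insert_ready() */
    static int64_t toy_generation( int64_t gens[ 2 ], size_t i, int increment )
    {
      int64_t generation = gens[ i ];

      gens[ i ] = generation + increment;
      return generation;
    }

    /* Mirrors _Scheduler_EDF_SMP_Overall_less() */
    static bool toy_overall_less( const toy_node *l, const toy_node *r )
    {
      return l->priority < r->priority
        || ( l->priority == r->priority && l->generation < r->generation );
    }

    int main( void )
    {
      int64_t  gens[ 2 ] = { 0, 0 };
      toy_node a = { 5, toy_generation( gens, 0, 1 ) };  /* FIFO, gen 0 */
      toy_node b = { 5, toy_generation( gens, 0, 1 ) };  /* FIFO, gen 1 */
      toy_node c = { 5, toy_generation( gens, 1, -1 ) }; /* LIFO, gen 0 */
      toy_node d = { 5, toy_generation( gens, 1, -1 ) }; /* LIFO, gen -1 */

      assert( toy_overall_less( &a, &b ) ); /* FIFO: earlier arrival wins */
      assert( toy_overall_less( &d, &c ) ); /* LIFO: latest arrival wins */
      assert( toy_overall_less( &d, &a ) ); /* smaller generation wins */
      return 0;
    }

Note also how _Scheduler_EDF_SMP_Allocate_processor() above first moves the
non-affine node that currently occupies the desired processor over to the
victim processor before it places an affine node, so a one-to-one thread
always ends up on exactly its pinned processor.
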
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 99938cdc56..c35883f142 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -579,7 +579,8 @@ void _Scheduler_priority_affinity_SMP_Add_processor(
     context,
     idle,
     _Scheduler_priority_SMP_Has_ready,
-    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo
+    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
+    _Scheduler_SMP_Do_nothing_register_idle
   );
 }
 
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index 5548ebf8e8..071a4218f3 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -323,7 +323,8 @@ void _Scheduler_priority_SMP_Add_processor(
     context,
     idle,
     _Scheduler_priority_SMP_Has_ready,
-    _Scheduler_priority_SMP_Enqueue_scheduled_fifo
+    _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
+    _Scheduler_SMP_Do_nothing_register_idle
   );
 }
 
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 2884876381..4be43abff1 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -398,7 +398,8 @@ void _Scheduler_simple_SMP_Add_processor(
     context,
     idle,
     _Scheduler_simple_SMP_Has_ready,
-    _Scheduler_simple_SMP_Enqueue_scheduled_fifo
+    _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
+    _Scheduler_SMP_Do_nothing_register_idle
   );
 }
 
diff --git a/cpukit/score/src/schedulersmpstartidle.c b/cpukit/score/src/schedulersmpstartidle.c
index d396a159fe..bdb1b47876 100644
--- a/cpukit/score/src/schedulersmpstartidle.c
+++ b/cpukit/score/src/schedulersmpstartidle.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016 embedded brains GmbH.
+ * Copyright (c) 2013, 2017 embedded brains GmbH.
  *
  * The license and distribution terms for this file may be
  * found in the file LICENSE in this distribution or at
@@ -18,18 +18,12 @@ void _Scheduler_SMP_Start_idle(
   Per_CPU_Control         *cpu
 )
 {
-  Scheduler_Context     *context;
-  Scheduler_SMP_Context *self;
-  Scheduler_SMP_Node    *node;
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 
-  context = _Scheduler_Get_context( scheduler );
-  self = _Scheduler_SMP_Get_self( context );
-  node = _Scheduler_SMP_Thread_get_node( idle );
-
-  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
-  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
-
-  _Thread_Set_CPU( idle, cpu );
-  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
-  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
+  _Scheduler_SMP_Do_start_idle(
+    context,
+    idle,
+    cpu,
+    _Scheduler_SMP_Do_nothing_register_idle
+  );
 }
 
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index f6313584f6..d5bfed74f4 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -457,7 +457,8 @@ void _Scheduler_strong_APA_Add_processor(
     context,
     idle,
     _Scheduler_strong_APA_Has_ready,
-    _Scheduler_strong_APA_Enqueue_scheduled_fifo
+    _Scheduler_strong_APA_Enqueue_scheduled_fifo,
+    _Scheduler_SMP_Do_nothing_register_idle
  );
 }
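
Note: from the application's point of view, the new support is exercised
through the classic affinity API.  A sketch, assuming processor 1 is online
and owned by the calling task's scheduler; with this patch the EDF SMP
scheduler effectively distinguishes only one-to-all and single-processor
affinities:

    #include <rtems.h>
    #include <sys/cpuset.h>

    /* Pin the calling task to processor 1 */
    static rtems_status_code pin_self_to_cpu_one( void )
    {
      cpu_set_t cpuset;

      CPU_ZERO( &cpuset );
      CPU_SET( 1, &cpuset );

      return rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
    }

If the mask cannot be satisfied, for example because it selects no processor
owned by the scheduler, the directive fails with an error status.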