diff options
author | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2016-09-27 11:33:36 +0200 |
---|---|---|
committer | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2016-11-02 10:05:43 +0100 |
commit | 351c14dfd00e1bdaced2823242532cab4bccb58c (patch) | |
tree | cbf9f2c23f45a470819094626cbb146896931769 /cpukit/score/include/rtems/score | |
parent | score: Add _Thread_Scheduler_process_requests() (diff) | |
download | rtems-351c14dfd00e1bdaced2823242532cab4bccb58c.tar.bz2 |
score: Add new SMP scheduler helping protocol
Update #2556.
Diffstat (limited to '')
-rw-r--r-- | cpukit/score/include/rtems/score/percpu.h | 8 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/scheduler.h | 92 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulerimpl.h | 158 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h | 22 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulerprioritysmp.h | 24 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulersimplesmp.h | 24 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulersmpimpl.h | 173 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/schedulerstrongapa.h | 22 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/thread.h | 15 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/threadimpl.h | 24 |
10 files changed, 518 insertions(+), 44 deletions(-)
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h index cb28cd98c4..3e0c348005 100644 --- a/cpukit/score/include/rtems/score/percpu.h +++ b/cpukit/score/include/rtems/score/percpu.h @@ -23,6 +23,7 @@ #include <rtems/asm.h> #else #include <rtems/score/assert.h> + #include <rtems/score/chain.h> #include <rtems/score/isrlock.h> #include <rtems/score/smp.h> #include <rtems/score/smplock.h> @@ -395,6 +396,13 @@ typedef struct Per_CPU_Control { #endif /** + * @brief Chain of threads in need for help. + * + * This field is protected by the Per_CPU_Control::Lock lock. + */ + Chain_Control Threads_in_need_for_help; + + /** * @brief Bit field for SMP messages. * * This bit field is not protected locks. Atomic operations are used to diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h index ad04e7fa6e..d13277ab53 100644 --- a/cpukit/score/include/rtems/score/scheduler.h +++ b/cpukit/score/include/rtems/score/scheduler.h @@ -104,6 +104,52 @@ typedef struct { #if defined(RTEMS_SMP) /** + * @brief Ask for help operation. + * + * @param[in] scheduler The scheduler instance to ask for help. + * @param[in] the_thread The thread needing help. + * @param[in] node The scheduler node. + * + * @retval true Ask for help was successful. + * @retval false Otherwise. + */ + bool ( *ask_for_help )( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node + ); + + /** + * @brief Reconsider help operation. + * + * @param[in] scheduler The scheduler instance to reconsider the help + * request. + * @param[in] the_thread The thread reconsidering a help request. + * @param[in] node The scheduler node. + */ + void ( *reconsider_help_request )( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node + ); + + /** + * @brief Withdraw node operation. + * + * @param[in] scheduler The scheduler instance to withdraw the node. 
+ * @param[in] the_thread The thread using the node. + * @param[in] node The scheduler node to withdraw. + * @param[in] next_state The next thread scheduler state in case the node is + * scheduled. + */ + void ( *withdraw_node )( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state + ); + + /** * Ask for help operation. * * @param[in] scheduler The scheduler of the thread offering help. @@ -322,6 +368,49 @@ Priority_Control _Scheduler_default_Map_priority( * @brief Does nothing. * * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + * @param[in] node Unused. + * + * @retval false Always. + */ + bool _Scheduler_default_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node + ); + + /** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + * @param[in] node Unused. + */ + void _Scheduler_default_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node + ); + + /** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + * @param[in] node Unused. + * @param[in] next_state Unused. + */ + void _Scheduler_default_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state + ); + + /** + * @brief Does nothing. + * + * @param[in] scheduler Unused. * @param[in] offers_help Unused. * @param[in] needs_help Unused. 
* @@ -334,6 +423,9 @@ Priority_Control _Scheduler_default_Map_priority( ); #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \ + _Scheduler_default_Ask_for_help, \ + _Scheduler_default_Reconsider_help_request, \ + _Scheduler_default_Withdraw_node, \ _Scheduler_default_Ask_for_help_X, #else #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h index e33e8d7c97..dbb17a8392 100644 --- a/cpukit/score/include/rtems/score/schedulerimpl.h +++ b/cpukit/score/include/rtems/score/schedulerimpl.h @@ -325,19 +325,54 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread ) */ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread ) { +#if defined(RTEMS_SMP) + Chain_Node *node; + const Chain_Node *tail; + Scheduler_Node *scheduler_node; const Scheduler_Control *scheduler; ISR_lock_Context lock_context; - scheduler = _Scheduler_Get( the_thread ); + node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes ); + tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes ); + + scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); + scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); + _Scheduler_Acquire_critical( scheduler, &lock_context ); + ( *scheduler->Operations.block )( + scheduler, + the_thread, + scheduler_node + ); + _Scheduler_Release_critical( scheduler, &lock_context ); + + node = _Chain_Next( node ); + + while ( node != tail ) { + scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); + scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); + _Scheduler_Acquire_critical( scheduler, &lock_context ); + ( *scheduler->Operations.withdraw_node )( + scheduler, + the_thread, + scheduler_node, + THREAD_SCHEDULER_BLOCKED + ); + _Scheduler_Release_critical( scheduler, &lock_context ); + + node = _Chain_Next( node ); + } +#else + const Scheduler_Control *scheduler; + + scheduler = 
_Scheduler_Get( the_thread ); ( *scheduler->Operations.block )( scheduler, the_thread, _Thread_Scheduler_get_home_node( the_thread ) ); - - _Scheduler_Release_critical( scheduler, &lock_context ); +#endif } /** @@ -352,33 +387,65 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread ) */ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread ) { +#if defined(RTEMS_SMP) + Chain_Node *node; + const Chain_Node *tail; + Scheduler_Node *scheduler_node; const Scheduler_Control *scheduler; ISR_lock_Context lock_context; -#if defined(RTEMS_SMP) Thread_Control *needs_help; -#endif -#if defined(RTEMS_SMP) - _Thread_Scheduler_process_requests( the_thread ); -#endif + node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes ); + tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes ); + + scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); + scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); - scheduler = _Scheduler_Get( the_thread ); _Scheduler_Acquire_critical( scheduler, &lock_context ); + needs_help = ( *scheduler->Operations.unblock )( + scheduler, + the_thread, + scheduler_node + ); + _Scheduler_Ask_for_help_if_necessary( needs_help ); + _Scheduler_Release_critical( scheduler, &lock_context ); -#if defined(RTEMS_SMP) - needs_help = -#endif + if ( needs_help != the_thread ) { + return; + } + + node = _Chain_Next( node ); + + while ( node != tail ) { + bool success; + + scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); + scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); + + _Scheduler_Acquire_critical( scheduler, &lock_context ); + success = ( *scheduler->Operations.ask_for_help )( + scheduler, + the_thread, + scheduler_node + ); + _Scheduler_Release_critical( scheduler, &lock_context ); + + if ( success ) { + break; + } + + node = _Chain_Next( node ); + } +#else + const Scheduler_Control *scheduler; + + scheduler = _Scheduler_Get( the_thread ); ( 
*scheduler->Operations.unblock )( scheduler, the_thread, _Thread_Scheduler_get_home_node( the_thread ) ); - -#if defined(RTEMS_SMP) - _Scheduler_Ask_for_help_if_necessary( needs_help ); #endif - - _Scheduler_Release_critical( scheduler, &lock_context ); } /** @@ -397,33 +464,45 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread ) */ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread ) { - const Scheduler_Control *own_scheduler; - ISR_lock_Context lock_context; #if defined(RTEMS_SMP) - Thread_Control *needs_help; -#endif + Chain_Node *node; + const Chain_Node *tail; -#if defined(RTEMS_SMP) _Thread_Scheduler_process_requests( the_thread ); -#endif - own_scheduler = _Scheduler_Get_own( the_thread ); - _Scheduler_Acquire_critical( own_scheduler, &lock_context ); + node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes ); + tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes ); -#if defined(RTEMS_SMP) - needs_help = -#endif - ( *own_scheduler->Operations.update_priority )( - own_scheduler, + do { + Scheduler_Node *scheduler_node; + const Scheduler_Control *scheduler; + ISR_lock_Context lock_context; + Thread_Control *needs_help; + + scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node ); + scheduler = _Scheduler_Node_get_scheduler( scheduler_node ); + + _Scheduler_Acquire_critical( scheduler, &lock_context ); + needs_help = ( *scheduler->Operations.update_priority )( + scheduler, + the_thread, + scheduler_node + ); + _Scheduler_Ask_for_help_if_necessary( needs_help ); + _Scheduler_Release_critical( scheduler, &lock_context ); + + node = _Chain_Next( node ); + } while ( node != tail ); +#else + const Scheduler_Control *scheduler; + + scheduler = _Scheduler_Get( the_thread ); + ( *scheduler->Operations.update_priority )( + scheduler, the_thread, _Thread_Scheduler_get_home_node( the_thread ) ); - -#if defined(RTEMS_SMP) - _Scheduler_Ask_for_help_if_necessary( needs_help ); #endif - - 
_Scheduler_Release_critical( own_scheduler, &lock_context ); } /** @@ -1008,7 +1087,13 @@ _Scheduler_Try_to_schedule_node( _Thread_Scheduler_acquire_critical( user, &lock_context ); if ( node->help_state == SCHEDULER_HELP_YOURSELF ) { - _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED ); + if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) { + _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) ); + _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED ); + } else { + action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK; + } + _Thread_Scheduler_release_critical( user, &lock_context ); return action; } @@ -1129,6 +1214,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node( _Thread_Scheduler_acquire_critical( thread, &lock_context ); thread_cpu = _Thread_Get_CPU( thread ); + _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu ); _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED ); _Thread_Scheduler_release_critical( thread, &lock_context ); diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h index 359c369dd0..ab83435edf 100644 --- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h +++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h @@ -57,6 +57,9 @@ extern "C" { _Scheduler_priority_affinity_SMP_Update_priority, \ _Scheduler_default_Map_priority, \ _Scheduler_default_Unmap_priority, \ + _Scheduler_priority_affinity_SMP_Ask_for_help, \ + _Scheduler_priority_affinity_SMP_Reconsider_help_request, \ + _Scheduler_priority_affinity_SMP_Withdraw_node, \ _Scheduler_priority_affinity_SMP_Ask_for_help_X, \ _Scheduler_priority_affinity_SMP_Node_initialize, \ _Scheduler_default_Node_destroy, \ @@ -128,6 +131,25 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority( Scheduler_Node *node ); +bool _Scheduler_priority_affinity_SMP_Ask_for_help( + const Scheduler_Control 
*scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_priority_affinity_SMP_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_priority_affinity_SMP_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state +); + Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help_X( const Scheduler_Control *scheduler, Thread_Control *offers_help, diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmp.h b/cpukit/score/include/rtems/score/schedulerprioritysmp.h index 9042243c09..4b3e577578 100644 --- a/cpukit/score/include/rtems/score/schedulerprioritysmp.h +++ b/cpukit/score/include/rtems/score/schedulerprioritysmp.h @@ -7,7 +7,7 @@ */ /* - * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 
4 @@ -86,6 +86,9 @@ typedef struct { _Scheduler_priority_SMP_Update_priority, \ _Scheduler_default_Map_priority, \ _Scheduler_default_Unmap_priority, \ + _Scheduler_priority_SMP_Ask_for_help, \ + _Scheduler_priority_SMP_Reconsider_help_request, \ + _Scheduler_priority_SMP_Withdraw_node, \ _Scheduler_priority_SMP_Ask_for_help_X, \ _Scheduler_priority_SMP_Node_initialize, \ _Scheduler_default_Node_destroy, \ @@ -123,6 +126,25 @@ Thread_Control *_Scheduler_priority_SMP_Update_priority( Scheduler_Node *node ); +bool _Scheduler_priority_SMP_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_priority_SMP_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_priority_SMP_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state +); + Thread_Control *_Scheduler_priority_SMP_Ask_for_help_X( const Scheduler_Control *scheduler, Thread_Control *needs_help, diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h index 2275237447..2afe10b1d1 100644 --- a/cpukit/score/include/rtems/score/schedulersimplesmp.h +++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h @@ -9,7 +9,7 @@ /* * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). * - * Copyright (c) 2013 embedded brains GmbH. + * Copyright (c) 2013, 2016 embedded brains GmbH. 
* * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -69,6 +69,9 @@ typedef struct { _Scheduler_simple_SMP_Update_priority, \ _Scheduler_default_Map_priority, \ _Scheduler_default_Unmap_priority, \ + _Scheduler_simple_SMP_Ask_for_help, \ + _Scheduler_simple_SMP_Reconsider_help_request, \ + _Scheduler_simple_SMP_Withdraw_node, \ _Scheduler_simple_SMP_Ask_for_help_X, \ _Scheduler_simple_SMP_Node_initialize, \ _Scheduler_default_Node_destroy, \ @@ -106,6 +109,25 @@ Thread_Control *_Scheduler_simple_SMP_Update_priority( Scheduler_Node *node ); +bool _Scheduler_simple_SMP_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_simple_SMP_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_simple_SMP_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state +); + Thread_Control *_Scheduler_simple_SMP_Ask_for_help_X( const Scheduler_Control *scheduler, Thread_Control *offers_help, diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h index 6b1ccc30a4..8f9bf2f363 100644 --- a/cpukit/score/include/rtems/score/schedulersmpimpl.h +++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h @@ -7,7 +7,7 @@ */ /* - * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 
4 @@ -27,6 +27,7 @@ #include <rtems/score/assert.h> #include <rtems/score/chainimpl.h> #include <rtems/score/schedulersimpleimpl.h> +#include <rtems/bspIo.h> #ifdef __cplusplus extern "C" { @@ -300,6 +301,12 @@ typedef void ( *Scheduler_SMP_Move )( Scheduler_Node *node_to_move ); +typedef bool ( *Scheduler_SMP_Ask_for_help )( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_Node *node +); + typedef void ( *Scheduler_SMP_Update )( Scheduler_Context *context, Scheduler_Node *node_to_update, @@ -559,8 +566,22 @@ static inline Thread_Control *_Scheduler_SMP_Preempt( _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY ); _Thread_Scheduler_acquire_critical( victim_thread, &lock_context ); + victim_cpu = _Thread_Get_CPU( victim_thread ); - _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY ); + + if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) { + _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY ); + + if ( victim_thread->Scheduler.helping_nodes > 0 ) { + _Per_CPU_Acquire( victim_cpu ); + _Chain_Append_unprotected( + &victim_cpu->Threads_in_need_for_help, + &victim_thread->Scheduler.Help_node + ); + _Per_CPU_Release( victim_cpu ); + } + } + _Thread_Scheduler_release_critical( victim_thread, &lock_context ); _Scheduler_SMP_Allocate_processor( @@ -589,6 +610,9 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled( (void) order; _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) ); + _Assert( + _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled ) + ); return lowest_scheduled; } @@ -901,7 +925,6 @@ static inline void _Scheduler_SMP_Block( Per_CPU_Control *thread_cpu; node_state = _Scheduler_SMP_Node_state( node ); - _Assert( node_state != SCHEDULER_SMP_NODE_BLOCKED ); thread_cpu = _Scheduler_Block_node( context, @@ -910,6 +933,7 @@ static inline void _Scheduler_SMP_Block( node_state == SCHEDULER_SMP_NODE_SCHEDULED, _Scheduler_SMP_Get_idle_thread 
); + if ( thread_cpu != NULL ) { _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED ); @@ -924,7 +948,7 @@ static inline void _Scheduler_SMP_Block( move_from_ready_to_scheduled, allocate_processor ); - } else { + } else if ( node_state == SCHEDULER_SMP_NODE_READY ) { ( *extract_from_ready )( context, node ); } } @@ -996,7 +1020,8 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority( Scheduler_SMP_Enqueue enqueue_fifo, Scheduler_SMP_Enqueue enqueue_lifo, Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo, - Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo + Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo, + Scheduler_SMP_Ask_for_help ask_for_help ) { Thread_Control *needs_help; @@ -1007,7 +1032,10 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority( new_priority = _Scheduler_Node_get_priority( node, &prepend_it ); if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) { - /* Nothing to do */ + if ( _Thread_Is_ready( thread ) ) { + ( *ask_for_help )( context, thread, node ); + } + return NULL; } @@ -1036,6 +1064,10 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority( } else { ( *update )( context, node, new_priority ); + if ( _Thread_Is_ready( thread ) ) { + ( *ask_for_help )( context, thread, node ); + } + needs_help = NULL; } @@ -1149,6 +1181,135 @@ static inline void _Scheduler_SMP_Insert_scheduled_fifo( ); } +static inline bool _Scheduler_SMP_Ask_for_help( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_Node *node, + Chain_Node_order order, + Scheduler_SMP_Insert insert_ready, + Scheduler_SMP_Insert insert_scheduled, + Scheduler_SMP_Move move_from_scheduled_to_ready, + Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Scheduler_Node *lowest_scheduled; + ISR_lock_Context lock_context; + bool success; + + lowest_scheduled = ( *get_lowest_scheduled )( context, node, order ); + + 
_Thread_Scheduler_acquire_critical( thread, &lock_context ); + + if ( + thread->Scheduler.state == THREAD_SCHEDULER_READY + && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED + ) { + if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) { + _Thread_Scheduler_cancel_need_for_help( + thread, + _Thread_Get_CPU( thread ) + ); + _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED ); + _Thread_Scheduler_release_critical( thread, &lock_context ); + + _Scheduler_SMP_Preempt( + context, + node, + lowest_scheduled, + allocate_processor + ); + + ( *insert_scheduled )( context, node ); + ( *move_from_scheduled_to_ready )( context, lowest_scheduled ); + + _Scheduler_Release_idle_thread( + context, + lowest_scheduled, + _Scheduler_SMP_Release_idle_thread + ); + success = true; + } else { + _Thread_Scheduler_release_critical( thread, &lock_context ); + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY ); + ( *insert_ready )( context, node ); + success = false; + } + } else { + _Thread_Scheduler_release_critical( thread, &lock_context ); + success = false; + } + + return success; +} + +static inline void _Scheduler_SMP_Reconsider_help_request( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_Node *node, + Scheduler_SMP_Extract extract_from_ready +) +{ + ISR_lock_Context lock_context; + + _Thread_Scheduler_acquire_critical( thread, &lock_context ); + + if ( + thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED + && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY + ) { + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED ); + ( *extract_from_ready )( context, node ); + } + + _Thread_Scheduler_release_critical( thread, &lock_context ); +} + +static inline void _Scheduler_SMP_Withdraw_node( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Get_highest_ready 
get_highest_ready, + Scheduler_SMP_Move move_from_ready_to_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + ISR_lock_Context lock_context; + Scheduler_SMP_Node_state node_state; + + _Thread_Scheduler_acquire_critical( thread, &lock_context ); + + node_state = _Scheduler_SMP_Node_state( node ); + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED ); + + if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) { + Per_CPU_Control *thread_cpu; + + thread_cpu = _Thread_Get_CPU( thread ); + _Scheduler_Thread_change_state( thread, next_state ); + _Thread_Scheduler_release_critical( thread, &lock_context ); + + _Scheduler_SMP_Extract_from_scheduled( node ); + _Scheduler_SMP_Schedule_highest_ready( + context, + node, + thread_cpu, + extract_from_ready, + get_highest_ready, + move_from_ready_to_scheduled, + allocate_processor + ); + } else if ( node_state == SCHEDULER_SMP_NODE_READY ) { + _Thread_Scheduler_release_critical( thread, &lock_context ); + ( *extract_from_ready )( context, node ); + } else { + _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED ); + _Thread_Scheduler_release_critical( thread, &lock_context ); + } +} + /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/include/rtems/score/schedulerstrongapa.h b/cpukit/score/include/rtems/score/schedulerstrongapa.h index fd6d6ec247..b8a5f2f22e 100644 --- a/cpukit/score/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/score/include/rtems/score/schedulerstrongapa.h @@ -86,6 +86,9 @@ typedef struct { _Scheduler_strong_APA_Update_priority, \ _Scheduler_default_Map_priority, \ _Scheduler_default_Unmap_priority, \ + _Scheduler_strong_APA_Ask_for_help, \ + _Scheduler_strong_APA_Reconsider_help_request, \ + _Scheduler_strong_APA_Withdraw_node, \ _Scheduler_strong_APA_Ask_for_help_X, \ _Scheduler_strong_APA_Node_initialize, \ _Scheduler_default_Node_destroy, \ @@ -123,6 +126,25 @@ Thread_Control *_Scheduler_strong_APA_Update_priority( Scheduler_Node *node ); +bool 
_Scheduler_strong_APA_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state +); + Thread_Control *_Scheduler_strong_APA_Ask_for_help_X( const Scheduler_Control *scheduler, Thread_Control *needs_help, diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h index 236eaed064..304f90439c 100644 --- a/cpukit/score/include/rtems/score/thread.h +++ b/cpukit/score/include/rtems/score/thread.h @@ -310,6 +310,21 @@ typedef struct { Chain_Control Scheduler_nodes; /** + * @brief Node for the Per_CPU_Control::Threads_in_need_for_help chain. + * + * This chain is protected by the Per_CPU_Control::Lock lock of the assigned + * processor. + */ + Chain_Node Help_node; + + /** + * @brief Count of nodes scheduler nodes minus one. + * + * This chain is protected by the thread state lock. + */ + size_t helping_nodes; + + /** * @brief List of pending scheduler node requests. * * This list is protected by the thread scheduler lock. 
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h index 957fd55305..19fad0ba18 100644 --- a/cpukit/score/include/rtems/score/threadimpl.h +++ b/cpukit/score/include/rtems/score/threadimpl.h @@ -81,6 +81,9 @@ extern Thread_Control *_Thread_Allocated_fp; #if defined(RTEMS_SMP) #define THREAD_RESOURCE_NODE_TO_THREAD( node ) \ RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node ) + +#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node ) #endif typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg ); @@ -993,6 +996,23 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources( return owns_resources; } +#if defined(RTEMS_SMP) +RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help( + Thread_Control *the_thread, + Per_CPU_Control *cpu +) +{ + _Per_CPU_Acquire( cpu ); + + if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) { + _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node ); + _Chain_Set_off_chain( &the_thread->Scheduler.Help_node ); + } + + _Per_CPU_Release( cpu ); +} +#endif + RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node( const Thread_Control *the_thread ) @@ -1051,7 +1071,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical( _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context ); } +#if defined(RTEMS_SMP) +void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread ); + void _Thread_Scheduler_process_requests( Thread_Control *the_thread ); +#endif RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request( Thread_Control *the_thread, |