diff options
author | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2014-07-04 14:34:23 +0200 |
---|---|---|
committer | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2014-07-09 10:05:17 +0200 |
commit | 5c3d2509593476869e791111cd3d93cc1e840b3a (patch) | |
tree | 2f642fcce66748460f0f823fbeb6f292267b8cd0 /cpukit/score/src/schedulerpriorityaffinitysmp.c | |
parent | schedulerpriorityaffinitysmp.c: Add period at end of sentence (diff) | |
download | rtems-5c3d2509593476869e791111cd3d93cc1e840b3a.tar.bz2 |
score: Implement scheduler helping protocol
The following scheduler operations return a thread in need of help:
- unblock,
- change priority, and
- yield.
A thread in need of help is a thread that encounters a scheduler state
change from scheduled to ready, or a thread that cannot be scheduled in
an unblock operation. Such a thread can ask for help from the threads
that depend on resources it owns.
Add a new ask for help scheduler operation. This operation is used by
_Scheduler_Ask_for_help() to help threads in need of help returned by
the operations mentioned above. This operation is also used by
_Scheduler_Thread_change_resource_root() in case the root of a resource
sub-tree changes. A use case is the ownership change of a resource.
In case it is not possible to schedule a thread in need of help, the
corresponding scheduler node will be placed into the set of ready
scheduler nodes of the scheduler instance. Once a state change from
ready to scheduled happens for this scheduler node, it may be used to
schedule the thread in need of help.
Diffstat (limited to 'cpukit/score/src/schedulerpriorityaffinitysmp.c')
-rw-r--r-- | cpukit/score/src/schedulerpriorityaffinitysmp.c | 52 |
1 file changed, 45 insertions, 7 deletions
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c index 24fae3cfee..39a0a481ea 100644 --- a/cpukit/score/src/schedulerpriorityaffinitysmp.c +++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c @@ -60,6 +60,15 @@ static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order( && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next ); } +static Scheduler_priority_affinity_SMP_Node * +_Scheduler_priority_affinity_SMP_Thread_get_own_node( + Thread_Control *thread +) +{ + return (Scheduler_priority_affinity_SMP_Node *) + _Scheduler_Thread_get_own_node( thread ); +} + /* * This method returns the scheduler node for the specified thread * as a scheduler specific type. @@ -69,7 +78,8 @@ _Scheduler_priority_affinity_SMP_Thread_get_node( Thread_Control *thread ) { - return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Thread_get_node( thread ); + return (Scheduler_priority_affinity_SMP_Node *) + _Scheduler_Thread_get_node( thread ); } static Scheduler_priority_affinity_SMP_Node * @@ -90,7 +100,7 @@ void _Scheduler_priority_affinity_SMP_Node_initialize( ) { Scheduler_priority_affinity_SMP_Node *node = - _Scheduler_priority_affinity_SMP_Thread_get_node( thread ); + _Scheduler_priority_affinity_SMP_Thread_get_own_node( thread ); (void) scheduler; @@ -221,7 +231,8 @@ void _Scheduler_priority_affinity_SMP_Block( _Scheduler_priority_SMP_Extract_from_ready, _Scheduler_priority_affinity_SMP_Get_highest_ready, _Scheduler_priority_SMP_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_SMP_Allocate_processor_exact, + _Scheduler_priority_SMP_Get_idle_thread ); /* @@ -303,7 +314,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo( _Scheduler_SMP_Insert_scheduled_fifo, _Scheduler_priority_SMP_Move_from_scheduled_to_ready, _Scheduler_priority_affinity_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_exact + 
_Scheduler_SMP_Allocate_processor_exact, + _Scheduler_priority_SMP_Release_idle_thread ); } @@ -387,7 +399,8 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Unblock( needs_help = _Scheduler_SMP_Unblock( context, thread, - _Scheduler_priority_affinity_SMP_Enqueue_fifo + _Scheduler_priority_affinity_SMP_Enqueue_fifo, + _Scheduler_priority_SMP_Release_idle_thread ); /* @@ -420,7 +433,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered( insert_scheduled, _Scheduler_priority_SMP_Move_from_scheduled_to_ready, _Scheduler_priority_affinity_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_SMP_Allocate_processor_exact, + _Scheduler_priority_SMP_Release_idle_thread ); } @@ -463,11 +477,14 @@ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered( context, node, order, + _Scheduler_priority_SMP_Extract_from_ready, _Scheduler_priority_affinity_SMP_Get_highest_ready, insert_ready, insert_scheduled, _Scheduler_priority_SMP_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_SMP_Allocate_processor_exact, + _Scheduler_priority_SMP_Get_idle_thread, + _Scheduler_priority_SMP_Release_idle_thread ); } @@ -543,6 +560,27 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority( return displaced; } +Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *offers_help, + Thread_Control *needs_help +) +{ + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + + needs_help = _Scheduler_SMP_Ask_for_help( + context, + offers_help, + needs_help, + _Scheduler_priority_affinity_SMP_Enqueue_fifo, + _Scheduler_priority_SMP_Release_idle_thread + ); + + _Scheduler_priority_affinity_SMP_Check_for_migrations( context ); + + return needs_help; +} + /* * This is the public scheduler specific Change Priority operation. */ |