author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-10 21:30:26 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-11 08:58:40 +0200
commit     be0366bb62ed4a804725a484ffd73242cd4f1d7b
tree       4267cd07e8e1134284ec145280f8197480aa55b7 /cpukit/score/include/rtems/score/schedulersmpimpl.h
parent     mrm332-testsuite.tcfg: Add fileio
score: Fix scheduler helping protocol
Account for priority changes of threads executing in a foreign partition. Exchange idle threads in case a victim node uses an idle thread and the newly scheduled node needs an idle thread.
Diffstat (limited to 'cpukit/score/include/rtems/score/schedulersmpimpl.h')
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h  218
1 file changed, 141 insertions(+), 77 deletions(-)
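
Note: the diff below replaces the old boolean result of _Scheduler_Try_to_schedule_node() with a three-way action (SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE, SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE, SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK) and dispatches on it at each enqueue site. The following is a minimal stand-alone sketch of that dispatch pattern only; the demo_* types and helpers are hypothetical and simplified, not part of the RTEMS API.

#include <assert.h>
#include <stdio.h>

/* The three outcomes of the "try to schedule" step, as named in the diff. */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/* Hypothetical, heavily simplified stand-in for a scheduler node. */
typedef struct {
  const char *name;
  int         uses_idle_thread;
} demo_node;

/*
 * Model of the dispatch: depending on the action, the new node either takes
 * over the processor, swaps idle threads with the victim, or stays blocked.
 * Returns the node that ends up scheduled.
 */
static const char *demo_dispatch(
  Scheduler_Try_to_schedule_action action,
  demo_node                       *node,
  demo_node                       *victim
)
{
  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    /* Victim becomes ready, node becomes scheduled and gets the processor. */
    return node->name;
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    /* Victim used an idle thread and node needs one: hand it over. */
    node->uses_idle_thread = 1;
    victim->uses_idle_thread = 0;
    return node->name;
  } else {
    assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    /* Node cannot be scheduled right now; the victim keeps the processor. */
    return victim->name;
  }
}

int main( void )
{
  demo_node victim = { "victim", 1 };
  demo_node node = { "node", 0 };

  printf(
    "scheduled after idle exchange: %s\n",
    demo_dispatch( SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE, &node, &victim )
  );
  return 0;
}

In the actual change below, the DO_SCHEDULE and DO_IDLE_EXCHANGE branches additionally move nodes between the ready and scheduled sets, update thread scheduler states, and release or exchange the real idle threads; the sketch only models how the branch is selected.
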
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index e41c7372c0..a395f2c0ba 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -532,41 +532,76 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- Thread_Control *user = _Scheduler_Node_get_user( node );
- Thread_Control *lowest_scheduled_user =
- _Scheduler_Node_get_user( lowest_scheduled );
Thread_Control *needs_help;
- Thread_Control *idle;
+ Scheduler_Try_to_schedule_action action;
- _Scheduler_SMP_Node_change_state(
- _Scheduler_SMP_Node_downcast( lowest_scheduled ),
- SCHEDULER_SMP_NODE_READY
- );
- _Scheduler_Thread_change_state(
- lowest_scheduled_user,
- THREAD_SCHEDULER_READY
- );
-
- _Scheduler_Thread_set_node( user, node );
-
- _Scheduler_SMP_Allocate_processor(
+ action = _Scheduler_Try_to_schedule_node(
context,
node,
- lowest_scheduled,
- allocate_processor
+ _Scheduler_Node_get_idle( lowest_scheduled ),
+ _Scheduler_SMP_Get_idle_thread
);
- ( *insert_scheduled )( context, node );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ Thread_Control *lowest_scheduled_user =
+ _Scheduler_Node_get_user( lowest_scheduled );
+ Thread_Control *idle;
- idle = _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
- if ( idle == NULL ) {
- needs_help = lowest_scheduled_user;
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_Thread_change_state(
+ lowest_scheduled_user,
+ THREAD_SCHEDULER_READY
+ );
+
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ node,
+ lowest_scheduled,
+ allocate_processor
+ );
+
+ ( *insert_scheduled )( context, node );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ idle = _Scheduler_Release_idle_thread(
+ context,
+ lowest_scheduled,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ if ( idle == NULL ) {
+ needs_help = lowest_scheduled_user;
+ } else {
+ needs_help = NULL;
+ }
+ } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( node ),
+ SCHEDULER_SMP_NODE_SCHEDULED
+ );
+
+ ( *insert_scheduled )( context, node );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ _Scheduler_Exchange_idle_thread(
+ node,
+ lowest_scheduled,
+ _Scheduler_Node_get_idle( lowest_scheduled )
+ );
+
+ needs_help = NULL;
} else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( node ),
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
needs_help = NULL;
}
@@ -660,7 +695,7 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
{
Thread_Control *needs_help;
- while ( true ) {
+ do {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
/*
@@ -671,55 +706,80 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
( *insert_scheduled )( context, node );
needs_help = NULL;
+ } else {
+ Scheduler_Try_to_schedule_action action;
- break;
- } else if (
- _Scheduler_Try_to_schedule_node(
+ action = _Scheduler_Try_to_schedule_node(
context,
highest_ready,
+ _Scheduler_Node_get_idle( node ),
_Scheduler_SMP_Get_idle_thread
- )
- ) {
- Thread_Control *user = _Scheduler_Node_get_user( node );
- Thread_Control *idle;
-
- _Scheduler_SMP_Node_change_state(
- _Scheduler_SMP_Node_downcast( node ),
- SCHEDULER_SMP_NODE_READY
);
- _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
- _Scheduler_SMP_Allocate_processor(
- context,
- highest_ready,
- node,
- allocate_processor
- );
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ Thread_Control *user = _Scheduler_Node_get_user( node );
+ Thread_Control *idle;
- ( *insert_ready )( context, node );
- ( *move_from_ready_to_scheduled )( context, highest_ready );
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( node ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
+
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ highest_ready,
+ node,
+ allocate_processor
+ );
+
+ ( *insert_ready )( context, node );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+ idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ _Scheduler_SMP_Release_idle_thread
+ );
+ if ( idle == NULL ) {
+ needs_help = user;
+ } else {
+ needs_help = NULL;
+ }
+ } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( node ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( highest_ready ),
+ SCHEDULER_SMP_NODE_SCHEDULED
+ );
+
+ ( *insert_ready )( context, node );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+ _Scheduler_Exchange_idle_thread(
+ highest_ready,
+ node,
+ _Scheduler_Node_get_idle( node )
+ );
- idle = _Scheduler_Release_idle_thread(
- context,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
- if ( idle == NULL ) {
- needs_help = user;
- } else {
needs_help = NULL;
- }
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
- break;
- } else {
- _Scheduler_SMP_Node_change_state(
- _Scheduler_SMP_Node_downcast( highest_ready ),
- SCHEDULER_SMP_NODE_BLOCKED
- );
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( highest_ready ),
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
- ( *extract_from_ready )( context, highest_ready );
+ ( *extract_from_ready )( context, highest_ready );
+
+ continue;
+ }
}
- }
+ } while ( false );
return needs_help;
}
@@ -740,16 +800,18 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- while ( true ) {
+ do {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+ Scheduler_Try_to_schedule_action action;
- if (
- _Scheduler_Try_to_schedule_node(
- context,
- highest_ready,
- _Scheduler_SMP_Get_idle_thread
- )
- ) {
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ NULL,
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
_Scheduler_SMP_Allocate_processor(
context,
highest_ready,
@@ -758,17 +820,19 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
);
( *move_from_ready_to_scheduled )( context, highest_ready );
-
- break;
} else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
_Scheduler_SMP_Node_change_state(
_Scheduler_SMP_Node_downcast( highest_ready ),
SCHEDULER_SMP_NODE_BLOCKED
);
( *extract_from_ready )( context, highest_ready );
+
+ continue;
}
- }
+ } while ( false );
}
/**