summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2014-07-04 14:34:23 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2014-07-09 10:05:17 +0200
commit5c3d2509593476869e791111cd3d93cc1e840b3a (patch)
tree2f642fcce66748460f0f823fbeb6f292267b8cd0
parentschedulerpriorityaffinitysmp.c: Add period at end of sentence (diff)
downloadrtems-5c3d2509593476869e791111cd3d93cc1e840b3a.tar.bz2
score: Implement scheduler helping protocol
The following scheduler operations return a thread in need for help:

- unblock,
- change priority, and
- yield.

A thread in need for help is a thread that encounters a scheduler state change from scheduled to ready or a thread that cannot be scheduled in an unblock operation. Such a thread can ask threads which depend on resources owned by this thread for help.

Add a new ask for help scheduler operation. This operation is used by _Scheduler_Ask_for_help() to help threads in need for help returned by the operations mentioned above. This operation is also used by _Scheduler_Thread_change_resource_root() in case the root of a resource sub-tree changes. A use case is the ownership change of a resource.

In case it is not possible to schedule a thread in need for help, then the corresponding scheduler node will be placed into the set of ready scheduler nodes of the scheduler instance. Once a state change from ready to scheduled happens for this scheduler node it may be used to schedule the thread in need for help.
-rw-r--r--cpukit/score/Makefile.am4
-rw-r--r--cpukit/score/include/rtems/score/mrspimpl.h30
-rw-r--r--cpukit/score/include/rtems/score/scheduler.h47
-rw-r--r--cpukit/score/include/rtems/score/schedulercbs.h1
-rw-r--r--cpukit/score/include/rtems/score/scheduleredf.h1
-rw-r--r--cpukit/score/include/rtems/score/schedulerimpl.h586
-rw-r--r--cpukit/score/include/rtems/score/schedulerpriority.h1
-rw-r--r--cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h7
-rw-r--r--cpukit/score/include/rtems/score/schedulerprioritysmp.h7
-rw-r--r--cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h22
-rw-r--r--cpukit/score/include/rtems/score/schedulersimple.h1
-rw-r--r--cpukit/score/include/rtems/score/schedulersimplesmp.h7
-rw-r--r--cpukit/score/include/rtems/score/schedulersmp.h5
-rw-r--r--cpukit/score/include/rtems/score/schedulersmpimpl.h369
-rw-r--r--cpukit/score/include/rtems/score/thread.h62
-rw-r--r--cpukit/score/include/rtems/score/threadimpl.h10
-rw-r--r--cpukit/score/src/schedulercbsnodeinit.c2
-rw-r--r--cpukit/score/src/schedulerchangeroot.c85
-rw-r--r--cpukit/score/src/schedulerdefaultaskforhelp.c26
-rw-r--r--cpukit/score/src/schedulerdefaultnodeinit.c7
-rw-r--r--cpukit/score/src/scheduleredfnodeinit.c2
-rw-r--r--cpukit/score/src/schedulerpriorityaffinitysmp.c52
-rw-r--r--cpukit/score/src/schedulerprioritysmp.c33
-rw-r--r--cpukit/score/src/schedulersimplesmp.c55
-rw-r--r--cpukit/score/src/schedulersmpdebug.c (renamed from cpukit/score/src/schedulersmpvalidstatechanges.c)22
-rw-r--r--cpukit/score/src/schedulersmpstartidle.c1
-rw-r--r--cpukit/score/src/threadinitialize.c3
-rw-r--r--doc/user/smp.t78
28 files changed, 1399 insertions, 127 deletions
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 6caefb5f10..e4c373c836 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -133,13 +133,15 @@ endif
if HAS_SMP
libscore_a_SOURCES += src/percpustatewait.c
libscore_a_SOURCES += src/profilingsmplock.c
-libscore_a_SOURCES += src/schedulersmpvalidstatechanges.c
+libscore_a_SOURCES += src/schedulerchangeroot.c
libscore_a_SOURCES += src/schedulerpriorityaffinitysmp.c
libscore_a_SOURCES += src/schedulerprioritysmp.c
libscore_a_SOURCES += src/schedulersimplesmp.c
+libscore_a_SOURCES += src/schedulersmpdebug.c
libscore_a_SOURCES += src/smp.c
libscore_a_SOURCES += src/cpuset.c
libscore_a_SOURCES += src/cpusetprintsupport.c
+libscore_a_SOURCES += src/schedulerdefaultaskforhelp.c
libscore_a_SOURCES += src/schedulerdefaultgetaffinity.c
libscore_a_SOURCES += src/schedulerdefaultsetaffinity.c
libscore_a_SOURCES += src/schedulersmpstartidle.c
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index 6aa45a8c4f..4aaa50bc01 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -42,25 +42,6 @@ extern "C" {
#define MRSP_RIVAL_STATE_TIMEOUT 0x2U
-RTEMS_INLINE_ROUTINE bool _MRSP_Set_root_visitor(
- Resource_Node *node,
- void *arg
-)
-{
- _Resource_Node_set_root( node, arg );
-
- return false;
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Set_root(
- Resource_Node *top,
- Resource_Node *root
-)
-{
- _Resource_Node_set_root( top, root );
- _Resource_Iterate( top, _MRSP_Set_root_visitor, root );
-}
-
RTEMS_INLINE_ROUTINE void _MRSP_Elevate_priority(
MRSP_Control *mrsp,
Thread_Control *new_owner,
@@ -197,9 +178,10 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
_Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
previous_help_state =
_Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
- _MRSP_Set_root(
- &executing->Resource_node,
- _Resource_Node_get_root( owner )
+
+ _Scheduler_Thread_change_resource_root(
+ executing,
+ _Thread_Resource_node_to_thread( _Resource_Node_get_root( owner ) )
);
if ( timeout > 0 ) {
@@ -241,7 +223,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
_Resource_Node_extract( &executing->Resource_node );
_Resource_Node_set_dependency( &executing->Resource_node, NULL );
_Scheduler_Thread_change_help_state( executing, previous_help_state );
- _MRSP_Set_root( &executing->Resource_node, &executing->Resource_node );
+ _Scheduler_Thread_change_resource_root( executing, executing );
_MRSP_Restore_priority( mrsp, executing, initial_priority );
status = MRSP_TIMEOUT;
@@ -334,7 +316,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
_Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
_Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
_Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
- _MRSP_Set_root( &new_owner->Resource_node, &new_owner->Resource_node );
+ _Scheduler_Thread_change_resource_root( new_owner, new_owner );
_MRSP_Add_state( rival, MRSP_RIVAL_STATE_NEW_OWNER );
}
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index 993ae55b29..8da988fffd 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -90,6 +90,31 @@ typedef struct {
bool
);
+#if defined(RTEMS_SMP)
+ /**
+ * Ask for help operation.
+ *
+ * @param[in] scheduler The scheduler of the thread offering help.
+ * @param[in] offers_help The thread offering help.
+ * @param[in] needs_help The thread needing help.
+ *
+ * @retval needs_help It was not possible to schedule the thread needing
+ * help, so it is returned to continue the search for help.
+ * @retval next_needs_help It was possible to schedule the thread needing
+ * help, but this displaced another thread eligible to ask for help. So
+ * this thread is returned to start a new search for help.
+ * @retval NULL It was possible to schedule the thread needing help, and no
+ * other thread needs help as a result.
+ *
+ * @see _Scheduler_Ask_for_help().
+ */
+ Thread_Control *( *ask_for_help )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+ );
+#endif
+
/** @see _Scheduler_Node_initialize() */
void ( *node_initialize )( const Scheduler_Control *, Thread_Control * );
@@ -375,6 +400,28 @@ extern const Scheduler_Control _Scheduler_Table[];
extern const Scheduler_Assignment _Scheduler_Assignments[];
#endif
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Does nothing.
+ *
+ * @param[in] scheduler Unused.
+ * @param[in] offers_help Unused.
+ * @param[in] needs_help Unused.
+ *
+ * @retval NULL Always.
+ */
+ Thread_Control *_Scheduler_default_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+ );
+
+ #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+ _Scheduler_default_Ask_for_help,
+#else
+ #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP
+#endif
+
/**
* @brief Does nothing.
*
diff --git a/cpukit/score/include/rtems/score/schedulercbs.h b/cpukit/score/include/rtems/score/schedulercbs.h
index 008cc91261..b3381e0e43 100644
--- a/cpukit/score/include/rtems/score/schedulercbs.h
+++ b/cpukit/score/include/rtems/score/schedulercbs.h
@@ -53,6 +53,7 @@ extern "C" {
_Scheduler_EDF_Block, /* block entry point */ \
_Scheduler_CBS_Unblock, /* unblock entry point */ \
_Scheduler_EDF_Change_priority, /* change priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
_Scheduler_CBS_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_EDF_Update_priority, /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/scheduleredf.h b/cpukit/score/include/rtems/score/scheduleredf.h
index 1dda767cf6..e69569156d 100644
--- a/cpukit/score/include/rtems/score/scheduleredf.h
+++ b/cpukit/score/include/rtems/score/scheduleredf.h
@@ -46,6 +46,7 @@ extern "C" {
_Scheduler_EDF_Block, /* block entry point */ \
_Scheduler_EDF_Unblock, /* unblock entry point */ \
_Scheduler_EDF_Change_priority, /* change priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
_Scheduler_EDF_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_EDF_Update_priority, /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 5e4e5098d2..c41c3af3e6 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -10,6 +10,7 @@
/*
* Copyright (C) 2010 Gedare Bloom.
* Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ * Copyright (c) 2014 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -42,6 +43,13 @@ extern "C" {
*/
void _Scheduler_Handler_initialization( void );
+RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
+ const Scheduler_Control *scheduler
+)
+{
+ return scheduler->context;
+}
+
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
const Thread_Control *the_thread
)
@@ -55,6 +63,19 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
#endif
}
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return the_thread->Scheduler.own_control;
+#else
+ (void) the_thread;
+
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
uint32_t cpu_index
)
@@ -78,6 +99,13 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
}
#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
+ const Thread_Control *the_thread
+)
+{
+ return the_thread->Scheduler.own_node;
+}
+
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
const Scheduler_Node *node
)
@@ -117,6 +145,39 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
}
#if defined(RTEMS_SMP)
+typedef struct {
+ Thread_Control *needs_help;
+ Thread_Control *next_needs_help;
+} Scheduler_Ask_for_help_context ;
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
+ Resource_Node *resource_node,
+ void *arg
+)
+{
+ bool done;
+ Scheduler_Ask_for_help_context *help_context = arg;
+ Thread_Control *previous_needs_help = help_context->needs_help;
+ Thread_Control *next_needs_help;
+ Thread_Control *offers_help =
+ _Thread_Resource_node_to_thread( resource_node );
+ const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
+
+ next_needs_help = ( *scheduler->Operations.ask_for_help )(
+ scheduler,
+ offers_help,
+ previous_needs_help
+ );
+
+ done = next_needs_help != previous_needs_help;
+
+ if ( done ) {
+ help_context->next_needs_help = next_needs_help;
+ }
+
+ return done;
+}
+
/**
* @brief Ask threads depending on resources owned by the thread for help.
*
@@ -124,13 +185,56 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
* pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
*
+ * The run-time of this function depends on the size of the resource tree of
+ * the thread needing help and other resource trees in case threads in need for
+ * help are produced during this operation.
+ *
* @param[in] needs_help The thread needing help.
*/
+RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
+ Thread_Control *needs_help
+)
+{
+ do {
+ const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );
+
+ needs_help = ( *scheduler->Operations.ask_for_help )(
+ scheduler,
+ needs_help,
+ needs_help
+ );
+
+ if ( needs_help != NULL ) {
+ Scheduler_Ask_for_help_context help_context = { needs_help, NULL };
+
+ _Resource_Iterate(
+ &needs_help->Resource_node,
+ _Scheduler_Ask_for_help_visitor,
+ &help_context
+ );
+
+ needs_help = help_context.next_needs_help;
+ }
+ } while ( needs_help != NULL );
+}
+
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
Thread_Control *needs_help
)
{
- (void) needs_help;
+ if (
+ needs_help != NULL
+ && _Resource_Node_owns_resources( &needs_help->Resource_node )
+ ) {
+ Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );
+
+ if (
+ node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
+ || _Scheduler_Node_get_user( node ) != needs_help
+ ) {
+ _Scheduler_Ask_for_help( needs_help );
+ }
+ }
}
#endif
@@ -218,7 +322,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
bool prepend_it
)
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
#if defined(RTEMS_SMP)
Thread_Control *needs_help;
@@ -426,6 +530,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Set(
if ( current_scheduler != scheduler ) {
_Thread_Set_state( the_thread, STATES_MIGRATING );
_Scheduler_Node_destroy( current_scheduler, the_thread );
+ the_thread->Scheduler.own_control = scheduler;
the_thread->Scheduler.control = scheduler;
_Scheduler_Node_initialize( scheduler, the_thread );
_Scheduler_Update_priority( the_thread, the_thread->current_priority );
@@ -628,13 +733,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority_if_higher(
}
}
-RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
- const Scheduler_Control *scheduler
-)
-{
- return scheduler->context;
-}
-
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
const Scheduler_Control *scheduler
)
@@ -721,6 +819,29 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
}
#if defined(RTEMS_SMP)
+/**
+ * @brief Gets an idle thread from the scheduler instance.
+ *
+ * @param[in] context The scheduler instance context.
+ *
+ * @retval idle An idle thread for use. This function must always return an
+ * idle thread. If none is available, then this is a fatal error.
+ */
+typedef Thread_Control *( *Scheduler_Get_idle_thread )(
+ Scheduler_Context *context
+);
+
+/**
+ * @brief Releases an idle thread to the scheduler instance for reuse.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] idle The idle thread to release.
+ */
+typedef void ( *Scheduler_Release_idle_thread )(
+ Scheduler_Context *context,
+ Thread_Control *idle
+);
+
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
const Scheduler_Node *node
)
@@ -735,6 +856,50 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
return node->idle;
}
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
+ Scheduler_Node *node,
+ Thread_Control *user
+)
+{
+ node->user = user;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ the_thread->Scheduler.node = node;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ const Thread_Control *previous_user_of_node
+)
+{
+ const Scheduler_Control *scheduler =
+ _Scheduler_Get_own( previous_user_of_node );
+
+ the_thread->Scheduler.control = scheduler;
+ _Scheduler_Thread_set_node( the_thread, node );
+}
+
+extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
+ Thread_Control *the_thread,
+ Thread_Scheduler_state new_state
+)
+{
+ _Assert(
+ _Scheduler_Thread_state_valid_state_changes
+ [ the_thread->Scheduler.state ][ new_state ]
+ );
+
+ the_thread->Scheduler.state = new_state;
+}
+
/**
* @brief Changes the scheduler help state of a thread.
*
@@ -748,13 +913,414 @@ RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
Scheduler_Help_state new_help_state
)
{
- Scheduler_Node *node = _Scheduler_Thread_get_node( the_thread );
+ Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
Scheduler_Help_state previous_help_state = node->help_state;
node->help_state = new_help_state;
return previous_help_state;
}
+
+/**
+ * @brief Changes the resource tree root of a thread.
+ *
+ * For each node of the resource sub-tree specified by the top thread the
+ * scheduler asks for help. So the root thread gains access to all scheduler
+ * nodes corresponding to the resource sub-tree. In case a thread previously
+ * granted help is displaced by this operation, then the scheduler asks for
+ * help using its remaining resource tree.
+ *
+ * The run-time of this function depends on the size of the resource sub-tree
+ * and other resource trees in case threads in need for help are produced
+ * during this operation.
+ *
+ * @param[in] top The thread specifying the resource sub-tree top.
+ * @param[in] root The thread specifying the new resource sub-tree root.
+ */
+void _Scheduler_Thread_change_resource_root(
+ Thread_Control *top,
+ Thread_Control *root
+);
+
+/**
+ * @brief Use an idle thread for this scheduler node.
+ *
+ * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle
+ * thread for the scheduler node owned by itself in case it executes currently
+ * using another scheduler node or in case it is in a blocking state.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to use the idle thread.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ Thread_Control *idle = ( *get_idle_thread )( context );
+
+ _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+ _Assert(
+ _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
+ );
+
+ _Scheduler_Thread_set_node( idle, node );
+
+ _Scheduler_Node_set_user( node, idle );
+ node->idle = idle;
+
+ return idle;
+}
+
+/**
+ * @brief Try to schedule this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to get scheduled.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval true This node can be scheduled.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ bool schedule;
+ Thread_Control *owner;
+ Thread_Control *user;
+
+ if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
+ return true;
+ }
+
+ owner = _Scheduler_Node_get_owner( node );
+ user = _Scheduler_Node_get_user( node );
+
+ if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
+ if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+ } else {
+ _Scheduler_Node_set_user( node, owner );
+ }
+
+ schedule = true;
+ } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+ if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+ } else {
+ _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+ }
+
+ schedule = true;
+ } else {
+ _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
+
+ if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+ schedule = true;
+ } else {
+ schedule = false;
+ }
+ }
+
+ return schedule;
+}
+
+/**
+ * @brief Release an idle thread using this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which may have an idle thread as user.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval idle The idle thread which used this node.
+ * @retval NULL This node had no idle thread as an user.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Thread_Control *idle = _Scheduler_Node_get_idle( node );
+
+ if ( idle != NULL ) {
+ Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+ node->idle = NULL;
+ _Scheduler_Node_set_user( node, owner );
+ _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
+ _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
+
+ ( *release_idle_thread )( context, idle );
+ }
+
+ return idle;
+}
+
+/**
+ * @brief Block this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to get blocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval true Continue with the blocking operation.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Get_idle_thread get_idle_thread
+)
+{
+ bool block;
+ Thread_Control *old_user = _Scheduler_Node_get_user( node );
+ Thread_Control *new_user;
+
+ _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
+
+ if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+ new_user = _Scheduler_Node_get_owner( node );
+
+ _Assert( new_user != old_user );
+ _Scheduler_Node_set_user( node, new_user );
+ } else if (
+ node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+ && is_scheduled
+ ) {
+ new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+ } else {
+ new_user = NULL;
+ }
+
+ if ( new_user != NULL && is_scheduled ) {
+ Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+ _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Set_CPU( new_user, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
+
+ block = false;
+ } else {
+ block = true;
+ }
+
+ return block;
+}
+
+/**
+ * @brief Unblock this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] the_thread The thread which wants to get unblocked.
+ * @param[in] node The node which wants to get unblocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval true Continue with the unblocking operation.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
+ Scheduler_Context *context,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ bool unblock;
+
+ if ( is_scheduled ) {
+ Thread_Control *old_user = _Scheduler_Node_get_user( node );
+ Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+ if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+ Thread_Control *idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ release_idle_thread
+ );
+
+ _Assert( idle != NULL );
+ (void) idle;
+ } else {
+ _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+
+ _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
+ _Scheduler_Node_set_user( node, the_thread );
+ }
+
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Set_CPU( the_thread, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
+
+ unblock = false;
+ } else {
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
+
+ unblock = true;
+ }
+
+ return unblock;
+}
+
+/**
+ * @brief Asks a ready scheduler node for help.
+ *
+ * @param[in] node The ready node offering help.
+ * @param[in] needs_help The thread needing help.
+ *
+ * @retval needs_help The thread needing help.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
+ Scheduler_Node *node,
+ Thread_Control *needs_help
+)
+{
+ _Scheduler_Node_set_user( node, needs_help );
+
+ return needs_help;
+}
+
+/**
+ * @brief Asks a scheduled scheduler node for help.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The scheduled node offering help.
+ * @param[in] offers_help The thread offering help.
+ * @param[in] needs_help The thread needing help.
+ * @param[in] previous_accepts_help The previous thread accepting help by this
+ * scheduler node.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval needs_help The previous thread accepting help by this scheduler node
+ * which was displaced by the thread needing help.
+ * @retval NULL There are no more threads needing help.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help,
+ Thread_Control *previous_accepts_help,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Thread_Control *next_needs_help = NULL;
+ Thread_Control *old_user = NULL;
+ Thread_Control *new_user = NULL;
+
+ if (
+ previous_accepts_help != needs_help
+ && _Scheduler_Thread_get_node( previous_accepts_help ) == node
+ ) {
+ Thread_Control *idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ release_idle_thread
+ );
+
+ if ( idle != NULL ) {
+ old_user = idle;
+ } else {
+ _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
+ old_user = previous_accepts_help;
+ }
+
+ if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ new_user = needs_help;
+ } else {
+ _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+ _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
+
+ new_user = offers_help;
+ }
+
+ if ( previous_accepts_help != offers_help ) {
+ next_needs_help = previous_accepts_help;
+ }
+ } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ Thread_Control *idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ release_idle_thread
+ );
+
+ if ( idle != NULL ) {
+ old_user = idle;
+ } else {
+ old_user = _Scheduler_Node_get_user( node );
+ }
+
+ new_user = needs_help;
+ } else {
+ _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
+ }
+
+ if ( new_user != old_user ) {
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+ Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+ _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
+ _Scheduler_Thread_set_scheduler_and_node(
+ old_user,
+ _Scheduler_Thread_get_own_node( old_user ),
+ old_user
+ );
+
+ _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+ _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );
+
+ _Scheduler_Node_set_user( node, new_user );
+ _Thread_Set_CPU( new_user, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
+ }
+
+ return next_needs_help;
+}
+
+/**
+ * @brief Asks a blocked scheduler node for help.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The scheduled node offering help.
+ * @param[in] offers_help The thread offering help.
+ * @param[in] needs_help The thread needing help.
+ *
+ * @retval true Enqueue this scheduler node.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+)
+{
+ bool enqueue;
+
+ _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
+
+ if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ _Scheduler_Node_set_user( node, needs_help );
+ _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
+
+ enqueue = true;
+ } else {
+ enqueue = false;
+ }
+
+ return enqueue;
+}
#endif
/** @} */
diff --git a/cpukit/score/include/rtems/score/schedulerpriority.h b/cpukit/score/include/rtems/score/schedulerpriority.h
index 805e30257d..de051a8114 100644
--- a/cpukit/score/include/rtems/score/schedulerpriority.h
+++ b/cpukit/score/include/rtems/score/schedulerpriority.h
@@ -45,6 +45,7 @@ extern "C" {
_Scheduler_priority_Block, /* block entry point */ \
_Scheduler_priority_Unblock, /* unblock entry point */ \
_Scheduler_priority_Change_priority, /* change priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
_Scheduler_default_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_priority_Update_priority, /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
index 3a235104cf..91ffcd28e5 100644
--- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -55,6 +55,7 @@ extern "C" {
_Scheduler_priority_affinity_SMP_Block, \
_Scheduler_priority_affinity_SMP_Unblock, \
_Scheduler_priority_affinity_SMP_Change_priority, \
+ _Scheduler_priority_affinity_SMP_Ask_for_help, \
_Scheduler_priority_affinity_SMP_Node_initialize, \
_Scheduler_default_Node_destroy, \
_Scheduler_priority_SMP_Update_priority, \
@@ -139,6 +140,12 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
bool prepend_it
);
+Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+);
+
/**
* @brief Set affinity for the priority affinity SMP scheduler.
*
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmp.h b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
index a1a148173d..d8ce7dc1dd 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
@@ -84,6 +84,7 @@ typedef struct {
_Scheduler_priority_SMP_Block, \
_Scheduler_priority_SMP_Unblock, \
_Scheduler_priority_SMP_Change_priority, \
+ _Scheduler_priority_SMP_Ask_for_help, \
_Scheduler_priority_SMP_Node_initialize, \
_Scheduler_default_Node_destroy, \
_Scheduler_priority_SMP_Update_priority, \
@@ -118,6 +119,12 @@ Thread_Control *_Scheduler_priority_SMP_Change_priority(
bool prepend_it
);
+Thread_Control *_Scheduler_priority_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *needs_help,
+ Thread_Control *offers_help
+);
+
void _Scheduler_priority_SMP_Update_priority(
const Scheduler_Control *scheduler,
Thread_Control *thread,
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
index 9ae01038b6..bb200b4641 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
@@ -148,6 +148,28 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
);
}
+static inline Thread_Control *_Scheduler_priority_SMP_Get_idle_thread(
+ Scheduler_Context *context
+)
+{
+ return _Scheduler_SMP_Get_idle_thread(
+ context,
+ _Scheduler_priority_SMP_Extract_from_ready
+ );
+}
+
+static void _Scheduler_priority_SMP_Release_idle_thread(
+ Scheduler_Context *context,
+ Thread_Control *idle
+)
+{
+ _Scheduler_SMP_Release_idle_thread(
+ context,
+ idle,
+ _Scheduler_priority_SMP_Insert_ready_fifo
+ );
+}
+
static inline void _Scheduler_priority_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *node_to_update,
diff --git a/cpukit/score/include/rtems/score/schedulersimple.h b/cpukit/score/include/rtems/score/schedulersimple.h
index c97ad2f3c6..82b8c3d7b9 100644
--- a/cpukit/score/include/rtems/score/schedulersimple.h
+++ b/cpukit/score/include/rtems/score/schedulersimple.h
@@ -43,6 +43,7 @@ extern "C" {
_Scheduler_simple_Block, /* block entry point */ \
_Scheduler_simple_Unblock, /* unblock entry point */ \
_Scheduler_simple_Change_priority, /* change priority entry point */ \
+ SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
_Scheduler_default_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_default_Update_priority, /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
index de338ab168..11310c6898 100644
--- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
@@ -65,6 +65,7 @@ typedef struct {
_Scheduler_simple_SMP_Block, \
_Scheduler_simple_SMP_Unblock, \
_Scheduler_simple_SMP_Change_priority, \
+ _Scheduler_simple_SMP_Ask_for_help, \
_Scheduler_simple_SMP_Node_initialize, \
_Scheduler_default_Node_destroy, \
_Scheduler_simple_SMP_Update_priority, \
@@ -99,6 +100,12 @@ Thread_Control *_Scheduler_simple_SMP_Change_priority(
bool prepend_it
);
+Thread_Control *_Scheduler_simple_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+);
+
void _Scheduler_simple_SMP_Update_priority(
const Scheduler_Control *scheduler,
Thread_Control *thread,
diff --git a/cpukit/score/include/rtems/score/schedulersmp.h b/cpukit/score/include/rtems/score/schedulersmp.h
index 0c51a142f6..a58417a268 100644
--- a/cpukit/score/include/rtems/score/schedulersmp.h
+++ b/cpukit/score/include/rtems/score/schedulersmp.h
@@ -51,6 +51,11 @@ typedef struct {
* @brief The chain of scheduled nodes.
*/
Chain_Control Scheduled;
+
+ /**
+ * @brief Chain of the available idle threads.
+ */
+ Chain_Control Idle_threads;
} Scheduler_SMP_Context;
/**
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 55d0697df1..3cf7861d3b 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -361,6 +361,7 @@ static inline void _Scheduler_SMP_Initialize(
)
{
_Chain_Initialize_empty( &self->Scheduled );
+ _Chain_Initialize_empty( &self->Idle_threads );
}
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
@@ -370,6 +371,13 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
+ Thread_Control *thread
+)
+{
+ return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
+}
+
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
Scheduler_Node *node
)
@@ -416,6 +424,36 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
return cpu->scheduler_context == context;
}
+static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
+ Scheduler_Context *context,
+ Scheduler_SMP_Extract extract_from_ready
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Thread_Control *idle = (Thread_Control *)
+ _Chain_Get_first_unprotected( &self->Idle_threads );
+ Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );
+
+ ( *extract_from_ready )( &self->Base, own_node );
+
+ _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+
+ return idle;
+}
+
+static inline void _Scheduler_SMP_Release_idle_thread(
+ Scheduler_Context *context,
+ Thread_Control *idle,
+ Scheduler_SMP_Insert insert_ready
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );
+
+ _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
+ ( *insert_ready )( context, own_node );
+}
+
static inline void _Scheduler_SMP_Allocate_processor_lazy(
Scheduler_Context *context,
Thread_Control *scheduled_thread,
@@ -468,6 +506,7 @@ static inline void _Scheduler_SMP_Allocate_processor(
_Scheduler_SMP_Node_downcast( scheduled ),
SCHEDULER_SMP_NODE_SCHEDULED
);
+ _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );
( *allocate_processor )( context, scheduled_thread, victim_thread );
}
@@ -491,6 +530,57 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
return lowest_scheduled;
}
+static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Scheduler_Node *lowest_scheduled,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Thread_Control *user = _Scheduler_Node_get_user( node );
+ Thread_Control *lowest_scheduled_user =
+ _Scheduler_Node_get_user( lowest_scheduled );
+ Thread_Control *needs_help;
+ Thread_Control *idle;
+
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_Thread_change_state(
+ lowest_scheduled_user,
+ THREAD_SCHEDULER_READY
+ );
+
+ _Scheduler_Thread_set_node( user, node );
+
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ node,
+ lowest_scheduled,
+ allocate_processor
+ );
+
+ ( *insert_scheduled )( context, node );
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ idle = _Scheduler_Release_idle_thread(
+ context,
+ lowest_scheduled,
+ release_idle_thread
+ );
+ if ( idle == NULL ) {
+ needs_help = lowest_scheduled_user;
+ } else {
+ needs_help = NULL;
+ }
+
+ return needs_help;
+}
+
/**
* @brief Enqueues a node according to the specified order function.
*
@@ -513,6 +603,7 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
* if this pointer is passed as the second argument to the order function.
* @param[in] allocate_processor Function to allocate a processor to a node
* based on the rules of the scheduler.
+ * @param[in] release_idle_thread Function to release an idle thread.
*/
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
Scheduler_Context *context,
@@ -523,29 +614,23 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Release_idle_thread release_idle_thread
)
{
Scheduler_Node *lowest_scheduled =
( *get_lowest_scheduled )( context, node, order );
if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
- _Scheduler_SMP_Node_change_state(
- _Scheduler_SMP_Node_downcast( lowest_scheduled ),
- SCHEDULER_SMP_NODE_READY
- );
-
- _Scheduler_SMP_Allocate_processor(
+ needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
context,
node,
lowest_scheduled,
- allocate_processor
+ insert_scheduled,
+ move_from_scheduled_to_ready,
+ allocate_processor,
+ release_idle_thread
);
-
- ( *insert_scheduled )( context, node );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
-
- needs_help = _Scheduler_Node_get_user( lowest_scheduled );
} else {
( *insert_ready )( context, node );
}
@@ -560,6 +645,8 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
* @param[in] context The scheduler instance context.
* @param[in] node The node to enqueue.
* @param[in] order The order function.
+ * @param[in] extract_from_ready Function to extract a node from the set of
+ * ready nodes.
* @param[in] get_highest_ready Function to get the highest ready node.
* @param[in] insert_ready Function to insert a node into the set of ready
* nodes.
@@ -569,48 +656,86 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
* of ready nodes to the set of scheduled nodes.
* @param[in] allocate_processor Function to allocate a processor to a node
* based on the rules of the scheduler.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ * @param[in] release_idle_thread Function to release an idle thread.
*/
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
Scheduler_Context *context,
Scheduler_Node *node,
Chain_Node_order order,
+ Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_thread get_idle_thread,
+ Scheduler_Release_idle_thread release_idle_thread
)
{
- Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
Thread_Control *needs_help;
- _Assert( highest_ready != NULL );
+ while ( true ) {
+ Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
+
+ _Assert( highest_ready != NULL );
+
+ /*
+ * The node has been extracted from the scheduled chain. We have to place
+ * it now on the scheduled or ready set.
+ */
+ if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
+ ( *insert_scheduled )( context, node );
+
+ needs_help = NULL;
+
+ break;
+ } else if (
+ _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ get_idle_thread
+ )
+ ) {
+ Thread_Control *user = _Scheduler_Node_get_user( node );
+ Thread_Control *idle;
+
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( node ),
+ SCHEDULER_SMP_NODE_READY
+ );
+ _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
- /*
- * The node has been extracted from the scheduled chain. We have to place
- * it now on the scheduled or ready set.
- */
- if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
- ( *insert_scheduled )( context, node );
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ highest_ready,
+ node,
+ allocate_processor
+ );
- needs_help = NULL;
- } else {
- _Scheduler_SMP_Node_change_state(
- _Scheduler_SMP_Node_downcast( node ),
- SCHEDULER_SMP_NODE_READY
- );
+ ( *insert_ready )( context, node );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
- _Scheduler_SMP_Allocate_processor(
- context,
- highest_ready,
- node,
- allocate_processor
- );
+ idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ release_idle_thread
+ );
+ if ( idle == NULL ) {
+ needs_help = user;
+ } else {
+ needs_help = NULL;
+ }
- ( *insert_ready )( context, node );
- ( *move_from_ready_to_scheduled )( context, highest_ready );
+ break;
+ } else {
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( highest_ready ),
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
- needs_help = _Scheduler_Node_get_user( node );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
return needs_help;
@@ -626,21 +751,44 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
+ Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_thread get_idle_thread
)
{
- Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+ while ( true ) {
+ Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+
+ _Assert( highest_ready != NULL );
+
+ if (
+ _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ get_idle_thread
+ )
+ ) {
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ highest_ready,
+ victim,
+ allocate_processor
+ );
- _Scheduler_SMP_Allocate_processor(
- context,
- highest_ready,
- victim,
- allocate_processor
- );
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+ break;
+ } else {
+ _Scheduler_SMP_Node_change_state(
+ _Scheduler_SMP_Node_downcast( highest_ready ),
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
- ( *move_from_ready_to_scheduled )( context, highest_ready );
+ ( *extract_from_ready )( context, highest_ready );
+ }
+ }
}
/**
@@ -649,10 +797,11 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
* @param[in] context The scheduler instance context.
* @param[in] thread The thread of the scheduling operation.
* @param[in] extract_from_ready Function to extract a node from the set of
- * ready nodes.
+ * ready nodes.
* @param[in] get_highest_ready Function to get the highest ready node.
* @param[in] move_from_ready_to_scheduled Function to move a node from the set
- * of ready nodes to the set of scheduled nodes.
+ * of ready nodes to the set of scheduled nodes.
+ * @param[in] get_idle_thread Function to get an idle thread.
*/
static inline void _Scheduler_SMP_Block(
Scheduler_Context *context,
@@ -660,40 +809,67 @@ static inline void _Scheduler_SMP_Block(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_thread get_idle_thread
)
{
Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
+ bool block = _Scheduler_Block_node(
+ context,
+ &node->Base,
+ is_scheduled,
+ get_idle_thread
+ );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ if ( block ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
- if ( is_scheduled ) {
- _Scheduler_SMP_Extract_from_scheduled( &node->Base );
+ if ( is_scheduled ) {
+ _Scheduler_SMP_Extract_from_scheduled( &node->Base );
- _Scheduler_SMP_Schedule_highest_ready(
- context,
- &node->Base,
- get_highest_ready,
- move_from_ready_to_scheduled,
- allocate_processor
- );
- } else {
- ( *extract_from_ready )( context, &node->Base );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ &node->Base,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_thread
+ );
+ } else {
+ ( *extract_from_ready )( context, &node->Base );
+ }
}
}
static inline Thread_Control *_Scheduler_SMP_Unblock(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_SMP_Enqueue enqueue_fifo
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_SMP_Enqueue enqueue_fifo,
+ Scheduler_Release_idle_thread release_idle_thread
)
{
Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
+ bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
+ bool unblock = _Scheduler_Unblock_node(
+ context,
+ thread,
+ &node->Base,
+ is_scheduled,
+ release_idle_thread
+ );
+ Thread_Control *needs_help;
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+ if ( unblock ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- return ( *enqueue_fifo )( context, &node->Base, thread );
+ needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+ } else {
+ needs_help = NULL;
+ }
+
+ return needs_help;
}
static inline Thread_Control *_Scheduler_SMP_Change_priority(
@@ -709,7 +885,7 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo
)
{
- Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
+ Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
Thread_Control *needs_help;
if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
@@ -722,7 +898,7 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
} else {
needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
}
- } else {
+ } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
( *extract_from_ready )( context, &node->Base );
( *update )( context, &node->Base, new_priority );
@@ -732,11 +908,68 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
} else {
needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
}
+ } else {
+ ( *update )( context, &node->Base, new_priority );
+
+ needs_help = NULL;
}
return needs_help;
}
+static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
+ Scheduler_Context *context,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help,
+ Scheduler_SMP_Enqueue enqueue_fifo,
+ Scheduler_Release_idle_thread release_idle_thread
+)
+{
+ Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
+ Thread_Control *next_needs_help = NULL;
+ Thread_Control *previous_accepts_help;
+
+ previous_accepts_help = node->Base.accepts_help;
+ node->Base.accepts_help = needs_help;
+
+ switch ( node->state ) {
+ case SCHEDULER_SMP_NODE_READY:
+ next_needs_help =
+ _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
+ break;
+ case SCHEDULER_SMP_NODE_SCHEDULED:
+ next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
+ context,
+ &node->Base,
+ offers_help,
+ needs_help,
+ previous_accepts_help,
+ release_idle_thread
+ );
+ break;
+ case SCHEDULER_SMP_NODE_BLOCKED:
+ if (
+ _Scheduler_Ask_blocked_node_for_help(
+ context,
+ &node->Base,
+ offers_help,
+ needs_help
+ )
+ ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+
+ next_needs_help = ( *enqueue_fifo )(
+ context,
+ &node->Base,
+ needs_help
+ );
+ }
+ break;
+ }
+
+ return next_needs_help;
+}
+
static inline Thread_Control *_Scheduler_SMP_Yield(
Scheduler_Context *context,
Thread_Control *thread,
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 28844c3062..0d9025fdfa 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -445,19 +445,77 @@ typedef struct {
Thread_Control *terminator;
} Thread_Life_control;
+#if defined(RTEMS_SMP)
+/**
+ * @brief The thread state with respect to the scheduler.
+ */
+typedef enum {
+ /**
+ * @brief This thread is blocked with respect to the scheduler.
+ *
+ * This thread uses no scheduler nodes.
+ */
+ THREAD_SCHEDULER_BLOCKED,
+
+ /**
+ * @brief This thread is scheduled with respect to the scheduler.
+ *
+ * This thread executes using one of its scheduler nodes. This could be its
+ * own scheduler node or in case it owns resources taking part in the
+ * scheduler helping protocol a scheduler node of another thread.
+ */
+ THREAD_SCHEDULER_SCHEDULED,
+
+ /**
+ * @brief This thread is ready with respect to the scheduler.
+ *
+ * None of the scheduler nodes of this thread is scheduled.
+ */
+ THREAD_SCHEDULER_READY
+} Thread_Scheduler_state;
+#endif
+
/**
* @brief Thread scheduler control.
*/
typedef struct {
#if defined(RTEMS_SMP)
/**
- * @brief The current scheduler control of this thread.
+ * @brief The current scheduler state of this thread.
+ */
+ Thread_Scheduler_state state;
+
+ /**
+ * @brief The own scheduler control of this thread.
+ *
+ * This field is constant after initialization.
+ */
+ const struct Scheduler_Control *own_control;
+
+ /**
+ * @brief The scheduler control of this thread.
+ *
+ * The scheduler helping protocol may change this field.
*/
const struct Scheduler_Control *control;
+
+ /**
+ * @brief The own scheduler node of this thread.
+ *
+ * This field is constant after initialization. It is used by change
+ * priority and ask for help operations.
+ */
+ struct Scheduler_Node *own_node;
#endif
/**
- * @brief The current scheduler node of this thread.
+ * @brief The scheduler node of this thread.
+ *
+ * On uni-processor configurations this field is constant after
+ * initialization.
+ *
+ * On SMP configurations the scheduler helping protocol may change this
+ * field.
*/
struct Scheduler_Node *node;
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 4971e9d274..cb7d5fe8d2 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -828,6 +828,16 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
return owns_resources;
}
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Resource_node_to_thread(
+ Resource_Node *node
+)
+{
+ return (Thread_Control *)
+ ( (char *) node - offsetof( Thread_Control, Resource_node ) );
+}
+#endif
+
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
Thread_Control *the_thread,
Per_CPU_Control *cpu
diff --git a/cpukit/score/src/schedulercbsnodeinit.c b/cpukit/score/src/schedulercbsnodeinit.c
index 1a5299a863..d16f3fa035 100644
--- a/cpukit/score/src/schedulercbsnodeinit.c
+++ b/cpukit/score/src/schedulercbsnodeinit.c
@@ -29,6 +29,8 @@ void _Scheduler_CBS_Node_initialize(
(void) scheduler;
+ _Scheduler_Node_do_initialize( &node->Base.Base, the_thread );
+
node->Base.thread = the_thread;
node->Base.queue_state = SCHEDULER_EDF_QUEUE_STATE_NEVER_HAS_BEEN;
node->cbs_server = NULL;
diff --git a/cpukit/score/src/schedulerchangeroot.c b/cpukit/score/src/schedulerchangeroot.c
new file mode 100644
index 0000000000..eba852ba5e
--- /dev/null
+++ b/cpukit/score/src/schedulerchangeroot.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <rtems/score/schedulerimpl.h>
+
+typedef struct {
+ Thread_Control *root;
+ Thread_Control *needs_help;
+} Scheduler_Set_root_context;
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_Set_root_visitor(
+ Resource_Node *resource_node,
+ void *arg
+)
+{
+ Scheduler_Set_root_context *ctx = arg;
+ Thread_Control *root = ctx->root;
+ Thread_Control *needs_help = root;
+ Thread_Control *offers_help =
+ _Thread_Resource_node_to_thread( resource_node );
+ const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
+ Thread_Control *needs_help_too;
+
+ _Resource_Node_set_root( resource_node, &root->Resource_node );
+
+ needs_help_too = ( *scheduler->Operations.ask_for_help )(
+ scheduler,
+ offers_help,
+ needs_help
+ );
+
+ if ( needs_help_too != needs_help && needs_help_too != NULL ) {
+ _Assert( ctx->needs_help == NULL );
+ ctx->needs_help = needs_help_too;
+ }
+
+ return false;
+}
+
+void _Scheduler_Thread_change_resource_root(
+ Thread_Control *top,
+ Thread_Control *root
+)
+{
+ Scheduler_Set_root_context ctx = { root, NULL };
+ Thread_Control *offers_help = top;
+ Scheduler_Node *offers_help_node;
+ Thread_Control *offers_help_too;
+ ISR_Level level;
+
+ _ISR_Disable( level );
+
+ offers_help_node = _Scheduler_Thread_get_node( offers_help );
+ offers_help_too = _Scheduler_Node_get_owner( offers_help_node );
+
+ if ( offers_help != offers_help_too ) {
+ _Scheduler_Set_root_visitor( &offers_help_too->Resource_node, &ctx );
+ _Assert( ctx.needs_help == offers_help );
+ ctx.needs_help = NULL;
+ }
+
+ _Scheduler_Set_root_visitor( &top->Resource_node, &ctx );
+ _Resource_Iterate( &top->Resource_node, _Scheduler_Set_root_visitor, &ctx );
+
+ if ( ctx.needs_help != NULL ) {
+ _Scheduler_Ask_for_help( ctx.needs_help );
+ }
+
+ _ISR_Enable( level );
+}
diff --git a/cpukit/score/src/schedulerdefaultaskforhelp.c b/cpukit/score/src/schedulerdefaultaskforhelp.c
new file mode 100644
index 0000000000..b69524846b
--- /dev/null
+++ b/cpukit/score/src/schedulerdefaultaskforhelp.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <rtems/score/scheduler.h>
+
+Thread_Control *_Scheduler_default_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+)
+{
+ (void) scheduler;
+ (void) offers_help;
+ (void) needs_help;
+
+ return NULL;
+}
diff --git a/cpukit/score/src/schedulerdefaultnodeinit.c b/cpukit/score/src/schedulerdefaultnodeinit.c
index ab371bd7fe..a96a528ee4 100644
--- a/cpukit/score/src/schedulerdefaultnodeinit.c
+++ b/cpukit/score/src/schedulerdefaultnodeinit.c
@@ -19,13 +19,16 @@
#include "config.h"
#endif
-#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerimpl.h>
void _Scheduler_default_Node_initialize(
const Scheduler_Control *scheduler,
Thread_Control *the_thread
)
{
+ Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
+
(void) scheduler;
- (void) the_thread;
+
+ _Scheduler_Node_do_initialize( node, the_thread );
}
diff --git a/cpukit/score/src/scheduleredfnodeinit.c b/cpukit/score/src/scheduleredfnodeinit.c
index b23f5acda8..e7f8af70a5 100644
--- a/cpukit/score/src/scheduleredfnodeinit.c
+++ b/cpukit/score/src/scheduleredfnodeinit.c
@@ -29,6 +29,8 @@ void _Scheduler_EDF_Node_initialize(
(void) scheduler;
+ _Scheduler_Node_do_initialize( &node->Base, the_thread );
+
node->thread = the_thread;
node->queue_state = SCHEDULER_EDF_QUEUE_STATE_NEVER_HAS_BEEN;
}
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 24fae3cfee..39a0a481ea 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -60,6 +60,15 @@ static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
&& _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}
+static Scheduler_priority_affinity_SMP_Node *
+_Scheduler_priority_affinity_SMP_Thread_get_own_node(
+ Thread_Control *thread
+)
+{
+ return (Scheduler_priority_affinity_SMP_Node *)
+ _Scheduler_Thread_get_own_node( thread );
+}
+
/*
* This method returns the scheduler node for the specified thread
* as a scheduler specific type.
@@ -69,7 +78,8 @@ _Scheduler_priority_affinity_SMP_Thread_get_node(
Thread_Control *thread
)
{
- return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Thread_get_node( thread );
+ return (Scheduler_priority_affinity_SMP_Node *)
+ _Scheduler_Thread_get_node( thread );
}
static Scheduler_priority_affinity_SMP_Node *
@@ -90,7 +100,7 @@ void _Scheduler_priority_affinity_SMP_Node_initialize(
)
{
Scheduler_priority_affinity_SMP_Node *node =
- _Scheduler_priority_affinity_SMP_Thread_get_node( thread );
+ _Scheduler_priority_affinity_SMP_Thread_get_own_node( thread );
(void) scheduler;
@@ -221,7 +231,8 @@ void _Scheduler_priority_affinity_SMP_Block(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle_thread
);
/*
@@ -303,7 +314,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
_Scheduler_SMP_Insert_scheduled_fifo,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -387,7 +399,8 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
needs_help = _Scheduler_SMP_Unblock(
context,
thread,
- _Scheduler_priority_affinity_SMP_Enqueue_fifo
+ _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+ _Scheduler_priority_SMP_Release_idle_thread
);
/*
@@ -420,7 +433,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -463,11 +477,14 @@ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
context,
node,
order,
+ _Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
insert_ready,
insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle_thread,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -543,6 +560,27 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
return displaced;
}
+Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ needs_help = _Scheduler_SMP_Ask_for_help(
+ context,
+ offers_help,
+ needs_help,
+ _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+ _Scheduler_priority_SMP_Release_idle_thread
+ );
+
+ _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
+
+ return needs_help;
+}
+
/*
* This is the public scheduler specific Change Priority operation.
*/
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index b642c5d6f3..93d1c76d30 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -47,7 +47,7 @@ void _Scheduler_priority_SMP_Node_initialize(
Thread_Control *thread
)
{
- Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
+ Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
_Scheduler_SMP_Node_initialize( node, thread );
}
@@ -93,7 +93,8 @@ void _Scheduler_priority_SMP_Block(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle_thread
);
}
@@ -115,7 +116,8 @@ static Thread_Control *_Scheduler_priority_SMP_Enqueue_ordered(
insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -163,11 +165,14 @@ static Thread_Control *_Scheduler_priority_SMP_Enqueue_scheduled_ordered(
context,
node,
order,
+ _Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
insert_ready,
insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle_thread,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -209,7 +214,8 @@ Thread_Control *_Scheduler_priority_SMP_Unblock(
return _Scheduler_SMP_Unblock(
context,
thread,
- _Scheduler_priority_SMP_Enqueue_fifo
+ _Scheduler_priority_SMP_Enqueue_fifo,
+ _Scheduler_priority_SMP_Release_idle_thread
);
}
@@ -236,6 +242,23 @@ Thread_Control *_Scheduler_priority_SMP_Change_priority(
);
}
+Thread_Control *_Scheduler_priority_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ return _Scheduler_SMP_Ask_for_help(
+ context,
+ offers_help,
+ needs_help,
+ _Scheduler_priority_SMP_Enqueue_fifo,
+ _Scheduler_priority_SMP_Release_idle_thread
+ );
+}
+
Thread_Control *_Scheduler_priority_SMP_Yield(
const Scheduler_Control *scheduler,
Thread_Control *thread
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index ee540bebd0..084d78251f 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -47,7 +47,7 @@ void _Scheduler_simple_SMP_Node_initialize(
Thread_Control *the_thread
)
{
- Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( the_thread );
+ Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( the_thread );
_Scheduler_SMP_Node_initialize( node, the_thread );
}
@@ -162,6 +162,28 @@ static void _Scheduler_simple_SMP_Extract_from_ready(
_Chain_Extract_unprotected( &node_to_extract->Node );
}
+static Thread_Control *_Scheduler_simple_SMP_Get_idle_thread(
+ Scheduler_Context *context
+)
+{
+ return _Scheduler_SMP_Get_idle_thread(
+ context,
+ _Scheduler_simple_SMP_Extract_from_ready
+ );
+}
+
+static void _Scheduler_simple_SMP_Release_idle_thread(
+ Scheduler_Context *context,
+ Thread_Control *idle
+)
+{
+ _Scheduler_SMP_Release_idle_thread(
+ context,
+ idle,
+ _Scheduler_simple_SMP_Insert_ready_fifo
+ );
+}
+
void _Scheduler_simple_SMP_Block(
const Scheduler_Control *scheduler,
Thread_Control *thread
@@ -175,7 +197,8 @@ void _Scheduler_simple_SMP_Block(
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle_thread
);
}
@@ -197,7 +220,8 @@ static Thread_Control *_Scheduler_simple_SMP_Enqueue_ordered(
insert_scheduled,
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Release_idle_thread
);
}
@@ -245,11 +269,14 @@ static Thread_Control *_Scheduler_simple_SMP_Enqueue_scheduled_ordered(
context,
node,
order,
+ _Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
insert_ready,
insert_scheduled,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle_thread,
+ _Scheduler_simple_SMP_Release_idle_thread
);
}
@@ -291,7 +318,8 @@ Thread_Control *_Scheduler_simple_SMP_Unblock(
return _Scheduler_SMP_Unblock(
context,
thread,
- _Scheduler_simple_SMP_Enqueue_fifo
+ _Scheduler_simple_SMP_Enqueue_fifo,
+ _Scheduler_simple_SMP_Release_idle_thread
);
}
@@ -318,6 +346,23 @@ Thread_Control *_Scheduler_simple_SMP_Change_priority(
);
}
+Thread_Control *_Scheduler_simple_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *offers_help,
+ Thread_Control *needs_help
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ return _Scheduler_SMP_Ask_for_help(
+ context,
+ offers_help,
+ needs_help,
+ _Scheduler_simple_SMP_Enqueue_fifo,
+ _Scheduler_simple_SMP_Release_idle_thread
+ );
+}
+
Thread_Control *_Scheduler_simple_SMP_Yield(
const Scheduler_Control *scheduler,
Thread_Control *thread
diff --git a/cpukit/score/src/schedulersmpvalidstatechanges.c b/cpukit/score/src/schedulersmpdebug.c
index 6a5dcc6de4..4a45d2095f 100644
--- a/cpukit/score/src/schedulersmpvalidstatechanges.c
+++ b/cpukit/score/src/schedulersmpdebug.c
@@ -1,9 +1,9 @@
/**
* @file
*
- * @ingroup ScoreSchedulerSMP
+ * @ingroup ScoreScheduler
*
- * @brief SMP Scheduler Implementation
+ * @brief Scheduler SMP Debug Implementation
*/
/*
@@ -24,10 +24,24 @@
#include "config.h"
#endif
+#include <rtems/score/schedulerimpl.h>
#include <rtems/score/schedulerpriorityimpl.h>
+#if defined(RTEMS_DEBUG)
+
+/*
+ * Table with all valid state transitions for _Scheduler_Thread_change_state()
+ * in case RTEMS_DEBUG is defined.
+ */
+const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ] = {
+ /* FROM / TO BLOCKED SCHEDULED READY */
+ /* BLOCKED */ { false, true, true },
+ /* SCHEDULED */ { true, false, true },
+ /* READY */ { true, true, true }
+};
+
/*
- * Table with all valid state transitions. It is used in
+ * Table with all valid state transitions for
* _Scheduler_SMP_Node_change_state() in case RTEMS_DEBUG is defined.
*/
const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
@@ -36,3 +50,5 @@ const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
/* SCHEDULED */ { true, false, true },
/* READY */ { true, true, false }
};
+
+#endif
diff --git a/cpukit/score/src/schedulersmpstartidle.c b/cpukit/score/src/schedulersmpstartidle.c
index 6809fd81a3..de125d3323 100644
--- a/cpukit/score/src/schedulersmpstartidle.c
+++ b/cpukit/score/src/schedulersmpstartidle.c
@@ -26,4 +26,5 @@ void _Scheduler_SMP_Start_idle(
_Thread_Set_CPU( thread, cpu );
_Chain_Append_unprotected( &self->Scheduled, &node->Base.Node );
+ _Chain_Prepend_unprotected( &self->Idle_threads, &thread->Object.Node );
}
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index e6c4985591..e56e4e6cda 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -181,7 +181,10 @@ bool _Thread_Initialize(
}
#if defined(RTEMS_SMP)
+ the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
+ the_thread->Scheduler.own_control = scheduler;
the_thread->Scheduler.control = scheduler;
+ the_thread->Scheduler.own_node = the_thread->Scheduler.node;
_Resource_Node_initialize( &the_thread->Resource_node );
_CPU_Context_Set_is_executing( &the_thread->Registers, false );
#endif
diff --git a/doc/user/smp.t b/doc/user/smp.t
index dd84c37360..c21c8a0791 100644
--- a/doc/user/smp.t
+++ b/doc/user/smp.t
@@ -147,6 +147,84 @@ another processor. So if we enable interrupts during this transition we have
to provide an alternative task independent stack for this time frame. This
issue needs further investigation.
+@subsection Scheduler Helping Protocol
+
+The scheduler provides a helping protocol to support locking protocols like
+@cite{Migratory Priority Inheritance} or the @cite{Multiprocessor Resource
+Sharing Protocol}. Each ready task can use at least one scheduler node at a
+time to gain access to a processor. Each scheduler node has an owner, a user
+and an optional idle task.  The owner of a scheduler node is determined at
+task creation and never changes during the lifetime of a scheduler node.  The user
+of a scheduler node may change due to the scheduler helping protocol. A
+scheduler node is in one of the four scheduler help states:
+
+@table @dfn
+
+@item help yourself
+
+This scheduler node is solely used by the owner task. This task owns no
+resources using a helping protocol and thus does not take part in the scheduler
+helping protocol. No help will be provided for other tasks.
+
+@item help active owner
+
+This scheduler node is owned by a task actively owning a resource and can be
+used to help out tasks.
+
+In case this scheduler node changes its state from ready to scheduled and the
+task executes using another node, then an idle task will be provided as a user
+of this node to temporarily execute on behalf of the owner task. Thus lower
+priority tasks are denied access to the processors of this scheduler instance.
+
+In case a task actively owning a resource performs a blocking operation, then
+an idle task will be used also in case this node is in the scheduled state.
+
+@item help active rival
+
+This scheduler node is owned by a task actively obtaining a resource currently
+owned by another task and can be used to help out tasks.
+
+The task owning this node is ready and will give away its processor in case the
+task owning the resource asks for help.
+
+@item help passive
+
+This scheduler node is owned by a task obtaining a resource currently owned by
+another task and can be used to help out tasks.
+
+The task owning this node is blocked.
+
+@end table
+
+The following scheduler operations return a task in need for help
+
+@itemize @bullet
+@item unblock,
+@item change priority,
+@item yield, and
+@item ask for help.
+@end itemize
+
+A task in need for help is a task that encounters a scheduler state change from
+scheduled to ready (this is a pre-emption by a higher priority task) or a task
+that cannot be scheduled in an unblock operation. Such a task can ask tasks
+which depend on resources owned by this task for help.
+
+In case it is not possible to schedule a task in need for help, then the
+scheduler nodes available for the task will be placed into the set of ready
+scheduler nodes of the corresponding scheduler instances. Once a state change
+from ready to scheduled happens for one of the scheduler nodes it will be used to
+schedule the task in need for help.
+
+The ask for help scheduler operation is used to help tasks in need for help
+returned by the operations mentioned above. This operation is also used in
+case the root of a resource sub-tree owned by a task changes.
+
+The run-time of the ask for help procedures depends on the size of the resource
+tree of the task needing help and other resource trees in case tasks in need
+for help are produced during this operation. Thus the worst-case latency in
+the system depends on the maximum resource tree size of the application.
+
@subsection Critical Section Techniques and SMP
As discussed earlier, SMP systems have opportunities for true parallelism