author		Sebastian Huber <sebastian.huber@embedded-brains.de>	2021-10-20 14:21:20 +0200
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2021-11-23 11:00:28 +0100
commit		ff20bc981654ec896ca2b7830962079b2e20c29e (patch)
tree		6c3ba2266501cd5c1e9e0e1cd311ff74b04c3d9b /cpukit
parent		score: Add SMP scheduler make/clean sticky (diff)
download	rtems-ff20bc981654ec896ca2b7830962079b2e20c29e.tar.bz2
score: Rework idle handling in SMP schedulers
This patch fixes an issue with the idle thread handling in the SMP scheduler framework used for the MrsP locking protocol.

The approach of using a simple chain of unused idle threads is broken for schedulers which support thread to processor affinity. The reason is that the thread to processor affinity introduces another ordering indicator which may, under certain conditions, lead to a reordering of idle threads in the scheduled chain. This reordering is not propagated to the chain of unused idle threads. As a consequence, an idle thread which is already in use could be picked up for a sticky scheduler node. This locks up the system in infinite loops in the thread context switch procedure.

To fix this, the SMP scheduler implementations must now provide callbacks to get and release an unused idle thread.

Update #4531.
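For orientation before the diff: the patch replaces the chain-based Scheduler_Get_idle_thread/Scheduler_Release_idle_thread pair with the two handler types below (verbatim from the schedulerimpl.h hunk). A minimal sketch of the callback pair an SMP scheduler implementation now has to supply follows; the Example_* names are hypothetical placeholders, while the real implementations in this patch (for instance _Scheduler_priority_SMP_Get_idle() and _Scheduler_priority_SMP_Release_idle()) operate on their scheduler's own ready structures.

typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );

typedef void ( *Scheduler_Release_idle_node )(
  Scheduler_Node *node,
  void *arg
);

/* Sketch with hypothetical Example_* helpers: hand out the scheduler
 * node owned by an unused idle thread, or take it back for reuse.
 * Keeping idle nodes inside the scheduler's own ready structure
 * (instead of a shared chain) is what prevents an idle thread that
 * is already in use from being handed out again.
 */
static Scheduler_Node *_Example_Get_idle( void *arg )
{
  Example_Context *self = _Example_Get_self( arg );

  /* Extract an idle-owned node from the ready structure and return it. */
  return _Example_Extract_lowest_ready( self );
}

static void _Example_Release_idle( Scheduler_Node *node, void *arg )
{
  Example_Context *self = _Example_Get_self( arg );

  /* Return the idle-owned node to the ready structure for reuse. */
  _Example_Enqueue_ready( self, node );
}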
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/include/rtems/score/schedulerimpl.h276
-rw-r--r--cpukit/include/rtems/score/schedulerprioritysmp.h1
-rw-r--r--cpukit/include/rtems/score/schedulerprioritysmpimpl.h35
-rw-r--r--cpukit/include/rtems/score/schedulersmp.h9
-rw-r--r--cpukit/include/rtems/score/schedulersmpimpl.h572
-rw-r--r--cpukit/score/src/scheduleredfsmp.c65
-rw-r--r--cpukit/score/src/schedulerpriorityaffinitysmp.c41
-rw-r--r--cpukit/score/src/schedulerprioritysmp.c32
-rw-r--r--cpukit/score/src/schedulersimplesmp.c55
-rw-r--r--cpukit/score/src/schedulerstrongapa.c114
10 files changed, 686 insertions(+), 514 deletions(-)
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
index eb279876c7..12b6806402 100644
--- a/cpukit/include/rtems/score/schedulerimpl.h
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -798,26 +798,26 @@ RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
#if defined(RTEMS_SMP)
/**
- * @brief Gets an idle thread from the scheduler instance.
+ * @brief Gets a scheduler node which is owned by an unused idle thread.
*
- * @param context The scheduler instance context.
+ * @param arg is the handler argument.
*
- * @return idle An idle thread for use. This function must always return an
- * idle thread. If none is available, then this is a fatal error.
+ * @return Returns a scheduler node owned by an idle thread for use. This
+ * handler must always return a node. If none is available, then this is a
+ * fatal error.
*/
-typedef Thread_Control *( *Scheduler_Get_idle_thread )(
- Scheduler_Context *context
-);
+typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );
/**
- * @brief Releases an idle thread to the scheduler instance for reuse.
+ * @brief Releases the scheduler node which is owned by an idle thread.
*
- * @param context The scheduler instance context.
- * @param idle The idle thread to release.
+ * @param node is the node to release.
+ *
+ * @param arg is the handler argument.
*/
-typedef void ( *Scheduler_Release_idle_thread )(
- Scheduler_Context *context,
- Thread_Control *idle
+typedef void ( *Scheduler_Release_idle_node )(
+ Scheduler_Node *node,
+ void *arg
);
/**
@@ -841,189 +841,114 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
}
/**
- * @brief Uses an idle thread for this scheduler node.
+ * @brief Uses an idle thread for the scheduler node.
*
- * A thread whose home scheduler node has a sticky level greater than zero may
- * use an idle thread in the home scheduler instance in the case it executes
- * currently in another scheduler instance or in the case it is in a blocking
- * state.
+ * @param[in, out] node is the node which wants to use an idle thread.
*
- * @param context The scheduler instance context.
- * @param[in, out] node The node which wants to use the idle thread.
- * @param cpu The processor for the idle thread.
- * @param get_idle_thread Function to get an idle thread.
+ * @param get_idle_node is the get idle node handler.
+ *
+ * @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Scheduler_Get_idle_thread get_idle_thread
+ Scheduler_Node *node,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
- Thread_Control *idle = ( *get_idle_thread )( context );
+ Scheduler_Node *idle_node;
+ Thread_Control *idle;
+ idle_node = ( *get_idle_node )( arg );
+ idle = _Scheduler_Node_get_owner( idle_node );
+ _Assert( idle->is_idle );
_Scheduler_Node_set_idle_user( node, idle );
+
return idle;
}
/**
- * @brief This enumeration defines what a scheduler should do with a node which
- * could be scheduled.
- */
-typedef enum {
- SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
- SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
- SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
-} Scheduler_Try_to_schedule_action;
-
-/**
- * @brief Tries to schedule the scheduler node.
- *
- * When a scheduler needs to schedule a node, it shall use this function to
- * determine what it shall do with the node. The node replaces a victim node if
- * it can be scheduled.
- *
- * This function uses the state of the node and the scheduler state of the owner
- * thread to determine what shall be done. Each scheduler maintains its nodes
- * independent of other schedulers. This function ensures that a thread is
- * scheduled by at most one scheduler. If a node requires an executing thread
- * due to some locking protocol and the owner thread is already scheduled by
- * another scheduler, then an idle thread shall be attached to the node.
- *
- * @param[in, out] context is the scheduler context.
- * @param[in, out] node is the node which could be scheduled.
- * @param idle is an idle thread used by the victim node or NULL.
- * @param get_idle_thread points to a function to get an idle thread.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE The node shall be scheduled.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The node shall be
- * scheduled and the provided idle thread shall be attached to the node. This
- * action is returned, if the node cannot use the owner thread and shall use
- * an idle thread instead. In this case, the idle thread is provided by the
- * victim node.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK The node shall be blocked. This
- * action is returned, if the owner thread is already scheduled by another
- * scheduler.
+ * @brief Releases the idle thread used by the scheduler node.
+ *
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param idle is the idle thread to release.
+ *
+ * @param release_idle_node is the release idle node handler.
+ *
+ * @param arg is the handler argument.
*/
-RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
-_Scheduler_Try_to_schedule_node(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Scheduler_Node *victim,
- Scheduler_Get_idle_thread get_idle_thread
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_idle_thread(
+ Scheduler_Node *node,
+ const Thread_Control *idle,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
- ISR_lock_Context lock_context;
- Scheduler_Try_to_schedule_action action;
- Thread_Control *owner;
+ Thread_Control *owner;
+ Scheduler_Node *idle_node;
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
owner = _Scheduler_Node_get_owner( node );
- _Assert( _Scheduler_Node_get_user( node ) == owner );
- _Assert( _Scheduler_Node_get_idle( node ) == NULL );
-
- _Thread_Scheduler_acquire_critical( owner, &lock_context );
-
- if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
- _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
- _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
- } else if (
- owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
- && node->sticky_level <= 1
- ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
- } else if ( node->sticky_level == 0 ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
- } else if ( _Scheduler_Node_get_idle( victim ) != NULL ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
- } else {
- Thread_Control *idle;
- Thread_Control *user;
-
- idle = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
- user = _Scheduler_Node_get_user( node );
- _Thread_Set_CPU( idle, _Thread_Get_CPU( user ) );
- }
-
- _Thread_Scheduler_release_critical( owner, &lock_context );
- return action;
+ _Assert( _Scheduler_Node_get_user( node ) == idle );
+ _Scheduler_Node_set_user( node, owner );
+ node->idle = NULL;
+ idle_node = _Thread_Scheduler_get_home_node( idle );
+ ( *release_idle_node )( idle_node, arg );
}
/**
- * @brief Releases an idle thread using this scheduler node.
+ * @brief Releases the idle thread used by the scheduler node if the node uses
+ * an idle thread.
*
- * @param context The scheduler instance context.
- * @param[in, out] node The node which may have an idle thread as user.
- * @param release_idle_thread Function to release an idle thread.
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param release_idle_node is the release idle node handler.
+ *
+ * @param arg is the handler argument.
+ *
+ * @retval NULL The scheduler node did not use an idle thread.
*
- * @retval idle The idle thread which used this node.
- * @retval NULL This node had no idle thread as an user.
+ * @return Returns the idle thread used by the scheduler node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Scheduler_Release_idle_thread release_idle_thread
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
+ Scheduler_Node *node,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
- Thread_Control *idle = _Scheduler_Node_get_idle( node );
+ Thread_Control *idle;
- if ( idle != NULL ) {
- Thread_Control *owner = _Scheduler_Node_get_owner( node );
+ idle = _Scheduler_Node_get_idle( node );
- node->idle = NULL;
- _Scheduler_Node_set_user( node, owner );
- ( *release_idle_thread )( context, idle );
+ if ( idle != NULL ) {
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
}
return idle;
}
/**
- * @brief Exchanges an idle thread from the scheduler node that uses it
- * right now to another scheduler node.
- *
- * @param needs_idle is the scheduler node that needs an idle thread.
- *
- * @param uses_idle is the scheduler node that used the idle thread.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
- Scheduler_Node *needs_idle,
- Scheduler_Node *uses_idle
-)
-{
- _Scheduler_Node_set_idle_user(
- needs_idle,
- _Scheduler_Node_get_idle( uses_idle )
- );
- _Scheduler_Node_set_user(
- uses_idle,
- _Scheduler_Node_get_owner( uses_idle )
- );
- uses_idle->idle = NULL;
-}
-
-/**
* @brief Blocks this scheduler node.
*
- * @param context The scheduler instance context.
* @param[in, out] thread The thread which wants to get blocked referencing this
* node. This is not necessarily the user of this node in case the node
* participates in the scheduler helping protocol.
- * @param[in, out] node The node which wants to get blocked.
- * @param is_scheduled This node is scheduled.
- * @param get_idle_thread Function to get an idle thread.
+ *
+ * @param[in, out] node is the node which wants to get blocked.
+ *
+ * @param get_idle_node is the get idle node handler.
+ *
+ * @param arg is the get idle node handler argument.
*
* @retval thread_cpu The processor of the thread. Indicates to continue with
* the blocking operation.
* @retval NULL Otherwise.
*/
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- bool is_scheduled,
- Scheduler_Get_idle_thread get_idle_thread
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
int sticky_level;
@@ -1045,7 +970,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
Thread_Control *idle;
- idle = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+ idle = _Scheduler_Use_idle_thread( node, get_idle_node, arg );
_Thread_Set_CPU( idle, thread_cpu );
_Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
}
@@ -1058,31 +983,28 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
}
/**
- * @brief Discard the idle thread from the scheduler node.
+ * @brief Discards the idle thread used by the scheduler node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param release_idle_node is the release idle node handler.
*
- * @param context The scheduler context.
- * @param[in, out] the_thread The thread for the operation.
- * @param[in, out] node The scheduler node to discard the idle thread from.
- * @param release_idle_thread Method to release the idle thread from the context.
+ * @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
- Scheduler_Context *context,
- Thread_Control *the_thread,
- Scheduler_Node *node,
- Scheduler_Release_idle_thread release_idle_thread
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
Thread_Control *idle;
- Thread_Control *owner;
Per_CPU_Control *cpu;
idle = _Scheduler_Node_get_idle( node );
- owner = _Scheduler_Node_get_owner( node );
-
- node->idle = NULL;
- _Assert( _Scheduler_Node_get_user( node ) == idle );
- _Scheduler_Node_set_user( node, owner );
- ( *release_idle_thread )( context, idle );
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
cpu = _Thread_Get_CPU( idle );
_Thread_Set_CPU( the_thread, cpu );
@@ -1102,11 +1024,11 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
* @retval false Do not continue with the unblocking operation.
*/
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
- Scheduler_Context *context,
- Thread_Control *the_thread,
- Scheduler_Node *node,
- bool is_scheduled,
- Scheduler_Release_idle_thread release_idle_thread
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ bool is_scheduled,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
bool unblock;
@@ -1115,13 +1037,13 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
_Assert( node->sticky_level > 0 );
if ( is_scheduled ) {
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
_Scheduler_Discard_idle_thread(
- context,
the_thread,
node,
- release_idle_thread
+ release_idle_node,
+ arg
);
- _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
unblock = false;
} else {
_Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
diff --git a/cpukit/include/rtems/score/schedulerprioritysmp.h b/cpukit/include/rtems/score/schedulerprioritysmp.h
index 9ece9ae143..21916647bc 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmp.h
@@ -57,6 +57,7 @@ extern "C" {
*/
typedef struct {
Scheduler_SMP_Context Base;
+ Chain_Control *idle_ready_queue;
Priority_bit_map_Control Bit_map;
Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
} Scheduler_priority_SMP_Context;
diff --git a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
index 6e14200840..8ffd8d01c1 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
@@ -156,6 +156,41 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
);
}
+static inline Scheduler_Node *_Scheduler_priority_SMP_Get_idle( void *arg )
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *lowest_ready;
+
+ self = _Scheduler_priority_SMP_Get_self( arg );
+ lowest_ready = (Scheduler_priority_SMP_Node *)
+ _Chain_Last( self->idle_ready_queue );
+ _Scheduler_priority_Ready_queue_extract(
+ &lowest_ready->Base.Base.Node.Chain,
+ &lowest_ready->Ready_queue,
+ &self->Bit_map
+ );
+
+ return &lowest_ready->Base.Base;
+}
+
+static inline void _Scheduler_priority_SMP_Release_idle(
+ Scheduler_Node *node_base,
+ void *arg
+)
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *node;
+
+ self = _Scheduler_priority_SMP_Get_self( arg );
+ node = _Scheduler_priority_SMP_Node_downcast( node_base );
+
+ _Scheduler_priority_Ready_queue_enqueue(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+}
+
static inline void _Scheduler_priority_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *node_to_update,
diff --git a/cpukit/include/rtems/score/schedulersmp.h b/cpukit/include/rtems/score/schedulersmp.h
index 1d5294b4f0..fb4d6c46d2 100644
--- a/cpukit/include/rtems/score/schedulersmp.h
+++ b/cpukit/include/rtems/score/schedulersmp.h
@@ -55,15 +55,6 @@ typedef struct {
* @brief The chain of scheduled nodes.
*/
Chain_Control Scheduled;
-
- /**
- * @brief Chain of the available idle threads.
- *
- * Idle threads are used for the scheduler helping protocol. It is crucial
- * that the idle threads preserve their relative order. This is the case for
- * this priority based scheduler.
- */
- Chain_Control Idle_threads;
} Scheduler_SMP_Context;
/**
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index 731b15d4bf..6fb97c86b2 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -8,7 +8,7 @@
*/
/*
- * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2021 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -282,7 +282,11 @@ typedef bool ( *Scheduler_SMP_Has_ready )(
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *filter
+);
+
+typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
+ Scheduler_Context *context
);
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
@@ -339,7 +343,7 @@ typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
typedef void ( *Scheduler_SMP_Allocate_processor )(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
);
@@ -414,7 +418,6 @@ static inline void _Scheduler_SMP_Initialize(
)
{
_Chain_Initialize_empty( &self->Scheduled );
- _Chain_Initialize_empty( &self->Idle_threads );
}
/**
@@ -556,51 +559,79 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
}
/**
- * @brief Gets The first idle thread of the given context.
+ * @brief This enumeration defines what a scheduler should do with a node which
+ * could be scheduled.
+ */
+typedef enum {
+ SCHEDULER_SMP_DO_SCHEDULE,
+ SCHEDULER_SMP_DO_NOT_SCHEDULE
+} Scheduler_SMP_Action;
+
+/**
+ * @brief Tries to schedule the scheduler node.
+ *
+ * When an SMP scheduler needs to schedule a node, it shall use this function
+ * to determine what it shall do with the node.
+ *
+ * This function uses the state of the node and the scheduler state of the
+ * owner thread to determine what shall be done. Each scheduler maintains its
+ * nodes independent of other schedulers. This function ensures that a thread
+ * is scheduled by at most one scheduler. If a node requires an executing
+ * thread due to some locking protocol and the owner thread is already
+ * scheduled by another scheduler, then an idle thread will be attached to the
+ * node.
+ *
+ * @param[in, out] node is the node which should be scheduled.
+ *
+ * @param get_idle_node is the get idle node handler.
+ *
+ * @param arg is the get idle node handler argument.
*
- * @param context The scheduler context to get the first idle thread from.
+ * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
*
- * @return The first idle thread of @a context.
+ * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall be blocked. This
+ * action is returned, if the owner thread is already scheduled by another
+ * scheduler.
*/
-static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
- Scheduler_Context *context
+static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
+ Scheduler_Node *node,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Thread_Control *idle = (Thread_Control *)
- _Chain_Get_first_unprotected( &self->Idle_threads );
+ ISR_lock_Context lock_context;
+ Thread_Control *owner;
+ Thread_Scheduler_state owner_state;
+ int owner_sticky_level;
+
+ owner = _Scheduler_Node_get_owner( node );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ _Thread_Scheduler_acquire_critical( owner, &lock_context );
+ owner_state = owner->Scheduler.state;
+ owner_sticky_level = node->sticky_level;
+
+ if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
+ _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ return SCHEDULER_SMP_DO_SCHEDULE;
+ }
- _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
- return idle;
-}
+ if (
+ ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
+ owner_sticky_level == 0
+ ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
-/**
- * @brief Releases the thread and adds it to the idle threads.
- *
- * @param[in, out] context The scheduler context instance.
- * @param idle The thread to add to the idle threads.
- */
-static inline void _Scheduler_SMP_Release_idle_thread(
- Scheduler_Context *context,
- Thread_Control *idle
-)
-{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ return SCHEDULER_SMP_DO_NOT_SCHEDULE;
+ }
- _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
-}
+ (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );
-/**
- * @brief Extracts the node of the idle thread.
- *
- * @param[in, out] idle The thread to extract the node of.
- */
-static inline void _Scheduler_SMP_Extract_idle_thread(
- Thread_Control *idle
-)
-{
- _Chain_Extract_unprotected( &idle->Object.Node );
+ return SCHEDULER_SMP_DO_SCHEDULE;
}
/**
@@ -616,12 +647,11 @@ static inline void _Scheduler_SMP_Extract_idle_thread(
static inline void _Scheduler_SMP_Allocate_processor_lazy(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
- Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
Per_CPU_Control *cpu_self = _Per_CPU_Get();
Thread_Control *heir;
@@ -666,7 +696,7 @@ static inline void _Scheduler_SMP_Allocate_processor_lazy(
static inline void _Scheduler_SMP_Allocate_processor_exact(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
)
{
@@ -674,7 +704,7 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
Per_CPU_Control *cpu_self = _Per_CPU_Get();
(void) context;
- (void) victim;
+ (void) victim_thread;
_Thread_Set_CPU( scheduled_thread, victim_cpu );
_Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
@@ -692,69 +722,80 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
static inline void _Scheduler_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
_Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
- ( *allocate_processor )( context, scheduled, victim, victim_cpu );
+ ( *allocate_processor )( context, scheduled, victim_thread, victim_cpu );
}
/**
- * @brief Preempts the victim's thread and allocates a cpu for the scheduled thread.
+ * @brief Preempts the victim's thread and allocates a processor for the user
+ * of the scheduled node.
*
- * @param context The scheduler context instance.
- * @param scheduled Node of the scheduled thread that is about to be executed.
- * @param[in, out] victim Node of the thread to preempt.
- * @param allocate_processor The function for allocation of a processor for the new thread.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the node of the user thread that is about to
+ * get a processor allocated.
+ *
+ * @param[in, out] victim is the victim node of the thread to preempt.
*
- * @return The preempted thread.
+ * @param[in, out] victim_idle is the idle thread used by the victim node or NULL.
+ *
+ * @param allocate_processor The function for allocation of a processor for the new thread.
*/
-static inline Thread_Control *_Scheduler_SMP_Preempt(
+static inline void _Scheduler_SMP_Preempt(
Scheduler_Context *context,
Scheduler_Node *scheduled,
Scheduler_Node *victim,
+ Thread_Control *victim_idle,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- Thread_Control *victim_thread;
- ISR_lock_Context scheduler_lock_context;
+ Thread_Control *victim_owner;
+ Thread_Control *victim_user;
+ ISR_lock_Context lock_context;
Per_CPU_Control *victim_cpu;
- victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
- _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
+ victim_owner = _Scheduler_Node_get_owner( victim );
+ _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );
- victim_cpu = _Thread_Get_CPU( victim_thread );
+ if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
+ victim_user = victim_owner;
+ victim_cpu = _Thread_Get_CPU( victim_owner );
- if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
- _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+ if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
+ _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );
- if ( victim_thread->Scheduler.helping_nodes > 0 ) {
- ISR_lock_Context per_cpu_lock_context;
+ if ( victim_owner->Scheduler.helping_nodes > 0 ) {
+ ISR_lock_Context lock_context_2;
- _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
- _Chain_Append_unprotected(
- &victim_cpu->Threads_in_need_for_help,
- &victim_thread->Scheduler.Help_node
- );
- _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
+ _Per_CPU_Acquire( victim_cpu, &lock_context_2 );
+ _Chain_Append_unprotected(
+ &victim_cpu->Threads_in_need_for_help,
+ &victim_owner->Scheduler.Help_node
+ );
+ _Per_CPU_Release( victim_cpu, &lock_context_2 );
+ }
}
+ } else {
+ victim_user = victim_idle;
+ victim_cpu = _Thread_Get_CPU( victim_idle );
}
- _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
+ _Thread_Scheduler_release_critical( victim_owner, &lock_context );
_Scheduler_SMP_Allocate_processor(
context,
scheduled,
- victim,
+ victim_user,
victim_cpu,
allocate_processor
);
-
- return victim_thread;
}
/**
@@ -808,54 +849,43 @@ static inline void _Scheduler_SMP_Enqueue_to_scheduled(
Scheduler_Node *lowest_scheduled,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *lowest_scheduled_idle;
+ Scheduler_SMP_Action action;
- action = _Scheduler_Try_to_schedule_node(
- context,
- node,
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
lowest_scheduled,
- _Scheduler_SMP_Get_idle_thread
+ release_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );
+
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
( *insert_scheduled )( context, node, priority );
+ } else {
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state(
- lowest_scheduled,
- SCHEDULER_SMP_NODE_READY
- );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
-
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
- ( *insert_scheduled )( context, node, priority );
+ if ( lowest_scheduled_idle != NULL ) {
+ (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
+ }
- _Scheduler_Exchange_idle_thread( node, lowest_scheduled );
- ( *allocate_processor )(
- context,
- node,
- lowest_scheduled,
- _Thread_Get_CPU( _Scheduler_Node_get_user( node ) )
- );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
}
}
@@ -889,8 +919,11 @@ static inline bool _Scheduler_SMP_Enqueue(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
bool needs_help;
@@ -906,10 +939,14 @@ static inline bool _Scheduler_SMP_Enqueue(
lowest_scheduled,
insert_scheduled,
move_from_scheduled_to_ready,
- allocate_processor
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
needs_help = false;
} else {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
needs_help = true;
}
@@ -946,12 +983,22 @@ static inline void _Scheduler_SMP_Enqueue_scheduled(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
+ Thread_Control *node_idle;
+
+ node_idle = _Scheduler_Release_idle_thread_if_necessary(
+ node,
+ release_idle_node,
+ context
+ );
+
while ( true ) {
- Scheduler_Node *highest_ready;
- Scheduler_Try_to_schedule_action action;
+ Scheduler_Node *highest_ready;
+ Scheduler_SMP_Action action;
highest_ready = ( *get_highest_ready )( context, node );
@@ -963,9 +1010,7 @@ static inline void _Scheduler_SMP_Enqueue_scheduled(
node->sticky_level > 0
&& ( *order )( &insert_priority, &highest_ready->Node.Chain )
) {
- ( *insert_scheduled )( context, node, insert_priority );
-
- if ( _Scheduler_Node_get_idle( node ) != NULL ) {
+ if ( node_idle != NULL ) {
Thread_Control *owner;
ISR_lock_Context lock_context;
@@ -973,77 +1018,53 @@ static inline void _Scheduler_SMP_Enqueue_scheduled(
_Thread_Scheduler_acquire_critical( owner, &lock_context );
if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
+ Per_CPU_Control *cpu;
+
_Thread_Scheduler_cancel_need_for_help(
owner,
_Thread_Get_CPU( owner )
);
- _Scheduler_Discard_idle_thread(
- context,
- owner,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ cpu = _Thread_Get_CPU( node_idle );
+ _Thread_Set_CPU( owner, cpu );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
+ } else {
+ Thread_Control *new_idle;
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Assert_Unused_variable_equals( new_idle, node_idle );
}
-
- _Thread_Scheduler_release_critical( owner, &lock_context );
}
+ ( *insert_scheduled )( context, node, insert_priority );
+
return;
}
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- node,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
node,
+ node_idle,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
( *insert_ready )( context, node, insert_priority );
-
- _Scheduler_Release_idle_thread(
- context,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
- return;
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_SCHEDULED
- );
-
- ( *move_from_ready_to_scheduled )( context, highest_ready );
- ( *insert_ready )( context, node, insert_priority );
-
- _Scheduler_Exchange_idle_thread( highest_ready, node );
- ( *allocate_processor )(
- context,
- highest_ready,
- node,
- _Thread_Get_CPU( _Scheduler_Node_get_user( highest_ready ) )
- );
return;
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
}
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
}
}
@@ -1083,63 +1104,46 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *victim_thread;
+ Scheduler_SMP_Action action;
- do {
+ victim_thread = _Scheduler_Node_get_user( victim );
+ (void) _Scheduler_Release_idle_thread_if_necessary(
+ victim,
+ release_idle_node,
+ context
+ );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- victim,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Allocate_processor(
context,
highest_ready,
- victim,
+ victim_thread,
victim_cpu,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Release_idle_thread(
- context,
- victim,
- _Scheduler_SMP_Release_idle_thread
- );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_SCHEDULED
- );
-
- ( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Exchange_idle_thread( highest_ready, victim );
- ( *allocate_processor )(
- context,
- highest_ready,
- victim,
- _Thread_Get_CPU( _Scheduler_Node_get_user( highest_ready ) )
- );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1147,7 +1151,6 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
*
* @param context The scheduler context instance.
* @param victim The node of the thread that is repressed by the newly scheduled thread.
- * @param victim_cpu The cpu to allocate.
* @param extract_from_ready Function to extract a node from the set of
* ready nodes.
* @param get_highest_ready Function to get the highest ready node.
@@ -1159,61 +1162,49 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *victim_idle;
+ Scheduler_SMP_Action action;
- do {
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
+ victim,
+ release_idle_node,
+ context
+ );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- victim,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
victim,
+ victim_idle,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Release_idle_thread(
- context,
- victim,
- _Scheduler_SMP_Release_idle_thread
- );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_SCHEDULED
- );
-
- ( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Exchange_idle_thread( highest_ready, victim );
- ( *allocate_processor )( context, highest_ready, victim, _Thread_Get_CPU( _Scheduler_Node_get_user( highest_ready ) ) );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1240,7 +1231,9 @@ static inline void _Scheduler_SMP_Block(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1249,11 +1242,11 @@ static inline void _Scheduler_SMP_Block(
node_state = _Scheduler_SMP_Node_state( node );
thread_cpu = _Scheduler_Block_node(
- context,
thread,
node,
node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
if ( thread_cpu != NULL ) {
@@ -1268,7 +1261,9 @@ static inline void _Scheduler_SMP_Block(
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
( *extract_from_ready )( context, node );
@@ -1287,11 +1282,12 @@ static inline void _Scheduler_SMP_Block(
* of a context.
*/
static inline void _Scheduler_SMP_Unblock(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Update update,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1299,11 +1295,11 @@ static inline void _Scheduler_SMP_Unblock(
node_state = _Scheduler_SMP_Node_state( node );
unblock = _Scheduler_Unblock_node(
- context,
thread,
node,
node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ context
);
if ( unblock ) {
@@ -1320,7 +1316,6 @@ static inline void _Scheduler_SMP_Unblock(
if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
Priority_Control insert_priority;
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
needs_help = ( *enqueue )( context, node, insert_priority );
} else {
@@ -1507,7 +1502,8 @@ static inline bool _Scheduler_SMP_Ask_for_help(
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_Node *lowest_scheduled;
@@ -1537,6 +1533,8 @@ static inline bool _Scheduler_SMP_Ask_for_help(
insert_priority = _Scheduler_SMP_Node_priority( node );
if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
+ Thread_Control *lowest_scheduled_idle;
+
_Thread_Scheduler_cancel_need_for_help(
thread,
_Thread_Get_CPU( thread )
@@ -1544,24 +1542,27 @@ static inline bool _Scheduler_SMP_Ask_for_help(
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( thread, &lock_context );
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
+ lowest_scheduled,
+ release_idle_node,
+ context
+ );
+
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
( *move_from_scheduled_to_ready )( context, lowest_scheduled );
( *insert_scheduled )( context, node, insert_priority );
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
+
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
success = false;
@@ -1571,14 +1572,14 @@ static inline bool _Scheduler_SMP_Ask_for_help(
thread,
_Thread_Get_CPU( thread )
);
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
_Scheduler_Discard_idle_thread(
- context,
thread,
node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ context
);
- _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
- _Thread_Scheduler_release_critical( thread, &lock_context );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1650,7 +1651,9 @@ static inline void _Scheduler_SMP_Withdraw_node(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
ISR_lock_Context lock_context;
@@ -1664,6 +1667,7 @@ static inline void _Scheduler_SMP_Withdraw_node(
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
Per_CPU_Control *thread_cpu;
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
thread_cpu = _Thread_Get_CPU( thread );
_Scheduler_Thread_change_state( thread, next_state );
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1676,7 +1680,9 @@ static inline void _Scheduler_SMP_Withdraw_node(
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1721,7 +1727,6 @@ static inline void _Scheduler_SMP_Make_sticky(
( *update )( context, node, priority );
}
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
(void) ( *enqueue )( context, node, insert_priority );
}
@@ -1744,7 +1749,9 @@ static inline void _Scheduler_SMP_Clean_sticky(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1755,8 +1762,8 @@ static inline void _Scheduler_SMP_Clean_sticky(
Scheduler_Context *context;
context = _Scheduler_Get_context( scheduler );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
( *extract_from_scheduled )( context, node );
_Scheduler_SMP_Schedule_highest_ready(
@@ -1766,7 +1773,9 @@ static inline void _Scheduler_SMP_Clean_sticky(
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
}
}
@@ -1798,7 +1807,6 @@ static inline void _Scheduler_SMP_Do_start_idle(
_Thread_Set_CPU( idle, cpu );
( *register_idle )( context, &node->Base, cpu );
_Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}
/**
@@ -1823,7 +1831,6 @@ static inline void _Scheduler_SMP_Add_processor(
self = _Scheduler_SMP_Get_self( context );
idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
node = _Thread_Scheduler_get_home_node( idle );
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
@@ -1853,11 +1860,13 @@ static inline void _Scheduler_SMP_Add_processor(
* @return The idle thread of @a cpu.
*/
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
- Scheduler_Context *context,
- Per_CPU_Control *cpu,
- Scheduler_SMP_Extract extract_from_scheduled,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Context *self;
@@ -1877,24 +1886,25 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
chain_node = _Chain_Next( chain_node );
} while ( _Thread_Get_CPU( victim_user ) != cpu );
- ( *extract_from_scheduled )( context, victim_node );
+ ( *extract_from_scheduled )( &self->Base, victim_node );
victim_owner = _Scheduler_Node_get_owner( victim_node );
if ( !victim_owner->is_idle ) {
+ Thread_Control *victim_idle;
Scheduler_Node *idle_node;
- _Scheduler_Release_idle_thread(
- &self->Base,
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
victim_node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ &self->Base
);
- idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
- idle_node = _Thread_Scheduler_get_home_node( idle );
- ( *extract_from_ready )( &self->Base, idle_node );
+ idle_node = ( *get_idle_node )( &self->Base );
+ idle = _Scheduler_Node_get_owner( idle_node );
_Scheduler_SMP_Preempt(
&self->Base,
idle_node,
victim_node,
+ victim_idle,
_Scheduler_SMP_Allocate_processor_exact
);
@@ -1903,13 +1913,12 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
insert_priority = _Scheduler_SMP_Node_priority( victim_node );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- ( *enqueue )( context, victim_node, insert_priority );
+ ( *enqueue )( &self->Base, victim_node, insert_priority );
}
} else {
_Assert( victim_owner == victim_user );
_Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
idle = victim_owner;
- _Scheduler_SMP_Extract_idle_thread( idle );
}
return idle;
@@ -1947,7 +1956,9 @@ static inline void _Scheduler_SMP_Set_affinity(
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1962,11 +1973,12 @@ static inline void _Scheduler_SMP_Set_affinity(
_Scheduler_SMP_Preempt_and_schedule_highest_ready(
context,
node,
- _Thread_Get_CPU( thread ),
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
( *set_affinity )( context, node, arg );
( *enqueue )( context, node, insert_priority );
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 0b0ee6ed21..28266dd13d 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -409,10 +409,36 @@ static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
);
}
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_Node *lowest_ready;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
+ _Assert( lowest_ready != NULL );
+ _RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
+ _Chain_Initialize_node( &lowest_ready->Node.Chain );
+
+ return lowest_ready;
+}
+
+static inline void _Scheduler_EDF_SMP_Release_idle(
+ Scheduler_Node *node,
+ void *arg
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ _RBTree_Initialize_node( &node->Node.RBTree );
+ _RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
+}
+
static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled_base,
- Scheduler_Node *victim_base,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
)
{
@@ -420,7 +446,7 @@ static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_EDF_SMP_Node *scheduled;
uint8_t rqi;
- (void) victim_base;
+ (void) victim_thread;
self = _Scheduler_EDF_SMP_Get_self( context );
scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
rqi = scheduled->ready_queue_index;
@@ -471,7 +497,9 @@ void _Scheduler_EDF_SMP_Block(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -489,8 +517,11 @@ static inline bool _Scheduler_EDF_SMP_Enqueue(
_Scheduler_EDF_SMP_Insert_ready,
_Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -510,7 +541,9 @@ static inline void _Scheduler_EDF_SMP_Enqueue_scheduled(
_Scheduler_EDF_SMP_Insert_ready,
_Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -527,7 +560,8 @@ void _Scheduler_EDF_SMP_Unblock(
thread,
node,
_Scheduler_EDF_SMP_Do_update,
- _Scheduler_EDF_SMP_Enqueue
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -546,7 +580,8 @@ static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
_Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -616,7 +651,9 @@ void _Scheduler_EDF_SMP_Withdraw_node(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -649,7 +686,9 @@ void _Scheduler_EDF_SMP_Clean_sticky(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -695,7 +734,9 @@ Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
cpu,
_Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
- _Scheduler_EDF_SMP_Enqueue
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -833,7 +874,9 @@ Status_Control _Scheduler_EDF_SMP_Set_affinity(
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
_Scheduler_EDF_SMP_Enqueue,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 648c243589..7d971ef8d1 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -190,7 +190,9 @@ void _Scheduler_priority_affinity_SMP_Block(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
/*
@@ -262,8 +264,11 @@ static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
_Scheduler_priority_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -342,7 +347,10 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
lowest_scheduled,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
}
@@ -363,7 +371,8 @@ void _Scheduler_priority_affinity_SMP_Unblock(
thread,
node,
_Scheduler_priority_SMP_Do_update,
- _Scheduler_priority_affinity_SMP_Enqueue_fifo
+ _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+ _Scheduler_priority_SMP_Release_idle
);
/*
@@ -390,8 +399,11 @@ static bool _Scheduler_priority_affinity_SMP_Enqueue(
_Scheduler_priority_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -416,7 +428,9 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
_Scheduler_priority_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -435,7 +449,8 @@ static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -510,7 +525,9 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -543,7 +560,9 @@ void _Scheduler_priority_affinity_SMP_Clean_sticky(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_SMP_Allocate_processor_exact,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -575,7 +594,9 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
cpu,
_Scheduler_SMP_Extract_from_scheduled,
_Scheduler_priority_SMP_Extract_from_ready,
- _Scheduler_priority_affinity_SMP_Enqueue
+ _Scheduler_priority_affinity_SMP_Enqueue,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index 7262f48e8e..a33b44587f 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -50,6 +50,7 @@ void _Scheduler_priority_SMP_Initialize( const Scheduler_Control *scheduler )
_Scheduler_priority_SMP_Get_context( scheduler );
_Scheduler_SMP_Initialize( &self->Base );
+ self->idle_ready_queue = &self->Ready[ scheduler->maximum_priority ];
_Priority_bit_map_Initialize( &self->Bit_map );
_Scheduler_priority_Ready_queue_initialize(
&self->Ready[ 0 ],
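
The new idle_ready_queue member caches the ready queue at
scheduler->maximum_priority, the priority level occupied by the idle
threads, which gives this scheduler a constant-time source of idle-owned
nodes.  The handler pair itself lives in schedulerprioritysmpimpl.h
(changed by this patch, not shown in these hunks).  A plausible shape,
assuming idle-owned nodes are the only entries in that queue and leaving
out any priority bit map bookkeeping:

static inline Scheduler_Node *_Scheduler_priority_SMP_Get_idle( void *arg )
{
  Scheduler_priority_SMP_Context *self = _Scheduler_priority_SMP_Get_self( arg );

  /* Take any node parked in the reserved maximum-priority ready queue. */
  return (Scheduler_Node *)
    _Chain_Get_first_unprotected( self->idle_ready_queue );
}

static inline void _Scheduler_priority_SMP_Release_idle(
  Scheduler_Node *node,
  void           *arg
)
{
  Scheduler_priority_SMP_Context *self = _Scheduler_priority_SMP_Get_self( arg );

  _Chain_Append_unprotected( self->idle_ready_queue, &node->Node.Chain );
}
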
@@ -118,7 +119,9 @@ void _Scheduler_priority_SMP_Block(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -136,8 +139,11 @@ static bool _Scheduler_priority_SMP_Enqueue(
_Scheduler_priority_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -157,7 +163,9 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled(
_Scheduler_priority_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -174,7 +182,8 @@ void _Scheduler_priority_SMP_Unblock(
thread,
node,
_Scheduler_priority_SMP_Do_update,
- _Scheduler_priority_SMP_Enqueue
+ _Scheduler_priority_SMP_Enqueue,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -193,7 +202,8 @@ static bool _Scheduler_priority_SMP_Do_ask_for_help(
_Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -263,7 +273,9 @@ void _Scheduler_priority_SMP_Withdraw_node(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -296,7 +308,9 @@ void _Scheduler_priority_SMP_Clean_sticky(
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
@@ -328,7 +342,9 @@ Thread_Control *_Scheduler_priority_SMP_Remove_processor(
cpu,
_Scheduler_SMP_Extract_from_scheduled,
_Scheduler_priority_SMP_Extract_from_ready,
- _Scheduler_priority_SMP_Enqueue
+ _Scheduler_priority_SMP_Enqueue,
+ _Scheduler_priority_SMP_Get_idle,
+ _Scheduler_priority_SMP_Release_idle
);
}
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index b7814b0fcb..086d00094d 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -172,6 +172,30 @@ static void _Scheduler_simple_SMP_Extract_from_ready(
_Chain_Extract_unprotected( &node_to_extract->Node.Chain );
}
+static inline Scheduler_Node *_Scheduler_simple_SMP_Get_idle( void *arg )
+{
+ Scheduler_simple_SMP_Context *self =
+ _Scheduler_simple_SMP_Get_self( arg );
+ Scheduler_Node *lowest_ready = (Scheduler_Node *) _Chain_Last( &self->Ready );
+
+ _Assert( &lowest_ready->Node.Chain != _Chain_Head( &self->Ready ) );
+ _Chain_Extract_unprotected( &lowest_ready->Node.Chain );
+
+ return lowest_ready;
+}
+
+static inline void _Scheduler_simple_SMP_Release_idle(
+ Scheduler_Node *node,
+ void *arg
+)
+{
+ Scheduler_simple_SMP_Context *self;
+
+ self = _Scheduler_simple_SMP_Get_self( arg );
+
+ _Chain_Append_unprotected( &self->Ready, &node->Node.Chain );
+}
+
void _Scheduler_simple_SMP_Block(
const Scheduler_Control *scheduler,
Thread_Control *thread,
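
The simple SMP scheduler keeps its ready chain in priority order, and the
idle threads have the lowest priority of all, so nodes owned by unused
idle threads accumulate at the tail of self->Ready.
_Scheduler_simple_SMP_Get_idle() can therefore just extract the last
element (the _Assert() guards against an exhausted pool), and
_Scheduler_simple_SMP_Release_idle() appends the node back to the tail.
A usage sketch of the calling convention, assuming context is the
scheduler context:

Scheduler_Node *idle_node;

/* Borrow an idle-owned node, e.g. to let its idle thread execute on
 * behalf of a sticky node. */
idle_node = _Scheduler_simple_SMP_Get_idle( context );

/* ... */

/* Hand it back once the idle thread is no longer needed. */
_Scheduler_simple_SMP_Release_idle( idle_node, context );
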
@@ -188,7 +212,9 @@ void _Scheduler_simple_SMP_Block(
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -206,8 +232,11 @@ static bool _Scheduler_simple_SMP_Enqueue(
_Scheduler_simple_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -227,7 +256,9 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled(
_Scheduler_simple_SMP_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -244,7 +275,8 @@ void _Scheduler_simple_SMP_Unblock(
thread,
node,
_Scheduler_simple_SMP_Do_update,
- _Scheduler_simple_SMP_Enqueue
+ _Scheduler_simple_SMP_Enqueue,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -263,7 +295,8 @@ static bool _Scheduler_simple_SMP_Do_ask_for_help(
_Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -333,7 +366,9 @@ void _Scheduler_simple_SMP_Withdraw_node(
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -366,7 +401,9 @@ void _Scheduler_simple_SMP_Clean_sticky(
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_SMP_Allocate_processor_lazy,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
@@ -398,7 +435,9 @@ Thread_Control *_Scheduler_simple_SMP_Remove_processor(
cpu,
_Scheduler_SMP_Extract_from_scheduled,
_Scheduler_simple_SMP_Extract_from_ready,
- _Scheduler_simple_SMP_Enqueue
+ _Scheduler_simple_SMP_Enqueue,
+ _Scheduler_simple_SMP_Get_idle,
+ _Scheduler_simple_SMP_Release_idle
);
}
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 6c7ab942fa..36ceaeddc1 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -157,14 +157,14 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
static inline void _Scheduler_strong_APA_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled_base,
- Scheduler_Node *victim_base,
+ Thread_Control *victim_thread,
Per_CPU_Control *victim_cpu
)
{
Scheduler_strong_APA_Node *scheduled;
Scheduler_strong_APA_Context *self;
- (void) victim_base;
+ (void) victim_thread;
scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
self = _Scheduler_strong_APA_Get_self( context );
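
The allocate-processor callback now identifies the victim by its thread
rather than by its scheduler node, presumably because with the new idle
handling the thread evicted from the processor may be an idle thread
executing on behalf of the victim node rather than one of the node's own
users.  Strong APA ignores the victim either way, hence the updated
(void) cast.  Inferred from this hunk, the callback type reads roughly:

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Thread_Control    *victim_thread,
  Per_CPU_Control   *victim_cpu
);
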
@@ -270,6 +270,63 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready(
return highest_ready;
}
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_idle( void *arg )
+{
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_Node *lowest_ready = NULL;
+ Priority_Control max_priority_num;
+ const Chain_Node *tail;
+ Chain_Node *next;
+
+ self = _Scheduler_strong_APA_Get_self( arg );
+ tail = _Chain_Immutable_tail( &self->Ready );
+ next = _Chain_First( &self->Ready );
+ max_priority_num = 0;
+
+ while ( next != tail ) {
+ Scheduler_strong_APA_Node *node;
+ Scheduler_SMP_Node_state curr_state;
+
+ node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+ curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
+
+ if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
+ Priority_Control curr_priority;
+
+ curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
+
+ if ( curr_priority > max_priority_num ) {
+ max_priority_num = curr_priority;
+ lowest_ready = node;
+ }
+ }
+
+ next = _Chain_Next( next );
+ }
+
+ _Assert( lowest_ready != NULL );
+ _Chain_Extract_unprotected( &lowest_ready->Ready_node );
+ _Chain_Set_off_chain( &lowest_ready->Ready_node );
+
+ return &lowest_ready->Base.Base;
+}
+
+static inline void _Scheduler_strong_APA_Release_idle(
+ Scheduler_Node *node_base,
+ void *arg
+)
+{
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_Node *node;
+
+ self = _Scheduler_strong_APA_Get_self( arg );
+ node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+ if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
+ _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
+ }
+}
+
static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
Scheduler_Context *context,
Scheduler_Node *ready_to_scheduled
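
Unlike the simple SMP scheduler, strong APA cannot take the idle-owned
node from a chain tail: its ready chain is threaded through Ready_node
and is not kept in priority order, so _Scheduler_strong_APA_Get_idle()
scans for the ready node with the largest priority value (larger values
mean lower priority, and the idle threads sit at the lowest priority).
The extracted node is marked off-chain, and
_Scheduler_strong_APA_Release_idle() appends only a node that is still
off-chain, so a node still linked on the ready chain is left in place.
A minimal sketch of that off-chain handshake, with a hypothetical chain
and node:

Chain_Control pool;
Chain_Node    n;

_Chain_Initialize_empty( &pool );
_Chain_Set_off_chain( &n );
_Assert( _Chain_Is_node_off_chain( &n ) );

/* Appending links the node, which clears the off-chain state, so a
 * second, guarded release would leave the chain untouched. */
_Chain_Append_unprotected( &pool, &n );
_Assert( !_Chain_Is_node_off_chain( &n ) );
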
@@ -386,16 +443,24 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
 * So there is a need for task shifting.
*/
while ( node->cpu_to_preempt != filter_cpu ) {
+ Thread_Control *next_node_idle;
+
curr_node = &node->Base.Base;
next_node = _Scheduler_strong_APA_Get_scheduled(
self,
node->cpu_to_preempt
);
+ next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
+ next_node,
+ _Scheduler_strong_APA_Release_idle,
+ context
+ );
(void) _Scheduler_SMP_Preempt(
context,
curr_node,
next_node,
+ next_node_idle,
_Scheduler_strong_APA_Allocate_processor
);
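
_Scheduler_Release_idle_thread_if_necessary() comes from the reworked
schedulerimpl.h (its hunks are not shown here).  Before next_node is
preempted, an idle thread that may be executing on its behalf has to be
detached, and that idle thread's own scheduler node goes back to the pool
through the release handler; the returned thread, or NULL, is then passed
on to _Scheduler_SMP_Preempt().  A plausible shape for the helper,
sketched from the call sites and assuming the existing accessors
_Scheduler_Node_get_idle(), _Scheduler_Node_set_user(),
_Scheduler_Node_get_owner(), _Thread_Scheduler_get_home_node(), and the
Scheduler_Node idle field from schedulernode.h:

static inline Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
  Scheduler_Node              *node,
  Scheduler_Release_idle_node  release_idle_node,
  void                        *arg
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    /* Detach the idle thread from the node it executed on behalf of. */
    node->idle = NULL;
    _Scheduler_Node_set_user( node, _Scheduler_Node_get_owner( node ) );

    /* Recycle the idle thread's own node through the scheduler's pool. */
    ( *release_idle_node )( _Thread_Scheduler_get_home_node( idle ), arg );
  }

  return idle;
}
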
@@ -587,6 +652,8 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
self = _Scheduler_strong_APA_Get_self( context );
CPU = self->CPU;
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+
node_priority = _Scheduler_Node_get_priority( node );
node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
@@ -633,7 +700,10 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
next_node,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
curr_node = next_node;
@@ -641,13 +711,21 @@ static inline bool _Scheduler_strong_APA_Do_enqueue(
curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
while ( curr_node != lowest_reachable ) {
+ Thread_Control *next_node_idle;
+
curr_CPU = curr_strong_node->cpu_to_preempt;
next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
+ next_node_idle = _Scheduler_Release_idle_thread_if_necessary(
+ next_node,
+ _Scheduler_strong_APA_Release_idle,
+ context
+ );
/* curr_node preempts the next_node */
_Scheduler_SMP_Preempt(
context,
curr_node,
next_node,
+ next_node_idle,
_Scheduler_strong_APA_Allocate_processor
);
@@ -755,7 +833,9 @@ static inline void _Scheduler_strong_APA_Enqueue_scheduled(
_Scheduler_strong_APA_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -774,7 +854,8 @@ static inline bool _Scheduler_strong_APA_Do_ask_for_help(
_Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
_Scheduler_strong_APA_Get_lowest_scheduled,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -842,7 +923,9 @@ void _Scheduler_strong_APA_Block(
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -859,7 +942,8 @@ void _Scheduler_strong_APA_Unblock(
thread,
node,
_Scheduler_strong_APA_Do_update,
- _Scheduler_strong_APA_Enqueue
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -933,7 +1017,9 @@ void _Scheduler_strong_APA_Withdraw_node(
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -966,7 +1052,9 @@ void _Scheduler_strong_APA_Clean_sticky(
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -1028,7 +1116,9 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
cpu,
_Scheduler_strong_APA_Extract_from_scheduled,
_Scheduler_strong_APA_Extract_from_ready,
- _Scheduler_strong_APA_Enqueue
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
}
@@ -1089,7 +1179,9 @@ Status_Control _Scheduler_strong_APA_Set_affinity(
_Scheduler_strong_APA_Get_highest_ready,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
_Scheduler_strong_APA_Enqueue,
- _Scheduler_strong_APA_Allocate_processor
+ _Scheduler_strong_APA_Allocate_processor,
+ _Scheduler_strong_APA_Get_idle,
+ _Scheduler_strong_APA_Release_idle
);
return STATUS_SUCCESSFUL;