Diffstat (limited to 'cpukit/include/rtems/score/schedulersmpimpl.h')
 cpukit/include/rtems/score/schedulersmpimpl.h | 1065
 1 file changed, 660 insertions(+), 405 deletions(-)
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index dbfc241b18..c1839c4517 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -8,17 +10,28 @@
*/
/*
- * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
- *
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * Copyright (C) 2013, 2021 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
@@ -282,7 +295,11 @@ typedef bool ( *Scheduler_SMP_Has_ready )(
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *filter
+);
+
+typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
+ Scheduler_Context *context
);
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
@@ -330,11 +347,16 @@ typedef bool ( *Scheduler_SMP_Enqueue )(
Priority_Control priority
);
+typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_enqueue,
+ Priority_Control priority
+);
+
typedef void ( *Scheduler_SMP_Allocate_processor )(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
);
typedef void ( *Scheduler_SMP_Register_idle )(
@@ -364,21 +386,26 @@ static inline void _Scheduler_SMP_Do_nothing_register_idle(
/**
* @brief Checks if @a key is less than or equal to the priority of the chain node.
*
- * @param to_insert The priority to compare.
- * @param next The chain node to compare the priority of.
+ * @param key is the priority to compare.
+ *
+ * @param to_insert is the chain node to insert.
+ *
+ * @param next is the chain node to compare the priority of.
*
* @retval true @a key is less than or equal to the priority of @a next.
* @retval false @a key is greater than the priority of @a next.
*/
static inline bool _Scheduler_SMP_Priority_less_equal(
- const void *to_insert,
+ const void *key,
+ const Chain_Node *to_insert,
const Chain_Node *next
)
{
const Priority_Control *priority_to_insert;
const Scheduler_SMP_Node *node_next;
- priority_to_insert = (const Priority_Control *) to_insert;
+ (void) to_insert;
+ priority_to_insert = (const Priority_Control *) key;
node_next = (const Scheduler_SMP_Node *) next;
return *priority_to_insert <= node_next->priority;
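
/*
 * A minimal sketch of how an order handler with this three-argument shape is
 * consumed: the key (a priority) travels separately from the chain node to
 * insert, so the same handler serves both node-to-node ordering and pure
 * priority lookups.  This assumes the _Chain_Insert_ordered_unprotected(
 * chain, node, key, order ) helper from <rtems/score/chainimpl.h>; the body
 * below is a sketch, not verbatim source.
 */
static inline void _Scheduler_SMP_Insert_scheduled_sketch(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  /* Keep the chain of scheduled nodes sorted by priority */
  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}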
@@ -408,7 +435,6 @@ static inline void _Scheduler_SMP_Initialize(
)
{
_Chain_Initialize_empty( &self->Scheduled );
- _Chain_Initialize_empty( &self->Idle_threads );
}
/**
@@ -550,205 +576,300 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
}
/**
- * @brief Gets The first idle thread of the given context.
+ * @brief Removes the thread's ask for help request from the processor.
+ *
+ * The caller must be the owner of the thread's scheduler lock.
*
- * @param context The scheduler context to get the first idle thread from.
+ * @param[in, out] thread is the thread of the ask for help request.
*
- * @return The first idle thread of @a context.
+ * @param[in, out] cpu is the processor from which the ask for help request
+ * should be removed.
*/
-static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
- Scheduler_Context *context
-)
+void _Scheduler_SMP_Remove_ask_for_help_from_processor(
+ Thread_Control *thread,
+ Per_CPU_Control *cpu
+);
+
+/**
+ * @brief Cancels the thread's ask for help request.
+ *
+ * The caller must be the owner of the thread's scheduler lock.
+ *
+ * @param[in, out] thread is the thread of the ask for help request.
+ */
+static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Thread_Control *idle = (Thread_Control *)
- _Chain_Get_first_unprotected( &self->Idle_threads );
+ Per_CPU_Control *cpu;
- _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+ _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
+ cpu = thread->Scheduler.ask_for_help_cpu;
- return idle;
+ if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
+ _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
+ }
}
/**
- * @brief Releases the thread and adds it to the idle threads.
+ * @brief Requests to ask for help for the thread.
*
- * @param[in, out] context The scheduler context instance.
- * @param idle The thread to add to the idle threads.
+ * The actual ask for help operations are carried out during
+ * _Thread_Do_dispatch() on the current processor.
+ *
+ * An alternative approach would be to carry out the requests on a processor
+ * related to the thread. This could reduce the overhead for the preempting
+ * thread a bit; however, there are at least two problems with this approach.
+ * Firstly, we have to figure out what is a processor related to the thread.
+ * Secondly, we may need an inter-processor interrupt.
+ *
+ * @param[in, out] thread is the thread in need for help.
*/
-static inline void _Scheduler_SMP_Release_idle_thread(
- Scheduler_Context *context,
- Thread_Control *idle
-)
+static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Per_CPU_Get();
- _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
+ _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
+ thread->Scheduler.ask_for_help_cpu = cpu_self;
+ cpu_self->dispatch_necessary = true;
+
+ _Per_CPU_Acquire( cpu_self, &lock_context );
+ _Chain_Append_unprotected(
+ &cpu_self->Threads_in_need_for_help,
+ &thread->Scheduler.Help_node
+ );
+ _Per_CPU_Release( cpu_self, &lock_context );
}
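
/*
 * A sketch of the consumer side, assuming _Thread_Do_dispatch() drains the
 * per-CPU Threads_in_need_for_help chain under the per-CPU lock; the function
 * name and the draining details are illustrative, not verbatim source.
 */
static void _Sketch_Process_help_requests( Per_CPU_Control *cpu_self )
{
  ISR_lock_Context lock_context;

  _Per_CPU_Acquire( cpu_self, &lock_context );

  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
    Chain_Node     *node;
    Thread_Control *thread;

    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
    _Chain_Set_off_chain( node );
    thread = RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node );
    thread->Scheduler.ask_for_help_cpu = NULL;
    _Per_CPU_Release( cpu_self, &lock_context );

    /*
     * Let the schedulers of the thread try to help it, for example through
     * their ask for help operation, see _Scheduler_SMP_Ask_for_help()
     * further down in this header.
     */

    _Per_CPU_Acquire( cpu_self, &lock_context );
  }

  _Per_CPU_Release( cpu_self, &lock_context );
}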
/**
- * @brief Extracts the node of the idle thread.
+ * @brief This enumeration defines what a scheduler should do with a node which
+ * could be scheduled.
+ */
+typedef enum {
+ SCHEDULER_SMP_DO_SCHEDULE,
+ SCHEDULER_SMP_DO_NOT_SCHEDULE
+} Scheduler_SMP_Action;
+
+/**
+ * @brief Tries to schedule the scheduler node.
+ *
+ * When an SMP scheduler needs to schedule a node, it shall use this function
+ * to determine what it shall do with the node.
+ *
+ * This function uses the state of the node and the scheduler state of the
+ * owner thread to determine what shall be done. Each scheduler maintains its
+ * nodes independently of other schedulers. This function ensures that a thread
+ * is scheduled by at most one scheduler. If a node requires an executing
+ * thread due to some locking protocol and the owner thread is already
+ * scheduled by another scheduler, then an idle thread will be attached to the
+ * node.
+ *
+ * @param[in, out] node is the node which should be scheduled.
+ *
+ * @param get_idle_node is the get idle node handler.
*
- * @param[in, out] idle The thread to extract the node of.
+ * @param arg is the get idle node handler argument.
+ *
+ * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
+ *
+ * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall be blocked. This
+ * action is returned if the owner thread is already scheduled by another
+ * scheduler.
*/
-static inline void _Scheduler_SMP_Exctract_idle_thread(
- Thread_Control *idle
+static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
+ Scheduler_Node *node,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
- _Chain_Extract_unprotected( &idle->Object.Node );
+ ISR_lock_Context lock_context;
+ Thread_Control *owner;
+ Thread_Scheduler_state owner_state;
+ int owner_sticky_level;
+
+ owner = _Scheduler_Node_get_owner( node );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ _Thread_Scheduler_acquire_critical( owner, &lock_context );
+ owner_state = owner->Scheduler.state;
+ owner_sticky_level = node->sticky_level;
+
+ if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
+ _Scheduler_SMP_Cancel_ask_for_help( owner );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ return SCHEDULER_SMP_DO_SCHEDULE;
+ }
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+
+ if (
+ ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
+ owner_sticky_level == 0
+ ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+ return SCHEDULER_SMP_DO_NOT_SCHEDULE;
+ }
+
+ (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );
+
+ return SCHEDULER_SMP_DO_SCHEDULE;
}
/**
- * @brief Allocates the cpu for the scheduled thread.
+ * @brief Allocates a processor to the user of the scheduled node.
*
* Attempts to prevent migrations but does not take into account affinity.
*
- * @param context The scheduler context instance.
- * @param scheduled The scheduled node that should be executed next.
- * @param victim If the heir is this node's thread, no processor is allocated.
- * @param[in, out] victim_cpu The cpu to allocate.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
*/
static inline void _Scheduler_SMP_Allocate_processor_lazy(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
- Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
Per_CPU_Control *cpu_self = _Per_CPU_Get();
- Thread_Control *heir;
_Assert( _ISR_Get_level() != 0 );
- if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
- if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
- heir = scheduled_cpu->heir;
- _Thread_Dispatch_update_heir(
- cpu_self,
- scheduled_cpu,
- scheduled_thread
- );
- } else {
- /* We have to force a migration to our processor set */
- heir = scheduled_thread;
- }
- } else {
- heir = scheduled_thread;
+ if ( cpu == scheduled_cpu ) {
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
+
+ return;
}
- if ( heir != victim_thread ) {
- _Thread_Set_CPU( heir, victim_cpu );
- _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
+ if (
+ _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
+ _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
+ ) {
+ Thread_Control *heir = scheduled_cpu->heir;
+ _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
+ _Thread_Set_CPU( heir, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, heir );
+
+ return;
}
+
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}
/**
- * @brief Allocates the cpu for the scheduled thread.
+ * @brief Allocates exactly the processor to the user of the scheduled node.
*
* This method is slightly different from
* _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
* do. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
* but does not take into account affinity.
*
- * @param context This parameter is unused.
- * @param scheduled The scheduled node whose thread should be executed next.
- * @param victim This parameter is unused.
- * @param[in, out] victim_cpu The cpu to allocate.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
*/
static inline void _Scheduler_SMP_Allocate_processor_exact(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
Per_CPU_Control *cpu_self = _Per_CPU_Get();
(void) context;
- (void) victim;
- _Thread_Set_CPU( scheduled_thread, victim_cpu );
- _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}
/**
- * @brief Allocates the cpu for the scheduled thread using the given allocation function.
+ * @brief Allocates the processor to the user of the scheduled node using the
+ * given allocation handler.
*
- * @param context The scheduler context instance.
- * @param scheduled The scheduled node that should be executed next.
- * @param victim If the heir is this node's thread, no processor is allocated.
- * @param[in, out] victim_cpu The cpu to allocate.
- * @param allocate_processor The function to use for the allocation of @a victim_cpu.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
+ *
+ * @param allocate_processor is the handler which should allocate the processor.
*/
static inline void _Scheduler_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
+ Per_CPU_Control *cpu,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
_Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
- ( *allocate_processor )( context, scheduled, victim, victim_cpu );
+ ( *allocate_processor )( context, scheduled, cpu );
}
/**
- * @brief Preempts the victim's thread and allocates a cpu for the scheduled thread.
+ * @brief Preempts the victim's thread and allocates a processor for the user
+ * of the scheduled node.
*
- * @param context The scheduler context instance.
- * @param scheduled Node of the scheduled thread that is about to be executed.
- * @param[in, out] victim Node of the thread to preempt.
- * @param allocate_processor The function for allocation of a processor for the new thread.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the node of the user thread that is about to
+ * get a processor allocated.
+ *
+ * @param[in, out] victim is the victim node of the thread to preempt.
+ *
+ * @param[in, out] victim_idle is the idle thread used by the victim node or NULL.
*
- * @return The preempted thread.
+ * @param allocate_processor The function for allocation of a processor for the new thread.
*/
-static inline Thread_Control *_Scheduler_SMP_Preempt(
+static inline void _Scheduler_SMP_Preempt(
Scheduler_Context *context,
Scheduler_Node *scheduled,
Scheduler_Node *victim,
+ Thread_Control *victim_idle,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- Thread_Control *victim_thread;
- ISR_lock_Context scheduler_lock_context;
- Per_CPU_Control *victim_cpu;
+ Thread_Control *victim_owner;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu;
- victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
- _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
-
- victim_cpu = _Thread_Get_CPU( victim_thread );
+ victim_owner = _Scheduler_Node_get_owner( victim );
+ _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );
- if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
- _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+ if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
+ if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
+ _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );
- if ( victim_thread->Scheduler.helping_nodes > 0 ) {
- ISR_lock_Context per_cpu_lock_context;
-
- _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
- _Chain_Append_unprotected(
- &victim_cpu->Threads_in_need_for_help,
- &victim_thread->Scheduler.Help_node
- );
- _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
+ if ( victim_owner->Scheduler.helping_nodes > 0 ) {
+ _Scheduler_SMP_Request_ask_for_help( victim_owner );
+ }
}
+
+ cpu = _Thread_Get_CPU( victim_owner );
+ } else {
+ cpu = _Thread_Get_CPU( victim_idle );
}
- _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
+ _Thread_Scheduler_release_critical( victim_owner, &lock_context );
_Scheduler_SMP_Allocate_processor(
context,
scheduled,
- victim,
- victim_cpu,
+ cpu,
allocate_processor
);
-
- return victim_thread;
}
/**
@@ -764,16 +885,19 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
Scheduler_Node *filter
)
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Chain_Control *scheduled = &self->Scheduled;
- Scheduler_Node *lowest_scheduled =
- (Scheduler_Node *) _Chain_Last( scheduled );
+ Scheduler_SMP_Context *self;
+ Scheduler_Node *lowest_scheduled;
(void) filter;
- _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
+ self = _Scheduler_SMP_Get_self( context );
+
+ _Assert( !_Chain_Is_empty( &self->Scheduled ) );
+ lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );
+
_Assert(
- _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
+ _Chain_Next( &lowest_scheduled->Node.Chain ) ==
+ _Chain_Tail( &self->Scheduled )
);
return lowest_scheduled;
@@ -802,52 +926,43 @@ static inline void _Scheduler_SMP_Enqueue_to_scheduled(
Scheduler_Node *lowest_scheduled,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *lowest_scheduled_idle;
+ Scheduler_SMP_Action action;
- action = _Scheduler_Try_to_schedule_node(
- context,
- node,
- _Scheduler_Node_get_idle( lowest_scheduled ),
- _Scheduler_SMP_Get_idle_thread
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
+ lowest_scheduled,
+ release_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );
+
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
( *insert_scheduled )( context, node, priority );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
-
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state(
- lowest_scheduled,
- SCHEDULER_SMP_NODE_READY
- );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
+ } else {
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
- ( *insert_scheduled )( context, node, priority );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+ if ( lowest_scheduled_idle != NULL ) {
+ (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
+ }
- _Scheduler_Exchange_idle_thread(
- node,
- lowest_scheduled,
- _Scheduler_Node_get_idle( lowest_scheduled )
- );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
}
}
@@ -881,8 +996,11 @@ static inline bool _Scheduler_SMP_Enqueue(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
bool needs_help;
@@ -890,7 +1008,13 @@ static inline bool _Scheduler_SMP_Enqueue(
lowest_scheduled = ( *get_lowest_scheduled )( context, node );
- if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
+ if (
+ ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &lowest_scheduled->Node.Chain
+ )
+ ) {
_Scheduler_SMP_Enqueue_to_scheduled(
context,
node,
@@ -898,10 +1022,14 @@ static inline bool _Scheduler_SMP_Enqueue(
lowest_scheduled,
insert_scheduled,
move_from_scheduled_to_ready,
- allocate_processor
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
needs_help = false;
} else {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
needs_help = true;
}
@@ -928,7 +1056,7 @@ static inline bool _Scheduler_SMP_Enqueue(
* @param allocate_processor Function to allocate a processor to a node
* based on the rules of the scheduler.
*/
-static inline bool _Scheduler_SMP_Enqueue_scheduled(
+static inline void _Scheduler_SMP_Enqueue_scheduled(
Scheduler_Context *context,
Scheduler_Node *const node,
Priority_Control insert_priority,
@@ -938,12 +1066,22 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
+ Thread_Control *node_idle;
+
+ node_idle = _Scheduler_Release_idle_thread_if_necessary(
+ node,
+ release_idle_node,
+ context
+ );
+
while ( true ) {
- Scheduler_Node *highest_ready;
- Scheduler_Try_to_schedule_action action;
+ Scheduler_Node *highest_ready;
+ Scheduler_SMP_Action action;
highest_ready = ( *get_highest_ready )( context, node );
@@ -952,12 +1090,13 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
* it now on the scheduled or ready set.
*/
if (
- node->sticky_level > 0
- && ( *order )( &insert_priority, &highest_ready->Node.Chain )
+ node->sticky_level > 0 && ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &highest_ready->Node.Chain
+ )
) {
- ( *insert_scheduled )( context, node, insert_priority );
-
- if ( _Scheduler_Node_get_idle( node ) != NULL ) {
+ if ( node_idle != NULL ) {
Thread_Control *owner;
ISR_lock_Context lock_context;
@@ -965,77 +1104,50 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
_Thread_Scheduler_acquire_critical( owner, &lock_context );
if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
- _Thread_Scheduler_cancel_need_for_help(
- owner,
- _Thread_Get_CPU( owner )
- );
- _Scheduler_Discard_idle_thread(
- context,
- owner,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
+ Per_CPU_Control *cpu;
+
+ _Scheduler_SMP_Cancel_ask_for_help( owner );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ cpu = _Thread_Get_CPU( node_idle );
+ _Thread_Set_CPU( owner, cpu );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
+ } else {
+ Thread_Control *new_idle;
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Assert_Unused_variable_equals( new_idle, node_idle );
}
-
- _Thread_Scheduler_release_critical( owner, &lock_context );
}
- return false;
+ ( *insert_scheduled )( context, node, insert_priority );
+
+ return;
}
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- _Scheduler_Node_get_idle( node ),
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
- Thread_Control *idle;
-
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
node,
+ node_idle,
allocate_processor
);
- ( *insert_ready )( context, node, insert_priority );
( *move_from_ready_to_scheduled )( context, highest_ready );
-
- idle = _Scheduler_Release_idle_thread(
- context,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
- return ( idle == NULL );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_SCHEDULED
- );
-
( *insert_ready )( context, node, insert_priority );
- ( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Exchange_idle_thread(
- highest_ready,
- node,
- _Scheduler_Node_get_idle( node )
- );
- return false;
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
}
}
@@ -1059,7 +1171,9 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
*
* @param context The scheduler context instance.
* @param victim The node of the thread that is repressed by the newly scheduled thread.
- * @param victim_cpu The cpu to allocate.
+ * @param cpu is the processor to allocate.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the set of
* ready nodes.
* @param get_highest_ready Function to get the highest ready node.
@@ -1071,46 +1185,44 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Scheduler_SMP_Action action;
- do {
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *extract_from_scheduled )( context, victim );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- NULL,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Allocate_processor(
context,
highest_ready,
- victim,
- victim_cpu,
+ cpu,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1118,7 +1230,6 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
*
* @param context The scheduler context instance.
* @param victim The node of the thread that is repressed by the newly scheduled thread.
- * @param victim_cpu The cpu to allocate.
* @param extract_from_ready Function to extract a node from the set of
* ready nodes.
* @param get_highest_ready Function to get the highest ready node.
@@ -1130,45 +1241,49 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *victim_idle;
+ Scheduler_SMP_Action action;
- do {
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
+ victim,
+ release_idle_node,
+ context
+ );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- NULL,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
victim,
+ victim_idle,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1195,39 +1310,61 @@ static inline void _Scheduler_SMP_Block(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
+ int sticky_level;
+ ISR_lock_Context lock_context;
Scheduler_SMP_Node_state node_state;
- Per_CPU_Control *thread_cpu;
+ Per_CPU_Control *cpu;
- node_state = _Scheduler_SMP_Node_state( node );
+ sticky_level = node->sticky_level;
+ --sticky_level;
+ node->sticky_level = sticky_level;
+ _Assert( sticky_level >= 0 );
- thread_cpu = _Scheduler_Block_node(
- context,
- thread,
- node,
- node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Get_idle_thread
- );
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
+ cpu = _Thread_Get_CPU( thread );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
- if ( thread_cpu != NULL ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ node_state = _Scheduler_SMP_Node_state( node );
- if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- ( *extract_from_scheduled )( context, node );
- _Scheduler_SMP_Schedule_highest_ready(
- context,
- node,
- thread_cpu,
- extract_from_ready,
- get_highest_ready,
- move_from_ready_to_scheduled,
- allocate_processor
- );
- } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
- ( *extract_from_ready )( context, node );
+ if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
+ if (
+ node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
+ _Scheduler_Node_get_idle( node ) == NULL
+ ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Thread_Set_CPU( idle, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
}
+
+ return;
+ }
+
+ _Assert( _Scheduler_Node_get_user( node ) == thread );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ cpu,
+ extract_from_scheduled,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node
+ );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *extract_from_ready )( context, node );
}
}
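
/*
 * A worked example of the sticky level accounting above, assuming MrsP-style
 * sticky semantics where acquiring and releasing a sticky mutex raises and
 * lowers the home node's sticky level (the transitions are illustrative):
 *
 *   thread T is unblocked       sticky_level 0 -> 1  (the node's user)
 *   T obtains a sticky mutex    sticky_level 1 -> 2
 *   T blocks waiting for I/O    sticky_level 2 -> 1  (still positive, so the
 *                               node stays scheduled and an idle thread is
 *                               attached to occupy the processor)
 *   T is unblocked again        sticky_level 1 -> 2  (the idle thread is
 *                               discarded in _Scheduler_SMP_Unblock())
 *   T releases the mutex        sticky_level 2 -> 1
 *   T blocks                    sticky_level 1 -> 0  (normal block path)
 */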
@@ -1242,52 +1379,60 @@ static inline void _Scheduler_SMP_Block(
* of a context.
*/
static inline void _Scheduler_SMP_Unblock(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Update update,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
- bool unblock;
+ Priority_Control priority;
+
+ _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );
+
+ ++node->sticky_level;
+ _Assert( node->sticky_level > 0 );
node_state = _Scheduler_SMP_Node_state( node );
- unblock = _Scheduler_Unblock_node(
- context,
- thread,
- node,
- node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Release_idle_thread
- );
- if ( unblock ) {
- Priority_Control priority;
- bool needs_help;
+ if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Scheduler_Discard_idle_thread(
+ thread,
+ node,
+ release_idle_node,
+ context
+ );
- priority = _Scheduler_Node_get_priority( node );
- priority = SCHEDULER_PRIORITY_PURIFY( priority );
+ return;
+ }
- if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
- ( *update )( context, node, priority );
- }
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );
- if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
- Priority_Control insert_priority;
+ priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
- needs_help = ( *enqueue )( context, node, insert_priority );
- } else {
- _Assert( node_state == SCHEDULER_SMP_NODE_READY );
- _Assert( node->sticky_level > 0 );
- _Assert( node->idle == NULL );
- needs_help = true;
- }
+ if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
+ ( *update )( context, node, priority );
+ }
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Priority_Control insert_priority;
+ bool needs_help;
+
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ needs_help = ( *enqueue )( context, node, insert_priority );
- if ( needs_help ) {
- _Scheduler_Ask_for_help( thread );
+ if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
+ _Scheduler_SMP_Request_ask_for_help( thread );
}
+ } else {
+ _Assert( node_state == SCHEDULER_SMP_NODE_READY );
+ _Assert( node->sticky_level > 0 );
+ _Assert( node->idle == NULL );
+ _Scheduler_SMP_Request_ask_for_help( thread );
}
}
@@ -1302,6 +1447,8 @@ static inline void _Scheduler_SMP_Unblock(
* @param context The scheduler instance context.
* @param thread The thread for the operation.
* @param[in, out] node The node to update the priority of.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready
* queue of the scheduler context.
* @param update Function to update the priority of a node in the scheduler
@@ -1311,14 +1458,15 @@ static inline void _Scheduler_SMP_Unblock(
* @param ask_for_help Function to perform a help request.
*/
static inline void _Scheduler_SMP_Update_priority(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Update update,
- Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Enqueue enqueue_scheduled,
- Scheduler_SMP_Ask_for_help ask_for_help
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
+ Scheduler_SMP_Ask_for_help ask_for_help
)
{
Priority_Control priority;
@@ -1339,7 +1487,7 @@ static inline void _Scheduler_SMP_Update_priority(
node_state = _Scheduler_SMP_Node_state( node );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
( *update )( context, node, priority );
( *enqueue_scheduled )( context, node, insert_priority );
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
@@ -1361,21 +1509,23 @@ static inline void _Scheduler_SMP_Update_priority(
* @param context The scheduler instance context.
* @param thread The thread for the operation.
* @param node The node of the thread that yields.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready
* queue of the scheduler context.
* @param enqueue Function to enqueue a node with a given priority.
* @param enqueue_scheduled Function to enqueue a scheduled node.
*/
static inline void _Scheduler_SMP_Yield(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Enqueue enqueue_scheduled
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled
)
{
- bool needs_help;
Scheduler_SMP_Node_state node_state;
Priority_Control insert_priority;
@@ -1384,19 +1534,11 @@ static inline void _Scheduler_SMP_Yield(
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
( *enqueue_scheduled )( context, node, insert_priority );
- needs_help = false;
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
( *extract_from_ready )( context, node );
-
- needs_help = ( *enqueue )( context, node, insert_priority );
- } else {
- needs_help = true;
- }
-
- if ( needs_help ) {
- _Scheduler_Ask_for_help( thread );
+ (void) ( *enqueue )( context, node, insert_priority );
}
}
@@ -1456,7 +1598,8 @@ static inline bool _Scheduler_SMP_Ask_for_help(
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_Node *lowest_scheduled;
@@ -1485,49 +1628,54 @@ static inline bool _Scheduler_SMP_Ask_for_help(
insert_priority = _Scheduler_SMP_Node_priority( node );
- if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
- _Thread_Scheduler_cancel_need_for_help(
- thread,
- _Thread_Get_CPU( thread )
- );
+ if (
+ ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &lowest_scheduled->Node.Chain
+ )
+ ) {
+ Thread_Control *lowest_scheduled_idle;
+
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( thread, &lock_context );
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
+ lowest_scheduled,
+ release_idle_node,
+ context
+ );
+
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
- ( *insert_scheduled )( context, node, insert_priority );
( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+ ( *insert_scheduled )( context, node, insert_priority );
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
+
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
success = false;
}
} else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Thread_Scheduler_cancel_need_for_help(
- thread,
- _Thread_Get_CPU( thread )
- );
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
_Scheduler_Discard_idle_thread(
- context,
thread,
node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ context
);
- _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
- _Thread_Scheduler_release_critical( thread, &lock_context );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1580,6 +1728,8 @@ static inline void _Scheduler_SMP_Reconsider_help_request(
* @param[in, out] thread The thread to change to @a next_state.
* @param[in, out] node The node to withdraw.
* @param next_state The new state for @a thread.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param get_highest_ready Function to get the highest ready node.
@@ -1593,10 +1743,12 @@ static inline void _Scheduler_SMP_Withdraw_node(
Thread_Control *thread,
Scheduler_Node *node,
Thread_Scheduler_state next_state,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
ISR_lock_Context lock_context;
@@ -1605,27 +1757,32 @@ static inline void _Scheduler_SMP_Withdraw_node(
_Thread_Scheduler_acquire_critical( thread, &lock_context );
node_state = _Scheduler_SMP_Node_state( node );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- Per_CPU_Control *thread_cpu;
+ Per_CPU_Control *cpu;
- thread_cpu = _Thread_Get_CPU( thread );
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
+ cpu = _Thread_Get_CPU( thread );
_Scheduler_Thread_change_state( thread, next_state );
_Thread_Scheduler_release_critical( thread, &lock_context );
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ _Assert( _Scheduler_Node_get_user( node ) == thread );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
_Scheduler_SMP_Schedule_highest_ready(
context,
node,
- thread_cpu,
+ cpu,
+ extract_from_scheduled,
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node
);
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
_Thread_Scheduler_release_critical( thread, &lock_context );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
( *extract_from_ready )( context, node );
} else {
_Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
@@ -1634,6 +1791,97 @@ static inline void _Scheduler_SMP_Withdraw_node(
}
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+static inline void _Scheduler_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue
+)
+{
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Scheduler_Context *context;
+ Priority_Control insert_priority;
+ Priority_Control priority;
+
+ context = _Scheduler_Get_context( scheduler );
+ priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
+
+ if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
+ ( *update )( context, node, priority );
+ }
+
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ (void) ( *enqueue )( context, node, insert_priority );
+ }
+}
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node from which to clean the sticky property.
+ */
+static inline void _Scheduler_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
+)
+{
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Node_get_idle( node );
+
+ if ( idle != NULL ) {
+ Scheduler_Context *context;
+
+ context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ _Thread_Get_CPU( idle ),
+ extract_from_scheduled,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node
+ );
+ }
+ }
+}
+
+/**
* @brief Starts the idle thread on the given processor.
*
* @param context The scheduler context instance.
@@ -1660,7 +1908,6 @@ static inline void _Scheduler_SMP_Do_start_idle(
_Thread_Set_CPU( idle, cpu );
( *register_idle )( context, &node->Base, cpu );
_Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}
/**
@@ -1673,11 +1920,11 @@ static inline void _Scheduler_SMP_Do_start_idle(
* @param register_idle Function to register the idle thread for a cpu.
*/
static inline void _Scheduler_SMP_Add_processor(
- Scheduler_Context *context,
- Thread_Control *idle,
- Scheduler_SMP_Has_ready has_ready,
- Scheduler_SMP_Enqueue enqueue_scheduled,
- Scheduler_SMP_Register_idle register_idle
+ Scheduler_Context *context,
+ Thread_Control *idle,
+ Scheduler_SMP_Has_ready has_ready,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
+ Scheduler_SMP_Register_idle register_idle
)
{
Scheduler_SMP_Context *self;
@@ -1685,7 +1932,6 @@ static inline void _Scheduler_SMP_Add_processor(
self = _Scheduler_SMP_Get_self( context );
idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
node = _Thread_Scheduler_get_home_node( idle );
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
@@ -1706,6 +1952,8 @@ static inline void _Scheduler_SMP_Add_processor(
*
* @param context The scheduler context instance.
* @param cpu The processor to remove from.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param enqueue Function to enqueue a node with a given priority.
@@ -1713,10 +1961,13 @@ static inline void _Scheduler_SMP_Add_processor(
* @return The idle thread of @a cpu.
*/
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
- Scheduler_Context *context,
- Per_CPU_Control *cpu,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Context *self;
@@ -1736,39 +1987,37 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
chain_node = _Chain_Next( chain_node );
} while ( _Thread_Get_CPU( victim_user ) != cpu );
- _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
+ ( *extract_from_scheduled )( &self->Base, victim_node );
victim_owner = _Scheduler_Node_get_owner( victim_node );
if ( !victim_owner->is_idle ) {
- Scheduler_Node *idle_node;
+ Thread_Control *victim_idle;
+ Scheduler_Node *idle_node;
+ Priority_Control insert_priority;
- _Scheduler_Release_idle_thread(
- &self->Base,
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
victim_node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ &self->Base
);
- idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
- idle_node = _Thread_Scheduler_get_home_node( idle );
- ( *extract_from_ready )( &self->Base, idle_node );
+ idle_node = ( *get_idle_node )( &self->Base );
+ idle = _Scheduler_Node_get_owner( idle_node );
_Scheduler_SMP_Preempt(
&self->Base,
idle_node,
victim_node,
+ victim_idle,
_Scheduler_SMP_Allocate_processor_exact
);
- if ( !_Chain_Is_empty( &self->Scheduled ) ) {
- Priority_Control insert_priority;
-
- insert_priority = _Scheduler_SMP_Node_priority( victim_node );
- insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- ( *enqueue )( context, victim_node, insert_priority );
- }
+ _Assert( !_Chain_Is_empty( &self->Scheduled ) );
+ insert_priority = _Scheduler_SMP_Node_priority( victim_node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ ( *enqueue )( &self->Base, victim_node, insert_priority );
} else {
_Assert( victim_owner == victim_user );
_Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
idle = victim_owner;
- _Scheduler_SMP_Exctract_idle_thread( idle );
}
return idle;
@@ -1784,6 +2033,8 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
* @param[in, out] node The node to set the affinity of.
* @param arg The affinity for @a node.
* @param set_affinity Function to set the affinity of a node.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param get_highest_ready Function to get the highest ready node.
@@ -1799,11 +2050,14 @@ static inline void _Scheduler_SMP_Set_affinity(
Scheduler_Node *node,
void *arg,
Scheduler_SMP_Set_affinity set_affinity,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1814,15 +2068,16 @@ static inline void _Scheduler_SMP_Set_affinity(
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
_Scheduler_SMP_Preempt_and_schedule_highest_ready(
context,
node,
- _Thread_Get_CPU( thread ),
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
( *set_affinity )( context, node, arg );
( *enqueue )( context, node, insert_priority );
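
/*
 * A sketch of how a concrete scheduler wires this template together, modeled
 * on the priority SMP scheduler; the _My_SMP_* handler names are hypothetical
 * placeholders for the scheduler-specific ready queue operations.
 */
void _My_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _My_SMP_Extract_from_ready,
    _My_SMP_Enqueue,
    _My_SMP_Enqueue_scheduled
  );
}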