author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-10-15 11:21:31 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-23 11:00:28 +0100
commit    3d6ebde1acab61e7c8fce1a1ed51517f7c2bf7c6 (patch)
tree      53af30eda5f2f52317549afb9492b5a9f4cb7720 /cpukit/score
parent    score: Add SMP scheduler idle exchange callback (diff)
download  rtems-3d6ebde1acab61e7c8fce1a1ed51517f7c2bf7c6.tar.bz2
score: Add SMP scheduler make/clean sticky
This patch fixes the following broken behaviour: If a thread is scheduled on a helping scheduler and obtains a MrsP semaphore while it does not yet own one, then no scheduler node using an idle thread and the ceiling priority of the semaphore is unblocked for the home scheduler. This could lead to priority inversion issues and is not in line with the MrsP protocol.

Introduce two new scheduler operations which are only enabled if RTEMS_SMP is defined. The operations are used to make the scheduler node of the home scheduler sticky and to clean the sticky property. This helps to keep the sticky handling out of the frequently used priority update operation.

Close #4532.
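For orientation, both new operations use the same signature as the other per-node scheduler operations. A minimal sketch of the added operations-table members follows; the field names make_sticky and clean_sticky are taken from the indirect calls in threadchangepriority.c below, while the surrounding struct and forward declarations are illustrative, not the real <rtems/score/scheduler.h> contents:

/*
 * Illustrative sketch only; the real definitions live in
 * <rtems/score/scheduler.h>. The field names match the calls
 * ( *scheduler->Operations.make_sticky )( ... ) made in
 * threadchangepriority.c below.
 */
typedef struct Scheduler_Control Scheduler_Control;
typedef struct Thread_Control Thread_Control;
typedef struct Scheduler_Node Scheduler_Node;

typedef struct {
  /* ... existing operations such as update_priority ... */
#if defined(RTEMS_SMP)
  void ( *make_sticky )(
    const Scheduler_Control *scheduler,
    Thread_Control          *the_thread,
    Scheduler_Node          *node
  );

  void ( *clean_sticky )(
    const Scheduler_Control *scheduler,
    Thread_Control          *the_thread,
    Scheduler_Node          *node
  );
#endif
} Scheduler_Operations_sketch;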
Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/src/schedulerdefaultmakecleansticky.c  |  52
-rw-r--r--  cpukit/score/src/scheduleredfsmp.c                   |  36
-rw-r--r--  cpukit/score/src/schedulerpriorityaffinitysmp.c      |  39
-rw-r--r--  cpukit/score/src/schedulerprioritysmp.c              |  37
-rw-r--r--  cpukit/score/src/schedulersimplesmp.c                |  37
-rw-r--r--  cpukit/score/src/schedulerstrongapa.c                |  37
-rw-r--r--  cpukit/score/src/threadchangepriority.c              | 132
-rw-r--r--  cpukit/score/src/threadqenqueue.c                    |   6
8 files changed, 362 insertions(+), 14 deletions(-)
diff --git a/cpukit/score/src/schedulerdefaultmakecleansticky.c b/cpukit/score/src/schedulerdefaultmakecleansticky.c
new file mode 100644
index 0000000000..e2b2d659f1
--- /dev/null
+++ b/cpukit/score/src/schedulerdefaultmakecleansticky.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreScheduler
+ *
+ * @brief This source file contains the implementation of
+ * _Scheduler_default_Sticky_do_nothing().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/scheduler.h>
+
+void _Scheduler_default_Sticky_do_nothing(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ (void) scheduler;
+ (void) the_thread;
+ (void) node;
+}
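A scheduler that never hands out sticky nodes can wire this no-op in directly. A hypothetical operations-table fragment is shown below; the designated-initializer style and the surrounding table are assumptions for illustration, not copied from the real entry-points macros:

  /* Hypothetical wiring of the no-op default (illustrative fragment). */
  .make_sticky  = _Scheduler_default_Sticky_do_nothing,
  .clean_sticky = _Scheduler_default_Sticky_do_nothing,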
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 27be08ac40..0b0ee6ed21 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -11,7 +11,8 @@
* _Scheduler_EDF_SMP_Remove_processor(), _Scheduler_EDF_SMP_Set_affinity(),
* _Scheduler_EDF_SMP_Start_idle(), _Scheduler_EDF_SMP_Unblock(),
* _Scheduler_EDF_SMP_Unpin(), _Scheduler_EDF_SMP_Update_priority(),
- * _Scheduler_EDF_SMP_Withdraw_node(), and _Scheduler_EDF_SMP_Yield().
+ * _Scheduler_EDF_SMP_Withdraw_node(), _Scheduler_EDF_SMP_Make_sticky(),
+ * _Scheduler_EDF_SMP_Clean_sticky(), and _Scheduler_EDF_SMP_Yield().
*/
/*
@@ -619,6 +620,39 @@ void _Scheduler_EDF_SMP_Withdraw_node(
);
}
+void _Scheduler_EDF_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_EDF_SMP_Do_update,
+ _Scheduler_EDF_SMP_Enqueue
+ );
+}
+
+void _Scheduler_EDF_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
+ _Scheduler_EDF_SMP_Extract_from_ready,
+ _Scheduler_EDF_SMP_Get_highest_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_EDF_SMP_Allocate_processor
+ );
+}
+
static inline void _Scheduler_EDF_SMP_Register_idle(
Scheduler_Context *context,
Scheduler_Node *idle_base,
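All five SMP schedulers in this patch follow the same pattern: the scheduler-specific entry point forwards to a common helper and passes its own specialization callbacks (update/enqueue for make, extract/get-highest-ready/move/allocate for clean). Based on the callbacks handed over above, a plausible sketch of the make-sticky helper is given below; the body is illustrative, not the verbatim implementation:

/*
 * Plausible shape of _Scheduler_SMP_Make_sticky(): if the home node is
 * currently blocked, give it the thread's priority and enqueue it, so
 * that the home scheduler keeps a node (backed by an idle thread if
 * necessary) in play while the thread is scheduled elsewhere.
 */
static inline void _Scheduler_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Scheduler_SMP_Update     update,
  Scheduler_SMP_Enqueue    enqueue
)
{
  (void) the_thread; /* not needed in this sketch */

  if ( _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED ) {
    Scheduler_Context *context;
    Priority_Control   insert_priority;

    context = _Scheduler_Get_context( scheduler );
    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *update )( context, node, insert_priority );
    ( *enqueue )( context, node, insert_priority );
  }
}

The clean-sticky counterpart works the other way around: it receives the extract, get-highest-ready, move, and allocate-processor callbacks so it can withdraw an idle-backed node and hand the processor to the highest-priority ready node.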
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 4bbf2f6e17..648c243589 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -12,8 +12,10 @@
* _Scheduler_priority_affinity_SMP_Remove_processor(),
* _Scheduler_priority_affinity_SMP_Set_affinity(),
* _Scheduler_priority_affinity_SMP_Unblock(),
- * _Scheduler_priority_affinity_SMP_Update_priority(), and
- * _Scheduler_priority_affinity_SMP_Withdraw_node().
+ * _Scheduler_priority_affinity_SMP_Update_priority(),
+ * _Scheduler_priority_affinity_SMP_Withdraw_node(),
+ * _Scheduler_priority_affinity_SMP_Make_sticky(), and
+ * _Scheduler_priority_affinity_SMP_Clean_sticky().
*/
/*
@@ -512,6 +514,39 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
);
}
+void _Scheduler_priority_affinity_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_priority_SMP_Do_update,
+ _Scheduler_priority_affinity_SMP_Enqueue
+ );
+}
+
+void _Scheduler_priority_affinity_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_SMP_Extract_from_scheduled,
+ _Scheduler_priority_SMP_Extract_from_ready,
+ _Scheduler_priority_affinity_SMP_Get_highest_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+}
+
void _Scheduler_priority_affinity_SMP_Add_processor(
const Scheduler_Control *scheduler,
Thread_Control *idle
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index b0b920c960..7262f48e8e 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -12,7 +12,9 @@
* _Scheduler_priority_SMP_Remove_processor(),
* _Scheduler_priority_SMP_Unblock(),
* _Scheduler_priority_SMP_Update_priority(),
- * _Scheduler_priority_SMP_Withdraw_node(), and
+ * _Scheduler_priority_SMP_Withdraw_node(),
+ * _Scheduler_priority_SMP_Make_sticky(),
+ * _Scheduler_priority_SMP_Clean_sticky(), and
* _Scheduler_priority_SMP_Yield().
*/
@@ -265,6 +267,39 @@ void _Scheduler_priority_SMP_Withdraw_node(
);
}
+void _Scheduler_priority_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_priority_SMP_Do_update,
+ _Scheduler_priority_SMP_Enqueue
+ );
+}
+
+void _Scheduler_priority_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_SMP_Extract_from_scheduled,
+ _Scheduler_priority_SMP_Extract_from_ready,
+ _Scheduler_priority_SMP_Get_highest_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_lazy
+ );
+}
+
void _Scheduler_priority_SMP_Add_processor(
const Scheduler_Control *scheduler,
Thread_Control *idle
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 02e4579aa1..b7814b0fcb 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -11,7 +11,9 @@
* _Scheduler_simple_SMP_Reconsider_help_request(),
* _Scheduler_simple_SMP_Remove_processor(), _Scheduler_simple_SMP_Unblock(),
* _Scheduler_simple_SMP_Update_priority(),
- * _Scheduler_simple_SMP_Withdraw_node(), and _Scheduler_simple_SMP_Yield().
+ * _Scheduler_simple_SMP_Withdraw_node(),
+ * _Scheduler_simple_SMP_Make_sticky(), _Scheduler_simple_SMP_Clean_sticky(),
+ * and _Scheduler_simple_SMP_Yield().
*/
/*
@@ -335,6 +337,39 @@ void _Scheduler_simple_SMP_Withdraw_node(
);
}
+void _Scheduler_simple_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_simple_SMP_Do_update,
+ _Scheduler_simple_SMP_Enqueue
+ );
+}
+
+void _Scheduler_simple_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_SMP_Extract_from_scheduled,
+ _Scheduler_simple_SMP_Extract_from_ready,
+ _Scheduler_simple_SMP_Get_highest_ready,
+ _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_lazy
+ );
+}
+
void _Scheduler_simple_SMP_Add_processor(
const Scheduler_Control *scheduler,
Thread_Control *idle
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index afd9fcc709..6c7ab942fa 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -31,7 +31,9 @@
* _Scheduler_strong_APA_Set_affinity(),
* _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(),
* _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(),
- * _Scheduler_strong_APA_Withdraw_node(), and _Scheduler_strong_APA_Yield().
+ * _Scheduler_strong_APA_Withdraw_node(),
+ * _Scheduler_strong_APA_Make_sticky(), _Scheduler_strong_APA_Clean_sticky(),
+ * and _Scheduler_strong_APA_Yield().
*/
/*
@@ -935,6 +937,39 @@ void _Scheduler_strong_APA_Withdraw_node(
);
}
+void _Scheduler_strong_APA_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_strong_APA_Do_update,
+ _Scheduler_strong_APA_Enqueue
+ );
+}
+
+void _Scheduler_strong_APA_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_SMP_Extract_from_scheduled,
+ _Scheduler_strong_APA_Extract_from_ready,
+ _Scheduler_strong_APA_Get_highest_ready,
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+ _Scheduler_strong_APA_Allocate_processor
+ );
+}
+
static inline void _Scheduler_strong_APA_Register_idle(
Scheduler_Context *context,
Scheduler_Node *idle_base,
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index bd4fef279b..637d5e810d 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -372,18 +372,140 @@ void _Thread_Priority_update( Thread_queue_Context *queue_context )
}
#if defined(RTEMS_SMP)
-void _Thread_Priority_and_sticky_update(
+static void _Thread_Priority_update_helping(
Thread_Control *the_thread,
- int sticky_level_change
+ Chain_Node *first_node
)
{
- ISR_lock_Context lock_context;
+ const Chain_Node *tail;
+ Chain_Node *node;
+
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+ node = _Chain_Next( first_node );
+
+ while ( node != tail ) {
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+}
+
+void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
+ Chain_Node *node;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ int new_sticky_level;
+ int make_sticky_level;
_Thread_State_acquire( the_thread, &lock_context );
- _Scheduler_Priority_and_sticky_update(
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+ new_sticky_level = scheduler_node->sticky_level + 1;
+ scheduler_node->sticky_level = new_sticky_level;
+ _Assert( new_sticky_level >= 1 );
+
+ /*
+ * The sticky level is incremented by the scheduler block operation, so for a
+ * ready thread, the change to sticky happens at a level of two.
+ */
+ make_sticky_level = 1 + (int) _Thread_Is_ready( the_thread );
+
+ if ( new_sticky_level == make_sticky_level ) {
+ ( *scheduler->Operations.make_sticky )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ }
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
the_thread,
- sticky_level_change
+ scheduler_node
);
+
+ _Scheduler_Release_critical( scheduler, &lock_context_2 );
+ _Thread_Priority_update_helping( the_thread, node );
+ _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
+ Chain_Node *node;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ int new_sticky_level;
+ int clean_sticky_level;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+ new_sticky_level = scheduler_node->sticky_level - 1;
+ scheduler_node->sticky_level = new_sticky_level;
+ _Assert( new_sticky_level >= 0 );
+
+ /*
+ * The sticky level is incremented by the scheduler block operation, so for a
+ * ready thread, the change to sticky happens at a level of one.
+ */
+ clean_sticky_level = (int) _Thread_Is_ready( the_thread );
+
+ if ( new_sticky_level == clean_sticky_level ) {
+ ( *scheduler->Operations.clean_sticky )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ }
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+
+ _Scheduler_Release_critical( scheduler, &lock_context_2 );
+ _Thread_Priority_update_helping( the_thread, node );
+ _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_Scheduler_process_requests( the_thread );
+ _Scheduler_Update_priority( the_thread );
_Thread_State_release( the_thread, &lock_context );
}
#endif
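Taken together, the two threshold computations above form a small decision table: for a ready thread, the make transition fires on the level change 1 -> 2 and the clean transition on 2 -> 1; for a non-ready thread, both thresholds drop by one. An illustrative, standalone model of that arithmetic follows (not RTEMS code; the function names are invented for the example):

#include <assert.h>
#include <stdbool.h>

/* Mirrors make_sticky_level = 1 + (int) _Thread_Is_ready() above. */
static bool make_sticky_fires( int old_level, bool is_ready )
{
  return old_level + 1 == 1 + (int) is_ready;
}

/* Mirrors clean_sticky_level = (int) _Thread_Is_ready() above. */
static bool clean_sticky_fires( int old_level, bool is_ready )
{
  return old_level - 1 == (int) is_ready;
}

int main( void )
{
  /* A ready thread obtains its first MrsP semaphore: level 1 -> 2. */
  assert( make_sticky_fires( 1, true ) );

  /* Obtaining a second semaphore does not fire it again: 2 -> 3. */
  assert( !make_sticky_fires( 2, true ) );

  /* Releasing back down to one owned semaphore: 3 -> 2, no clean yet. */
  assert( !clean_sticky_fires( 3, true ) );

  /* Releasing the last semaphore: 2 -> 1 fires clean_sticky. */
  assert( clean_sticky_fires( 2, true ) );

  return 0;
}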
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 5fc357ec82..ed6c64543c 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -518,7 +518,7 @@ Status_Control _Thread_queue_Enqueue_sticky(
);
_Thread_Priority_update( queue_context );
- _Thread_Priority_and_sticky_update( the_thread, 1 );
+ _Thread_Priority_update_and_make_sticky( the_thread );
_Thread_Dispatch_enable( cpu_self );
while (
@@ -899,8 +899,8 @@ void _Thread_queue_Surrender_sticky(
queue,
&queue_context->Lock_context.Lock_context
);
- _Thread_Priority_and_sticky_update( previous_owner, -1 );
- _Thread_Priority_and_sticky_update( new_owner, 0 );
+ _Thread_Priority_update_and_clean_sticky( previous_owner );
+ _Thread_Priority_update_ignore_sticky( new_owner );
_Thread_Dispatch_enable( cpu_self );
}
#endif