author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-10-15 11:21:31 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-23 11:00:28 +0100
commit     3d6ebde1acab61e7c8fce1a1ed51517f7c2bf7c6 (patch)
tree       53af30eda5f2f52317549afb9492b5a9f4cb7720 /cpukit/score/src/threadchangepriority.c
parent     score: Add SMP scheduler idle exchange callback (diff)
score: Add SMP scheduler make/clean sticky
This patch fixes the following broken behaviour: if a thread that is scheduled on a helping scheduler and does not yet own a MrsP semaphore obtains a MrsP semaphore, then no scheduler node using an idle thread and the ceiling priority of the semaphore is unblocked for the home scheduler. This could lead to priority inversion issues and is not in line with the MrsP protocol.

Introduce two new scheduler operations which are only enabled if RTEMS_SMP is defined. The operations are used to make the scheduler node of the home scheduler sticky and to clean the sticky property. This helps to keep the sticky handling out of the frequently used priority update operation.

Close #4532.
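The call sites in the diff below fix the shape of the two new operations: each takes the scheduler, the thread, and the scheduler node, and returns nothing. A sketch of the corresponding members presumably added to the Scheduler_Operations structure in <rtems/score/scheduler.h> follows; member placement and the comments are illustrative, not copied from the commit:

#if defined(RTEMS_SMP)
  /* Make the scheduler node of the home scheduler sticky. */
  void ( *make_sticky )(
    const Scheduler_Control *scheduler,
    Thread_Control          *the_thread,
    Scheduler_Node          *node
  );

  /* Clean the sticky property from the scheduler node of the home
     scheduler. */
  void ( *clean_sticky )(
    const Scheduler_Control *scheduler,
    Thread_Control          *the_thread,
    Scheduler_Node          *node
  );
#endif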
Diffstat (limited to 'cpukit/score/src/threadchangepriority.c')
-rw-r--r--  cpukit/score/src/threadchangepriority.c | 132 +++++++++++++++++++++++---
1 file changed, 127 insertions(+), 5 deletions(-)
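The diff itself defines three new entry points. For orientation, here is a hypothetical caller sketch; mrsp_obtain_done() and mrsp_release_done() are invented names, not part of this commit, and the new functions are assumed to be declared in <rtems/score/threadimpl.h> alongside the other _Thread_* internals:

#include <rtems/score/threadimpl.h>

/* Hypothetical: invoked after a thread obtained a MrsP semaphore and was
   raised to the ceiling priority.  The callee increments the sticky level
   and performs the sticky transition only on the first obtain. */
static void mrsp_obtain_done( Thread_Control *new_owner )
{
  _Thread_Priority_update_and_make_sticky( new_owner );
}

/* Hypothetical: invoked after a thread released a MrsP semaphore and had
   its priority restored.  The callee decrements the sticky level and
   cleans the sticky property only on the last release. */
static void mrsp_release_done( Thread_Control *previous_owner )
{
  _Thread_Priority_update_and_clean_sticky( previous_owner );
}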
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index bd4fef279b..637d5e810d 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -372,18 +372,140 @@ void _Thread_Priority_update( Thread_queue_Context *queue_context )
 }
 
 #if defined(RTEMS_SMP)
-void _Thread_Priority_and_sticky_update(
+static void _Thread_Priority_update_helping(
   Thread_Control *the_thread,
-  int             sticky_level_change
+  Chain_Node     *first_node
 )
 {
-  ISR_lock_Context lock_context;
+  const Chain_Node *tail;
+  Chain_Node       *node;
+
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+  node = _Chain_Next( first_node );
+
+  while ( node != tail ) {
+    Scheduler_Node          *scheduler_node;
+    const Scheduler_Control *scheduler;
+    ISR_lock_Context         lock_context;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    ( *scheduler->Operations.update_priority )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    node = _Chain_Next( node );
+  }
+}
+
+void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread )
+{
+  ISR_lock_Context         lock_context;
+  ISR_lock_Context         lock_context_2;
+  Chain_Node              *node;
+  Scheduler_Node          *scheduler_node;
+  const Scheduler_Control *scheduler;
+  int                      new_sticky_level;
+  int                      make_sticky_level;
 
   _Thread_State_acquire( the_thread, &lock_context );
-  _Scheduler_Priority_and_sticky_update(
+  _Thread_Scheduler_process_requests( the_thread );
+
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+  _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+  new_sticky_level = scheduler_node->sticky_level + 1;
+  scheduler_node->sticky_level = new_sticky_level;
+  _Assert( new_sticky_level >= 1 );
+
+  /*
+   * The sticky level is incremented by the scheduler unblock operation, so
+   * for a ready thread, the change to sticky happens at a level of two.
+   */
+  make_sticky_level = 1 + (int) _Thread_Is_ready( the_thread );
+
+  if ( new_sticky_level == make_sticky_level ) {
+    ( *scheduler->Operations.make_sticky )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+  }
+
+  ( *scheduler->Operations.update_priority )(
+    scheduler,
     the_thread,
-    sticky_level_change
+    scheduler_node
   );
+
+  _Scheduler_Release_critical( scheduler, &lock_context_2 );
+  _Thread_Priority_update_helping( the_thread, node );
+  _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread )
+{
+  ISR_lock_Context         lock_context;
+  ISR_lock_Context         lock_context_2;
+  Chain_Node              *node;
+  Scheduler_Node          *scheduler_node;
+  const Scheduler_Control *scheduler;
+  int                      new_sticky_level;
+  int                      clean_sticky_level;
+
+  _Thread_State_acquire( the_thread, &lock_context );
+  _Thread_Scheduler_process_requests( the_thread );
+
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+  _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+  new_sticky_level = scheduler_node->sticky_level - 1;
+  scheduler_node->sticky_level = new_sticky_level;
+  _Assert( new_sticky_level >= 0 );
+
+  /*
+   * The sticky level is incremented by the scheduler unblock operation, so
+   * for a ready thread, the change to non-sticky happens at a level of one.
+   */
+  clean_sticky_level = (int) _Thread_Is_ready( the_thread );
+
+  if ( new_sticky_level == clean_sticky_level ) {
+    ( *scheduler->Operations.clean_sticky )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+  }
+
+  ( *scheduler->Operations.update_priority )(
+    scheduler,
+    the_thread,
+    scheduler_node
+  );
+
+  _Scheduler_Release_critical( scheduler, &lock_context_2 );
+  _Thread_Priority_update_helping( the_thread, node );
+  _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread )
+{
+  ISR_lock_Context lock_context;
+
+  _Thread_State_acquire( the_thread, &lock_context );
+  _Thread_Scheduler_process_requests( the_thread );
+  _Scheduler_Update_priority( the_thread );
   _Thread_State_release( the_thread, &lock_context );
 }
 #endif
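As a sanity check on the thresholds used above, here is a small standalone model of the sticky-level bookkeeping (invented for illustration, not RTEMS code): readiness contributes one level via the scheduler unblock operation and each owned MrsP semaphore contributes one more, so for a ready thread the sticky transitions happen exactly on the first obtain and the last release. Compiling and running this prints 1, 0, 0, 1:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the make condition above: trigger when the incremented level
   reaches 1 + (thread is ready). */
static bool make_triggers( int *level, bool is_ready )
{
  int new_level = ++*level;

  assert( new_level >= 1 );
  return new_level == 1 + (int) is_ready;
}

/* Mirrors the clean condition above: trigger when the decremented level
   drops back to (thread is ready). */
static bool clean_triggers( int *level, bool is_ready )
{
  int new_level = --*level;

  assert( new_level >= 0 );
  return new_level == (int) is_ready;
}

int main( void )
{
  int level = 1; /* ready thread owning no MrsP semaphore */

  printf( "obtain #1 makes sticky: %d\n", make_triggers( &level, true ) );
  printf( "obtain #2 makes sticky: %d\n", make_triggers( &level, true ) );
  printf( "release #2 cleans sticky: %d\n", clean_triggers( &level, true ) );
  printf( "release #1 cleans sticky: %d\n", clean_triggers( &level, true ) );
  return 0;
}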