Diffstat:
-rw-r--r--  cpukit/score/src/threadchangepriority.c | 172
1 file changed, 162 insertions(+), 10 deletions(-)
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index bd4fef279b..78291b7798 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -14,11 +16,28 @@
* COPYRIGHT (c) 1989-2014.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2013, 2016 embedded brains GmbH
+ * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -92,6 +111,7 @@ static void _Thread_Priority_action_change(
void *arg
)
{
+ (void) arg;
_Thread_Set_scheduler_node_priority(
priority_aggregation,
priority_group_order
@@ -116,11 +136,15 @@ static void _Thread_Priority_do_perform_actions(
priority_aggregation = _Priority_Actions_move( &queue_context->Priority.Actions );
do {
+#if defined(RTEMS_SMP)
Priority_Aggregation *next_aggregation;
+#endif
Priority_Node *priority_action_node;
Priority_Action_type priority_action_type;
+#if defined(RTEMS_SMP)
next_aggregation = _Priority_Get_next_action( priority_aggregation );
+#endif
priority_action_node = priority_aggregation->Action.node;
priority_action_type = priority_aggregation->Action.type;
@@ -179,8 +203,12 @@ static void _Thread_Priority_do_perform_actions(
break;
}
+#if defined(RTEMS_SMP)
priority_aggregation = next_aggregation;
- } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+ } while ( priority_aggregation != NULL );
+#else
+ } while ( false );
+#endif
if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
_Thread_queue_Context_add_priority_update( queue_context, the_thread );
@@ -333,6 +361,7 @@ void _Thread_Priority_changed(
);
}
+#if defined(RTEMS_SMP)
void _Thread_Priority_replace(
Thread_Control *the_thread,
Priority_Node *victim_node,
@@ -348,6 +377,7 @@ void _Thread_Priority_replace(
replacement_node
);
}
+#endif
void _Thread_Priority_update( Thread_queue_Context *queue_context )
{
@@ -372,18 +402,140 @@ void _Thread_Priority_update( Thread_queue_Context *queue_context )
}
#if defined(RTEMS_SMP)
-void _Thread_Priority_and_sticky_update(
+static void _Thread_Priority_update_helping(
Thread_Control *the_thread,
- int sticky_level_change
+ Chain_Node *first_node
)
{
- ISR_lock_Context lock_context;
+ const Chain_Node *tail;
+ Chain_Node *node;
+
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+ node = _Chain_Next( first_node );
+
+ while ( node != tail ) {
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+}
+
+void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
+ Chain_Node *node;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ int new_sticky_level;
+ int make_sticky_level;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+ new_sticky_level = scheduler_node->sticky_level + 1;
+ scheduler_node->sticky_level = new_sticky_level;
+ _Assert( new_sticky_level >= 1 );
+
+ /*
+ * The sticky level is incremented by the scheduler unblock operation, so for
+ * a ready thread, the change to sticky happens at a level of two.
+ */
+ make_sticky_level = 1 + (int) _Thread_Is_ready( the_thread );
+
+ if ( new_sticky_level == make_sticky_level ) {
+ ( *scheduler->Operations.make_sticky )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ }
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+
+ _Scheduler_Release_critical( scheduler, &lock_context_2 );
+ _Thread_Priority_update_helping( the_thread, node );
+ _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
+ Chain_Node *node;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ int new_sticky_level;
+ int clean_sticky_level;
_Thread_State_acquire( the_thread, &lock_context );
- _Scheduler_Priority_and_sticky_update(
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context_2 );
+
+ new_sticky_level = scheduler_node->sticky_level - 1;
+ scheduler_node->sticky_level = new_sticky_level;
+ _Assert( new_sticky_level >= 0 );
+
+ /*
+ * The sticky level is incremented by the scheduler unblock operation, so for
+ * a ready thread, the change away from sticky happens at a level of one.
+ */
+ clean_sticky_level = (int) _Thread_Is_ready( the_thread );
+
+ if ( new_sticky_level == clean_sticky_level ) {
+ ( *scheduler->Operations.clean_sticky )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ }
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
the_thread,
- sticky_level_change
+ scheduler_node
);
+
+ _Scheduler_Release_critical( scheduler, &lock_context_2 );
+ _Thread_Priority_update_helping( the_thread, node );
+ _Thread_State_release( the_thread, &lock_context );
+}
+
+void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread )
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_Scheduler_process_requests( the_thread );
+ _Scheduler_Update_priority( the_thread );
_Thread_State_release( the_thread, &lock_context );
}
#endif
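
The sticky-level comments added above encode a small piece of arithmetic: the threshold at which the make_sticky and clean_sticky scheduler operations fire depends on whether the thread is ready. The following is a minimal standalone model of that arithmetic, assuming that the scheduler unblock operation contributes one sticky level to a ready thread; it is not RTEMS code, and _Thread_Is_ready() is stood in for by a plain boolean.

#include <assert.h>
#include <stdbool.h>

/* Model of make_sticky_level = 1 + (int) _Thread_Is_ready( the_thread ):
 * true when incrementing the sticky level crosses the make-sticky threshold. */
static bool crosses_make_sticky( int old_level, bool ready )
{
  return old_level + 1 == 1 + (int) ready;
}

/* Model of clean_sticky_level = (int) _Thread_Is_ready( the_thread ):
 * true when decrementing the sticky level crosses the clean-sticky threshold. */
static bool crosses_clean_sticky( int old_level, bool ready )
{
  return old_level - 1 == (int) ready;
}

int main( void )
{
  /* Assumption: a ready thread already carries one sticky level from the
   * scheduler unblock operation, so it sits at level 1 with no resources. */
  assert( crosses_make_sticky( 1, true ) );    /* 1 -> 2: becomes sticky   */
  assert( !crosses_make_sticky( 2, true ) );   /* 2 -> 3: already sticky   */
  assert( crosses_clean_sticky( 2, true ) );   /* 2 -> 1: last reference   */

  /* A blocked thread sits at level 0 with no resources. */
  assert( crosses_make_sticky( 0, false ) );   /* 0 -> 1: becomes sticky   */
  assert( crosses_clean_sticky( 1, false ) );  /* 1 -> 0: no longer sticky */

  return 0;
}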
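
The removed _Thread_Priority_and_sticky_update() took the sticky level change as a signed argument, whereas the new entry points encode the direction in the function name. A hypothetical call-site migration might look as follows; the owner variable and the obtain/release context are assumptions for illustration, and the real call sites (for example in the MrsP semaphore code) are not part of this diff.

/* Before this change: the sign of the argument selected the direction. */
_Thread_Priority_and_sticky_update( owner, 1 );    /* resource obtained */
_Thread_Priority_and_sticky_update( owner, -1 );   /* resource released */

/* After this change: make_sticky/clean_sticky run only when the threshold
 * computed above is crossed. */
_Thread_Priority_update_and_make_sticky( owner );  /* resource obtained */
_Thread_Priority_update_and_clean_sticky( owner ); /* resource released */

/* When the priority must be re-evaluated without touching the sticky level: */
_Thread_Priority_update_ignore_sticky( owner );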