author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-06-22 17:09:23 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-09-21 08:59:26 +0200
commit     300f6a481aaf9e6d29811faca71bf7104a01492c (patch)
tree       ba8f18cedb93e3781a2f17aa989c5c805dd18d6a
parent     classic networking: do not reference BSP_irq_enabled_at_i8259s which is no mo... (diff)
download   rtems-300f6a481aaf9e6d29811faca71bf7104a01492c.tar.bz2
score: Rework thread priority management
Add priority nodes which contribute to the overall thread priority.

The actual priority of a thread is now an aggregation of priority nodes.  The
thread priority aggregation for the home scheduler instance of a thread
consists of at least one priority node, which is normally the real priority of
the thread.  The locking protocols (e.g. priority ceiling and priority
inheritance), rate-monotonic period objects and the POSIX sporadic server add,
change and remove priority nodes.

A thread now changes its priority immediately, i.e. priority changes are no
longer deferred until the thread releases its last resource.

Replace the _Thread_Change_priority() function with

* _Thread_Priority_perform_actions(),
* _Thread_Priority_add(),
* _Thread_Priority_remove(),
* _Thread_Priority_change(), and
* _Thread_Priority_update().

Update #2412.
Update #2556.
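The call sites below share a common pattern: instead of passing a filter
callback to _Thread_Change_priority(), a caller now collects priority updates
in a Thread_queue_Context while holding the thread wait lock and applies them
afterwards with _Thread_Priority_update().  The following is a minimal sketch
of that pattern, condensed from pthread_setschedprio() and
pthread_mutex_setprioceiling() in this patch; the helper name and the choice
of the Boolean ordering flag are illustrative only.

#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

/* Illustrative helper, not part of the patch: set the real priority of a thread. */
static void _Example_Set_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context  queue_context;
  Per_CPU_Control      *cpu_self;

  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  /*
   * Change the value of the real priority node; the Boolean flag selects the
   * ordering within the priority group, compare the calls in this patch.
   */
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    false,
    &queue_context
  );

  _Thread_Wait_release( the_thread, &queue_context );

  /* Carry out the scheduler updates collected in the queue context. */
  cpu_self = _Thread_Dispatch_disable();
  _Thread_Priority_update( &queue_context );
  _Thread_Dispatch_enable( cpu_self );
}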
-rw-r--r--  cpukit/libmisc/capture/capture.h | 2
-rw-r--r--  cpukit/libmisc/cpuuse/cpuusagetop.c | 6
-rw-r--r--  cpukit/posix/include/rtems/posix/pthreadimpl.h | 2
-rw-r--r--  cpukit/posix/include/rtems/posix/threadsup.h | 8
-rw-r--r--  cpukit/posix/src/killinfo.c | 2
-rw-r--r--  cpukit/posix/src/mutexsetprioceiling.c | 13
-rw-r--r--  cpukit/posix/src/pthread.c | 103
-rw-r--r--  cpukit/posix/src/pthreadcreate.c | 32
-rw-r--r--  cpukit/posix/src/pthreadgetschedparam.c | 2
-rw-r--r--  cpukit/posix/src/pthreadsetschedparam.c | 143
-rw-r--r--  cpukit/posix/src/pthreadsetschedprio.c | 92
-rw-r--r--  cpukit/rtems/include/rtems/rtems/ratemon.h | 6
-rw-r--r--  cpukit/rtems/src/ratemoncancel.c | 12
-rw-r--r--  cpukit/rtems/src/ratemoncreate.c | 2
-rw-r--r--  cpukit/rtems/src/ratemonperiod.c | 15
-rw-r--r--  cpukit/rtems/src/semsetpriority.c | 18
-rw-r--r--  cpukit/rtems/src/tasksetpriority.c | 95
-rw-r--r--  cpukit/score/Makefile.am | 3
-rw-r--r--  cpukit/score/include/rtems/score/coremutex.h | 4
-rw-r--r--  cpukit/score/include/rtems/score/coremuteximpl.h | 146
-rw-r--r--  cpukit/score/include/rtems/score/mrsp.h | 12
-rw-r--r--  cpukit/score/include/rtems/score/mrspimpl.h | 255
-rw-r--r--  cpukit/score/include/rtems/score/priority.h | 175
-rw-r--r--  cpukit/score/include/rtems/score/priorityimpl.h | 435
-rw-r--r--  cpukit/score/include/rtems/score/scheduler.h | 28
-rw-r--r--  cpukit/score/include/rtems/score/schedulercbs.h | 17
-rw-r--r--  cpukit/score/include/rtems/score/scheduleredf.h | 26
-rw-r--r--  cpukit/score/include/rtems/score/scheduleredfimpl.h | 14
-rw-r--r--  cpukit/score/include/rtems/score/schedulerimpl.h | 86
-rw-r--r--  cpukit/score/include/rtems/score/schedulernode.h | 21
-rw-r--r--  cpukit/score/include/rtems/score/schedulernodeimpl.h | 19
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h | 11
-rw-r--r--  cpukit/score/include/rtems/score/thread.h | 53
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h | 261
-rw-r--r--  cpukit/score/include/rtems/score/threadq.h | 117
-rw-r--r--  cpukit/score/include/rtems/score/threadqimpl.h | 145
-rw-r--r--  cpukit/score/preinstall.am | 4
-rw-r--r--  cpukit/score/src/mutex.c | 21
-rw-r--r--  cpukit/score/src/schedulercbs.c | 22
-rw-r--r--  cpukit/score/src/schedulercbsnodeinit.c | 1
-rw-r--r--  cpukit/score/src/schedulercbsreleasejob.c | 40
-rw-r--r--  cpukit/score/src/schedulercbsunblock.c | 51
-rw-r--r--  cpukit/score/src/schedulerdefaultnodeinit.c | 4
-rw-r--r--  cpukit/score/src/schedulerdefaultreleasejob.c | 20
-rw-r--r--  cpukit/score/src/scheduleredfchangepriority.c | 8
-rw-r--r--  cpukit/score/src/scheduleredfnodeinit.c | 6
-rw-r--r--  cpukit/score/src/scheduleredfreleasejob.c | 84
-rw-r--r--  cpukit/score/src/scheduleredfunblock.c | 2
-rw-r--r--  cpukit/score/src/scheduleredfyield.c | 2
-rw-r--r--  cpukit/score/src/schedulerpriority.c | 2
-rw-r--r--  cpukit/score/src/schedulerprioritysmp.c | 7
-rw-r--r--  cpukit/score/src/schedulersimplesmp.c | 2
-rw-r--r--  cpukit/score/src/schedulerstrongapa.c | 7
-rw-r--r--  cpukit/score/src/thread.c | 4
-rw-r--r--  cpukit/score/src/threadchangepriority.c | 366
-rw-r--r--  cpukit/score/src/threadinitialize.c | 7
-rw-r--r--  cpukit/score/src/threadmp.c | 3
-rw-r--r--  cpukit/score/src/threadqenqueue.c | 122
-rw-r--r--  cpukit/score/src/threadqflush.c | 9
-rw-r--r--  cpukit/score/src/threadqops.c | 847
-rw-r--r--  cpukit/score/src/threadrestart.c | 61
-rw-r--r--  cpukit/score/src/threadsetpriority.c | 59
-rw-r--r--  cpukit/score/src/threadtimeout.c | 3
-rw-r--r--  testsuites/smptests/smpmutex01/init.c | 3
-rw-r--r--  testsuites/smptests/smpscheduler03/init.c | 43
-rw-r--r--  testsuites/sptests/spmutex01/init.c | 141
-rw-r--r--  testsuites/sptests/spsem03/init.c | 30
-rw-r--r--  testsuites/sptests/spsem03/spsem03.doc | 4
68 files changed, 2854 insertions, 1512 deletions
diff --git a/cpukit/libmisc/capture/capture.h b/cpukit/libmisc/capture/capture.h
index 0f3635c0fc..5e3237d549 100644
--- a/cpukit/libmisc/capture/capture.h
+++ b/cpukit/libmisc/capture/capture.h
@@ -888,7 +888,7 @@ rtems_capture_task_start_priority (rtems_tcb* tcb)
static inline rtems_task_priority
rtems_capture_task_real_priority (rtems_tcb* tcb)
{
- return tcb->real_priority;
+ return tcb->Real_priority.priority;
}
/**
diff --git a/cpukit/libmisc/cpuuse/cpuusagetop.c b/cpukit/libmisc/cpuuse/cpuusagetop.c
index ccf32de4d7..38d5ca4b25 100644
--- a/cpukit/libmisc/cpuuse/cpuusagetop.c
+++ b/cpukit/libmisc/cpuuse/cpuusagetop.c
@@ -261,7 +261,7 @@ task_usage(Thread_Control* thread, void* arg)
CPU_usage_Less_than(&usage, &data->usage[j]))
continue;
case RTEMS_TOP_SORT_REAL_PRI:
- if (thread->real_priority > data->tasks[j]->real_priority)
+ if (thread->Real_priority.priority > data->tasks[j]->Real_priority.priority)
continue;
case RTEMS_TOP_SORT_CURRENT_PRI:
if (
@@ -478,10 +478,10 @@ rtems_cpuusage_top_thread (rtems_task_argument arg)
snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.Entry.Kinds.Numeric.entry);
rtems_printf(data->printer,
- " 0x%08" PRIx32 " | %-19s | %3" PRId32 " | %3" PRId32 " | ",
+ " 0x%08" PRIx32 " | %-19s | %3" PRId64 " | %3" PRId64 " | ",
thread->Object.id,
name,
- thread->real_priority,
+ thread->Real_priority.priority,
_Thread_Get_priority(thread));
usage = data->usage[i];
diff --git a/cpukit/posix/include/rtems/posix/pthreadimpl.h b/cpukit/posix/include/rtems/posix/pthreadimpl.h
index 988246ed53..acf66822c0 100644
--- a/cpukit/posix/include/rtems/posix/pthreadimpl.h
+++ b/cpukit/posix/include/rtems/posix/pthreadimpl.h
@@ -68,6 +68,8 @@ RTEMS_INLINE_ROUTINE void _POSIX_Threads_Sporadic_timer_insert(
);
}
+void _POSIX_Threads_Sporadic_timer( Watchdog_Control *watchdog );
+
/**
* @brief POSIX threads sporadic budget callout.
*
diff --git a/cpukit/posix/include/rtems/posix/threadsup.h b/cpukit/posix/include/rtems/posix/threadsup.h
index 21977be210..3bb2210d97 100644
--- a/cpukit/posix/include/rtems/posix/threadsup.h
+++ b/cpukit/posix/include/rtems/posix/threadsup.h
@@ -60,13 +60,7 @@ typedef struct {
* @brief The low priority when using the sporadic server scheduling
* policy.
*/
- Priority_Control low_priority;
-
- /**
- * @brief The high priority when using the sporadic server scheduling
- * policy.
- */
- Priority_Control high_priority;
+ Priority_Node Low_priority;
} Sporadic;
/** This is the set of signals which are currently unblocked. */
diff --git a/cpukit/posix/src/killinfo.c b/cpukit/posix/src/killinfo.c
index 7cf74eb0bf..6f45b29ec6 100644
--- a/cpukit/posix/src/killinfo.c
+++ b/cpukit/posix/src/killinfo.c
@@ -153,7 +153,7 @@ int _POSIX_signals_Send(
the_node = the_node->next ) {
Scheduler_Node *scheduler_node;
- scheduler_node = SCHEDULER_NODE_OF_WAIT_CHAIN_NODE( the_node );
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( the_node );
the_thread = _Scheduler_Node_get_owner( scheduler_node );
api = the_thread->API_Extensions[ THREAD_API_POSIX ];
diff --git a/cpukit/posix/src/mutexsetprioceiling.c b/cpukit/posix/src/mutexsetprioceiling.c
index 31e54c5fc3..aaea57ddfb 100644
--- a/cpukit/posix/src/mutexsetprioceiling.c
+++ b/cpukit/posix/src/mutexsetprioceiling.c
@@ -64,7 +64,18 @@ int pthread_mutex_setprioceiling(
new_priority = _POSIX_Priority_To_core( scheduler, prioceiling, &valid );
if ( valid ) {
- _CORE_ceiling_mutex_Set_priority( &the_mutex->Mutex, new_priority );
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _CORE_ceiling_mutex_Set_priority(
+ &the_mutex->Mutex,
+ new_priority,
+ &queue_context
+ );
+ cpu_self = _Thread_Dispatch_disable();
+ _Thread_Priority_update( &queue_context );
+ _Thread_Dispatch_enable( cpu_self );
error = 0;
} else {
error = EINVAL;
diff --git a/cpukit/posix/src/pthread.c b/cpukit/posix/src/pthread.c
index a1394a5dee..9cd730b59f 100644
--- a/cpukit/posix/src/pthread.c
+++ b/cpukit/posix/src/pthread.c
@@ -82,85 +82,70 @@ pthread_attr_t _POSIX_Threads_Default_attributes = {
#endif
};
-static bool _POSIX_Threads_Sporadic_timer_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
-)
+void _POSIX_Threads_Sporadic_timer( Watchdog_Control *watchdog )
{
- POSIX_API_Control *api;
- Priority_Control current_priority;
- Priority_Control new_priority;
+ POSIX_API_Control *api;
+ Thread_Control *the_thread;
+ Thread_queue_Context queue_context;
- api = arg;
-
- new_priority = api->Sporadic.high_priority;
- *new_priority_p = new_priority;
+ api = RTEMS_CONTAINER_OF( watchdog, POSIX_API_Control, Sporadic.Timer );
+ the_thread = api->thread;
- current_priority = _Thread_Get_priority( the_thread );
- the_thread->real_priority = new_priority;
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire( the_thread, &queue_context );
+
+ if ( _Priority_Node_is_active( &api->Sporadic.Low_priority ) ) {
+ _Thread_Priority_add(
+ the_thread,
+ &the_thread->Real_priority,
+ &queue_context
+ );
+ _Thread_Priority_remove(
+ the_thread,
+ &api->Sporadic.Low_priority,
+ &queue_context
+ );
+ _Priority_Node_set_inactive( &api->Sporadic.Low_priority );
+ }
_Watchdog_Per_CPU_remove_relative( &api->Sporadic.Timer );
_POSIX_Threads_Sporadic_timer_insert( the_thread, api );
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
-}
-
-static void _POSIX_Threads_Sporadic_timer( Watchdog_Control *watchdog )
-{
- POSIX_API_Control *api;
- Thread_Control *the_thread;
-
- api = RTEMS_CONTAINER_OF( watchdog, POSIX_API_Control, Sporadic.Timer );
- the_thread = api->thread;
-
- _Thread_Change_priority(
- the_thread,
- 0,
- api,
- _POSIX_Threads_Sporadic_timer_filter,
- true
- );
+ _Thread_Wait_release( the_thread, &queue_context );
+ _Thread_Priority_update( &queue_context );
}
-static bool _POSIX_Threads_Sporadic_budget_callout_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
-)
+void _POSIX_Threads_Sporadic_budget_callout( Thread_Control *the_thread )
{
- POSIX_API_Control *api;
- Priority_Control current_priority;
- Priority_Control new_priority;
+ POSIX_API_Control *api;
+ Thread_queue_Context queue_context;
api = the_thread->API_Extensions[ THREAD_API_POSIX ];
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire( the_thread, &queue_context );
+
/*
* This will prevent the thread from consuming its entire "budget"
* while at low priority.
*/
the_thread->cpu_time_budget = UINT32_MAX;
- new_priority = api->Sporadic.low_priority;
- *new_priority_p = new_priority;
-
- current_priority = _Thread_Get_priority( the_thread );
- the_thread->real_priority = new_priority;
-
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
-}
+ if ( !_Priority_Node_is_active( &api->Sporadic.Low_priority ) ) {
+ _Thread_Priority_add(
+ the_thread,
+ &api->Sporadic.Low_priority,
+ &queue_context
+ );
+ _Thread_Priority_remove(
+ the_thread,
+ &the_thread->Real_priority,
+ &queue_context
+ );
+ }
-void _POSIX_Threads_Sporadic_budget_callout( Thread_Control *the_thread )
-{
- _Thread_Change_priority(
- the_thread,
- 0,
- NULL,
- _POSIX_Threads_Sporadic_budget_callout_filter,
- true
- );
+ _Thread_Wait_release( the_thread, &queue_context );
+ _Thread_Priority_update( &queue_context );
}
/*
diff --git a/cpukit/posix/src/pthreadcreate.c b/cpukit/posix/src/pthreadcreate.c
index 411882d532..f1983ac128 100644
--- a/cpukit/posix/src/pthreadcreate.c
+++ b/cpukit/posix/src/pthreadcreate.c
@@ -61,11 +61,11 @@ int pthread_create(
}
};
const pthread_attr_t *the_attr;
+ int normal_prio;
int low_prio;
- int high_prio;
bool valid;
+ Priority_Control core_normal_prio;
Priority_Control core_low_prio;
- Priority_Control core_high_prio;
Thread_CPU_budget_algorithms budget_algorithm;
Thread_CPU_budget_algorithm_callout budget_callout;
bool is_fp;
@@ -149,22 +149,22 @@ int pthread_create(
return error;
}
- if ( schedpolicy == SCHED_SPORADIC ) {
- low_prio = schedparam.sched_ss_low_priority;
- high_prio = schedparam.sched_priority;
- } else {
- low_prio = schedparam.sched_priority;
- high_prio = low_prio;
- }
+ normal_prio = schedparam.sched_priority;
scheduler = _Scheduler_Get_own( executing );
- core_low_prio = _POSIX_Priority_To_core( scheduler, low_prio, &valid );
+ core_normal_prio = _POSIX_Priority_To_core( scheduler, normal_prio, &valid );
if ( !valid ) {
return EINVAL;
}
- core_high_prio = _POSIX_Priority_To_core( scheduler, high_prio, &valid );
+ if ( schedpolicy == SCHED_SPORADIC ) {
+ low_prio = schedparam.sched_ss_low_priority;
+ } else {
+ low_prio = normal_prio;
+ }
+
+ core_low_prio = _POSIX_Priority_To_core( scheduler, low_prio, &valid );
if ( !valid ) {
return EINVAL;
}
@@ -205,7 +205,7 @@ int pthread_create(
the_attr->stackaddr,
_POSIX_Threads_Ensure_minimum_stack(the_attr->stacksize),
is_fp,
- core_high_prio,
+ core_normal_prio,
true, /* preemptible */
budget_algorithm,
budget_callout,
@@ -245,13 +245,11 @@ int pthread_create(
api = the_thread->API_Extensions[ THREAD_API_POSIX ];
_POSIX_Threads_Copy_attributes( &api->Attributes, the_attr );
- api->Sporadic.low_priority = core_low_prio;
- api->Sporadic.high_priority = core_high_prio;
+ _Priority_Node_initialize( &api->Sporadic.Low_priority, core_low_prio );
+ _Priority_Node_set_inactive( &api->Sporadic.Low_priority );
if ( schedpolicy == SCHED_SPORADIC ) {
- _ISR_lock_ISR_disable( &lock_context );
- _POSIX_Threads_Sporadic_timer_insert( the_thread, api );
- _ISR_lock_ISR_enable( &lock_context );
+ _POSIX_Threads_Sporadic_timer( &api->Sporadic.Timer );
}
/*
diff --git a/cpukit/posix/src/pthreadgetschedparam.c b/cpukit/posix/src/pthreadgetschedparam.c
index 38e0b4f9b5..1d4812c067 100644
--- a/cpukit/posix/src/pthreadgetschedparam.c
+++ b/cpukit/posix/src/pthreadgetschedparam.c
@@ -59,7 +59,7 @@ int pthread_getschedparam(
*param = api->Attributes.schedparam;
scheduler = _Scheduler_Get_own( the_thread );
- priority = the_thread->real_priority;
+ priority = the_thread->Real_priority.priority;
_Thread_Wait_release( the_thread, &queue_context );
diff --git a/cpukit/posix/src/pthreadsetschedparam.c b/cpukit/posix/src/pthreadsetschedparam.c
index e3711e368c..9aa2122ab8 100644
--- a/cpukit/posix/src/pthreadsetschedparam.c
+++ b/cpukit/posix/src/pthreadsetschedparam.c
@@ -30,36 +30,24 @@
#include <rtems/score/threadimpl.h>
#include <rtems/score/schedulerimpl.h>
-typedef struct {
- int policy;
- const struct sched_param *param;
- Thread_CPU_budget_algorithms budget_algorithm;
- Thread_CPU_budget_algorithm_callout budget_callout;
- int error;
-} POSIX_Set_sched_param_context;
-
-static bool _POSIX_Set_sched_param_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
+static int _POSIX_Set_sched_param(
+ Thread_Control *the_thread,
+ int policy,
+ struct sched_param *param,
+ Thread_CPU_budget_algorithms budget_algorithm,
+ Thread_CPU_budget_algorithm_callout budget_callout,
+ Thread_queue_Context *queue_context
)
{
- POSIX_Set_sched_param_context *context;
- const struct sched_param *param;
- const Scheduler_Control *scheduler;
- POSIX_API_Control *api;
- int low_prio;
- int high_prio;
- bool valid;
- Priority_Control core_low_prio;
- Priority_Control core_high_prio;
- Priority_Control current_priority;
-
- context = arg;
- param = context->param;
- scheduler = _Scheduler_Get_own( the_thread );
-
- if ( context->policy == SCHED_SPORADIC ) {
+ const Scheduler_Control *scheduler;
+ POSIX_API_Control *api;
+ int low_prio;
+ int high_prio;
+ bool valid;
+ Priority_Control core_normal_prio;
+ Priority_Control core_low_prio;
+
+ if ( policy == SCHED_SPORADIC ) {
low_prio = param->sched_ss_low_priority;
high_prio = param->sched_priority;
} else {
@@ -67,45 +55,60 @@ static bool _POSIX_Set_sched_param_filter(
high_prio = low_prio;
}
- core_low_prio = _POSIX_Priority_To_core( scheduler, low_prio, &valid );
+ scheduler = _Scheduler_Get_own( the_thread );
+
+ core_normal_prio = _POSIX_Priority_To_core( scheduler, low_prio, &valid );
if ( !valid ) {
- context->error = EINVAL;
- return false;
+ return EINVAL;
}
- core_high_prio = _POSIX_Priority_To_core( scheduler, high_prio, &valid );
+ core_low_prio = _POSIX_Priority_To_core( scheduler, high_prio, &valid );
if ( !valid ) {
- context->error = EINVAL;
- return false;
+ return EINVAL;
}
- *new_priority_p = core_high_prio;
-
- current_priority = _Thread_Get_priority( the_thread );
- the_thread->real_priority = core_high_prio;
-
api = the_thread->API_Extensions[ THREAD_API_POSIX ];
_Watchdog_Per_CPU_remove_relative( &api->Sporadic.Timer );
- api->Attributes.schedpolicy = context->policy;
+ _Priority_Node_set_priority( &the_thread->Real_priority, core_normal_prio );
+
+ if ( _Priority_Node_is_active( &api->Sporadic.Low_priority ) ) {
+ _Thread_Priority_add(
+ the_thread,
+ &the_thread->Real_priority,
+ queue_context
+ );
+ _Thread_Priority_remove(
+ the_thread,
+ &api->Sporadic.Low_priority,
+ queue_context
+ );
+ _Priority_Node_set_inactive( &api->Sporadic.Low_priority );
+ } else {
+ _Thread_Priority_changed(
+ the_thread,
+ &the_thread->Real_priority,
+ false,
+ queue_context
+ );
+ }
+
+ api->Attributes.schedpolicy = policy;
api->Attributes.schedparam = *param;
- api->Sporadic.low_priority = core_low_prio;
- api->Sporadic.high_priority = core_high_prio;
- the_thread->budget_algorithm = context->budget_algorithm;
- the_thread->budget_callout = context->budget_callout;
+ the_thread->budget_algorithm = budget_algorithm;
+ the_thread->budget_callout = budget_callout;
- if ( context->policy == SCHED_SPORADIC ) {
+ if ( policy == SCHED_SPORADIC ) {
+ _Priority_Node_set_priority( &api->Sporadic.Low_priority, core_low_prio );
_POSIX_Threads_Sporadic_timer_insert( the_thread, api );
} else {
the_thread->cpu_time_budget =
rtems_configuration_get_ticks_per_timeslice();
}
- context->error = 0;
- return _Thread_Priority_less_than( current_priority, core_high_prio )
- || !_Thread_Owns_resources( the_thread );
+ return 0;
}
int pthread_setschedparam(
@@ -114,11 +117,12 @@ int pthread_setschedparam(
struct sched_param *param
)
{
- Thread_Control *the_thread;
- Per_CPU_Control *cpu_self;
- POSIX_Set_sched_param_context context;
- ISR_lock_Context lock_context;
- int error;
+ Thread_CPU_budget_algorithms budget_algorithm;
+ Thread_CPU_budget_algorithm_callout budget_callout;
+ Thread_Control *the_thread;
+ Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ int error;
if ( param == NULL ) {
return EINVAL;
@@ -127,33 +131,34 @@ int pthread_setschedparam(
error = _POSIX_Thread_Translate_sched_param(
policy,
param,
- &context.budget_algorithm,
- &context.budget_callout
+ &budget_algorithm,
+ &budget_callout
);
if ( error != 0 ) {
return error;
}
- context.policy = policy;
- context.param = param;
-
- the_thread = _Thread_Get( thread, &lock_context );
+ the_thread = _Thread_Get( thread, &queue_context.Lock_context.Lock_context );
if ( the_thread == NULL ) {
return ESRCH;
}
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
- _ISR_lock_ISR_enable( &lock_context );
-
- _Thread_Change_priority(
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire_critical( the_thread, &queue_context );
+ error = _POSIX_Set_sched_param(
the_thread,
- 0,
- &context,
- _POSIX_Set_sched_param_filter,
- false
+ policy,
+ param,
+ budget_algorithm,
+ budget_callout,
+ &queue_context
);
-
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
+ );
+ _Thread_Wait_release( the_thread, &queue_context );
+ _Thread_Priority_update( &queue_context );
_Thread_Dispatch_enable( cpu_self );
- return context.error;
+ return error;
}
diff --git a/cpukit/posix/src/pthreadsetschedprio.c b/cpukit/posix/src/pthreadsetschedprio.c
index fac86d2609..e3d9de0136 100644
--- a/cpukit/posix/src/pthreadsetschedprio.c
+++ b/cpukit/posix/src/pthreadsetschedprio.c
@@ -18,79 +18,47 @@
#include <rtems/score/threadimpl.h>
#include <rtems/score/schedulerimpl.h>
-typedef struct {
- int prio;
- int error;
-} POSIX_Set_sched_prio_context;
-
-static bool _POSIX_Set_sched_prio_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
-)
-{
- POSIX_Set_sched_prio_context *context;
- int prio;
- const Scheduler_Control *scheduler;
- POSIX_API_Control *api;
- bool valid;
- Priority_Control current_priority;
- Priority_Control new_priority;
-
- context = arg;
- prio = context->prio;
- scheduler = _Scheduler_Get_own( the_thread );
-
- new_priority = _POSIX_Priority_To_core( scheduler, prio, &valid );
- if ( !valid ) {
- context->error = EINVAL;
- return false;
- }
-
- *new_priority_p = new_priority;
-
- current_priority = _Thread_Get_priority( the_thread );
- the_thread->real_priority = new_priority;
-
- api = the_thread->API_Extensions[ THREAD_API_POSIX ];
-
- api->Sporadic.high_priority = new_priority;
-
- if ( api->Sporadic.low_priority < new_priority ) {
- api->Sporadic.low_priority = new_priority;
- }
-
- context->error = 0;
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
-}
-
int pthread_setschedprio( pthread_t thread, int prio )
{
- Thread_Control *the_thread;
- Per_CPU_Control *cpu_self;
- POSIX_Set_sched_prio_context context;
- ISR_lock_Context lock_context;
-
- context.prio = prio;
+ Thread_Control *the_thread;
+ Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ const Scheduler_Control *scheduler;
+ Priority_Control new_priority;
+ bool valid;
- the_thread = _Thread_Get( thread, &lock_context );
+ the_thread = _Thread_Get( thread, &queue_context.Lock_context.Lock_context );
if ( the_thread == NULL ) {
return ESRCH;
}
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
- _ISR_lock_ISR_enable( &lock_context );
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire_critical( the_thread, &queue_context );
- _Thread_Change_priority(
+ scheduler = _Scheduler_Get_own( the_thread );
+
+ new_priority = _POSIX_Priority_To_core( scheduler, prio, &valid );
+ if ( !valid ) {
+ _Thread_Wait_release( the_thread, &queue_context );
+ return EINVAL;
+ }
+
+ _Thread_Priority_change(
the_thread,
- 0,
- &context,
- _POSIX_Set_sched_prio_filter,
- true
+ &the_thread->Real_priority,
+ new_priority,
+ true,
+ &queue_context
);
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
+ );
+ _Thread_Wait_release( the_thread, &queue_context );
+
+ _Thread_Priority_update( &queue_context );
+
_Thread_Dispatch_enable( cpu_self );
- return context.error;
+ return 0;
}
diff --git a/cpukit/rtems/include/rtems/rtems/ratemon.h b/cpukit/rtems/include/rtems/rtems/ratemon.h
index a2df13f025..50b847840f 100644
--- a/cpukit/rtems/include/rtems/rtems/ratemon.h
+++ b/cpukit/rtems/include/rtems/rtems/ratemon.h
@@ -206,6 +206,12 @@ typedef struct {
rtems_rate_monotonic_period_states state;
/**
+ * @brief A priority node for use by the scheduler job release and cancel
+ * operations.
+ */
+ Priority_Node Priority;
+
+ /**
* This field contains the length of the next period to be
* executed.
*/
diff --git a/cpukit/rtems/src/ratemoncancel.c b/cpukit/rtems/src/ratemoncancel.c
index b4e899d296..cb95f54408 100644
--- a/cpukit/rtems/src/ratemoncancel.c
+++ b/cpukit/rtems/src/ratemoncancel.c
@@ -27,18 +27,22 @@ void _Rate_monotonic_Cancel(
ISR_lock_Context *lock_context
)
{
- Per_CPU_Control *cpu_self;
- Thread_Control *update_priority;
+ Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
_Rate_monotonic_Acquire_critical( the_period, lock_context );
_Watchdog_Per_CPU_remove_relative( &the_period->Timer );
the_period->state = RATE_MONOTONIC_INACTIVE;
- update_priority = _Scheduler_Cancel_job( the_period->owner );
+ _Scheduler_Cancel_job(
+ the_period->owner,
+ &the_period->Priority,
+ &queue_context
+ );
cpu_self = _Thread_Dispatch_disable_critical( lock_context );
_Rate_monotonic_Release( the_period, lock_context );
- _Thread_Update_priority( update_priority );
+ _Thread_Priority_update( &queue_context );
_Thread_Dispatch_enable( cpu_self );
}
diff --git a/cpukit/rtems/src/ratemoncreate.c b/cpukit/rtems/src/ratemoncreate.c
index a86c6a1eeb..12327295ac 100644
--- a/cpukit/rtems/src/ratemoncreate.c
+++ b/cpukit/rtems/src/ratemoncreate.c
@@ -63,6 +63,8 @@ rtems_status_code rtems_rate_monotonic_create(
}
_ISR_lock_Initialize( &the_period->Lock, "Rate Monotonic Period" );
+ _Priority_Node_initialize( &the_period->Priority, 0 );
+ _Priority_Node_set_inactive( &the_period->Priority );
the_period->owner = _Thread_Get_executing();
the_period->state = RATE_MONOTONIC_INACTIVE;
diff --git a/cpukit/rtems/src/ratemonperiod.c b/cpukit/rtems/src/ratemonperiod.c
index 75a80d8088..77bd996819 100644
--- a/cpukit/rtems/src/ratemonperiod.c
+++ b/cpukit/rtems/src/ratemonperiod.c
@@ -70,9 +70,9 @@ static void _Rate_monotonic_Release_job(
ISR_lock_Context *lock_context
)
{
- Per_CPU_Control *cpu_self;
- Thread_Control *update_priority;
- uint64_t deadline;
+ Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ uint64_t deadline;
cpu_self = _Thread_Dispatch_disable_critical( lock_context );
@@ -81,10 +81,15 @@ static void _Rate_monotonic_Release_job(
cpu_self,
next_length
);
- update_priority = _Scheduler_Release_job( owner, deadline );
+ _Scheduler_Release_job(
+ owner,
+ &the_period->Priority,
+ deadline,
+ &queue_context
+ );
_Rate_monotonic_Release( the_period, lock_context );
- _Thread_Update_priority( update_priority );
+ _Thread_Priority_update( &queue_context );
_Thread_Dispatch_enable( cpu_self );
}
diff --git a/cpukit/rtems/src/semsetpriority.c b/cpukit/rtems/src/semsetpriority.c
index f992d99522..37dea5da80 100644
--- a/cpukit/rtems/src/semsetpriority.c
+++ b/cpukit/rtems/src/semsetpriority.c
@@ -42,16 +42,18 @@ static rtems_status_code _Semaphore_Set_priority(
Thread_queue_Context *queue_context
)
{
- rtems_status_code sc;
- bool valid;
- Priority_Control core_priority;
- Priority_Control old_priority;
+ rtems_status_code sc;
+ bool valid;
+ Priority_Control core_priority;
+ Priority_Control old_priority;
+ Per_CPU_Control *cpu_self;
core_priority = _RTEMS_Priority_To_core( scheduler, new_priority, &valid );
if ( new_priority != RTEMS_CURRENT_PRIORITY && !valid ) {
return RTEMS_INVALID_PRIORITY;
}
+ _Thread_queue_Context_clear_priority_updates( queue_context );
_Thread_queue_Acquire_critical(
&the_semaphore->Core_control.Wait_queue,
queue_context
@@ -71,7 +73,8 @@ static rtems_status_code _Semaphore_Set_priority(
if ( sc == RTEMS_SUCCESSFUL && new_priority != RTEMS_CURRENT_PRIORITY ) {
_CORE_ceiling_mutex_Set_priority(
&the_semaphore->Core_control.Mutex,
- core_priority
+ core_priority,
+ queue_context
);
}
@@ -106,10 +109,15 @@ static rtems_status_code _Semaphore_Set_priority(
break;
}
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
_Thread_queue_Release(
&the_semaphore->Core_control.Wait_queue,
queue_context
);
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
*old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
return sc;
diff --git a/cpukit/rtems/src/tasksetpriority.c b/cpukit/rtems/src/tasksetpriority.c
index 9779c973a1..6b8ab4e976 100644
--- a/cpukit/rtems/src/tasksetpriority.c
+++ b/cpukit/rtems/src/tasksetpriority.c
@@ -22,50 +22,43 @@
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
-typedef struct {
- const Scheduler_Control *scheduler;
- rtems_task_priority new_priority;
- Priority_Control old_priority;
- rtems_status_code status;
-} RTEMS_tasks_Set_priority_context;
-
-static bool _RTEMS_tasks_Set_priority_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
+static rtems_status_code _RTEMS_tasks_Set_priority(
+ Thread_Control *the_thread,
+ const Scheduler_Control *scheduler,
+ Priority_Control new_priority,
+ Thread_queue_Context *queue_context
)
{
- RTEMS_tasks_Set_priority_context *context;
- const Scheduler_Control *scheduler;
- bool valid;
- Priority_Control current_priority;
- Priority_Control new_priority;
+ Priority_Control core_new_priority;
+ bool valid;
+ Per_CPU_Control *cpu_self;
- context = arg;
- scheduler = _Scheduler_Get_own( the_thread );
- current_priority = _Thread_Get_priority( the_thread );
-
- context->scheduler = scheduler;
- context->old_priority = current_priority;
-
- new_priority = _RTEMS_Priority_To_core(
+ core_new_priority = _RTEMS_Priority_To_core(
scheduler,
- context->new_priority,
+ new_priority,
&valid
);
- *new_priority_p = new_priority;
-
if ( !valid ) {
- context->status = RTEMS_INVALID_PRIORITY;
- return false;
+ _Thread_Wait_release( the_thread, queue_context );
+ return RTEMS_INVALID_PRIORITY;
}
- the_thread->real_priority = new_priority;
- context->status = STATUS_SUCCESSFUL;
-
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Priority_change(
+ the_thread,
+ &the_thread->Real_priority,
+ core_new_priority,
+ false,
+ queue_context
+ );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Wait_release( the_thread, queue_context );
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
+ return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_task_set_priority(
@@ -75,7 +68,7 @@ rtems_status_code rtems_task_set_priority(
)
{
Thread_Control *the_thread;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
const Scheduler_Control *scheduler;
Priority_Control old_priority;
rtems_status_code status;
@@ -84,7 +77,8 @@ rtems_status_code rtems_task_set_priority(
return RTEMS_INVALID_ADDRESS;
}
- the_thread = _Thread_Get( id, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ the_thread = _Thread_Get( id, &queue_context.Lock_context.Lock_context );
if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
@@ -94,31 +88,20 @@ rtems_status_code rtems_task_set_priority(
#endif
}
- if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
- RTEMS_tasks_Set_priority_context context;
- Per_CPU_Control *cpu_self;
+ _Thread_Wait_acquire_critical( the_thread, &queue_context );
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
- _ISR_lock_ISR_enable( &lock_context );
+ scheduler = _Scheduler_Get_own( the_thread );
+ old_priority = _Thread_Get_priority( the_thread );
- context.new_priority = new_priority;
- _Thread_Change_priority(
+ if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
+ status = _RTEMS_tasks_Set_priority(
the_thread,
- 0,
- &context,
- _RTEMS_tasks_Set_priority_filter,
- false
+ scheduler,
+ new_priority,
+ &queue_context
);
-
- _Thread_Dispatch_enable( cpu_self );
- scheduler = context.scheduler;
- old_priority = context.old_priority;
- status = context.status;
} else {
- _Thread_State_acquire_critical( the_thread, &lock_context );
- scheduler = _Scheduler_Get_own( the_thread );
- old_priority = _Thread_Get_priority( the_thread );
- _Thread_State_release( the_thread, &lock_context );
+ _Thread_Wait_release( the_thread, &queue_context );
status = RTEMS_SUCCESSFUL;
}
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index f18af7d4e6..a196ff5dae 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -58,6 +58,7 @@ include_rtems_score_HEADERS += include/rtems/score/objectimpl.h
include_rtems_score_HEADERS += include/rtems/score/onceimpl.h
include_rtems_score_HEADERS += include/rtems/score/percpu.h
include_rtems_score_HEADERS += include/rtems/score/priority.h
+include_rtems_score_HEADERS += include/rtems/score/priorityimpl.h
include_rtems_score_HEADERS += include/rtems/score/prioritybitmap.h
include_rtems_score_HEADERS += include/rtems/score/prioritybitmapimpl.h
include_rtems_score_HEADERS += include/rtems/score/profiling.h
@@ -300,7 +301,7 @@ libscore_a_SOURCES += src/thread.c src/threadchangepriority.c \
src/threaddispatch.c \
src/threadget.c src/threadhandler.c src/threadinitialize.c \
src/threadloadenv.c \
- src/threadrestart.c src/threadsetpriority.c \
+ src/threadrestart.c \
src/threadsetstate.c \
src/threadstackallocate.c src/threadstackfree.c src/threadstart.c \
src/threadstartmultitasking.c src/iterateoverthreads.c
diff --git a/cpukit/score/include/rtems/score/coremutex.h b/cpukit/score/include/rtems/score/coremutex.h
index a3dcabf523..d2bf1c21f6 100644
--- a/cpukit/score/include/rtems/score/coremutex.h
+++ b/cpukit/score/include/rtems/score/coremutex.h
@@ -82,9 +82,9 @@ typedef struct {
CORE_recursive_mutex_Control Recursive;
/**
- * @brief The priority ceiling value for the mutex owner.
+ * @brief The priority ceiling node for the mutex owner.
*/
- Priority_Control priority_ceiling;
+ Priority_Node Priority_ceiling;
#if defined(RTEMS_SMP)
/**
diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
index 25094a49df..a30d1a880e 100644
--- a/cpukit/score/include/rtems/score/coremuteximpl.h
+++ b/cpukit/score/include/rtems/score/coremuteximpl.h
@@ -124,32 +124,6 @@ RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
return _CORE_mutex_Get_owner( the_mutex ) == the_thread;
}
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Restore_priority(
- Thread_Control *executing
-)
-{
- /*
- * Whether or not someone is waiting for the mutex, an
- * inherited priority must be lowered if this is the last
- * mutex (i.e. resource) this task has.
- */
- if ( !_Thread_Owns_resources( executing ) ) {
- /*
- * Ensure that the executing resource count is visible to all other
- * processors and that we read the latest priority restore hint.
- */
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
-
- if ( executing->priority_restore_hint ) {
- Per_CPU_Control *cpu_self;
-
- cpu_self = _Thread_Dispatch_disable();
- _Thread_Restore_priority( executing );
- _Thread_Dispatch_enable( cpu_self );
- }
- }
-}
-
RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
CORE_recursive_mutex_Control *the_mutex
)
@@ -212,7 +186,6 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
{
unsigned int nest_level;
Thread_queue_Heads *heads;
- bool keep_priority;
_CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
@@ -232,29 +205,19 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
--executing->resource_count;
_CORE_mutex_Set_owner( &the_mutex->Mutex, NULL );
- /*
- * Ensure that the owner resource count is visible to all other
- * processors and that we read the latest priority restore
- * hint.
- */
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
-
heads = the_mutex->Mutex.Wait_queue.Queue.heads;
- keep_priority = _Thread_Owns_resources( executing )
- || !executing->priority_restore_hint;
- if ( heads == NULL && keep_priority ) {
+ if ( heads == NULL ) {
_CORE_mutex_Release( &the_mutex->Mutex, queue_context );
return STATUS_SUCCESSFUL;
}
_Thread_queue_Surrender(
&the_mutex->Mutex.Wait_queue.Queue,
- CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS,
heads,
executing,
- keep_priority,
- queue_context
+ queue_context,
+ CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS
);
return STATUS_SUCCESSFUL;
}
@@ -349,7 +312,7 @@ RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
)
{
_CORE_recursive_mutex_Initialize( &the_mutex->Recursive );
- the_mutex->priority_ceiling = priority_ceiling;
+ _Priority_Node_initialize( &the_mutex->Priority_ceiling, priority_ceiling );
#if defined(RTEMS_SMP)
the_mutex->scheduler = scheduler;
#endif
@@ -369,17 +332,34 @@ _CORE_ceiling_mutex_Get_scheduler(
RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
CORE_ceiling_mutex_Control *the_mutex,
- Priority_Control priority_ceiling
+ Priority_Control priority_ceiling,
+ Thread_queue_Context *queue_context
)
{
- the_mutex->priority_ceiling = priority_ceiling;
+ Thread_Control *owner;
+
+ owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
+
+ if ( owner != NULL ) {
+ _Thread_Wait_acquire( owner, queue_context );
+ _Thread_Priority_change(
+ owner,
+ &the_mutex->Priority_ceiling,
+ priority_ceiling,
+ false,
+ queue_context
+ );
+ _Thread_Wait_release( owner, queue_context );
+ } else {
+ the_mutex->Priority_ceiling.priority = priority_ceiling;
+ }
}
RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
const CORE_ceiling_mutex_Control *the_mutex
)
{
- return the_mutex->priority_ceiling;
+ return the_mutex->Priority_ceiling.priority;
}
RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
@@ -388,31 +368,38 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
Thread_queue_Context *queue_context
)
{
- Priority_Control priority_ceiling;
- Priority_Control current_priority;
+ ISR_lock_Context lock_context;
+ Scheduler_Node *own_node;
Per_CPU_Control *cpu_self;
- priority_ceiling = the_mutex->priority_ceiling;
- current_priority = _Thread_Get_priority( owner );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( owner, &lock_context );
- if ( current_priority < priority_ceiling ) {
+ own_node = _Thread_Scheduler_get_own_node( owner );
+
+ if (
+ own_node->Wait.Priority.Node.priority
+ < the_mutex->Priority_ceiling.priority
+ ) {
+ _Thread_Wait_release_default_critical( owner, &lock_context );
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
return STATUS_MUTEX_CEILING_VIOLATED;
}
_CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, owner );
++owner->resource_count;
-
- if ( current_priority == priority_ceiling ) {
- _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
- return STATUS_SUCCESSFUL;
- }
+ _Thread_Priority_add(
+ owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( owner, &lock_context );
cpu_self = _Thread_Dispatch_disable_critical(
&queue_context->Lock_context.Lock_context
);
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
- _Thread_Raise_priority( owner, priority_ceiling );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
@@ -472,8 +459,10 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
Thread_queue_Context *queue_context
)
{
- unsigned int nest_level;
- Thread_Control *new_owner;
+ unsigned int nest_level;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *new_owner;
_CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
@@ -492,47 +481,50 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
--executing->resource_count;
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( executing, &lock_context );
+ _Thread_Priority_remove(
+ executing,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( executing, &lock_context );
+
new_owner = _Thread_queue_First_locked(
&the_mutex->Recursive.Mutex.Wait_queue,
CORE_MUTEX_TQ_OPERATIONS
);
_CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, new_owner );
- if ( new_owner != NULL ) {
- bool unblock;
-
- /*
- * We must extract the thread now since this will restore its default
- * thread lock. This is necessary to avoid a deadlock in the
- * _Thread_Change_priority() below due to a recursive thread queue lock
- * acquire.
- */
- unblock = _Thread_queue_Extract_locked(
- &the_mutex->Recursive.Mutex.Wait_queue.Queue,
- CORE_MUTEX_TQ_OPERATIONS,
- new_owner,
- queue_context
- );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ if ( new_owner != NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
{
++new_owner->resource_count;
- _Thread_Raise_priority( new_owner, the_mutex->priority_ceiling );
+ _Thread_Priority_add(
+ new_owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
}
- _Thread_queue_Unblock_critical(
- unblock,
+ _Thread_queue_Extract_critical(
&the_mutex->Recursive.Mutex.Wait_queue.Queue,
+ CORE_MUTEX_TQ_OPERATIONS,
new_owner,
- &queue_context->Lock_context.Lock_context
+ queue_context
);
} else {
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
}
- _CORE_mutex_Restore_priority( executing );
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
index 08a2427492..5af3a06901 100644
--- a/cpukit/score/include/rtems/score/mrsp.h
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -80,12 +80,9 @@ typedef struct {
Thread_Control *thread;
/**
- * @brief The initial priority of the thread at the begin of the resource
- * obtain sequence.
- *
- * Used to restore the priority after a release of this resource or timeout.
+ * @brief The ceiling priority used by the rival thread.
*/
- Priority_Control initial_priority;
+ Priority_Node Ceiling_priority;
/**
* @brief The initial help state of the thread at the begin of the resource
@@ -135,10 +132,9 @@ struct MRSP_Control {
Chain_Control Rivals;
/**
- * @brief The initial priority of the owner before it was elevated to the
- * ceiling priority.
+ * @brief The ceiling priority used by the owner thread.
*/
- Priority_Control initial_priority_of_owner;
+ Priority_Node Ceiling_priority;
/**
* @brief One ceiling priority per scheduler instance.
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index cc00aa3cfd..deaacf20a0 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -77,53 +77,115 @@ RTEMS_INLINE_ROUTINE void _MRSP_Release(
_Thread_queue_Release( &mrsp->Wait_queue, queue_context );
}
-RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter(
- Thread_Control *thread,
- Priority_Control *new_priority,
- void *arg
+RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
+ const MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler
)
{
- *new_priority = _Thread_Priority_highest(
- thread->real_priority,
- *new_priority
- );
+ uint32_t scheduler_index;
- return *new_priority != _Thread_Get_priority( thread );
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ return mrsp->ceiling_priorities[ scheduler_index ];
}
-RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority(
- Thread_Control *thread,
- Priority_Control initial_priority
+RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
+ MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler,
+ Priority_Control new_priority
)
{
- /*
- * The Thread_Control::resource_count is used by the normal priority ceiling
- * or priority inheritance semaphores.
- */
- if ( thread->resource_count == 0 ) {
- _Thread_Change_priority(
- thread,
- initial_priority,
- NULL,
- _MRSP_Restore_priority_filter,
- true
- );
+ uint32_t scheduler_index;
+
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Raise_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ ISR_lock_Context lock_context;
+ const Scheduler_Control *scheduler;
+ Priority_Control ceiling_priority;
+ Scheduler_Node *own_node;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+
+ scheduler = _Scheduler_Get_own( thread );
+ own_node = _Thread_Scheduler_get_own_node( thread );
+ ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
+
+ if ( ceiling_priority <= own_node->Wait.Priority.Node.priority ) {
+ _Priority_Node_initialize( priority_node, ceiling_priority );
+ _Thread_Priority_add( thread, priority_node, queue_context );
+ status = STATUS_SUCCESSFUL;
+ } else {
+ status = STATUS_MUTEX_CEILING_VIOLATED;
}
+
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+ return status;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Priority_remove( thread, priority_node, queue_context );
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ MRSP_Rival *rival
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Priority_replace(
+ thread,
+ &rival->Ceiling_priority,
+ &mrsp->Ceiling_priority
+ );
+ _Thread_Wait_release_default_critical( thread, &lock_context );
}
-RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
MRSP_Control *mrsp,
Thread_Control *new_owner,
- Priority_Control initial_priority,
- Priority_Control ceiling_priority,
Thread_queue_Context *queue_context
)
{
+ Status_Control status;
Per_CPU_Control *cpu_self;
+ status = _MRSP_Raise_priority(
+ mrsp,
+ new_owner,
+ &mrsp->Ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
_Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
_Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- mrsp->initial_priority_of_owner = initial_priority;
_Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
cpu_self = _Thread_Dispatch_disable_critical(
@@ -131,9 +193,10 @@ RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
);
_MRSP_Release( mrsp, queue_context );
- _Thread_Raise_priority( new_owner, ceiling_priority );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
@@ -178,35 +241,16 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
return STATUS_SUCCESSFUL;
}
-RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
- const MRSP_Control *mrsp,
- const Scheduler_Control *scheduler
-)
-{
- uint32_t scheduler_index;
-
- scheduler_index = _Scheduler_Get_index( scheduler );
- return mrsp->ceiling_priorities[ scheduler_index ];
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
- MRSP_Control *mrsp,
- const Scheduler_Control *scheduler,
- Priority_Control new_priority
-)
-{
- uint32_t scheduler_index;
-
- scheduler_index = _Scheduler_Get_index( scheduler );
- mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
-}
-
RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
{
- MRSP_Rival *rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
- MRSP_Control *mrsp = rival->resource;
- Thread_Control *thread = rival->thread;
- Thread_queue_Context queue_context;
+ MRSP_Rival *rival;
+ MRSP_Control *mrsp;
+ Thread_Control *thread;
+ Thread_queue_Context queue_context;
+
+ rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
+ mrsp = rival->resource;
+ thread = rival->thread;
_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
@@ -215,6 +259,8 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
ISR_lock_Context giant_lock_context;
+ _MRSP_Remove_priority( thread, &rival->Ceiling_priority, &queue_context );
+
_MRSP_Giant_acquire( &giant_lock_context );
_Chain_Extract_unprotected( &rival->Node );
@@ -228,6 +274,8 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
rival->status = STATUS_TIMEOUT;
_MRSP_Release( mrsp, &queue_context );
+
+ _Thread_Priority_update( &queue_context );
} else {
_MRSP_Release( mrsp, &queue_context );
}
@@ -237,23 +285,34 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
MRSP_Control *mrsp,
Resource_Node *owner,
Thread_Control *executing,
- Priority_Control initial_priority,
- Priority_Control ceiling_priority,
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- MRSP_Rival rival;
- Thread_Life_state life_state;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
- ISR_Level level;
- Watchdog_Interval timeout = queue_context->timeout;
+ Status_Control status;
+ MRSP_Rival rival;
+ Thread_Life_state life_state;
+ Per_CPU_Control *cpu_self;
+ ISR_lock_Context giant_lock_context;
+ ISR_Level level;
+ Watchdog_Interval timeout;
+
_Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );
+ status = _MRSP_Raise_priority(
+ mrsp,
+ executing,
+ &rival.Ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
rival.thread = executing;
rival.resource = mrsp;
- rival.initial_priority = initial_priority;
+ _Chain_Initialize_node( &rival.Node );
_MRSP_Giant_acquire( &giant_lock_context );
@@ -277,7 +336,9 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
);
_MRSP_Release( mrsp, queue_context );
- _Thread_Raise_priority( executing, ceiling_priority );
+ _Thread_Priority_update( queue_context );
+
+ timeout = (Watchdog_Interval) queue_context->timeout;
if ( timeout > 0 ) {
_Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
@@ -307,10 +368,6 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
&cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
);
_ISR_Local_enable( level );
-
- if ( status == STATUS_TIMEOUT ) {
- _MRSP_Restore_priority( executing, initial_priority );
- }
}
return status;
@@ -323,44 +380,20 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
- Priority_Control initial_priority = _Thread_Get_priority( executing );
- Priority_Control ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
- bool priority_ok = !_Thread_Priority_less_than(
- ceiling_priority,
- initial_priority
- );
- Resource_Node *owner;
-
- if ( !priority_ok) {
- _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
- return STATUS_MUTEX_CEILING_VIOLATED;
- }
+ Status_Control status;
+ Resource_Node *owner;
_MRSP_Acquire_critical( mrsp, queue_context );
+
owner = _Resource_Get_owner( &mrsp->Resource );
+
if ( owner == NULL ) {
- _MRSP_Claim_ownership(
- mrsp,
- executing,
- initial_priority,
- ceiling_priority,
- queue_context
- );
- status = STATUS_SUCCESSFUL;
+ status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
} else if (
wait
&& _Resource_Node_get_root( owner ) != &executing->Resource_node
) {
- status = _MRSP_Wait_for_ownership(
- mrsp,
- owner,
- executing,
- initial_priority,
- ceiling_priority,
- queue_context
- );
+ status = _MRSP_Wait_for_ownership( mrsp, owner, executing, queue_context );
} else {
_MRSP_Release( mrsp, queue_context );
/* Not available, nested access or deadlock */
@@ -376,9 +409,8 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
Thread_queue_Context *queue_context
)
{
- Priority_Control initial_priority;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
+ ISR_lock_Context giant_lock_context;
+ Per_CPU_Control *cpu_self;
if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
_ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
@@ -395,10 +427,8 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
return STATUS_RELEASE_ORDER_VIOLATION;
}
- initial_priority = mrsp->initial_priority_of_owner;
-
_MRSP_Acquire_critical( mrsp, queue_context );
-
+ _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
_MRSP_Giant_acquire( &giant_lock_context );
_Resource_Extract( &mrsp->Resource );
@@ -406,10 +436,11 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
_Resource_Set_owner( &mrsp->Resource, NULL );
} else {
- MRSP_Rival *rival = (MRSP_Rival *)
- _Chain_Get_first_unprotected( &mrsp->Rivals );
+ MRSP_Rival *rival;
Thread_Control *new_owner;
+ rival = (MRSP_Rival *) _Chain_Get_first_unprotected( &mrsp->Rivals );
+
/*
* This must be inside the critical section since the status prevents a
* potential double extraction in _MRSP_Timeout().
@@ -417,7 +448,9 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
rival->status = STATUS_SUCCESSFUL;
new_owner = rival->thread;
- mrsp->initial_priority_of_owner = rival->initial_priority;
+
+ _MRSP_Replace_priority( mrsp, new_owner, rival );
+
_Resource_Node_extract( &new_owner->Resource_node );
_Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
_Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
@@ -437,7 +470,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
);
_MRSP_Release( mrsp, queue_context );
- _MRSP_Restore_priority( executing, initial_priority );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
diff --git a/cpukit/score/include/rtems/score/priority.h b/cpukit/score/include/rtems/score/priority.h
index 842f01706c..595aa3ebcf 100644
--- a/cpukit/score/include/rtems/score/priority.h
+++ b/cpukit/score/include/rtems/score/priority.h
@@ -1,17 +1,15 @@
/**
- * @file rtems/score/priority.h
+ * @file
*
- * @brief Thread Priority Manipulation Routines
- *
- * This include file contains all thread priority manipulation routines.
- * This Handler provides mechanisms which can be used to
- * initialize and manipulate thread priorities.
+ * @brief Priority Handler API
*/
/*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2016 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
@@ -20,58 +18,169 @@
#ifndef _RTEMS_SCORE_PRIORITY_H
#define _RTEMS_SCORE_PRIORITY_H
-/**
- * @defgroup ScorePriority Priority Handler
- *
- * @ingroup Score
- *
- * This handler encapsulates functionality which is used to manage
- * thread priorities. At the SuperCore level 256 priority levels
- * are supported with lower numbers representing logically more important
- * threads. The priority level 0 is reserved for internal RTEMS use.
- * Typically it is assigned to threads which defer internal RTEMS
- * actions from an interrupt to thread level to improve interrupt response.
- * Priority level 255 is assigned to the IDLE thread and really should not
- * be used by application threads. The default IDLE thread implementation
- * is an infinite "branch to self" loop which never yields to other threads
- * at the same priority.
- */
-/**@{*/
-
-/*
- * Processor specific information.
- */
+#include <rtems/score/chain.h>
#include <rtems/score/cpu.h>
+#include <rtems/score/rbtree.h>
+
+struct Scheduler_Control;
#ifdef __cplusplus
extern "C" {
#endif
/**
- * The following type defines the control block used to manage
- * thread priorities.
+ * @defgroup ScorePriority Priority Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is used to manage thread
+ * priorities. The actual priority of a thread is an aggregation of priority
+ * nodes. The thread priority aggregation for the home scheduler instance of a
+ * thread consists of at least one priority node, which is normally the real
+ * priority of the thread. The locking protocols (e.g. priority ceiling and
+ * priority inheritance), rate-monotonic period objects and the POSIX sporadic
+ * server add, change and remove priority nodes.
+ *
+ * @{
+ */
+
+/**
+ * @brief A plain thread priority value.
*
- * @note Priority 0 is reserved for internal threads only.
+ * Lower values represent higher priorities. So, a priority value of zero
+ * represents the highest priority thread. This value is reserved for internal
+ * threads and the priority ceiling protocol.
*/
typedef uint64_t Priority_Control;
-/** This defines the highest (most important) thread priority. */
+/**
+ * @brief The highest (most important) thread priority value.
+ */
#define PRIORITY_MINIMUM 0
/**
- * @brief This defines the priority of pseudo-ISR threads.
+ * @brief The priority value of pseudo-ISR threads.
*
* Examples are the MPCI and timer server threads.
*/
#define PRIORITY_PSEUDO_ISR PRIORITY_MINIMUM
-/** This defines the default lowest (least important) thread priority. */
+/**
+ * @brief The default lowest (least important) thread priority value.
+ *
+ * This value is CPU port dependent.
+ */
#if defined (CPU_PRIORITY_MAXIMUM)
#define PRIORITY_DEFAULT_MAXIMUM CPU_PRIORITY_MAXIMUM
#else
#define PRIORITY_DEFAULT_MAXIMUM 255
#endif
+/**
+ * @brief The priority node to build up a priority aggregation.
+ */
+typedef struct {
+ /**
+ * @brief Node component for a chain or red-black tree.
+ */
+ union {
+ Chain_Node Chain;
+ RBTree_Node RBTree;
+ } Node;
+
+ /**
+ * @brief The priority value of this node.
+ */
+ Priority_Control priority;
+} Priority_Node;
+
+/**
+ * @brief The priority action type.
+ */
+typedef enum {
+ PRIORITY_ACTION_ADD,
+ PRIORITY_ACTION_CHANGE,
+ PRIORITY_ACTION_REMOVE,
+ PRIORITY_ACTION_INVALID
+} Priority_Action_type;
+
+typedef struct Priority_Aggregation Priority_Aggregation;
+
+/**
+ * @brief The priority aggregation.
+ *
+ * This structure serves two purposes. Firstly, it provides a place to
+ * register priority nodes and reflects the overall priority of its
+ * contributors. Secondly, it provides an action block to signal addition,
+ * change and removal of a priority node.
+ */
+struct Priority_Aggregation {
+ /**
+ * @brief This priority node reflects the overall priority of the aggregation.
+ *
+ * The overall priority of the aggregation is the minimum priority of the
+ * priority nodes in the contributors tree.
+ *
+ * This priority node may be used to add this aggregation to another
+ * aggregation to build up a recursive priority scheme.
+ *
+   * In case priority nodes of the contributors tree are added, changed or
+   * removed, the priority of this node may change. To signal such changes to
+   * a priority aggregation, the action block may be used.
+ */
+ Priority_Node Node;
+
+ /**
+ * @brief A red-black tree to contain priority nodes contributing to the
+ * overall priority of this priority aggregation.
+ */
+ RBTree_Control Contributors;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The scheduler instance of this priority aggregation.
+ */
+ const struct Scheduler_Control *scheduler;
+#endif
+
+ /**
+ * @brief A priority action block to manage priority node additions, changes
+ * and removals.
+ */
+ struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The next priority aggregation in the action list.
+ */
+ Priority_Aggregation *next;
+#endif
+
+ /**
+ * @brief The priority node of the action.
+ */
+ Priority_Node *node;
+
+ /**
+ * @brief The type of the action.
+ */
+ Priority_Action_type type;
+ } Action;
+};
+
+/**
+ * @brief A list of priority actions.
+ *
+ * Actions are only added to the list. The action lists reside on the stack
+ * and have a short life-time. They are moved, processed or destroyed as a
+ * whole.
+ */
+typedef struct {
+ /**
+ * @brief The first action of a priority action list.
+ */
+ Priority_Aggregation *actions;
+} Priority_Actions;
+
#ifdef __cplusplus
}
#endif
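
To make the aggregation rule above concrete: with contributors of value 200 (the real priority) and 100 (for example a priority ceiling), the overall priority is 100; once the 100 contributor is removed, it falls back to 200. The following self-contained C sketch models only this minimum rule and is not RTEMS code; all names and values are illustrative.

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t Priority_Control;

/* Stand-in for the contributors tree: the overall priority is the minimum */
static Priority_Control contributors_minimum(
  const Priority_Control *contributors,
  size_t                  count
)
{
  Priority_Control minimum;
  size_t           i;

  minimum = contributors[ 0 ];

  for ( i = 1; i < count; ++i ) {
    if ( contributors[ i ] < minimum ) {
      minimum = contributors[ i ];
    }
  }

  return minimum;
}

int main( void )
{
  Priority_Control with_ceiling[] = { 200, 100 };
  Priority_Control real_only[] = { 200 };

  /* Prints 100: the ceiling contributor raises the thread importance */
  printf( "%" PRIu64 "\n", contributors_minimum( with_ceiling, 2 ) );
  /* Prints 200: without the ceiling only the real priority remains */
  printf( "%" PRIu64 "\n", contributors_minimum( real_only, 1 ) );

  return 0;
}
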
diff --git a/cpukit/score/include/rtems/score/priorityimpl.h b/cpukit/score/include/rtems/score/priorityimpl.h
new file mode 100644
index 0000000000..3380983cb7
--- /dev/null
+++ b/cpukit/score/include/rtems/score/priorityimpl.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITYIMPL_H
+#define _RTEMS_SCORE_PRIORITYIMPL_H
+
+#include <rtems/score/priority.h>
+#include <rtems/score/scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_empty(
+ Priority_Actions *actions
+)
+{
+ actions->actions = NULL;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_one(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_empty(
+ const Priority_Actions *actions
+)
+{
+ return actions->actions == NULL;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_valid(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation != NULL;
+#else
+ (void) aggregation;
+ return false;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Actions_move(
+ Priority_Actions *actions
+)
+{
+ Priority_Aggregation *aggregation;
+
+ aggregation = actions->actions;
+ actions->actions = NULL;
+
+ return aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_add(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ /*
+   * Priority aggregations are only added to action lists, so there is no
+   * need to care about the current next pointer value.
+ */
+ aggregation->Action.next = actions->actions;
+#endif
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_initialize(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+ _RBTree_Initialize_node( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_inactive(
+ Priority_Node *node
+)
+{
+ _RBTree_Set_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Node_is_active(
+ const Priority_Node *node
+)
+{
+ return !_RBTree_Is_node_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_empty(
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _RBTree_Initialize_node( &aggregation->Node.Node.RBTree );
+ _RBTree_Initialize_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _Priority_Node_initialize( &aggregation->Node, node->priority );
+ _RBTree_Initialize_one( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(
+ const Priority_Aggregation *aggregation
+)
+{
+ return _RBTree_Is_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _Priority_Get_priority(
+ const Priority_Aggregation *aggregation
+)
+{
+ return aggregation->Node.priority;
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Priority_Get_scheduler(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->scheduler;
+#else
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Node *_Priority_Get_minimum_node(
+ const Priority_Aggregation *aggregation
+)
+{
+ return (Priority_Node *) _RBTree_Minimum( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_node(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ aggregation->Action.node = node;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_type(
+ Priority_Aggregation *aggregation,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Get_next_action(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->Action.next;
+#else
+ (void) aggregation;
+ return NULL;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Less(
+ const void *left,
+ const RBTree_Node *right
+)
+{
+ const Priority_Control *the_left;
+ const Priority_Node *the_right;
+
+ the_left = left;
+ the_right = RTEMS_CONTAINER_OF( right, Priority_Node, Node.RBTree );
+
+ return *the_left < the_right->priority;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ return _RBTree_Insert_inline(
+ &aggregation->Contributors,
+ &node->Node.RBTree,
+ &priority,
+ _Priority_Less
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _RBTree_Extract( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+ _Priority_Plain_insert( aggregation, node, node->priority );
+}
+
+typedef void ( *Priority_Add_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Change_handler )(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Remove_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+RTEMS_INLINE_ROUTINE void _Priority_Change_nothing(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) prepend_it;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Remove_nothing(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Non_empty_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ bool is_new_minimum;
+
+ _Assert( !_Priority_Is_empty( aggregation ) );
+ is_new_minimum = _Priority_Plain_insert( aggregation, node, node->priority );
+
+ if ( is_new_minimum ) {
+ aggregation->Node.priority = node->priority;
+ ( *change )( aggregation, false, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Add_handler add,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ if ( _Priority_Is_empty( aggregation ) ) {
+ _Priority_Initialize_one( aggregation, node );
+ ( *add )( aggregation, actions, arg );
+ } else {
+ _Priority_Non_empty_insert( aggregation, node, actions, change, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Remove_handler remove,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+
+ if ( _Priority_Is_empty( aggregation ) ) {
+ ( *remove )( aggregation, actions, arg );
+ } else {
+ Priority_Node *min;
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract_non_empty(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_extract( aggregation, node );
+ _Assert( !_Priority_Is_empty( aggregation ) );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ bool prepend_it,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_changed( aggregation, node );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( min->priority != aggregation->Node.priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, prepend_it, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Replace(
+ Priority_Aggregation *aggregation,
+ Priority_Node *victim,
+ Priority_Node *replacement
+)
+{
+ replacement->priority = victim->priority;
+ _RBTree_Replace_node(
+ &aggregation->Contributors,
+ &victim->Node.RBTree,
+ &replacement->Node.RBTree
+ );
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PRIORITYIMPL_H */
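
The inline routines above can be combined as in the following sketch. It is not code from the tree: it assumes an RTEMS score build environment, uses the do-nothing change and remove handlers because no enclosing aggregation is involved, and the priority values 200 and 100 are arbitrary examples.

#include <rtems/score/priorityimpl.h>

static void example_aggregation( void )
{
  Priority_Aggregation aggregation;
  Priority_Node        real_priority;
  Priority_Node        ceiling;
  Priority_Actions     actions;

  /* The aggregation starts with one contributor, e.g. the real priority */
  _Priority_Node_initialize( &real_priority, 200 );
  _Priority_Initialize_one( &aggregation, &real_priority );

  /* A locking protocol adds a higher priority (lower value) contributor */
  _Priority_Node_initialize( &ceiling, 100 );
  _Priority_Actions_initialize_empty( &actions );
  _Priority_Non_empty_insert(
    &aggregation,
    &ceiling,
    &actions,
    _Priority_Change_nothing,
    NULL
  );
  /* _Priority_Get_priority( &aggregation ) is now 100 */

  /* Removing the contributor restores the real priority as the minimum */
  _Priority_Extract(
    &aggregation,
    &ceiling,
    &actions,
    _Priority_Remove_nothing,
    _Priority_Change_nothing,
    NULL
  );
  /* _Priority_Get_priority( &aggregation ) is back to 200 */
}
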
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index bbb4e37240..b934269188 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -135,16 +135,20 @@ typedef struct {
void ( *node_destroy )( const Scheduler_Control *, Scheduler_Node * );
/** @see _Scheduler_Release_job() */
- Thread_Control *( *release_job ) (
+ void ( *release_job ) (
const Scheduler_Control *,
Thread_Control *,
- uint64_t
+ Priority_Node *,
+ uint64_t,
+ Thread_queue_Context *
);
/** @see _Scheduler_Cancel_job() */
- Thread_Control *( *cancel_job ) (
+ void ( *cancel_job ) (
const Scheduler_Control *,
- Thread_Control *
+ Thread_Control *,
+ Priority_Node *,
+ Thread_queue_Context *
);
/** @see _Scheduler_Tick() */
@@ -373,14 +377,18 @@ void _Scheduler_default_Node_destroy(
*
* @param[in] scheduler Unused.
* @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
* @param[in] deadline Unused.
+ * @param[in] queue_context Unused.
*
* @retval NULL Always.
*/
-Thread_Control *_Scheduler_default_Release_job(
+void _Scheduler_default_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
);
/**
@@ -388,12 +396,16 @@ Thread_Control *_Scheduler_default_Release_job(
*
* @param[in] scheduler Unused.
* @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
+ * @param[in] queue_context Unused.
*
* @retval NULL Always.
*/
-Thread_Control *_Scheduler_default_Cancel_job(
+void _Scheduler_default_Cancel_job(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
/**
diff --git a/cpukit/score/include/rtems/score/schedulercbs.h b/cpukit/score/include/rtems/score/schedulercbs.h
index c230e08d24..02c7b53e12 100644
--- a/cpukit/score/include/rtems/score/schedulercbs.h
+++ b/cpukit/score/include/rtems/score/schedulercbs.h
@@ -61,7 +61,7 @@ extern "C" {
_Scheduler_CBS_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_CBS_Release_job, /* new period of task */ \
- _Scheduler_EDF_Cancel_job, /* cancel period of task */ \
+ _Scheduler_CBS_Cancel_job, /* cancel period of task */ \
_Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
@@ -135,6 +135,8 @@ typedef struct {
Scheduler_EDF_Node Base;
/** CBS server specific data of a task. */
Scheduler_CBS_Server *cbs_server;
+
+ Priority_Node *deadline_node;
} Scheduler_CBS_Node;
@@ -163,10 +165,19 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
Thread_Control *the_thread
);
-Thread_Control *_Scheduler_CBS_Release_job(
+void _Scheduler_CBS_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+);
+
+void _Scheduler_CBS_Cancel_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t length
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
/**
diff --git a/cpukit/score/include/rtems/score/scheduleredf.h b/cpukit/score/include/rtems/score/scheduleredf.h
index 81b245e391..ab43672ec1 100644
--- a/cpukit/score/include/rtems/score/scheduleredf.h
+++ b/cpukit/score/include/rtems/score/scheduleredf.h
@@ -89,24 +89,14 @@ typedef struct {
Scheduler_Node Base;
/**
- * Pointer to corresponding Thread Control Block.
- */
- Thread_Control *thread;
- /**
* Rbtree node related to this thread.
*/
RBTree_Node Node;
/**
- * @brief The thread priority used by this scheduler instance in case no job
- * is released.
+ * @brief The thread priority currently used for this scheduler instance.
*/
- Priority_Control background_priority;
-
- /**
- * @brief The thread priority currently used by this scheduler instance.
- */
- Priority_Control current_priority;
+ Priority_Control priority;
} Scheduler_EDF_Node;
/**
@@ -215,15 +205,19 @@ Scheduler_Void_or_thread _Scheduler_EDF_Yield(
Thread_Control *the_thread
);
-Thread_Control *_Scheduler_EDF_Release_job(
+void _Scheduler_EDF_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
);
-Thread_Control *_Scheduler_EDF_Cancel_job(
+void _Scheduler_EDF_Cancel_job(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
#ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/scheduleredfimpl.h b/cpukit/score/include/rtems/score/scheduleredfimpl.h
index 844d745d54..61aceddf19 100644
--- a/cpukit/score/include/rtems/score/scheduleredfimpl.h
+++ b/cpukit/score/include/rtems/score/scheduleredfimpl.h
@@ -74,7 +74,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
- prio_right = the_right->current_priority;
+ prio_right = the_right->priority;
return prio_left < prio_right;
}
@@ -93,7 +93,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less_or_equal(
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
- prio_right = the_right->current_priority;
+ prio_right = the_right->priority;
return prio_left <= prio_right;
}
@@ -101,13 +101,13 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less_or_equal(
RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node,
- Priority_Control current_priority
+ Priority_Control priority
)
{
_RBTree_Insert_inline(
&context->Ready,
&node->Node,
- &current_priority,
+ &priority,
_Scheduler_EDF_Less
);
}
@@ -115,13 +115,13 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue_first(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node,
- Priority_Control current_priority
+ Priority_Control priority
)
{
_RBTree_Insert_inline(
&context->Ready,
&node->Node,
- &current_priority,
+ &priority,
_Scheduler_EDF_Less_or_equal
);
}
@@ -164,7 +164,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Schedule_body(
first = _RBTree_Minimum( &context->Ready );
node = RTEMS_CONTAINER_OF( first, Scheduler_EDF_Node, Node );
- _Scheduler_Update_heir( node->thread, force_dispatch );
+ _Scheduler_Update_heir( node->Base.owner, force_dispatch );
}
/**@}*/
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index bbad6e0a36..dea1888a51 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -22,6 +22,7 @@
#include <rtems/score/scheduler.h>
#include <rtems/score/cpusetimpl.h>
+#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>
@@ -99,17 +100,6 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
return _Scheduler_Get_by_CPU_index( cpu_index );
}
-RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
- const Thread_Control *the_thread
-)
-{
-#if defined(RTEMS_SMP)
- return the_thread->Scheduler.own_node;
-#else
- return the_thread->Scheduler.node;
-#endif
-}
-
ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
/**
@@ -264,7 +254,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
needs_help != NULL
&& _Resource_Node_owns_resources( &needs_help->Resource_node )
) {
- Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );
+ Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );
if (
node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
@@ -486,21 +476,27 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
* @brief Releases a job of a thread with respect to the scheduler.
*
* @param[in] the_thread The thread.
+ * @param[in] priority_node The priority node of the job.
* @param[in] deadline The deadline in watchdog ticks since boot.
- *
- * @return The thread to hand over to _Thread_Update_priority().
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_job(
- Thread_Control *the_thread,
- uint64_t deadline
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
)
{
const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
- return ( *scheduler->Operations.release_job )(
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.release_job )(
scheduler,
the_thread,
- deadline
+ priority_node,
+ deadline,
+ queue_context
);
}
@@ -508,16 +504,25 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_job(
* @brief Cancels a job of a thread with respect to the scheduler.
*
* @param[in] the_thread The thread.
- *
- * @return The thread to hand over to _Thread_Update_priority().
+ * @param[in] priority_node The priority node of the job.
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Cancel_job(
- Thread_Control *the_thread
+RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
- return ( *scheduler->Operations.cancel_job )( scheduler, the_thread );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.cancel_job )(
+ scheduler,
+ the_thread,
+ priority_node,
+ queue_context
+ );
}
/**
@@ -776,12 +781,10 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
bool prepend_it
)
{
- Scheduler_Node *own_node;
-
- own_node = _Scheduler_Thread_get_own_node( the_thread );
- _Scheduler_Node_set_priority( own_node, new_priority, prepend_it );
+ Scheduler_Node *scheduler_node;
- the_thread->current_priority = new_priority;
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}
#if defined(RTEMS_SMP)
@@ -857,7 +860,7 @@ RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
Scheduler_Help_state new_help_state
)
{
- Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
+ Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
Scheduler_Help_state previous_help_state = node->help_state;
node->help_state = new_help_state;
@@ -1294,7 +1297,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
_Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
_Scheduler_Thread_set_scheduler_and_node(
old_user,
- _Scheduler_Thread_get_own_node( old_user ),
+ _Thread_Scheduler_get_own_node( old_user ),
old_user
);
@@ -1383,11 +1386,24 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
return STATUS_RESOURCE_IN_USE;
}
- the_thread->current_priority = priority;
- the_thread->real_priority = priority;
- the_thread->Start.initial_priority = priority;
+ own_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Priority_Plain_extract( &own_node->Wait.Priority, &the_thread->Real_priority );
- own_node = _Scheduler_Thread_get_own_node( the_thread );
+ if ( !_Priority_Is_empty( &own_node->Wait.Priority ) ) {
+ _Priority_Plain_insert(
+ &own_node->Wait.Priority,
+ &the_thread->Real_priority,
+ the_thread->Real_priority.priority
+ );
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+ the_thread->Start.initial_priority = priority;
+ _Priority_Node_set_priority( &the_thread->Real_priority, priority );
+ _Priority_Initialize_one(
+ &own_node->Wait.Priority,
+ &the_thread->Real_priority
+ );
#if defined(RTEMS_SMP)
{
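
A usage sketch for the reworked job operations, not taken from this change set: the scheduler operation records the threads whose priority changed in the thread queue context, and the caller then applies them via _Thread_Priority_update(). The helper name is hypothetical, the priority node is assumed to be owned by the caller (for example a rate-monotonic period object), and the locking a real caller would perform is omitted for brevity.

#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>

static void example_release_job(
  Thread_Control *the_thread,
  Priority_Node  *priority_node,
  uint64_t        deadline
)
{
  Thread_queue_Context queue_context;

  /* Records the threads in need of a priority update in the queue context */
  _Scheduler_Release_job( the_thread, priority_node, deadline, &queue_context );

  /* Carries out the deferred scheduler updates for the recorded threads */
  _Thread_Priority_update( &queue_context );
}
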
diff --git a/cpukit/score/include/rtems/score/schedulernode.h b/cpukit/score/include/rtems/score/schedulernode.h
index 293887c4b7..9827d21e64 100644
--- a/cpukit/score/include/rtems/score/schedulernode.h
+++ b/cpukit/score/include/rtems/score/schedulernode.h
@@ -98,7 +98,7 @@ typedef enum {
/**
* @brief Scheduler node for per-thread data.
*/
-typedef struct {
+typedef struct Scheduler_Node {
#if defined(RTEMS_SMP)
/**
* @brief Chain node for usage in various scheduler data structures.
@@ -144,24 +144,7 @@ typedef struct {
* @brief Thread wait support block.
*/
struct {
- /**
- * @brief Node for thread queues.
- *
- * Each scheduler node can be enqueued on a thread queue on behalf of the
- * thread owning the scheduler node. The scheduler node reflects the
- * priority of the thread within the corresponding scheduler instance.
- */
- union {
- /**
- * @brief A node for chains.
- */
- Chain_Node Chain;
-
- /**
- * @brief A node for red-black trees.
- */
- RBTree_Node RBTree;
- } Node;
+ Priority_Aggregation Priority;
} Wait;
/**
diff --git a/cpukit/score/include/rtems/score/schedulernodeimpl.h b/cpukit/score/include/rtems/score/schedulernodeimpl.h
index ecb61c239a..b0f7d77bbe 100644
--- a/cpukit/score/include/rtems/score/schedulernodeimpl.h
+++ b/cpukit/score/include/rtems/score/schedulernodeimpl.h
@@ -17,20 +17,23 @@
#include <rtems/score/schedulernode.h>
+struct Scheduler_Control;
+
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-#define SCHEDULER_NODE_OF_WAIT_CHAIN_NODE( node ) \
- RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Node.Chain )
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority.Node.Node.Chain )
-#define SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( node ) \
- RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Node.RBTree )
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority )
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
- Scheduler_Node *node,
- Thread_Control *the_thread,
- Priority_Control priority
+ const struct Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
)
{
node->owner = the_thread;
@@ -39,12 +42,14 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
node->Priority.prepend_it = false;
#if defined(RTEMS_SMP)
+ node->Wait.Priority.scheduler = scheduler;
node->user = the_thread;
node->help_state = SCHEDULER_HELP_YOURSELF;
node->idle = NULL;
node->accepts_help = the_thread;
_SMP_sequence_lock_Initialize( &node->Priority.Lock );
#else
+ (void) scheduler;
(void) the_thread;
#endif
}
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index ae1941aa43..035df46d65 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -375,7 +375,7 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
Thread_Control *thread
)
{
- return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
+ return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
}
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
@@ -386,12 +386,13 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
}
static inline void _Scheduler_SMP_Node_initialize(
- Scheduler_SMP_Node *node,
- Thread_Control *thread,
- Priority_Control priority
+ const Scheduler_Control *scheduler,
+ Scheduler_SMP_Node *node,
+ Thread_Control *thread,
+ Priority_Control priority
)
{
- _Scheduler_Node_do_initialize( &node->Base, thread, priority );
+ _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
node->state = SCHEDULER_SMP_NODE_BLOCKED;
node->priority = priority;
}
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 2cd229f76f..393d431e0e 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -373,7 +373,7 @@ typedef struct {
*
* The thread wait lock mechanism protects the following thread variables
* - POSIX_API_Control::Attributes,
- * - Thread_Control::current_priority,
+ * - Scheduler_Node::Wait,
* - Thread_Control::Wait::Lock::Pending_requests,
* - Thread_Control::Wait::queue, and
* - Thread_Control::Wait::operations.
@@ -461,32 +461,11 @@ typedef struct {
/** This field is the current execution state of this proxy. */
States_Control current_state;
- /**
- * @brief This field is the current priority state of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control current_priority;
/**
- * @brief This field is the base priority of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
+ * @brief The base priority of this thread in its home scheduler instance.
*/
- Priority_Control real_priority;
-
- /**
- * @brief Hints if a priority restore is necessary once the resource count
- * changes from one to zero.
- *
- * This is an optimization to speed up the mutex surrender sequence in case
- * no attempt to change the priority was made during the mutex ownership. On
- * SMP configurations atomic fences must synchronize writes to
- * Thread_Control::priority_restore_hint and Thread_Control::resource_count.
- */
- bool priority_restore_hint;
+ Priority_Node Real_priority;
/** This field is the number of mutexes currently held by this proxy. */
uint32_t resource_count;
@@ -708,31 +687,9 @@ struct _Thread_Control {
States_Control current_state;
/**
- * @brief This field is the current priority state of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control current_priority;
-
- /**
- * @brief This field is the base priority of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control real_priority;
-
- /**
- * @brief Hints if a priority restore is necessary once the resource count
- * changes from one to zero.
- *
- * This is an optimization to speed up the mutex surrender sequence in case
- * no attempt to change the priority was made during the mutex ownership. On
- * SMP configurations atomic fences must synchronize writes to
- * Thread_Control::priority_restore_hint and Thread_Control::resource_count.
+ * @brief The base priority of this thread in its home scheduler instance.
*/
- bool priority_restore_hint;
+ Priority_Node Real_priority;
/** This field is the number of mutexes currently held by this thread. */
uint32_t resource_count;
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 1fce842533..7f9dccf5e2 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -415,128 +415,185 @@ RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
#endif
/**
- * @brief Returns true if the left thread priority is less than the right
- * thread priority in the intuitive sense of priority and false otherwise.
+ * @brief Performs the priority actions specified by the thread queue context
+ * along the thread queue path.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param start_of_path The start thread of the thread queue path.
+ * @param queue_context The thread queue context specifying the thread queue
+ * path and initial thread priority actions.
+ *
+ * @see _Thread_queue_Path_acquire_critical().
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
- Priority_Control left,
- Priority_Control right
-)
-{
- return left > right;
-}
+void _Thread_Priority_perform_actions(
+ Thread_Control *start_of_path,
+ Thread_queue_Context *queue_context
+);
/**
- * @brief Returns the highest priority of the left and right thread priorities
- * in the intuitive sense of priority.
+ * @brief Adds the specified thread priority node to the corresponding thread
+ * priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to add.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
- Priority_Control left,
- Priority_Control right
-)
-{
- return _Thread_Priority_less_than( left, right ) ? right : left;
-}
+void _Thread_Priority_add(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
/**
- * @brief Filters a thread priority change.
+ * @brief Removes the specified thread priority node from the corresponding
+ * thread priority aggregation.
*
- * Called by _Thread_Change_priority() under the protection of the thread lock.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in, out] new_priority The new priority of the thread. The filter may
- * alter this value.
- * @param[in] arg The argument passed to _Thread_Change_priority().
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to remove.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
*
- * @retval true Change the current priority.
- * @retval false Otherwise.
+ * @see _Thread_Wait_acquire().
*/
-typedef bool ( *Thread_Change_priority_filter )(
- Thread_Control *the_thread,
- Priority_Control *new_priority,
- void *arg
-);
-
-Thread_Control *_Thread_Apply_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+void _Thread_Priority_remove(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
-void _Thread_Update_priority( Thread_Control *the_thread );
-
/**
- * @brief Changes the priority of a thread if allowed by the filter function.
+ * @brief Propagates a thread priority value change in the specified thread
+ * priority node to the corresponding thread priority aggregation.
*
- * It changes current priority of the thread to the new priority in case the
- * filter function returns true. In this case the scheduler is notified of the
- * priority change as well.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
- * @param[in] arg The argument for the filter function.
- * @param[in] filter The filter function to determine if a priority change is
- * allowed and optionally perform other actions under the protection of the
- * thread lock simultaneously with the update of the current priority.
- * @param[in] prepend_it In case this is true, then the thread is prepended to
- * its priority group in its scheduler instance, otherwise it is appended.
- */
-void _Thread_Change_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_changed(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
);
/**
- * @brief Raises the priority of a thread.
+ * @brief Changes the thread priority value of the specified thread priority
+ * node in the corresponding thread priority aggregation.
*
- * It changes the current priority of the thread to the new priority if the new
- * priority is higher than the current priority. In this case the thread is
- * appended to its new priority group in its scheduler instance.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param new_priority The new thread priority value of the thread priority
+ * node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
*
- * @see _Thread_Change_priority().
+ * @see _Thread_Wait_acquire().
*/
-void _Thread_Raise_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority
-);
+RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Priority_Control new_priority,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
+)
+{
+ _Priority_Node_set_priority( priority_node, new_priority );
+ _Thread_Priority_changed(
+ the_thread,
+ priority_node,
+ prepend_it,
+ queue_context
+ );
+}
/**
- * @brief Sets the current to the real priority of a thread.
+ * @brief Replaces the victim priority node with the replacement priority node
+ * in the corresponding thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
*
- * Sets the priority restore hint to false.
+ * @param the_thread The thread.
+ * @param victim_node The victim thread priority node.
+ * @param replacement_node The replacement thread priority node.
+ *
+ * @see _Thread_Wait_acquire().
*/
-void _Thread_Restore_priority( Thread_Control *the_thread );
+void _Thread_Priority_replace(
+ Thread_Control *the_thread,
+ Priority_Node *victim_node,
+ Priority_Node *replacement_node
+);
/**
- * @brief Sets the priority of a thread.
+ * @brief Updates the priority of all threads recorded in the thread queue
+ *   context.
*
- * It sets the real priority of the thread. In addition it changes the current
- * priority of the thread if the new priority is higher than the current
- * priority or the thread owns no resources.
+ * The priorities of the recorded threads are propagated to their scheduler
+ * instances.
*
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
- * @param[out] old_priority The old real priority of the thread. This pointer
- * must not be @c NULL.
- * @param[in] prepend_it In case this is true, then the thread is prepended to
- * its priority group in its scheduler instance, otherwise it is appended.
+ * @param queue_context The thread queue context with the set of threads
+ *   recorded by _Thread_Priority_add(), _Thread_Priority_remove(),
+ *   _Thread_Priority_changed() and _Thread_Priority_change().
*
- * @see _Thread_Change_priority().
+ * @see _Thread_Priority_add(), _Thread_Priority_change(),
+ * _Thread_Priority_changed() and _Thread_Priority_remove().
*/
-void _Thread_Set_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- Priority_Control *old_priority,
- bool prepend_it
-);
+void _Thread_Priority_update( Thread_queue_Context *queue_context );
+
+/**
+ * @brief Returns true if the left thread priority is less than the right
+ * thread priority in the intuitive sense of priority and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return left > right;
+}
+
+/**
+ * @brief Returns the highest priority of the left and right thread priorities
+ * in the intuitive sense of priority.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return _Thread_Priority_less_than( left, right ) ? right : left;
+}
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
Objects_Id id
@@ -929,6 +986,17 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
return owns_resources;
}
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return the_thread->Scheduler.own_node;
+#else
+ return the_thread->Scheduler.node;
+#endif
+}
+
/**
* @brief Returns the priority of the thread.
*
@@ -937,14 +1005,15 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
* protocols, a job release or the POSIX sporadic server for example.
*
* @return The priority of the thread.
- *
- * @see _Scheduler_Node_get_priority().
*/
RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
const Thread_Control *the_thread
)
{
- return the_thread->current_priority;
+ Scheduler_Node *scheduler_node;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ return _Priority_Get_priority( &scheduler_node->Wait.Priority );
}
/**
@@ -1389,7 +1458,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
_Assert( queue_context->Lock_context.Wait.queue == queue );
#endif
- ( *the_thread->Wait.operations->extract )( queue, the_thread );
+ ( *the_thread->Wait.operations->extract )(
+ queue,
+ the_thread,
+ queue_context
+ );
_Thread_Wait_restore_default( the_thread );
#if defined(RTEMS_SMP)
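
The calling convention documented above can be summarized in a short sketch. The helper is hypothetical and not part of this change set: it assumes the thread wait lock acquire and release helpers take the thread and the thread queue context, the boost value 4 is an arbitrary example, and the priority node must stay allocated while it contributes.

#include <rtems/score/priorityimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

static void example_boost_priority(
  Thread_Control *the_thread,
  Priority_Node  *boost_node
)
{
  Thread_queue_Context queue_context;

  _Priority_Node_initialize( boost_node, 4 );

  /* The update set must be cleared before the node operation */
  _Thread_queue_Context_clear_priority_updates( &queue_context );

  /* The node operation requires ownership of the thread wait lock */
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_Priority_add( the_thread, boost_node, &queue_context );
  _Thread_Wait_release( the_thread, &queue_context );

  /* Apply the scheduler updates for the threads recorded above */
  _Thread_Priority_update( &queue_context );
}
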
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index a4ad0827a3..6f62506c26 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -30,6 +30,8 @@
extern "C" {
#endif
+struct Scheduler_Node;
+
/**
* @defgroup ScoreThreadQueue Thread Queue Handler
*
@@ -47,8 +49,6 @@ typedef struct Thread_queue_Queue Thread_queue_Queue;
typedef struct Thread_queue_Operations Thread_queue_Operations;
-typedef struct Thread_queue_Path Thread_queue_Path;
-
/**
* @brief Thread queue deadlock callout.
*
@@ -195,6 +195,56 @@ typedef struct {
*/
uint64_t timeout;
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Representation of a thread queue path from a start thread queue to
+ * the terminal thread queue.
+ *
+ * The start thread queue is determined by the object on which a thread intends
+ * to block. The terminal thread queue is the thread queue reachable via
+ * thread queue links whose owner is not blocked on a thread queue. The thread
+ * queue links are determined by the thread queue owner and thread wait queue
+ * relationships.
+ */
+ struct {
+ /**
+ * @brief The chain of thread queue links defining the thread queue path.
+ */
+ Chain_Control Links;
+
+ /**
+ * @brief The start of a thread queue path.
+ */
+ Thread_queue_Link Start;
+ } Path;
+#endif
+
+ /**
+ * @brief Block to manage thread priority changes due to a thread queue
+ * operation.
+ */
+ struct {
+ /**
+ * @brief A priority action list.
+ */
+ Priority_Actions Actions;
+
+ /**
+ * @brief Count of threads to update the priority via
+ * _Thread_Priority_update().
+ */
+ size_t update_count;
+
+ /**
+ * @brief Threads to update the priority via _Thread_Priority_update().
+ *
+     * Currently, a maximum of two threads need an update in one operation, for
+ * example the thread of the thread queue operation and the owner of the
+ * thread queue.
+ */
+ Thread_Control *update[ 2 ];
+ } Priority;
+
/**
* @brief Invoked in case of a detected deadlock.
*
@@ -237,7 +287,13 @@ typedef struct {
/**
* @brief The actual thread priority queue.
*/
- RBTree_Control Queue;
+ Priority_Aggregation Queue;
+
+ /**
+ * @brief This priority queue is added to a scheduler node of the owner in
+ * case of priority inheritance.
+ */
+ struct Scheduler_Node *scheduler_node;
} Thread_queue_Priority_queue;
/**
@@ -289,6 +345,11 @@ typedef struct _Thread_queue_Heads {
#if defined(RTEMS_SMP)
/**
+ * @brief Boost priority.
+ */
+ Priority_Node Boost_priority;
+
+ /**
* @brief One priority queue per scheduler instance.
*/
Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ];
@@ -337,34 +398,33 @@ struct Thread_queue_Queue {
};
/**
- * @brief Thread queue priority change operation.
+ * @brief Thread queue action operation.
*
* @param[in] queue The actual thread queue.
* @param[in] the_thread The thread.
- * @param[in] new_priority The new priority value.
- *
- * @see Thread_queue_Operations.
+ * @param[in] priority_actions The priority actions to perform. Returns the
+ *   priority actions to perform on the thread queue owner or the empty set in
+ *   case there is nothing to do.
*/
-typedef void ( *Thread_queue_Priority_change_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Priority_Control new_priority
+typedef void ( *Thread_queue_Priority_actions_operation )(
+ Thread_queue_Queue *queue,
+ Priority_Actions *priority_actions
);
/**
* @brief Thread queue enqueue operation.
*
* A potential thread to update the priority due to priority inheritance is
- * returned via the thread queue path. This thread is handed over to
- * _Thread_Update_priority().
+ * returned via the thread queue context. This thread is handed over to
+ * _Thread_Priority_update().
*
* @param[in] queue The actual thread queue.
* @param[in] the_thread The thread to enqueue on the queue.
*/
typedef void ( *Thread_queue_Enqueue_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Thread_queue_Path *path
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
);
/**
@@ -374,8 +434,9 @@ typedef void ( *Thread_queue_Enqueue_operation )(
* @param[in] the_thread The thread to extract from the thread queue.
*/
typedef void ( *Thread_queue_Extract_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
);
/**
@@ -390,9 +451,10 @@ typedef void ( *Thread_queue_Extract_operation )(
* @return The previous first thread on the queue.
*/
typedef Thread_Control *( *Thread_queue_Surrender_operation )(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
);
/**
@@ -415,16 +477,9 @@ typedef Thread_Control *( *Thread_queue_First_operation )(
*/
struct Thread_queue_Operations {
/**
- * @brief Thread queue priority change operation.
- *
- * Called by _Thread_Change_priority() to notify a thread about a priority
- * change. In case this thread waits currently for a resource the handler
- * may adjust its data structures according to the new priority value. This
- * handler must not be NULL, instead the default handler
- * _Thread_Do_nothing_priority_change() should be used in case nothing needs
- * to be done during a priority change.
- */
- Thread_queue_Priority_change_operation priority_change;
+ * @brief Thread queue priority actions operation.
+ */
+ Thread_queue_Priority_actions_operation priority_actions;
/**
* @brief Thread queue enqueue operation.
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 977b0ceb38..65b0e8eeab 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -21,7 +21,7 @@
#include <rtems/score/threadq.h>
#include <rtems/score/chainimpl.h>
-#include <rtems/score/rbtreeimpl.h>
+#include <rtems/score/priorityimpl.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/smp.h>
#include <rtems/score/thread.h>
@@ -39,38 +39,8 @@ extern "C" {
*/
/**@{*/
-/**
- * @brief Representation of a thread queue path from a start thread queue to
- * the terminal thread queue.
- *
- * The start thread queue is determined by the object on which a thread intends
- * to block. The terminal thread queue is the thread queue reachable via
- * thread queue links those owner is not blocked on a thread queue. The thread
- * queue links are determined by the thread queue owner and thread wait queue
- * relationships.
- */
-struct Thread_queue_Path {
-#if defined(RTEMS_SMP)
- /**
- * @brief The chain of thread queue links defining the thread queue path.
- */
- Chain_Control Links;
-
- /**
- * @brief The start of a thread queue path.
- */
- Thread_queue_Link Start;
-#endif
-
- /**
- * @brief A potential thread to update the priority via
- * _Thread_Update_priority().
- *
- * This thread is determined by thread queues which support priority
- * inheritance.
- */
- Thread_Control *update_priority;
-};
+#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );
/**
* @brief Thread queue with a layout compatible to struct _Thread_queue_Queue
@@ -210,6 +180,42 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
queue_context->deadlock_callout = deadlock_callout;
}
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ queue_context->Priority.update_count = 0;
+}
+
+RTEMS_INLINE_ROUTINE size_t _Thread_queue_Context_save_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ return queue_context->Priority.update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_restore_priority_updates(
+ Thread_queue_Context *queue_context,
+ size_t update_count
+)
+{
+ queue_context->Priority.update_count = update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
+)
+{
+ size_t n;
+
+ n = queue_context->Priority.update_count;
+ _Assert( n < RTEMS_ARRAY_SIZE( queue_context->Priority.update ) );
+
+ queue_context->Priority.update_count = n + 1;
+ queue_context->Priority.update[ n ] = the_thread;
+}
+
/**
* @brief Sets the MP callout in the thread queue context.
*
@@ -274,9 +280,12 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
#if defined(RTEMS_SMP)
size_t i;
+ _Priority_Node_initialize( &heads->Boost_priority, 0 );
+ _Priority_Node_set_inactive( &heads->Boost_priority );
+
for ( i = 0; i < _Scheduler_Count; ++i ) {
_Chain_Initialize_node( &heads->Priority[ i ].Node );
- _RBTree_Initialize_empty( &heads->Priority[ i ].Queue );
+ _Priority_Initialize_empty( &heads->Priority[ i ].Queue );
}
#endif
@@ -579,16 +588,6 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
);
}
-bool _Thread_queue_Do_extract_locked(
- Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
- Thread_Control *the_thread
-#if defined(RTEMS_MULTIPROCESSING)
- ,
- const Thread_queue_Context *queue_context
-#endif
-);
-
/**
* @brief Extracts the thread from the thread queue, restores the default wait
* operations and restores the default thread lock.
@@ -599,8 +598,7 @@ bool _Thread_queue_Do_extract_locked(
* @param[in] queue The actual thread queue.
* @param[in] operations The thread queue operations.
* @param[in] the_thread The thread to extract.
- * @param[in] queue_context The thread queue context. This parameter is only
- * used on multiprocessing configurations.
+ * @param[in] queue_context The thread queue context.
*
* @return Returns the unblock indicator for _Thread_queue_Unblock_critical().
* True indicates, that this thread must be unblocked by the scheduler later in
@@ -610,32 +608,12 @@ bool _Thread_queue_Do_extract_locked(
* since this thread may already block on another resource in an SMP
* configuration.
*/
-#if defined(RTEMS_MULTIPROCESSING)
- #define _Thread_queue_Extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- ) \
- _Thread_queue_Do_extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- )
-#else
- #define _Thread_queue_Extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- ) \
- _Thread_queue_Do_extract_locked( \
- unblock, \
- queue, \
- the_thread \
- )
-#endif
+bool _Thread_queue_Extract_locked(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
/**
* @brief Unblocks the thread which was on the thread queue before.
@@ -735,7 +713,7 @@ void _Thread_queue_Extract_with_proxy(
/**
* @brief Surrenders the thread queue previously owned by the thread to the
- * first enqueued thread if it exists.
+ * first enqueued thread.
*
* The owner of the thread queue must be set to NULL by the caller.
*
@@ -743,21 +721,18 @@ void _Thread_queue_Extract_with_proxy(
* thread dispatch if necessary.
*
* @param[in] queue The actual thread queue.
- * @param[in] operations The thread queue operations.
- * @param[in] heads The thread queue heads.
+ * @param[in] heads The thread queue heads. It must not be NULL.
* @param[in] previous_owner The previous owner thread surrendering the thread
* queue.
- * @param[in] keep_priority Indicates if the previous owner thread should keep
- * its current priority.
* @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
*/
void _Thread_queue_Surrender(
Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
Thread_queue_Heads *heads,
Thread_Control *previous_owner,
- bool keep_priority,
- Thread_queue_Context *queue_context
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
);
RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
@@ -980,6 +955,16 @@ void _Thread_queue_Unblock_proxy(
);
#endif
+bool _Thread_queue_Path_acquire_critical(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Path_release_critical(
+ Thread_queue_Context *queue_context
+);
+
/**
* @brief Helper structure to ensure that all objects containing a thread queue
* have the right layout.
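
As an illustration of the priority update set, the hypothetical fragment below (not from this change set) shows how a priority inheritance aware thread queue operation records the queue owner, so that the caller can perform the scheduler update afterwards via _Thread_Priority_update().

#include <rtems/score/threadqimpl.h>

static void example_note_owner_update(
  Thread_queue_Queue   *queue,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *owner = queue->owner;

  if ( owner != NULL ) {
    /* The caller later invokes _Thread_Priority_update() for this set */
    _Thread_queue_Context_add_priority_update( queue_context, owner );
  }
}
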
diff --git a/cpukit/score/preinstall.am b/cpukit/score/preinstall.am
index 61d44b8e6a..94865a0aaa 100644
--- a/cpukit/score/preinstall.am
+++ b/cpukit/score/preinstall.am
@@ -200,6 +200,10 @@ $(PROJECT_INCLUDE)/rtems/score/priority.h: include/rtems/score/priority.h $(PROJ
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/priority.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/priority.h
+$(PROJECT_INCLUDE)/rtems/score/priorityimpl.h: include/rtems/score/priorityimpl.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/priorityimpl.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/priorityimpl.h
+
$(PROJECT_INCLUDE)/rtems/score/prioritybitmap.h: include/rtems/score/prioritybitmap.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/prioritybitmap.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/prioritybitmap.h
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index bfa36ff994..daa90a528f 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -128,33 +128,20 @@ static void _Mutex_Release_critical(
)
{
Thread_queue_Heads *heads;
- bool keep_priority;
+ heads = mutex->Queue.Queue.heads;
mutex->Queue.Queue.owner = NULL;
-
--executing->resource_count;
- /*
- * Ensure that the owner resource count is visible to all other
- * processors and that we read the latest priority restore
- * hint.
- */
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
-
- heads = mutex->Queue.Queue.heads;
- keep_priority = _Thread_Owns_resources( executing )
- || !executing->priority_restore_hint;
-
- if ( __predict_true( heads == NULL && keep_priority ) ) {
+ if ( __predict_true( heads == NULL ) ) {
_Mutex_Queue_release( mutex, queue_context );
} else {
_Thread_queue_Surrender(
&mutex->Queue.Queue,
- MUTEX_TQ_OPERATIONS,
heads,
executing,
- keep_priority,
- queue_context
+ queue_context,
+ MUTEX_TQ_OPERATIONS
);
}
}
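[Editor's note] With the keep_priority bookkeeping and the priority restore hint gone, the self-contained mutex release reduces to the following shape (a condensed sketch of the hunk above, not verbatim): the fast path only has to check for waiters, because any priority the owner obtained through the queue is withdrawn as a priority node inside _Thread_queue_Surrender().

    heads = mutex->Queue.Queue.heads;
    mutex->Queue.Queue.owner = NULL;

    if ( heads == NULL ) {
      /* Nobody waits: just release the queue lock. */
      _Mutex_Queue_release( mutex, queue_context );
    } else {
      /* Hand the mutex to the first waiter; the surrender operation also
       * records and performs the necessary priority updates. */
      _Thread_queue_Surrender(
        &mutex->Queue.Queue,
        heads,
        executing,
        queue_context,
        MUTEX_TQ_OPERATIONS
      );
    }
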
diff --git a/cpukit/score/src/schedulercbs.c b/cpukit/score/src/schedulercbs.c
index 98ec0eb29e..f114d4fa02 100644
--- a/cpukit/score/src/schedulercbs.c
+++ b/cpukit/score/src/schedulercbs.c
@@ -19,24 +19,28 @@
#endif
#include <rtems/score/schedulercbsimpl.h>
-#include <rtems/score/threadimpl.h>
-#include <rtems/score/wkspace.h>
void _Scheduler_CBS_Budget_callout(
Thread_Control *the_thread
)
{
- Priority_Control new_priority;
- Priority_Control unused;
- Scheduler_CBS_Node *node;
- Scheduler_CBS_Server_id server_id;
+ Scheduler_CBS_Node *node;
+ Scheduler_CBS_Server_id server_id;
+ Thread_queue_Context queue_context;
+
+ node = _Scheduler_CBS_Thread_get_node( the_thread );
/* Put violating task to background until the end of period. */
- new_priority = the_thread->Start.initial_priority;
- _Thread_Set_priority( the_thread, new_priority, &unused, true );
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Scheduler_CBS_Cancel_job(
+ NULL,
+ the_thread,
+ node->deadline_node,
+ &queue_context
+ );
+ _Thread_Priority_update( &queue_context );
/* Invoke callback function if any. */
- node = _Scheduler_CBS_Thread_get_node( the_thread );
if ( node->cbs_server->cbs_budget_overrun ) {
_Scheduler_CBS_Get_server_id(
node->cbs_server->task_id,
diff --git a/cpukit/score/src/schedulercbsnodeinit.c b/cpukit/score/src/schedulercbsnodeinit.c
index 53800693c0..89b6f8e2df 100644
--- a/cpukit/score/src/schedulercbsnodeinit.c
+++ b/cpukit/score/src/schedulercbsnodeinit.c
@@ -33,4 +33,5 @@ void _Scheduler_CBS_Node_initialize(
the_node = _Scheduler_CBS_Node_downcast( node );
the_node->cbs_server = NULL;
+ the_node->deadline_node = NULL;
}
diff --git a/cpukit/score/src/schedulercbsreleasejob.c b/cpukit/score/src/schedulercbsreleasejob.c
index d2169af899..186f95c779 100644
--- a/cpukit/score/src/schedulercbsreleasejob.c
+++ b/cpukit/score/src/schedulercbsreleasejob.c
@@ -21,10 +21,12 @@
#include <rtems/score/schedulercbsimpl.h>
-Thread_Control *_Scheduler_CBS_Release_job(
+void _Scheduler_CBS_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
)
{
Scheduler_CBS_Node *node;
@@ -38,5 +40,37 @@ Thread_Control *_Scheduler_CBS_Release_job(
the_thread->cpu_time_budget = serv_info->parameters.budget;
}
- return _Scheduler_EDF_Release_job( scheduler, the_thread, deadline );
+ node->deadline_node = priority_node;
+
+ _Scheduler_EDF_Release_job(
+ scheduler,
+ the_thread,
+ priority_node,
+ deadline,
+ queue_context
+ );
+}
+
+void _Scheduler_CBS_Cancel_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ Scheduler_CBS_Node *node;
+
+ node = _Scheduler_CBS_Thread_get_node( the_thread );
+
+ if ( node->deadline_node != NULL ) {
+ _Assert( node->deadline_node == priority_node );
+ node->deadline_node = NULL;
+
+ _Scheduler_EDF_Cancel_job(
+ scheduler,
+ the_thread,
+ priority_node,
+ queue_context
+ );
+ }
}
diff --git a/cpukit/score/src/schedulercbsunblock.c b/cpukit/score/src/schedulercbsunblock.c
index 0c1e48ebed..c09f471afa 100644
--- a/cpukit/score/src/schedulercbsunblock.c
+++ b/cpukit/score/src/schedulercbsunblock.c
@@ -30,13 +30,11 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
Thread_Control *the_thread
)
{
- Scheduler_EDF_Context *context;
- Scheduler_CBS_Node *node;
- Scheduler_CBS_Server *serv_info;
- Priority_Control priority;
- bool prepend_it;
+ Scheduler_CBS_Node *node;
+ Scheduler_CBS_Server *serv_info;
+ Priority_Control priority;
+ bool prepend_it;
- context = _Scheduler_EDF_Get_context( scheduler );
node = _Scheduler_CBS_Thread_get_node( the_thread );
serv_info = node->cbs_server;
priority = _Scheduler_Node_get_priority( &node->Base.Base, &prepend_it );
@@ -55,40 +53,19 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
Priority_Control budget_left = priority - _Watchdog_Ticks_since_boot;
if ( deadline * budget_left > budget * deadline_left ) {
- /* Put late unblocked task to background until the end of period. */
-
- priority = node->Base.background_priority;
- the_thread->real_priority = priority;
+ Thread_queue_Context queue_context;
- if (
- _Thread_Priority_less_than(
- _Thread_Get_priority( the_thread ),
- priority
- ) || !_Thread_Owns_resources( the_thread )
- ) {
- the_thread->current_priority = priority;
- }
+ /* Put late unblocked task to background until the end of period. */
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Scheduler_CBS_Cancel_job(
+ scheduler,
+ the_thread,
+ node->deadline_node,
+ &queue_context
+ );
}
}
- node->Base.current_priority = priority;
- _Scheduler_EDF_Enqueue( context, &node->Base, priority );
-
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir( the_thread, priority == PRIORITY_PSEUDO_ISR );
- }
-
+ _Scheduler_EDF_Unblock( scheduler, the_thread );
SCHEDULER_RETURN_VOID_OR_NULL;
}
diff --git a/cpukit/score/src/schedulerdefaultnodeinit.c b/cpukit/score/src/schedulerdefaultnodeinit.c
index 10e71f8a05..53aed5274f 100644
--- a/cpukit/score/src/schedulerdefaultnodeinit.c
+++ b/cpukit/score/src/schedulerdefaultnodeinit.c
@@ -28,7 +28,5 @@ void _Scheduler_default_Node_initialize(
Priority_Control priority
)
{
- (void) scheduler;
-
- _Scheduler_Node_do_initialize( node, the_thread, priority );
+ _Scheduler_Node_do_initialize( scheduler, node, the_thread, priority );
}
diff --git a/cpukit/score/src/schedulerdefaultreleasejob.c b/cpukit/score/src/schedulerdefaultreleasejob.c
index 7272fc1946..490d58b05f 100644
--- a/cpukit/score/src/schedulerdefaultreleasejob.c
+++ b/cpukit/score/src/schedulerdefaultreleasejob.c
@@ -21,26 +21,30 @@
#include <rtems/score/scheduler.h>
-Thread_Control *_Scheduler_default_Release_job(
+void _Scheduler_default_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
)
{
(void) scheduler;
(void) the_thread;
+ (void) priority_node;
(void) deadline;
-
- return NULL;
+ (void) queue_context;
}
-Thread_Control *_Scheduler_default_Cancel_job(
+void _Scheduler_default_Cancel_job(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
(void) scheduler;
(void) the_thread;
-
- return NULL;
+ (void) priority_node;
+ (void) queue_context;
}
diff --git a/cpukit/score/src/scheduleredfchangepriority.c b/cpukit/score/src/scheduleredfchangepriority.c
index 9a73128e4c..8940b1d54b 100644
--- a/cpukit/score/src/scheduleredfchangepriority.c
+++ b/cpukit/score/src/scheduleredfchangepriority.c
@@ -54,16 +54,12 @@ Scheduler_Void_or_thread _Scheduler_EDF_Update_priority(
node = _Scheduler_EDF_Thread_get_node( the_thread );
priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );
- if ( priority == node->current_priority ) {
+ if ( priority == node->priority ) {
/* Nothing to do */
SCHEDULER_RETURN_VOID_OR_NULL;
}
- if ( ( priority & SCHEDULER_EDF_PRIO_MSB ) != 0 ) {
- node->background_priority = priority;
- }
-
- node->current_priority = priority;
+ node->priority = priority;
context = _Scheduler_EDF_Get_context( scheduler );
_Scheduler_EDF_Extract( context, node );
diff --git a/cpukit/score/src/scheduleredfnodeinit.c b/cpukit/score/src/scheduleredfnodeinit.c
index d290bd74fd..94f8facbe3 100644
--- a/cpukit/score/src/scheduleredfnodeinit.c
+++ b/cpukit/score/src/scheduleredfnodeinit.c
@@ -29,11 +29,9 @@ void _Scheduler_EDF_Node_initialize(
{
Scheduler_EDF_Node *the_node;
- (void) scheduler;
-
- _Scheduler_Node_do_initialize( node, the_thread, priority );
+ _Scheduler_Node_do_initialize( scheduler, node, the_thread, priority );
the_node = _Scheduler_EDF_Node_downcast( node );
- the_node->thread = the_thread;
_RBTree_Initialize_node( &the_node->Node );
+ the_node->priority = priority;
}
diff --git a/cpukit/score/src/scheduleredfreleasejob.c b/cpukit/score/src/scheduleredfreleasejob.c
index 4c74c48699..c19d9b9d24 100644
--- a/cpukit/score/src/scheduleredfreleasejob.c
+++ b/cpukit/score/src/scheduleredfreleasejob.c
@@ -20,75 +20,47 @@
#include <rtems/score/scheduleredfimpl.h>
-static bool _Scheduler_EDF_Release_job_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
+void _Scheduler_EDF_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
)
{
- Scheduler_EDF_Node *node;
- Priority_Control current_priority;
- Priority_Control new_priority;
+ (void) scheduler;
- node = _Scheduler_EDF_Thread_get_node( the_thread );
+ _Thread_Wait_acquire_critical( the_thread, queue_context );
- current_priority = _Thread_Get_priority( the_thread );
- new_priority = *new_priority_p;
+ _Priority_Node_set_priority( priority_node, deadline );
- node->current_priority = new_priority;
- the_thread->real_priority = new_priority;
+ if ( _Priority_Node_is_active( priority_node ) ) {
+ _Thread_Priority_changed(
+ the_thread,
+ priority_node,
+ false,
+ queue_context
+ );
+ } else {
+ _Thread_Priority_add( the_thread, priority_node, queue_context );
+ }
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
+ _Thread_Wait_release_critical( the_thread, queue_context );
}
-Thread_Control *_Scheduler_EDF_Release_job(
+void _Scheduler_EDF_Cancel_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
-)
-{
- return _Thread_Apply_priority(
- the_thread,
- deadline,
- NULL,
- _Scheduler_EDF_Release_job_filter,
- true
- );
-}
-
-static bool _Scheduler_EDF_Cancel_job_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_p,
- void *arg
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
- Scheduler_EDF_Node *node;
- Priority_Control current_priority;
- Priority_Control new_priority;
+ (void) scheduler;
- node = _Scheduler_EDF_Thread_get_node( the_thread );
+ _Thread_Wait_acquire_critical( the_thread, queue_context );
- current_priority = _Thread_Get_priority( the_thread );
- new_priority = node->background_priority;
+ _Thread_Priority_remove( the_thread, priority_node, queue_context );
+ _Priority_Node_set_inactive( priority_node );
- node->current_priority = new_priority;
- the_thread->real_priority = new_priority;
-
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
-}
-
-Thread_Control *_Scheduler_EDF_Cancel_job(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread
-)
-{
- return _Thread_Apply_priority(
- the_thread,
- 0,
- NULL,
- _Scheduler_EDF_Cancel_job_filter,
- true
- );
+ _Thread_Wait_release_critical( the_thread, queue_context );
}
diff --git a/cpukit/score/src/scheduleredfunblock.c b/cpukit/score/src/scheduleredfunblock.c
index 9b156eca46..a5cc4b6a37 100644
--- a/cpukit/score/src/scheduleredfunblock.c
+++ b/cpukit/score/src/scheduleredfunblock.c
@@ -37,7 +37,7 @@ Scheduler_Void_or_thread _Scheduler_EDF_Unblock(
priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );
(void) prepend_it;
- node->current_priority = priority;
+ node->priority = priority;
_Scheduler_EDF_Enqueue( context, node, priority );
/*
diff --git a/cpukit/score/src/scheduleredfyield.c b/cpukit/score/src/scheduleredfyield.c
index 06c1b46895..3e64e5ca05 100644
--- a/cpukit/score/src/scheduleredfyield.c
+++ b/cpukit/score/src/scheduleredfyield.c
@@ -33,7 +33,7 @@ Scheduler_Void_or_thread _Scheduler_EDF_Yield(
node = _Scheduler_EDF_Thread_get_node( the_thread );
_Scheduler_EDF_Extract( context, node );
- _Scheduler_EDF_Enqueue( context, node, node->current_priority );
+ _Scheduler_EDF_Enqueue( context, node, node->priority );
_Scheduler_EDF_Schedule_body( scheduler, the_thread, true );
SCHEDULER_RETURN_VOID_OR_NULL;
diff --git a/cpukit/score/src/schedulerpriority.c b/cpukit/score/src/schedulerpriority.c
index 11cee92424..ddfd973e0a 100644
--- a/cpukit/score/src/schedulerpriority.c
+++ b/cpukit/score/src/schedulerpriority.c
@@ -43,7 +43,7 @@ void _Scheduler_priority_Node_initialize(
Scheduler_priority_Context *context;
Scheduler_priority_Node *the_node;
- _Scheduler_Node_do_initialize( node, the_thread, priority );
+ _Scheduler_Node_do_initialize( scheduler, node, the_thread, priority );
context = _Scheduler_priority_Get_context( scheduler );
the_node = _Scheduler_priority_Node_downcast( node );
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index 07e7af4268..e624a6acf4 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -57,7 +57,12 @@ void _Scheduler_priority_SMP_Node_initialize(
Scheduler_priority_SMP_Node *the_node;
the_node = _Scheduler_priority_SMP_Node_downcast( node );
- _Scheduler_SMP_Node_initialize( &the_node->Base, the_thread, priority );
+ _Scheduler_SMP_Node_initialize(
+ scheduler,
+ &the_node->Base,
+ the_thread,
+ priority
+ );
context = _Scheduler_Get_context( scheduler );
self = _Scheduler_priority_SMP_Get_self( context );
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 8f86ea87da..9606896a94 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -52,7 +52,7 @@ void _Scheduler_simple_SMP_Node_initialize(
Scheduler_SMP_Node *the_node;
the_node = _Scheduler_SMP_Node_downcast( node );
- _Scheduler_SMP_Node_initialize( the_node, the_thread, priority );
+ _Scheduler_SMP_Node_initialize( scheduler, the_node, the_thread, priority );
}
static void _Scheduler_simple_SMP_Do_update(
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 5d7c7f7a1d..fc6d0125e7 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -183,7 +183,12 @@ void _Scheduler_strong_APA_Node_initialize(
Scheduler_strong_APA_Node *the_node;
the_node = _Scheduler_strong_APA_Node_downcast( node );
- _Scheduler_SMP_Node_initialize( &the_node->Base, the_thread, priority );
+ _Scheduler_SMP_Node_initialize(
+ scheduler,
+ &the_node->Base,
+ the_thread,
+ priority
+ );
context = _Scheduler_Get_context( scheduler );
self = _Scheduler_strong_APA_Get_self( context );
diff --git a/cpukit/score/src/thread.c b/cpukit/score/src/thread.c
index c569ae5bab..864b21bf6f 100644
--- a/cpukit/score/src/thread.c
+++ b/cpukit/score/src/thread.c
@@ -32,9 +32,7 @@
THREAD_OFFSET_ASSERT( Object );
THREAD_OFFSET_ASSERT( Join_queue );
THREAD_OFFSET_ASSERT( current_state );
-THREAD_OFFSET_ASSERT( current_priority );
-THREAD_OFFSET_ASSERT( real_priority );
-THREAD_OFFSET_ASSERT( priority_restore_hint );
+THREAD_OFFSET_ASSERT( Real_priority );
THREAD_OFFSET_ASSERT( resource_count );
THREAD_OFFSET_ASSERT( Scheduler );
THREAD_OFFSET_ASSERT( Wait );
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index 3429e1a88d..c10c712710 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -10,6 +10,8 @@
* COPYRIGHT (c) 1989-2014.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2013, 2016 embedded brains GmbH
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
@@ -20,145 +22,321 @@
#endif
#include <rtems/score/threadimpl.h>
+#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
-static Thread_Control *_Thread_Apply_priority_locked(
+static void _Thread_Set_scheduler_node_priority(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it
+)
+{
+ _Scheduler_Node_set_priority(
+ SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( priority_aggregation ),
+ _Priority_Get_priority( priority_aggregation ),
+ prepend_it
+ );
+}
+
+#if defined(RTEMS_SMP)
+static void _Thread_Priority_action_add(
+ Priority_Aggregation *priority_aggregation,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ _Thread_Set_scheduler_node_priority( priority_aggregation, false );
+ _Priority_Set_action_type( priority_aggregation, PRIORITY_ACTION_ADD );
+ _Priority_Actions_add( priority_actions, priority_aggregation );
+}
+
+static void _Thread_Priority_action_remove(
+ Priority_Aggregation *priority_aggregation,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ _Thread_Set_scheduler_node_priority( priority_aggregation, true );
+ _Priority_Set_action_type( priority_aggregation, PRIORITY_ACTION_REMOVE );
+ _Priority_Actions_add( priority_actions, priority_aggregation );
+}
+#endif
+
+static void _Thread_Priority_action_change(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ _Thread_Set_scheduler_node_priority( priority_aggregation, prepend_it );
+#if defined(RTEMS_SMP) || defined(RTEMS_DEBUG)
+ _Priority_Set_action_type( priority_aggregation, PRIORITY_ACTION_CHANGE );
+#endif
+ _Priority_Actions_add( priority_actions, priority_aggregation );
+}
+
+static void _Thread_Priority_do_perform_actions(
Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
bool prepend_it,
Thread_queue_Context *queue_context
)
{
- /*
- * For simplicity set the priority restore hint unconditionally since this is
- * an average case optimization. Otherwise complicated atomic operations
- * would be necessary. Synchronize with a potential read of the resource
- * count in the filter function. See also _CORE_mutex_Surrender(),
- * _Thread_Set_priority_filter() and _Thread_Restore_priority_filter().
- */
- the_thread->priority_restore_hint = true;
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
+ Priority_Aggregation *priority_aggregation;
- /*
- * Do not bother recomputing all the priority related information if
- * we are not REALLY changing priority.
- */
- if ( ( *filter )( the_thread, &new_priority, arg ) ) {
- _Scheduler_Thread_set_priority( the_thread, new_priority, prepend_it );
+ _Assert( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) );
+ priority_aggregation = _Priority_Actions_move( &queue_context->Priority.Actions );
- ( *the_thread->Wait.operations->priority_change )(
- the_thread->Wait.queue,
- the_thread,
- new_priority
+ do {
+ Priority_Aggregation *next_aggregation;
+ Priority_Node *priority_action_node;
+ Priority_Action_type priority_action_type;
+
+ next_aggregation = _Priority_Get_next_action( priority_aggregation );
+
+ priority_action_node = priority_aggregation->Action.node;
+ priority_action_type = priority_aggregation->Action.type;
+
+ switch ( priority_action_type ) {
+ case PRIORITY_ACTION_ADD:
+#if defined(RTEMS_SMP)
+ _Priority_Insert(
+ priority_aggregation,
+ priority_action_node,
+ &queue_context->Priority.Actions,
+ _Thread_Priority_action_add,
+ _Thread_Priority_action_change,
+ NULL
+ );
+#else
+ _Priority_Non_empty_insert(
+ priority_aggregation,
+ priority_action_node,
+ &queue_context->Priority.Actions,
+ _Thread_Priority_action_change,
+ NULL
+ );
+#endif
+ break;
+ case PRIORITY_ACTION_REMOVE:
+#if defined(RTEMS_SMP)
+ _Priority_Extract(
+ priority_aggregation,
+ priority_action_node,
+ &queue_context->Priority.Actions,
+ _Thread_Priority_action_remove,
+ _Thread_Priority_action_change,
+ NULL
+ );
+#else
+ _Priority_Extract_non_empty(
+ priority_aggregation,
+ priority_action_node,
+ &queue_context->Priority.Actions,
+ _Thread_Priority_action_change,
+ NULL
+ );
+#endif
+ break;
+ default:
+ _Assert( priority_action_type == PRIORITY_ACTION_CHANGE );
+ _Priority_Changed(
+ priority_aggregation,
+ priority_action_node,
+ prepend_it,
+ &queue_context->Priority.Actions,
+ _Thread_Priority_action_change,
+ NULL
+ );
+ break;
+ }
+
+ priority_aggregation = next_aggregation;
+ } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+
+ if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
+ _Thread_queue_Context_add_priority_update( queue_context, the_thread );
+ ( *operations->priority_actions )(
+ queue,
+ &queue_context->Priority.Actions
);
- } else {
- the_thread = NULL;
}
+}
+
+void _Thread_Priority_perform_actions(
+ Thread_Control *start_of_path,
+ Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_SMP)
+ Thread_queue_Link *link;
+#endif
+ Thread_Control *the_thread;
+ size_t update_count;
+
+ _Assert( start_of_path != NULL );
- return the_thread;
+#if defined(RTEMS_SMP)
+ link = &queue_context->Path.Start;
+#endif
+ the_thread = start_of_path;
+ update_count = _Thread_queue_Context_save_priority_updates( queue_context );
+
+ while ( true ) {
+ Thread_queue_Queue *queue;
+
+#if defined(RTEMS_SMP)
+ _Assert( link->owner == the_thread );
+ queue = link->Lock_context.Wait.queue;
+#else
+ queue = the_thread->Wait.queue;
+#endif
+
+ _Thread_Priority_do_perform_actions(
+ the_thread,
+ queue,
+ the_thread->Wait.operations,
+ false,
+ queue_context
+ );
+
+ if ( _Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
+ return;
+ }
+
+ _Assert( queue != NULL );
+ the_thread = queue->owner;
+ _Assert( the_thread != NULL );
+
+#if defined(RTEMS_SMP)
+ link = THREAD_QUEUE_LINK_OF_PATH_NODE( _Chain_Next( &link->Path_node ) );
+#endif
+
+ /*
+ * In case the priority action list is non-empty, then the current thread
+ * is enqueued on a thread queue. There is no need to notify the scheduler
+ * about a priority change, since it will pick up the new priority once it
+ * is unblocked. Restore the previous set of threads bound to update the
+ * priority.
+ */
+ _Thread_queue_Context_restore_priority_updates(
+ queue_context,
+ update_count
+ );
+ }
}
-Thread_Control *_Thread_Apply_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+static void _Thread_Priority_apply(
+ Thread_Control *the_thread,
+ Priority_Node *priority_action_node,
+ Thread_queue_Context *queue_context,
+ bool prepend_it,
+ Priority_Action_type priority_action_type
)
{
- Thread_queue_Context queue_context;
- Thread_Control *the_thread_to_update;
+ Scheduler_Node *own_node;
+ Thread_queue_Queue *queue;
- _Thread_Wait_acquire( the_thread, &queue_context );
- the_thread_to_update = _Thread_Apply_priority_locked(
+ own_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &own_node->Wait.Priority,
+ priority_action_node,
+ priority_action_type
+ );
+ queue = the_thread->Wait.queue;
+ _Thread_Priority_do_perform_actions(
the_thread,
- new_priority,
- arg,
- filter,
+ queue,
+ the_thread->Wait.operations,
prepend_it,
- &queue_context
+ queue_context
);
- _Thread_Wait_release( the_thread, &queue_context );
- return the_thread_to_update;
-}
-
-void _Thread_Update_priority( Thread_Control *the_thread )
-{
- if ( the_thread != NULL ) {
- ISR_lock_Context lock_context;
- _Thread_State_acquire( the_thread, &lock_context );
- _Scheduler_Update_priority( the_thread );
- _Thread_State_release( the_thread, &lock_context );
+ if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
+ _Thread_queue_Path_acquire_critical( queue, the_thread, queue_context );
+ _Thread_Priority_perform_actions( queue->owner, queue_context );
+ _Thread_queue_Path_release_critical( queue_context );
}
}
-void _Thread_Change_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+void _Thread_Priority_add(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
- the_thread = _Thread_Apply_priority(
+ _Thread_Priority_apply(
the_thread,
- new_priority,
- arg,
- filter,
- prepend_it
+ priority_node,
+ queue_context,
+ false,
+ PRIORITY_ACTION_ADD
);
- _Thread_Update_priority( the_thread );
}
-static bool _Thread_Raise_priority_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority,
- void *arg
+void _Thread_Priority_remove(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
- return _Thread_Priority_less_than(
- _Thread_Get_priority( the_thread ),
- *new_priority
+ _Thread_Priority_apply(
+ the_thread,
+ priority_node,
+ queue_context,
+ true,
+ PRIORITY_ACTION_REMOVE
);
}
-void _Thread_Raise_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority
+void _Thread_Priority_changed(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
)
{
- _Thread_Change_priority(
+ _Thread_Priority_apply(
the_thread,
- new_priority,
- NULL,
- _Thread_Raise_priority_filter,
- false
+ priority_node,
+ queue_context,
+ prepend_it,
+ PRIORITY_ACTION_CHANGE
);
}
-static bool _Thread_Restore_priority_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority,
- void *arg
+void _Thread_Priority_replace(
+ Thread_Control *the_thread,
+ Priority_Node *victim_node,
+ Priority_Node *replacement_node
)
{
- *new_priority = the_thread->real_priority;
-
- the_thread->priority_restore_hint = false;
+ Scheduler_Node *own_node;
- return *new_priority != _Thread_Get_priority( the_thread );
+ own_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Priority_Replace( &own_node->Wait.Priority, victim_node, replacement_node );
}
-void _Thread_Restore_priority( Thread_Control *the_thread )
+void _Thread_Priority_update( Thread_queue_Context *queue_context )
{
- _Thread_Change_priority(
- the_thread,
- 0,
- NULL,
- _Thread_Restore_priority_filter,
- true
- );
+ size_t i;
+ size_t n;
+
+ n = queue_context->Priority.update_count;
+
+ /*
+ * Update the priority of all threads of the set. Do not care to clear the
+ * set, since the thread queue context will soon get destroyed anyway.
+ */
+ for ( i = 0; i < n ; ++i ) {
+ Thread_Control *the_thread;
+ ISR_lock_Context lock_context;
+
+ the_thread = queue_context->Priority.update[ i ];
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Scheduler_Update_priority( the_thread );
+ _Thread_State_release( the_thread, &lock_context );
+ }
}
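[Editor's note] These functions replace the removed filter-based _Thread_Change_priority() machinery: a caller contributes or withdraws Priority_Node objects while holding the thread's wait lock, and afterwards _Thread_Priority_update() notifies the scheduler for every thread recorded in the queue context. A hedged sketch of a temporary boost (the helper and its locking environment are assumptions, only the score functions are from this commit; thread dispatching is assumed to be disabled around the updates):

    #include <rtems/score/threadimpl.h>

    static void example_boost_and_restore(
      Thread_Control *the_thread,
      Priority_Node  *boost_node   /* initialized with the boost priority */
    )
    {
      Thread_queue_Context queue_context;

      /* Contribute the extra priority node. */
      _Thread_queue_Context_clear_priority_updates( &queue_context );
      _Thread_Wait_acquire( the_thread, &queue_context );
      _Thread_Priority_add( the_thread, boost_node, &queue_context );
      _Thread_Wait_release( the_thread, &queue_context );
      _Thread_Priority_update( &queue_context );

      /* ... the boost is in effect here ... */

      /* Withdraw it; the aggregation falls back to the remaining nodes,
       * normally the thread's Real_priority. */
      _Thread_queue_Context_clear_priority_updates( &queue_context );
      _Thread_Wait_acquire( the_thread, &queue_context );
      _Thread_Priority_remove( the_thread, boost_node, &queue_context );
      _Thread_Wait_release( the_thread, &queue_context );
      _Thread_Priority_update( &queue_context );
    }
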
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 90f58e646d..76b1dff6e4 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -198,12 +198,15 @@ bool _Thread_Initialize(
the_thread->current_state = STATES_DORMANT;
the_thread->Wait.operations = &_Thread_queue_Operations_default;
- the_thread->current_priority = priority;
- the_thread->real_priority = priority;
the_thread->Start.initial_priority = priority;
RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );
+ _Priority_Node_initialize( &the_thread->Real_priority, priority );
+ _Priority_Initialize_one(
+ &scheduler_node->Wait.Priority,
+ &the_thread->Real_priority
+ );
_Scheduler_Node_initialize( scheduler, scheduler_node, the_thread, priority );
scheduler_node_initialized = true;
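[Editor's note] Right after initialization the real priority is therefore the only contributor to the home scheduler node's aggregation, i.e. conceptually:

    /* Post-condition sketch, not code from the patch: */
    _Assert( the_thread->Real_priority.priority == priority );
    _Assert(
      _Priority_Get_priority( &scheduler_node->Wait.Priority ) == priority
    );
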
diff --git a/cpukit/score/src/threadmp.c b/cpukit/score/src/threadmp.c
index cbb6c1c6b1..49e1c27979 100644
--- a/cpukit/score/src/threadmp.c
+++ b/cpukit/score/src/threadmp.c
@@ -80,6 +80,7 @@ void _Thread_MP_Handler_initialization (
#endif
proxy->Scheduler.node = &proxy->Scheduler_node;
_Scheduler_Node_do_initialize(
+ _Scheduler_Get_by_CPU_index( 0 ),
&proxy->Scheduler_node,
(Thread_Control *) proxy,
0
@@ -150,7 +151,7 @@ Thread_Control *_Thread_MP_Allocate_proxy (
the_proxy->receive_packet = receive_packet;
the_proxy->Object.id = source_tid;
- the_proxy->current_priority = receive_packet->source_priority;
+ the_proxy->Real_priority.priority = receive_packet->source_priority;
the_proxy->current_state = _States_Set( STATES_DORMANT, the_state );
the_proxy->Wait.count = executing->Wait.count;
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index f16dff0005..2864c0924a 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -162,17 +162,16 @@ static void _Thread_queue_Link_remove( Thread_queue_Link *link )
}
#endif
-#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
- RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );
-
-static void _Thread_queue_Path_release( Thread_queue_Path *path )
+void _Thread_queue_Path_release_critical(
+ Thread_queue_Context *queue_context
+)
{
#if defined(RTEMS_SMP)
Chain_Node *head;
Chain_Node *node;
- head = _Chain_Head( &path->Links );
- node = _Chain_Last( &path->Links );
+ head = _Chain_Head( &queue_context->Path.Links );
+ node = _Chain_Last( &queue_context->Path.Links );
if ( head != node ) {
Thread_queue_Link *link;
@@ -215,18 +214,17 @@ static void _Thread_queue_Path_release( Thread_queue_Path *path )
}
}
#else
- (void) path;
+ (void) queue_context;
#endif
}
-static bool _Thread_queue_Path_acquire(
- Thread_Control *the_thread,
- Thread_queue_Queue *queue,
- Thread_queue_Path *path
+bool _Thread_queue_Path_acquire_critical(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *owner;
-
#if defined(RTEMS_SMP)
Thread_queue_Link *link;
Thread_queue_Queue *target;
@@ -239,7 +237,7 @@ static bool _Thread_queue_Path_acquire(
* this would result in an unrecoverable deadlock of the overall system.
*/
- _Chain_Initialize_empty( &path->Links );
+ _Chain_Initialize_empty( &queue_context->Path.Links );
owner = queue->owner;
@@ -251,13 +249,15 @@ static bool _Thread_queue_Path_acquire(
return false;
}
- _RBTree_Initialize_node( &path->Start.Registry_node );
- _Chain_Initialize_node( &path->Start.Path_node );
- _Chain_Initialize_node( &path->Start.Lock_context.Wait.Gate.Node );
- link = &path->Start;
+ _RBTree_Initialize_node( &queue_context->Path.Start.Registry_node );
+ _Chain_Initialize_node( &queue_context->Path.Start.Path_node );
+ _Chain_Initialize_node(
+ &queue_context->Path.Start.Lock_context.Wait.Gate.Node
+ );
+ link = &queue_context->Path.Start;
do {
- _Chain_Append_unprotected( &path->Links, &link->Path_node );
+ _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
link->owner = owner;
_Thread_Wait_acquire_default_critical(
@@ -293,7 +293,6 @@ static bool _Thread_queue_Path_acquire(
}
} else {
link->Lock_context.Wait.queue = NULL;
- _Thread_queue_Path_release( path );
return false;
}
} else {
@@ -345,9 +344,8 @@ void _Thread_queue_Enqueue_critical(
Thread_queue_Context *queue_context
)
{
- Thread_queue_Path path;
- Per_CPU_Control *cpu_self;
- bool success;
+ Per_CPU_Control *cpu_self;
+ bool success;
#if defined(RTEMS_MULTIPROCESSING)
if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
@@ -357,7 +355,8 @@ void _Thread_queue_Enqueue_critical(
_Thread_Wait_claim( the_thread, queue, operations );
- if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
+ if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
+ _Thread_queue_Path_release_critical( queue_context );
_Thread_Wait_restore_default( the_thread );
_Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
_Thread_Wait_tranquilize( the_thread );
@@ -365,9 +364,10 @@ void _Thread_queue_Enqueue_critical(
return;
}
- ( *operations->enqueue )( queue, the_thread, &path );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *operations->enqueue )( queue, the_thread, queue_context );
- _Thread_queue_Path_release( &path );
+ _Thread_queue_Path_release_critical( queue_context );
the_thread->Wait.return_code = STATUS_SUCCESSFUL;
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
@@ -437,7 +437,7 @@ void _Thread_queue_Enqueue_critical(
_Thread_Remove_timer_and_unblock( the_thread, queue );
}
- _Thread_Update_priority( path.update_priority );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
}
@@ -488,20 +488,17 @@ static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
return unblock;
}
-bool _Thread_queue_Do_extract_locked(
+bool _Thread_queue_Extract_locked(
Thread_queue_Queue *queue,
const Thread_queue_Operations *operations,
- Thread_Control *the_thread
-#if defined(RTEMS_MULTIPROCESSING)
- ,
- const Thread_queue_Context *queue_context
-#endif
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
_Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
- ( *operations->extract )( queue, the_thread );
+ ( *operations->extract )( queue, the_thread, queue_context );
return _Thread_queue_Make_ready_again( the_thread );
}
@@ -587,46 +584,51 @@ void _Thread_queue_Extract( Thread_Control *the_thread )
void _Thread_queue_Surrender(
Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
Thread_queue_Heads *heads,
Thread_Control *previous_owner,
- bool keep_priority,
- Thread_queue_Context *queue_context
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
)
{
- if ( heads != NULL ) {
- Thread_Control *new_owner;
- bool unblock;
+ Thread_Control *new_owner;
+ bool unblock;
+ Per_CPU_Control *cpu_self;
- new_owner = ( *operations->surrender )( queue, heads, previous_owner );
- queue->owner = new_owner;
+ _Assert( heads != NULL );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ new_owner = ( *operations->surrender )(
+ queue,
+ heads,
+ previous_owner,
+ queue_context
+ );
+ queue->owner = new_owner;
#if defined(RTEMS_MULTIPROCESSING)
- if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
+ if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
- {
- ++new_owner->resource_count;
- }
+ {
+ ++new_owner->resource_count;
+ }
- unblock = _Thread_queue_Make_ready_again( new_owner );
+ unblock = _Thread_queue_Make_ready_again( new_owner );
- _Thread_queue_Unblock_critical(
- unblock,
- queue,
- new_owner,
- &queue_context->Lock_context.Lock_context
- );
- } else {
- _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
- }
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release(
+ queue,
+ &queue_context->Lock_context.Lock_context
+ );
- if ( !keep_priority ) {
- Per_CPU_Control *cpu_self;
+ _Thread_Priority_update( queue_context );
- cpu_self = _Thread_Dispatch_disable();
- _Thread_Restore_priority( previous_owner );
- _Thread_Dispatch_enable( cpu_self );
+ if ( unblock ) {
+ _Thread_Remove_timer_and_unblock( new_owner, queue );
}
+
+ _Thread_Dispatch_enable( cpu_self );
}
Thread_Control *_Thread_queue_Do_dequeue(
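[Editor's note] The reworked surrender always runs with a waiter present (heads != NULL is asserted) and folds the former keep_priority special case into the generic priority update. The ordering at the tail of the function matters and is worth spelling out (a commented restatement of the code above, not new behavior):

    /* 1. Disable thread dispatching while still inside the critical section,
     *    then drop the thread queue lock. */
    cpu_self = _Thread_Dispatch_disable_critical(
      &queue_context->Lock_context.Lock_context
    );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );

    /* 2. Apply the priority updates collected by the surrender operation,
     *    e.g. the previous owner losing an inherited priority. */
    _Thread_Priority_update( queue_context );

    /* 3. Unblock the new owner if necessary and re-enable dispatching. */
    if ( unblock ) {
      _Thread_Remove_timer_and_unblock( new_owner, queue );
    }
    _Thread_Dispatch_enable( cpu_self );
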
diff --git a/cpukit/score/src/threadqflush.c b/cpukit/score/src/threadqflush.c
index fb1323073d..9ea82af24a 100644
--- a/cpukit/score/src/threadqflush.c
+++ b/cpukit/score/src/threadqflush.c
@@ -99,8 +99,11 @@ size_t _Thread_queue_Flush_critical(
if ( do_unblock ) {
Scheduler_Node *scheduler_node;
- scheduler_node = _Scheduler_Thread_get_own_node( first );
- _Chain_Append_unprotected( &unblock, &scheduler_node->Wait.Node.Chain );
+ scheduler_node = _Thread_Scheduler_get_own_node( first );
+ _Chain_Append_unprotected(
+ &unblock,
+ &scheduler_node->Wait.Priority.Node.Node.Chain
+ );
}
++flushed;
@@ -123,7 +126,7 @@ size_t _Thread_queue_Flush_critical(
Chain_Node *next;
next = _Chain_Next( node );
- scheduler_node = SCHEDULER_NODE_OF_WAIT_CHAIN_NODE( node );
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( node );
the_thread = _Scheduler_Node_get_owner( scheduler_node );
_Thread_Remove_timer_and_unblock( the_thread, queue );
diff --git a/cpukit/score/src/threadqops.c b/cpukit/score/src/threadqops.c
index e20241d494..602d6bbffb 100644
--- a/cpukit/score/src/threadqops.c
+++ b/cpukit/score/src/threadqops.c
@@ -22,31 +22,58 @@
#include <rtems/score/rbtreeimpl.h>
#include <rtems/score/schedulerimpl.h>
-static void _Thread_queue_Do_nothing_priority_change(
+#define THREAD_QUEUE_CONTEXT_OF_PRIORITY_ACTIONS( priority_actions ) \
+ RTEMS_CONTAINER_OF( \
+ priority_actions, \
+ Thread_queue_Context, \
+ Priority.Actions \
+ )
+
+#define THREAD_QUEUE_PRIORITY_QUEUE_OF_PRIORITY_AGGREGATION( \
+ priority_aggregation \
+) \
+ RTEMS_CONTAINER_OF( \
+ priority_aggregation, \
+ Thread_queue_Priority_queue, \
+ Queue \
+ )
+
+static void _Thread_queue_Do_nothing_priority_actions(
Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Priority_Control new_priority
+ Priority_Actions *priority_actions
)
{
(void) queue;
- (void) the_thread;
- (void) new_priority;
+ _Priority_Actions_initialize_empty( priority_actions );
}
static void _Thread_queue_Do_nothing_extract(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
(void) queue;
(void) the_thread;
+ (void) queue_context;
}
-static Thread_queue_Heads *_Thread_queue_Queue_enqueue(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- void ( *initialize )( Thread_queue_Heads *, Thread_Control * ),
- void ( *enqueue )( Thread_queue_Heads *, Thread_Control * )
+static void _Thread_queue_Queue_enqueue(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ void ( *initialize )(
+ Thread_queue_Queue *,
+ Thread_Control *,
+ Thread_queue_Context *,
+ Thread_queue_Heads *
+ ),
+ void ( *enqueue )(
+ Thread_queue_Queue *,
+ Thread_Control *,
+ Thread_queue_Context *,
+ Thread_queue_Heads *
+ )
)
{
Thread_queue_Heads *heads;
@@ -63,20 +90,26 @@ static Thread_queue_Heads *_Thread_queue_Queue_enqueue(
heads = spare_heads;
queue->heads = heads;
_Chain_Prepend_unprotected( &heads->Free_chain, &spare_heads->Free_node );
- ( *initialize )( heads, the_thread );
+ ( *initialize )( queue, the_thread, queue_context, heads );
} else {
_Chain_Prepend_unprotected( &heads->Free_chain, &spare_heads->Free_node );
- ( *enqueue )( heads, the_thread );
+ ( *enqueue )( queue, the_thread, queue_context, heads );
}
-
- return heads;
}
static void _Thread_queue_Queue_extract(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *the_thread,
- void ( *extract )( Thread_queue_Heads *, Thread_Control * )
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *current_or_previous_owner,
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread,
+ void ( *extract )(
+ Thread_queue_Queue *,
+ Thread_queue_Heads *,
+ Thread_Control *,
+ Thread_queue_Context *,
+ Thread_Control *
+ )
)
{
_Assert( heads != NULL );
@@ -91,76 +124,95 @@ static void _Thread_queue_Queue_extract(
queue->heads = NULL;
}
- ( *extract )( heads, the_thread );
+ ( *extract )(
+ queue,
+ heads,
+ current_or_previous_owner,
+ queue_context,
+ the_thread
+ );
}
static void _Thread_queue_FIFO_do_initialize(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
)
{
Scheduler_Node *scheduler_node;
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
- _Chain_Initialize_node( &scheduler_node->Wait.Node.Chain );
+ _Chain_Initialize_node( &scheduler_node->Wait.Priority.Node.Node.Chain );
_Chain_Initialize_one(
&heads->Heads.Fifo,
- &scheduler_node->Wait.Node.Chain
+ &scheduler_node->Wait.Priority.Node.Node.Chain
);
}
static void _Thread_queue_FIFO_do_enqueue(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
)
{
Scheduler_Node *scheduler_node;
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
- _Chain_Initialize_node( &scheduler_node->Wait.Node.Chain );
+ _Chain_Initialize_node( &scheduler_node->Wait.Priority.Node.Node.Chain );
_Chain_Append_unprotected(
&heads->Heads.Fifo,
- &scheduler_node->Wait.Node.Chain
+ &scheduler_node->Wait.Priority.Node.Node.Chain
);
}
static void _Thread_queue_FIFO_do_extract(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *current_or_previous_owner,
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
)
{
Scheduler_Node *scheduler_node;
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
- _Chain_Extract_unprotected( &scheduler_node->Wait.Node.Chain );
+ (void) current_or_previous_owner;
+ (void) queue_context;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+
+ _Chain_Extract_unprotected( &scheduler_node->Wait.Priority.Node.Node.Chain );
}
static void _Thread_queue_FIFO_enqueue(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Thread_queue_Path *path
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
- path->update_priority = NULL;
-
_Thread_queue_Queue_enqueue(
queue,
the_thread,
+ queue_context,
_Thread_queue_FIFO_do_initialize,
_Thread_queue_FIFO_do_enqueue
);
}
static void _Thread_queue_FIFO_extract(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
_Thread_queue_Queue_extract(
queue,
queue->heads,
+ NULL,
+ queue_context,
the_thread,
_Thread_queue_FIFO_do_extract
);
@@ -177,15 +229,16 @@ static Thread_Control *_Thread_queue_FIFO_first(
fifo = &heads->Heads.Fifo;
_Assert( !_Chain_Is_empty( fifo ) );
first = _Chain_First( fifo );
- scheduler_node = SCHEDULER_NODE_OF_WAIT_CHAIN_NODE( first );
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( first );
return _Scheduler_Node_get_owner( scheduler_node );
}
static Thread_Control *_Thread_queue_FIFO_surrender(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *first;
@@ -194,6 +247,8 @@ static Thread_Control *_Thread_queue_FIFO_surrender(
_Thread_queue_Queue_extract(
queue,
heads,
+ NULL,
+ queue_context,
first,
_Thread_queue_FIFO_do_extract
);
@@ -203,165 +258,162 @@ static Thread_Control *_Thread_queue_FIFO_surrender(
static Thread_queue_Priority_queue *_Thread_queue_Priority_queue(
Thread_queue_Heads *heads,
- const Thread_Control *the_thread
+ const Scheduler_Node *scheduler_node
)
{
#if defined(RTEMS_SMP)
- return &heads->Priority[
- _Scheduler_Get_index( _Scheduler_Get_own( the_thread ) )
- ];
-#else
- (void) the_thread;
+ const Scheduler_Control *scheduler;
+ scheduler = _Priority_Get_scheduler( &scheduler_node->Wait.Priority );
+ return &heads->Priority[ _Scheduler_Get_index( scheduler ) ];
+#else
+ (void) scheduler_node;
return &heads->Heads.Priority;
#endif
}
-static bool _Thread_queue_Priority_less(
- const void *left,
- const RBTree_Node *right
-)
-{
- const Priority_Control *the_left;
- const Scheduler_Node *scheduler_node;
- const Thread_Control *the_right;
-
- the_left = left;
- scheduler_node = SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( right );
- the_right = _Scheduler_Node_get_owner( scheduler_node );
-
- return *the_left < _Thread_Get_priority( the_right );
-}
-
-static void _Thread_queue_Priority_priority_change(
+static void _Thread_queue_Priority_priority_actions(
Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Priority_Control new_priority
+ Priority_Actions *priority_actions
)
{
- Thread_queue_Heads *heads;
- Thread_queue_Priority_queue *priority_queue;
- Scheduler_Node *scheduler_node;
+ Thread_queue_Heads *heads;
+ Priority_Aggregation *priority_aggregation;
heads = queue->heads;
_Assert( heads != NULL );
- priority_queue = _Thread_queue_Priority_queue( heads, the_thread );
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
+ _Assert( !_Priority_Actions_is_empty( priority_actions ) );
+ priority_aggregation = _Priority_Actions_move( priority_actions );
- _RBTree_Extract(
- &priority_queue->Queue,
- &scheduler_node->Wait.Node.RBTree
- );
- _RBTree_Insert_inline(
- &priority_queue->Queue,
- &scheduler_node->Wait.Node.RBTree,
- &new_priority,
- _Thread_queue_Priority_less
- );
+ do {
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
+
+ _Assert( priority_aggregation->Action.type == PRIORITY_ACTION_CHANGE );
+ _Priority_Plain_changed(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node
+ );
+
+ priority_aggregation = _Priority_Get_next_action( priority_aggregation );
+ } while ( _Priority_Actions_is_valid( priority_aggregation ) );
}
static void _Thread_queue_Priority_do_initialize(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
)
{
- Thread_queue_Priority_queue *priority_queue;
Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
- priority_queue = _Thread_queue_Priority_queue( heads, the_thread );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
#if defined(RTEMS_SMP)
_Chain_Initialize_one( &heads->Heads.Fifo, &priority_queue->Node );
#endif
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
-
- _RBTree_Initialize_node( &scheduler_node->Wait.Node.RBTree );
- _RBTree_Initialize_one(
+ _Priority_Initialize_one(
&priority_queue->Queue,
- &scheduler_node->Wait.Node.RBTree
+ &scheduler_node->Wait.Priority.Node
);
}
static void _Thread_queue_Priority_do_enqueue(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
)
{
- Thread_queue_Priority_queue *priority_queue;
Scheduler_Node *scheduler_node;
- Priority_Control current_priority;
+ Thread_queue_Priority_queue *priority_queue;
- priority_queue = _Thread_queue_Priority_queue( heads, the_thread );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
#if defined(RTEMS_SMP)
- if ( _RBTree_Is_empty( &priority_queue->Queue ) ) {
+ if ( _Priority_Is_empty( &priority_queue->Queue ) ) {
_Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ _Priority_Initialize_one(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node
+ );
+ return;
}
#endif
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
- current_priority = _Thread_Get_priority( the_thread );
-
- _RBTree_Initialize_node( &scheduler_node->Wait.Node.RBTree );
- _RBTree_Insert_inline(
+ _Priority_Plain_insert(
&priority_queue->Queue,
- &scheduler_node->Wait.Node.RBTree,
- &current_priority,
- _Thread_queue_Priority_less
+ &scheduler_node->Wait.Priority.Node,
+ _Priority_Get_priority( &scheduler_node->Wait.Priority )
);
}
static void _Thread_queue_Priority_do_extract(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *current_or_previous_owner,
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
)
{
- Thread_queue_Priority_queue *priority_queue;
Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+
+ (void) current_or_previous_owner;
+ (void) queue_context;
- priority_queue = _Thread_queue_Priority_queue( heads, the_thread );
- scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
- _RBTree_Extract(
+ _Priority_Plain_extract(
&priority_queue->Queue,
- &scheduler_node->Wait.Node.RBTree
+ &scheduler_node->Wait.Priority.Node
);
#if defined(RTEMS_SMP)
_Chain_Extract_unprotected( &priority_queue->Node );
- if ( !_RBTree_Is_empty( &priority_queue->Queue ) ) {
+ if ( !_Priority_Is_empty( &priority_queue->Queue ) ) {
_Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
}
#endif
}
static void _Thread_queue_Priority_enqueue(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Thread_queue_Path *path
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
- path->update_priority = NULL;
-
_Thread_queue_Queue_enqueue(
queue,
the_thread,
+ queue_context,
_Thread_queue_Priority_do_initialize,
_Thread_queue_Priority_do_enqueue
);
}
static void _Thread_queue_Priority_extract(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
_Thread_queue_Queue_extract(
queue,
queue->heads,
+ NULL,
+ queue_context,
the_thread,
_Thread_queue_Priority_do_extract
);
@@ -372,7 +424,7 @@ static Thread_Control *_Thread_queue_Priority_first(
)
{
Thread_queue_Priority_queue *priority_queue;
- RBTree_Node *first;
+ Priority_Node *first;
Scheduler_Node *scheduler_node;
#if defined(RTEMS_SMP)
@@ -383,17 +435,18 @@ static Thread_Control *_Thread_queue_Priority_first(
priority_queue = &heads->Heads.Priority;
#endif
- _Assert( !_RBTree_Is_empty( &priority_queue->Queue ) );
- first = _RBTree_Minimum( &priority_queue->Queue );
- scheduler_node = SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( first );
+ _Assert( !_Priority_Is_empty( &priority_queue->Queue ) );
+ first = _Priority_Get_minimum_node( &priority_queue->Queue );
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( first );
return _Scheduler_Node_get_owner( scheduler_node );
}
static Thread_Control *_Thread_queue_Priority_surrender(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *first;
@@ -402,6 +455,8 @@ static Thread_Control *_Thread_queue_Priority_surrender(
_Thread_queue_Queue_extract(
queue,
heads,
+ NULL,
+ queue_context,
first,
_Thread_queue_Priority_do_extract
);
@@ -409,104 +464,524 @@ static Thread_Control *_Thread_queue_Priority_surrender(
return first;
}
-static void _Thread_queue_Priority_inherit_enqueue(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Thread_queue_Path *path
+static void _Thread_queue_Priority_inherit_do_actions_change(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it,
+ Priority_Actions *priority_actions,
+ void *arg
)
{
- Thread_queue_Heads *heads;
- Thread_Control *owner;
- Priority_Control priority;
+ Thread_queue_Priority_queue *priority_queue;
+ Scheduler_Node *scheduler_node;
- heads = _Thread_queue_Queue_enqueue(
- queue,
- the_thread,
- _Thread_queue_Priority_do_initialize,
- _Thread_queue_Priority_do_enqueue
+ priority_queue = THREAD_QUEUE_PRIORITY_QUEUE_OF_PRIORITY_AGGREGATION(
+ priority_aggregation
);
+ scheduler_node = priority_queue->scheduler_node;
- owner = queue->owner;
+ _Priority_Set_action(
+ &scheduler_node->Wait.Priority,
+ &priority_aggregation->Node,
+ PRIORITY_ACTION_CHANGE
+ );
+ _Priority_Actions_add( priority_actions, &scheduler_node->Wait.Priority );
+}
-#if defined(RTEMS_SMP)
- if ( _Chain_Has_only_one_node( &heads->Heads.Fifo ) ) {
- priority = the_thread->current_priority;
- } else {
- priority = _Scheduler_Map_priority(
- _Scheduler_Get_own( the_thread ),
- PRIORITY_PSEUDO_ISR
+static void _Thread_queue_Priority_inherit_priority_actions(
+ Thread_queue_Queue *queue,
+ Priority_Actions *priority_actions
+)
+{
+ Thread_queue_Heads *heads;
+ Priority_Aggregation *priority_aggregation;
+
+ heads = queue->heads;
+ _Assert( heads != NULL );
+
+ _Assert( !_Priority_Actions_is_empty( priority_actions ) );
+ priority_aggregation = _Priority_Actions_move( priority_actions );
+
+ do {
+ Priority_Aggregation *next_aggregation;
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+
+ next_aggregation = _Priority_Get_next_action( priority_aggregation );
+
+ scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
+
+ _Assert( priority_aggregation->Action.type == PRIORITY_ACTION_CHANGE );
+ _Priority_Changed(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node,
+ false,
+ priority_actions,
+ _Thread_queue_Priority_inherit_do_actions_change,
+ NULL
);
+
+ priority_aggregation = next_aggregation;
+ } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+}
+
+static void _Thread_queue_Boost_priority(
+ Thread_queue_Heads *heads,
+ Thread_Control *the_thread,
+ Thread_Control *owner,
+ Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_SMP)
+ const Scheduler_Control *scheduler;
+ const Scheduler_Control *scheduler_of_owner;
+ Scheduler_Node *scheduler_node_of_owner;
+ Priority_Control boost_priority;
+
+ if ( _Priority_Node_is_active( &heads->Boost_priority ) ) {
+ return;
}
+
+ scheduler = _Scheduler_Get_own( the_thread );
+ scheduler_of_owner = _Scheduler_Get_own( owner );
+
+ if ( scheduler == scheduler_of_owner ) {
+ return;
+ }
+
+ scheduler_node_of_owner = _Thread_Scheduler_get_own_node( owner );
+
+ boost_priority = _Scheduler_Map_priority(
+ scheduler_of_owner,
+ PRIORITY_PSEUDO_ISR
+ );
+
+ _Priority_Node_initialize( &heads->Boost_priority, boost_priority );
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &scheduler_node_of_owner->Wait.Priority,
+ &heads->Boost_priority,
+ PRIORITY_ACTION_ADD
+ );
+ _Thread_Priority_perform_actions( owner, queue_context );
#else
(void) heads;
+ (void) the_thread;
+ (void) owner;
+ (void) queue_context;
+#endif
+}
+
+static void _Thread_queue_Priority_inherit_do_initialize(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
+)
+{
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+ Thread_Control *owner;
+ Scheduler_Node *scheduler_node_of_owner;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
- priority = the_thread->current_priority;
+#if defined(RTEMS_SMP)
+ _Chain_Initialize_one( &heads->Heads.Fifo, &priority_queue->Node );
#endif
- if ( priority < owner->current_priority ) {
- path->update_priority = owner;
+ _Priority_Initialize_one(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node
+ );
- owner->priority_restore_hint = true;
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
+ owner = queue->owner;
+ scheduler_node_of_owner = _Thread_Scheduler_get_own_node( owner );
+ priority_queue->scheduler_node = scheduler_node_of_owner;
+
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &scheduler_node_of_owner->Wait.Priority,
+ &priority_queue->Queue.Node,
+ PRIORITY_ACTION_ADD
+ );
+ _Thread_Priority_perform_actions( owner, queue_context );
+ _Thread_queue_Boost_priority( heads, the_thread, owner, queue_context );
+}
+
+static void _Thread_queue_Priority_inherit_do_enqueue_change(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ Thread_queue_Queue *queue;
+ Thread_Control *owner;
+ Scheduler_Node *scheduler_node_of_owner;
+ Thread_queue_Context *queue_context;
+
+ queue = arg;
+ owner = queue->owner;
+ scheduler_node_of_owner = _Thread_Scheduler_get_own_node( owner );
+ queue_context = THREAD_QUEUE_CONTEXT_OF_PRIORITY_ACTIONS( priority_actions );
+
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &scheduler_node_of_owner->Wait.Priority,
+ &priority_aggregation->Node,
+ PRIORITY_ACTION_CHANGE
+ );
+ _Thread_Priority_perform_actions( owner, queue_context );
+}
+
+static void _Thread_queue_Priority_inherit_do_enqueue(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context,
+ Thread_queue_Heads *heads
+)
+{
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
- _Scheduler_Thread_set_priority( owner, priority, false );
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
- ( *owner->Wait.operations->priority_change )(
- owner->Wait.queue,
+#if defined(RTEMS_SMP)
+ if ( _Priority_Is_empty( &priority_queue->Queue ) ) {
+ Thread_Control *owner;
+ Scheduler_Node *scheduler_node_of_owner;
+
+ _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ _Priority_Initialize_one(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node
+ );
+
+ owner = queue->owner;
+ scheduler_node_of_owner = _Thread_Scheduler_get_own_node( owner );
+ priority_queue->scheduler_node = scheduler_node_of_owner;
+
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &scheduler_node_of_owner->Wait.Priority,
+ &priority_queue->Queue.Node,
+ PRIORITY_ACTION_ADD
+ );
+ _Thread_Priority_perform_actions( owner, queue_context );
+ _Thread_queue_Boost_priority( heads, the_thread, owner, queue_context );
+ return;
+ }
+#endif
+
+ _Priority_Non_empty_insert(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node,
+ &queue_context->Priority.Actions,
+ _Thread_queue_Priority_inherit_do_enqueue_change,
+ queue
+ );
+ _Thread_queue_Boost_priority(
+ heads,
+ the_thread,
+ queue->owner,
+ queue_context
+ );
+}
+
+static void _Thread_queue_Priority_inherit_enqueue(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Queue_enqueue(
+ queue,
+ the_thread,
+ queue_context,
+ _Thread_queue_Priority_inherit_do_initialize,
+ _Thread_queue_Priority_inherit_do_enqueue
+ );
+}
+
+static void _Thread_queue_Priority_inherit_do_extract_action(
+ Priority_Actions *priority_actions,
+ Thread_Control *owner,
+ Priority_Node *priority_action_node,
+ Priority_Action_type priority_action_type
+)
+{
+ Thread_queue_Context *queue_context;
+ Scheduler_Node *scheduler_node_of_owner;
+
+ queue_context = THREAD_QUEUE_CONTEXT_OF_PRIORITY_ACTIONS( priority_actions );
+ scheduler_node_of_owner = _Thread_Scheduler_get_own_node( owner );
+
+ _Priority_Actions_initialize_one(
+ &queue_context->Priority.Actions,
+ &scheduler_node_of_owner->Wait.Priority,
+ priority_action_node,
+ priority_action_type
+ );
+ _Thread_Priority_perform_actions( owner, queue_context );
+}
+
+static void _Thread_queue_Priority_inherit_do_extract_remove(
+ Priority_Aggregation *priority_aggregation,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ _Thread_queue_Priority_inherit_do_extract_action(
+ priority_actions,
+ arg,
+ &priority_aggregation->Node,
+ PRIORITY_ACTION_REMOVE
+ );
+}
+
+static void _Thread_queue_Priority_inherit_do_extract_change(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it,
+ Priority_Actions *priority_actions,
+ void *arg
+)
+{
+ _Thread_queue_Priority_inherit_do_extract_action(
+ priority_actions,
+ arg,
+ &priority_aggregation->Node,
+ PRIORITY_ACTION_CHANGE
+ );
+}
+
+static void _Thread_queue_Priority_inherit_do_extract(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *owner,
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
+)
+{
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
+
+ _Thread_queue_Path_acquire_critical( queue, the_thread, queue_context );
+
+ _Priority_Extract(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node,
+ &queue_context->Priority.Actions,
+ _Thread_queue_Priority_inherit_do_extract_remove,
+ _Thread_queue_Priority_inherit_do_extract_change,
+ owner
+ );
+
+#if defined(RTEMS_SMP)
+ _Chain_Extract_unprotected( &priority_queue->Node );
+
+ if ( !_Priority_Is_empty( &priority_queue->Queue ) ) {
+ _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ }
+
+ if (
+ _Priority_Node_is_active( &heads->Boost_priority )
+ && ( _Chain_Is_empty( &heads->Heads.Fifo )
+ || _Chain_Has_only_one_node( &heads->Heads.Fifo ) )
+ ) {
+ _Thread_queue_Priority_inherit_do_extract_action(
+ &queue_context->Priority.Actions,
owner,
- priority
+ &heads->Boost_priority,
+ PRIORITY_ACTION_REMOVE
);
- } else {
- path->update_priority = NULL;
+ _Priority_Node_set_inactive( &heads->Boost_priority );
}
+#endif
+
+ _Thread_queue_Path_release_critical( queue_context );
}
-static void _Thread_queue_Boost_priority(
- Thread_queue_Heads *heads,
- Thread_Control *the_thread
+static void _Thread_queue_Priority_inherit_extract(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Queue_extract(
+ queue,
+ queue->heads,
+ queue->owner,
+ queue_context,
+ the_thread,
+ _Thread_queue_Priority_inherit_do_extract
+ );
+}
+
+static void _Thread_queue_Priority_inherit_do_surrender_change(
+ Priority_Aggregation *priority_aggregation,
+ bool prepend_it,
+ Priority_Actions *priority_actions,
+ void *arg
)
{
+ _Thread_queue_Context_add_priority_update(
+ THREAD_QUEUE_CONTEXT_OF_PRIORITY_ACTIONS( priority_actions ),
+ arg
+ );
+ _Scheduler_Node_set_priority(
+ SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation ),
+ _Priority_Get_priority( priority_aggregation ),
+ prepend_it
+ );
+}
+
+static void _Thread_queue_Priority_add(
+ Thread_Control *the_thread,
+ Priority_Aggregation *priority_aggregation,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ _Priority_Non_empty_insert(
+ priority_aggregation,
+ priority_node,
+ &queue_context->Priority.Actions,
+ _Thread_queue_Priority_inherit_do_surrender_change,
+ the_thread
+ );
+}
+
+static void _Thread_queue_Priority_remove(
+ Thread_Control *the_thread,
+ Scheduler_Node *scheduler_node,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ _Priority_Extract_non_empty(
+ &scheduler_node->Wait.Priority,
+ priority_node,
+ &queue_context->Priority.Actions,
+ _Thread_queue_Priority_inherit_do_surrender_change,
+ the_thread
+ );
+}
+
+static void _Thread_queue_Priority_inherit_do_surrender(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
+)
+{
+ Scheduler_Node *scheduler_node;
+ Thread_queue_Priority_queue *priority_queue;
+ ISR_lock_Context lock_context;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ priority_queue = _Thread_queue_Priority_queue( heads, scheduler_node );
+
+ _Thread_Wait_acquire_default_critical( previous_owner, &lock_context );
+
#if defined(RTEMS_SMP)
- if ( !_Chain_Has_only_one_node( &heads->Heads.Fifo ) ) {
- const Scheduler_Control *scheduler;
- Priority_Control boost_priority;
+ if ( _Priority_Node_is_active( &heads->Boost_priority ) ) {
+ _Thread_queue_Priority_remove(
+ previous_owner,
+ _Thread_Scheduler_get_own_node( previous_owner ),
+ &heads->Boost_priority,
+ queue_context
+ );
+ _Priority_Node_set_inactive( &heads->Boost_priority );
+ }
+#endif
- the_thread->priority_restore_hint = true;
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
+ _Thread_queue_Priority_remove(
+ previous_owner,
+ priority_queue->scheduler_node,
+ &priority_queue->Queue.Node,
+ queue_context
+ );
- scheduler = _Scheduler_Get_own( the_thread );
- boost_priority = _Scheduler_Map_priority( scheduler, PRIORITY_PSEUDO_ISR );
+ _Thread_Wait_release_default_critical( previous_owner, &lock_context );
- _Scheduler_Thread_set_priority( the_thread, boost_priority, false );
+ _Priority_Extract(
+ &priority_queue->Queue,
+ &scheduler_node->Wait.Priority.Node,
+ NULL,
+ _Priority_Remove_nothing,
+ _Priority_Change_nothing,
+ previous_owner
+ );
+
+ if ( !_Priority_Is_empty( &priority_queue->Queue ) ) {
+ priority_queue->scheduler_node = scheduler_node;
+ _Thread_queue_Priority_add(
+ the_thread,
+ &scheduler_node->Wait.Priority,
+ &priority_queue->Queue.Node,
+ queue_context
+ );
+ }
+
+#if defined(RTEMS_SMP)
+ _Chain_Extract_unprotected( &priority_queue->Node );
+
+ if ( !_Priority_Is_empty( &priority_queue->Queue ) ) {
+ _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ }
+
+ if (
+    !_Chain_Is_empty( &heads->Heads.Fifo )
+    && !_Chain_Has_only_one_node( &heads->Heads.Fifo )
+ ) {
+ Priority_Control boost_priority;
+
+ boost_priority = _Scheduler_Map_priority(
+ _Scheduler_Get_own( the_thread ),
+ PRIORITY_PSEUDO_ISR
+ );
+ _Priority_Node_initialize( &heads->Boost_priority, boost_priority );
+ _Thread_queue_Priority_add(
+ the_thread,
+ &scheduler_node->Wait.Priority,
+ &heads->Boost_priority,
+ queue_context
+ );
}
-#else
- (void) heads;
- (void) the_thread;
#endif
}
static Thread_Control *_Thread_queue_Priority_inherit_surrender(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *first;
first = _Thread_queue_Priority_first( heads );
- _Thread_queue_Boost_priority( heads, first );
_Thread_queue_Queue_extract(
queue,
heads,
+ previous_owner,
+ queue_context,
first,
- _Thread_queue_Priority_do_extract
+ _Thread_queue_Priority_inherit_do_surrender
);
return first;
}
const Thread_queue_Operations _Thread_queue_Operations_default = {
- .priority_change = _Thread_queue_Do_nothing_priority_change,
+ .priority_actions = _Thread_queue_Do_nothing_priority_actions,
.extract = _Thread_queue_Do_nothing_extract
/*
* The default operations are only used in _Thread_Change_priority() and
@@ -516,7 +991,7 @@ const Thread_queue_Operations _Thread_queue_Operations_default = {
};
const Thread_queue_Operations _Thread_queue_Operations_FIFO = {
- .priority_change = _Thread_queue_Do_nothing_priority_change,
+ .priority_actions = _Thread_queue_Do_nothing_priority_actions,
.enqueue = _Thread_queue_FIFO_enqueue,
.extract = _Thread_queue_FIFO_extract,
.surrender = _Thread_queue_FIFO_surrender,
@@ -524,7 +999,7 @@ const Thread_queue_Operations _Thread_queue_Operations_FIFO = {
};
const Thread_queue_Operations _Thread_queue_Operations_priority = {
- .priority_change = _Thread_queue_Priority_priority_change,
+ .priority_actions = _Thread_queue_Priority_priority_actions,
.enqueue = _Thread_queue_Priority_enqueue,
.extract = _Thread_queue_Priority_extract,
.surrender = _Thread_queue_Priority_surrender,
@@ -532,9 +1007,9 @@ const Thread_queue_Operations _Thread_queue_Operations_priority = {
};
const Thread_queue_Operations _Thread_queue_Operations_priority_inherit = {
- .priority_change = _Thread_queue_Priority_priority_change,
+ .priority_actions = _Thread_queue_Priority_inherit_priority_actions,
.enqueue = _Thread_queue_Priority_inherit_enqueue,
- .extract = _Thread_queue_Priority_extract,
+ .extract = _Thread_queue_Priority_inherit_extract,
.surrender = _Thread_queue_Priority_inherit_surrender,
.first = _Thread_queue_Priority_first
};
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 149882795c..69534001aa 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -50,40 +50,28 @@ static Thread_Zombie_control _Thread_Zombies = {
.Lock = ISR_LOCK_INITIALIZER( "thread zombies" )
};
-static bool _Thread_Raise_real_priority_filter(
+static void _Thread_Raise_real_priority(
Thread_Control *the_thread,
- Priority_Control *new_priority_ptr,
- void *arg
+ Priority_Control priority
)
{
- Priority_Control real_priority;
- Priority_Control new_priority;
- Priority_Control current_priority;
-
- real_priority = the_thread->real_priority;
- new_priority = *new_priority_ptr;
- current_priority = _Thread_Get_priority( the_thread );
-
- new_priority = _Thread_Priority_highest( real_priority, new_priority );
- *new_priority_ptr = new_priority;
+ Thread_queue_Context queue_context;
- the_thread->real_priority = new_priority;
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire( the_thread, &queue_context );
- return _Thread_Priority_less_than( current_priority, new_priority );
-}
+ if ( priority < the_thread->Real_priority.priority ) {
+ _Thread_Priority_change(
+ the_thread,
+ &the_thread->Real_priority,
+ priority,
+ false,
+ &queue_context
+ );
+ }
-static void _Thread_Raise_real_priority(
- Thread_Control *the_thread,
- Priority_Control priority
-)
-{
- _Thread_Change_priority(
- the_thread,
- priority,
- NULL,
- _Thread_Raise_real_priority_filter,
- false
- );
+ _Thread_Wait_release( the_thread, &queue_context );
+ _Thread_Priority_update( &queue_context );
}
typedef struct {
@@ -182,7 +170,7 @@ static void _Thread_Free( Thread_Control *the_thread )
_ISR_lock_Destroy( &the_thread->Keys.Lock );
_Scheduler_Node_destroy(
_Scheduler_Get( the_thread ),
- _Scheduler_Thread_get_own_node( the_thread )
+ _Thread_Scheduler_get_own_node( the_thread )
);
_ISR_lock_Destroy( &the_thread->Timer.Lock );
@@ -622,8 +610,8 @@ void _Thread_Restart_self(
ISR_lock_Context *lock_context
)
{
- Per_CPU_Control *cpu_self;
- Priority_Control unused;
+ Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
_Assert(
_Watchdog_Get_state( &executing->Timer.Watchdog ) == WATCHDOG_INACTIVE
@@ -646,13 +634,18 @@ void _Thread_Restart_self(
cpu_self = _Thread_Dispatch_disable_critical( lock_context );
_Thread_State_release( executing, lock_context );
- _Thread_Set_priority(
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire_default( executing, lock_context );
+ _Thread_Priority_change(
executing,
+ &executing->Real_priority,
executing->Start.initial_priority,
- &unused,
- true
+ false,
+ &queue_context
);
+ _Thread_Wait_release_default( executing, lock_context );
+ _Thread_Priority_update( &queue_context );
_Thread_Dispatch_enable( cpu_self );
RTEMS_UNREACHABLE();
}
diff --git a/cpukit/score/src/threadsetpriority.c b/cpukit/score/src/threadsetpriority.c
deleted file mode 100644
index d6b8319970..0000000000
--- a/cpukit/score/src/threadsetpriority.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * @file
- *
- * @brief Set Thread Priority
- * @ingroup ScoreThread
- */
-
-/*
- * COPYRIGHT (c) 1989-2011.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#if HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <rtems/score/threadimpl.h>
-
-static bool _Thread_Set_priority_filter(
- Thread_Control *the_thread,
- Priority_Control *new_priority_ptr,
- void *arg
-)
-{
- Priority_Control current_priority;
- Priority_Control new_priority;
- Priority_Control *old_priority_ptr;
-
- current_priority = _Thread_Get_priority( the_thread );
- new_priority = *new_priority_ptr;
-
- old_priority_ptr = arg;
- *old_priority_ptr = current_priority;
-
- the_thread->real_priority = new_priority;
-
- return _Thread_Priority_less_than( current_priority, new_priority )
- || !_Thread_Owns_resources( the_thread );
-}
-
-void _Thread_Set_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- Priority_Control *old_priority,
- bool prepend_it
-)
-{
- _Thread_Change_priority(
- the_thread,
- new_priority,
- old_priority,
- _Thread_Set_priority_filter,
- prepend_it
- );
-}
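
The deleted _Thread_Set_priority() has no single successor; callers now use the
acquire/change/release/update sequence of the new priority node interface, as
seen in _Thread_Raise_real_priority() and _Thread_Restart_self() above and in
the smpscheduler03 apply_priority() helper below.  A minimal sketch of that
sequence, built only from functions introduced in this patch (the helper name
set_real_priority and the chosen includes are hypothetical, not part of the
change):

#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

static void set_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context queue_context;

  /* Start with an empty set of pending scheduler priority updates */
  _Thread_queue_Context_clear_priority_updates( &queue_context );

  /* Serialize with thread queue operations and other priority changes */
  _Thread_Wait_acquire( the_thread, &queue_context );

  /* Replace the value of the thread's real priority node; dependent
     priority aggregations are adjusted via the collected actions */
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    false,                     /* append within the priority group */
    &queue_context
  );

  _Thread_Wait_release( the_thread, &queue_context );

  /* Carry out the scheduler updates recorded in the queue context */
  _Thread_Priority_update( &queue_context );
}
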
diff --git a/cpukit/score/src/threadtimeout.c b/cpukit/score/src/threadtimeout.c
index b6b6cc4ac2..8297c55c4b 100644
--- a/cpukit/score/src/threadtimeout.c
+++ b/cpukit/score/src/threadtimeout.c
@@ -30,6 +30,8 @@ void _Thread_Timeout( Watchdog_Control *watchdog )
bool unblock;
the_thread = RTEMS_CONTAINER_OF( watchdog, Thread_Control, Timer.Watchdog );
+
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
_Thread_Wait_acquire( the_thread, &queue_context );
wait_flags = _Thread_Wait_flags_get( the_thread );
@@ -66,6 +68,7 @@ void _Thread_Timeout( Watchdog_Control *watchdog )
}
_Thread_Wait_release( the_thread, &queue_context );
+ _Thread_Priority_update( &queue_context );
if ( unblock ) {
_Thread_Wait_tranquilize( the_thread );
diff --git a/testsuites/smptests/smpmutex01/init.c b/testsuites/smptests/smpmutex01/init.c
index 5adc2e2244..93c059e6fc 100644
--- a/testsuites/smptests/smpmutex01/init.c
+++ b/testsuites/smptests/smpmutex01/init.c
@@ -315,10 +315,9 @@ static void test(void)
request(ctx, B_5_0, REQ_MTX_RELEASE);
check_generations(ctx, B_5_0, A_2_1);
assert_prio(ctx, B_5_0, 5);
- assert_prio(ctx, A_2_1, 0);
+ assert_prio(ctx, A_2_1, 2);
request(ctx, A_2_1, REQ_MTX_RELEASE);
check_generations(ctx, A_2_1, B_5_1);
- assert_prio(ctx, A_2_1, 2);
assert_prio(ctx, B_5_1, 5);
request(ctx, B_5_1, REQ_MTX_RELEASE);
check_generations(ctx, B_5_1, NONE);
diff --git a/testsuites/smptests/smpscheduler03/init.c b/testsuites/smptests/smpscheduler03/init.c
index c114eb90be..50d4894859 100644
--- a/testsuites/smptests/smpscheduler03/init.c
+++ b/testsuites/smptests/smpscheduler03/init.c
@@ -40,28 +40,35 @@ typedef struct {
static test_context test_instance;
-static bool change_priority_filter(
- Thread_Control *thread,
- Priority_Control *new_priority,
- void *arg
+static void apply_priority(
+ Thread_Control *thread,
+ Priority_Control new_priority,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
)
{
- return _Thread_Get_priority( thread ) != *new_priority;
+ _Thread_queue_Context_clear_priority_updates(queue_context);
+ _Thread_Wait_acquire(thread, queue_context);
+ _Thread_Priority_change(
+ thread,
+ &thread->Real_priority,
+ new_priority,
+ prepend_it,
+ queue_context
+ );
+ _Thread_Wait_release(thread, queue_context);
}
static void change_priority(
- Thread_Control *thread,
- Priority_Control new_priority,
- bool prepend_it
+ Thread_Control *thread,
+ Priority_Control new_priority,
+ bool prepend_it
)
{
- _Thread_Change_priority(
- thread,
- new_priority,
- NULL,
- change_priority_filter,
- prepend_it
- );
+ Thread_queue_Context queue_context;
+
+ apply_priority(thread, new_priority, prepend_it, &queue_context);
+ _Thread_Priority_update(&queue_context);
}
static void barrier_wait(test_context *ctx)
@@ -197,11 +204,9 @@ static Thread_Control *update_priority_op(
ISR_lock_Context state_lock_context;
ISR_lock_Context scheduler_lock_context;
Thread_Control *needs_help;
- Scheduler_Node *node;
+ Thread_queue_Context queue_context;
- thread->current_priority = new_priority;
- node = _Scheduler_Thread_get_node(thread);
- _Scheduler_Node_set_priority(node, new_priority, prepend_it);
+ apply_priority(thread, new_priority, prepend_it, &queue_context);
_Thread_State_acquire( thread, &state_lock_context );
scheduler = _Scheduler_Get( thread );
diff --git a/testsuites/sptests/spmutex01/init.c b/testsuites/sptests/spmutex01/init.c
index c3633706bf..72e458f167 100644
--- a/testsuites/sptests/spmutex01/init.c
+++ b/testsuites/sptests/spmutex01/init.c
@@ -41,13 +41,14 @@ typedef enum {
REQ_MTX_0_OBTAIN = RTEMS_EVENT_2,
REQ_MTX_0_RELEASE = RTEMS_EVENT_3,
REQ_MTX_1_OBTAIN = RTEMS_EVENT_4,
- REQ_MTX_1_RELEASE = RTEMS_EVENT_5,
- REQ_MTX_2_OBTAIN = RTEMS_EVENT_6,
- REQ_MTX_2_RELEASE = RTEMS_EVENT_7,
- REQ_MTX_C11_OBTAIN = RTEMS_EVENT_8,
- REQ_MTX_C11_RELEASE = RTEMS_EVENT_9,
- REQ_MTX_POSIX_OBTAIN = RTEMS_EVENT_10,
- REQ_MTX_POSIX_RELEASE = RTEMS_EVENT_11
+ REQ_MTX_1_OBTAIN_TIMEOUT = RTEMS_EVENT_5,
+ REQ_MTX_1_RELEASE = RTEMS_EVENT_6,
+ REQ_MTX_2_OBTAIN = RTEMS_EVENT_7,
+ REQ_MTX_2_RELEASE = RTEMS_EVENT_8,
+ REQ_MTX_C11_OBTAIN = RTEMS_EVENT_9,
+ REQ_MTX_C11_RELEASE = RTEMS_EVENT_10,
+ REQ_MTX_POSIX_OBTAIN = RTEMS_EVENT_11,
+ REQ_MTX_POSIX_RELEASE = RTEMS_EVENT_12
} request_id;
typedef enum {
@@ -110,6 +111,14 @@ static void send_event(test_context *ctx, task_id id, rtems_event_set events)
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
+static void wait(void)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_wake_after(4);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
static rtems_event_set wait_for_events(void)
{
rtems_event_set events;
@@ -141,6 +150,14 @@ static void request(test_context *ctx, task_id id, request_id req)
sync_with_helper(ctx);
}
+static void obtain_timeout(test_context *ctx, mutex_id id)
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, 2);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+}
+
static void obtain(test_context *ctx, mutex_id id)
{
rtems_status_code sc;
@@ -249,6 +266,22 @@ static void assert_prio(
rtems_test_assert(expected == actual);
}
+static void change_prio(
+ test_context *ctx,
+ task_id id,
+ rtems_task_priority prio
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_priority(
+ ctx->tasks[id],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
static void helper(rtems_task_argument arg)
{
test_context *ctx = &test_instance;
@@ -283,6 +316,11 @@ static void worker(rtems_task_argument arg)
++ctx->generation[id];
}
+ if ((events & REQ_MTX_1_OBTAIN_TIMEOUT) != 0) {
+ obtain_timeout(ctx, MTX_1);
+ ++ctx->generation[id];
+ }
+
if ((events & REQ_MTX_1_RELEASE) != 0) {
release(ctx, MTX_1);
++ctx->generation[id];
@@ -370,10 +408,19 @@ static void set_up(test_context *ctx)
static void test_inherit(test_context *ctx)
{
+ assert_prio(ctx, M, 3);
obtain(ctx, MTX_0);
request(ctx, A_1, REQ_MTX_0_OBTAIN);
check_generations(ctx, NONE, NONE);
assert_prio(ctx, M, 1);
+ change_prio(ctx, A_1, 2);
+ assert_prio(ctx, M, 2);
+ change_prio(ctx, A_1, 3);
+ assert_prio(ctx, M, 3);
+ change_prio(ctx, A_1, 4);
+ assert_prio(ctx, M, 3);
+ change_prio(ctx, A_1, 1);
+ assert_prio(ctx, M, 1);
release(ctx, MTX_0);
check_generations(ctx, A_1, NONE);
assert_prio(ctx, M, 3);
@@ -383,6 +430,7 @@ static void test_inherit(test_context *ctx)
static void test_inherit_fifo_for_equal_priority(test_context *ctx)
{
+ assert_prio(ctx, M, 3);
obtain(ctx, MTX_0);
request(ctx, A_2_0, REQ_MTX_0_OBTAIN);
request(ctx, A_1, REQ_MTX_0_OBTAIN);
@@ -401,6 +449,82 @@ static void test_inherit_fifo_for_equal_priority(test_context *ctx)
check_generations(ctx, A_2_1, NONE);
}
+static void test_inherit_nested_vertical(test_context *ctx)
+{
+ assert_prio(ctx, M, 3);
+ obtain(ctx, MTX_0);
+ obtain(ctx, MTX_1);
+ request(ctx, A_1, REQ_MTX_1_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ assert_prio(ctx, M, 1);
+ release(ctx, MTX_1);
+ check_generations(ctx, A_1, NONE);
+ assert_prio(ctx, M, 3);
+ request(ctx, A_1, REQ_MTX_1_RELEASE);
+ check_generations(ctx, A_1, NONE);
+ release(ctx, MTX_0);
+}
+
+static void test_inherit_nested_vertical_timeout(test_context *ctx)
+{
+ assert_prio(ctx, M, 3);
+ obtain(ctx, MTX_0);
+ obtain(ctx, MTX_1);
+ request(ctx, A_1, REQ_MTX_1_OBTAIN_TIMEOUT);
+ check_generations(ctx, NONE, NONE);
+ assert_prio(ctx, M, 1);
+ wait();
+ check_generations(ctx, A_1, NONE);
+ assert_prio(ctx, M, 3);
+ release(ctx, MTX_1);
+ release(ctx, MTX_0);
+}
+
+static void test_inherit_nested_horizontal(test_context *ctx)
+{
+ assert_prio(ctx, M, 3);
+ obtain(ctx, MTX_0);
+ request(ctx, A_2_0, REQ_MTX_1_OBTAIN);
+ check_generations(ctx, A_2_0, NONE);
+ request(ctx, A_2_0, REQ_MTX_0_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ assert_prio(ctx, M, 2);
+ request(ctx, A_1, REQ_MTX_1_OBTAIN_TIMEOUT);
+ check_generations(ctx, NONE, NONE);
+ assert_prio(ctx, A_2_0, 1);
+ assert_prio(ctx, M, 1);
+ wait();
+ check_generations(ctx, A_1, NONE);
+ assert_prio(ctx, A_2_0, 2);
+ assert_prio(ctx, M, 2);
+ request(ctx, A_1, REQ_MTX_1_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ assert_prio(ctx, A_2_0, 1);
+ assert_prio(ctx, M, 1);
+ change_prio(ctx, A_1, 2);
+ assert_prio(ctx, M, 2);
+ change_prio(ctx, A_1, 3);
+ assert_prio(ctx, M, 2);
+ change_prio(ctx, A_2_0, 3);
+ assert_prio(ctx, M, 3);
+ change_prio(ctx, A_2_0, 2);
+ assert_prio(ctx, M, 2);
+ change_prio(ctx, A_1, 1);
+ assert_prio(ctx, M, 1);
+ release(ctx, MTX_0);
+ check_generations(ctx, A_2_0, NONE);
+ assert_prio(ctx, A_2_0, 1);
+ assert_prio(ctx, M, 3);
+ request(ctx, A_2_0, REQ_MTX_0_RELEASE);
+ check_generations(ctx, A_2_0, NONE);
+ assert_prio(ctx, A_2_0, 1);
+ request(ctx, A_2_0, REQ_MTX_1_RELEASE);
+ check_generations(ctx, A_1, A_2_0);
+ assert_prio(ctx, A_2_0, 2);
+ request(ctx, A_1, REQ_MTX_1_RELEASE);
+ check_generations(ctx, A_1, NONE);
+}
+
static void test_deadlock_two_classic(test_context *ctx)
{
obtain(ctx, MTX_0);
@@ -547,6 +671,9 @@ static void Init(rtems_task_argument arg)
set_up(ctx);
test_inherit(ctx);
test_inherit_fifo_for_equal_priority(ctx);
+ test_inherit_nested_vertical(ctx);
+ test_inherit_nested_vertical_timeout(ctx);
+ test_inherit_nested_horizontal(ctx);
test_deadlock_two_classic(ctx);
test_deadlock_three_classic(ctx);
test_deadlock_c11_and_classic(ctx);
diff --git a/testsuites/sptests/spsem03/init.c b/testsuites/sptests/spsem03/init.c
index e64a04d326..7f573e5110 100644
--- a/testsuites/sptests/spsem03/init.c
+++ b/testsuites/sptests/spsem03/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2014, 2016 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -88,20 +88,7 @@ static void obtain_sema(rtems_id id)
static void inversion_task(rtems_task_argument arg)
{
- test_context *ctx = &test_instance;
-
- /*
- * Here we see that the priority of the high priority task blocked on
- * semaphore B doesn't propagate to the low priority task owning semaphore A
- * on which the owner of semaphore B depends.
- */
- assert_prio(ctx->low, 3);
- assert_prio(ctx->mid, 1);
- assert_prio(ctx->high, 1);
- assert_prio(ctx->inversion, 2);
-
- TEST_END();
- rtems_test_exit(0);
+ assert(0);
}
static void mid_task(rtems_task_argument arg)
@@ -137,6 +124,19 @@ static void Init(rtems_task_argument arg)
obtain_sema(ctx->sem_a);
start_task(ctx->mid, mid_task);
start_task(ctx->high, high_task);
+
+ /*
+ * Here we see that the priority of the high priority task blocked on
+ * semaphore B propagated to the low priority task owning semaphore A
+ * on which the owner of semaphore B depends.
+ */
+ assert_prio(ctx->low, 1);
+ assert_prio(ctx->mid, 1);
+ assert_prio(ctx->high, 1);
+ assert_prio(ctx->inversion, 2);
+
+ TEST_END();
+ rtems_test_exit(0);
}
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
diff --git a/testsuites/sptests/spsem03/spsem03.doc b/testsuites/sptests/spsem03/spsem03.doc
index 0310965798..a13430e874 100644
--- a/testsuites/sptests/spsem03/spsem03.doc
+++ b/testsuites/sptests/spsem03/spsem03.doc
@@ -8,5 +8,5 @@ directives:
concepts:
- - Demonstrate that the priority inheritance protocol doesn't propagate to
- indirectly referenced semaphore owners.
+ - Demonstrate that the priority inheritance protocol works with indirectly
+ referenced semaphore owners.
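
The spsem03 change above reverses the documented outcome: with the priority
node aggregation, priority inheritance now reaches owners that are only
indirectly referenced through a chain of mutexes.  A minimal Classic API
sketch of that scenario follows; the helper names, task names and priority
values are illustrative and not part of the test:

#include <rtems.h>
#include <assert.h>

static rtems_id create_inherit_mutex( rtems_name name )
{
  rtems_id          id;
  rtems_status_code sc;

  /* A binary semaphore with priority inheritance, as used by the test */
  sc = rtems_semaphore_create(
    name,
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,
    &id
  );
  assert( sc == RTEMS_SUCCESSFUL );
  return id;
}

static rtems_task_priority current_priority( rtems_id task )
{
  rtems_task_priority prio;
  rtems_status_code   sc;

  /* RTEMS_CURRENT_PRIORITY queries the priority without changing it */
  sc = rtems_task_set_priority( task, RTEMS_CURRENT_PRIORITY, &prio );
  assert( sc == RTEMS_SUCCESSFUL );
  return prio;
}

/*
 * Scenario: task "low" (priority 3) holds mutex A; task "mid" (priority 2)
 * holds mutex B and blocks on A; task "high" (priority 1) blocks on B.
 * Previously, current_priority( low ) stayed at 3; with this patch it is
 * raised to 1, because the inheritance propagates through "mid" to the
 * indirectly referenced owner "low".
 */
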