Diffstat (limited to 'cpukit/score/include/rtems/score')
-rw-r--r--  cpukit/score/include/rtems/score/coremutex.h          |   4
-rw-r--r--  cpukit/score/include/rtems/score/coremuteximpl.h      | 146
-rw-r--r--  cpukit/score/include/rtems/score/mrsp.h               |  12
-rw-r--r--  cpukit/score/include/rtems/score/mrspimpl.h           | 255
-rw-r--r--  cpukit/score/include/rtems/score/priority.h           | 175
-rw-r--r--  cpukit/score/include/rtems/score/priorityimpl.h       | 435
-rw-r--r--  cpukit/score/include/rtems/score/scheduler.h          |  28
-rw-r--r--  cpukit/score/include/rtems/score/schedulercbs.h       |  17
-rw-r--r--  cpukit/score/include/rtems/score/scheduleredf.h       |  26
-rw-r--r--  cpukit/score/include/rtems/score/scheduleredfimpl.h   |  14
-rw-r--r--  cpukit/score/include/rtems/score/schedulerimpl.h      |  86
-rw-r--r--  cpukit/score/include/rtems/score/schedulernode.h      |  21
-rw-r--r--  cpukit/score/include/rtems/score/schedulernodeimpl.h  |  19
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h   |  11
-rw-r--r--  cpukit/score/include/rtems/score/thread.h             |  53
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h         | 261
-rw-r--r--  cpukit/score/include/rtems/score/threadq.h            | 117
-rw-r--r--  cpukit/score/include/rtems/score/threadqimpl.h        | 145
18 files changed, 1241 insertions, 584 deletions
diff --git a/cpukit/score/include/rtems/score/coremutex.h b/cpukit/score/include/rtems/score/coremutex.h
index a3dcabf523..d2bf1c21f6 100644
--- a/cpukit/score/include/rtems/score/coremutex.h
+++ b/cpukit/score/include/rtems/score/coremutex.h
@@ -82,9 +82,9 @@ typedef struct {
CORE_recursive_mutex_Control Recursive;
/**
- * @brief The priority ceiling value for the mutex owner.
+ * @brief The priority ceiling node for the mutex owner.
*/
- Priority_Control priority_ceiling;
+ Priority_Node Priority_ceiling;
#if defined(RTEMS_SMP)
/**
diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
index 25094a49df..a30d1a880e 100644
--- a/cpukit/score/include/rtems/score/coremuteximpl.h
+++ b/cpukit/score/include/rtems/score/coremuteximpl.h
@@ -124,32 +124,6 @@ RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
return _CORE_mutex_Get_owner( the_mutex ) == the_thread;
}
-RTEMS_INLINE_ROUTINE void _CORE_mutex_Restore_priority(
- Thread_Control *executing
-)
-{
- /*
- * Whether or not someone is waiting for the mutex, an
- * inherited priority must be lowered if this is the last
- * mutex (i.e. resource) this task has.
- */
- if ( !_Thread_Owns_resources( executing ) ) {
- /*
- * Ensure that the executing resource count is visible to all other
- * processors and that we read the latest priority restore hint.
- */
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
-
- if ( executing->priority_restore_hint ) {
- Per_CPU_Control *cpu_self;
-
- cpu_self = _Thread_Dispatch_disable();
- _Thread_Restore_priority( executing );
- _Thread_Dispatch_enable( cpu_self );
- }
- }
-}
-
RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Initialize(
CORE_recursive_mutex_Control *the_mutex
)
@@ -212,7 +186,6 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
{
unsigned int nest_level;
Thread_queue_Heads *heads;
- bool keep_priority;
_CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
@@ -232,29 +205,19 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender(
--executing->resource_count;
_CORE_mutex_Set_owner( &the_mutex->Mutex, NULL );
- /*
- * Ensure that the owner resource count is visible to all other
- * processors and that we read the latest priority restore
- * hint.
- */
- _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
-
heads = the_mutex->Mutex.Wait_queue.Queue.heads;
- keep_priority = _Thread_Owns_resources( executing )
- || !executing->priority_restore_hint;
- if ( heads == NULL && keep_priority ) {
+ if ( heads == NULL ) {
_CORE_mutex_Release( &the_mutex->Mutex, queue_context );
return STATUS_SUCCESSFUL;
}
_Thread_queue_Surrender(
&the_mutex->Mutex.Wait_queue.Queue,
- CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS,
heads,
executing,
- keep_priority,
- queue_context
+ queue_context,
+ CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS
);
return STATUS_SUCCESSFUL;
}
@@ -349,7 +312,7 @@ RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
)
{
_CORE_recursive_mutex_Initialize( &the_mutex->Recursive );
- the_mutex->priority_ceiling = priority_ceiling;
+ _Priority_Node_initialize( &the_mutex->Priority_ceiling, priority_ceiling );
#if defined(RTEMS_SMP)
the_mutex->scheduler = scheduler;
#endif
@@ -369,17 +332,34 @@ _CORE_ceiling_mutex_Get_scheduler(
RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
CORE_ceiling_mutex_Control *the_mutex,
- Priority_Control priority_ceiling
+ Priority_Control priority_ceiling,
+ Thread_queue_Context *queue_context
)
{
- the_mutex->priority_ceiling = priority_ceiling;
+ Thread_Control *owner;
+
+ owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
+
+ if ( owner != NULL ) {
+ _Thread_Wait_acquire( owner, queue_context );
+ _Thread_Priority_change(
+ owner,
+ &the_mutex->Priority_ceiling,
+ priority_ceiling,
+ false,
+ queue_context
+ );
+ _Thread_Wait_release( owner, queue_context );
+ } else {
+ the_mutex->Priority_ceiling.priority = priority_ceiling;
+ }
}
RTEMS_INLINE_ROUTINE Priority_Control _CORE_ceiling_mutex_Get_priority(
const CORE_ceiling_mutex_Control *the_mutex
)
{
- return the_mutex->priority_ceiling;
+ return the_mutex->Priority_ceiling.priority;
}
RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
@@ -388,31 +368,38 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Set_owner(
Thread_queue_Context *queue_context
)
{
- Priority_Control priority_ceiling;
- Priority_Control current_priority;
+ ISR_lock_Context lock_context;
+ Scheduler_Node *own_node;
Per_CPU_Control *cpu_self;
- priority_ceiling = the_mutex->priority_ceiling;
- current_priority = _Thread_Get_priority( owner );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( owner, &lock_context );
- if ( current_priority < priority_ceiling ) {
+ own_node = _Thread_Scheduler_get_own_node( owner );
+
+ if (
+ own_node->Wait.Priority.Node.priority
+ < the_mutex->Priority_ceiling.priority
+ ) {
+ _Thread_Wait_release_default_critical( owner, &lock_context );
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
return STATUS_MUTEX_CEILING_VIOLATED;
}
_CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, owner );
++owner->resource_count;
-
- if ( current_priority == priority_ceiling ) {
- _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
- return STATUS_SUCCESSFUL;
- }
+ _Thread_Priority_add(
+ owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( owner, &lock_context );
cpu_self = _Thread_Dispatch_disable_critical(
&queue_context->Lock_context.Lock_context
);
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
- _Thread_Raise_priority( owner, priority_ceiling );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
@@ -472,8 +459,10 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
Thread_queue_Context *queue_context
)
{
- unsigned int nest_level;
- Thread_Control *new_owner;
+ unsigned int nest_level;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *new_owner;
_CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
@@ -492,47 +481,50 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
--executing->resource_count;
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( executing, &lock_context );
+ _Thread_Priority_remove(
+ executing,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
+ _Thread_Wait_release_default_critical( executing, &lock_context );
+
new_owner = _Thread_queue_First_locked(
&the_mutex->Recursive.Mutex.Wait_queue,
CORE_MUTEX_TQ_OPERATIONS
);
_CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, new_owner );
- if ( new_owner != NULL ) {
- bool unblock;
-
- /*
- * We must extract the thread now since this will restore its default
- * thread lock. This is necessary to avoid a deadlock in the
- * _Thread_Change_priority() below due to a recursive thread queue lock
- * acquire.
- */
- unblock = _Thread_queue_Extract_locked(
- &the_mutex->Recursive.Mutex.Wait_queue.Queue,
- CORE_MUTEX_TQ_OPERATIONS,
- new_owner,
- queue_context
- );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ if ( new_owner != NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
{
++new_owner->resource_count;
- _Thread_Raise_priority( new_owner, the_mutex->priority_ceiling );
+ _Thread_Priority_add(
+ new_owner,
+ &the_mutex->Priority_ceiling,
+ queue_context
+ );
}
- _Thread_queue_Unblock_critical(
- unblock,
+ _Thread_queue_Extract_critical(
&the_mutex->Recursive.Mutex.Wait_queue.Queue,
+ CORE_MUTEX_TQ_OPERATIONS,
new_owner,
- &queue_context->Lock_context.Lock_context
+ queue_context
);
} else {
_CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
}
- _CORE_mutex_Restore_priority( executing );
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
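
The ceiling mutex rework above is an early user of the new thread priority node protocol; the _Thread_Priority_add(), _Thread_Priority_remove() and _Thread_Priority_update() declarations appear in the threadimpl.h part of this patch. The following condensed sketch is not part of the patch: it only illustrates the locking order used above, with the object-specific lock release elided and the function name invented for illustration.

#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>

/*
 * "contribution" is a Priority_Node owned by the synchronization object,
 * for example the new CORE_ceiling_mutex_Control::Priority_ceiling above.
 */
static void _Example_contribute_priority(
  Thread_Control       *owner,
  Priority_Node        *contribution,
  Thread_queue_Context *queue_context
)
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  /* Record the contribution while holding the default thread wait lock. */
  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_acquire_default_critical( owner, &lock_context );
  _Thread_Priority_add( owner, contribution, queue_context );
  _Thread_Wait_release_default_critical( owner, &lock_context );

  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );

  /* ... release the object-specific lock here, e.g. _CORE_mutex_Release() ... */

  /* Carry out the scheduler updates collected in the queue context. */
  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_enable( cpu_self );
}
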
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
index 08a2427492..5af3a06901 100644
--- a/cpukit/score/include/rtems/score/mrsp.h
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -80,12 +80,9 @@ typedef struct {
Thread_Control *thread;
/**
- * @brief The initial priority of the thread at the begin of the resource
- * obtain sequence.
- *
- * Used to restore the priority after a release of this resource or timeout.
+ * @brief The ceiling priority used by the rival thread.
*/
- Priority_Control initial_priority;
+ Priority_Node Ceiling_priority;
/**
* @brief The initial help state of the thread at the begin of the resource
@@ -135,10 +132,9 @@ struct MRSP_Control {
Chain_Control Rivals;
/**
- * @brief The initial priority of the owner before it was elevated to the
- * ceiling priority.
+ * @brief The ceiling priority used by the owner thread.
*/
- Priority_Control initial_priority_of_owner;
+ Priority_Node Ceiling_priority;
/**
* @brief One ceiling priority per scheduler instance.
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index cc00aa3cfd..deaacf20a0 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -77,53 +77,115 @@ RTEMS_INLINE_ROUTINE void _MRSP_Release(
_Thread_queue_Release( &mrsp->Wait_queue, queue_context );
}
-RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter(
- Thread_Control *thread,
- Priority_Control *new_priority,
- void *arg
+RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
+ const MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler
)
{
- *new_priority = _Thread_Priority_highest(
- thread->real_priority,
- *new_priority
- );
+ uint32_t scheduler_index;
- return *new_priority != _Thread_Get_priority( thread );
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ return mrsp->ceiling_priorities[ scheduler_index ];
}
-RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority(
- Thread_Control *thread,
- Priority_Control initial_priority
+RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
+ MRSP_Control *mrsp,
+ const Scheduler_Control *scheduler,
+ Priority_Control new_priority
)
{
- /*
- * The Thread_Control::resource_count is used by the normal priority ceiling
- * or priority inheritance semaphores.
- */
- if ( thread->resource_count == 0 ) {
- _Thread_Change_priority(
- thread,
- initial_priority,
- NULL,
- _MRSP_Restore_priority_filter,
- true
- );
+ uint32_t scheduler_index;
+
+ scheduler_index = _Scheduler_Get_index( scheduler );
+ mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
+}
+
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Raise_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ Status_Control status;
+ ISR_lock_Context lock_context;
+ const Scheduler_Control *scheduler;
+ Priority_Control ceiling_priority;
+ Scheduler_Node *own_node;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+
+ scheduler = _Scheduler_Get_own( thread );
+ own_node = _Thread_Scheduler_get_own_node( thread );
+ ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
+
+ if ( ceiling_priority <= own_node->Wait.Priority.Node.priority ) {
+ _Priority_Node_initialize( priority_node, ceiling_priority );
+ _Thread_Priority_add( thread, priority_node, queue_context );
+ status = STATUS_SUCCESSFUL;
+ } else {
+ status = STATUS_MUTEX_CEILING_VIOLATED;
}
+
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+ return status;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
+ Thread_Control *thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Priority_remove( thread, priority_node, queue_context );
+ _Thread_Wait_release_default_critical( thread, &lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
+ MRSP_Control *mrsp,
+ Thread_Control *thread,
+ MRSP_Rival *rival
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Priority_replace(
+ thread,
+ &rival->Ceiling_priority,
+ &mrsp->Ceiling_priority
+ );
+ _Thread_Wait_release_default_critical( thread, &lock_context );
}
-RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
+RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
MRSP_Control *mrsp,
Thread_Control *new_owner,
- Priority_Control initial_priority,
- Priority_Control ceiling_priority,
Thread_queue_Context *queue_context
)
{
+ Status_Control status;
Per_CPU_Control *cpu_self;
+ status = _MRSP_Raise_priority(
+ mrsp,
+ new_owner,
+ &mrsp->Ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
_Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
_Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- mrsp->initial_priority_of_owner = initial_priority;
_Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
cpu_self = _Thread_Dispatch_disable_critical(
@@ -131,9 +193,10 @@ RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
);
_MRSP_Release( mrsp, queue_context );
- _Thread_Raise_priority( new_owner, ceiling_priority );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
@@ -178,35 +241,16 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
return STATUS_SUCCESSFUL;
}
-RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
- const MRSP_Control *mrsp,
- const Scheduler_Control *scheduler
-)
-{
- uint32_t scheduler_index;
-
- scheduler_index = _Scheduler_Get_index( scheduler );
- return mrsp->ceiling_priorities[ scheduler_index ];
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Set_priority(
- MRSP_Control *mrsp,
- const Scheduler_Control *scheduler,
- Priority_Control new_priority
-)
-{
- uint32_t scheduler_index;
-
- scheduler_index = _Scheduler_Get_index( scheduler );
- mrsp->ceiling_priorities[ scheduler_index ] = new_priority;
-}
-
RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
{
- MRSP_Rival *rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
- MRSP_Control *mrsp = rival->resource;
- Thread_Control *thread = rival->thread;
- Thread_queue_Context queue_context;
+ MRSP_Rival *rival;
+ MRSP_Control *mrsp;
+ Thread_Control *thread;
+ Thread_queue_Context queue_context;
+
+ rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
+ mrsp = rival->resource;
+ thread = rival->thread;
_Thread_queue_Context_initialize( &queue_context );
_ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
@@ -215,6 +259,8 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
ISR_lock_Context giant_lock_context;
+ _MRSP_Remove_priority( thread, &rival->Ceiling_priority, &queue_context );
+
_MRSP_Giant_acquire( &giant_lock_context );
_Chain_Extract_unprotected( &rival->Node );
@@ -228,6 +274,8 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
rival->status = STATUS_TIMEOUT;
_MRSP_Release( mrsp, &queue_context );
+
+ _Thread_Priority_update( &queue_context );
} else {
_MRSP_Release( mrsp, &queue_context );
}
@@ -237,23 +285,34 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
MRSP_Control *mrsp,
Resource_Node *owner,
Thread_Control *executing,
- Priority_Control initial_priority,
- Priority_Control ceiling_priority,
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- MRSP_Rival rival;
- Thread_Life_state life_state;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
- ISR_Level level;
- Watchdog_Interval timeout = queue_context->timeout;
+ Status_Control status;
+ MRSP_Rival rival;
+ Thread_Life_state life_state;
+ Per_CPU_Control *cpu_self;
+ ISR_lock_Context giant_lock_context;
+ ISR_Level level;
+ Watchdog_Interval timeout;
+
_Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );
+ status = _MRSP_Raise_priority(
+ mrsp,
+ executing,
+ &rival.Ceiling_priority,
+ queue_context
+ );
+
+ if ( status != STATUS_SUCCESSFUL ) {
+ _MRSP_Release( mrsp, queue_context );
+ return status;
+ }
+
rival.thread = executing;
rival.resource = mrsp;
- rival.initial_priority = initial_priority;
+ _Chain_Initialize_node( &rival.Node );
_MRSP_Giant_acquire( &giant_lock_context );
@@ -277,7 +336,9 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
);
_MRSP_Release( mrsp, queue_context );
- _Thread_Raise_priority( executing, ceiling_priority );
+ _Thread_Priority_update( queue_context );
+
+ timeout = (Watchdog_Interval) queue_context->timeout;
if ( timeout > 0 ) {
_Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
@@ -307,10 +368,6 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
&cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
);
_ISR_Local_enable( level );
-
- if ( status == STATUS_TIMEOUT ) {
- _MRSP_Restore_priority( executing, initial_priority );
- }
}
return status;
@@ -323,44 +380,20 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
- Priority_Control initial_priority = _Thread_Get_priority( executing );
- Priority_Control ceiling_priority = _MRSP_Get_priority( mrsp, scheduler );
- bool priority_ok = !_Thread_Priority_less_than(
- ceiling_priority,
- initial_priority
- );
- Resource_Node *owner;
-
- if ( !priority_ok) {
- _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
- return STATUS_MUTEX_CEILING_VIOLATED;
- }
+ Status_Control status;
+ Resource_Node *owner;
_MRSP_Acquire_critical( mrsp, queue_context );
+
owner = _Resource_Get_owner( &mrsp->Resource );
+
if ( owner == NULL ) {
- _MRSP_Claim_ownership(
- mrsp,
- executing,
- initial_priority,
- ceiling_priority,
- queue_context
- );
- status = STATUS_SUCCESSFUL;
+ status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
} else if (
wait
&& _Resource_Node_get_root( owner ) != &executing->Resource_node
) {
- status = _MRSP_Wait_for_ownership(
- mrsp,
- owner,
- executing,
- initial_priority,
- ceiling_priority,
- queue_context
- );
+ status = _MRSP_Wait_for_ownership( mrsp, owner, executing, queue_context );
} else {
_MRSP_Release( mrsp, queue_context );
/* Not available, nested access or deadlock */
@@ -376,9 +409,8 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
Thread_queue_Context *queue_context
)
{
- Priority_Control initial_priority;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
+ ISR_lock_Context giant_lock_context;
+ Per_CPU_Control *cpu_self;
if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
_ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
@@ -395,10 +427,8 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
return STATUS_RELEASE_ORDER_VIOLATION;
}
- initial_priority = mrsp->initial_priority_of_owner;
-
_MRSP_Acquire_critical( mrsp, queue_context );
-
+ _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
_MRSP_Giant_acquire( &giant_lock_context );
_Resource_Extract( &mrsp->Resource );
@@ -406,10 +436,11 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
_Resource_Set_owner( &mrsp->Resource, NULL );
} else {
- MRSP_Rival *rival = (MRSP_Rival *)
- _Chain_Get_first_unprotected( &mrsp->Rivals );
+ MRSP_Rival *rival;
Thread_Control *new_owner;
+ rival = (MRSP_Rival *) _Chain_Get_first_unprotected( &mrsp->Rivals );
+
/*
* This must be inside the critical section since the status prevents a
* potential double extraction in _MRSP_Timeout().
@@ -417,7 +448,9 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
rival->status = STATUS_SUCCESSFUL;
new_owner = rival->thread;
- mrsp->initial_priority_of_owner = rival->initial_priority;
+
+ _MRSP_Replace_priority( mrsp, new_owner, rival );
+
_Resource_Node_extract( &new_owner->Resource_node );
_Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
_Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
@@ -437,7 +470,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
);
_MRSP_Release( mrsp, queue_context );
- _MRSP_Restore_priority( executing, initial_priority );
+ _Thread_Priority_update( queue_context );
_Thread_Dispatch_enable( cpu_self );
diff --git a/cpukit/score/include/rtems/score/priority.h b/cpukit/score/include/rtems/score/priority.h
index 842f01706c..595aa3ebcf 100644
--- a/cpukit/score/include/rtems/score/priority.h
+++ b/cpukit/score/include/rtems/score/priority.h
@@ -1,17 +1,15 @@
/**
- * @file rtems/score/priority.h
+ * @file
*
- * @brief Thread Priority Manipulation Routines
- *
- * This include file contains all thread priority manipulation routines.
- * This Handler provides mechanisms which can be used to
- * initialize and manipulate thread priorities.
+ * @brief Priority Handler API
*/
/*
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2016 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
@@ -20,58 +18,169 @@
#ifndef _RTEMS_SCORE_PRIORITY_H
#define _RTEMS_SCORE_PRIORITY_H
-/**
- * @defgroup ScorePriority Priority Handler
- *
- * @ingroup Score
- *
- * This handler encapsulates functionality which is used to manage
- * thread priorities. At the SuperCore level 256 priority levels
- * are supported with lower numbers representing logically more important
- * threads. The priority level 0 is reserved for internal RTEMS use.
- * Typically it is assigned to threads which defer internal RTEMS
- * actions from an interrupt to thread level to improve interrupt response.
- * Priority level 255 is assigned to the IDLE thread and really should not
- * be used by application threads. The default IDLE thread implementation
- * is an infinite "branch to self" loop which never yields to other threads
- * at the same priority.
- */
-/**@{*/
-
-/*
- * Processor specific information.
- */
+#include <rtems/score/chain.h>
#include <rtems/score/cpu.h>
+#include <rtems/score/rbtree.h>
+
+struct Scheduler_Control;
#ifdef __cplusplus
extern "C" {
#endif
/**
- * The following type defines the control block used to manage
- * thread priorities.
+ * @defgroup ScorePriority Priority Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality which is used to manage thread
+ * priorities. The actual priority of a thread is an aggregation of priority
+ * nodes. The thread priority aggregation for the home scheduler instance of a
+ * thread consists of at least one priority node, which is normally the real
+ * priority of the thread. The locking protocols (e.g. priority ceiling and
+ * priority inheritance), rate-monotonic period objects and the POSIX sporadic
+ * server add, change and remove priority nodes.
+ *
+ * @{
+ */
+
+/**
+ * @brief A plain thread priority value.
*
- * @note Priority 0 is reserved for internal threads only.
+ * Lower values represent higher priorities. So, a priority value of zero
+ * represents the highest priority thread. This value is reserved for internal
+ * threads and the priority ceiling protocol.
*/
typedef uint64_t Priority_Control;
-/** This defines the highest (most important) thread priority. */
+/**
+ * @brief The highest (most important) thread priority value.
+ */
#define PRIORITY_MINIMUM 0
/**
- * @brief This defines the priority of pseudo-ISR threads.
+ * @brief The priority value of pseudo-ISR threads.
*
* Examples are the MPCI and timer server threads.
*/
#define PRIORITY_PSEUDO_ISR PRIORITY_MINIMUM
-/** This defines the default lowest (least important) thread priority. */
+/**
+ * @brief The default lowest (least important) thread priority value.
+ *
+ * This value is CPU port dependent.
+ */
#if defined (CPU_PRIORITY_MAXIMUM)
#define PRIORITY_DEFAULT_MAXIMUM CPU_PRIORITY_MAXIMUM
#else
#define PRIORITY_DEFAULT_MAXIMUM 255
#endif
+/**
+ * @brief The priority node to build up a priority aggregation.
+ */
+typedef struct {
+ /**
+ * @brief Node component for a chain or red-black tree.
+ */
+ union {
+ Chain_Node Chain;
+ RBTree_Node RBTree;
+ } Node;
+
+ /**
+ * @brief The priority value of this node.
+ */
+ Priority_Control priority;
+} Priority_Node;
+
+/**
+ * @brief The priority action type.
+ */
+typedef enum {
+ PRIORITY_ACTION_ADD,
+ PRIORITY_ACTION_CHANGE,
+ PRIORITY_ACTION_REMOVE,
+ PRIORITY_ACTION_INVALID
+} Priority_Action_type;
+
+typedef struct Priority_Aggregation Priority_Aggregation;
+
+/**
+ * @brief The priority aggregation.
+ *
+ * This structure serves two purposes. Firstly, it provides a place to
+ * register priority nodes and reflects the overall priority of its
+ * contributors. Secondly, it provides an action block to signal addition,
+ * change and removal of a priority node.
+ */
+struct Priority_Aggregation {
+ /**
+ * @brief This priority node reflects the overall priority of the aggregation.
+ *
+ * The overall priority of the aggregation is the minimum priority of the
+ * priority nodes in the contributors tree.
+ *
+ * This priority node may be used to add this aggregation to another
+ * aggregation to build up a recursive priority scheme.
+ *
+ * In case priority nodes of the contributors tree are added, changed or
+ * removed the priority of this node may change. To signal such changes to a
+ * priority aggregation the action block may be used.
+ */
+ Priority_Node Node;
+
+ /**
+ * @brief A red-black tree to contain priority nodes contributing to the
+ * overall priority of this priority aggregation.
+ */
+ RBTree_Control Contributors;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The scheduler instance of this priority aggregation.
+ */
+ const struct Scheduler_Control *scheduler;
+#endif
+
+ /**
+ * @brief A priority action block to manage priority node additions, changes
+ * and removals.
+ */
+ struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief The next priority aggregation in the action list.
+ */
+ Priority_Aggregation *next;
+#endif
+
+ /**
+ * @brief The priority node of the action.
+ */
+ Priority_Node *node;
+
+ /**
+ * @brief The type of the action.
+ */
+ Priority_Action_type type;
+ } Action;
+};
+
+/**
+ * @brief A list of priority actions.
+ *
+ * Actions are only added to the list. The action lists reside on the stack
+ * and have a short life-time. They are moved, processed or destroyed as a
+ * whole.
+ */
+typedef struct {
+ /**
+ * @brief The first action of a priority action list.
+ */
+ Priority_Aggregation *actions;
+} Priority_Actions;
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/include/rtems/score/priorityimpl.h b/cpukit/score/include/rtems/score/priorityimpl.h
new file mode 100644
index 0000000000..3380983cb7
--- /dev/null
+++ b/cpukit/score/include/rtems/score/priorityimpl.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PRIORITYIMPL_H
+#define _RTEMS_SCORE_PRIORITYIMPL_H
+
+#include <rtems/score/priority.h>
+#include <rtems/score/scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_empty(
+ Priority_Actions *actions
+)
+{
+ actions->actions = NULL;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_initialize_one(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_empty(
+ const Priority_Actions *actions
+)
+{
+ return actions->actions == NULL;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Actions_is_valid(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation != NULL;
+#else
+ (void) aggregation;
+ return false;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Actions_move(
+ Priority_Actions *actions
+)
+{
+ Priority_Aggregation *aggregation;
+
+ aggregation = actions->actions;
+ actions->actions = NULL;
+
+ return aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Actions_add(
+ Priority_Actions *actions,
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ /*
+ * Priority aggregations are only added to action lists, so do not care about
+ * the current next pointer value.
+ */
+ aggregation->Action.next = actions->actions;
+#endif
+ actions->actions = aggregation;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_initialize(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+ _RBTree_Initialize_node( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ node->priority = priority;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Node_set_inactive(
+ Priority_Node *node
+)
+{
+ _RBTree_Set_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Node_is_active(
+ const Priority_Node *node
+)
+{
+ return !_RBTree_Is_node_off_tree( &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_empty(
+ Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _RBTree_Initialize_node( &aggregation->Node.Node.RBTree );
+ _RBTree_Initialize_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+#if defined(RTEMS_DEBUG)
+#if defined(RTEMS_SMP)
+ aggregation->Action.next = NULL;
+#endif
+ aggregation->Action.node = NULL;
+ aggregation->Action.type = PRIORITY_ACTION_INVALID;
+#endif
+ _Priority_Node_initialize( &aggregation->Node, node->priority );
+ _RBTree_Initialize_one( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(
+ const Priority_Aggregation *aggregation
+)
+{
+ return _RBTree_Is_empty( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _Priority_Get_priority(
+ const Priority_Aggregation *aggregation
+)
+{
+ return aggregation->Node.priority;
+}
+
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Priority_Get_scheduler(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->scheduler;
+#else
+ return &_Scheduler_Table[ 0 ];
+#endif
+}
+
+RTEMS_INLINE_ROUTINE Priority_Node *_Priority_Get_minimum_node(
+ const Priority_Aggregation *aggregation
+)
+{
+ return (Priority_Node *) _RBTree_Minimum( &aggregation->Contributors );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_node(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ aggregation->Action.node = node;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action_type(
+ Priority_Aggregation *aggregation,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Set_action(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Action_type type
+)
+{
+ aggregation->Action.node = node;
+ aggregation->Action.type = type;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Aggregation *_Priority_Get_next_action(
+ const Priority_Aggregation *aggregation
+)
+{
+#if defined(RTEMS_SMP)
+ return aggregation->Action.next;
+#else
+ (void) aggregation;
+ return NULL;
+#endif
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Less(
+ const void *left,
+ const RBTree_Node *right
+)
+{
+ const Priority_Control *the_left;
+ const Priority_Node *the_right;
+
+ the_left = left;
+ the_right = RTEMS_CONTAINER_OF( right, Priority_Node, Node.RBTree );
+
+ return *the_left < the_right->priority;
+}
+
+RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Control priority
+)
+{
+ return _RBTree_Insert_inline(
+ &aggregation->Contributors,
+ &node->Node.RBTree,
+ &priority,
+ _Priority_Less
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _RBTree_Extract( &aggregation->Contributors, &node->Node.RBTree );
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Plain_changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+ _Priority_Plain_insert( aggregation, node, node->priority );
+}
+
+typedef void ( *Priority_Add_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Change_handler )(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+);
+
+typedef void ( *Priority_Remove_handler )(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+);
+
+RTEMS_INLINE_ROUTINE void _Priority_Change_nothing(
+ Priority_Aggregation *aggregation,
+ bool prepend_it,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) prepend_it;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Remove_nothing(
+ Priority_Aggregation *aggregation,
+ Priority_Actions *actions,
+ void *arg
+)
+{
+ (void) aggregation;
+ (void) actions;
+ (void) arg;
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Non_empty_insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ bool is_new_minimum;
+
+ _Assert( !_Priority_Is_empty( aggregation ) );
+ is_new_minimum = _Priority_Plain_insert( aggregation, node, node->priority );
+
+ if ( is_new_minimum ) {
+ aggregation->Node.priority = node->priority;
+ ( *change )( aggregation, false, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Insert(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Add_handler add,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ if ( _Priority_Is_empty( aggregation ) ) {
+ _Priority_Initialize_one( aggregation, node );
+ ( *add )( aggregation, actions, arg );
+ } else {
+ _Priority_Non_empty_insert( aggregation, node, actions, change, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Remove_handler remove,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ _Priority_Plain_extract( aggregation, node );
+
+ if ( _Priority_Is_empty( aggregation ) ) {
+ ( *remove )( aggregation, actions, arg );
+ } else {
+ Priority_Node *min;
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Extract_non_empty(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_extract( aggregation, node );
+ _Assert( !_Priority_Is_empty( aggregation ) );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( node->priority < min->priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, true, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Changed(
+ Priority_Aggregation *aggregation,
+ Priority_Node *node,
+ bool prepend_it,
+ Priority_Actions *actions,
+ Priority_Change_handler change,
+ void *arg
+)
+{
+ Priority_Node *min;
+
+ _Priority_Plain_changed( aggregation, node );
+
+ min = _Priority_Get_minimum_node( aggregation );
+
+ if ( min->priority != aggregation->Node.priority ) {
+ aggregation->Node.priority = min->priority;
+ ( *change )( aggregation, prepend_it, actions, arg );
+ }
+}
+
+RTEMS_INLINE_ROUTINE void _Priority_Replace(
+ Priority_Aggregation *aggregation,
+ Priority_Node *victim,
+ Priority_Node *replacement
+)
+{
+ replacement->priority = victim->priority;
+ _RBTree_Replace_node(
+ &aggregation->Contributors,
+ &victim->Node.RBTree,
+ &replacement->Node.RBTree
+ );
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PRIORITYIMPL_H */
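
The new Priority Handler replaces the single current/real priority pair of a thread with an aggregation of priority nodes whose minimum is the effective priority. The following standalone sketch is not part of the patch; it uses only the inline routines added above, with arbitrary priority values and the no-op change handler instead of the scheduler-specific handlers used by the actual patch.

#include <rtems/score/priorityimpl.h>
#include <rtems/score/assert.h>

static void _Example_priority_aggregation( void )
{
  Priority_Aggregation aggregation;
  Priority_Node        real_priority;
  Priority_Node        ceiling;
  Priority_Actions     actions;

  _Priority_Actions_initialize_empty( &actions );

  /* The real priority of a thread is the first contributor. */
  _Priority_Node_initialize( &real_priority, 10 );
  _Priority_Initialize_one( &aggregation, &real_priority );
  _Assert( _Priority_Get_priority( &aggregation ) == 10 );

  /* A locking protocol contributes a more important (lower) priority. */
  _Priority_Node_initialize( &ceiling, 5 );
  _Priority_Non_empty_insert(
    &aggregation,
    &ceiling,
    &actions,
    _Priority_Change_nothing,
    NULL
  );

  /* The aggregation reflects the minimum of its contributors. */
  _Assert( _Priority_Get_priority( &aggregation ) == 5 );

  /* Removing the contribution makes the real priority the minimum again. */
  _Priority_Extract_non_empty(
    &aggregation,
    &ceiling,
    &actions,
    _Priority_Change_nothing,
    NULL
  );
  _Assert( _Priority_Get_priority( &aggregation ) == 10 );

  /* The no-op handlers never append to the action list. */
  _Assert( _Priority_Actions_is_empty( &actions ) );
}

In the patch itself the add, change and remove handlers propagate a new minimum to the owning scheduler node or to an enclosing aggregation via the Priority_Actions list, instead of the no-op handler used in this sketch.
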
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index bbb4e37240..b934269188 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -135,16 +135,20 @@ typedef struct {
void ( *node_destroy )( const Scheduler_Control *, Scheduler_Node * );
/** @see _Scheduler_Release_job() */
- Thread_Control *( *release_job ) (
+ void ( *release_job ) (
const Scheduler_Control *,
Thread_Control *,
- uint64_t
+ Priority_Node *,
+ uint64_t,
+ Thread_queue_Context *
);
/** @see _Scheduler_Cancel_job() */
- Thread_Control *( *cancel_job ) (
+ void ( *cancel_job ) (
const Scheduler_Control *,
- Thread_Control *
+ Thread_Control *,
+ Priority_Node *,
+ Thread_queue_Context *
);
/** @see _Scheduler_Tick() */
@@ -373,14 +377,18 @@ void _Scheduler_default_Node_destroy(
*
* @param[in] scheduler Unused.
* @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
* @param[in] deadline Unused.
+ * @param[in] queue_context Unused.
*
* @retval NULL Always.
*/
-Thread_Control *_Scheduler_default_Release_job(
+void _Scheduler_default_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
);
/**
@@ -388,12 +396,16 @@ Thread_Control *_Scheduler_default_Release_job(
*
* @param[in] scheduler Unused.
* @param[in] the_thread Unused.
+ * @param[in] priority_node Unused.
+ * @param[in] queue_context Unused.
*
* @retval NULL Always.
*/
-Thread_Control *_Scheduler_default_Cancel_job(
+void _Scheduler_default_Cancel_job(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
/**
diff --git a/cpukit/score/include/rtems/score/schedulercbs.h b/cpukit/score/include/rtems/score/schedulercbs.h
index c230e08d24..02c7b53e12 100644
--- a/cpukit/score/include/rtems/score/schedulercbs.h
+++ b/cpukit/score/include/rtems/score/schedulercbs.h
@@ -61,7 +61,7 @@ extern "C" {
_Scheduler_CBS_Node_initialize, /* node initialize entry point */ \
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_CBS_Release_job, /* new period of task */ \
- _Scheduler_EDF_Cancel_job, /* cancel period of task */ \
+ _Scheduler_CBS_Cancel_job, /* cancel period of task */ \
_Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
@@ -135,6 +135,8 @@ typedef struct {
Scheduler_EDF_Node Base;
/** CBS server specific data of a task. */
Scheduler_CBS_Server *cbs_server;
+
+ Priority_Node *deadline_node;
} Scheduler_CBS_Node;
@@ -163,10 +165,19 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
Thread_Control *the_thread
);
-Thread_Control *_Scheduler_CBS_Release_job(
+void _Scheduler_CBS_Release_job(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
+);
+
+void _Scheduler_CBS_Cancel_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t length
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
/**
diff --git a/cpukit/score/include/rtems/score/scheduleredf.h b/cpukit/score/include/rtems/score/scheduleredf.h
index 81b245e391..ab43672ec1 100644
--- a/cpukit/score/include/rtems/score/scheduleredf.h
+++ b/cpukit/score/include/rtems/score/scheduleredf.h
@@ -89,24 +89,14 @@ typedef struct {
Scheduler_Node Base;
/**
- * Pointer to corresponding Thread Control Block.
- */
- Thread_Control *thread;
- /**
* Rbtree node related to this thread.
*/
RBTree_Node Node;
/**
- * @brief The thread priority used by this scheduler instance in case no job
- * is released.
+ * @brief The thread priority currently used for this scheduler instance.
*/
- Priority_Control background_priority;
-
- /**
- * @brief The thread priority currently used by this scheduler instance.
- */
- Priority_Control current_priority;
+ Priority_Control priority;
} Scheduler_EDF_Node;
/**
@@ -215,15 +205,19 @@ Scheduler_Void_or_thread _Scheduler_EDF_Yield(
Thread_Control *the_thread
);
-Thread_Control *_Scheduler_EDF_Release_job(
+void _Scheduler_EDF_Release_job(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
- uint64_t deadline
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
);
-Thread_Control *_Scheduler_EDF_Cancel_job(
+void _Scheduler_EDF_Cancel_job(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
#ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/scheduleredfimpl.h b/cpukit/score/include/rtems/score/scheduleredfimpl.h
index 844d745d54..61aceddf19 100644
--- a/cpukit/score/include/rtems/score/scheduleredfimpl.h
+++ b/cpukit/score/include/rtems/score/scheduleredfimpl.h
@@ -74,7 +74,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less(
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
- prio_right = the_right->current_priority;
+ prio_right = the_right->priority;
return prio_left < prio_right;
}
@@ -93,7 +93,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less_or_equal(
the_right = RTEMS_CONTAINER_OF( right, Scheduler_EDF_Node, Node );
prio_left = *the_left;
- prio_right = the_right->current_priority;
+ prio_right = the_right->priority;
return prio_left <= prio_right;
}
@@ -101,13 +101,13 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_EDF_Less_or_equal(
RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node,
- Priority_Control current_priority
+ Priority_Control priority
)
{
_RBTree_Insert_inline(
&context->Ready,
&node->Node,
- &current_priority,
+ &priority,
_Scheduler_EDF_Less
);
}
@@ -115,13 +115,13 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue(
RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue_first(
Scheduler_EDF_Context *context,
Scheduler_EDF_Node *node,
- Priority_Control current_priority
+ Priority_Control priority
)
{
_RBTree_Insert_inline(
&context->Ready,
&node->Node,
- &current_priority,
+ &priority,
_Scheduler_EDF_Less_or_equal
);
}
@@ -164,7 +164,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Schedule_body(
first = _RBTree_Minimum( &context->Ready );
node = RTEMS_CONTAINER_OF( first, Scheduler_EDF_Node, Node );
- _Scheduler_Update_heir( node->thread, force_dispatch );
+ _Scheduler_Update_heir( node->Base.owner, force_dispatch );
}
/**@}*/
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index bbad6e0a36..dea1888a51 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -22,6 +22,7 @@
#include <rtems/score/scheduler.h>
#include <rtems/score/cpusetimpl.h>
+#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>
@@ -99,17 +100,6 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
return _Scheduler_Get_by_CPU_index( cpu_index );
}
-RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
- const Thread_Control *the_thread
-)
-{
-#if defined(RTEMS_SMP)
- return the_thread->Scheduler.own_node;
-#else
- return the_thread->Scheduler.node;
-#endif
-}
-
ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
/**
@@ -264,7 +254,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
needs_help != NULL
&& _Resource_Node_owns_resources( &needs_help->Resource_node )
) {
- Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );
+ Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );
if (
node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
@@ -486,21 +476,27 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
* @brief Releases a job of a thread with respect to the scheduler.
*
* @param[in] the_thread The thread.
+ * @param[in] priority_node The priority node of the job.
* @param[in] deadline The deadline in watchdog ticks since boot.
- *
- * @return The thread to hand over to _Thread_Update_priority().
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_job(
- Thread_Control *the_thread,
- uint64_t deadline
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ uint64_t deadline,
+ Thread_queue_Context *queue_context
)
{
const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
- return ( *scheduler->Operations.release_job )(
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.release_job )(
scheduler,
the_thread,
- deadline
+ priority_node,
+ deadline,
+ queue_context
);
}
@@ -508,16 +504,25 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_job(
* @brief Cancels a job of a thread with respect to the scheduler.
*
* @param[in] the_thread The thread.
- *
- * @return The thread to hand over to _Thread_Update_priority().
+ * @param[in] priority_node The priority node of the job.
+ * @param[in] queue_context The thread queue context to provide the set of
+ * threads for _Thread_Priority_update().
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Cancel_job(
- Thread_Control *the_thread
+RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
)
{
const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
- return ( *scheduler->Operations.cancel_job )( scheduler, the_thread );
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ ( *scheduler->Operations.cancel_job )(
+ scheduler,
+ the_thread,
+ priority_node,
+ queue_context
+ );
}
/**
@@ -776,12 +781,10 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
bool prepend_it
)
{
- Scheduler_Node *own_node;
-
- own_node = _Scheduler_Thread_get_own_node( the_thread );
- _Scheduler_Node_set_priority( own_node, new_priority, prepend_it );
+ Scheduler_Node *scheduler_node;
- the_thread->current_priority = new_priority;
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}
#if defined(RTEMS_SMP)
@@ -857,7 +860,7 @@ RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
Scheduler_Help_state new_help_state
)
{
- Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
+ Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
Scheduler_Help_state previous_help_state = node->help_state;
node->help_state = new_help_state;
@@ -1294,7 +1297,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
_Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
_Scheduler_Thread_set_scheduler_and_node(
old_user,
- _Scheduler_Thread_get_own_node( old_user ),
+ _Thread_Scheduler_get_own_node( old_user ),
old_user
);
@@ -1383,11 +1386,24 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
return STATUS_RESOURCE_IN_USE;
}
- the_thread->current_priority = priority;
- the_thread->real_priority = priority;
- the_thread->Start.initial_priority = priority;
+ own_node = _Thread_Scheduler_get_own_node( the_thread );
+ _Priority_Plain_extract( &own_node->Wait.Priority, &the_thread->Real_priority );
- own_node = _Scheduler_Thread_get_own_node( the_thread );
+ if ( !_Priority_Is_empty( &own_node->Wait.Priority ) ) {
+ _Priority_Plain_insert(
+ &own_node->Wait.Priority,
+ &the_thread->Real_priority,
+ the_thread->Real_priority.priority
+ );
+ return STATUS_RESOURCE_IN_USE;
+ }
+
+ the_thread->Start.initial_priority = priority;
+ _Priority_Node_set_priority( &the_thread->Real_priority, priority );
+ _Priority_Initialize_one(
+ &own_node->Wait.Priority,
+ &the_thread->Real_priority
+ );
#if defined(RTEMS_SMP)
{
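
With this patch _Scheduler_Release_job() and _Scheduler_Cancel_job() no longer return a thread to hand over to a priority update; the affected threads are collected in the thread queue context instead. A rough caller-side sketch, not part of the patch: the function name is invented, the deadline node and value come from the caller (for example rate-monotonic period code), and the locking order mirrors the mutex code earlier in this patch.

#include <rtems/score/schedulerimpl.h>

static void _Example_release_job(
  Thread_Control *the_thread,
  Priority_Node  *deadline_node,
  uint64_t        deadline
)
{
  Thread_queue_Context queue_context;
  ISR_lock_Context     lock_context;
  Per_CPU_Control     *cpu_self;

  _Thread_queue_Context_initialize( &queue_context );
  _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  /*
   * Updates the deadline priority node and records the threads which need a
   * priority update in the queue context; the priority update set is cleared
   * inside _Scheduler_Release_job().
   */
  _Scheduler_Release_job( the_thread, deadline_node, deadline, &queue_context );

  _Thread_Wait_release_default_critical( the_thread, &lock_context );

  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context.Lock_context.Lock_context
  );
  _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );

  /* Carry out the collected scheduler updates, then enable dispatching. */
  _Thread_Priority_update( &queue_context );
  _Thread_Dispatch_enable( cpu_self );
}
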
diff --git a/cpukit/score/include/rtems/score/schedulernode.h b/cpukit/score/include/rtems/score/schedulernode.h
index 293887c4b7..9827d21e64 100644
--- a/cpukit/score/include/rtems/score/schedulernode.h
+++ b/cpukit/score/include/rtems/score/schedulernode.h
@@ -98,7 +98,7 @@ typedef enum {
/**
* @brief Scheduler node for per-thread data.
*/
-typedef struct {
+typedef struct Scheduler_Node {
#if defined(RTEMS_SMP)
/**
* @brief Chain node for usage in various scheduler data structures.
@@ -144,24 +144,7 @@ typedef struct {
* @brief Thread wait support block.
*/
struct {
- /**
- * @brief Node for thread queues.
- *
- * Each scheduler node can be enqueued on a thread queue on behalf of the
- * thread owning the scheduler node. The scheduler node reflects the
- * priority of the thread within the corresponding scheduler instance.
- */
- union {
- /**
- * @brief A node for chains.
- */
- Chain_Node Chain;
-
- /**
- * @brief A node for red-black trees.
- */
- RBTree_Node RBTree;
- } Node;
+ Priority_Aggregation Priority;
} Wait;
/**
diff --git a/cpukit/score/include/rtems/score/schedulernodeimpl.h b/cpukit/score/include/rtems/score/schedulernodeimpl.h
index ecb61c239a..b0f7d77bbe 100644
--- a/cpukit/score/include/rtems/score/schedulernodeimpl.h
+++ b/cpukit/score/include/rtems/score/schedulernodeimpl.h
@@ -17,20 +17,23 @@
#include <rtems/score/schedulernode.h>
+struct Scheduler_Control;
+
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-#define SCHEDULER_NODE_OF_WAIT_CHAIN_NODE( node ) \
- RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Node.Chain )
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority.Node.Node.Chain )
-#define SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( node ) \
- RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Node.RBTree )
+#define SCHEDULER_NODE_OF_WAIT_PRIORITY( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Priority )
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
- Scheduler_Node *node,
- Thread_Control *the_thread,
- Priority_Control priority
+ const struct Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
)
{
node->owner = the_thread;
@@ -39,12 +42,14 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
node->Priority.prepend_it = false;
#if defined(RTEMS_SMP)
+ node->Wait.Priority.scheduler = scheduler;
node->user = the_thread;
node->help_state = SCHEDULER_HELP_YOURSELF;
node->idle = NULL;
node->accepts_help = the_thread;
_SMP_sequence_lock_Initialize( &node->Priority.Lock );
#else
+ (void) scheduler;
(void) the_thread;
#endif
}
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index ae1941aa43..035df46d65 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -375,7 +375,7 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
Thread_Control *thread
)
{
- return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
+ return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
}
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
@@ -386,12 +386,13 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
}
static inline void _Scheduler_SMP_Node_initialize(
- Scheduler_SMP_Node *node,
- Thread_Control *thread,
- Priority_Control priority
+ const Scheduler_Control *scheduler,
+ Scheduler_SMP_Node *node,
+ Thread_Control *thread,
+ Priority_Control priority
)
{
- _Scheduler_Node_do_initialize( &node->Base, thread, priority );
+ _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
node->state = SCHEDULER_SMP_NODE_BLOCKED;
node->priority = priority;
}
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 2cd229f76f..393d431e0e 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -373,7 +373,7 @@ typedef struct {
*
* The thread wait lock mechanism protects the following thread variables
* - POSIX_API_Control::Attributes,
- * - Thread_Control::current_priority,
+ * - Scheduler_Node::Wait,
* - Thread_Control::Wait::Lock::Pending_requests,
* - Thread_Control::Wait::queue, and
* - Thread_Control::Wait::operations.
@@ -461,32 +461,11 @@ typedef struct {
/** This field is the current execution state of this proxy. */
States_Control current_state;
- /**
- * @brief This field is the current priority state of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control current_priority;
/**
- * @brief This field is the base priority of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
+ * @brief The base priority of this thread in its home scheduler instance.
*/
- Priority_Control real_priority;
-
- /**
- * @brief Hints if a priority restore is necessary once the resource count
- * changes from one to zero.
- *
- * This is an optimization to speed up the mutex surrender sequence in case
- * no attempt to change the priority was made during the mutex ownership. On
- * SMP configurations atomic fences must synchronize writes to
- * Thread_Control::priority_restore_hint and Thread_Control::resource_count.
- */
- bool priority_restore_hint;
+ Priority_Node Real_priority;
/** This field is the number of mutexes currently held by this proxy. */
uint32_t resource_count;
@@ -708,31 +687,9 @@ struct _Thread_Control {
States_Control current_state;
/**
- * @brief This field is the current priority state of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control current_priority;
-
- /**
- * @brief This field is the base priority of this thread.
- *
- * Writes to this field are only allowed in _Thread_Initialize() or via
- * _Thread_Change_priority().
- */
- Priority_Control real_priority;
-
- /**
- * @brief Hints if a priority restore is necessary once the resource count
- * changes from one to zero.
- *
- * This is an optimization to speed up the mutex surrender sequence in case
- * no attempt to change the priority was made during the mutex ownership. On
- * SMP configurations atomic fences must synchronize writes to
- * Thread_Control::priority_restore_hint and Thread_Control::resource_count.
+ * @brief The base priority of this thread in its home scheduler instance.
*/
- bool priority_restore_hint;
+ Priority_Node Real_priority;
/** This field is the number of mutexes currently held by this thread. */
uint32_t resource_count;
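
As a rough migration sketch (not part of the patch): reads of the removed flat priority fields map to the new representation roughly as follows. The assumption that Priority_Node stores its value in a member named priority is based on the _Priority_Node_set_priority() usage later in this diff.

    /* formerly: prio = the_thread->current_priority; */
    Priority_Control prio = _Thread_Get_priority( the_thread );

    /* formerly: base = the_thread->real_priority; the member name is an
     * assumption, see the lead-in above. */
    Priority_Control base = the_thread->Real_priority.priority;
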
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 1fce842533..7f9dccf5e2 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -415,128 +415,185 @@ RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
#endif
/**
- * @brief Returns true if the left thread priority is less than the right
- * thread priority in the intuitive sense of priority and false otherwise.
+ * @brief Performs the priority actions specified by the thread queue context
+ * along the thread queue path.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param start_of_path The start thread of the thread queue path.
+ * @param queue_context The thread queue context specifying the thread queue
+ * path and initial thread priority actions.
+ *
+ * @see _Thread_queue_Path_acquire_critical().
*/
-RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
- Priority_Control left,
- Priority_Control right
-)
-{
- return left > right;
-}
+void _Thread_Priority_perform_actions(
+ Thread_Control *start_of_path,
+ Thread_queue_Context *queue_context
+);
/**
- * @brief Returns the highest priority of the left and right thread priorities
- * in the intuitive sense of priority.
+ * @brief Adds the specified thread priority node to the corresponding thread
+ * priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
+ *
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to add.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
*/
-RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
- Priority_Control left,
- Priority_Control right
-)
-{
- return _Thread_Priority_less_than( left, right ) ? right : left;
-}
+void _Thread_Priority_add(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
+);
/**
- * @brief Filters a thread priority change.
+ * @brief Removes the specified thread priority node from the corresponding
+ * thread priority aggregation.
*
- * Called by _Thread_Change_priority() under the protection of the thread lock.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in, out] new_priority The new priority of the thread. The filter may
- * alter this value.
- * @param[in] arg The argument passed to _Thread_Change_priority().
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to remove.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
*
- * @retval true Change the current priority.
- * @retval false Otherwise.
+ * @see _Thread_Wait_acquire().
*/
-typedef bool ( *Thread_Change_priority_filter )(
- Thread_Control *the_thread,
- Priority_Control *new_priority,
- void *arg
-);
-
-Thread_Control *_Thread_Apply_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+void _Thread_Priority_remove(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Thread_queue_Context *queue_context
);
-void _Thread_Update_priority( Thread_Control *the_thread );
-
/**
- * @brief Changes the priority of a thread if allowed by the filter function.
+ * @brief Propagates a thread priority value change in the specified thread
+ * priority node to the corresponding thread priority aggregation.
*
- * It changes current priority of the thread to the new priority in case the
- * filter function returns true. In this case the scheduler is notified of the
- * priority change as well.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
- * @param[in] arg The argument for the filter function.
- * @param[in] filter The filter function to determine if a priority change is
- * allowed and optionally perform other actions under the protection of the
- * thread lock simultaneously with the update of the current priority.
- * @param[in] prepend_it In case this is true, then the thread is prepended to
- * its priority group in its scheduler instance, otherwise it is appended.
- */
-void _Thread_Change_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- void *arg,
- Thread_Change_priority_filter filter,
- bool prepend_it
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
+ *
+ * @see _Thread_Wait_acquire().
+ */
+void _Thread_Priority_changed(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
);
/**
- * @brief Raises the priority of a thread.
+ * @brief Changes the thread priority value of the specified thread priority
+ * node in the corresponding thread priority aggregation.
*
- * It changes the current priority of the thread to the new priority if the new
- * priority is higher than the current priority. In this case the thread is
- * appended to its new priority group in its scheduler instance.
+ * The caller must be the owner of the thread wait lock.
*
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
+ * @param the_thread The thread.
+ * @param priority_node The thread priority node to change.
+ * @param new_priority The new thread priority value of the thread priority
+ * node to change.
+ * @param prepend_it In case this is true, then the thread is prepended to
+ * its priority group in its home scheduler instance, otherwise it is
+ * appended.
+ * @param queue_context The thread queue context to return an updated set of
+ * threads for _Thread_Priority_update(). The thread queue context must be
+ * initialized via _Thread_queue_Context_clear_priority_updates() before a
+ * call of this function.
*
- * @see _Thread_Change_priority().
+ * @see _Thread_Wait_acquire().
*/
-void _Thread_Raise_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority
-);
+RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
+ Thread_Control *the_thread,
+ Priority_Node *priority_node,
+ Priority_Control new_priority,
+ bool prepend_it,
+ Thread_queue_Context *queue_context
+)
+{
+ _Priority_Node_set_priority( priority_node, new_priority );
+ _Thread_Priority_changed(
+ the_thread,
+ priority_node,
+ prepend_it,
+ queue_context
+ );
+}
/**
- * @brief Sets the current to the real priority of a thread.
+ * @brief Replaces the victim priority node with the replacement priority node
+ * in the corresponding thread priority aggregation.
+ *
+ * The caller must be the owner of the thread wait lock.
*
- * Sets the priority restore hint to false.
+ * @param the_thread The thread.
+ * @param victim_node The victim thread priority node.
+ * @param replacement_node The replacement thread priority node.
+ *
+ * @see _Thread_Wait_acquire().
*/
-void _Thread_Restore_priority( Thread_Control *the_thread );
+void _Thread_Priority_replace(
+ Thread_Control *the_thread,
+ Priority_Node *victim_node,
+ Priority_Node *replacement_node
+);
/**
- * @brief Sets the priority of a thread.
+ * @brief Updates the priority of all threads recorded in the thread queue
+ * context.
 *
- * It sets the real priority of the thread. In addition it changes the current
- * priority of the thread if the new priority is higher than the current
- * priority or the thread owns no resources.
+ * The priority updates are carried out for the set of threads recorded by
+ * previous calls to the priority change functions listed below.
 *
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority of the thread.
- * @param[out] old_priority The old real priority of the thread. This pointer
- * must not be @c NULL.
- * @param[in] prepend_it In case this is true, then the thread is prepended to
- * its priority group in its scheduler instance, otherwise it is appended.
+ * @param queue_context The thread queue context containing the set of
+ * threads to update.
*
- * @see _Thread_Change_priority().
+ * @see _Thread_Priority_add(), _Thread_Priority_change(),
+ * _Thread_Priority_changed() and _Thread_Priority_remove().
*/
-void _Thread_Set_priority(
- Thread_Control *the_thread,
- Priority_Control new_priority,
- Priority_Control *old_priority,
- bool prepend_it
-);
+void _Thread_Priority_update( Thread_queue_Context *queue_context );
+
+/**
+ * @brief Returns true if the left thread priority is less than the right
+ * thread priority in the intuitive sense of priority and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return left > right;
+}
+
+/**
+ * @brief Returns the highest priority of the left and right thread priorities
+ * in the intuitive sense of priority.
+ */
+RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
+ Priority_Control left,
+ Priority_Control right
+)
+{
+ return _Thread_Priority_less_than( left, right ) ? right : left;
+}
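
A condensed usage sketch of the new base priority change sequence (not taken from the patch); the _Thread_Wait_acquire()/_Thread_Wait_release() pairing and their parameters are assumptions.

    static void _Example_Change_base_priority(
      Thread_Control   *the_thread,
      Priority_Control  new_priority,
      bool              prepend_it
    )
    {
      Thread_queue_Context queue_context;

      /* The update set must be cleared before priority add/change/remove. */
      _Thread_queue_Context_clear_priority_updates( &queue_context );

      /* Assumed helpers: acquire and release the thread wait lock. */
      _Thread_Wait_acquire( the_thread, &queue_context );
      _Thread_Priority_change(
        the_thread,
        &the_thread->Real_priority,
        new_priority,
        prepend_it,
        &queue_context
      );
      _Thread_Wait_release( the_thread, &queue_context );

      /* Notify the schedulers of the threads collected above. */
      _Thread_Priority_update( &queue_context );
    }
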
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
Objects_Id id
@@ -929,6 +986,17 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
return owns_resources;
}
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return the_thread->Scheduler.own_node;
+#else
+ return the_thread->Scheduler.node;
+#endif
+}
+
/**
* @brief Returns the priority of the thread.
*
@@ -937,14 +1005,15 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
* protocols, a job release or the POSIX sporadic server for example.
*
* @return The priority of the thread.
- *
- * @see _Scheduler_Node_get_priority().
*/
RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
const Thread_Control *the_thread
)
{
- return the_thread->current_priority;
+ Scheduler_Node *scheduler_node;
+
+ scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
+ return _Priority_Get_priority( &scheduler_node->Wait.Priority );
}
/**
@@ -1389,7 +1458,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
_Assert( queue_context->Lock_context.Wait.queue == queue );
#endif
- ( *the_thread->Wait.operations->extract )( queue, the_thread );
+ ( *the_thread->Wait.operations->extract )(
+ queue,
+ the_thread,
+ queue_context
+ );
_Thread_Wait_restore_default( the_thread );
#if defined(RTEMS_SMP)
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index a4ad0827a3..6f62506c26 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -30,6 +30,8 @@
extern "C" {
#endif
+struct Scheduler_Node;
+
/**
* @defgroup ScoreThreadQueue Thread Queue Handler
*
@@ -47,8 +49,6 @@ typedef struct Thread_queue_Queue Thread_queue_Queue;
typedef struct Thread_queue_Operations Thread_queue_Operations;
-typedef struct Thread_queue_Path Thread_queue_Path;
-
/**
* @brief Thread queue deadlock callout.
*
@@ -195,6 +195,56 @@ typedef struct {
*/
uint64_t timeout;
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Representation of a thread queue path from a start thread queue to
+ * the terminal thread queue.
+ *
+ * The start thread queue is determined by the object on which a thread intends
+ * to block. The terminal thread queue is the thread queue reachable via
+ * thread queue links whose owner is not blocked on a thread queue. The thread
+ * queue links are determined by the thread queue owner and thread wait queue
+ * relationships.
+ */
+ struct {
+ /**
+ * @brief The chain of thread queue links defining the thread queue path.
+ */
+ Chain_Control Links;
+
+ /**
+ * @brief The start of a thread queue path.
+ */
+ Thread_queue_Link Start;
+ } Path;
+#endif
+
+ /**
+ * @brief Block to manage thread priority changes due to a thread queue
+ * operation.
+ */
+ struct {
+ /**
+ * @brief A priority action list.
+ */
+ Priority_Actions Actions;
+
+ /**
+ * @brief Count of threads to update the priority via
+ * _Thread_Priority_update().
+ */
+ size_t update_count;
+
+ /**
+ * @brief Threads to update the priority via _Thread_Priority_update().
+ *
+ * Currently, at most two threads need an update in a single operation, for
+ * example the thread of the thread queue operation and the owner of the
+ * thread queue.
+ */
+ Thread_Control *update[ 2 ];
+ } Priority;
+
/**
* @brief Invoked in case of a detected deadlock.
*
@@ -237,7 +287,13 @@ typedef struct {
/**
* @brief The actual thread priority queue.
*/
- RBTree_Control Queue;
+ Priority_Aggregation Queue;
+
+ /**
+ * @brief This priority queue is added to a scheduler node of the owner in
+ * case of priority inheritance.
+ */
+ struct Scheduler_Node *scheduler_node;
} Thread_queue_Priority_queue;
/**
@@ -289,6 +345,11 @@ typedef struct _Thread_queue_Heads {
#if defined(RTEMS_SMP)
/**
+ * @brief Boost priority.
+ */
+ Priority_Node Boost_priority;
+
+ /**
* @brief One priority queue per scheduler instance.
*/
Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ];
@@ -337,34 +398,33 @@ struct Thread_queue_Queue {
};
/**
- * @brief Thread queue priority change operation.
+ * @brief Thread queue action operation.
*
* @param[in] queue The actual thread queue.
- * @param[in] the_thread The thread.
- * @param[in] new_priority The new priority value.
- *
- * @see Thread_queue_Operations.
+ * @param[in] priority_actions The set of priority actions to carry out for
+ * this thread queue. On return, it contains the priority actions to perform
+ * on the thread queue owner or the empty set in case there is nothing to do.
*/
-typedef void ( *Thread_queue_Priority_change_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Priority_Control new_priority
+typedef void ( *Thread_queue_Priority_actions_operation )(
+ Thread_queue_Queue *queue,
+ Priority_Actions *priority_actions
);
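
For orientation, a do-nothing shape of this operation for thread queues without priority tracking might look as follows; the _Priority_Actions_initialize_empty() helper name is an assumption based on the priorityimpl.h additions in this diffstat.

    static void _Example_Do_nothing_priority_actions(
      Thread_queue_Queue *queue,
      Priority_Actions   *priority_actions
    )
    {
      (void) queue;

      /* Hand back an empty action set, so nothing propagates to the owner. */
      _Priority_Actions_initialize_empty( priority_actions );
    }
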
/**
* @brief Thread queue enqueue operation.
*
* A potential thread to update the priority due to priority inheritance is
- * returned via the thread queue path. This thread is handed over to
- * _Thread_Update_priority().
+ * returned via the thread queue context. This thread is handed over to
+ * _Thread_Priority_update().
*
* @param[in] queue The actual thread queue.
* @param[in] the_thread The thread to enqueue on the queue.
*/
typedef void ( *Thread_queue_Enqueue_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- Thread_queue_Path *path
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
);
/**
@@ -374,8 +434,9 @@ typedef void ( *Thread_queue_Enqueue_operation )(
* @param[in] the_thread The thread to extract from the thread queue.
*/
typedef void ( *Thread_queue_Extract_operation )(
- Thread_queue_Queue *queue,
- Thread_Control *the_thread
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
);
/**
@@ -390,9 +451,10 @@ typedef void ( *Thread_queue_Extract_operation )(
* @return The previous first thread on the queue.
*/
typedef Thread_Control *( *Thread_queue_Surrender_operation )(
- Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context
);
/**
@@ -415,16 +477,9 @@ typedef Thread_Control *( *Thread_queue_First_operation )(
*/
struct Thread_queue_Operations {
/**
- * @brief Thread queue priority change operation.
- *
- * Called by _Thread_Change_priority() to notify a thread about a priority
- * change. In case this thread waits currently for a resource the handler
- * may adjust its data structures according to the new priority value. This
- * handler must not be NULL, instead the default handler
- * _Thread_Do_nothing_priority_change() should be used in case nothing needs
- * to be done during a priority change.
- */
- Thread_queue_Priority_change_operation priority_change;
+ * @brief Thread queue priority actions operation.
+ */
+ Thread_queue_Priority_actions_operation priority_actions;
/**
* @brief Thread queue enqueue operation.
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 977b0ceb38..65b0e8eeab 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -21,7 +21,7 @@
#include <rtems/score/threadq.h>
#include <rtems/score/chainimpl.h>
-#include <rtems/score/rbtreeimpl.h>
+#include <rtems/score/priorityimpl.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/smp.h>
#include <rtems/score/thread.h>
@@ -39,38 +39,8 @@ extern "C" {
*/
/**@{*/
-/**
- * @brief Representation of a thread queue path from a start thread queue to
- * the terminal thread queue.
- *
- * The start thread queue is determined by the object on which a thread intends
- * to block. The terminal thread queue is the thread queue reachable via
- * thread queue links those owner is not blocked on a thread queue. The thread
- * queue links are determined by the thread queue owner and thread wait queue
- * relationships.
- */
-struct Thread_queue_Path {
-#if defined(RTEMS_SMP)
- /**
- * @brief The chain of thread queue links defining the thread queue path.
- */
- Chain_Control Links;
-
- /**
- * @brief The start of a thread queue path.
- */
- Thread_queue_Link Start;
-#endif
-
- /**
- * @brief A potential thread to update the priority via
- * _Thread_Update_priority().
- *
- * This thread is determined by thread queues which support priority
- * inheritance.
- */
- Thread_Control *update_priority;
-};
+#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
+ RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node )
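
A small usage sketch of the macro (not from the patch), assuming the path chain is walked with the regular chain API:

    /* Map the first node on the thread queue path back to its link. */
    Chain_Node        *node = _Chain_First( &queue_context->Path.Links );
    Thread_queue_Link *link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
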
/**
* @brief Thread queue with a layout compatible to struct _Thread_queue_Queue
@@ -210,6 +180,42 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
queue_context->deadlock_callout = deadlock_callout;
}
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ queue_context->Priority.update_count = 0;
+}
+
+RTEMS_INLINE_ROUTINE size_t _Thread_queue_Context_save_priority_updates(
+ Thread_queue_Context *queue_context
+)
+{
+ return queue_context->Priority.update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_restore_priority_updates(
+ Thread_queue_Context *queue_context,
+ size_t update_count
+)
+{
+ queue_context->Priority.update_count = update_count;
+}
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
+ Thread_queue_Context *queue_context,
+ Thread_Control *the_thread
+)
+{
+ size_t n;
+
+ n = queue_context->Priority.update_count;
+ _Assert( n < RTEMS_ARRAY_SIZE( queue_context->Priority.update ) );
+
+ queue_context->Priority.update_count = n + 1;
+ queue_context->Priority.update[ n ] = the_thread;
+}
+
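
The save/restore pair above allows speculative work to be rolled back; a minimal sketch:

    size_t update_count;

    /* Remember how many priority updates were recorded so far. */
    update_count = _Thread_queue_Context_save_priority_updates( queue_context );

    /* ... speculative work that may add priority updates ... */

    if ( abandoned ) {  /* "abandoned" is a placeholder condition */
      _Thread_queue_Context_restore_priority_updates( queue_context, update_count );
    }
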
/**
* @brief Sets the MP callout in the thread queue context.
*
@@ -274,9 +280,12 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
#if defined(RTEMS_SMP)
size_t i;
+ _Priority_Node_initialize( &heads->Boost_priority, 0 );
+ _Priority_Node_set_inactive( &heads->Boost_priority );
+
for ( i = 0; i < _Scheduler_Count; ++i ) {
_Chain_Initialize_node( &heads->Priority[ i ].Node );
- _RBTree_Initialize_empty( &heads->Priority[ i ].Queue );
+ _Priority_Initialize_empty( &heads->Priority[ i ].Queue );
}
#endif
@@ -579,16 +588,6 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
);
}
-bool _Thread_queue_Do_extract_locked(
- Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
- Thread_Control *the_thread
-#if defined(RTEMS_MULTIPROCESSING)
- ,
- const Thread_queue_Context *queue_context
-#endif
-);
-
/**
* @brief Extracts the thread from the thread queue, restores the default wait
* operations and restores the default thread lock.
@@ -599,8 +598,7 @@ bool _Thread_queue_Do_extract_locked(
* @param[in] queue The actual thread queue.
* @param[in] operations The thread queue operations.
* @param[in] the_thread The thread to extract.
- * @param[in] queue_context The thread queue context. This parameter is only
- * used on multiprocessing configurations.
+ * @param[in] queue_context The thread queue context.
*
* @return Returns the unblock indicator for _Thread_queue_Unblock_critical().
 * True indicates that this thread must be unblocked by the scheduler later in
@@ -610,32 +608,12 @@ bool _Thread_queue_Do_extract_locked(
* since this thread may already block on another resource in an SMP
* configuration.
*/
-#if defined(RTEMS_MULTIPROCESSING)
- #define _Thread_queue_Extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- ) \
- _Thread_queue_Do_extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- )
-#else
- #define _Thread_queue_Extract_locked( \
- unblock, \
- queue, \
- the_thread, \
- queue_context \
- ) \
- _Thread_queue_Do_extract_locked( \
- unblock, \
- queue, \
- the_thread \
- )
-#endif
+bool _Thread_queue_Extract_locked(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
/**
* @brief Unblocks the thread which was on the thread queue before.
@@ -735,7 +713,7 @@ void _Thread_queue_Extract_with_proxy(
/**
* @brief Surrenders the thread queue previously owned by the thread to the
- * first enqueued thread if it exists.
+ * first enqueued thread.
*
* The owner of the thread queue must be set to NULL by the caller.
*
@@ -743,21 +721,18 @@ void _Thread_queue_Extract_with_proxy(
* thread dispatch if necessary.
*
* @param[in] queue The actual thread queue.
- * @param[in] operations The thread queue operations.
- * @param[in] heads The thread queue heads.
+ * @param[in] heads The thread queue heads. It must not be NULL.
* @param[in] previous_owner The previous owner thread surrendering the thread
* queue.
- * @param[in] keep_priority Indicates if the previous owner thread should keep
- * its current priority.
* @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
*/
void _Thread_queue_Surrender(
Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
Thread_queue_Heads *heads,
Thread_Control *previous_owner,
- bool keep_priority,
- Thread_queue_Context *queue_context
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
);
RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
@@ -980,6 +955,16 @@ void _Thread_queue_Unblock_proxy(
);
#endif
+bool _Thread_queue_Path_acquire_critical(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+
+void _Thread_queue_Path_release_critical(
+ Thread_queue_Context *queue_context
+);
+
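
Finally, a sketch of how the path functions combine at enqueue time (not from the patch): it assumes the thread queue lock is already held and that a false return from the path acquire indicates a detected deadlock.

    if ( _Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
      ( *operations->enqueue )( queue, the_thread, queue_context );
      _Thread_queue_Path_release_critical( queue_context );
    } else {
      _Thread_queue_Path_release_critical( queue_context );
      ( *queue_context->deadlock_callout )( the_thread );
    }
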
/**
* @brief Helper structure to ensure that all objects containing a thread queue
 * have the right layout.