summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src/threadqenqueue.c
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-03-25 09:11:26 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2021-09-01 20:24:57 +0200
commit9c0591f12d450401746bc0bf7cd7a0e0b14a5f3b (patch)
tree02640af241b723d162024131701a0b518c961bf8 /cpukit/score/src/threadqenqueue.c
parentscore: Document Futex Handler (diff)
downloadrtems-9c0591f12d450401746bc0bf7cd7a0e0b14a5f3b.tar.bz2
score: Fix priority discipline handling
The priority queues in clustered scheduling configurations use a per-scheduler priority queue rotation to ensure FIFO fairness across schedulers. This mechanism is implemented in the thread queue surrender operation. Unfortunately, some semaphore and message queue directives wrongly used the thread queue extract operation. Fix this through the use of _Thread_queue_Surrender(). Update #4358.
Diffstat (limited to '')
-rw-r--r--cpukit/score/src/threadqenqueue.c109
1 files changed, 109 insertions, 0 deletions
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index d187e32fbc..d165e30da7 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -711,6 +711,115 @@ void _Thread_queue_Surrender(
_Thread_Dispatch_enable( cpu_self );
}
+/*
+ * Surrenders a thread queue which has no owner, i.e. no priority
+ * inheritance or ceiling bookkeeping is involved (see the
+ * _Assert( queue->owner == NULL ) below).  The first thread according to
+ * the queue discipline is extracted and made ready again.
+ *
+ * @param queue is the thread queue to surrender.
+ * @param heads are the queue heads; must not be NULL, so at least one
+ *   thread is enqueued.
+ * @param queue_context is the caller's queue context; must hold the
+ *   queue lock on entry.  The lock is released by this function.
+ * @param operations provide the discipline-specific surrender operation.
+ */
+void _Thread_queue_Surrender_no_priority(
+  Thread_queue_Queue *queue,
+  Thread_queue_Heads *heads,
+  Thread_queue_Context *queue_context,
+  const Thread_queue_Operations *operations
+)
+{
+  Thread_Control *the_thread;
+  bool unblock;
+  Per_CPU_Control *cpu_self;
+
+  _Assert( heads != NULL );
+  _Assert( queue->owner == NULL );
+
+  /*
+   * Extract the first thread via the discipline-specific surrender
+   * operation.  The new owner argument is NULL since this queue has no
+   * ownership semantics.
+   */
+  the_thread = ( *operations->surrender )( queue, heads, NULL, queue_context );
+
+#if defined(RTEMS_MULTIPROCESSING)
+  _Thread_queue_MP_set_callout( the_thread, queue_context );
+#endif
+
+  unblock = _Thread_queue_Make_ready_again( the_thread );
+
+  /*
+   * Disable thread dispatching before the queue lock is released so that
+   * the unblock below cannot be preempted on this processor.
+   */
+  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+  _Thread_queue_Queue_release(
+    queue,
+    &queue_context->Lock_context.Lock_context
+  );
+
+  if ( unblock ) {
+    _Thread_Remove_timer_and_unblock( the_thread, queue );
+  }
+
+  _Thread_Dispatch_enable( cpu_self );
+}
+
+/*
+ * Surrenders a thread queue which uses the priority ceiling protocol.
+ * The ceiling priority node is removed from the executing thread (the
+ * previous owner).  If a thread waits on the queue, ownership and the
+ * ceiling priority are handed over to it and it is made ready again.
+ *
+ * @param queue is the thread queue to surrender.
+ * @param executing is the executing thread, the current queue owner.
+ * @param priority_ceiling is the ceiling priority node to move from the
+ *   previous owner to the new owner (if any).
+ * @param queue_context is the caller's queue context; must hold the
+ *   queue lock on entry.  The lock is released by this function.
+ * @param operations provide the discipline-specific surrender operation.
+ *
+ * @return Always returns STATUS_SUCCESSFUL.
+ */
+Status_Control _Thread_queue_Surrender_priority_ceiling(
+  Thread_queue_Queue *queue,
+  Thread_Control *executing,
+  Priority_Node *priority_ceiling,
+  Thread_queue_Context *queue_context,
+  const Thread_queue_Operations *operations
+)
+{
+  ISR_lock_Context lock_context;
+  Thread_queue_Heads *heads;
+  Thread_Control *new_owner;
+  bool unblock;
+  Per_CPU_Control *cpu_self;
+
+  _Thread_Resource_count_decrement( executing );
+
+  /*
+   * Remove the ceiling priority from the previous owner under its default
+   * Wait lock; the resulting priority change is carried in the queue
+   * context and applied by _Thread_Priority_update() below.
+   */
+  _Thread_queue_Context_clear_priority_updates( queue_context );
+  _Thread_Wait_acquire_default_critical( executing, &lock_context );
+  _Thread_Priority_remove( executing, priority_ceiling, queue_context );
+  _Thread_Wait_release_default_critical( executing, &lock_context );
+
+  heads = queue->heads;
+  queue->owner = NULL;
+
+  /* Fast path: nobody waits, so only the priority of the previous owner
+   * needs to be updated. */
+  if ( heads == NULL ) {
+    cpu_self = _Thread_Dispatch_disable_critical(
+      &queue_context->Lock_context.Lock_context
+    );
+    _Thread_queue_Queue_release(
+      queue,
+      &queue_context->Lock_context.Lock_context
+    );
+    _Thread_Priority_update( queue_context );
+    _Thread_Dispatch_direct( cpu_self );
+    return STATUS_SUCCESSFUL;
+  }
+
+  /* Extract the first waiting thread; it becomes the new queue owner. */
+  new_owner = ( *operations->surrender )(
+    queue,
+    heads,
+    NULL,
+    queue_context
+  );
+
+  queue->owner = new_owner;
+
+  unblock = _Thread_queue_Make_ready_again( new_owner );
+
+#if defined(RTEMS_MULTIPROCESSING)
+  /* NOTE(review): on MP configurations the bookkeeping below is skipped
+   * for threads which are not local objects — presumably proxies for
+   * remote threads; confirm against the MP proxy handling. */
+  if ( _Objects_Is_local_id( new_owner->Object.id ) )
+#endif
+  {
+    /* Hand over the ceiling priority to the new owner under its default
+     * Wait lock. */
+    _Thread_Resource_count_increment( new_owner );
+    _Thread_Wait_acquire_default_critical( new_owner, &lock_context );
+    _Thread_Priority_add( new_owner, priority_ceiling, queue_context );
+    _Thread_Wait_release_default_critical( new_owner, &lock_context );
+  }
+
+  /*
+   * Disable thread dispatching before the queue lock is released so that
+   * the priority update and unblock below cannot be preempted on this
+   * processor.
+   */
+  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+  _Thread_queue_Queue_release(
+    queue,
+    &queue_context->Lock_context.Lock_context
+  );
+
+  _Thread_Priority_update( queue_context );
+
+  if ( unblock ) {
+    _Thread_Remove_timer_and_unblock( new_owner, queue );
+  }
+
+  _Thread_Dispatch_direct( cpu_self );
+  return STATUS_SUCCESSFUL;
+}
+
#if defined(RTEMS_SMP)
void _Thread_queue_Surrender_sticky(
Thread_queue_Queue *queue,