Diffstat (limited to 'cpukit/score/src/threadqenqueue.c')
-rw-r--r--  cpukit/score/src/threadqenqueue.c | 353
1 file changed, 242 insertions(+), 111 deletions(-)
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 5e228b7190..038c483f65 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -7,21 +9,38 @@
* _Thread_queue_Deadlock_fatal(), _Thread_queue_Deadlock_status(),
* _Thread_queue_Do_dequeue(), _Thread_queue_Enqueue(),
* _Thread_queue_Enqueue_do_nothing_extra(), _Thread_queue_Enqueue_sticky(),
- * _Thread_queue_Extract(), _Thread_queue_Extract_critical(),
- * _Thread_queue_Extract_locked(), _Thread_queue_Path_acquire_critical(),
- * _Thread_queue_Path_release_critical(), _Thread_queue_Surrender(),
- * _Thread_queue_Surrender_sticky(), and _Thread_queue_Unblock_critical().
+ * _Thread_queue_Extract_locked(), _Thread_queue_Path_acquire(),
+ * _Thread_queue_Path_release(),
+ * _Thread_queue_Resume(), _Thread_queue_Surrender(),
+ * _Thread_queue_Surrender_no_priority(), and _Thread_queue_Surrender_sticky().
*/
/*
* COPYRIGHT (c) 1989-2014.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2015, 2016 embedded brains GmbH.
+ * Copyright (C) 2015, 2016 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -41,9 +60,6 @@
#define THREAD_QUEUE_BLOCKED \
(THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)
-#define THREAD_QUEUE_READY_AGAIN \
- (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)
-
#if defined(RTEMS_SMP)
/*
* A global registry of active thread queue links is used to provide deadlock
@@ -112,7 +128,7 @@ static Thread_queue_Link *_Thread_queue_Link_find(
);
}
-static bool _Thread_queue_Link_add(
+static Thread_queue_Deadlock_status _Thread_queue_Link_add(
Thread_queue_Link *link,
Thread_queue_Queue *source,
Thread_queue_Queue *target
@@ -143,7 +159,7 @@ static bool _Thread_queue_Link_add(
if ( recursive_target == source ) {
_ISR_lock_Release( &links->Lock, &lock_context );
- return false;
+ return THREAD_QUEUE_DEADLOCK_DETECTED;
}
}
@@ -155,7 +171,7 @@ static bool _Thread_queue_Link_add(
);
_ISR_lock_Release( &links->Lock, &lock_context );
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
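
The registry of thread queue links described in the comment above turns deadlock detection into a cycle check: _Thread_queue_Link_add() records the wait-for edge from the source queue to the target queue and reports THREAD_QUEUE_DEADLOCK_DETECTED as soon as following the already recorded edges from the target leads back to the source. The following is a minimal, self-contained sketch of that idea only; the flat edge table and the queue type are hypothetical simplifications, not the RTEMS data structures (RTEMS keys the registry by source queue and protects it with an ISR lock).

  #include <stdbool.h>
  #include <stddef.h>

  typedef struct example_queue example_queue;   /* hypothetical stand-in */

  typedef struct {
    const example_queue *source;   /* queue a thread blocks on               */
    const example_queue *target;   /* queue the owner of "source" waits on   */
  } wait_edge;

  #define EDGE_TABLE_SIZE 32
  static wait_edge edges[ EDGE_TABLE_SIZE ];
  static size_t    edge_count;

  /* Follow recorded edges starting at "from"; true if "needle" is reached. */
  static bool reaches( const example_queue *from, const example_queue *needle )
  {
    while ( from != NULL ) {
      const example_queue *next = NULL;

      if ( from == needle ) {
        return true;
      }

      for ( size_t i = 0; i < edge_count; ++i ) {
        if ( edges[ i ].source == from ) {
          next = edges[ i ].target;
          break;
        }
      }

      from = next;   /* the table is kept cycle-free, so this walk terminates */
    }

    return false;
  }

  /* Record a new wait-for edge; refuse it if it would close a cycle. */
  static bool add_wait_edge( const example_queue *source, const example_queue *target )
  {
    if ( reaches( target, source ) || edge_count >= EDGE_TABLE_SIZE ) {
      return false;   /* deadlock detected (or table full in this sketch) */
    }

    edges[ edge_count++ ] = (wait_edge) { source, target };
    return true;
  }
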
static void _Thread_queue_Link_remove( Thread_queue_Link *link )
@@ -174,9 +190,7 @@ static void _Thread_queue_Link_remove( Thread_queue_Link *link )
#if !defined(RTEMS_SMP)
static
#endif
-void _Thread_queue_Path_release_critical(
- Thread_queue_Context *queue_context
-)
+void _Thread_queue_Path_release( Thread_queue_Context *queue_context )
{
#if defined(RTEMS_SMP)
Chain_Node *head;
@@ -259,7 +273,7 @@ static void _Thread_queue_Path_append_deadlock_thread(
#if !defined(RTEMS_SMP)
static
#endif
-bool _Thread_queue_Path_acquire_critical(
+Thread_queue_Deadlock_status _Thread_queue_Path_acquire(
Thread_queue_Queue *queue,
Thread_Control *the_thread,
Thread_queue_Context *queue_context
@@ -271,11 +285,12 @@ bool _Thread_queue_Path_acquire_critical(
Thread_queue_Queue *target;
/*
- * For an overview please look at the non-SMP part below. We basically do
- * the same on SMP configurations. The fact that we may have more than one
- * executing thread and each thread queue has its own SMP lock makes the task
- * a bit more difficult. We have to avoid deadlocks at SMP lock level, since
- * this would result in an unrecoverable deadlock of the overall system.
+ * For an overview please look at the non-SMP part below. In SMP
+ * configurations, we basically do the same. The fact that we may have more
+ * than one executing thread and each thread queue has its own SMP lock makes
+ * the procedure a bit more difficult. We have to avoid deadlocks at SMP
+ * lock level, since this would result in an unrecoverable deadlock of the
+ * overall system.
*/
_Chain_Initialize_empty( &queue_context->Path.Links );
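
The comment above separates two distinct deadlock problems. The first is the application-level deadlock in the wait-for relation, which _Thread_queue_Path_acquire() merely has to detect and report. The second is a deadlock among the per-queue SMP locks taken while walking that relation, which would hang the system itself and therefore must be impossible by construction. RTEMS achieves this with the gate and pending-request machinery used below; as a much smaller illustration of the same lock-level concern, assuming ordinary POSIX mutexes rather than the RTEMS locking primitives, the classic pattern is to try the second lock and back off instead of sleeping while holding the first:

  #include <pthread.h>
  #include <stdbool.h>

  /*
   * Acquire two mutexes that other threads may take in the opposite order.
   * Sleeping on the second mutex while holding the first could deadlock, so
   * the second one is only tried; on failure everything is released and the
   * caller retries from a clean state.
   */
  static bool lock_pair_try( pthread_mutex_t *first, pthread_mutex_t *second )
  {
    pthread_mutex_lock( first );

    if ( pthread_mutex_trylock( second ) == 0 ) {
      return true;              /* both mutexes are held */
    }

    pthread_mutex_unlock( first );
    return false;               /* back off; caller may retry */
  }
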
@@ -283,11 +298,11 @@ bool _Thread_queue_Path_acquire_critical(
owner = queue->owner;
if ( owner == NULL ) {
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
if ( owner == the_thread ) {
- return false;
+ return THREAD_QUEUE_DEADLOCK_DETECTED;
}
_Chain_Initialize_node(
@@ -310,7 +325,11 @@ bool _Thread_queue_Path_acquire_critical(
link->Lock_context.Wait.queue = target;
if ( target != NULL ) {
- if ( _Thread_queue_Link_add( link, queue, target ) ) {
+ Thread_queue_Deadlock_status deadlock_status;
+
+ deadlock_status = _Thread_queue_Link_add( link, queue, target );
+
+ if ( deadlock_status == THREAD_QUEUE_NO_DEADLOCK ) {
_Thread_queue_Gate_add(
&owner->Wait.Lock.Pending_requests,
&link->Lock_context.Wait.Gate
@@ -330,15 +349,15 @@ bool _Thread_queue_Path_acquire_critical(
);
_Thread_Wait_remove_request_locked( owner, &link->Lock_context );
_Assert( owner->Wait.queue == NULL );
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
} else {
link->Lock_context.Wait.queue = NULL;
_Thread_queue_Path_append_deadlock_thread( owner, queue_context );
- return false;
+ return THREAD_QUEUE_DEADLOCK_DETECTED;
}
} else {
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
link = &owner->Wait.Link;
@@ -350,18 +369,18 @@ bool _Thread_queue_Path_acquire_critical(
owner = queue->owner;
if ( owner == NULL ) {
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
if ( owner == the_thread ) {
- return false;
+ return THREAD_QUEUE_DEADLOCK_DETECTED;
}
queue = owner->Wait.queue;
} while ( queue != NULL );
#endif
- return true;
+ return THREAD_QUEUE_NO_DEADLOCK;
}
void _Thread_queue_Enqueue_do_nothing_extra(
@@ -381,6 +400,7 @@ void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
+ (void) the_thread;
_Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}
@@ -391,8 +411,9 @@ void _Thread_queue_Enqueue(
Thread_queue_Context *queue_context
)
{
- Per_CPU_Control *cpu_self;
- bool success;
+ Thread_queue_Deadlock_status deadlock_status;
+ Per_CPU_Control *cpu_self;
+ bool success;
_Assert( queue_context->enqueue_callout != NULL );
@@ -404,8 +425,11 @@ void _Thread_queue_Enqueue(
_Thread_Wait_claim( the_thread, queue );
- if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
- _Thread_queue_Path_release_critical( queue_context );
+ deadlock_status =
+ _Thread_queue_Path_acquire( queue, the_thread, queue_context );
+
+ if ( deadlock_status == THREAD_QUEUE_DEADLOCK_DETECTED ) {
+ _Thread_queue_Path_release( queue_context );
_Thread_Wait_restore_default( the_thread );
_Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
_Thread_Wait_tranquilize( the_thread );
@@ -418,7 +442,7 @@ void _Thread_queue_Enqueue(
_Thread_Wait_claim_finalize( the_thread, operations );
( *operations->enqueue )( queue, the_thread, queue_context );
- _Thread_queue_Path_release_critical( queue_context );
+ _Thread_queue_Path_release( queue_context );
the_thread->Wait.return_code = STATUS_SUCCESSFUL;
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
@@ -467,14 +491,18 @@ Status_Control _Thread_queue_Enqueue_sticky(
Thread_queue_Context *queue_context
)
{
- Per_CPU_Control *cpu_self;
+ Thread_queue_Deadlock_status deadlock_status;
+ Per_CPU_Control *cpu_self;
_Assert( queue_context->enqueue_callout != NULL );
_Thread_Wait_claim( the_thread, queue );
- if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
- _Thread_queue_Path_release_critical( queue_context );
+ deadlock_status =
+ _Thread_queue_Path_acquire( queue, the_thread, queue_context );
+
+ if ( deadlock_status == THREAD_QUEUE_DEADLOCK_DETECTED ) {
+ _Thread_queue_Path_release( queue_context );
_Thread_Wait_restore_default( the_thread );
_Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
_Thread_Wait_tranquilize( the_thread );
@@ -486,7 +514,7 @@ Status_Control _Thread_queue_Enqueue_sticky(
_Thread_Wait_claim_finalize( the_thread, operations );
( *operations->enqueue )( queue, the_thread, queue_context );
- _Thread_queue_Path_release_critical( queue_context );
+ _Thread_queue_Path_release( queue_context );
the_thread->Wait.return_code = STATUS_SUCCESSFUL;
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
@@ -507,7 +535,7 @@ Status_Control _Thread_queue_Enqueue_sticky(
);
_Thread_Priority_update( queue_context );
- _Thread_Priority_and_sticky_update( the_thread, 1 );
+ _Thread_Priority_update_and_make_sticky( the_thread );
_Thread_Dispatch_enable( cpu_self );
while (
@@ -523,7 +551,7 @@ Status_Control _Thread_queue_Enqueue_sticky(
#endif
#if defined(RTEMS_MULTIPROCESSING)
-static bool _Thread_queue_MP_set_callout(
+bool _Thread_queue_MP_set_callout(
Thread_Control *the_thread,
const Thread_queue_Context *queue_context
)
@@ -543,6 +571,16 @@ static bool _Thread_queue_MP_set_callout(
}
#endif
+static void _Thread_queue_Force_ready_again( Thread_Control *the_thread )
+{
+ /*
+ * We must set the wait flags under protection of the current thread lock,
+ * otherwise a _Thread_Timeout() running on another processor may interfere.
+ */
+ _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
+ _Thread_Wait_restore_default( the_thread );
+}
+
static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
bool success;
@@ -555,13 +593,13 @@ static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
success = _Thread_Wait_flags_try_change_release(
the_thread,
THREAD_QUEUE_INTEND_TO_BLOCK,
- THREAD_QUEUE_READY_AGAIN
+ THREAD_WAIT_STATE_READY
);
if ( success ) {
unblock = false;
} else {
_Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
- _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
+ _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
unblock = true;
}
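
Whether the waiter has to be unblocked hinges on a single atomic transition of its wait flags: if it is still only intending to block, flipping the flags to the ready state is enough, while a lost race means the waiter has already reached the blocked state and needs an explicit unblock. A stand-alone sketch of that decision, using C11 atomics and hypothetical flag values in place of the RTEMS wait-flag helpers:

  #include <stdatomic.h>
  #include <stdbool.h>

  enum {                          /* hypothetical flag values */
    STATE_INTEND_TO_BLOCK = 1,
    STATE_BLOCKED         = 2,
    STATE_READY           = 3
  };

  /*
   * Returns true if the caller must perform an explicit unblock, i.e. the
   * waiter managed to block before the flags could be intercepted.
   */
  static bool make_ready_again( atomic_int *wait_flags )
  {
    int expected = STATE_INTEND_TO_BLOCK;

    if ( atomic_compare_exchange_strong( wait_flags, &expected, STATE_READY ) ) {
      return false;               /* intercepted before the thread blocked */
    }

    /* The only other state possible here is STATE_BLOCKED. */
    atomic_store( wait_flags, STATE_READY );
    return true;
  }
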
@@ -569,6 +607,45 @@ static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
return unblock;
}
+/*
+ * This function is used instead of _Thread_queue_Make_ready_again() in
+ * _Thread_queue_Surrender() and _Thread_queue_Surrender_priority_ceiling()
+ * since only the previous owner thread is allowed to surrender the thread
+ * queue.
+ *
+ * In uniprocessor configurations, there is only one executing thread (in this
+ * case the previous owner), so the new owner thread must be fully blocked.
+ *
+ * In SMP configurations, the new owner may execute on another processor in
+ * parallel, so we have to use _Thread_queue_Make_ready_again().
+ */
+static bool _Thread_queue_Make_new_owner_ready_again( Thread_Control *new_owner )
+{
+#if defined(RTEMS_SMP)
+ return _Thread_queue_Make_ready_again( new_owner );
+#else
+ _Assert( _Thread_Wait_flags_get( new_owner ) == THREAD_QUEUE_BLOCKED );
+ _Thread_queue_Force_ready_again( new_owner );
+ return false;
+#endif
+}
+
+static void _Thread_queue_Unblock_new_owner_and_remove_timer(
+ Thread_queue_Queue *queue,
+ Thread_Control *new_owner,
+ bool unblock
+)
+{
+#if defined(RTEMS_SMP)
+ if ( unblock ) {
+ _Thread_Remove_timer_and_unblock( new_owner, queue );
+ }
+#else
+ (void) unblock;
+ _Thread_Remove_timer_and_unblock( new_owner, queue );
+#endif
+}
+
bool _Thread_queue_Extract_locked(
Thread_queue_Queue *queue,
const Thread_queue_Operations *operations,
@@ -583,119 +660,161 @@ bool _Thread_queue_Extract_locked(
return _Thread_queue_Make_ready_again( the_thread );
}
-void _Thread_queue_Unblock_critical(
- bool unblock,
- Thread_queue_Queue *queue,
- Thread_Control *the_thread,
- ISR_lock_Context *lock_context
+void _Thread_queue_Resume(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
)
{
+ bool unblock;
+
+ unblock = _Thread_queue_Make_ready_again( the_thread );
+
if ( unblock ) {
Per_CPU_Control *cpu_self;
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
- _Thread_queue_Queue_release( queue, lock_context );
+ cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+ _Thread_queue_Queue_release(
+ queue, &queue_context->Lock_context.Lock_context
+ );
_Thread_Remove_timer_and_unblock( the_thread, queue );
_Thread_Dispatch_enable( cpu_self );
} else {
- _Thread_queue_Queue_release( queue, lock_context );
+ _Thread_queue_Queue_release(
+ queue, &queue_context->Lock_context.Lock_context
+ );
}
}
-void _Thread_queue_Extract_critical(
+void _Thread_queue_Surrender(
Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations,
- Thread_Control *the_thread,
- Thread_queue_Context *queue_context
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
)
{
- bool unblock;
+ Thread_Control *new_owner;
+ bool unblock;
+ Per_CPU_Control *cpu_self;
+
+ _Assert( heads != NULL );
- unblock = _Thread_queue_Extract_locked(
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ new_owner = ( *operations->surrender )(
queue,
- operations,
- the_thread,
+ heads,
+ previous_owner,
queue_context
);
+ queue->owner = new_owner;
- _Thread_queue_Unblock_critical(
- unblock,
+#if defined(RTEMS_MULTIPROCESSING)
+ if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
+#endif
+ {
+ _Thread_Resource_count_increment( new_owner );
+ }
+
+ unblock = _Thread_queue_Make_new_owner_ready_again( new_owner );
+
+ cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+ _Thread_queue_Queue_release(
queue,
- the_thread,
&queue_context->Lock_context.Lock_context
);
+
+ _Thread_Priority_update( queue_context );
+ _Thread_queue_Unblock_new_owner_and_remove_timer(
+ queue,
+ new_owner,
+ unblock
+ );
+
+ _Thread_Dispatch_enable( cpu_self );
}
-void _Thread_queue_Extract( Thread_Control *the_thread )
+void _Thread_queue_Surrender_no_priority(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+)
{
- Thread_queue_Context queue_context;
- Thread_queue_Queue *queue;
+ Thread_Control *the_thread;
- _Thread_queue_Context_initialize( &queue_context );
- _Thread_queue_Context_clear_priority_updates( &queue_context );
- _Thread_Wait_acquire( the_thread, &queue_context );
+ _Assert( heads != NULL );
+ _Assert( queue->owner == NULL );
- queue = the_thread->Wait.queue;
+ the_thread = ( *operations->surrender )( queue, heads, NULL, queue_context );
- if ( queue != NULL ) {
- bool unblock;
+#if defined(RTEMS_MULTIPROCESSING)
+ _Thread_queue_MP_set_callout( the_thread, queue_context );
+#endif
- _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
- _Thread_queue_Context_set_MP_callout(
- &queue_context,
- _Thread_queue_MP_callout_do_nothing
- );
- unblock = _Thread_queue_Extract_locked(
- queue,
- the_thread->Wait.operations,
- the_thread,
- &queue_context
- );
- _Thread_queue_Unblock_critical(
- unblock,
- queue,
- the_thread,
- &queue_context.Lock_context.Lock_context
- );
- } else {
- _Thread_Wait_release( the_thread, &queue_context );
- }
+ _Thread_queue_Resume( queue, the_thread, queue_context );
}
-void _Thread_queue_Surrender(
+Status_Control _Thread_queue_Surrender_priority_ceiling(
Thread_queue_Queue *queue,
- Thread_queue_Heads *heads,
- Thread_Control *previous_owner,
+ Thread_Control *executing,
+ Priority_Node *priority_ceiling,
Thread_queue_Context *queue_context,
const Thread_queue_Operations *operations
)
{
- Thread_Control *new_owner;
- bool unblock;
- Per_CPU_Control *cpu_self;
+ ISR_lock_Context lock_context;
+ Thread_queue_Heads *heads;
+ Thread_Control *new_owner;
+ bool unblock;
+ Per_CPU_Control *cpu_self;
- _Assert( heads != NULL );
+ _Thread_Resource_count_decrement( executing );
_Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_acquire_default_critical( executing, &lock_context );
+ _Thread_Priority_remove( executing, priority_ceiling, queue_context );
+ _Thread_Wait_release_default_critical( executing, &lock_context );
+
+ heads = queue->heads;
+ queue->owner = NULL;
+
+ if ( heads == NULL ) {
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release(
+ queue,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Priority_update( queue_context );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+ }
+
new_owner = ( *operations->surrender )(
queue,
heads,
- previous_owner,
+ NULL,
queue_context
);
+
queue->owner = new_owner;
+ unblock = _Thread_queue_Make_new_owner_ready_again( new_owner );
+
#if defined(RTEMS_MULTIPROCESSING)
- if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
+ if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
{
_Thread_Resource_count_increment( new_owner );
+ _Thread_Wait_acquire_default_critical( new_owner, &lock_context );
+ _Thread_Priority_add( new_owner, priority_ceiling, queue_context );
+ _Thread_Wait_release_default_critical( new_owner, &lock_context );
}
- unblock = _Thread_queue_Make_ready_again( new_owner );
-
cpu_self = _Thread_queue_Dispatch_disable( queue_context );
_Thread_queue_Queue_release(
queue,
@@ -703,12 +822,14 @@ void _Thread_queue_Surrender(
);
_Thread_Priority_update( queue_context );
-
- if ( unblock ) {
- _Thread_Remove_timer_and_unblock( new_owner, queue );
- }
+ _Thread_queue_Unblock_new_owner_and_remove_timer(
+ queue,
+ new_owner,
+ unblock
+ );
_Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
}
#if defined(RTEMS_SMP)
@@ -733,15 +854,25 @@ void _Thread_queue_Surrender_sticky(
queue_context
);
queue->owner = new_owner;
- _Thread_queue_Make_ready_again( new_owner );
+
+ /*
+ * There is no need to unblock the thread, since in the corresponding
+ * _Thread_queue_Enqueue_sticky() the thread is not blocked by the scheduler.
+ * Instead, the thread busy waits for a change of its thread wait flags.
+ * Timeouts cannot interfere since we hold the thread queue lock.
+ */
+ _Assert(
+ _Thread_Wait_flags_get( new_owner ) == THREAD_QUEUE_INTEND_TO_BLOCK
+ );
+ _Thread_queue_Force_ready_again( new_owner );
cpu_self = _Thread_queue_Dispatch_disable( queue_context );
_Thread_queue_Queue_release(
queue,
&queue_context->Lock_context.Lock_context
);
- _Thread_Priority_and_sticky_update( previous_owner, -1 );
- _Thread_Priority_and_sticky_update( new_owner, 0 );
+ _Thread_Priority_update_and_clean_sticky( previous_owner );
+ _Thread_Priority_update_ignore_sticky( new_owner );
_Thread_Dispatch_enable( cpu_self );
}
#endif
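
The comment in _Thread_queue_Surrender_sticky() relies on the counterpart in _Thread_queue_Enqueue_sticky(): a sticky waiter is never blocked by the scheduler, it spins on its own wait flags until the surrendering owner flips them, and the owner does so while still holding the thread queue lock so that a concurrent timeout cannot interfere. Reusing the hypothetical flag values from the earlier sketch, the waiting side reduces to roughly the following; this illustrates the busy-wait idea, not the actual RTEMS loop:

  #include <stdatomic.h>

  enum { STATE_INTEND_TO_BLOCK = 1, STATE_READY = 3 };  /* hypothetical values */

  /* Spin until another processor publishes a new wait state. */
  static void wait_until_ready_sticky( const atomic_int *wait_flags )
  {
    while ( atomic_load( wait_flags ) == STATE_INTEND_TO_BLOCK ) {
      /* A real implementation would insert a pause/yield hint here. */
    }
  }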