diff options
author | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2016-07-26 10:34:21 +0200 |
---|---|---|
committer | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2016-07-27 10:55:30 +0200 |
commit | d79df38c2bea50112214ade95776cb90d693e390 (patch) | |
tree | 77bcc6ae76dde57c449d808ef2cce318cd198b06 /cpukit/score | |
parent | score: Turn thread lock into thread wait lock (diff) | |
download | rtems-d79df38c2bea50112214ade95776cb90d693e390.tar.bz2 |
score: Add deadlock detection
The mutex objects use the owner field of the thread queues for the mutex
owner. Use this and add deadlock detection to
_Thread_queue_Enqueue_critical() for thread queues with an owner.
Update #2412.
Update #2556.
Close #2765.
Diffstat (limited to '')
-rw-r--r-- | cpukit/score/include/rtems/score/interr.h | 3 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/thread.h | 6 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/threadq.h | 45 | ||||
-rw-r--r-- | cpukit/score/include/rtems/score/threadqimpl.h | 38 | ||||
-rw-r--r-- | cpukit/score/src/coremutexseize.c | 9 | ||||
-rw-r--r-- | cpukit/score/src/mutex.c | 4 | ||||
-rw-r--r-- | cpukit/score/src/threadqenqueue.c | 265 |
7 files changed, 353 insertions, 17 deletions
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h index 8d4c104e28..845dc6f198 100644 --- a/cpukit/score/include/rtems/score/interr.h +++ b/cpukit/score/include/rtems/score/interr.h @@ -163,7 +163,8 @@ typedef enum { INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR, INTERNAL_ERROR_RESOURCE_IN_USE, INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL, - INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL + INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL, + INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK } Internal_errors_Core_list; typedef CPU_Uint32ptr Internal_errors_t; diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h index a52c34f7fd..d03f0c25e5 100644 --- a/cpukit/score/include/rtems/score/thread.h +++ b/cpukit/score/include/rtems/score/thread.h @@ -327,6 +327,12 @@ typedef struct { */ Chain_Control Pending_requests; } Lock; + + /** + * @brief Thread queue link provided for use by the thread wait lock owner to + * build a thread queue path. + */ + Thread_queue_Link Link; #endif /** diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h index 9a178049ea..a39a031905 100644 --- a/cpukit/score/include/rtems/score/threadq.h +++ b/cpukit/score/include/rtems/score/threadq.h @@ -49,6 +49,17 @@ typedef struct Thread_queue_Operations Thread_queue_Operations; typedef struct Thread_queue_Path Thread_queue_Path; +/** + * @brief Thread queue deadlock callout. + * + * @param the_thread The thread that detected the deadlock. + * + * @see _Thread_queue_Context_set_deadlock_callout(). + */ +typedef void ( *Thread_queue_Deadlock_callout )( + Thread_Control *the_thread +); + #if defined(RTEMS_MULTIPROCESSING) /** * @brief Multiprocessing (MP) support callout for thread queue operations. @@ -117,6 +128,17 @@ typedef struct { uint64_t timeout; /** + * @brief Invoked in case of a detected deadlock. 
+ * + * Must be initialized for _Thread_queue_Enqueue_critical() in case the + * thread queue may have an owner, e.g. for mutex objects. + * + * @see _Thread_queue_Context_set_deadlock_callout(). + */ + Thread_queue_Deadlock_callout deadlock_callout; + +#if defined(RTEMS_MULTIPROCESSING) + /** * @brief Callout to unblock the thread in case it is actually a thread * proxy. * @@ -126,7 +148,6 @@ typedef struct { * * @see _Thread_queue_Context_set_MP_callout(). */ -#if defined(RTEMS_MULTIPROCESSING) Thread_queue_MP_callout mp_callout; #endif @@ -175,6 +196,28 @@ typedef struct { */ typedef struct { /** + * @brief Node to register this link in the global thread queue links lookup + * tree. + */ + RBTree_Node Registry_node; + + /** + * @brief The source thread queue determined by the thread queue owner. + */ + Thread_queue_Queue *source; + + /** + * @brief The target thread queue determined by the thread wait queue of the + * source owner. + */ + Thread_queue_Queue *target; + + /** + * @brief Node to add this link to a thread queue path. + */ + Chain_Node Path_node; + + /** * @brief The owner of this thread queue link. */ Thread_Control *owner; diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h index 3d4f6136f6..f0ca614ba1 100644 --- a/cpukit/score/include/rtems/score/threadqimpl.h +++ b/cpukit/score/include/rtems/score/threadqimpl.h @@ -52,6 +52,11 @@ extern "C" { struct Thread_queue_Path { #if defined(RTEMS_SMP) /** + * @brief The chain of thread queue links defining the thread queue path. + */ + Chain_Control Links; + + /** * @brief The start of a thread queue path. */ Thread_queue_Link Start; @@ -86,6 +91,16 @@ typedef struct { } Thread_queue_Syslock_queue; /** + * @brief Sets the thread wait return code to STATUS_DEADLOCK. + */ +void _Thread_queue_Deadlock_status( Thread_Control *the_thread ); + +/** + * @brief Results in an INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal error. 
+ */ +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread ); + +/** * @brief Initializes a thread queue context. * * @param queue_context The thread queue context to initialize. @@ -97,6 +112,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize( #if defined(RTEMS_DEBUG) memset( queue_context, 0, sizeof( *queue_context ) ); queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef; + queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal; #else (void) queue_context; #endif @@ -173,6 +189,28 @@ _Thread_queue_Context_set_absolute_timeout( } /** + * @brief Sets the deadlock callout in the thread queue + * context. + * + * A deadlock callout must be provided for _Thread_queue_Enqueue_critical() + * operations that operate on thread queues which may have an owner, e.g. mutex + * objects. Available deadlock callouts are _Thread_queue_Deadlock_status() + * and _Thread_queue_Deadlock_fatal(). + * + * @param queue_context The thread queue context. + * @param deadlock_callout The deadlock callout. + * + * @see _Thread_queue_Enqueue_critical(). + */ +RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout( + Thread_queue_Context *queue_context, + Thread_queue_Deadlock_callout deadlock_callout +) +{ + queue_context->deadlock_callout = deadlock_callout; +} + +/** * @brief Sets the MP callout in the thread queue context. * * @param queue_context The thread queue context. 
diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c index dacb274bdf..cfefc50333 100644 --- a/cpukit/score/src/coremutexseize.c +++ b/cpukit/score/src/coremutexseize.c @@ -62,6 +62,11 @@ Status_Control _CORE_mutex_Seize_slow( _Thread_queue_Context_set_expected_level( queue_context, 2 ); #endif + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_status + ); + _Thread_queue_Enqueue_critical( &the_mutex->Wait_queue.Queue, CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS, @@ -87,6 +92,10 @@ Status_Control _CORE_mutex_Seize_no_protocol_slow( { if ( wait ) { _Thread_queue_Context_set_expected_level( queue_context, 1 ); + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_status + ); _Thread_queue_Enqueue_critical( &the_mutex->Wait_queue.Queue, operations, diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c index 12a4f292e1..085d5c2945 100644 --- a/cpukit/score/src/mutex.c +++ b/cpukit/score/src/mutex.c @@ -108,6 +108,10 @@ static void _Mutex_Acquire_slow( ) { _Thread_queue_Context_set_expected_level( queue_context, 1 ); + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_fatal + ); _Thread_queue_Enqueue_critical( &mutex->Queue.Queue, MUTEX_TQ_OPERATIONS, diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c index 19c345b6c5..a9b2c35af5 100644 --- a/cpukit/score/src/threadqenqueue.c +++ b/cpukit/score/src/threadqenqueue.c @@ -9,6 +9,8 @@ * COPYRIGHT (c) 1989-2014. * On-Line Applications Research Corporation (OAR). * + * Copyright (c) 2015, 2016 embedded brains GmbH. + * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rtems.org/license/LICENSE. 
@@ -34,49 +36,275 @@ #define THREAD_QUEUE_READY_AGAIN \ (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN) +#if defined(RTEMS_SMP) +/* + * A global registry of active thread queue links is used to provide deadlock + * detection on SMP configurations. This is simple to implement and no + * additional storage is required for the thread queues. The disadvantage is + * the global registry is not scalable and may lead to lock contention. + * However, the registry is only used in case of nested resource conflicts. In + * this case, the application is already in trouble. + */ + +typedef struct { + ISR_lock_Control Lock; + + RBTree_Control Links; +} Thread_queue_Links; + +static Thread_queue_Links _Thread_queue_Links = { + ISR_LOCK_INITIALIZER( "Thread Queue Links" ), + RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links ) +}; + +static bool _Thread_queue_Link_equal( + const void *left, + const RBTree_Node *right +) +{ + const Thread_queue_Queue *the_left; + const Thread_queue_Link *the_right; + + the_left = left; + the_right = (Thread_queue_Link *) right; + + return the_left == the_right->source; +} + +static bool _Thread_queue_Link_less( + const void *left, + const RBTree_Node *right +) +{ + const Thread_queue_Queue *the_left; + const Thread_queue_Link *the_right; + + the_left = left; + the_right = (Thread_queue_Link *) right; + + return (uintptr_t) the_left < (uintptr_t) the_right->source; +} + +static void *_Thread_queue_Link_map( RBTree_Node *node ) +{ + return node; +} + +static Thread_queue_Link *_Thread_queue_Link_find( + Thread_queue_Links *links, + Thread_queue_Queue *source +) +{ + return _RBTree_Find_inline( + &links->Links, + source, + _Thread_queue_Link_equal, + _Thread_queue_Link_less, + _Thread_queue_Link_map + ); +} + +static bool _Thread_queue_Link_add( + Thread_queue_Link *link, + Thread_queue_Queue *source, + Thread_queue_Queue *target +) +{ + Thread_queue_Links *links; + Thread_queue_Queue *recursive_target; + ISR_lock_Context lock_context; + + 
links = &_Thread_queue_Links; + recursive_target = target; + + _ISR_lock_Acquire( &links->Lock, &lock_context ); + + while ( true ) { + Thread_queue_Link *recursive_link; + + recursive_link = _Thread_queue_Link_find( links, recursive_target ); + + if ( recursive_link == NULL ) { + break; + } + + recursive_target = recursive_link->target; + + if ( recursive_target == source ) { + _ISR_lock_Release( &links->Lock, &lock_context ); + return false; + } + } + + link->source = source; + link->target = target; + _RBTree_Insert_inline( + &links->Links, + &link->Registry_node, + source, + _Thread_queue_Link_less + ); + + _ISR_lock_Release( &links->Lock, &lock_context ); + return true; +} + +static void _Thread_queue_Link_remove( Thread_queue_Link *link ) +{ + Thread_queue_Links *links; + ISR_lock_Context lock_context; + + links = &_Thread_queue_Links; + + _ISR_lock_Acquire( &links->Lock, &lock_context ); + _RBTree_Extract( &links->Links, &link->Registry_node ); + _ISR_lock_Release( &links->Lock, &lock_context ); +} +#endif + static void _Thread_queue_Path_release( Thread_queue_Path *path ) { #if defined(RTEMS_SMP) - Thread_queue_Link *link; + Chain_Node *head; + Chain_Node *node; - link = &path->Start; + head = _Chain_Head( &path->Links ); + node = _Chain_Last( &path->Links ); + + while ( head != node ) { + Thread_queue_Link *link; + + link = RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node ); + + if ( link->Queue_context.Wait.queue_lock != NULL ) { + _Thread_queue_Link_remove( link ); + } - if ( link->owner != NULL ) { _Thread_Wait_release_critical( link->owner, &link->Queue_context ); + + node = _Chain_Previous( node ); +#if defined(RTEMS_DEBUG) + _Chain_Set_off_chain( &link->Path_node ); +#endif } #else (void) path; #endif } -static void _Thread_queue_Path_acquire( +static bool _Thread_queue_Path_acquire( Thread_Control *the_thread, Thread_queue_Queue *queue, Thread_queue_Path *path ) { -#if defined(RTEMS_SMP) Thread_Control *owner; + +#if defined(RTEMS_SMP) 
Thread_queue_Link *link; + Thread_queue_Queue *target; + + /* + * For an overview please look at the non-SMP part below. We basically do + * the same on SMP configurations. The fact that we may have more than one + * executing thread and each thread queue has its own SMP lock makes the task + * a bit more difficult. We have to avoid deadlocks at SMP lock level, since + * this would result in an unrecoverable deadlock of the overall system. + */ + + _Chain_Initialize_empty( &path->Links ); + _Chain_Initialize_node( &path->Start.Path_node ); + _Thread_queue_Context_initialize( &path->Start.Queue_context ); owner = queue->owner; if ( owner == NULL ) { - return; + return true; + } + + if ( owner == the_thread ) { + return false; } link = &path->Start; - link->owner = owner; - _Thread_Wait_acquire_default_critical( - owner, - &link->Queue_context.Lock_context - ); + do { + _Chain_Append_unprotected( &path->Links, &link->Path_node ); + link->owner = owner; + + _Thread_Wait_acquire_default_critical( + owner, + &link->Queue_context.Lock_context + ); + + target = owner->Wait.queue; + link->Queue_context.Wait.queue = target; + link->Queue_context.Wait.operations = owner->Wait.operations; + + if ( target != NULL ) { + if ( _Thread_queue_Link_add( link, queue, target ) ) { + link->Queue_context.Wait.queue_lock = &target->Lock; + _Chain_Append_unprotected( + &owner->Wait.Lock.Pending_requests, + &link->Queue_context.Wait.Gate.Node + ); + _Thread_Wait_release_default_critical( + owner, + &link->Queue_context.Lock_context + ); + _Thread_Wait_acquire_queue_critical( + &target->Lock, + &link->Queue_context + ); + + if ( link->Queue_context.Wait.queue == NULL ) { + return true; + } + } else { + link->Queue_context.Wait.queue_lock = NULL; + _Thread_queue_Path_release( path ); + return false; + } + } else { + link->Queue_context.Wait.queue_lock = NULL; + return true; + } + + link = &owner->Wait.Link; + queue = target; + owner = queue->owner; + } while ( owner != NULL ); #else - (void) 
the_thread; - (void) queue; - (void) path; + do { + owner = queue->owner; + + if ( owner == NULL ) { + return true; + } + + if ( owner == the_thread ) { + return false; + } + + queue = owner->Wait.queue; + } while ( queue != NULL ); #endif + + return true; +} + +void _Thread_queue_Deadlock_status( Thread_Control *the_thread ) +{ + the_thread->Wait.return_code = STATUS_DEADLOCK; +} + +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread ) +{ + _Terminate( + INTERNAL_ERROR_CORE, + false, + INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK + ); } void _Thread_queue_Enqueue_critical( @@ -99,8 +327,15 @@ void _Thread_queue_Enqueue_critical( _Thread_Wait_claim( the_thread, queue, operations ); - _Thread_queue_Path_acquire( the_thread, queue, &path ); + if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) { + _Thread_Wait_restore_default( the_thread ); + _Thread_queue_Queue_release( queue, &queue_context->Lock_context ); + ( *queue_context->deadlock_callout )( the_thread ); + return; + } + ( *operations->enqueue )( queue, the_thread, &path ); + _Thread_queue_Path_release( &path ); the_thread->Wait.return_code = STATUS_SUCCESSFUL; |