summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-09-25 11:11:18 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2021-11-09 15:47:11 +0100
commitd4da2510117821e64841c1081a557a4637d750a8 (patch)
treef9582abc18f5495cc20efc63c620dfba20bbec6a
parent0199fa343e74328e260beaa9dab96e8b22afd068 (diff)
validation: Test thread queue deadlock detail
-rw-r--r--testsuites/validation/tc-score-tq-smp.c348
1 file changed, 308 insertions, 40 deletions
diff --git a/testsuites/validation/tc-score-tq-smp.c b/testsuites/validation/tc-score-tq-smp.c
index 2f6218c838..c0cc36db7a 100644
--- a/testsuites/validation/tc-score-tq-smp.c
+++ b/testsuites/validation/tc-score-tq-smp.c
@@ -53,7 +53,8 @@
#endif
#include <rtems/score/smpbarrier.h>
-#include <rtems/score/threadq.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
#include "tx-support.h"
@@ -81,6 +82,27 @@
*
* - Clean up all used resources.
*
+ * - Build a cyclic dependency graph using several worker threads and mutexes.
+ *   Use the mutexes and the workers to construct a thread queue deadlock which
+ * is detected on one processor while it uses thread queue links inserted by
+ * another processor. The runner thread controls the test scenario via the
+ * two thread queue locks. This is an important test scenario which shows
+ * why the thread queue implementation is a bit more complicated in SMP
+ * configurations.
+ *
+ * - Let worker D wait for mutex A. Let worker C wait for mutex D. Let
+ * worker B wait for mutex C.
+ *
+ * - Let worker A attempt to obtain mutex B. Let worker A wait on the lock
+ * of mutex C. Worker A will insert two thread queue links.
+ *
+ * - Let worker E try to obtain mutex D. Worker E will add a thread queue
+ * link which is later used by worker A to detect the deadlock.
+ *
+ * - Let worker A continue the obtain sequence. It will detect a deadlock.
+ *
+ * - Clean up all used resources.
+ *
* @{
*/
@@ -89,6 +111,11 @@
*/
typedef struct {
/**
+ * @brief This member contains the runner identifier.
+ */
+ rtems_id runner_id;
+
+ /**
* @brief This member contains the worker A identifier.
*/
rtems_id worker_a_id;
@@ -99,9 +126,39 @@ typedef struct {
rtems_id worker_b_id;
/**
- * @brief This member contains the mutex identifier.
+ * @brief This member contains the worker C identifier.
+ */
+ rtems_id worker_c_id;
+
+ /**
+ * @brief This member contains the worker D identifier.
+ */
+ rtems_id worker_d_id;
+
+ /**
+ * @brief This member contains the worker E identifier.
+ */
+ rtems_id worker_e_id;
+
+ /**
+ * @brief This member contains the mutex A identifier.
+ */
+ rtems_id mutex_a_id;
+
+ /**
+ * @brief This member contains the mutex B identifier.
*/
- rtems_id mutex_id;
+ rtems_id mutex_b_id;
+
+ /**
+ * @brief This member contains the mutex C identifier.
+ */
+ rtems_id mutex_c_id;
+
+ /**
+ * @brief This member contains the mutex D identifier.
+ */
+ rtems_id mutex_d_id;
/**
* @brief This member contains the thread queue of the mutex.
@@ -114,8 +171,8 @@ typedef struct {
WrapThreadQueueContext wrap;
/**
- * @brief This member contains the barrier to synchronize the runner, worker
- * A, and worker B.
+ * @brief This member contains the barrier to synchronize the runner and the
+ * workers.
*/
SMP_barrier_Control barrier;
@@ -128,10 +185,36 @@ typedef struct {
static ScoreTqValSmp_Context
ScoreTqValSmp_Instance;
+/* Fixture setup: raise the runner task to PRIO_NORMAL for the test actions. */
+static void ScoreTqValSmp_Setup( ScoreTqValSmp_Context *ctx )
+{
+  SetSelfPriority( PRIO_NORMAL );
+}
+
+/* Adapter with the void pointer signature expected by T_fixture.setup. */
+static void ScoreTqValSmp_Setup_Wrap( void *arg )
+{
+  ScoreTqValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreTqValSmp_Setup( ctx );
+}
+
+/* Fixture teardown: restore the runner priority (counterpart of the setup). */
+static void ScoreTqValSmp_Teardown( ScoreTqValSmp_Context *ctx )
+{
+  RestoreRunnerPriority();
+}
+
+/* Adapter with the void pointer signature expected by T_fixture.teardown. */
+static void ScoreTqValSmp_Teardown_Wrap( void *arg )
+{
+  ScoreTqValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreTqValSmp_Teardown( ctx );
+}
+
+/* Test fixture: runs setup/teardown around the shared context instance. */
static T_fixture ScoreTqValSmp_Fixture = {
-  .setup = NULL,
+  .setup = ScoreTqValSmp_Setup_Wrap,
  .stop = NULL,
-  .teardown = NULL,
+  .teardown = ScoreTqValSmp_Teardown_Wrap,
  .scope = NULL,
  .initial_context = &ScoreTqValSmp_Instance
};
@@ -140,29 +223,17 @@ typedef ScoreTqValSmp_Context Context;
static void Extract( void *arg )
{
- Context *ctx;
- unsigned int ticket_0;
- unsigned int ticket_1;
+ Context *ctx;
ctx = arg;
- ticket_0 = _Atomic_Load_uint(
- &ctx->thread_queue->Lock.next_ticket,
- ATOMIC_ORDER_RELAXED
- );
-
- /* B1 */
+ /* PC1 */
_SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
/*
* Ensure that worker A acquired the thread wait lock of worker B.
*/
- do {
- ticket_1 = _Atomic_Load_uint(
- &ctx->thread_queue->Lock.next_ticket,
- ATOMIC_ORDER_RELAXED
- );
- } while ( ticket_0 == ticket_1 );
+ TicketLockWaitForOthers( &ctx->thread_queue->Lock, 1 );
/*
* Continue with the thread queue extraction. The thread wait lock of
@@ -175,7 +246,7 @@ static void Extract( void *arg )
*/
}
-static void WorkerA( rtems_task_argument arg )
+static void PriorityChangeWorkerA( rtems_task_argument arg )
{
Context *ctx;
SMP_barrier_State state;
@@ -183,30 +254,108 @@ static void WorkerA( rtems_task_argument arg )
ctx = (Context *) arg;
_SMP_barrier_State_initialize( &state );
- ObtainMutex( ctx->mutex_id );
+ ObtainMutex( ctx->mutex_a_id );
- /* B0 */
+ /* PC0 */
_SMP_barrier_Wait( &ctx->barrier, &state, 2 );
- /* B1 */
+ /* PC1 */
_SMP_barrier_Wait( &ctx->barrier, &state, 2 );
SetPriority( ctx->worker_b_id, PRIO_VERY_HIGH );
- ReleaseMutex( ctx->mutex_id );
+ ReleaseMutex( ctx->mutex_a_id );
+
+ /* PC2 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ (void) ReceiveAnyEvents();
+}
+
+/*
+ * Block on mutex A, which is held by worker A.  The runner later extracts
+ * this blocked thread from the thread queue via the wrapped extract
+ * operation (see WrapThreadQueueExtractDirect() in the first action).
+ */
+static void PriorityChangeWorkerB( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_a_id );
+}
+
+/*
+ * Deadlock scenario, worker A: own mutex A, then attempt to obtain mutex B.
+ * With the B -> C -> D -> A waiting chain built by the other workers, this
+ * obtain must be detected as a deadlock (see the test plan comment above).
+ */
+static void DeadlockWorkerA( rtems_task_argument arg )
+{
+  Context *ctx;
+  SMP_barrier_State state;
+
+  ctx = (Context *) arg;
+  _SMP_barrier_State_initialize( &state );
+
+  /* Own mutex A, so that worker D's later obtain of A closes the cycle. */
+  ObtainMutex( ctx->mutex_a_id );
-  /* B2 */
+  /* D0 */
  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
-  SuspendSelf();
+  /* D1 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  /* Mutex B is held by worker B; this obtain must report a deadlock. */
+  ObtainMutexDeadlock( ctx->mutex_b_id );
+
+  ReleaseMutex( ctx->mutex_a_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_0 );
+  /* Park in the event receive so the runner can safely delete this task. */
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Deadlock scenario, worker B: own mutex B (the one worker A will attempt),
+ * report this to the runner with RTEMS_EVENT_5, then block on mutex C which
+ * is held by worker C, extending the waiting chain.
+ */
+static void DeadlockWorkerB( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_b_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_5 );
+  ObtainMutex( ctx->mutex_c_id );
+  ReleaseMutex( ctx->mutex_c_id );
+  ReleaseMutex( ctx->mutex_b_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_1 );
+  /* Park in the event receive so the runner can safely delete this task. */
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Deadlock scenario, worker C: own mutex C, then block on mutex D which is
+ * held by worker D, extending the waiting chain.
+ */
+static void DeadlockWorkerC( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_c_id );
+  ObtainMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_c_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_2 );
+  /* Park in the event receive so the runner can safely delete this task. */
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Deadlock scenario, worker D: own mutex D, then block on mutex A which is
+ * held by worker A — this closes the cyclic dependency A -> B -> C -> D -> A.
+ */
+static void DeadlockWorkerD( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_d_id );
+  ObtainMutex( ctx->mutex_a_id );
+  ReleaseMutex( ctx->mutex_a_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_3 );
+  /* Park in the event receive so the runner can safely delete this task. */
+  (void) ReceiveAnyEvents();
}
-static void WorkerB( rtems_task_argument arg )
+/*
+ * Deadlock scenario, worker E: block on mutex D (held by worker D).  Per the
+ * test plan above, this adds the thread queue link which worker A later uses
+ * while detecting the deadlock.
+ */
+static void DeadlockWorkerE( rtems_task_argument arg )
{
  Context *ctx;

  ctx = (Context *) arg;

-  ObtainMutex( ctx->mutex_id );
+  ObtainMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_4 );
+  /* Park in the event receive so the runner can safely delete this task. */
+  (void) ReceiveAnyEvents();
}
/**
@@ -215,22 +364,21 @@ static void WorkerB( rtems_task_argument arg )
*/
static void ScoreTqValSmp_Action_0( ScoreTqValSmp_Context *ctx )
{
- SetSelfPriority( PRIO_NORMAL );
_SMP_barrier_Control_initialize( &ctx->barrier );
_SMP_barrier_State_initialize( &ctx->barrier_state );
WrapThreadQueueInitialize( &ctx->wrap, Extract, ctx );
- ctx->mutex_id = CreateMutex();
- ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_id );
+ ctx->mutex_a_id = CreateMutex();
+ ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_a_id );
/*
* Create and start worker A on a second processor. Let it obtain the mutex.
*/
ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
- StartTask( ctx->worker_a_id, WorkerA, ctx );
+ StartTask( ctx->worker_a_id, PriorityChangeWorkerA, ctx );
- /* B0 */
+ /* PC0 */
_SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
/*
@@ -240,20 +388,139 @@ static void ScoreTqValSmp_Action_0( ScoreTqValSmp_Context *ctx )
* carried out by worker A.
*/
ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
- StartTask( ctx->worker_b_id, WorkerB, ctx );
+ StartTask( ctx->worker_b_id, PriorityChangeWorkerB, ctx );
WrapThreadQueueExtractDirect( &ctx->wrap, GetThread( ctx->worker_b_id ) );
DeleteTask( ctx->worker_b_id );
/*
* Clean up all used resources.
*/
- /* B2 */
+ /* PC2 */
_SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
+ WaitForExecutionStop( ctx->worker_a_id );
DeleteTask( ctx->worker_a_id );
- DeleteMutex( ctx->mutex_id );
+ DeleteMutex( ctx->mutex_a_id );
WrapThreadQueueDestroy( &ctx->wrap );
- RestoreRunnerPriority();
+}
+
+/**
+ * @brief Build a cyclic dependency graph using several worker threads and
+ *   mutexes.  Use the mutexes and the workers to construct a thread queue
+ * deadlock which is detected on one processor while it uses thread queue
+ * links inserted by another processor. The runner thread controls the test
+ * scenario via the two thread queue locks. This is an important test
+ * scenario which shows why the thread queue implementation is a bit more
+ * complicated in SMP configurations.
+ */
+static void ScoreTqValSmp_Action_1( ScoreTqValSmp_Context *ctx )
+{
+  Thread_queue_Queue *queue_b;
+  Thread_queue_Queue *queue_c;
+  ISR_lock_Context lock_context;
+  SMP_barrier_State state;
+
+  if ( rtems_scheduler_get_processor_maximum() <= 2 ) {
+    /*
+     * We can only run this validation test on systems with three or more
+     * processors.  The sequence under test can happen on systems with only two
+     * processors, however, we need a third processor to control the other two
+     * processors via ISR locks to get a deterministic test scenario.
+     */
+    return;
+  }
+
+  /* Remember the runner identifier so the workers can send events to it. */
+  ctx->runner_id = rtems_task_self();
+
+  _SMP_barrier_Control_initialize( &ctx->barrier );
+  _SMP_barrier_State_initialize( &state );
+
+  ctx->mutex_a_id = CreateMutexNoProtocol();
+  ctx->mutex_b_id = CreateMutexNoProtocol();
+  ctx->mutex_c_id = CreateMutexNoProtocol();
+  ctx->mutex_d_id = CreateMutexNoProtocol();
+
+  /* The runner controls the scenario through these two thread queue locks. */
+  queue_b = GetMutexThreadQueue( ctx->mutex_b_id );
+  queue_c = GetMutexThreadQueue( ctx->mutex_c_id );
+
+  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
+  ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
+  ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
+  ctx->worker_d_id = CreateTask( "WRKD", PRIO_NORMAL );
+  ctx->worker_e_id = CreateTask( "WRKE", PRIO_NORMAL );
+
+  /* Workers A to D run on scheduler B, worker E on scheduler C. */
+  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
+  SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_d_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_e_id, SCHEDULER_C_ID, PRIO_NORMAL );
+
+  /*
+   * Let worker D wait for mutex A.  Let worker C wait for mutex D.  Let worker
+   * B wait for mutex C.
+   */
+  StartTask( ctx->worker_a_id, DeadlockWorkerA, ctx );
+
+  /* D0 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  StartTask( ctx->worker_d_id, DeadlockWorkerD, ctx );
+  StartTask( ctx->worker_c_id, DeadlockWorkerC, ctx );
+  StartTask( ctx->worker_b_id, DeadlockWorkerB, ctx );
+  /* RTEMS_EVENT_5 signals that worker B owns mutex B (see DeadlockWorkerB). */
+  ReceiveAllEvents( RTEMS_EVENT_5 );
+  WaitForExecutionStop( ctx->worker_b_id );
+
+  /*
+   * Let worker A attempt to obtain mutex B.  Let worker A wait on the lock of
+   * mutex C.  Worker A will insert two thread queue links.
+   */
+  _ISR_lock_ISR_disable( &lock_context );
+  _Thread_queue_Queue_acquire_critical(
+    queue_c,
+    &_Thread_Executing->Potpourri_stats,
+    &lock_context
+  );
+  _ISR_lock_ISR_enable( &lock_context );
+
+  /* D1 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  /* Wait until one other processor waits on the lock of mutex C. */
+  TicketLockWaitForOthers( &queue_c->Lock, 1 );
+
+  /*
+   * Let worker E try to obtain mutex D.  Worker E will add a thread queue link
+   * which is later used by worker A to detect the deadlock.
+   */
+  StartTask( ctx->worker_e_id, DeadlockWorkerE, ctx );
+  /* Wait until one other processor waits on the lock of mutex B. */
+  TicketLockWaitForOthers( &queue_b->Lock, 1 );
+
+  /*
+   * Let worker A continue the obtain sequence.  It will detect a deadlock.
+   */
+  _ISR_lock_ISR_disable( &lock_context );
+  _Thread_queue_Queue_release( queue_c, &lock_context );
+
+  /*
+   * Clean up all used resources.
+   */
+  ReceiveAllEvents(
+    RTEMS_EVENT_0 | RTEMS_EVENT_1 | RTEMS_EVENT_2 | RTEMS_EVENT_3 |
+      RTEMS_EVENT_4
+  );
+  WaitForExecutionStop( ctx->worker_a_id );
+  WaitForExecutionStop( ctx->worker_b_id );
+  WaitForExecutionStop( ctx->worker_c_id );
+  WaitForExecutionStop( ctx->worker_d_id );
+  WaitForExecutionStop( ctx->worker_e_id );
+  DeleteTask( ctx->worker_a_id );
+  DeleteTask( ctx->worker_b_id );
+  DeleteTask( ctx->worker_c_id );
+  DeleteTask( ctx->worker_d_id );
+  DeleteTask( ctx->worker_e_id );
+  DeleteMutex( ctx->mutex_a_id );
+  DeleteMutex( ctx->mutex_b_id );
+  DeleteMutex( ctx->mutex_c_id );
+  DeleteMutex( ctx->mutex_d_id );
}
/**
@@ -266,6 +533,7 @@ T_TEST_CASE_FIXTURE( ScoreTqValSmp, &ScoreTqValSmp_Fixture )
ctx = T_fixture_context();
ScoreTqValSmp_Action_0( ctx );
+ ScoreTqValSmp_Action_1( ctx );
}
/** @} */