summaryrefslogtreecommitdiffstats
path: root/testsuites/validation
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-12-09 16:02:20 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2022-03-24 11:10:48 +0100
commit7db4c14b9eb015d248f43589a904dc474764299d (patch)
tree70df244b3a373cad574771bc5c4b4feeb254c952 /testsuites/validation
parentvalidation: Add system termination test suites (diff)
downloadrtems-7db4c14b9eb015d248f43589a904dc474764299d.tar.bz2
validation: Test thread queues
The test source code is generated from specification items by the "./spec2modules.py" script contained in the git://git.rtems.org/rtems-central.git Git repository. Please read the "How-To" section in the "Software Requirements Engineering" chapter of the RTEMS Software Engineering manual to get more information about the process. Update #3716.
Diffstat (limited to 'testsuites/validation')
-rw-r--r--testsuites/validation/tc-score-tq-smp.c571
-rw-r--r--testsuites/validation/tr-tq-enqueue-ceiling.c686
-rw-r--r--testsuites/validation/tr-tq-enqueue-ceiling.h109
-rw-r--r--testsuites/validation/tr-tq-enqueue-deadlock.c442
-rw-r--r--testsuites/validation/tr-tq-enqueue-deadlock.h97
-rw-r--r--testsuites/validation/tr-tq-enqueue-fifo.c334
-rw-r--r--testsuites/validation/tr-tq-enqueue-fifo.h91
-rw-r--r--testsuites/validation/tr-tq-enqueue-mrsp.c652
-rw-r--r--testsuites/validation/tr-tq-enqueue-mrsp.h109
-rw-r--r--testsuites/validation/tr-tq-enqueue-priority-inherit.c1736
-rw-r--r--testsuites/validation/tr-tq-enqueue-priority-inherit.h159
-rw-r--r--testsuites/validation/tr-tq-enqueue-priority.c746
-rw-r--r--testsuites/validation/tr-tq-enqueue-priority.h113
-rw-r--r--testsuites/validation/tr-tq-flush-fifo.c381
-rw-r--r--testsuites/validation/tr-tq-flush-fifo.h91
-rw-r--r--testsuites/validation/tr-tq-flush-priority-inherit.c571
-rw-r--r--testsuites/validation/tr-tq-flush-priority-inherit.h103
-rw-r--r--testsuites/validation/tr-tq-flush-priority.c419
-rw-r--r--testsuites/validation/tr-tq-flush-priority.h97
-rw-r--r--testsuites/validation/tr-tq-surrender-mrsp.c1036
-rw-r--r--testsuites/validation/tr-tq-surrender-mrsp.h148
-rw-r--r--testsuites/validation/tr-tq-surrender-priority-inherit.c2516
-rw-r--r--testsuites/validation/tr-tq-surrender-priority-inherit.h169
-rw-r--r--testsuites/validation/tr-tq-surrender.c684
-rw-r--r--testsuites/validation/tr-tq-surrender.h109
-rw-r--r--testsuites/validation/tr-tq-timeout-mrsp.c476
-rw-r--r--testsuites/validation/tr-tq-timeout-mrsp.h102
-rw-r--r--testsuites/validation/tr-tq-timeout-priority-inherit.c2154
-rw-r--r--testsuites/validation/tr-tq-timeout-priority-inherit.h160
-rw-r--r--testsuites/validation/tr-tq-timeout.c453
-rw-r--r--testsuites/validation/tr-tq-timeout.h98
31 files changed, 15612 insertions, 0 deletions
diff --git a/testsuites/validation/tc-score-tq-smp.c b/testsuites/validation/tc-score-tq-smp.c
new file mode 100644
index 0000000000..4eff2de3c6
--- /dev/null
+++ b/testsuites/validation/tc-score-tq-smp.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqValSmp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpbarrier.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqValSmp spec:/score/tq/val/smp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief Tests SMP-specific thread queue behaviour.
+ *
+ * This test case performs the following actions:
+ *
+ * - Create two or three worker threads and a mutex. Use the mutex and the
+ * worker to do a thread priority change in parallel with a thread queue
+ * extraction.
+ *
+ * - Create a mutex and let the runner obtain it.
+ *
+ * - Create and start worker A on a second processor. Let it wait on
+ * the barrier.
+ *
+ * - If there are more than two processors, then create and start also worker
+ * C. Let it wait on the barrier.
+ *
+ * - Create and start worker B. Let it try to obtain the mutex which is
+ * owned by the runner. Delete worker B to extract it from the thread
+ * queue. Wrap the thread queue extract operation to do a parallel thread
+ * priority change carried out by worker A (and maybe C).
+ *
+ * - Clean up all used resources.
+ *
+ * - Build a cyclic dependency graph using several worker threads and mutexes.
+ * Use the mutexes and the worker to construct a thread queue deadlock which
+ * is detected on one processor while it uses thread queue links inserted by
+ * another processor. The runner thread controls the test scenario via the
+ * two thread queue locks. This is an important test scenario which shows
+ * why the thread queue implementation is a bit more complicated in SMP
+ * configurations.
+ *
+ * - Let worker D wait for mutex A. Let worker C wait for mutex D. Let
+ * worker B wait for mutex C.
+ *
+ * - Let worker A attempt to obtain mutex B. Let worker A wait on the lock
+ * of mutex C. Worker A will insert two thread queue links.
+ *
+ * - Let worker E try to obtain mutex D. Worker E will add a thread queue
+ * link which is later used by worker A to detect the deadlock.
+ *
+ * - Let worker A continue the obtain sequence. It will detect a deadlock.
+ *
+ * - Clean up all used resources.
+ *
+ * @{
+ */
+
+/**
+ * @brief Test context for spec:/score/tq/val/smp test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the runner identifier.
+   */
+  rtems_id runner_id;
+
+  /**
+   * @brief This member contains the worker A identifier.
+   */
+  rtems_id worker_a_id;
+
+  /**
+   * @brief This member contains the worker B identifier.
+   */
+  rtems_id worker_b_id;
+
+  /**
+   * @brief This member contains the worker C identifier.
+   */
+  rtems_id worker_c_id;
+
+  /**
+   * @brief This member contains the worker D identifier.
+   */
+  rtems_id worker_d_id;
+
+  /**
+   * @brief This member contains the worker E identifier.
+   */
+  rtems_id worker_e_id;
+
+  /**
+   * @brief This member contains the mutex A identifier.
+   */
+  rtems_id mutex_a_id;
+
+  /**
+   * @brief This member contains the mutex B identifier.
+   */
+  rtems_id mutex_b_id;
+
+  /**
+   * @brief This member contains the mutex C identifier.
+   */
+  rtems_id mutex_c_id;
+
+  /**
+   * @brief This member contains the mutex D identifier.
+   */
+  rtems_id mutex_d_id;
+
+  /**
+   * @brief This member contains the count of processors used by the test
+   *   (two, or three if a third processor is available).
+   */
+  uint32_t used_cpus;
+
+  /**
+   * @brief This member contains the thread queue of mutex A.
+   */
+  Thread_queue_Queue *thread_queue;
+
+  /**
+   * @brief This member contains the context to wrap the thread queue extract.
+   */
+  WrapThreadQueueContext wrap;
+
+  /**
+   * @brief This member contains the barrier to synchronize the runner and the
+   *   workers.
+   */
+  SMP_barrier_Control barrier;
+
+  /**
+   * @brief This member contains the barrier state for the runner processor.
+   */
+  SMP_barrier_State barrier_state;
+} ScoreTqValSmp_Context;
+
+/* Singleton context instance used by the test fixture below. */
+static ScoreTqValSmp_Context
+  ScoreTqValSmp_Instance;
+
+/* File-local shorthand for the context type. */
+typedef ScoreTqValSmp_Context Context;
+
+/*
+ * Wrapped thread queue extract operation.  It runs in the context of the
+ * runner while worker B is deleted and synchronizes with the parallel
+ * priority change carried out by worker A (and maybe C).
+ */
+static void Extract( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  /* PC0 */
+  _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, ctx->used_cpus );
+
+  /*
+   * Ensure that worker A (and maybe C) acquired the thread wait lock of
+   * worker B.
+   */
+  TicketLockWaitForOthers( &ctx->thread_queue->Lock, ctx->used_cpus - 1 );
+
+  /*
+   * Continue with the thread queue extraction. The thread wait lock of
+   * worker B will be changed back to the default thread wait lock. This
+   * will cause worker A (and maybe C) to release the thread queue lock and
+   * acquire the default thread wait lock of worker B instead to carry out
+   * the priority change.
+   *
+   * See also _Thread_Wait_acquire_critical().
+   */
+}
+
+/*
+ * Body of worker A (and maybe C): after the PC0 barrier, change the
+ * priority of worker B in parallel with the wrapped thread queue extract
+ * operation, then meet the runner at the PC1 barrier.
+ */
+static void PriorityChangeWorker( rtems_task_argument arg )
+{
+  Context *ctx;
+  SMP_barrier_State state;
+
+  ctx = (Context *) arg;
+  _SMP_barrier_State_initialize( &state );
+
+  /* PC0 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, ctx->used_cpus );
+
+  SetPriority( ctx->worker_b_id, PRIO_VERY_HIGH );
+
+  /* PC1 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, ctx->used_cpus );
+
+  /* Block forever; the runner deletes this task afterwards. */
+  (void) ReceiveAnyEvents();
+}
+
+/* Body of worker B: block on mutex A, which is owned by the runner. */
+static void MutexObtainWorker( rtems_task_argument arg )
+{
+  Context *ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_a_id );
+}
+
+/*
+ * Body of worker A: obtain mutex A, synchronize with the runner via the
+ * D0/D1 barriers, then attempt to obtain mutex B which completes the
+ * deadlock cycle and is expected to be rejected.
+ */
+static void DeadlockWorkerA( rtems_task_argument arg )
+{
+  Context *ctx;
+  SMP_barrier_State state;
+
+  ctx = (Context *) arg;
+  _SMP_barrier_State_initialize( &state );
+
+  ObtainMutex( ctx->mutex_a_id );
+
+  /* D0 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  /* D1 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  ObtainMutexDeadlock( ctx->mutex_b_id );
+
+  ReleaseMutex( ctx->mutex_a_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_0 );
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Body of worker B: obtain mutex B, signal the runner with RTEMS_EVENT_5,
+ * then obtain and release mutex C and mutex B.
+ */
+static void DeadlockWorkerB( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_b_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_5 );
+  ObtainMutex( ctx->mutex_c_id );
+  ReleaseMutex( ctx->mutex_c_id );
+  ReleaseMutex( ctx->mutex_b_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_1 );
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Body of worker C: obtain mutex C, then obtain and release mutex D and
+ * mutex C, and signal completion with RTEMS_EVENT_2.
+ */
+static void DeadlockWorkerC( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_c_id );
+  ObtainMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_c_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_2 );
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Body of worker D: obtain mutex D, then obtain and release mutex A and
+ * mutex D, and signal completion with RTEMS_EVENT_3.
+ */
+static void DeadlockWorkerD( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_d_id );
+  ObtainMutex( ctx->mutex_a_id );
+  ReleaseMutex( ctx->mutex_a_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_3 );
+  (void) ReceiveAnyEvents();
+}
+
+/*
+ * Body of worker E: obtain and release mutex D, then signal completion
+ * with RTEMS_EVENT_4.
+ */
+static void DeadlockWorkerE( rtems_task_argument arg )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  ObtainMutex( ctx->mutex_d_id );
+  ReleaseMutex( ctx->mutex_d_id );
+  SendEvents( ctx->runner_id, RTEMS_EVENT_4 );
+  (void) ReceiveAnyEvents();
+}
+
+/* Test fixture setup: let the runner execute at normal priority. */
+static void ScoreTqValSmp_Setup( ScoreTqValSmp_Context *ctx )
+{
+  /* The context is not needed by the setup. */
+  SetSelfPriority( PRIO_NORMAL );
+}
+
+/* Adapter with the untyped argument signature expected by the fixture. */
+static void ScoreTqValSmp_Setup_Wrap( void *arg )
+{
+  ScoreTqValSmp_Setup( (ScoreTqValSmp_Context *) arg );
+}
+
+/* Test fixture teardown: restore the priority of the runner. */
+static void ScoreTqValSmp_Teardown( ScoreTqValSmp_Context *ctx )
+{
+  /* The context is not needed by the teardown. */
+  RestoreRunnerPriority();
+}
+
+/* Adapter with the untyped argument signature expected by the fixture. */
+static void ScoreTqValSmp_Teardown_Wrap( void *arg )
+{
+  ScoreTqValSmp_Teardown( (ScoreTqValSmp_Context *) arg );
+}
+
+/* Test fixture binding the setup and teardown to the context instance. */
+static T_fixture ScoreTqValSmp_Fixture = {
+  .setup = ScoreTqValSmp_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqValSmp_Teardown_Wrap,
+  .scope = NULL,
+  .initial_context = &ScoreTqValSmp_Instance
+};
+
+/**
+ * @brief Create two or three worker threads and a mutex. Use the mutex and
+ *   the worker to do a thread priority change in parallel with a thread queue
+ *   extraction.
+ */
+static void ScoreTqValSmp_Action_0( ScoreTqValSmp_Context *ctx )
+{
+  _SMP_barrier_Control_initialize( &ctx->barrier );
+  _SMP_barrier_State_initialize( &ctx->barrier_state );
+  WrapThreadQueueInitialize( &ctx->wrap, Extract, ctx );
+
+  /* Use a third processor for worker C if one is available. */
+  if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+    ctx->used_cpus = 3;
+  } else {
+    ctx->used_cpus = 2;
+  }
+
+  /*
+   * Create a mutex and let the runner obtain it.
+   */
+  ctx->mutex_a_id = CreateMutexNoProtocol();
+  ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_a_id );
+  ObtainMutex( ctx->mutex_a_id );
+
+  /*
+   * Create and start worker A on a second processor. Let it wait on the
+   * barrier.
+   */
+  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
+  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
+  StartTask( ctx->worker_a_id, PriorityChangeWorker, ctx );
+
+  /*
+   * If there are more than two processors, then create and start also worker
+   * C. Let it wait on the barrier.
+   */
+  if ( ctx->used_cpus > 2 ) {
+    ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
+    SetScheduler( ctx->worker_c_id, SCHEDULER_C_ID, PRIO_NORMAL );
+    StartTask( ctx->worker_c_id, PriorityChangeWorker, ctx );
+  }
+
+  /*
+   * Create and start worker B. Let it try to obtain the mutex which is owned
+   * by the runner. Delete worker B to extract it from the thread queue. Wrap
+   * the thread queue extract operation to do a parallel thread priority change
+   * carried out by worker A (and maybe C).
+   */
+  ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
+  StartTask( ctx->worker_b_id, MutexObtainWorker, ctx );
+  WrapThreadQueueExtractDirect( &ctx->wrap, GetThread( ctx->worker_b_id ) );
+  DeleteTask( ctx->worker_b_id );
+
+  /*
+   * Clean up all used resources.
+   */
+  /* PC1 */
+  _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, ctx->used_cpus );
+
+  WaitForExecutionStop( ctx->worker_a_id );
+  DeleteTask( ctx->worker_a_id );
+
+  if ( ctx->used_cpus > 2 ) {
+    WaitForExecutionStop( ctx->worker_c_id );
+    DeleteTask( ctx->worker_c_id );
+  }
+
+  ReleaseMutex( ctx->mutex_a_id );
+  DeleteMutex( ctx->mutex_a_id );
+  WrapThreadQueueDestroy( &ctx->wrap );
+}
+
+/**
+ * @brief Build a cyclic dependency graph using several worker threads and
+ *   mutexes. Use the mutexes and the worker to construct a thread queue
+ *   deadlock which is detected on one processor while it uses thread queue
+ *   links inserted by another processor. The runner thread controls the test
+ *   scenario via the two thread queue locks. This is an important test
+ *   scenario which shows why the thread queue implementation is a bit more
+ *   complicated in SMP configurations.
+ */
+static void ScoreTqValSmp_Action_1( ScoreTqValSmp_Context *ctx )
+{
+  Thread_queue_Queue *queue_b;
+  Thread_queue_Queue *queue_c;
+  ISR_lock_Context lock_context;
+  SMP_barrier_State state;
+
+  if ( rtems_scheduler_get_processor_maximum() <= 2 ) {
+    /*
+     * We can only run this validation test on systems with three or more
+     * processors. The sequence under test can happen on systems with only two
+     * processors, however, we need a third processor to control the other two
+     * processors via ISR locks to get a deterministic test scenario.
+     */
+    return;
+  }
+
+  ctx->runner_id = rtems_task_self();
+
+  _SMP_barrier_Control_initialize( &ctx->barrier );
+  _SMP_barrier_State_initialize( &state );
+
+  ctx->mutex_a_id = CreateMutexNoProtocol();
+  ctx->mutex_b_id = CreateMutexNoProtocol();
+  ctx->mutex_c_id = CreateMutexNoProtocol();
+  ctx->mutex_d_id = CreateMutexNoProtocol();
+
+  queue_b = GetMutexThreadQueue( ctx->mutex_b_id );
+  queue_c = GetMutexThreadQueue( ctx->mutex_c_id );
+
+  ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
+  ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
+  ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
+  ctx->worker_d_id = CreateTask( "WRKD", PRIO_NORMAL );
+  ctx->worker_e_id = CreateTask( "WRKE", PRIO_NORMAL );
+
+  /* Workers A to D share scheduler B; worker E runs under scheduler C. */
+  SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
+  SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_d_id, SCHEDULER_B_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_e_id, SCHEDULER_C_ID, PRIO_NORMAL );
+
+  /*
+   * Let worker D wait for mutex A. Let worker C wait for mutex D. Let worker
+   * B wait for mutex C.
+   */
+  StartTask( ctx->worker_a_id, DeadlockWorkerA, ctx );
+
+  /* D0 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  StartTask( ctx->worker_d_id, DeadlockWorkerD, ctx );
+  StartTask( ctx->worker_c_id, DeadlockWorkerC, ctx );
+  StartTask( ctx->worker_b_id, DeadlockWorkerB, ctx );
+  ReceiveAllEvents( RTEMS_EVENT_5 );
+  WaitForExecutionStop( ctx->worker_b_id );
+
+  /*
+   * Let worker A attempt to obtain mutex B. Let worker A wait on the lock of
+   * mutex C. Worker A will insert two thread queue links.
+   */
+  /*
+   * NOTE(review): Potpourri_stats appears to be a generic lock statistics
+   * context of the executing thread — confirm against threadimpl.h.
+   */
+  _ISR_lock_ISR_disable( &lock_context );
+  _Thread_queue_Queue_acquire_critical(
+    queue_c,
+    &_Thread_Executing->Potpourri_stats,
+    &lock_context
+  );
+  _ISR_lock_ISR_enable( &lock_context );
+
+  /* D1 */
+  _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+  /* Wait until worker A waits on the lock of the thread queue of mutex C. */
+  TicketLockWaitForOthers( &queue_c->Lock, 1 );
+
+  /*
+   * Let worker E try to obtain mutex D. Worker E will add a thread queue link
+   * which is later used by worker A to detect the deadlock.
+   */
+  StartTask( ctx->worker_e_id, DeadlockWorkerE, ctx );
+  TicketLockWaitForOthers( &queue_b->Lock, 1 );
+
+  /*
+   * Let worker A continue the obtain sequence. It will detect a deadlock.
+   */
+  _ISR_lock_ISR_disable( &lock_context );
+  _Thread_queue_Queue_release( queue_c, &lock_context );
+
+  /*
+   * Clean up all used resources.
+   */
+  ReceiveAllEvents(
+    RTEMS_EVENT_0 | RTEMS_EVENT_1 | RTEMS_EVENT_2 | RTEMS_EVENT_3 |
+    RTEMS_EVENT_4
+  );
+  WaitForExecutionStop( ctx->worker_a_id );
+  WaitForExecutionStop( ctx->worker_b_id );
+  WaitForExecutionStop( ctx->worker_c_id );
+  WaitForExecutionStop( ctx->worker_d_id );
+  WaitForExecutionStop( ctx->worker_e_id );
+  DeleteTask( ctx->worker_a_id );
+  DeleteTask( ctx->worker_b_id );
+  DeleteTask( ctx->worker_c_id );
+  DeleteTask( ctx->worker_d_id );
+  DeleteTask( ctx->worker_e_id );
+  DeleteMutex( ctx->mutex_a_id );
+  DeleteMutex( ctx->mutex_b_id );
+  DeleteMutex( ctx->mutex_c_id );
+  DeleteMutex( ctx->mutex_d_id );
+}
+
+/**
+ * @fn void T_case_body_ScoreTqValSmp( void )
+ */
+T_TEST_CASE_FIXTURE( ScoreTqValSmp, &ScoreTqValSmp_Fixture )
+{
+  ScoreTqValSmp_Context *ctx;
+
+  ctx = T_fixture_context();
+
+  /* Run both test actions with the shared fixture context. */
+  ScoreTqValSmp_Action_0( ctx );
+  ScoreTqValSmp_Action_1( ctx );
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-ceiling.c b/testsuites/validation/tr-tq-enqueue-ceiling.c
new file mode 100644
index 0000000000..5309b31260
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-ceiling.c
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueCeiling
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-ceiling.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueueCeiling \
+ * spec:/score/tq/req/enqueue-ceiling
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * One transition map entry: a skip flag, one not-applicable flag per
+ * pre-condition, and the expected Position post-condition state.
+ */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_EligibleScheduler_NA : 1;
+  uint8_t Pre_QueueEligible_NA : 1;
+  uint8_t Pre_QueueIneligible_NA : 1;
+  uint8_t Post_Position : 3;
+} ScoreTqReqEnqueueCeiling_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-ceiling test case.
+ */
+typedef struct {
+  /**
+   * @brief If this member is true, then the enqueueing thread shall have at
+   *   least one helping scheduler.
+   */
+  bool helping;
+
+  /**
+   * @brief This member specifies the priority of a thread with an eligible
+   *   scheduler equal to an eligible scheduler of the enqueueing thread.
+   */
+  rtems_task_priority priority;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are ineligible for the enqueueing task should be enqueued before a
+   *   thread with an eligible scheduler equal to an eligible scheduler of the
+   *   enqueueing thread.
+   */
+  size_t other_before;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are ineligible for the enqueueing task should be enqueued after a
+   *   thread with an eligible scheduler equal to an eligible scheduler of the
+   *   enqueueing thread.
+   */
+  size_t other_after;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqEnqueueCeiling_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 3 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqEnqueueCeiling_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqEnqueueCeiling_Context;
+
+/* Singleton context instance used by the test fixture. */
+static ScoreTqReqEnqueueCeiling_Context
+  ScoreTqReqEnqueueCeiling_Instance;
+
+/* Names of the EligibleScheduler pre-condition states. */
+static const char * const ScoreTqReqEnqueueCeiling_PreDesc_EligibleScheduler[] = {
+  "Home",
+  "Helping",
+  "NA"
+};
+
+/* Names of the QueueEligible pre-condition states. */
+static const char * const ScoreTqReqEnqueueCeiling_PreDesc_QueueEligible[] = {
+  "None",
+  "Equal",
+  "Low",
+  "NA"
+};
+
+/* Names of the QueueIneligible pre-condition states. */
+static const char * const ScoreTqReqEnqueueCeiling_PreDesc_QueueIneligible[] = {
+  "None",
+  "Before",
+  "After",
+  "NA"
+};
+
+/* NULL-terminated table of the pre-condition state name lists. */
+static const char * const * const ScoreTqReqEnqueueCeiling_PreDesc[] = {
+  ScoreTqReqEnqueueCeiling_PreDesc_EligibleScheduler,
+  ScoreTqReqEnqueueCeiling_PreDesc_QueueEligible,
+  ScoreTqReqEnqueueCeiling_PreDesc_QueueIneligible,
+  NULL
+};
+
+/* File-local shorthand for the context type. */
+typedef ScoreTqReqEnqueueCeiling_Context Context;
+
+/*
+ * Returns the thread of the next recorded unblock operation, skipping
+ * unblocks of the runner itself.
+ */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  const rtems_tcb *thread;
+
+  do {
+    thread = TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+  } while ( thread == ctx->tq_ctx->runner_tcb );
+
+  return thread;
+}
+
+/* Returns the task control block of the specified worker. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/*
+ * Lets TQ_BLOCKER_C obtain mutex A and lets TQ_BLOCKER_E (moved to the given
+ * scheduler) obtain and release mutex A.  NOTE(review): presumably this gives
+ * the owner of mutex A a helping scheduler — confirm against tx-support.h.
+ */
+static void AddHelper( TQContext *tq_ctx, rtems_id scheduler_id )
+{
+  TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
+  TQSetScheduler( tq_ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+  TQSendAndWaitForExecutionStop(
+    tq_ctx,
+    TQ_BLOCKER_E,
+    TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+  );
+}
+
+/*
+ * Lets TQ_BLOCKER_C release mutex A and then obtains and releases mutex A
+ * with the runner to undo the state established by AddHelper().
+ */
+static void RemoveHelper( TQContext *tq_ctx )
+{
+  TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
+  TQMutexObtain( tq_ctx, TQ_MUTEX_A );
+  TQMutexRelease( tq_ctx, TQ_MUTEX_A );
+}
+
+/* Prepares the context for the selected EligibleScheduler pre-condition. */
+static void ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Prepare(
+  ScoreTqReqEnqueueCeiling_Context *ctx,
+  ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Home: {
+      /*
+       * While the enqueueing thread has no helping scheduler.
+       */
+      ctx->helping = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Helping: {
+      /*
+       * While the enqueueing thread has at least one helping scheduler.
+       */
+      ctx->helping = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_NA:
+      break;
+  }
+}
+
+/* Prepares the context for the selected QueueEligible pre-condition. */
+static void ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Prepare(
+  ScoreTqReqEnqueueCeiling_Context *ctx,
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueCeiling_Pre_QueueEligible_None: {
+      /*
+       * While all priority queues of the thread queue associated with eligible
+       * schedulers of the enqueueing thread are empty.
+       */
+      /* This is the default */
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Equal: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is equal to the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ctx->priority = PRIO_VERY_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Low: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is lower than the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ctx->priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueEligible_NA:
+      break;
+  }
+}
+
+/* Prepares the context for the selected QueueIneligible pre-condition. */
+static void ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_Prepare(
+  ScoreTqReqEnqueueCeiling_Context *ctx,
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_None: {
+      /*
+       * While no priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread.
+       */
+      /* This is the default */
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_Before: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * this priority queue is positioned before all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ctx->other_before = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_After: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * this priority queue is positioned after all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ctx->other_after = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Position post-condition by replaying the recorded unblock
+ * order of the worker threads.
+ */
+static void ScoreTqReqEnqueueCeiling_Post_Position_Check(
+  ScoreTqReqEnqueueCeiling_Context *ctx,
+  ScoreTqReqEnqueueCeiling_Post_Position state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  /* Event receives */
+  T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+  T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+
+  switch ( state ) {
+    case ScoreTqReqEnqueueCeiling_Post_Position_InitialFirst: {
+      /*
+       * A priority queue associated with the scheduler which contains exactly
+       * the enqueueing thread shall be created as the first priority queue of
+       * the thread queue.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_First: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_Second: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_FirstFirst: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_SecondFirst: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_SecondQueue: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueCeiling_Post_Position_NA:
+      break;
+  }
+}
+
+/*
+ * Fixture setup: workers A, B, and C run under scheduler A at very high
+ * priority; in SMP configurations worker D additionally runs under
+ * scheduler B at low priority.
+ */
+static void ScoreTqReqEnqueueCeiling_Setup(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  rtems_id scheduler_id;
+
+  scheduler_id = SCHEDULER_A_ID;
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, scheduler_id, PRIO_VERY_HIGH );
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, scheduler_id, PRIO_VERY_HIGH );
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, scheduler_id, PRIO_VERY_HIGH );
+  #if defined( RTEMS_SMP )
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_D, SCHEDULER_B_ID, PRIO_LOW );
+  #endif
+}
+
+/* Fixture adapter: runs the typed setup outside of the action loop. */
+static void ScoreTqReqEnqueueCeiling_Setup_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueCeiling_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueCeiling_Setup( ctx );
+}
+
+/* Fixture teardown: resets the shared thread queue test context. */
+static void ScoreTqReqEnqueueCeiling_Teardown(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture adapter: runs the typed teardown outside of the action loop. */
+static void ScoreTqReqEnqueueCeiling_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueCeiling_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueCeiling_Teardown( ctx );
+}
+
+/*
+ * Resets the per-variant state before each transition.  PRIO_PSEUDO_ISR
+ * marks the "no thread enqueued before the runner" case in the action.
+ *
+ * Fix: the original crammed three statements onto one line, which is
+ * inconsistent with the one-statement-per-line style of this file.
+ */
+static void ScoreTqReqEnqueueCeiling_Prepare(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  ctx->priority = PRIO_PSEUDO_ISR;
+  ctx->other_before = false;
+  ctx->other_after = false;
+}
+
+/*
+ * Performs one test variant: prepares the thread queue according to the
+ * pre-condition state, records the scheduler operations around the
+ * runner's enqueue, and tears the helpers down again.
+ *
+ * Fix: removed a stray space before a comma (generated style has none).
+ */
+static void ScoreTqReqEnqueueCeiling_Action(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  Status_Control status;
+
+  if ( ctx->priority == PRIO_PSEUDO_ISR ) {
+    /* No other eligible thread is enqueued before the runner. */
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+  } else {
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, ctx->priority );
+
+    if ( ctx->other_before || ctx->other_after ) {
+      /*
+       * NOTE(review): TQ_BLOCKER_D blocks on mutex B owned by
+       * TQ_BLOCKER_B, presumably so that TQ_BLOCKER_B gains a second
+       * (ineligible) scheduler -- confirm against the specification item.
+       */
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_OBTAIN );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        TQ_BLOCKER_D,
+        TQ_EVENT_MUTEX_B_OBTAIN | TQ_EVENT_MUTEX_B_RELEASE |
+          TQ_EVENT_RUNNER_SYNC
+      );
+
+      if ( ctx->other_before ) {
+        TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+      }
+
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      TQSend(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+
+      if ( ctx->other_before ) {
+        TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_SURRENDER );
+      }
+    } else {
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      TQSend(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+  }
+
+  if ( ctx->helping ) {
+    /* The helper task needs a third processor in the "other" cases. */
+    if ( ctx->other_before || ctx->other_after ) {
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        AddHelper( ctx->tq_ctx, SCHEDULER_C_ID );
+      }
+    } else {
+      AddHelper( ctx->tq_ctx, SCHEDULER_B_ID );
+    }
+  }
+
+  /* Record scheduler operations around the enqueue under test. */
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+  status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+  TQSchedulerRecordStop( ctx->tq_ctx );
+  status = TQSurrender( ctx->tq_ctx );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+  if (
+    ctx->priority != PRIO_PSEUDO_ISR &&
+    ( ctx->other_before || ctx->other_after )
+  ) {
+    /* Release mutex B and wait until TQ_BLOCKER_D is done with it. */
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_RELEASE );
+    TQSynchronizeRunner();
+  }
+
+  if ( ctx->helping ) {
+    if ( ctx->other_before || ctx->other_after ) {
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        RemoveHelper( ctx->tq_ctx );
+      }
+    } else {
+      RemoveHelper( ctx->tq_ctx );
+    }
+  }
+}
+
+/*
+ * Transition map entries.  An entry with a leading 1 is skipped (its
+ * pre-condition combination is not applicable on this configuration).
+ */
+static const ScoreTqReqEnqueueCeiling_Entry
+ScoreTqReqEnqueueCeiling_Entries[] = {
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_SecondQueue },
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_SecondFirst },
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_FirstFirst },
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#endif
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_InitialFirst },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_Second },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_First },
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_InitialFirst },
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_Second },
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+  { 0, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_First }
+#else
+  { 1, 0, 0, 0, ScoreTqReqEnqueueCeiling_Post_Position_NA }
+#endif
+};
+
+/*
+ * Maps the flattened pre-condition state index to an entry above; consumed
+ * sequentially by ScoreTqReqEnqueueCeiling_PopEntry().
+ */
+static const uint8_t
+ScoreTqReqEnqueueCeiling_Map[] = {
+  4, 0, 0, 5, 1, 2, 6, 1, 3, 7, 0, 0, 8, 1, 2, 9, 1, 3
+};
+
+/*
+ * Scope reporter for the fixture: describes the current pre-condition
+ * states while the action loop is running, otherwise reports nothing.
+ */
+static size_t ScoreTqReqEnqueueCeiling_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqEnqueueCeiling_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope( ScoreTqReqEnqueueCeiling_PreDesc, buf, n, ctx->Map.pcs );
+}
+
+/* Test fixture: wires up setup, teardown, and the scope reporter. */
+static T_fixture ScoreTqReqEnqueueCeiling_Fixture = {
+  .setup = ScoreTqReqEnqueueCeiling_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqEnqueueCeiling_Teardown_Wrap,
+  .scope = ScoreTqReqEnqueueCeiling_Scope,
+  .initial_context = &ScoreTqReqEnqueueCeiling_Instance
+};
+
+/* Consumes the next transition map slot and resolves it to its entry. */
+static inline ScoreTqReqEnqueueCeiling_Entry ScoreTqReqEnqueueCeiling_PopEntry(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  size_t map_index;
+
+  map_index = ctx->Map.index;
+  ++ctx->Map.index;
+
+  return ScoreTqReqEnqueueCeiling_Entries[
+    ScoreTqReqEnqueueCeiling_Map[ map_index ]
+  ];
+}
+
+/*
+ * Executes one transition: prepares all pre-condition states, performs the
+ * action, and checks the expected post-condition state of the entry.
+ */
+static void ScoreTqReqEnqueueCeiling_TestVariant(
+  ScoreTqReqEnqueueCeiling_Context *ctx
+)
+{
+  ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Prepare(
+    ctx,
+    ctx->Map.pcs[ 0 ]
+  );
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_Prepare(
+    ctx,
+    ctx->Map.pcs[ 2 ]
+  );
+  ScoreTqReqEnqueueCeiling_Action( ctx );
+  ScoreTqReqEnqueueCeiling_Post_Position_Check(
+    ctx,
+    ctx->Map.entry.Post_Position
+  );
+}
+
+static T_fixture_node ScoreTqReqEnqueueCeiling_Node;
+
+/*
+ * Runs the parameterized test case: iterates over the cross product of all
+ * pre-condition states and executes one variant per combination under the
+ * pushed fixture.
+ */
+void ScoreTqReqEnqueueCeiling_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqEnqueueCeiling_Context *ctx;
+
+  ctx = &ScoreTqReqEnqueueCeiling_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqEnqueueCeiling_Node,
+    &ScoreTqReqEnqueueCeiling_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Home;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueCeiling_Pre_QueueEligible_None;
+      ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueueCeiling_Pre_QueueEligible_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      for (
+        ctx->Map.pcs[ 2 ] = ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_None;
+        ctx->Map.pcs[ 2 ] < ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_NA;
+        ++ctx->Map.pcs[ 2 ]
+      ) {
+        ctx->Map.entry = ScoreTqReqEnqueueCeiling_PopEntry( ctx );
+        ScoreTqReqEnqueueCeiling_Prepare( ctx );
+        ScoreTqReqEnqueueCeiling_TestVariant( ctx );
+      }
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-ceiling.h b/testsuites/validation/tr-tq-enqueue-ceiling.h
new file mode 100644
index 0000000000..3eca5033db
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-ceiling.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueCeiling
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_CEILING_H
+#define _TR_TQ_ENQUEUE_CEILING_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueueCeiling
+ *
+ * @{
+ */
+
+/* States of the EligibleScheduler pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Home,
+  ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_Helping,
+  ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler_NA
+} ScoreTqReqEnqueueCeiling_Pre_EligibleScheduler;
+
+/* States of the QueueEligible pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible_None,
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Equal,
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible_Low,
+  ScoreTqReqEnqueueCeiling_Pre_QueueEligible_NA
+} ScoreTqReqEnqueueCeiling_Pre_QueueEligible;
+
+/* States of the QueueIneligible pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_None,
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_Before,
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_After,
+  ScoreTqReqEnqueueCeiling_Pre_QueueIneligible_NA
+} ScoreTqReqEnqueueCeiling_Pre_QueueIneligible;
+
+/* States of the Position post-condition. */
+typedef enum {
+  ScoreTqReqEnqueueCeiling_Post_Position_InitialFirst,
+  ScoreTqReqEnqueueCeiling_Post_Position_First,
+  ScoreTqReqEnqueueCeiling_Post_Position_Second,
+  ScoreTqReqEnqueueCeiling_Post_Position_FirstFirst,
+  ScoreTqReqEnqueueCeiling_Post_Position_SecondFirst,
+  ScoreTqReqEnqueueCeiling_Post_Position_SecondQueue,
+  ScoreTqReqEnqueueCeiling_Post_Position_NA
+} ScoreTqReqEnqueueCeiling_Post_Position;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreTqReqEnqueueCeiling_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_CEILING_H */
diff --git a/testsuites/validation/tr-tq-enqueue-deadlock.c b/testsuites/validation/tr-tq-enqueue-deadlock.c
new file mode 100644
index 0000000000..afe4ef864e
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-deadlock.c
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueDeadlock
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-deadlock.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueueDeadlock \
+ * spec:/score/tq/req/enqueue-deadlock
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/* One transition map entry: skip flag, NA flags, expected post-condition. */
+typedef struct {
+  uint8_t Skip : 1; /* non-zero: the variant is not applicable */
+  uint8_t Pre_Notification_NA : 1;
+  uint8_t Pre_Deadlock_NA : 1;
+  uint8_t Post_Result : 2; /* expected Post_Result state */
+} ScoreTqReqEnqueueDeadlock_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-deadlock test case.
+ */
+typedef struct {
+  /**
+   * @brief If this member is true, then more than one mutex shall be used for
+   *   the deadlock scenario.
+   */
+  bool more;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqEnqueueDeadlock_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  /* Internal bookkeeping of the generated transition map execution. */
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 2 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqEnqueueDeadlock_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqEnqueueDeadlock_Context;
+
+/* Singleton test context instance used as the fixture's initial context. */
+static ScoreTqReqEnqueueDeadlock_Context
+  ScoreTqReqEnqueueDeadlock_Instance;
+
+/* Human-readable pre-condition state names for test scope reporting. */
+static const char * const ScoreTqReqEnqueueDeadlock_PreDesc_Notification[] = {
+  "Status",
+  "Fatal",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueueDeadlock_PreDesc_Deadlock[] = {
+  "One",
+  "More",
+  "NA"
+};
+
+static const char * const * const ScoreTqReqEnqueueDeadlock_PreDesc[] = {
+  ScoreTqReqEnqueueDeadlock_PreDesc_Notification,
+  ScoreTqReqEnqueueDeadlock_PreDesc_Deadlock,
+  NULL
+};
+
+/*
+ * Prepares the Notification pre-condition: the variant is skipped when the
+ * thread queue's deadlock notification mode does not match the state.
+ */
+static void ScoreTqReqEnqueueDeadlock_Pre_Notification_Prepare(
+  ScoreTqReqEnqueueDeadlock_Context *ctx,
+  ScoreTqReqEnqueueDeadlock_Pre_Notification state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueDeadlock_Pre_Notification_Status: {
+      /*
+       * Where a detected deadlock results in a return with a status code.
+       */
+      if ( ctx->tq_ctx->deadlock != TQ_DEADLOCK_STATUS ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Pre_Notification_Fatal: {
+      /*
+       * Where a detected deadlock results in a fatal error.
+       */
+      if ( ctx->tq_ctx->deadlock != TQ_DEADLOCK_FATAL ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Pre_Notification_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the Deadlock pre-condition: selects a short (one mutex) or long
+ * (several mutexes) dependency cycle for the action.
+ */
+static void ScoreTqReqEnqueueDeadlock_Pre_Deadlock_Prepare(
+  ScoreTqReqEnqueueDeadlock_Context *ctx,
+  ScoreTqReqEnqueueDeadlock_Pre_Deadlock state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_One: {
+      /*
+       * While the owner of the thread queue is enqueued on another thread
+       * queue owned by the calling thread.
+       */
+      ctx->more = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_More: {
+      /*
+       * While the owner of the thread queue is enqueued on another thread
+       * queue owned by a thread other than the calling thread, and so on,
+       * while the owner of the last thread queue of this dependency chain is
+       * enqueued on a thread queue owned by the calling thread.
+       */
+      ctx->more = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Result post-condition.  Both outcomes are already verified
+ * inside the action (status comparison or fatal error interception), so
+ * this function documents the expectation without additional assertions.
+ */
+static void ScoreTqReqEnqueueDeadlock_Post_Result_Check(
+  ScoreTqReqEnqueueDeadlock_Context *ctx,
+  ScoreTqReqEnqueueDeadlock_Post_Result state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueDeadlock_Post_Result_Status: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_DEADLOCK.
+       */
+      /* Checked by action */
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Post_Result_Fatal: {
+      /*
+       * The system shall terminate with the INTERNAL_ERROR_CORE fatal source
+       * and the INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal code.
+       */
+      /* Checked by action */
+      break;
+    }
+
+    case ScoreTqReqEnqueueDeadlock_Post_Result_NA:
+      break;
+  }
+}
+
+/*
+ * Builds a dependency cycle that ends at the runner, lets the runner's
+ * enqueue close the cycle, verifies the deadlock notification, and then
+ * unwinds all mutexes and queue ownerships again.
+ */
+static void ScoreTqReqEnqueueDeadlock_Action(
+  ScoreTqReqEnqueueDeadlock_Context *ctx
+)
+{
+  Status_Control status;
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    /* Sticky enqueue: the queue owner must run on another scheduler. */
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+  } else {
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      SCHEDULER_A_ID,
+      PRIO_VERY_HIGH
+    );
+  }
+
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_A_ID, PRIO_HIGH );
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_A_ID, PRIO_HIGH );
+
+  TQSortMutexesByID( ctx->tq_ctx );
+  /* The runner owns mutex C; TQ_BLOCKER_A becomes the queue owner. */
+  TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_C );
+  TQSendAndWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+  if ( ctx->more ) {
+    /*
+     * Longer cycle: B and C form intermediate links before TQ_BLOCKER_A
+     * finally blocks on mutex B (NOTE(review): exact chain shape per the
+     * specification item -- confirm there).
+     */
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_C_OBTAIN );
+    Yield();
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_B_OBTAIN );
+    Yield();
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
+    Yield();
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_B_OBTAIN
+    );
+  } else {
+    /* Short cycle: the queue owner blocks directly on the runner's mutex. */
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_C_OBTAIN
+    );
+  }
+
+  if ( ctx->tq_ctx->deadlock == TQ_DEADLOCK_FATAL ) {
+    status = TQEnqueueFatal( ctx->tq_ctx );
+    T_eq_int( status, STATUS_DEADLOCK );
+  } else {
+    status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+    T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_DEADLOCK ) );
+  }
+
+  /* Unwind the cycle in reverse order. */
+  TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_C );
+
+  if ( ctx->more ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_C_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_B_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_B_RELEASE );
+  } else {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_C_RELEASE );
+  }
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner();
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_A_ID, PRIO_HIGH );
+  } else {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+  }
+}
+
+/* Transition map entries; selected via the Map table in PopEntry(). */
+static const ScoreTqReqEnqueueDeadlock_Entry
+ScoreTqReqEnqueueDeadlock_Entries[] = {
+  { 0, 0, 0, ScoreTqReqEnqueueDeadlock_Post_Result_Status },
+  { 0, 0, 0, ScoreTqReqEnqueueDeadlock_Post_Result_Fatal }
+};
+
+/* Maps the flattened pre-condition state index to an entry above. */
+static const uint8_t
+ScoreTqReqEnqueueDeadlock_Map[] = {
+  0, 0, 1, 1
+};
+
+/*
+ * Scope reporter for the fixture: describes the current pre-condition
+ * states while the action loop is running, otherwise reports nothing.
+ */
+static size_t ScoreTqReqEnqueueDeadlock_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqEnqueueDeadlock_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope( ScoreTqReqEnqueueDeadlock_PreDesc, buf, n, ctx->Map.pcs );
+}
+
+/* Test fixture: no setup or teardown, only scope reporting. */
+static T_fixture ScoreTqReqEnqueueDeadlock_Fixture = {
+  .setup = NULL,
+  .stop = NULL,
+  .teardown = NULL,
+  .scope = ScoreTqReqEnqueueDeadlock_Scope,
+  .initial_context = &ScoreTqReqEnqueueDeadlock_Instance
+};
+
+/*
+ * Weight of each pre-condition dimension, used by PopEntry() to recompute
+ * the transition map index after a skipped variant.
+ */
+static const uint8_t ScoreTqReqEnqueueDeadlock_Weights[] = {
+  2, 1
+};
+
+/*
+ * After pre-condition `index` caused a skip, fast-forwards the remaining
+ * pre-condition states to their last value so that the enclosing loops in
+ * Run() advance past all variants sharing the skipped state.
+ */
+static void ScoreTqReqEnqueueDeadlock_Skip(
+  ScoreTqReqEnqueueDeadlock_Context *ctx,
+  size_t index
+)
+{
+  switch ( index + 1 ) {
+    case 1:
+      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA - 1;
+      break;
+  }
+}
+
+/*
+ * Pops the next transition map entry.  When the previous variant was
+ * skipped, the map index is first recomputed from the pre-condition state
+ * weights so that the skipped slots are jumped over.
+ */
+static inline ScoreTqReqEnqueueDeadlock_Entry
+ScoreTqReqEnqueueDeadlock_PopEntry( ScoreTqReqEnqueueDeadlock_Context *ctx )
+{
+  size_t index;
+
+  if ( ctx->Map.skip ) {
+    size_t i;
+
+    ctx->Map.skip = false;
+    index = 0;
+
+    for ( i = 0; i < 2; ++i ) {
+      index += ScoreTqReqEnqueueDeadlock_Weights[ i ] * ctx->Map.pcs[ i ];
+    }
+  } else {
+    index = ctx->Map.index;
+  }
+
+  ctx->Map.index = index + 1;
+
+  return ScoreTqReqEnqueueDeadlock_Entries[
+    ScoreTqReqEnqueueDeadlock_Map[ index ]
+  ];
+}
+
+/*
+ * Executes one transition: prepares the pre-condition states, bails out
+ * early if the Notification state does not apply to this thread queue,
+ * otherwise performs the action and checks the post-condition.
+ */
+static void ScoreTqReqEnqueueDeadlock_TestVariant(
+  ScoreTqReqEnqueueDeadlock_Context *ctx
+)
+{
+  ScoreTqReqEnqueueDeadlock_Pre_Notification_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+  if ( ctx->Map.skip ) {
+    ScoreTqReqEnqueueDeadlock_Skip( ctx, 0 );
+    return;
+  }
+
+  ScoreTqReqEnqueueDeadlock_Pre_Deadlock_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreTqReqEnqueueDeadlock_Action( ctx );
+  ScoreTqReqEnqueueDeadlock_Post_Result_Check(
+    ctx,
+    ctx->Map.entry.Post_Result
+  );
+}
+
+static T_fixture_node ScoreTqReqEnqueueDeadlock_Node;
+
+/*
+ * Runs the parameterized test case: iterates over the cross product of the
+ * Notification and Deadlock pre-condition states under the pushed fixture.
+ */
+void ScoreTqReqEnqueueDeadlock_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqEnqueueDeadlock_Context *ctx;
+
+  ctx = &ScoreTqReqEnqueueDeadlock_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqEnqueueDeadlock_Node,
+    &ScoreTqReqEnqueueDeadlock_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+  ctx->Map.skip = false;
+
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueueDeadlock_Pre_Notification_Status;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueueDeadlock_Pre_Notification_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueDeadlock_Pre_Deadlock_One;
+      ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      ctx->Map.entry = ScoreTqReqEnqueueDeadlock_PopEntry( ctx );
+      ScoreTqReqEnqueueDeadlock_TestVariant( ctx );
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-deadlock.h b/testsuites/validation/tr-tq-enqueue-deadlock.h
new file mode 100644
index 0000000000..be23bcf879
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-deadlock.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueDeadlock
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_DEADLOCK_H
+#define _TR_TQ_ENQUEUE_DEADLOCK_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueueDeadlock
+ *
+ * @{
+ */
+
+/* States of the Notification pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueDeadlock_Pre_Notification_Status,
+  ScoreTqReqEnqueueDeadlock_Pre_Notification_Fatal,
+  ScoreTqReqEnqueueDeadlock_Pre_Notification_NA
+} ScoreTqReqEnqueueDeadlock_Pre_Notification;
+
+/* States of the Deadlock pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueDeadlock_Pre_Deadlock_One,
+  ScoreTqReqEnqueueDeadlock_Pre_Deadlock_More,
+  ScoreTqReqEnqueueDeadlock_Pre_Deadlock_NA
+} ScoreTqReqEnqueueDeadlock_Pre_Deadlock;
+
+/* States of the Result post-condition. */
+typedef enum {
+  ScoreTqReqEnqueueDeadlock_Post_Result_Status,
+  ScoreTqReqEnqueueDeadlock_Post_Result_Fatal,
+  ScoreTqReqEnqueueDeadlock_Post_Result_NA
+} ScoreTqReqEnqueueDeadlock_Post_Result;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreTqReqEnqueueDeadlock_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_DEADLOCK_H */
diff --git a/testsuites/validation/tr-tq-enqueue-fifo.c b/testsuites/validation/tr-tq-enqueue-fifo.c
new file mode 100644
index 0000000000..b64a99c0d1
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-fifo.c
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueFifo
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-fifo.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueueFifo spec:/score/tq/req/enqueue-fifo
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/* One transition map entry: skip flag, NA flag, expected post-condition. */
+typedef struct {
+  uint8_t Skip : 1; /* non-zero: the variant is not applicable */
+  uint8_t Pre_Queue_NA : 1;
+  uint8_t Post_Position : 2; /* expected Post_Position state */
+} ScoreTqReqEnqueueFifo_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-fifo test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqEnqueueFifo_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  /* Internal bookkeeping of the generated transition map execution. */
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 1 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqEnqueueFifo_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqEnqueueFifo_Context;
+
+/* Singleton test context instance used as the fixture's initial context. */
+static ScoreTqReqEnqueueFifo_Context
+  ScoreTqReqEnqueueFifo_Instance;
+
+/* Human-readable pre-condition state names for test scope reporting. */
+static const char * const ScoreTqReqEnqueueFifo_PreDesc_Queue[] = {
+  "Empty",
+  "NonEmpty",
+  "NA"
+};
+
+static const char * const * const ScoreTqReqEnqueueFifo_PreDesc[] = {
+  ScoreTqReqEnqueueFifo_PreDesc_Queue,
+  NULL
+};
+
+typedef ScoreTqReqEnqueueFifo_Context Context;
+
+/*
+ * Returns the thread of the next recorded unblock scheduler event and
+ * advances *index; the events were captured between the scheduler record
+ * start and stop calls in the action.
+ */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+/* Returns the task control block of the given worker. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/*
+ * Prepares the Queue pre-condition: how_many selects whether a second
+ * blocker is enqueued before the thread under test in the action.
+ */
+static void ScoreTqReqEnqueueFifo_Pre_Queue_Prepare(
+  ScoreTqReqEnqueueFifo_Context *ctx,
+  ScoreTqReqEnqueueFifo_Pre_Queue state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueFifo_Pre_Queue_Empty: {
+      /*
+       * While the queue is empty.
+       */
+      ctx->tq_ctx->how_many = 1;
+      break;
+    }
+
+    case ScoreTqReqEnqueueFifo_Pre_Queue_NonEmpty: {
+      /*
+       * While the queue is non-empty.
+       */
+      ctx->tq_ctx->how_many = 2;
+      break;
+    }
+
+    case ScoreTqReqEnqueueFifo_Pre_Queue_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Position post-condition by replaying the recorded unblock
+ * events in order: the unblock sequence reveals the queue position of the
+ * thread under test (TQ_BLOCKER_C).
+ */
+static void ScoreTqReqEnqueueFifo_Post_Position_Check(
+  ScoreTqReqEnqueueFifo_Context *ctx,
+  ScoreTqReqEnqueueFifo_Post_Position state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  /* Event receives */
+  T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+  T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+
+  switch ( state ) {
+    case ScoreTqReqEnqueueFifo_Post_Position_First: {
+      /*
+       * The thread shall be the first thread in the queue.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueFifo_Post_Position_Last: {
+      /*
+       * The thread shall be the last thread in the queue.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueueFifo_Post_Position_NA:
+      break;
+  }
+}
+
+/* Fixture setup: assigns distinct priorities to the three blockers. */
+static void ScoreTqReqEnqueueFifo_Setup( ScoreTqReqEnqueueFifo_Context *ctx )
+{
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_ULTRA_HIGH );
+}
+
+/* Fixture adapter: runs the typed setup outside of the action loop. */
+static void ScoreTqReqEnqueueFifo_Setup_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueFifo_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueFifo_Setup( ctx );
+}
+
+/* Fixture teardown: resets the shared thread queue test context. */
+static void ScoreTqReqEnqueueFifo_Teardown(
+  ScoreTqReqEnqueueFifo_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture adapter: runs the typed teardown outside of the action loop. */
+static void ScoreTqReqEnqueueFifo_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueFifo_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueFifo_Teardown( ctx );
+}
+
+/*
+ * Performs one test variant: optionally enqueues TQ_BLOCKER_B first (the
+ * non-empty queue case), then records the scheduler operations while
+ * TQ_BLOCKER_C enqueues.
+ */
+static void ScoreTqReqEnqueueFifo_Action( ScoreTqReqEnqueueFifo_Context *ctx )
+{
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->tq_ctx->how_many >= 2 ) {
+    /* Make the queue non-empty before the thread under test arrives. */
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  }
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+/* Transition map entries; selected via the Map table in PopEntry(). */
+static const ScoreTqReqEnqueueFifo_Entry
+ScoreTqReqEnqueueFifo_Entries[] = {
+  { 0, 0, ScoreTqReqEnqueueFifo_Post_Position_First },
+  { 0, 0, ScoreTqReqEnqueueFifo_Post_Position_Last }
+};
+
+/* Maps the pre-condition state index to an entry above. */
+static const uint8_t
+ScoreTqReqEnqueueFifo_Map[] = {
+  0, 1
+};
+
+/*
+ * Scope reporter for the fixture: describes the current pre-condition
+ * state while the action loop is running, otherwise reports nothing.
+ */
+static size_t ScoreTqReqEnqueueFifo_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqEnqueueFifo_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope( ScoreTqReqEnqueueFifo_PreDesc, buf, n, ctx->Map.pcs );
+}
+
+/* Test fixture: wires up setup, teardown, and the scope reporter. */
+static T_fixture ScoreTqReqEnqueueFifo_Fixture = {
+  .setup = ScoreTqReqEnqueueFifo_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqEnqueueFifo_Teardown_Wrap,
+  .scope = ScoreTqReqEnqueueFifo_Scope,
+  .initial_context = &ScoreTqReqEnqueueFifo_Instance
+};
+
+/* Consumes the next transition map slot and resolves it to its entry. */
+static inline ScoreTqReqEnqueueFifo_Entry ScoreTqReqEnqueueFifo_PopEntry(
+  ScoreTqReqEnqueueFifo_Context *ctx
+)
+{
+  size_t map_index;
+
+  map_index = ctx->Map.index;
+  ++ctx->Map.index;
+
+  return ScoreTqReqEnqueueFifo_Entries[
+    ScoreTqReqEnqueueFifo_Map[ map_index ]
+  ];
+}
+
+/*
+ * Executes one transition: prepares the pre-condition state, performs the
+ * action, and checks the expected post-condition state of the entry.
+ */
+static void ScoreTqReqEnqueueFifo_TestVariant(
+  ScoreTqReqEnqueueFifo_Context *ctx
+)
+{
+  ScoreTqReqEnqueueFifo_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreTqReqEnqueueFifo_Action( ctx );
+  ScoreTqReqEnqueueFifo_Post_Position_Check(
+    ctx,
+    ctx->Map.entry.Post_Position
+  );
+}
+
+static T_fixture_node ScoreTqReqEnqueueFifo_Node;
+
+/*
+ * Runs the parameterized test case: iterates over all Queue pre-condition
+ * states and executes one variant per state under the pushed fixture.
+ */
+void ScoreTqReqEnqueueFifo_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqEnqueueFifo_Context *ctx;
+
+  ctx = &ScoreTqReqEnqueueFifo_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqEnqueueFifo_Node,
+    &ScoreTqReqEnqueueFifo_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueueFifo_Pre_Queue_Empty;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueueFifo_Pre_Queue_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    ctx->Map.entry = ScoreTqReqEnqueueFifo_PopEntry( ctx );
+    ScoreTqReqEnqueueFifo_TestVariant( ctx );
+  }
+
+  T_pop_fixture();
+}
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-fifo.h b/testsuites/validation/tr-tq-enqueue-fifo.h
new file mode 100644
index 0000000000..776cded8b9
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-fifo.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueFifo
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_FIFO_H
+#define _TR_TQ_ENQUEUE_FIFO_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueueFifo
+ *
+ * @{
+ */
+
+/** @brief States of the queue pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueFifo_Pre_Queue_Empty,
+  ScoreTqReqEnqueueFifo_Pre_Queue_NonEmpty,
+  ScoreTqReqEnqueueFifo_Pre_Queue_NA
+} ScoreTqReqEnqueueFifo_Pre_Queue;
+
+/** @brief States of the position post-condition. */
+typedef enum {
+  ScoreTqReqEnqueueFifo_Post_Position_First,
+  ScoreTqReqEnqueueFifo_Post_Position_Last,
+  ScoreTqReqEnqueueFifo_Post_Position_NA
+} ScoreTqReqEnqueueFifo_Post_Position;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqEnqueueFifo_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_FIFO_H */
diff --git a/testsuites/validation/tr-tq-enqueue-mrsp.c b/testsuites/validation/tr-tq-enqueue-mrsp.c
new file mode 100644
index 0000000000..d48455b2eb
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-mrsp.c
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-mrsp.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueueMrsp spec:/score/tq/req/enqueue-mrsp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * Packed transition map entry: a skip flag, one not-applicable flag per
+ * pre-condition, and the expected position post-condition state.
+ */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_EligibleScheduler_NA : 1;
+  uint8_t Pre_QueueEligible_NA : 1;
+  uint8_t Pre_QueueIneligible_NA : 1;
+  uint8_t Post_Position : 3;
+} ScoreTqReqEnqueueMrsp_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-mrsp test case.
+ */
+typedef struct {
+ /**
+ * @brief This this member is true, then the enqueueing thread shall have at
+ * least one helping scheduler which is an ineligible scheduler for the
+ * already enqueued threads.
+ */
+ bool helping;
+
+ /**
+ * @brief This member specifies the priority of an already enqueued thread
+ * with an eligible scheduler equal to an eligible scheduler of the
+ * enqueueing thread.
+ */
+ rtems_task_priority priority;;
+
+ /**
+ * @brief If this member is true, then a thread those eligible schedulers are
+ * ineligible scheduler to the enqueueing task should be enqueued before a
+ * thread with an eligible scheduler equal to an eligible scheduler of the
+ * enqueueing thread.
+ */
+ size_t other_before;;
+
+ /**
+ * @brief If this member is true, then a thread those eligible schedulers are
+ * ineligible scheduler to the enqueueing task should be enqueued after a
+ * thread with an eligible scheduler equal to an eligible scheduler of the
+ * enqueueing thread.
+ */
+ size_t other_after;;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqEnqueueMrsp_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqEnqueueMrsp_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqEnqueueMrsp_Context;
+
+/* Singleton test context instance used by the fixture. */
+static ScoreTqReqEnqueueMrsp_Context
+  ScoreTqReqEnqueueMrsp_Instance;
+
+/* Human-readable names of the pre-condition states, used for test scopes. */
+static const char * const ScoreTqReqEnqueueMrsp_PreDesc_EligibleScheduler[] = {
+  "Home",
+  "Helping",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueueMrsp_PreDesc_QueueEligible[] = {
+  "None",
+  "Equal",
+  "Low",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueueMrsp_PreDesc_QueueIneligible[] = {
+  "None",
+  "Only",
+  "Before",
+  "After",
+  "NA"
+};
+
+/* NULL-terminated list of the pre-condition description tables above. */
+static const char * const * const ScoreTqReqEnqueueMrsp_PreDesc[] = {
+  ScoreTqReqEnqueueMrsp_PreDesc_EligibleScheduler,
+  ScoreTqReqEnqueueMrsp_PreDesc_QueueEligible,
+  ScoreTqReqEnqueueMrsp_PreDesc_QueueIneligible,
+  NULL
+};
+
+/*
+ * The MrsP locking protocol uses a sticky thread queue enqueue, so every
+ * thread waiting for the mutex ownership performs a busy wait and occupies
+ * its processor.  The full validation scenarios therefore need at least four
+ * processors; otherwise only a reduced subset of checks is carried out.
+ */
+static bool CanDoFullValidation( void )
+{
+  uint32_t processor_count;
+
+  processor_count = rtems_scheduler_get_processor_maximum();
+  return processor_count >= 4;
+}
+
+/*
+ * Prepares the "eligible scheduler" pre-condition by selecting whether the
+ * enqueueing thread shall have a helping scheduler.
+ */
+static void ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Prepare(
+  ScoreTqReqEnqueueMrsp_Context *ctx,
+  ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler state
+)
+{
+  if ( state == ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Home ) {
+    /* While the enqueueing thread has no helping scheduler. */
+    ctx->helping = false;
+  } else if ( state == ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Helping ) {
+    /* While the enqueueing thread has at least one helping scheduler. */
+    ctx->helping = true;
+  }
+  /* The NA state leaves the context unchanged. */
+}
+
+/*
+ * Prepares the "queue eligible" pre-condition by selecting the priority of
+ * an already enqueued thread sharing an eligible scheduler with the
+ * enqueueing thread.  PRIO_PSEUDO_ISR selects the case with no such thread.
+ */
+static void ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Prepare(
+  ScoreTqReqEnqueueMrsp_Context *ctx,
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueMrsp_Pre_QueueEligible_None: {
+      /*
+       * While all priority queues of the thread queue associated with eligible
+       * schedulers of the enqueueing thread are empty.
+       */
+      ctx->priority = PRIO_PSEUDO_ISR;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Equal: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is equal to the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ctx->priority = PRIO_VERY_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Low: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is lower than the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ctx->priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueEligible_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the "queue ineligible" pre-condition by selecting where a thread
+ * with only ineligible schedulers shall be enqueued relative to the eligible
+ * priority queue.
+ */
+static void ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Prepare(
+  ScoreTqReqEnqueueMrsp_Context *ctx,
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_None: {
+      /*
+       * While no priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread.
+       */
+      ctx->other_before = false;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Only: {
+      /*
+       * While exactly one priority queue of the thread queue exists which is
+       * not associated with an eligible scheduler of the enqueueing thread.
+       *
+       * NOTE(review): this state sets the same context flags as the Before
+       * state below; presumably the eligible queue is absent in this variant
+       * (QueueEligible is None), so the action code cannot distinguish the
+       * two — confirm against the specification item.
+       */
+      ctx->other_before = true;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Before: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * this priority queue is positioned before all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ctx->other_before = true;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_After: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * this priority queue is positioned after all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ctx->other_before = false;
+      ctx->other_after = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the position of the enqueueing thread in the thread queue via the
+ * surrender order recorded in the worker counters.  When fewer than four
+ * processors are available (see CanDoFullValidation()), only BLOCKER_A is
+ * enqueued and a reduced check is performed.
+ */
+static void ScoreTqReqEnqueueMrsp_Post_Position_Check(
+  ScoreTqReqEnqueueMrsp_Context *ctx,
+  ScoreTqReqEnqueueMrsp_Post_Position state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  /* The enqueue is sticky, so no enqueued thread is blocked by the scheduler */
+  T_null( TQGetNextUnblock( ctx->tq_ctx, &i )->thread );
+
+  switch ( state ) {
+    case ScoreTqReqEnqueueMrsp_Post_Position_InitialFirst: {
+      /*
+       * A priority queue associated with the scheduler which contains exactly
+       * the enqueueing thread shall be created as the first priority queue of
+       * the thread queue.
+       */
+      T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+      T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Post_Position_InitialLast: {
+      /*
+       * A priority queue associated with the scheduler which contains exactly
+       * the enqueueing thread shall be created as the last priority queue of
+       * the thread queue.
+       */
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 2, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Post_Position_Second: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       */
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 2, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Post_Position_SecondFirst: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 3, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 3, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Post_Position_SecondLast: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 3, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 3, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueueMrsp_Post_Position_NA:
+      break;
+  }
+}
+
+/*
+ * Assigns schedulers and priorities to the worker threads.  For the full
+ * validation, processor 2 is moved from scheduler C to scheduler B so that
+ * scheduler B owns two processors; the move is undone in the teardown.
+ */
+static void ScoreTqReqEnqueueMrsp_Setup( ScoreTqReqEnqueueMrsp_Context *ctx )
+{
+  if ( CanDoFullValidation() ) {
+    RemoveProcessor( SCHEDULER_C_ID, 2 );
+    AddProcessor( SCHEDULER_B_ID, 2 );
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_C_ID, PRIO_LOW );
+  }
+
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_LOW );
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_B_ID, PRIO_LOW );
+  TQSetScheduler(
+    ctx->tq_ctx,
+    TQ_BLOCKER_D,
+    SCHEDULER_A_ID,
+    PRIO_ULTRA_HIGH
+  );
+}
+
+/* Fixture setup wrapper: clears the action-loop flag, then runs the setup. */
+static void ScoreTqReqEnqueueMrsp_Setup_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueMrsp_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueMrsp_Setup( ctx );
+}
+
+/*
+ * Undoes the processor move performed by the setup and resets the thread
+ * queue test context.
+ */
+static void ScoreTqReqEnqueueMrsp_Teardown(
+  ScoreTqReqEnqueueMrsp_Context *ctx
+)
+{
+  if ( CanDoFullValidation() ) {
+    RemoveProcessor( SCHEDULER_B_ID, 2 );
+    AddProcessor( SCHEDULER_C_ID, 2 );
+  }
+
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture teardown wrapper: clears the action-loop flag, then tears down. */
+static void ScoreTqReqEnqueueMrsp_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqEnqueueMrsp_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueueMrsp_Teardown( ctx );
+}
+
+/*
+ * Performs the test action: the runner obtains the thread queue, the worker
+ * threads are enqueued in the order configured by the pre-condition states,
+ * and finally the runner surrenders the thread queue so that the surrender
+ * order can be evaluated by the post-condition check.  The exact send/wait
+ * ordering below is significant; do not reorder.
+ */
+static void ScoreTqReqEnqueueMrsp_Action( ScoreTqReqEnqueueMrsp_Context *ctx )
+{
+  Status_Control status;
+
+  /* Reset the instrumentation evaluated by the position check. */
+  TQResetCounter( ctx->tq_ctx );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_A );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_B );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_C );
+
+  /* The runner obtains the thread queue; the workers will block on it. */
+  status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+  if ( ctx->helping ) {
+    /*
+     * Let BLOCKER_A hold mutex A and let BLOCKER_D (scheduler A) contend for
+     * it, which presumably gives BLOCKER_A a helping scheduler — confirm
+     * against the tx-thread-queue support code.
+     */
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner();
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE |
+        TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  if ( CanDoFullValidation() ) {
+    /* Enqueue the workers in the order selected by the pre-conditions. */
+    if ( ctx->other_before ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_C,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+
+    if ( ctx->priority != PRIO_PSEUDO_ISR ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+
+    if ( ctx->other_after ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_C,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+  }
+
+  /* BLOCKER_A is the enqueueing thread under test; it is enqueued last. */
+  TQSendAndWaitForIntendToBlock(
+    ctx->tq_ctx,
+    TQ_BLOCKER_A,
+    TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+  );
+
+  /* Record scheduler events while the queue is handed over to the workers. */
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  status = TQSurrender( ctx->tq_ctx );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+  TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_A );
+
+  if ( CanDoFullValidation() ) {
+    if ( ctx->priority != PRIO_PSEUDO_ISR ) {
+      TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_B );
+    }
+
+    if ( ctx->other_before || ctx->other_after ) {
+      TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_C );
+    }
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+
+  if ( ctx->helping ) {
+    /* Release mutex A again and synchronize with BLOCKER_A and BLOCKER_D. */
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_A_RELEASE | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner2();
+  }
+}
+
+/*
+ * Transition map entries: skip flag, per-pre-condition NA flags, and the
+ * expected position post-condition.
+ */
+static const ScoreTqReqEnqueueMrsp_Entry
+ScoreTqReqEnqueueMrsp_Entries[] = {
+  { 1, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_NA },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_Second },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_SecondLast },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_SecondFirst },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_InitialFirst },
+  { 0, 0, 0, 0, ScoreTqReqEnqueueMrsp_Post_Position_InitialLast }
+};
+
+/*
+ * Maps each pre-condition state combination, enumerated in the loop order of
+ * ScoreTqReqEnqueueMrsp_Run(), to an index into the entry table above.
+ */
+static const uint8_t
+ScoreTqReqEnqueueMrsp_Map[] = {
+  4, 5, 0, 0, 1, 0, 2, 3, 1, 0, 2, 3, 4, 5, 0, 0, 1, 0, 2, 3, 1, 0, 2, 3
+};
+
+/*
+ * Reports the current pre-condition scope while the action loop runs; an
+ * empty scope is reported outside of the loop.
+ */
+static size_t ScoreTqReqEnqueueMrsp_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqEnqueueMrsp_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope( ScoreTqReqEnqueueMrsp_PreDesc, buf, n, ctx->Map.pcs );
+}
+
+/* Test fixture: wrapped setup/teardown plus scope reporting for this case. */
+static T_fixture ScoreTqReqEnqueueMrsp_Fixture = {
+  .setup = ScoreTqReqEnqueueMrsp_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqEnqueueMrsp_Teardown_Wrap,
+  .scope = ScoreTqReqEnqueueMrsp_Scope,
+  .initial_context = &ScoreTqReqEnqueueMrsp_Instance
+};
+
+/* Returns the current transition map entry and advances the map index. */
+static inline ScoreTqReqEnqueueMrsp_Entry ScoreTqReqEnqueueMrsp_PopEntry(
+  ScoreTqReqEnqueueMrsp_Context *ctx
+)
+{
+  size_t map_index;
+
+  map_index = ctx->Map.index++;
+  return ScoreTqReqEnqueueMrsp_Entries[
+    ScoreTqReqEnqueueMrsp_Map[ map_index ]
+  ];
+}
+
+/*
+ * Executes one test variant: prepare all pre-conditions, perform the enqueue
+ * action, and check the resulting position post-condition.
+ */
+static void ScoreTqReqEnqueueMrsp_TestVariant(
+  ScoreTqReqEnqueueMrsp_Context *ctx
+)
+{
+  ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Prepare(
+    ctx,
+    ctx->Map.pcs[ 0 ]
+  );
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+  ScoreTqReqEnqueueMrsp_Action( ctx );
+  ScoreTqReqEnqueueMrsp_Post_Position_Check(
+    ctx,
+    ctx->Map.entry.Post_Position
+  );
+}
+
+static T_fixture_node ScoreTqReqEnqueueMrsp_Node;
+
+/*
+ * Runs all transition map variants of the MrsP enqueue test case for the
+ * thread queue described by tq_ctx.
+ */
+void ScoreTqReqEnqueueMrsp_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqEnqueueMrsp_Context *ctx;
+
+  ctx = &ScoreTqReqEnqueueMrsp_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  /* T_push_fixture() hands back the fixture's initial context, which is the
+     instance prepared above. */
+  ctx = T_push_fixture(
+    &ScoreTqReqEnqueueMrsp_Node,
+    &ScoreTqReqEnqueueMrsp_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  /* Iterate over all pre-condition state combinations in lexicographic
+     order; this order must match ScoreTqReqEnqueueMrsp_Map. */
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Home;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueueMrsp_Pre_QueueEligible_None;
+      ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueueMrsp_Pre_QueueEligible_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      for (
+        ctx->Map.pcs[ 2 ] = ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_None;
+        ctx->Map.pcs[ 2 ] < ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_NA;
+        ++ctx->Map.pcs[ 2 ]
+      ) {
+        ctx->Map.entry = ScoreTqReqEnqueueMrsp_PopEntry( ctx );
+        ScoreTqReqEnqueueMrsp_TestVariant( ctx );
+      }
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-mrsp.h b/testsuites/validation/tr-tq-enqueue-mrsp.h
new file mode 100644
index 0000000000..98cb8b9883
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-mrsp.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueueMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_MRSP_H
+#define _TR_TQ_ENQUEUE_MRSP_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueueMrsp
+ *
+ * @{
+ */
+
+/** @brief States of the eligible scheduler pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Home,
+  ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_Helping,
+  ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler_NA
+} ScoreTqReqEnqueueMrsp_Pre_EligibleScheduler;
+
+/** @brief States of the queue eligible pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible_None,
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Equal,
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible_Low,
+  ScoreTqReqEnqueueMrsp_Pre_QueueEligible_NA
+} ScoreTqReqEnqueueMrsp_Pre_QueueEligible;
+
+/** @brief States of the queue ineligible pre-condition. */
+typedef enum {
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_None,
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Only,
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_Before,
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_After,
+  ScoreTqReqEnqueueMrsp_Pre_QueueIneligible_NA
+} ScoreTqReqEnqueueMrsp_Pre_QueueIneligible;
+
+/** @brief States of the position post-condition. */
+typedef enum {
+  ScoreTqReqEnqueueMrsp_Post_Position_InitialFirst,
+  ScoreTqReqEnqueueMrsp_Post_Position_InitialLast,
+  ScoreTqReqEnqueueMrsp_Post_Position_Second,
+  ScoreTqReqEnqueueMrsp_Post_Position_SecondFirst,
+  ScoreTqReqEnqueueMrsp_Post_Position_SecondLast,
+  ScoreTqReqEnqueueMrsp_Post_Position_NA
+} ScoreTqReqEnqueueMrsp_Post_Position;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqEnqueueMrsp_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_MRSP_H */
diff --git a/testsuites/validation/tr-tq-enqueue-priority-inherit.c b/testsuites/validation/tr-tq-enqueue-priority-inherit.c
new file mode 100644
index 0000000000..fc9a75e4fa
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-priority-inherit.c
@@ -0,0 +1,1736 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueuePriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-priority-inherit.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueuePriorityInherit \
+ * spec:/score/tq/req/enqueue-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * Packed transition map entry: a skip flag, one not-applicable flag per
+ * pre-condition, and the expected post-condition states.
+ */
+typedef struct {
+  uint32_t Skip : 1;
+  uint32_t Pre_Scheduler_NA : 1;
+  uint32_t Pre_QueueEligible_NA : 1;
+  uint32_t Pre_QueueIneligible_NA : 1;
+  uint32_t Pre_PriorityForOwner_NA : 1;
+  uint32_t Pre_SchedulerForOwner_NA : 1;
+  uint32_t Pre_OwnerState_NA : 1;
+  uint32_t Post_Position : 4;
+  uint32_t Post_OwnerPriority : 2;
+  uint32_t Post_OwnerScheduler : 2;
+  uint32_t Post_OwnerOwnerPriority : 2;
+  uint32_t Post_OwnerOwnerScheduler : 2;
+} ScoreTqReqEnqueuePriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-priority-inherit test
+ * case.
+ */
+typedef struct {
+  /**
+   * @brief This member specifies the priority of a thread on the thread queue
+   *   with an eligible scheduler equal to an eligible scheduler of the
+   *   enqueueing thread.
+   */
+  rtems_task_priority queue_priority;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are ineligible schedulers for the enqueueing task should be enqueued
+   *   before a thread with an eligible scheduler equal to an eligible
+   *   scheduler of the enqueueing thread.
+   */
+  size_t other_before;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are ineligible schedulers for the enqueueing task should be enqueued
+   *   after a thread with an eligible scheduler equal to an eligible
+   *   scheduler of the enqueueing thread.
+   */
+  size_t other_after;
+
+  /**
+   * @brief If this member is true, then the priorities of the enqueueing
+   *   thread shall be dispensable for the owner of the thread queue.
+   *
+   * NOTE(review): the wording "dispensable" contradicts the member name
+   * vital_priority and the "Vital"/"Dispensable" pre-condition states —
+   * confirm the intended polarity against the specification item.
+   */
+  bool vital_priority;
+
+  /**
+   * @brief If this member is true, then the eligible scheduler of the
+   *   enqueueing thread shall be dispensable for the owner of the thread
+   *   queue.
+   *
+   * NOTE(review): same polarity concern as for vital_priority above.
+   */
+  bool vital_scheduler;
+
+  /**
+   * @brief This member contains the priorities of the thread queue owner after
+   *   the enqueue.
+   */
+  rtems_task_priority owner_priority[ 4 ];
+
+  /**
+   * @brief This member contains the priorities of the owner of the thread
+   *   queue on which the thread queue owner is blocked after the enqueue.
+   */
+  rtems_task_priority owner_owner_priority[ 4 ];
+
+  /**
+   * @brief This member specifies which mutex obtain event shall be used to
+   *   block the thread queue owner.
+   */
+  rtems_event_set owner_obtain;
+
+  /**
+   * @brief This member specifies which mutex release event shall be used to
+   *   unblock the thread queue owner.
+   */
+  rtems_event_set owner_release;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqEnqueuePriorityInherit_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 6 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqEnqueuePriorityInherit_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqEnqueuePriorityInherit_Context;
+
+/* Singleton test context instance used by the fixture. */
+static ScoreTqReqEnqueuePriorityInherit_Context
+  ScoreTqReqEnqueuePriorityInherit_Instance;
+
+/* Human-readable names of the pre-condition states, used for test scopes. */
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_Scheduler[] = {
+  "One",
+  "Two",
+  "Three",
+  "More",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_QueueEligible[] = {
+  "None",
+  "High",
+  "Equal",
+  "Low",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_QueueIneligible[] = {
+  "None",
+  "Only",
+  "Before",
+  "After",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_PriorityForOwner[] = {
+  "Vital",
+  "Dispensable",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_SchedulerForOwner[] = {
+  "Vital",
+  "Dispensable",
+  "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriorityInherit_PreDesc_OwnerState[] = {
+  "NotEnqueued",
+  "FIFO",
+  "Priority",
+  "PriorityInherit",
+  "NA"
+};
+
+/* NULL-terminated list of the pre-condition description tables above. */
+static const char * const * const ScoreTqReqEnqueuePriorityInherit_PreDesc[] = {
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_Scheduler,
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_QueueEligible,
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_QueueIneligible,
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_PriorityForOwner,
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_SchedulerForOwner,
+  ScoreTqReqEnqueuePriorityInherit_PreDesc_OwnerState,
+  NULL
+};
+
+/* Shorthand for the lengthy generated context type name. */
+typedef ScoreTqReqEnqueuePriorityInherit_Context Context;
+
+/*
+ * Gets the thread of the next unblock event recorded by the scheduler,
+ * skipping unblock events which belong to the runner thread itself.
+ */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  const rtems_tcb *tcb;
+
+  for ( ;; ) {
+    tcb = TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+
+    if ( tcb != ctx->tq_ctx->runner_tcb ) {
+      return tcb;
+    }
+  }
+}
+
+/* Returns the task control block of the specified worker thread. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  const rtems_tcb *tcb;
+
+  tcb = ctx->tq_ctx->worker_tcb[ worker ];
+
+  return tcb;
+}
+
+/*
+ * Checks that the priorities of a thread were raised by the enqueue
+ * operation.  The priority array holds one priority per scheduler
+ * (A, B, C, D), as filled in by GetPriorities().
+ */
+static void CheckPriorityRaise(
+  const Context *ctx,
+  const rtems_task_priority *priority
+)
+{
+  /*
+   * Scheduler A: the raised priority is the priority of the highest priority
+   * enqueued thread -- PRIO_ULTRA_HIGH if such a thread was enqueued (see
+   * the QueueEligible pre-condition), otherwise the PRIO_VERY_HIGH priority
+   * of the enqueueing thread TQ_BLOCKER_A (see the test setup).
+   */
+  if ( ctx->queue_priority == PRIO_ULTRA_HIGH ) {
+    T_eq_u32( priority[ 0 ], PRIO_ULTRA_HIGH );
+  } else {
+    T_eq_u32( priority[ 0 ], PRIO_VERY_HIGH );
+  }
+
+  if (
+    ctx->queue_priority == PRIO_VERY_HIGH ||
+    ctx->queue_priority == PRIO_ULTRA_HIGH
+  ) {
+    /*
+     * With an ineligible priority queue present (TQ_BLOCKER_C on scheduler
+     * B), the PRIO_NORMAL contribution of AddVitalPriority() shows up on
+     * scheduler C instead of scheduler B.
+     */
+    if ( ctx->other_before || ctx->other_after ) {
+      T_eq_u32( priority[ 1 ], PRIO_ULTRA_LOW );
+      T_eq_u32( priority[ 2 ], PRIO_NORMAL );
+    } else {
+      T_eq_u32( priority[ 1 ], PRIO_NORMAL );
+    }
+  }
+}
+
+/*
+ * Checks that the priority of a thread with respect to its home scheduler
+ * was not changed by the enqueue operation.
+ */
+static void CheckPriorityNop(
+  const Context *ctx,
+  const rtems_task_priority *priority
+)
+{
+  rtems_task_priority expected;
+
+  if ( ctx->queue_priority == PRIO_ULTRA_HIGH ) {
+    expected = PRIO_ULTRA_HIGH;
+  } else {
+    expected = PRIO_VERY_HIGH;
+  }
+
+  T_eq_u32( priority[ 0 ], expected );
+}
+
+/*
+ * Checks that the scheduler added by AddVitalScheduler() became a helping
+ * scheduler of the thread.  Which priority slot (scheduler B, C, or D)
+ * carries the PRIO_VERY_LOW contribution depends on how many other helping
+ * schedulers were already set up by the action.
+ */
+static void CheckSchedulerNewHelper(
+  const Context *ctx,
+  const rtems_task_priority *priority
+)
+{
+  if (
+    ctx->vital_priority &&
+    ( ctx->queue_priority == PRIO_VERY_HIGH ||
+      ctx->queue_priority == PRIO_ULTRA_HIGH )
+  ) {
+    /* Vital priority case: AddVitalScheduler() used scheduler C or D. */
+    if ( ctx->other_before || ctx->other_after ) {
+      T_eq_u32( priority[ 3 ], PRIO_VERY_LOW );
+    } else {
+      T_eq_u32( priority[ 2 ], PRIO_VERY_LOW );
+      T_eq_u32( priority[ 3 ], PRIO_INVALID );
+    }
+  } else {
+    /* Otherwise AddVitalScheduler() used scheduler B or C. */
+    if ( ctx->other_before || ctx->other_after ) {
+      T_eq_u32( priority[ 1 ], PRIO_ULTRA_LOW );
+      T_eq_u32( priority[ 2 ], PRIO_VERY_LOW );
+    } else {
+      T_eq_u32( priority[ 1 ], PRIO_VERY_LOW );
+      T_eq_u32( priority[ 2 ], PRIO_INVALID );
+    }
+
+    T_eq_u32( priority[ 3 ], PRIO_INVALID );
+  }
+}
+
+/*
+ * Checks that the set of eligible schedulers of the thread was not extended
+ * by the enqueue operation: the priority slots of the schedulers which were
+ * not made helping schedulers by the action must remain PRIO_INVALID.
+ */
+static void CheckSchedulerNop(
+  const Context *ctx,
+  const rtems_task_priority *priority
+)
+{
+  if (
+    ctx->vital_priority &&
+    ( ctx->queue_priority == PRIO_VERY_HIGH ||
+      ctx->queue_priority == PRIO_ULTRA_HIGH )
+  ) {
+    if ( !ctx->other_before && !ctx->other_after ) {
+      T_eq_u32( priority[ 2 ], PRIO_INVALID );
+    }
+
+    T_eq_u32( priority[ 3 ], PRIO_INVALID );
+  } else {
+    /*
+     * Without a vital priority contribution, only the ineligible queue
+     * helper TQ_BLOCKER_C (scheduler B, PRIO_ULTRA_LOW) may appear.
+     */
+    if ( ctx->other_before || ctx->other_after ) {
+      T_eq_u32( priority[ 1 ], PRIO_ULTRA_LOW );
+    } else {
+      T_eq_u32( priority[ 1 ], PRIO_INVALID );
+    }
+
+    T_eq_u32( priority[ 2 ], PRIO_INVALID );
+    T_eq_u32( priority[ 3 ], PRIO_INVALID );
+  }
+}
+
+/*
+ * Fills the priority array with the current priority of the worker with
+ * respect to each of the schedulers A, B, C, and D, in this order.
+ */
+static void GetPriorities(
+  const Context *ctx,
+  TQWorkerKind worker,
+  rtems_task_priority *priority
+)
+{
+  const rtems_id scheduler_ids[ 4 ] = {
+    SCHEDULER_A_ID,
+    SCHEDULER_B_ID,
+    SCHEDULER_C_ID,
+    SCHEDULER_D_ID
+  };
+  size_t i;
+
+  for ( i = 0; i < 4; ++i ) {
+    priority[ i ] = GetPriorityByScheduler(
+      ctx->tq_ctx->worker_id[ worker ],
+      scheduler_ids[ i ]
+    );
+  }
+}
+
+/*
+ * Sets the scheduler of TQ_BLOCKER_D to the specified scheduler with
+ * priority PRIO_NORMAL and lets it obtain and release mutex A.
+ */
+static void AddVitalPriority( Context *ctx, rtems_id scheduler_id )
+{
+  TQContext *tq_ctx;
+
+  tq_ctx = ctx->tq_ctx;
+  TQSetScheduler( tq_ctx, TQ_BLOCKER_D, scheduler_id, PRIO_NORMAL );
+  TQSendAndWaitForExecutionStop(
+    tq_ctx,
+    TQ_BLOCKER_D,
+    TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+  );
+}
+
+/*
+ * Sets the scheduler of TQ_HELPER_B to the specified scheduler with
+ * priority PRIO_LOW and lets it obtain and release mutex B.
+ */
+static void AddVitalPriorityHelper( Context *ctx, rtems_id scheduler_id )
+{
+  TQContext *tq_ctx;
+
+  tq_ctx = ctx->tq_ctx;
+  TQSetScheduler( tq_ctx, TQ_HELPER_B, scheduler_id, PRIO_LOW );
+  TQSendAndWaitForExecutionStop(
+    tq_ctx,
+    TQ_HELPER_B,
+    TQ_EVENT_MUTEX_B_OBTAIN | TQ_EVENT_MUTEX_B_RELEASE
+  );
+}
+
+/*
+ * Sets the scheduler of TQ_BLOCKER_E to the specified scheduler with
+ * priority PRIO_VERY_LOW and lets it obtain and release mutex A.
+ */
+static void AddVitalScheduler( Context *ctx, rtems_id scheduler_id )
+{
+  TQContext *tq_ctx;
+
+  tq_ctx = ctx->tq_ctx;
+  TQSetScheduler( tq_ctx, TQ_BLOCKER_E, scheduler_id, PRIO_VERY_LOW );
+  TQSendAndWaitForExecutionStop(
+    tq_ctx,
+    TQ_BLOCKER_E,
+    TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+  );
+}
+
+/*
+ * Prepares the Scheduler pre-condition: the variant is skipped unless the
+ * processor count of the system matches the requested configuration.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_One: {
+      /*
+       * Where the system has exactly one scheduler.
+       */
+      if ( rtems_scheduler_get_processor_maximum() != 1 ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Two: {
+      /*
+       * Where the system has exactly two schedulers.
+       */
+      if ( rtems_scheduler_get_processor_maximum() != 2 ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Three: {
+      /*
+       * Where the system has exactly three schedulers.
+       */
+      if ( rtems_scheduler_get_processor_maximum() != 3 ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_More: {
+      /*
+       * Where the system has at least three schedulers.
+       *
+       * NOTE(review): the guard below skips unless at least four processors
+       * are available, so "three" above looks off by one -- confirm against
+       * the specification item this file was generated from.
+       */
+      if ( rtems_scheduler_get_processor_maximum() < 4 ) {
+        ctx->Map.skip = true;
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the QueueEligible pre-condition: queue_priority selects the
+ * priority of the thread (TQ_BLOCKER_B) enqueued ahead of the enqueueing
+ * thread, or PRIO_INVALID for an empty queue.  how_many counts the threads
+ * which will be enqueued by the action.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_None: {
+      /*
+       * While all priority queues of the thread queue associated with eligible
+       * schedulers of the enqueueing thread are empty.
+       */
+      ctx->queue_priority = PRIO_INVALID;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_High: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is higher than the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->queue_priority = PRIO_ULTRA_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Equal: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is equal to the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->queue_priority = PRIO_VERY_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Low: {
+      /*
+       * While a priority queue of the thread queue associated with an eligible
+       * scheduler of the enqueueing thread is non-empty, while the highest
+       * priority of the priority queue is lower than the priority of the
+       * enqueueing thread with respect to the eligible scheduler.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->queue_priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the QueueIneligible pre-condition: other_before/other_after
+ * control whether TQ_BLOCKER_C (an ineligible scheduler thread) is enqueued
+ * before or after TQ_BLOCKER_B by the action.  Note that the Only and
+ * Before states are prepared identically; they differ only in whether an
+ * eligible priority queue exists (the QueueEligible pre-condition).
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_None: {
+      /*
+       * While each priority queue of the thread queue is associated with an
+       * eligible scheduler of the enqueueing thread.
+       */
+      ctx->other_before = false;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Only: {
+      /*
+       * While exactly one priority queue of the thread queue exists which is
+       * not associated with an eligible scheduler of the enqueueing thread.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->other_before = true;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Before: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * the priority queue is positioned before all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->other_before = true;
+      ctx->other_after = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_After: {
+      /*
+       * While a priority queue of the thread queue exists which is not
+       * associated with an eligible scheduler of the enqueueing thread, while
+       * the priority queue is positioned after all priority queues which are
+       * associated with eligible schedulers of the enqueueing thread.
+       */
+      ++ctx->tq_ctx->how_many;
+      ctx->other_before = false;
+      ctx->other_after = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the PriorityForOwner pre-condition: vital_priority selects in
+ * the action whether the helpers run below (vital) or at (dispensable) the
+ * priority of the enqueueing thread.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Vital: {
+      /*
+       * While at least one priority of the enqueueing thread is higher than
+       * the highest priority of the owner of the thread queue.
+       */
+      ctx->vital_priority = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Dispensable: {
+      /*
+       * While all priorities of the enqueueing thread are lower than or equal
+       * to the highest priority of the owner of the thread queue.
+       */
+      ctx->vital_priority = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the SchedulerForOwner pre-condition: vital_scheduler selects in
+ * the action whether AddVitalScheduler() is used to give the enqueueing
+ * thread a scheduler which the owner does not yet have.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Vital: {
+      /*
+       * While at least one eligible scheduler of the enqueueing thread is not
+       * an eligible scheduler of the owner of the thread queue.
+       */
+      ctx->vital_scheduler = true;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Dispensable: {
+      /*
+       * While all eligible schedulers of the enqueueing thread are an eligible
+       * scheduler of the owner of the thread queue.
+       */
+      ctx->vital_scheduler = false;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_NA:
+      break;
+  }
+}
+
+/*
+ * Prepares the OwnerState pre-condition: owner_obtain/owner_release select
+ * the mutex events used in the action to block the thread queue owner on a
+ * second thread queue with the requested discipline (zero means the owner
+ * is not enqueued anywhere).
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NotEnqueued: {
+      /*
+       * While the owner of the thread queue is not enqueued on a thread queue.
+       */
+      ctx->owner_obtain = 0;
+      ctx->owner_release = 0;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_FIFO: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * FIFO order.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_FIFO_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_FIFO_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_Priority: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * priority order.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_PriorityInherit: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * priority order with priority inheritance.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_C_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_C_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Position post-condition: verifies the position of the
+ * enqueueing thread (TQ_BLOCKER_A) in the thread queue by inspecting the
+ * order of the unblock events recorded by the scheduler, relative to
+ * TQ_BLOCKER_B (eligible queue) and TQ_BLOCKER_C (ineligible queue).
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Post_Position_Check(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Post_Position state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  /* Event receive */
+  T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_HELPER_A ) );
+
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst: {
+      /*
+       * A priority queue associated with the scheduler which contains exactly
+       * the enqueueing thread shall be created as the first priority queue of
+       * the thread queue.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast: {
+      /*
+       * A priority queue associated with the scheduler which contains exactly
+       * the enqueueing thread shall be created as the last priority queue of
+       * the thread queue.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_First: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_Second: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast: {
+      /*
+       * The enqueueing thread shall be enqueued in the priority queue
+       * associated with the scheduler.
+       *
+       * The position of the priority queue in the thread queue shall not
+       * change.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_Position_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the OwnerPriority post-condition against the priorities of the
+ * thread queue owner (TQ_HELPER_A) captured by the action after the
+ * enqueue.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Check(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise: {
+      /*
+       * Each priority of the enqueueing thread which is higher than the
+       * highest priority of the owner of the thread queue shall be made the
+       * highest priority of the owner.
+       */
+      CheckPriorityRaise( ctx, ctx->owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop: {
+      /*
+       * The priorities of the owner of the thread queue shall not change.
+       */
+      CheckPriorityNop( ctx, ctx->owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the OwnerScheduler post-condition against the priorities of the
+ * thread queue owner (TQ_HELPER_A) captured by the action after the
+ * enqueue.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Check(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper: {
+      /*
+       * Each eligible scheduler of the enqueueing thread which is not an
+       * eligible scheduler of the owner of the thread queue shall be made a
+       * helping scheduler of the owner with the priority of the enqueueing
+       * thread.
+       */
+      CheckSchedulerNewHelper( ctx, ctx->owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop: {
+      /*
+       * The set of eligible schedulers of the owner of the thread queue shall
+       * not change.
+       */
+      CheckSchedulerNop( ctx, ctx->owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the OwnerOwnerPriority post-condition against the priorities of
+ * the owner (TQ_HELPER_C) of the thread queue on which the thread queue
+ * owner is blocked, captured by the action after the enqueue.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Check(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise: {
+      /*
+       * Each priority of the enqueueing thread which is higher than the
+       * highest priority of the owner of the thread queue on which the owner
+       * of the thread queue is enqueued shall be made the highest priority of
+       * the owner.
+       */
+      CheckPriorityRaise( ctx, ctx->owner_owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop: {
+      /*
+       * The priorities of the owner of the thread queue on which the owner of
+       * the thread queue is enqueued shall not change.
+       */
+      if ( ctx->owner_obtain == TQ_EVENT_MUTEX_C_OBTAIN ) {
+        CheckPriorityNop( ctx, ctx->owner_owner_priority );
+      } else {
+        /*
+         * For the FIFO and plain priority mutexes, TQ_HELPER_C keeps the
+         * base priority assigned to it by the action (PRIO_HIGH in the
+         * vital priority variants, PRIO_VERY_HIGH otherwise).
+         */
+        if ( ctx->vital_priority ) {
+          T_eq_u32( ctx->owner_owner_priority[ 0 ], PRIO_HIGH );
+        } else {
+          T_eq_u32( ctx->owner_owner_priority[ 0 ], PRIO_VERY_HIGH );
+        }
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the OwnerOwnerScheduler post-condition against the priorities of
+ * the owner (TQ_HELPER_C) of the thread queue on which the thread queue
+ * owner is blocked, captured by the action after the enqueue.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Check(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+  ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper: {
+      /*
+       * Each eligible scheduler of the enqueueing thread which is not an
+       * eligible scheduler of the owner of the thread queue on which the owner
+       * of the thread queue is enqueued shall be made a helping scheduler of
+       * the owner with the priority of the enqueueing thread.
+       */
+      CheckSchedulerNewHelper( ctx, ctx->owner_owner_priority );
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop: {
+      /*
+       * The set of eligible schedulers of the owner of the thread queue on
+       * which the owner of the thread queue is enqueued shall not change.
+       */
+      if ( ctx->owner_obtain == TQ_EVENT_MUTEX_C_OBTAIN ) {
+        CheckSchedulerNop( ctx, ctx->owner_owner_priority );
+      } else {
+        /* Without priority inheritance no helping scheduler may appear. */
+        T_eq_u32( ctx->owner_owner_priority[ 1 ], PRIO_INVALID );
+        T_eq_u32( ctx->owner_owner_priority[ 2 ], PRIO_INVALID );
+        T_eq_u32( ctx->owner_owner_priority[ 3 ], PRIO_INVALID );
+      }
+      break;
+    }
+
+    case ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA:
+      break;
+  }
+}
+
+/*
+ * Test fixture setup: resets the thread queue test context, assigns the
+ * base priorities of the enqueueing thread (TQ_BLOCKER_A) and the second
+ * level owner (TQ_HELPER_C), and prepares the SMP scheduler configuration.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Setup(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_VERY_HIGH );
+  TQSetPriority( ctx->tq_ctx, TQ_HELPER_C, PRIO_HIGH );
+
+  #if defined( RTEMS_SMP )
+  /* TQ_BLOCKER_C provides the priority queue of the ineligible scheduler. */
+  TQSetScheduler(
+    ctx->tq_ctx,
+    TQ_BLOCKER_C,
+    SCHEDULER_B_ID,
+    PRIO_ULTRA_LOW
+  );
+
+  /*
+   * Move the fourth processor from scheduler C to scheduler D so that
+   * scheduler D is usable by the four-scheduler variants; undone in the
+   * teardown.
+   */
+  if ( rtems_scheduler_get_processor_maximum() > 3 ) {
+    RemoveProcessor( SCHEDULER_C_ID, 3 );
+    AddProcessor( SCHEDULER_D_ID, 3 );
+  }
+  #endif
+}
+
+/* Adapts the typed setup handler to the void pointer fixture interface. */
+static void ScoreTqReqEnqueuePriorityInherit_Setup_Wrap( void *arg )
+{
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueuePriorityInherit_Setup( ctx );
+}
+
+/*
+ * Test fixture teardown: resets the thread queue test context and restores
+ * the processor assignment changed by the setup (moves the fourth processor
+ * back from scheduler D to scheduler C).
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Teardown(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+
+  #if defined( RTEMS_SMP )
+  if ( rtems_scheduler_get_processor_maximum() > 3 ) {
+    RemoveProcessor( SCHEDULER_D_ID, 3 );
+    AddProcessor( SCHEDULER_C_ID, 3 );
+  }
+  #endif
+}
+
+/* Adapts the typed teardown handler to the void pointer fixture interface. */
+static void ScoreTqReqEnqueuePriorityInherit_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqEnqueuePriorityInherit_Teardown( ctx );
+}
+
+/*
+ * Per-variant preparation: starts the enqueued thread count at one (the
+ * enqueueing thread itself); the pre-condition prepare handlers increment
+ * it for every additional thread they add to the queue.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Prepare(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+  ctx->tq_ctx->how_many = 1;
+}
+
+/*
+ * Test action: builds the scenario selected by the pre-condition state
+ * (thread queue owner TQ_HELPER_A, optional eligible queue head
+ * TQ_BLOCKER_B, optional ineligible queue head TQ_BLOCKER_C, optional
+ * second level owner TQ_HELPER_C), enqueues TQ_BLOCKER_A, captures the
+ * resulting priorities, and records the unblock order for the Position
+ * post-condition check.
+ */
+static void ScoreTqReqEnqueuePriorityInherit_Action(
+  ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+  /* TQ_BLOCKER_A holds mutex A; TQ_HELPER_A becomes the thread queue owner. */
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_A_OBTAIN );
+  TQSend(
+    ctx->tq_ctx,
+    TQ_HELPER_A,
+    TQ_EVENT_MUTEX_B_OBTAIN | TQ_EVENT_ENQUEUE_PREPARE
+  );
+
+  /* Let TQ_HELPER_C own the mutex selected by the OwnerState pre-condition. */
+  if ( ctx->owner_obtain != 0 ) {
+    TQSend( ctx->tq_ctx, TQ_HELPER_C, ctx->owner_obtain );
+  }
+
+  /* Optionally enqueue the ineligible scheduler thread first. */
+  if ( ctx->other_before ) {
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_C,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  /* Optionally enqueue TQ_BLOCKER_B at the QueueEligible priority. */
+  if ( ctx->queue_priority != PRIO_INVALID ) {
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B , ctx->queue_priority );
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_B,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+    );
+  }
+
+  /* Optionally enqueue the ineligible scheduler thread last. */
+  if ( ctx->other_after ) {
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_C,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  /*
+   * Set up the PriorityForOwner and SchedulerForOwner pre-conditions: with
+   * a vital priority the helpers run at PRIO_HIGH (below the PRIO_VERY_HIGH
+   * enqueueing thread), otherwise at PRIO_VERY_HIGH.  The scheduler used by
+   * the Add*() helpers depends on which schedulers are already occupied by
+   * the variant.
+   */
+  if ( ctx->vital_priority ) {
+    TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_HIGH );
+    TQSetPriority( ctx->tq_ctx, TQ_HELPER_C, PRIO_HIGH );
+
+    if (
+      ctx->queue_priority == PRIO_VERY_HIGH ||
+      ctx->queue_priority == PRIO_ULTRA_HIGH
+    ) {
+      if ( ctx->other_before || ctx->other_after ) {
+        AddVitalPriority( ctx, SCHEDULER_C_ID );
+        AddVitalPriorityHelper( ctx, SCHEDULER_C_ID );
+
+        if ( ctx->vital_scheduler ) {
+          AddVitalScheduler( ctx, SCHEDULER_D_ID );
+        }
+      } else {
+        AddVitalPriority( ctx, SCHEDULER_B_ID );
+        AddVitalPriorityHelper( ctx, SCHEDULER_B_ID );
+
+        if ( ctx->vital_scheduler ) {
+          AddVitalScheduler( ctx, SCHEDULER_C_ID );
+        }
+      }
+    } else {
+      if ( ctx->vital_scheduler ) {
+        if ( ctx->other_before || ctx->other_after ) {
+          AddVitalScheduler( ctx, SCHEDULER_C_ID );
+        } else {
+          AddVitalScheduler( ctx, SCHEDULER_B_ID );
+        }
+      }
+    }
+  } else {
+    TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_VERY_HIGH );
+    TQSetPriority( ctx->tq_ctx, TQ_HELPER_C, PRIO_VERY_HIGH );
+
+    if ( ctx->vital_scheduler ) {
+      if ( ctx->other_before || ctx->other_after ) {
+        AddVitalScheduler( ctx, SCHEDULER_C_ID );
+      } else {
+        AddVitalScheduler( ctx, SCHEDULER_B_ID );
+      }
+    }
+  }
+
+  /* Block the thread queue owner on the mutex owned by TQ_HELPER_C. */
+  if ( ctx->owner_obtain != 0 ) {
+    TQClearDone( ctx->tq_ctx, TQ_HELPER_A );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_HELPER_A,
+      ctx->owner_obtain | ctx->owner_release
+    );
+  }
+
+  /* The enqueue under test: TQ_BLOCKER_A enqueues on the thread queue. */
+  TQSend(
+    ctx->tq_ctx,
+    TQ_BLOCKER_A,
+    TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+  );
+
+  /* Capture the post-enqueue priorities for the post-condition checks. */
+  GetPriorities( ctx, TQ_HELPER_A, ctx->owner_priority );
+  GetPriorities( ctx, TQ_HELPER_C, ctx->owner_owner_priority );
+
+  if ( ctx->owner_obtain != 0 ) {
+    TQSend( ctx->tq_ctx, TQ_HELPER_C, ctx->owner_release );
+    TQWaitForDone( ctx->tq_ctx, TQ_HELPER_A );
+    TQWaitForExecutionStop( ctx->tq_ctx, TQ_HELPER_A );
+  }
+
+  /* Record the unblock order produced by the thread queue surrender. */
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_ENQUEUE_DONE );
+
+  if ( ctx->other_before || ctx->other_after ) {
+    TQSynchronizeRunner2();
+  } else {
+    TQSynchronizeRunner();
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+
+  /* Clean up the mutexes so that the next variant starts fresh. */
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_A_RELEASE );
+  TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+  TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+
+  TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_MUTEX_B_RELEASE );
+  TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_B );
+  TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_B );
+}
+
+static const ScoreTqReqEnqueuePriorityInherit_Entry
+ScoreTqReqEnqueuePriorityInherit_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA },
+ { 1, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA },
+ { 1, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA },
+ { 1, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 1, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper },
+ { 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper }
+};
+
+static const uint8_t
+ScoreTqReqEnqueuePriorityInherit_Map[] = {
+ 0, 0, 0, 0, 12, 12, 12, 39, 0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0,
+ 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 13, 13, 40, 0, 0, 0,
+ 0, 11, 11, 11, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 22, 22, 22, 46, 12, 12, 12, 39, 23, 23, 23, 47, 10, 10, 10, 10, 3,
+ 3, 3, 3, 24, 24, 24, 48, 3, 3, 3, 3, 14, 14, 14, 14, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 8, 8, 8, 29, 9, 9, 9, 30, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 3, 3, 3, 3, 6, 6, 6, 6, 3, 3, 3, 3, 5,
+ 5, 5, 5, 3, 3, 3, 3, 7, 7, 7, 7, 3, 3, 3, 3, 8, 8, 8, 29, 9, 9, 9, 30, 4, 4,
+ 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5,
+ 3, 3, 3, 3, 6, 6, 6, 6, 3, 3, 3, 3, 5, 5, 5, 5, 3, 3, 3, 3, 7, 7, 7, 7, 25,
+ 25, 25, 49, 13, 13, 13, 40, 26, 26, 26, 50, 11, 11, 11, 11, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 27, 27, 27, 51, 3, 3, 3, 3, 15, 15,
+ 15, 15, 3, 3, 3, 3, 28, 28, 28, 52, 3, 3, 3, 3, 16, 16, 16, 16, 22, 22, 22,
+ 46, 12, 12, 12, 39, 23, 23, 23, 47, 10, 10, 10, 10, 31, 31, 31, 53, 24, 24,
+ 24, 48, 32, 32, 32, 54, 14, 14, 14, 14, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 17, 17, 17, 41,
+ 8, 8, 8, 29, 9, 9, 9, 30, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 3, 3, 3, 3, 18, 18, 18, 42, 19, 19, 19, 43, 6, 6, 6, 6, 3, 3, 3, 3,
+ 20, 20, 20, 44, 21, 21, 21, 45, 7, 7, 7, 7, 17, 17, 17, 41, 8, 8, 8, 29, 9,
+ 9, 9, 30, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3,
+ 3, 3, 18, 18, 18, 42, 19, 19, 19, 43, 6, 6, 6, 6, 3, 3, 3, 3, 20, 20, 20, 44,
+ 21, 21, 21, 45, 7, 7, 7, 7, 25, 25, 25, 49, 13, 13, 13, 40, 26, 26, 26, 50,
+ 11, 11, 11, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 33, 33, 33,
+ 55, 27, 27, 27, 51, 34, 34, 34, 56, 15, 15, 15, 15, 35, 35, 35, 57, 28, 28,
+ 28, 52, 36, 36, 36, 58, 16, 16, 16, 16, 22, 22, 22, 46, 12, 12, 12, 39, 23,
+ 23, 23, 47, 10, 10, 10, 10, 31, 31, 31, 53, 24, 24, 24, 48, 32, 32, 32, 54,
+ 14, 14, 14, 14, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 17, 17, 17, 41, 8, 8, 8, 29, 9, 9, 9, 30,
+ 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 37, 37, 37, 59,
+ 18, 18, 18, 42, 19, 19, 19, 43, 6, 6, 6, 6, 38, 38, 38, 60, 20, 20, 20, 44,
+ 21, 21, 21, 45, 7, 7, 7, 7, 17, 17, 17, 41, 8, 8, 8, 29, 9, 9, 9, 30, 4, 4,
+ 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 37, 37, 37, 59, 18, 18,
+ 18, 42, 19, 19, 19, 43, 6, 6, 6, 6, 38, 38, 38, 60, 20, 20, 20, 44, 21, 21,
+ 21, 45, 7, 7, 7, 7, 25, 25, 25, 49, 13, 13, 13, 40, 26, 26, 26, 50, 11, 11,
+ 11, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 33, 33, 33, 55, 27,
+ 27, 27, 51, 34, 34, 34, 56, 15, 15, 15, 15, 35, 35, 35, 57, 28, 28, 28, 52,
+ 36, 36, 36, 58, 16, 16, 16, 16
+};
+
+static size_t ScoreTqReqEnqueuePriorityInherit_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ ScoreTqReqEnqueuePriorityInherit_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqEnqueuePriorityInherit_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqEnqueuePriorityInherit_Fixture = {
+ .setup = ScoreTqReqEnqueuePriorityInherit_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqEnqueuePriorityInherit_Teardown_Wrap,
+ .scope = ScoreTqReqEnqueuePriorityInherit_Scope,
+ .initial_context = &ScoreTqReqEnqueuePriorityInherit_Instance
+};
+
+static const uint16_t ScoreTqReqEnqueuePriorityInherit_Weights[] = {
+ 256, 64, 16, 8, 4, 1
+};
+
+static void ScoreTqReqEnqueuePriorityInherit_Skip(
+ ScoreTqReqEnqueuePriorityInherit_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pcs[ 3 ] = ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pcs[ 4 ] = ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pcs[ 5 ] = ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreTqReqEnqueuePriorityInherit_Entry
+ScoreTqReqEnqueuePriorityInherit_PopEntry(
+ ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 6; ++i ) {
+ index += ScoreTqReqEnqueuePriorityInherit_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreTqReqEnqueuePriorityInherit_Entries[
+ ScoreTqReqEnqueuePriorityInherit_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqEnqueuePriorityInherit_TestVariant(
+ ScoreTqReqEnqueuePriorityInherit_Context *ctx
+)
+{
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqEnqueuePriorityInherit_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Prepare(
+ ctx,
+ ctx->Map.pcs[ 3 ]
+ );
+ ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Prepare(
+ ctx,
+ ctx->Map.pcs[ 4 ]
+ );
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_Prepare(
+ ctx,
+ ctx->Map.pcs[ 5 ]
+ );
+ ScoreTqReqEnqueuePriorityInherit_Action( ctx );
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_Check(
+ ctx,
+ ctx->Map.entry.Post_Position
+ );
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerPriority
+ );
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerScheduler
+ );
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerOwnerPriority
+ );
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerOwnerScheduler
+ );
+}
+
+static T_fixture_node ScoreTqReqEnqueuePriorityInherit_Node;
+
+void ScoreTqReqEnqueuePriorityInherit_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqEnqueuePriorityInherit_Context *ctx;
+
+ ctx = &ScoreTqReqEnqueuePriorityInherit_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqEnqueuePriorityInherit_Node,
+ &ScoreTqReqEnqueuePriorityInherit_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_One;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_None;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_None;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Vital;
+ ctx->Map.pcs[ 3 ] < ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Vital;
+ ctx->Map.pcs[ 4 ] < ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 5 ] = ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NotEnqueued;
+ ctx->Map.pcs[ 5 ] < ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NA;
+ ++ctx->Map.pcs[ 5 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqEnqueuePriorityInherit_PopEntry(
+ ctx
+ );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqEnqueuePriorityInherit_Prepare( ctx );
+ ScoreTqReqEnqueuePriorityInherit_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-priority-inherit.h b/testsuites/validation/tr-tq-enqueue-priority-inherit.h
new file mode 100644
index 0000000000..526766a981
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-priority-inherit.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueuePriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_PRIORITY_INHERIT_H
+#define _TR_TQ_ENQUEUE_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueuePriorityInherit
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_One,
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Two,
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_Three,
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_More,
+ ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_Scheduler;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_None,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_High,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Equal,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_Low,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_QueueEligible;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_None,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Only,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_Before,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_After,
+ ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_QueueIneligible;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Vital,
+ ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_Dispensable,
+ ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_PriorityForOwner;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Vital,
+ ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_Dispensable,
+ ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_SchedulerForOwner;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NotEnqueued,
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_FIFO,
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_Priority,
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_PriorityInherit,
+ ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState_NA
+} ScoreTqReqEnqueuePriorityInherit_Pre_OwnerState;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_First,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_Second,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriorityInherit_Post_Position_NA
+} ScoreTqReqEnqueuePriorityInherit_Post_Position;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority_NA
+} ScoreTqReqEnqueuePriorityInherit_Post_OwnerPriority;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler_NA
+} ScoreTqReqEnqueuePriorityInherit_Post_OwnerScheduler;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Raise,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority_NA
+} ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerPriority;
+
+typedef enum {
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NewHelper,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_Nop,
+ ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler_NA
+} ScoreTqReqEnqueuePriorityInherit_Post_OwnerOwnerScheduler;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreTqReqEnqueuePriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-enqueue-priority.c b/testsuites/validation/tr-tq-enqueue-priority.c
new file mode 100644
index 0000000000..6487c13aa2
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-priority.c
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueuePriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-enqueue-priority.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqEnqueuePriority \
+ * spec:/score/tq/req/enqueue-priority
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_EligibleScheduler_NA : 1;
+ uint8_t Pre_QueueEligible_NA : 1;
+ uint8_t Pre_QueueIneligible_NA : 1;
+ uint8_t Post_Position : 4;
+} ScoreTqReqEnqueuePriority_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/enqueue-priority test case.
+ */
+typedef struct {
+  /**
+   * @brief If this member is true, then the enqueueing thread shall have at
+   *   least one helping scheduler.
+   */
+  bool helping;
+
+  /**
+   * @brief This member specifies the priority of a thread with an eligible
+   *   scheduler equal to an eligible scheduler of the enqueueing thread.
+   */
+  rtems_task_priority priority;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are all ineligible for the enqueueing task shall be enqueued before a
+   *   thread with an eligible scheduler equal to an eligible scheduler of
+   *   the enqueueing thread.
+   *
+   * NOTE(review): declared as size_t but only ever assigned true/false --
+   * looks like it is used as a boolean flag; type kept unchanged since the
+   * file is generated from specification items.
+   */
+  size_t other_before;
+
+  /**
+   * @brief If this member is true, then a thread whose eligible schedulers
+   *   are all ineligible for the enqueueing task shall be enqueued after a
+   *   thread with an eligible scheduler equal to an eligible scheduler of
+   *   the enqueueing thread.
+   *
+   * NOTE(review): declared as size_t but only ever assigned true/false --
+   * see other_before above.
+   */
+  size_t other_after;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqEnqueuePriority_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 3 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqEnqueuePriority_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqEnqueuePriority_Context;
+
+static ScoreTqReqEnqueuePriority_Context
+ ScoreTqReqEnqueuePriority_Instance;
+
+static const char * const ScoreTqReqEnqueuePriority_PreDesc_EligibleScheduler[] = {
+ "Home",
+ "Helping",
+ "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriority_PreDesc_QueueEligible[] = {
+ "None",
+ "High",
+ "Equal",
+ "Low",
+ "NA"
+};
+
+static const char * const ScoreTqReqEnqueuePriority_PreDesc_QueueIneligible[] = {
+ "None",
+ "Only",
+ "Before",
+ "After",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqEnqueuePriority_PreDesc[] = {
+ ScoreTqReqEnqueuePriority_PreDesc_EligibleScheduler,
+ ScoreTqReqEnqueuePriority_PreDesc_QueueEligible,
+ ScoreTqReqEnqueuePriority_PreDesc_QueueIneligible,
+ NULL
+};
+
+typedef ScoreTqReqEnqueuePriority_Context Context;
+
+/*
+ * Returns the thread of the next unblock event recorded by the scheduler,
+ * skipping events which unblock the runner thread itself so that only the
+ * worker threads show up in the position checks.
+ */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  const rtems_tcb *thread;
+
+  do {
+    thread = TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+  } while ( thread == ctx->tq_ctx->runner_tcb );
+
+  return thread;
+}
+
+/* Returns the task control block of the specified worker thread. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/*
+ * Lets TQ_BLOCKER_C obtain mutex A and then makes TQ_BLOCKER_E -- bound to
+ * scheduler_id -- try to obtain and release the same mutex, so it blocks on
+ * it.  NOTE(review): presumably mutex A uses priority inheritance so that
+ * scheduler_id becomes a helping scheduler of the mutex owner -- confirm
+ * against the thread queue test support (tx-thread-queue.h).
+ */
+static void AddHelper( TQContext *tq_ctx, rtems_id scheduler_id )
+{
+  TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
+  TQSetScheduler( tq_ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+  TQSendAndWaitForExecutionStop(
+    tq_ctx,
+    TQ_BLOCKER_E,
+    TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+  );
+}
+
+/*
+ * Undoes AddHelper(): TQ_BLOCKER_C releases mutex A, then the runner obtains
+ * and releases the mutex itself to make sure TQ_BLOCKER_E is done with it.
+ */
+static void RemoveHelper( TQContext *tq_ctx )
+{
+  TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
+  TQMutexObtain( tq_ctx, TQ_MUTEX_A );
+  TQMutexRelease( tq_ctx, TQ_MUTEX_A );
+}
+
+static void ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Prepare(
+ ScoreTqReqEnqueuePriority_Context *ctx,
+ ScoreTqReqEnqueuePriority_Pre_EligibleScheduler state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Home: {
+ /*
+ * While the enqueueing thread has no helping scheduler.
+ */
+ ctx->helping = false;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Helping: {
+ /*
+ * While the enqueueing thread has at least one helping scheduler.
+ */
+ ctx->helping = true;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqEnqueuePriority_Pre_QueueEligible_Prepare(
+ ScoreTqReqEnqueuePriority_Context *ctx,
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqEnqueuePriority_Pre_QueueEligible_None: {
+ /*
+ * While all priority queues of the thread queue associated with eligible
+ * schedulers of the enqueueing thread are empty.
+ */
+ ctx->priority = PRIO_PSEUDO_ISR;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueEligible_High: {
+ /*
+ * While a priority queue of the thread queue associated with an eligible
+ * scheduler of the enqueueing thread is non-empty, while the highest
+ * priority of the priority queue is higher than the priority of the
+ * enqueueing thread with respect to the eligible scheduler.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->priority = PRIO_ULTRA_HIGH;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueEligible_Equal: {
+ /*
+ * While a priority queue of the thread queue associated with an eligible
+ * scheduler of the enqueueing thread is non-empty, while the highest
+ * priority of the priority queue is equal to the priority of the
+ * enqueueing thread with respect to the eligible scheduler.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->priority = PRIO_VERY_HIGH;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueEligible_Low: {
+ /*
+ * While a priority queue of the thread queue associated with an eligible
+ * scheduler of the enqueueing thread is non-empty, while the highest
+ * priority of the priority queue is lower than the priority of the
+ * enqueueing thread with respect to the eligible scheduler.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->priority = PRIO_HIGH;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueEligible_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Prepare(
+ ScoreTqReqEnqueuePriority_Context *ctx,
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqEnqueuePriority_Pre_QueueIneligible_None: {
+ /*
+ * While no priority queue of the thread queue exists which is not
+ * associated with an eligible scheduler of the enqueueing thread.
+ */
+ ctx->other_before = false;
+ ctx->other_after = false;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Only: {
+ /*
+ * While exactly one priority queue of the thread queue exists which is
+ * not associated with an eligible scheduler of the enqueueing thread.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->other_before = true;
+ ctx->other_after = false;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Before: {
+ /*
+ * While a priority queue of the thread queue exists which is not
+ * associated with an eligible scheduler of the enqueueing thread, while
+ * this priority queue is positioned before all priority queues which are
+ * associated with eligible schedulers of the enqueueing thread.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->other_before = true;
+ ctx->other_after = false;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueIneligible_After: {
+ /*
+ * While a priority queue of the thread queue exists which is not
+ * associated with an eligible scheduler of the enqueueing thread, while
+ * this priority queue is positioned after all priority queues which are
+ * associated with eligible schedulers of the enqueueing thread.
+ */
+ ++ctx->tq_ctx->how_many;
+ ctx->other_before = false;
+ ctx->other_after = true;
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Pre_QueueIneligible_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqEnqueuePriority_Post_Position_Check(
+ ScoreTqReqEnqueuePriority_Context *ctx,
+ ScoreTqReqEnqueuePriority_Post_Position state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ /* Event receives */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+
+ switch ( state ) {
+ case ScoreTqReqEnqueuePriority_Post_Position_InitialFirst: {
+ /*
+ * A priority queue associated with the scheduler which contains exactly
+ * the enqueueing thread shall be created as the first priority queue of
+ * the thread queue.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_InitialLast: {
+ /*
+ * A priority queue associated with the scheduler which contains exactly
+ * the enqueueing thread shall be created as the last priority queue of
+ * the thread queue.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_First: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_Second: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_FirstFirst: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ *
+ * The position of the priority queue in the thread queue shall not
+ * change.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_SecondFirst: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ *
+ * The position of the priority queue in the thread queue shall not
+ * change.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_FirstLast: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ *
+ * The position of the priority queue in the thread queue shall not
+ * change.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_SecondLast: {
+ /*
+ * The enqueueing thread shall be enqueued in the priority queue
+ * associated with the scheduler.
+ *
+ * The position of the priority queue in the thread queue shall not
+ * change.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqEnqueuePriority_Post_Position_NA:
+ break;
+ }
+}
+
+/*
+ * Test fixture setup: resets the thread queue context and assigns the worker
+ * priorities used by the test variants.  On SMP configurations TQ_BLOCKER_D
+ * runs under scheduler B, so it can act as the thread whose schedulers are
+ * ineligible for the enqueueing thread (QueueIneligible pre-condition).
+ */
+static void ScoreTqReqEnqueuePriority_Setup(
+  ScoreTqReqEnqueuePriority_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_LOW );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+
+  #if defined( RTEMS_SMP )
+  TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_D, SCHEDULER_B_ID, PRIO_LOW );
+  #endif
+}
+
+static void ScoreTqReqEnqueuePriority_Setup_Wrap( void *arg )
+{
+ ScoreTqReqEnqueuePriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqEnqueuePriority_Setup( ctx );
+}
+
+static void ScoreTqReqEnqueuePriority_Teardown(
+ ScoreTqReqEnqueuePriority_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+}
+
+static void ScoreTqReqEnqueuePriority_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqEnqueuePriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqEnqueuePriority_Teardown( ctx );
+}
+
+static void ScoreTqReqEnqueuePriority_Prepare(
+ ScoreTqReqEnqueuePriority_Context *ctx
+)
+{
+ ctx->tq_ctx->how_many = 1;
+}
+
+/*
+ * Performs the test action: TQ_BLOCKER_A becomes the thread queue owner,
+ * then the optional queue occupants selected by the pre-conditions are
+ * enqueued, and finally TQ_BLOCKER_C -- the enqueueing thread under test --
+ * is enqueued while scheduler events are recorded.
+ */
+static void ScoreTqReqEnqueuePriority_Action(
+  ScoreTqReqEnqueuePriority_Context *ctx
+)
+{
+  /* Make TQ_BLOCKER_A the owner of the thread queue. */
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->other_before ) {
+    /* Enqueue the ineligible-scheduler thread before the eligible one. */
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  if ( ctx->priority != PRIO_PSEUDO_ISR ) {
+    /* Enqueue a thread with an eligible scheduler at the chosen priority. */
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, ctx->priority );
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_B,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+    );
+  }
+
+  if ( ctx->other_after ) {
+    /* Enqueue the ineligible-scheduler thread after the eligible one. */
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  if ( ctx->helping ) {
+    if ( ctx->other_before || ctx->other_after ) {
+      /*
+       * Scheduler B is occupied by TQ_BLOCKER_D (see the setup), so a third
+       * processor is required to provide a helping scheduler.
+       */
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        AddHelper( ctx->tq_ctx, SCHEDULER_C_ID );
+      }
+    } else {
+      AddHelper( ctx->tq_ctx, SCHEDULER_B_ID );
+    }
+  }
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSend(
+    ctx->tq_ctx,
+    TQ_BLOCKER_C,
+    TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+  );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+
+  if ( ctx->other_before || ctx->other_after ) {
+    TQSynchronizeRunner2();
+  } else {
+    TQSynchronizeRunner();
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+
+  if ( ctx->helping ) {
+    if ( ctx->other_before || ctx->other_after ) {
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        RemoveHelper( ctx->tq_ctx );
+      }
+    } else {
+      RemoveHelper( ctx->tq_ctx );
+    }
+  }
+}
+
+static const ScoreTqReqEnqueuePriority_Entry
+ScoreTqReqEnqueuePriority_Entries[] = {
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_SecondLast },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_SecondFirst },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_InitialLast },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_Second },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_FirstLast },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_FirstFirst },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_Second },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_InitialFirst },
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_First },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_InitialFirst },
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_First }
+#else
+ { 1, 0, 0, 0, ScoreTqReqEnqueuePriority_Post_Position_NA }
+#endif
+};
+
+static const uint8_t
+ScoreTqReqEnqueuePriority_Map[] = {
+ 8, 3, 0, 0, 4, 0, 1, 2, 4, 0, 1, 2, 9, 0, 5, 6, 10, 3, 0, 0, 7, 0, 1, 2, 7,
+ 0, 1, 2, 11, 0, 5, 6
+};
+
+static size_t ScoreTqReqEnqueuePriority_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqEnqueuePriority_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqEnqueuePriority_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqEnqueuePriority_Fixture = {
+ .setup = ScoreTqReqEnqueuePriority_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqEnqueuePriority_Teardown_Wrap,
+ .scope = ScoreTqReqEnqueuePriority_Scope,
+ .initial_context = &ScoreTqReqEnqueuePriority_Instance
+};
+
+/*
+ * Pops the transition map entry for the current pre-condition combination
+ * and advances the map index.
+ */
+static inline ScoreTqReqEnqueuePriority_Entry
+ScoreTqReqEnqueuePriority_PopEntry( ScoreTqReqEnqueuePriority_Context *ctx )
+{
+  size_t index;
+
+  index = ctx->Map.index;
+  ctx->Map.index = index + 1;
+  return ScoreTqReqEnqueuePriority_Entries[
+    ScoreTqReqEnqueuePriority_Map[ index ]
+  ];
+}
+
+static void ScoreTqReqEnqueuePriority_TestVariant(
+ ScoreTqReqEnqueuePriority_Context *ctx
+)
+{
+ ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ ScoreTqReqEnqueuePriority_Action( ctx );
+ ScoreTqReqEnqueuePriority_Post_Position_Check(
+ ctx,
+ ctx->Map.entry.Post_Position
+ );
+}
+
+static T_fixture_node ScoreTqReqEnqueuePriority_Node;
+
+void ScoreTqReqEnqueuePriority_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqEnqueuePriority_Context *ctx;
+
+  ctx = &ScoreTqReqEnqueuePriority_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  /*
+   * NOTE(review): ctx is reassigned here -- T_push_fixture() presumably
+   * returns the fixture's initial context, which is the same instance as
+   * above (see .initial_context in the fixture); confirm against the RTEMS
+   * Test Framework.
+   */
+  ctx = T_push_fixture(
+    &ScoreTqReqEnqueuePriority_Node,
+    &ScoreTqReqEnqueuePriority_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  /* Iterate over the full cross product of pre-condition states. */
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Home;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreTqReqEnqueuePriority_Pre_QueueEligible_None;
+      ctx->Map.pcs[ 1 ] < ScoreTqReqEnqueuePriority_Pre_QueueEligible_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      for (
+        ctx->Map.pcs[ 2 ] = ScoreTqReqEnqueuePriority_Pre_QueueIneligible_None;
+        ctx->Map.pcs[ 2 ] < ScoreTqReqEnqueuePriority_Pre_QueueIneligible_NA;
+        ++ctx->Map.pcs[ 2 ]
+      ) {
+        ctx->Map.entry = ScoreTqReqEnqueuePriority_PopEntry( ctx );
+        ScoreTqReqEnqueuePriority_Prepare( ctx );
+        ScoreTqReqEnqueuePriority_TestVariant( ctx );
+      }
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-enqueue-priority.h b/testsuites/validation/tr-tq-enqueue-priority.h
new file mode 100644
index 0000000000..97f5f883de
--- /dev/null
+++ b/testsuites/validation/tr-tq-enqueue-priority.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqEnqueuePriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_ENQUEUE_PRIORITY_H
+#define _TR_TQ_ENQUEUE_PRIORITY_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqEnqueuePriority
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Home,
+ ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_Helping,
+ ScoreTqReqEnqueuePriority_Pre_EligibleScheduler_NA
+} ScoreTqReqEnqueuePriority_Pre_EligibleScheduler;
+
+typedef enum {
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_None,
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_High,
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_Equal,
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_Low,
+ ScoreTqReqEnqueuePriority_Pre_QueueEligible_NA
+} ScoreTqReqEnqueuePriority_Pre_QueueEligible;
+
+typedef enum {
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_None,
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Only,
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_Before,
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_After,
+ ScoreTqReqEnqueuePriority_Pre_QueueIneligible_NA
+} ScoreTqReqEnqueuePriority_Pre_QueueIneligible;
+
+typedef enum {
+ ScoreTqReqEnqueuePriority_Post_Position_InitialFirst,
+ ScoreTqReqEnqueuePriority_Post_Position_InitialLast,
+ ScoreTqReqEnqueuePriority_Post_Position_First,
+ ScoreTqReqEnqueuePriority_Post_Position_Second,
+ ScoreTqReqEnqueuePriority_Post_Position_FirstFirst,
+ ScoreTqReqEnqueuePriority_Post_Position_SecondFirst,
+ ScoreTqReqEnqueuePriority_Post_Position_FirstLast,
+ ScoreTqReqEnqueuePriority_Post_Position_SecondLast,
+ ScoreTqReqEnqueuePriority_Post_Position_NA
+} ScoreTqReqEnqueuePriority_Post_Position;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreTqReqEnqueuePriority_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_ENQUEUE_PRIORITY_H */
diff --git a/testsuites/validation/tr-tq-flush-fifo.c b/testsuites/validation/tr-tq-flush-fifo.c
new file mode 100644
index 0000000000..9b6821ac27
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-fifo.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushFifo
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-flush-fifo.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqFlushFifo spec:/score/tq/req/flush-fifo
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Queue_NA : 1;
+ uint8_t Post_Operation : 2;
+} ScoreTqReqFlushFifo_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/flush-fifo test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqFlushFifo_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 1 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqFlushFifo_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqFlushFifo_Context;
+
+static ScoreTqReqFlushFifo_Context
+ ScoreTqReqFlushFifo_Instance;
+
+static const char * const ScoreTqReqFlushFifo_PreDesc_Queue[] = {
+ "Empty",
+ "NonEmpty",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqFlushFifo_PreDesc[] = {
+ ScoreTqReqFlushFifo_PreDesc_Queue,
+ NULL
+};
+
+typedef ScoreTqReqFlushFifo_Context Context;
+
+static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index );
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/*
+ * Starts the scheduler event recording and flushes the thread queue.  This
+ * handler is submitted by SchedulerEvent() via CallWithinISRSubmit(), so it
+ * runs from interrupt context.
+ */
+static void Flush( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx );
+}
+
+/*
+ * Scheduler event handler: just before a thread is blocked on the thread
+ * queue, submit the Flush() request to run within ISR context and
+ * deregister this handler so that it fires exactly once.
+ */
+static void SchedulerEvent(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_BLOCK
+  ) {
+    ctx->request.handler = Flush;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+    T_scheduler_set_event_handler( NULL, NULL );
+  }
+}
+
+static void ScoreTqReqFlushFifo_Pre_Queue_Prepare(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Pre_Queue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushFifo_Pre_Queue_Empty: {
+ /*
+ * While the thread queue is empty.
+ */
+ ctx->tq_ctx->how_many = 0;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_Queue_NonEmpty: {
+ /*
+ * While the thread queue has at least one enqueued thread.
+ */
+ ctx->tq_ctx->how_many = 3;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_Queue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushFifo_Post_Operation_Check(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Post_Operation state
+)
+{
+ size_t i;
+ const T_scheduler_event *event;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqFlushFifo_Post_Operation_Nop: {
+ /*
+ * No operation shall be performed.
+ */
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Post_Operation_TryExtract: {
+ /*
+ * The enqueued threads of the thread queue may be extracted in FIFO
+ * order.
+ */
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_D ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Post_Operation_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushFifo_Setup( ScoreTqReqFlushFifo_Context *ctx )
+{
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_HIGH );
+}
+
+static void ScoreTqReqFlushFifo_Setup_Wrap( void *arg )
+{
+ ScoreTqReqFlushFifo_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushFifo_Setup( ctx );
+}
+
+static void ScoreTqReqFlushFifo_Teardown( ScoreTqReqFlushFifo_Context *ctx )
+{
+ TQReset( ctx->tq_ctx );
+}
+
+static void ScoreTqReqFlushFifo_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqFlushFifo_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushFifo_Teardown( ctx );
+}
+
+/*
+ * Performs the flush action.  In the non-empty case three workers are
+ * enqueued and the flush is triggered from ISR context just before the last
+ * worker blocks (see SchedulerEvent()); in the empty case the flush is
+ * issued directly by the queue owner.
+ */
+static void ScoreTqReqFlushFifo_Action( ScoreTqReqFlushFifo_Context *ctx )
+{
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->tq_ctx->how_many > 0 ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+    T_scheduler_set_event_handler( SchedulerEvent, ctx );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+  } else {
+    TQSchedulerRecordStart( ctx->tq_ctx );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+}
+
+static const ScoreTqReqFlushFifo_Entry
+ScoreTqReqFlushFifo_Entries[] = {
+ { 0, 0, ScoreTqReqFlushFifo_Post_Operation_Nop },
+ { 0, 0, ScoreTqReqFlushFifo_Post_Operation_TryExtract }
+};
+
+static const uint8_t
+ScoreTqReqFlushFifo_Map[] = {
+ 0, 1
+};
+
+static size_t ScoreTqReqFlushFifo_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqFlushFifo_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreTqReqFlushFifo_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqFlushFifo_Fixture = {
+ .setup = ScoreTqReqFlushFifo_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqFlushFifo_Teardown_Wrap,
+ .scope = ScoreTqReqFlushFifo_Scope,
+ .initial_context = &ScoreTqReqFlushFifo_Instance
+};
+
+static inline ScoreTqReqFlushFifo_Entry ScoreTqReqFlushFifo_PopEntry(
+ ScoreTqReqFlushFifo_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return ScoreTqReqFlushFifo_Entries[
+ ScoreTqReqFlushFifo_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqFlushFifo_TestVariant( ScoreTqReqFlushFifo_Context *ctx )
+{
+ ScoreTqReqFlushFifo_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ ScoreTqReqFlushFifo_Action( ctx );
+ ScoreTqReqFlushFifo_Post_Operation_Check(
+ ctx,
+ ctx->Map.entry.Post_Operation
+ );
+}
+
+static T_fixture_node ScoreTqReqFlushFifo_Node;
+
+void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqFlushFifo_Context *ctx;
+
+  ctx = &ScoreTqReqFlushFifo_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  /*
+   * NOTE(review): ctx is reassigned here -- T_push_fixture() presumably
+   * returns the fixture's initial context, the same instance as above
+   * (see .initial_context in the fixture); confirm against the RTEMS Test
+   * Framework.
+   */
+  ctx = T_push_fixture(
+    &ScoreTqReqFlushFifo_Node,
+    &ScoreTqReqFlushFifo_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  /* Iterate over all states of the single pre-condition. */
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqFlushFifo_Pre_Queue_Empty;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqFlushFifo_Pre_Queue_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    ctx->Map.entry = ScoreTqReqFlushFifo_PopEntry( ctx );
+    ScoreTqReqFlushFifo_TestVariant( ctx );
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-fifo.h b/testsuites/validation/tr-tq-flush-fifo.h
new file mode 100644
index 0000000000..dca1444891
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-fifo.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushFifo
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_FLUSH_FIFO_H
+#define _TR_TQ_FLUSH_FIFO_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqFlushFifo
+ *
+ * @{
+ */
+
+/* Pre-condition states of the thread queue before the flush operation. */
+typedef enum {
+  ScoreTqReqFlushFifo_Pre_Queue_Empty,
+  ScoreTqReqFlushFifo_Pre_Queue_NonEmpty,
+  ScoreTqReqFlushFifo_Pre_Queue_NA
+} ScoreTqReqFlushFifo_Pre_Queue;
+
+/* Post-condition states describing the observed flush behaviour. */
+typedef enum {
+  ScoreTqReqFlushFifo_Post_Operation_Nop,
+  ScoreTqReqFlushFifo_Post_Operation_TryExtract,
+  ScoreTqReqFlushFifo_Post_Operation_NA
+} ScoreTqReqFlushFifo_Post_Operation;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_FLUSH_FIFO_H */
diff --git a/testsuites/validation/tr-tq-flush-priority-inherit.c b/testsuites/validation/tr-tq-flush-priority-inherit.c
new file mode 100644
index 0000000000..beb48ebbe7
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority-inherit.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-flush-priority-inherit.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqFlushPriorityInherit \
+ * spec:/score/tq/req/flush-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/* Packed transition map entry: skip flag, pre-condition N/A flags, and the
+ * expected post-condition states. */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_Priority_NA : 1;
+  uint8_t Pre_Queue_NA : 1;
+  uint8_t Post_Extract : 2;
+  uint8_t Post_PriorityUpdate : 2;
+} ScoreTqReqFlushPriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/flush-priority-inherit test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief If this member is true, then a minimum priority of the owner of the
+   *   thread queue shall be inherited from a thread enqueued on the thread
+   *   queue.
+   */
+  bool minimum;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqFlushPriorityInherit_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition indices for the next
+     *   action.
+     */
+    size_t pci[ 2 ];
+
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 2 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqFlushPriorityInherit_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqFlushPriorityInherit_Context;
+
+/* Single test context instance managed by the fixture. */
+static ScoreTqReqFlushPriorityInherit_Context
+  ScoreTqReqFlushPriorityInherit_Instance;
+
+/* Human-readable names for the Priority pre-condition states. */
+static const char * const ScoreTqReqFlushPriorityInherit_PreDesc_Priority[] = {
+  "Minimum",
+  "NotMinimum",
+  "NA"
+};
+
+/* Human-readable names for the Queue pre-condition states. */
+static const char * const ScoreTqReqFlushPriorityInherit_PreDesc_Queue[] = {
+  "Empty",
+  "NonEmpty",
+  "NA"
+};
+
+/* NULL-terminated table of pre-condition descriptions used by T_get_scope(). */
+static const char * const * const ScoreTqReqFlushPriorityInherit_PreDesc[] = {
+  ScoreTqReqFlushPriorityInherit_PreDesc_Priority,
+  ScoreTqReqFlushPriorityInherit_PreDesc_Queue,
+  NULL
+};
+
+typedef ScoreTqReqFlushPriorityInherit_Context Context;
+
+/* Returns the next recorded unblock scheduler event, advancing *index. */
+static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+{
+  return TQGetNextUnblock( ctx->tq_ctx, index );
+}
+
+/* Returns the next recorded update-priority scheduler event, advancing
+ * *index. */
+static const T_scheduler_event *GetPriorityUpdate( Context *ctx, size_t *index )
+{
+  return T_scheduler_next(
+    &ctx->tq_ctx->scheduler_log.header,
+    T_SCHEDULER_UPDATE_PRIORITY,
+    index
+  );
+}
+
+/* Returns the TCB of the specified worker thread of the test context. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/* ISR request handler: starts scheduler event recording and flushes the
+ * thread queue. */
+static void Flush( void *arg )
+{
+  Context *ctx = arg;
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx );
+}
+
+/* Scheduler event hook: immediately before the observed thread would block,
+ * submit the Flush handler to run from interrupt context and detach this
+ * hook so it fires only once. */
+static void SchedulerEvent(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_BLOCK
+  ) {
+    ctx->request.handler = Flush;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+    T_scheduler_set_event_handler( NULL, NULL );
+  }
+}
+
+/* Prepares the Priority pre-condition by recording in the context whether a
+ * minimum priority shall be inherited by the thread queue owner. */
+static void ScoreTqReqFlushPriorityInherit_Pre_Priority_Prepare(
+  ScoreTqReqFlushPriorityInherit_Context *ctx,
+  ScoreTqReqFlushPriorityInherit_Pre_Priority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum: {
+      /*
+       * While a minimum priority of the owner of the thread queue is inherited
+       * from a thread enqueued on the thread queue.
+       */
+      ctx->minimum = true;
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Pre_Priority_NotMinimum: {
+      /*
+       * While no minimum priority of the owner of the thread queue is
+       * inherited from a thread enqueued on the thread queue.
+       */
+      ctx->minimum = false;
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Pre_Priority_NA:
+      break;
+  }
+}
+
+/* Prepares the Queue pre-condition by setting how many threads the action
+ * shall enqueue on the thread queue. */
+static void ScoreTqReqFlushPriorityInherit_Pre_Queue_Prepare(
+  ScoreTqReqFlushPriorityInherit_Context *ctx,
+  ScoreTqReqFlushPriorityInherit_Pre_Queue state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty: {
+      /*
+       * While the thread queue is empty.
+       */
+      ctx->tq_ctx->how_many = 0;
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Pre_Queue_NonEmpty: {
+      /*
+       * While the thread queue has at least one enqueued thread.
+       */
+      ctx->tq_ctx->how_many = 3;
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Pre_Queue_NA:
+      break;
+  }
+}
+
+/* Checks the Extract post-condition against the recorded scheduler events.
+ * The checks are order-sensitive: events are consumed sequentially via i. */
+static void ScoreTqReqFlushPriorityInherit_Post_Extract_Check(
+  ScoreTqReqFlushPriorityInherit_Context *ctx,
+  ScoreTqReqFlushPriorityInherit_Post_Extract state
+)
+{
+  size_t i;
+  const T_scheduler_event *event;
+
+  i = 0;
+
+  switch ( state ) {
+    case ScoreTqReqFlushPriorityInherit_Post_Extract_Nop: {
+      /*
+       * No operation shall be performed.
+       */
+      /* Event receive */
+      T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetPriorityUpdate( ctx, &i ), &T_scheduler_event_null );
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Post_Extract_All: {
+      /*
+       * The enqueued threads of the thread queue shall be extracted in
+       * priority order for each priority queue associated with a scheduler.
+       * The priority queues of the thread queue shall be accessed in FIFO
+       * order.
+       */
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, NULL );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, NULL );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+      if ( ctx->minimum ) {
+        /*
+         * This priority update is carried out by
+         * _Thread_queue_Flush_critical().
+         */
+        event = GetPriorityUpdate( ctx, &i );
+        T_eq_ptr( event->executing, NULL );
+        T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+      }
+
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+      if ( ctx->minimum && rtems_configuration_get_maximum_processors() > 1 ) {
+        /*
+         * This superfluous priority update is carried out by
+         * _Thread_queue_Enqueue() since TQ_BLOCKER_B would have inherited its
+         * priority for scheduler B to TQ_BLOCKER_A if it were not flushed from
+         * the thread queue.
+         */
+        event = GetPriorityUpdate( ctx, &i );
+        T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+        T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+      }
+
+      /* No further priority updates shall have been recorded. */
+      event = GetPriorityUpdate( ctx, &i );
+      T_eq_ptr( event, &T_scheduler_event_null );
+
+      /* The owner shall be back at its base priority. */
+      T_eq_u32(
+        GetPriority( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ),
+        PRIO_HIGH
+      );
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Post_Extract_NA:
+      break;
+  }
+}
+
+/* Checks the PriorityUpdate post-condition; the actual assertions are done
+ * by the Extract post-condition checks, so this only documents the states. */
+static void ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Check(
+  ScoreTqReqFlushPriorityInherit_Context *ctx,
+  ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No: {
+      /*
+       * The current priority of the owner of the thread queue shall not be
+       * updated by the thread queue flush operation.
+       */
+      /* Checked by ``Extract`` post-condition state ``Nop`` */
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes: {
+      /*
+       * The current priority of the owner of the thread queue shall be updated
+       * by the thread queue flush operation to reflect the loss of inherited
+       * priorities of the flushed threads.
+       */
+      /* Checked by ``Extract`` post-condition state ``All`` */
+      break;
+    }
+
+    case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_NA:
+      break;
+  }
+}
+
+/* Fixture setup: resets the thread queue context and gives the future queue
+ * owner (TQ_BLOCKER_A) its base priority. */
+static void ScoreTqReqFlushPriorityInherit_Setup(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+}
+
+/* Fixture adapter: clears the action loop flag, then delegates to Setup(). */
+static void ScoreTqReqFlushPriorityInherit_Setup_Wrap( void *arg )
+{
+  ScoreTqReqFlushPriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqFlushPriorityInherit_Setup( ctx );
+}
+
+/* Fixture teardown: restores the thread queue context to its initial state. */
+static void ScoreTqReqFlushPriorityInherit_Teardown(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture adapter: clears the action loop flag, then delegates to
+ * Teardown(). */
+static void ScoreTqReqFlushPriorityInherit_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqFlushPriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqFlushPriorityInherit_Teardown( ctx );
+}
+
+/* Test action: make TQ_BLOCKER_A the queue owner, optionally enqueue three
+ * blockers according to the pre-conditions, then trigger the flush from
+ * interrupt context via the scheduler event hook. Statement order is
+ * significant because the post-condition checks replay the event log. */
+static void ScoreTqReqFlushPriorityInherit_Action(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->tq_ctx->how_many > 0 ) {
+    if ( ctx->minimum ) {
+      /* Use a second scheduler for TQ_BLOCKER_B on SMP configurations. */
+      if ( rtems_configuration_get_maximum_processors() > 1 ) {
+        TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_B_ID, PRIO_HIGH );
+      } else {
+        TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_A_ID, PRIO_HIGH );
+      }
+
+      TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+      TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+    } else {
+      TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_A_ID, PRIO_HIGH );
+      TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_HIGH );
+      TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_HIGH );
+
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+    }
+
+    /* The hook submits Flush() just before TQ_BLOCKER_B blocks. */
+    T_scheduler_set_event_handler( SchedulerEvent, ctx );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_B,
+      TQ_EVENT_ENQUEUE
+    );
+  } else {
+    TQSchedulerRecordStart( ctx->tq_ctx );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+}
+
+/* Transition map entries: expected post-conditions per variant. */
+static const ScoreTqReqFlushPriorityInherit_Entry
+ScoreTqReqFlushPriorityInherit_Entries[] = {
+  { 0, 1, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_Nop,
+    ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No },
+  { 0, 0, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+    ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes },
+  { 0, 0, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+    ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No }
+};
+
+/* Maps each pre-condition variant index to an index into the Entries table. */
+static const uint8_t
+ScoreTqReqFlushPriorityInherit_Map[] = {
+  0, 1, 0, 2
+};
+
+/* Formats the current pre-condition states into buf while the action loop is
+ * active; reports an empty scope otherwise. */
+static size_t ScoreTqReqFlushPriorityInherit_Scope(
+  void *arg,
+  char *buf,
+  size_t n
+)
+{
+  ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+  ctx = arg;
+
+  if ( ctx->Map.in_action_loop ) {
+    return T_get_scope(
+      ScoreTqReqFlushPriorityInherit_PreDesc,
+      buf,
+      n,
+      ctx->Map.pcs
+    );
+  }
+
+  return 0;
+}
+
+/* Test fixture binding setup, teardown, and scope reporting to the instance. */
+static T_fixture ScoreTqReqFlushPriorityInherit_Fixture = {
+  .setup = ScoreTqReqFlushPriorityInherit_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqFlushPriorityInherit_Teardown_Wrap,
+  .scope = ScoreTqReqFlushPriorityInherit_Scope,
+  .initial_context = &ScoreTqReqFlushPriorityInherit_Instance
+};
+
+/* Returns the transition map entry for the current variant and advances the
+ * map index. */
+static inline ScoreTqReqFlushPriorityInherit_Entry
+ScoreTqReqFlushPriorityInherit_PopEntry(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  size_t map_index = ctx->Map.index++;
+
+  return ScoreTqReqFlushPriorityInherit_Entries[
+    ScoreTqReqFlushPriorityInherit_Map[ map_index ]
+  ];
+}
+
+/* Derives the effective pre-condition states from the indices, substituting
+ * NA where the current transition map entry marks a pre-condition as not
+ * applicable. */
+static void ScoreTqReqFlushPriorityInherit_SetPreConditionStates(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  if ( ctx->Map.entry.Pre_Priority_NA ) {
+    ctx->Map.pcs[ 0 ] = ScoreTqReqFlushPriorityInherit_Pre_Priority_NA;
+  } else {
+    ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+  }
+
+  ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+}
+
+/* Executes one test variant: prepare both pre-conditions, run the action,
+ * check both post-conditions. */
+static void ScoreTqReqFlushPriorityInherit_TestVariant(
+  ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+  ScoreTqReqFlushPriorityInherit_Pre_Priority_Prepare(
+    ctx,
+    ctx->Map.pcs[ 0 ]
+  );
+  ScoreTqReqFlushPriorityInherit_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreTqReqFlushPriorityInherit_Action( ctx );
+  ScoreTqReqFlushPriorityInherit_Post_Extract_Check(
+    ctx,
+    ctx->Map.entry.Post_Extract
+  );
+  ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Check(
+    ctx,
+    ctx->Map.entry.Post_PriorityUpdate
+  );
+}
+
+static T_fixture_node ScoreTqReqFlushPriorityInherit_Node;
+
+/* Runs the parameterized test case: iterates over the Cartesian product of
+ * the Priority and Queue pre-condition states under the fixture. */
+void ScoreTqReqFlushPriorityInherit_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+  /* Store the caller-provided thread queue context before pushing the
+   * fixture, which returns the fixture-managed context pointer. */
+  ctx = &ScoreTqReqFlushPriorityInherit_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqFlushPriorityInherit_Node,
+    &ScoreTqReqFlushPriorityInherit_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  for (
+    ctx->Map.pci[ 0 ] = ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum;
+    ctx->Map.pci[ 0 ] < ScoreTqReqFlushPriorityInherit_Pre_Priority_NA;
+    ++ctx->Map.pci[ 0 ]
+  ) {
+    for (
+      ctx->Map.pci[ 1 ] = ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty;
+      ctx->Map.pci[ 1 ] < ScoreTqReqFlushPriorityInherit_Pre_Queue_NA;
+      ++ctx->Map.pci[ 1 ]
+    ) {
+      ctx->Map.entry = ScoreTqReqFlushPriorityInherit_PopEntry( ctx );
+      ScoreTqReqFlushPriorityInherit_SetPreConditionStates( ctx );
+      ScoreTqReqFlushPriorityInherit_TestVariant( ctx );
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-priority-inherit.h b/testsuites/validation/tr-tq-flush-priority-inherit.h
new file mode 100644
index 0000000000..1b12f1afb0
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority-inherit.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_FLUSH_PRIORITY_INHERIT_H
+#define _TR_TQ_FLUSH_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ *
+ * @{
+ */
+
+/* Pre-condition states: whether a minimum priority is inherited by the queue
+ * owner. */
+typedef enum {
+  ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum,
+  ScoreTqReqFlushPriorityInherit_Pre_Priority_NotMinimum,
+  ScoreTqReqFlushPriorityInherit_Pre_Priority_NA
+} ScoreTqReqFlushPriorityInherit_Pre_Priority;
+
+/* Pre-condition states of the thread queue before the flush operation. */
+typedef enum {
+  ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty,
+  ScoreTqReqFlushPriorityInherit_Pre_Queue_NonEmpty,
+  ScoreTqReqFlushPriorityInherit_Pre_Queue_NA
+} ScoreTqReqFlushPriorityInherit_Pre_Queue;
+
+/* Post-condition states describing the observed extraction behaviour. */
+typedef enum {
+  ScoreTqReqFlushPriorityInherit_Post_Extract_Nop,
+  ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+  ScoreTqReqFlushPriorityInherit_Post_Extract_NA
+} ScoreTqReqFlushPriorityInherit_Post_Extract;
+
+/* Post-condition states describing the owner priority update behaviour. */
+typedef enum {
+  ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No,
+  ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes,
+  ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_NA
+} ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqFlushPriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_FLUSH_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-flush-priority.c b/testsuites/validation/tr-tq-flush-priority.c
new file mode 100644
index 0000000000..26e2a7fc3b
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority.c
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-flush-priority.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqFlushPriority \
+ * spec:/score/tq/req/flush-priority
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/* Packed transition map entry: skip flag, pre-condition N/A flag, and the
+ * expected post-condition state. */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_Queue_NA : 1;
+  uint8_t Post_Operation : 2;
+} ScoreTqReqFlushPriority_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/flush-priority test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqFlushPriority_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqFlushPriority_Run() parameter.
+   */
+  bool supports_multiple_priority_queues;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 1 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqFlushPriority_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqFlushPriority_Context;
+
+/* Single test context instance managed by the fixture. */
+static ScoreTqReqFlushPriority_Context
+  ScoreTqReqFlushPriority_Instance;
+
+/* Human-readable names for the Queue pre-condition states. */
+static const char * const ScoreTqReqFlushPriority_PreDesc_Queue[] = {
+  "Empty",
+  "NonEmpty",
+  "NA"
+};
+
+/* NULL-terminated table of pre-condition descriptions used by T_get_scope(). */
+static const char * const * const ScoreTqReqFlushPriority_PreDesc[] = {
+  ScoreTqReqFlushPriority_PreDesc_Queue,
+  NULL
+};
+
+typedef ScoreTqReqFlushPriority_Context Context;
+
+/* Returns the next recorded unblock scheduler event, advancing *index. */
+static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+{
+  return TQGetNextUnblock( ctx->tq_ctx, index );
+}
+
+/* Returns the TCB of the specified worker thread of the test context. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+/* ISR request handler: starts scheduler event recording and flushes the
+ * thread queue. */
+static void Flush( void *arg )
+{
+  Context *ctx = arg;
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx );
+}
+
+/* Scheduler event hook: immediately before the observed thread would block,
+ * submit the Flush handler to run from interrupt context and detach this
+ * hook so it fires only once. */
+static void SchedulerEvent(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_BLOCK
+  ) {
+    ctx->request.handler = Flush;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+    T_scheduler_set_event_handler( NULL, NULL );
+  }
+}
+
+/* Prepares the Queue pre-condition by setting how many threads the action
+ * shall enqueue on the thread queue. */
+static void ScoreTqReqFlushPriority_Pre_Queue_Prepare(
+  ScoreTqReqFlushPriority_Context *ctx,
+  ScoreTqReqFlushPriority_Pre_Queue state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqFlushPriority_Pre_Queue_Empty: {
+      /*
+       * While the thread queue is empty.
+       */
+      ctx->tq_ctx->how_many = 0;
+      break;
+    }
+
+    case ScoreTqReqFlushPriority_Pre_Queue_NonEmpty: {
+      /*
+       * While the thread queue has at least one enqueued thread.
+       */
+      ctx->tq_ctx->how_many = 3;
+      break;
+    }
+
+    case ScoreTqReqFlushPriority_Pre_Queue_NA:
+      break;
+  }
+}
+
+/* Checks the Operation post-condition against the recorded scheduler events.
+ * The checks are order-sensitive: events are consumed sequentially via i. */
+static void ScoreTqReqFlushPriority_Post_Operation_Check(
+  ScoreTqReqFlushPriority_Context *ctx,
+  ScoreTqReqFlushPriority_Post_Operation state
+)
+{
+  size_t i;
+  const T_scheduler_event *event;
+
+  i = 0;
+
+  switch ( state ) {
+    case ScoreTqReqFlushPriority_Post_Operation_Nop: {
+      /*
+       * No operation shall be performed.
+       */
+      /* Event receive */
+      T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+      break;
+    }
+
+    case ScoreTqReqFlushPriority_Post_Operation_TryExtract: {
+      /*
+       * The enqueued threads of the thread queue may be extracted in priority
+       * order for each priority queue associated with a scheduler.  The
+       * priority queues of the thread queue shall be accessed in FIFO order.
+       */
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, NULL );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, NULL );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+      event = GetUnblock( ctx, &i );
+      T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+      T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+      break;
+    }
+
+    case ScoreTqReqFlushPriority_Post_Operation_NA:
+      break;
+  }
+}
+
+/* Fixture setup: resets the thread queue context and assigns blocker
+ * priorities; TQ_BLOCKER_B goes to a second scheduler when the queue
+ * supports multiple priority queues on an SMP configuration. */
+static void ScoreTqReqFlushPriority_Setup(
+  ScoreTqReqFlushPriority_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+
+  if (
+    ctx->supports_multiple_priority_queues &&
+    rtems_configuration_get_maximum_processors() > 1
+  ) {
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, SCHEDULER_B_ID, PRIO_HIGH );
+  } else {
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+  }
+
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+  TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+}
+
+/* Fixture adapter: clears the action loop flag, then delegates to Setup(). */
+static void ScoreTqReqFlushPriority_Setup_Wrap( void *arg )
+{
+  ScoreTqReqFlushPriority_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqFlushPriority_Setup( ctx );
+}
+
+/* Fixture teardown: restores the thread queue context to its initial state. */
+static void ScoreTqReqFlushPriority_Teardown(
+  ScoreTqReqFlushPriority_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture adapter: clears the action loop flag, then delegates to
+ * Teardown(). */
+static void ScoreTqReqFlushPriority_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqFlushPriority_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqFlushPriority_Teardown( ctx );
+}
+
+/* Test action: make TQ_BLOCKER_A the queue owner, optionally enqueue three
+ * blockers, then trigger the flush from interrupt context via the scheduler
+ * event hook.  Statement order is significant because the post-condition
+ * checks replay the event log. */
+static void ScoreTqReqFlushPriority_Action(
+  ScoreTqReqFlushPriority_Context *ctx
+)
+{
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->tq_ctx->how_many > 0 ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+    /* The hook submits Flush() just before TQ_BLOCKER_B blocks. */
+    T_scheduler_set_event_handler( SchedulerEvent, ctx );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_B,
+      TQ_EVENT_ENQUEUE
+    );
+  } else {
+    TQSchedulerRecordStart( ctx->tq_ctx );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+}
+
+/* Transition map entries: expected post-conditions per variant. */
+static const ScoreTqReqFlushPriority_Entry
+ScoreTqReqFlushPriority_Entries[] = {
+  { 0, 0, ScoreTqReqFlushPriority_Post_Operation_Nop },
+  { 0, 0, ScoreTqReqFlushPriority_Post_Operation_TryExtract }
+};
+
+/* Maps each pre-condition variant index to an index into the Entries table. */
+static const uint8_t
+ScoreTqReqFlushPriority_Map[] = {
+  0, 1
+};
+
+/* Formats the current pre-condition states into buf while the action loop is
+ * active; reports an empty scope otherwise. */
+static size_t ScoreTqReqFlushPriority_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqFlushPriority_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope(
+    ScoreTqReqFlushPriority_PreDesc,
+    buf,
+    n,
+    ctx->Map.pcs
+  );
+}
+
+/* Test fixture binding setup, teardown, and scope reporting to the instance. */
+static T_fixture ScoreTqReqFlushPriority_Fixture = {
+  .setup = ScoreTqReqFlushPriority_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqFlushPriority_Teardown_Wrap,
+  .scope = ScoreTqReqFlushPriority_Scope,
+  .initial_context = &ScoreTqReqFlushPriority_Instance
+};
+
+/* Returns the transition map entry for the current variant and advances the
+ * map index. */
+static inline ScoreTqReqFlushPriority_Entry ScoreTqReqFlushPriority_PopEntry(
+  ScoreTqReqFlushPriority_Context *ctx
+)
+{
+  size_t index;
+
+  index = ctx->Map.index;
+  ctx->Map.index = index + 1;
+  return ScoreTqReqFlushPriority_Entries[
+    ScoreTqReqFlushPriority_Map[ index ]
+  ];
+}
+
+/* Executes one test variant: prepare the pre-condition, run the action,
+ * check the post-condition. */
+static void ScoreTqReqFlushPriority_TestVariant(
+  ScoreTqReqFlushPriority_Context *ctx
+)
+{
+  ScoreTqReqFlushPriority_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreTqReqFlushPriority_Action( ctx );
+  ScoreTqReqFlushPriority_Post_Operation_Check(
+    ctx,
+    ctx->Map.entry.Post_Operation
+  );
+}
+
+static T_fixture_node ScoreTqReqFlushPriority_Node;
+
+/* Runs the parameterized test case: iterates over all pre-condition states of
+ * the queue and executes the corresponding test variant under the fixture. */
+void ScoreTqReqFlushPriority_Run(
+  TQContext *tq_ctx,
+  bool supports_multiple_priority_queues
+)
+{
+  ScoreTqReqFlushPriority_Context *ctx;
+
+  /* Store the caller-provided parameters before pushing the fixture, which
+   * returns the fixture-managed context pointer. */
+  ctx = &ScoreTqReqFlushPriority_Instance;
+  ctx->tq_ctx = tq_ctx;
+  ctx->supports_multiple_priority_queues = supports_multiple_priority_queues;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqFlushPriority_Node,
+    &ScoreTqReqFlushPriority_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqFlushPriority_Pre_Queue_Empty;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqFlushPriority_Pre_Queue_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    ctx->Map.entry = ScoreTqReqFlushPriority_PopEntry( ctx );
+    ScoreTqReqFlushPriority_TestVariant( ctx );
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-priority.h b/testsuites/validation/tr-tq-flush-priority.h
new file mode 100644
index 0000000000..72bc372ca1
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_FLUSH_PRIORITY_H
+#define _TR_TQ_FLUSH_PRIORITY_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqFlushPriority
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqFlushPriority_Pre_Queue_Empty,
+ ScoreTqReqFlushPriority_Pre_Queue_NonEmpty,
+ ScoreTqReqFlushPriority_Pre_Queue_NA
+} ScoreTqReqFlushPriority_Pre_Queue;
+
+typedef enum {
+ ScoreTqReqFlushPriority_Post_Operation_Nop,
+ ScoreTqReqFlushPriority_Post_Operation_TryExtract,
+ ScoreTqReqFlushPriority_Post_Operation_NA
+} ScoreTqReqFlushPriority_Post_Operation;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ *
+ * @param supports_multiple_priority_queues is true, if the object using the
+ * thread queue supports multiple priority queues, otherwise it is false.
+ */
+void ScoreTqReqFlushPriority_Run(
+ TQContext *tq_ctx,
+ bool supports_multiple_priority_queues
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_FLUSH_PRIORITY_H */
diff --git a/testsuites/validation/tr-tq-surrender-mrsp.c b/testsuites/validation/tr-tq-surrender-mrsp.c
new file mode 100644
index 0000000000..156d4c4a5b
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-mrsp.c
@@ -0,0 +1,1036 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-surrender-mrsp.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqSurrenderMrsp \
+ * spec:/score/tq/req/surrender-mrsp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_InheritedPriority_NA : 1;
+ uint32_t Pre_PreviousHelping_NA : 1;
+ uint32_t Pre_Scheduler_NA : 1;
+ uint32_t Pre_NewHelping_NA : 1;
+ uint32_t Pre_Suspended_NA : 1;
+ uint32_t Pre_WaitState_NA : 1;
+ uint32_t Post_Dequeue : 1;
+ uint32_t Post_Unblock : 1;
+ uint32_t Post_PreviousOwnerPriority : 2;
+ uint32_t Post_RemoveHelper : 2;
+ uint32_t Post_AddHelper : 2;
+ uint32_t Post_Suspended : 2;
+} ScoreTqReqSurrenderMrsp_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/surrender-mrsp test case.
+ */
+typedef struct {
+ /**
+ * @brief If this member is true, then all priorities of the previous owner
+ * inherited from the thread queue thread shall be dispensable.
+ */
+ bool inherited_priorities_are_dispensible;
+
+ /**
+ * @brief If this member is true, then all helping schedulers of the previous
+ * owner thread gained through the thread queue shall be dispensable.
+ */
+ bool helping_schedules_are_dispensible;
+
+ /**
+ * @brief If this member is true, then the previous owner thread shall use
+   *   a helping scheduler.
+ */
+ bool use_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall gain a
+ * vital helping scheduler.
+ */
+ bool gains_new_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be
+ * suspended.
+ */
+ bool suspended;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be in the
+ * intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread before the thread queue surrender operation.
+ */
+ rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread after the thread queue surrender operation.
+ */
+ rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains the identifier of the previous owner thread.
+ */
+ rtems_id previous_owner;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqSurrenderMrsp_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 6 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqSurrenderMrsp_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqSurrenderMrsp_Context;
+
+static ScoreTqReqSurrenderMrsp_Context
+ ScoreTqReqSurrenderMrsp_Instance;
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_InheritedPriority[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_PreviousHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_Scheduler[] = {
+ "Home",
+ "Helping",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_NewHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_Suspended[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderMrsp_PreDesc_WaitState[] = {
+ "IntendToBlock",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqSurrenderMrsp_PreDesc[] = {
+ ScoreTqReqSurrenderMrsp_PreDesc_InheritedPriority,
+ ScoreTqReqSurrenderMrsp_PreDesc_PreviousHelping,
+ ScoreTqReqSurrenderMrsp_PreDesc_Scheduler,
+ ScoreTqReqSurrenderMrsp_PreDesc_NewHelping,
+ ScoreTqReqSurrenderMrsp_PreDesc_Suspended,
+ ScoreTqReqSurrenderMrsp_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqSurrenderMrsp_Context Context;
+
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_InheritedPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Vital: {
+ /*
+ * While at least one priority inherited through the thread queue for the
+ * previous owner is the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Dispensable: {
+ /*
+ * While all priorities inherited through the thread queue for the
+ * previous owner are not the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_PreviousHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the previous owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Dispensable: {
+ /*
+       * While all helping schedulers of the previous owner are not only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_Scheduler_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_Scheduler state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_Scheduler_Home: {
+ /*
+ * While the previous owner executes in its home scheduler.
+ */
+ ctx->use_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_Scheduler_Helping: {
+ /*
+ * While the previous owner executes in a helping scheduler which is
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->use_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_Scheduler_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_NewHelping_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_NewHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_NewHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the new owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_NewHelping_Dispensable: {
+ /*
+       * While all helping schedulers of the new owner are not only available
+ * due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_NewHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_Suspended_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_Suspended state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_Suspended_Yes: {
+ /*
+       * While the new owner is suspended.
+ */
+ ctx->suspended = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_Suspended_No: {
+ /*
+       * While the new owner is not suspended.
+ */
+ ctx->suspended = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_Suspended_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Pre_WaitState_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the new owner is in the intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Pre_WaitState_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_Dequeue_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ /* Validation is done by spec:/score/tq/req/enqueue-priority */
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_Unblock_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_Unblock state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_Unblock_No: {
+ /*
+ * The dequeued thread shall not be unblocked by the thread queue
+ * surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_Unblock_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop: {
+ /*
+ * Each eligible priority of the previous owner which had the highest
+ * priority inherited through the thread queue shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, PRIO_NORMAL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop: {
+ /*
+ * No eligible priority of the previous owner shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper state
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes: {
+ /*
+ * Each helping scheduler of the previous owner which was only available
+ * due to a priority inherited through the thread queue shall be removed
+ * from the previous owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No: {
+ /*
+ * No helping scheduler shall be removed from the previous owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_LOW );
+ } else {
+ T_eq_u32( priority, PRIO_HIGH );
+ }
+ #else
+ T_rsc( sc, RTEMS_INVALID_ID );
+ #endif
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_RemoveHelper_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_AddHelper_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper state
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes: {
+ /*
+ * Each helping scheduler of the new owner which is only available due to
+ * a priority inherited through the thread queue shall be added to the
+ * new owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_VERY_HIGH );
+ } else {
+ T_eq_u32( priority, PRIO_LOW );
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_AddHelper_No: {
+ /*
+       * No helping scheduler shall be added to the new owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ #else
+ T_rsc_success( sc );
+ T_eq_u32( priority, PRIO_HIGH );
+ #endif
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_AddHelper_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Post_Suspended_Check(
+ ScoreTqReqSurrenderMrsp_Context *ctx,
+ ScoreTqReqSurrenderMrsp_Post_Suspended state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderMrsp_Post_Suspended_Yes: {
+ /*
+ * The new owner shall be suspended.
+ */
+ T_true( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_Suspended_No: {
+ /*
+ * The new owner shall be not suspended.
+ */
+ T_false( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ break;
+ }
+
+ case ScoreTqReqSurrenderMrsp_Post_Suspended_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderMrsp_Setup(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_B_ID, PRIO_LOW );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_NORMAL );
+}
+
+static void ScoreTqReqSurrenderMrsp_Setup_Wrap( void *arg )
+{
+ ScoreTqReqSurrenderMrsp_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrenderMrsp_Setup( ctx );
+}
+
+static void ScoreTqReqSurrenderMrsp_Teardown(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+}
+
+static void ScoreTqReqSurrenderMrsp_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqSurrenderMrsp_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrenderMrsp_Teardown( ctx );
+}
+
+static void ScoreTqReqSurrenderMrsp_Prepare(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ ctx->inherited_priorities_are_dispensible = true;
+ ctx->helping_schedules_are_dispensible = true;
+ ctx->use_helping_scheduler = false;
+ ctx->gains_new_helping_scheduler = false;
+ ctx->intend_to_block = false;
+}
+
+static void ScoreTqReqSurrenderMrsp_Action(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ ctx->previous_owner = ctx->tq_ctx->worker_id[ TQ_HELPER_A ];
+
+ SetSelfPriority( PRIO_LOW );
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_MUTEX_A_OBTAIN );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+ }
+
+ /*
+ * Take only the priorities into account which are inherited from the
+ * priority inheritance mutex. This avoids having to deal with the ceiling
+ * priority.
+ */
+ ctx->priority_before = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = true;
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_BUSY_WAIT
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ TQSendAndWaitForIntendToBlock(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_ENQUEUE
+ );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_HELPER_A ] );
+ TQWaitForEventsReceived( ctx->tq_ctx, TQ_BLOCKER_D );
+ TQWaitForIntendToBlock( ctx->tq_ctx, TQ_BLOCKER_D );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_D ] );
+ }
+
+ if ( ctx->use_helping_scheduler ) {
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ WaitForHeir( 1, ctx->tq_ctx->worker_id[ TQ_HELPER_A ] );
+ }
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = false;
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_SCHEDULER_RECORD_START |
+ TQ_EVENT_SURRENDER
+ );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr(
+ TQGetOwner( ctx->tq_ctx ),
+ ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ]
+ );
+ ctx->priority_after = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+}
+
+static void ScoreTqReqSurrenderMrsp_Cleanup(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_SURRENDER
+ );
+ }
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+}
+
+static const ScoreTqReqSurrenderMrsp_Entry
+ScoreTqReqSurrenderMrsp_Entries[] = {
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No }
+};
+
+static const uint8_t
+ScoreTqReqSurrenderMrsp_Map[] = {
+ 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8, 9, 10, 11, 8, 9, 10, 11,
+ 12, 13, 14, 15, 12, 13, 14, 15
+};
+
+static size_t ScoreTqReqSurrenderMrsp_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqSurrenderMrsp_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqSurrenderMrsp_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqSurrenderMrsp_Fixture = {
+ .setup = ScoreTqReqSurrenderMrsp_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqSurrenderMrsp_Teardown_Wrap,
+ .scope = ScoreTqReqSurrenderMrsp_Scope,
+ .initial_context = &ScoreTqReqSurrenderMrsp_Instance
+};
+
+static inline ScoreTqReqSurrenderMrsp_Entry ScoreTqReqSurrenderMrsp_PopEntry(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return ScoreTqReqSurrenderMrsp_Entries[
+ ScoreTqReqSurrenderMrsp_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqSurrenderMrsp_TestVariant(
+ ScoreTqReqSurrenderMrsp_Context *ctx
+)
+{
+ ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqSurrenderMrsp_Pre_Scheduler_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreTqReqSurrenderMrsp_Pre_NewHelping_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ ScoreTqReqSurrenderMrsp_Pre_Suspended_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ ScoreTqReqSurrenderMrsp_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+ ScoreTqReqSurrenderMrsp_Action( ctx );
+ ScoreTqReqSurrenderMrsp_Post_Dequeue_Check(
+ ctx,
+ ctx->Map.entry.Post_Dequeue
+ );
+ ScoreTqReqSurrenderMrsp_Post_Unblock_Check(
+ ctx,
+ ctx->Map.entry.Post_Unblock
+ );
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_PreviousOwnerPriority
+ );
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_RemoveHelper
+ );
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_AddHelper
+ );
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Check(
+ ctx,
+ ctx->Map.entry.Post_Suspended
+ );
+}
+
+static T_fixture_node ScoreTqReqSurrenderMrsp_Node;
+
+void ScoreTqReqSurrenderMrsp_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqSurrenderMrsp_Context *ctx;
+
+ ctx = &ScoreTqReqSurrenderMrsp_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqSurrenderMrsp_Node,
+ &ScoreTqReqSurrenderMrsp_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Vital;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Vital;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrenderMrsp_Pre_Scheduler_Home;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqSurrenderMrsp_Pre_Scheduler_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = ScoreTqReqSurrenderMrsp_Pre_NewHelping_Vital;
+ ctx->Map.pcs[ 3 ] < ScoreTqReqSurrenderMrsp_Pre_NewHelping_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = ScoreTqReqSurrenderMrsp_Pre_Suspended_Yes;
+ ctx->Map.pcs[ 4 ] < ScoreTqReqSurrenderMrsp_Pre_Suspended_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 5 ] = ScoreTqReqSurrenderMrsp_Pre_WaitState_IntendToBlock;
+ ctx->Map.pcs[ 5 ] < ScoreTqReqSurrenderMrsp_Pre_WaitState_NA;
+ ++ctx->Map.pcs[ 5 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqSurrenderMrsp_PopEntry( ctx );
+ ScoreTqReqSurrenderMrsp_Prepare( ctx );
+ ScoreTqReqSurrenderMrsp_TestVariant( ctx );
+ ScoreTqReqSurrenderMrsp_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-surrender-mrsp.h b/testsuites/validation/tr-tq-surrender-mrsp.h
new file mode 100644
index 0000000000..66be73adbb
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-mrsp.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_SURRENDER_MRSP_H
+#define _TR_TQ_SURRENDER_MRSP_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqSurrenderMrsp
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Vital,
+ ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_Dispensable,
+ ScoreTqReqSurrenderMrsp_Pre_InheritedPriority_NA
+} ScoreTqReqSurrenderMrsp_Pre_InheritedPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Vital,
+ ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_Dispensable,
+ ScoreTqReqSurrenderMrsp_Pre_PreviousHelping_NA
+} ScoreTqReqSurrenderMrsp_Pre_PreviousHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_Scheduler_Home,
+ ScoreTqReqSurrenderMrsp_Pre_Scheduler_Helping,
+ ScoreTqReqSurrenderMrsp_Pre_Scheduler_NA
+} ScoreTqReqSurrenderMrsp_Pre_Scheduler;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_NewHelping_Vital,
+ ScoreTqReqSurrenderMrsp_Pre_NewHelping_Dispensable,
+ ScoreTqReqSurrenderMrsp_Pre_NewHelping_NA
+} ScoreTqReqSurrenderMrsp_Pre_NewHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_Suspended_Yes,
+ ScoreTqReqSurrenderMrsp_Pre_Suspended_No,
+ ScoreTqReqSurrenderMrsp_Pre_Suspended_NA
+} ScoreTqReqSurrenderMrsp_Pre_Suspended;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Pre_WaitState_IntendToBlock,
+ ScoreTqReqSurrenderMrsp_Pre_WaitState_NA
+} ScoreTqReqSurrenderMrsp_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderMrsp_Post_Dequeue_NA
+} ScoreTqReqSurrenderMrsp_Post_Dequeue;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_Unblock_No,
+ ScoreTqReqSurrenderMrsp_Post_Unblock_NA
+} ScoreTqReqSurrenderMrsp_Post_Unblock;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority_NA
+} ScoreTqReqSurrenderMrsp_Post_PreviousOwnerPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_RemoveHelper_NA
+} ScoreTqReqSurrenderMrsp_Post_RemoveHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_No,
+ ScoreTqReqSurrenderMrsp_Post_AddHelper_NA
+} ScoreTqReqSurrenderMrsp_Post_AddHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderMrsp_Post_Suspended_Yes,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_No,
+ ScoreTqReqSurrenderMrsp_Post_Suspended_NA
+} ScoreTqReqSurrenderMrsp_Post_Suspended;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqSurrenderMrsp_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_SURRENDER_MRSP_H */
diff --git a/testsuites/validation/tr-tq-surrender-priority-inherit.c b/testsuites/validation/tr-tq-surrender-priority-inherit.c
new file mode 100644
index 0000000000..75b93e40d2
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-priority-inherit.c
@@ -0,0 +1,2516 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpbarrier.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-surrender-priority-inherit.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit \
+ * spec:/score/tq/req/surrender-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_SchedulerCount_NA : 1;
+ uint32_t Pre_InheritedPriority_NA : 1;
+ uint32_t Pre_PreviousHelping_NA : 1;
+ uint32_t Pre_UsedScheduler_NA : 1;
+ uint32_t Pre_NewPriority_NA : 1;
+ uint32_t Pre_NewHelping_NA : 1;
+ uint32_t Pre_Suspended_NA : 1;
+ uint32_t Pre_WaitState_NA : 1;
+ uint32_t Post_Dequeue : 1;
+ uint32_t Post_Unblock : 2;
+ uint32_t Post_PreviousOwnerPriority : 2;
+ uint32_t Post_NewPriority : 2;
+ uint32_t Post_RemoveHelper : 2;
+ uint32_t Post_AddHelper : 2;
+ uint32_t Post_Suspended : 2;
+} ScoreTqReqSurrenderPriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/surrender-priority-inherit test
+ * case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+ CallWithinISRRequest request;;
+
+ /**
+ * @brief This member contains the barrier to synchronize the runner and the
+ * worker.
+ */
+ SMP_barrier_Control barrier;
+
+ /**
+ * @brief If this member is true, then all priorities of the previous owner
+ * inherited from the thread queue thread shall be dispensable.
+ */
+ bool inherited_priorities_are_dispensible;
+
+ /**
+ * @brief If this member is true, then all helping schedulers of the previous
+ * owner thread gained through the thread queue shall be dispensable.
+ */
+ bool helping_schedules_are_dispensible;
+
+ /**
+ * @brief If this member is true, then the previous owner thread shall use
+ * helping scheduler.
+ */
+ bool use_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall gain a
+ * vital priority.
+ */
+ bool gains_new_priority;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall gain a
+ * vital helping scheduler.
+ */
+ bool gains_new_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be
+ * suspended.
+ */
+ bool suspended;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be in the
+ * intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
+ * @brief If this member is true, then the action was performed.
+ */
+ bool action_performed;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread before the thread queue surrender operation.
+ */
+ rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread after the thread queue surrender operation.
+ */
+ rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains the identifier of the previous owner thread.
+ */
+ rtems_id previous_owner;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqSurrenderPriorityInherit_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 8 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqSurrenderPriorityInherit_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqSurrenderPriorityInherit_Context;
+
+static ScoreTqReqSurrenderPriorityInherit_Context
+ ScoreTqReqSurrenderPriorityInherit_Instance;
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_SchedulerCount[] = {
+ "One",
+ "Two",
+ "More",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_InheritedPriority[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_PreviousHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_UsedScheduler[] = {
+ "Home",
+ "Helping",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_NewPriority[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_NewHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_Suspended[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqSurrenderPriorityInherit_PreDesc[] = {
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_SchedulerCount,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_InheritedPriority,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_PreviousHelping,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_UsedScheduler,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_NewPriority,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_NewHelping,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_Suspended,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqSurrenderPriorityInherit_Context Context;
+
+#define NEW_OWNER TQ_BLOCKER_A
+
+#define PREV_OWNER_HELPER_A TQ_BLOCKER_B
+
+#define PREV_OWNER_HELPER_B TQ_BLOCKER_C
+
+#define PREV_OWNER_HELPER_C TQ_BLOCKER_D
+
+#define NEW_OWNER_NEW_HELPER TQ_BLOCKER_E
+
+#define NEW_OWNER_NEW_PRIORITY TQ_WORKER_F
+
+#define NEW_OWNER_OLD_PRIORITY TQ_HELPER_C
+
+#define PREV_OWNER_MOVER TQ_HELPER_A
+
+#define PREV_OWNER TQ_HELPER_A
+
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+static void Surrender( Context *ctx )
+{
+ Status_Control status;
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ NEW_OWNER ] );
+ }
+
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr( TQGetOwner( ctx->tq_ctx ), ctx->tq_ctx->worker_tcb[ NEW_OWNER ] );
+ ctx->priority_after = GetSelfPriority();
+}
+
+#if defined(RTEMS_SMP)
+static void Delay( void *arg )
+{
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+}
+
+static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+ ctx->request.handler = Delay;
+ CallWithinISRSubmit( &ctx->request );
+ }
+}
+#endif
+
+static void Setup( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, NEW_OWNER, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, PREV_OWNER_HELPER_A, PRIO_VERY_HIGH );
+ TQSetScheduler(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_B,
+ SCHEDULER_B_ID,
+ PRIO_HIGH
+ );
+ TQSetPriority( ctx->tq_ctx, NEW_OWNER_OLD_PRIORITY, PRIO_VERY_LOW );
+ TQSetPriority( ctx->tq_ctx, NEW_OWNER_NEW_PRIORITY, PRIO_LOW );
+
+ TQSetPriority( ctx->tq_ctx, PREV_OWNER_MOVER, PRIO_ULTRA_HIGH );
+
+ if ( rtems_scheduler_get_processor_maximum() >= 3 ) {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_C,
+ SCHEDULER_C_ID,
+ PRIO_HIGH
+ );
+ }
+#else
+ TQSetPriority( ctx->tq_ctx, NEW_OWNER, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, PREV_OWNER_HELPER_A, PRIO_HIGH );
+#endif
+
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_B, PRIO_ULTRA_LOW );
+}
+
+static void Action( Context *ctx )
+{
+ Status_Control status;
+#if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+#endif
+
+ ctx->action_performed = true;
+ ctx->previous_owner = ctx->tq_ctx->runner_id;
+ TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+#if defined(RTEMS_SMP)
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_MUTEX_B_OBTAIN
+ );
+#endif
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_A,
+ TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+
+#if defined(RTEMS_SMP)
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_B,
+ TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+ );
+
+ if ( ctx->gains_new_priority && ctx->gains_new_helping_scheduler ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ }
+#endif
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+#if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, NEW_OWNER, TQ_EVENT_ENQUEUE );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ } else {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ if ( ctx->gains_new_priority ) {
+ TQSend(
+ ctx->tq_ctx,
+ NEW_OWNER_OLD_PRIORITY,
+ TQ_EVENT_HELPER_B_SYNC | TQ_EVENT_MUTEX_B_OBTAIN
+ );
+ TQSynchronizeRunner();
+ TQSend(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_PRIORITY,
+ TQ_EVENT_HELPER_B_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ SCHEDULER_C_ID,
+ PRIO_LOW
+ );
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+ } else if ( ctx->gains_new_helping_scheduler ) {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ SCHEDULER_A_ID,
+ PRIO_LOW
+ );
+ TQSend(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ TQ_EVENT_HELPER_B_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ }
+
+ /*
+ * In order to run the validation test variant also for the intend to block
+ * wait state, we would need at least three processors. Skip it for now.
+ */
+ if ( ctx->use_helping_scheduler && !ctx->intend_to_block ) {
+ ctx->tq_ctx->busy_wait[ PREV_OWNER_MOVER ] = true;
+ TQSend( ctx->tq_ctx, PREV_OWNER_MOVER, TQ_EVENT_BUSY_WAIT );
+
+ while ( rtems_scheduler_get_processor() != 1 ) {
+ /* Wait */
+ }
+
+ ctx->tq_ctx->busy_wait[ PREV_OWNER_MOVER ] = false;
+ }
+#else
+ TQSend(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_HELPER_B_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+#endif
+
+ Surrender( ctx );
+
+#if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ }
+#endif
+}
+
+static void Cleanup( Context *ctx )
+{
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ NEW_OWNER ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_SURRENDER
+ );
+ TQWaitForExecutionStop( ctx->tq_ctx, NEW_OWNER );
+
+#if defined(RTEMS_SMP)
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_MUTEX_B_RELEASE
+ );
+
+ if ( ctx->gains_new_priority ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER_OLD_PRIORITY,
+ TQ_EVENT_MUTEX_B_RELEASE
+ );
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_PRIORITY,
+ TQ_EVENT_SURRENDER
+ );
+ }
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ TQ_EVENT_SURRENDER
+ );
+ }
+#endif
+
+ TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+ TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+ TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+}
+
+static void SetupSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, NEW_OWNER, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, PREV_OWNER_HELPER_A, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx,
+ PREV_OWNER_HELPER_B,
+ SCHEDULER_B_ID,
+ PRIO_LOW
+ );
+ TQSetPriority( ctx->tq_ctx, NEW_OWNER_NEW_HELPER, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, PREV_OWNER, PRIO_NORMAL );
+#endif
+}
+
+static void ActionSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ ctx->action_performed = true;
+ ctx->previous_owner = ctx->tq_ctx->worker_id[ PREV_OWNER ];
+
+ SetSelfPriority( PRIO_LOW );
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSend( ctx->tq_ctx, PREV_OWNER, TQ_EVENT_MUTEX_A_OBTAIN );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, PREV_OWNER_HELPER_A, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_B,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+ }
+
+ /*
+ * Take only the priorities into account which are inherited from the
+ * priority inheritance mutex. This avoids having to deal with the ceiling
+ * priority.
+ */
+ ctx->priority_before = TQGetPriority( ctx->tq_ctx, PREV_OWNER );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ ctx->tq_ctx->busy_wait[ PREV_OWNER ] = true;
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ PREV_OWNER,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_BUSY_WAIT
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ TQSendAndWaitForIntendToBlock(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_ENQUEUE
+ );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ TQ_EVENT_ENQUEUE
+ );
+ YieldTask( ctx->tq_ctx->worker_id[ PREV_OWNER ] );
+ TQWaitForEventsReceived( ctx->tq_ctx, NEW_OWNER_NEW_HELPER );
+ TQWaitForIntendToBlock( ctx->tq_ctx, NEW_OWNER_NEW_HELPER );
+ YieldTask( ctx->tq_ctx->worker_id[ NEW_OWNER_NEW_HELPER ] );
+ }
+
+ if ( ctx->use_helping_scheduler ) {
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ NEW_OWNER ] );
+ }
+
+ ctx->tq_ctx->busy_wait[ PREV_OWNER ] = false;
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ PREV_OWNER,
+ TQ_EVENT_SCHEDULER_RECORD_START |
+ TQ_EVENT_SURRENDER
+ );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr(
+ TQGetOwner( ctx->tq_ctx ),
+ ctx->tq_ctx->worker_tcb[ NEW_OWNER ]
+ );
+ ctx->priority_after = TQGetPriority( ctx->tq_ctx, PREV_OWNER );
+#endif
+}
+
+static void CleanupSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ NEW_OWNER ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ NEW_OWNER_NEW_HELPER,
+ TQ_EVENT_SURRENDER
+ );
+ }
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ PREV_OWNER,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_A,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ PREV_OWNER_HELPER_B,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+#endif
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_One: {
+ /*
+       * Where the system has exactly one scheduler.
+ */
+ if ( rtems_scheduler_get_processor_maximum() != 1 ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_Two: {
+ /*
+ * Where the system has exactly two schedulers.
+ */
+ if ( rtems_scheduler_get_processor_maximum() != 2 ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_More: {
+ /*
+ * Where the system has at least three schedulers.
+ */
+ if ( rtems_scheduler_get_processor_maximum() < 3 ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital: {
+ /*
+ * While at least one priority inherited through the thread queue for the
+ * previous owner is the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Dispensable: {
+ /*
+ * While all priorities inherited through the thread queue for the
+ * previous owner are not the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the previous owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Dispensable: {
+ /*
+       * While all helping schedulers of the previous owner are not only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Home: {
+ /*
+ * While the previous owner executes in its home scheduler.
+ */
+ ctx->use_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Helping: {
+ /*
+ * While the previous owner executes in a helping scheduler which is
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->use_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Vital: {
+ /*
+ * While at least one highest priority of the new owner is only available
+ * due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_priority = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Dispensable: {
+ /*
+ * While all highest priorities of the new owner are not only available
+ * due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_priority = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the new owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Dispensable: {
+ /*
+       * While all helping schedulers of the new owner are not only available
+ * due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes: {
+ /*
+       * While the new owner is suspended.
+ */
+ ctx->suspended = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_No: {
+ /*
+       * While the new owner is not suspended.
+ */
+ ctx->suspended = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked: {
+ /*
+ * While the new owner is in the blocked wait state.
+ */
+ ctx->intend_to_block = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the new owner is in the intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ /* Validation is done by spec:/score/tq/req/enqueue-priority */
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes: {
+ /*
+ * The dequeued thread shall be unblocked by the thread queue surrender
+ * operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, NEW_OWNER ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No: {
+ /*
+ * The dequeued thread shall not be unblocked by the thread queue
+ * surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA:
+ break;
+ }
+}
+
+static void
+ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop: {
+ /*
+ * Each eligible priority of the previous owner which had the highest
+ * priority inherited through the thread queue shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, PRIO_NORMAL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop: {
+ /*
+ * No eligible priority of the previous owner shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Check(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+  ScoreTqReqSurrenderPriorityInherit_Post_NewPriority state
+)
+{
+  rtems_status_code   status;
+  rtems_task_priority prio;
+
+  switch ( state ) {
+    case ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise: {
+      /*
+       * Each eligible priority of the new owner which inherited the highest
+       * priority through the thread queue shall be updated.
+       */
+      status = rtems_task_get_priority(
+        ctx->tq_ctx->worker_id[ NEW_OWNER ],
+        SCHEDULER_A_ID,
+        &prio
+      );
+      T_rsc_success( status );
+      T_eq_u32( prio, PRIO_LOW );
+      break;
+    }
+
+    case ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop: {
+      /*
+       * No highest priority of the new owner shall be changed.
+       */
+      status = rtems_task_get_priority(
+        ctx->tq_ctx->worker_id[ NEW_OWNER ],
+        ctx->gains_new_helping_scheduler ? SCHEDULER_C_ID : SCHEDULER_A_ID,
+        &prio
+      );
+      #if defined(RTEMS_SMP)
+      T_rsc( status, RTEMS_NOT_DEFINED );
+      #else
+      T_rsc_success( status );
+      T_eq_u32( prio, PRIO_HIGH );
+      #endif
+      break;
+    }
+
+    case ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA:
+      break;
+  }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Check(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+  ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper state
+)
+{
+  rtems_status_code   status;
+  rtems_task_priority prio;
+
+  if ( state == ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA ) {
+    return;
+  }
+
+  /* Both remaining states query scheduler B of the previous owner */
+  status = rtems_task_get_priority(
+    ctx->previous_owner,
+    SCHEDULER_B_ID,
+    &prio
+  );
+
+  if ( state == ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes ) {
+    /*
+     * Each helping scheduler of the previous owner which was only available
+     * due to a priority inherited through the thread queue shall be removed
+     * from the previous owner.
+     */
+    T_rsc( status, RTEMS_NOT_DEFINED );
+  } else {
+    /*
+     * No helping scheduler shall be removed from the previous owner.
+     */
+    #if defined(RTEMS_SMP)
+    T_rsc_success( status );
+
+    if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+      T_eq_u32( prio, PRIO_LOW );
+    } else {
+      T_eq_u32( prio, PRIO_HIGH );
+    }
+    #else
+    T_rsc( status, RTEMS_INVALID_ID );
+    #endif
+  }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Check(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+  ScoreTqReqSurrenderPriorityInherit_Post_AddHelper state
+)
+{
+  rtems_id            sched;
+  rtems_task_priority prio;
+  rtems_status_code   status;
+
+  if ( state == ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA ) {
+    return;
+  }
+
+  /* The queried scheduler depends on whether a new priority was gained */
+  sched = ctx->gains_new_priority ? SCHEDULER_C_ID : SCHEDULER_A_ID;
+
+  status = rtems_task_get_priority(
+    ctx->tq_ctx->worker_id[ NEW_OWNER ],
+    sched,
+    &prio
+  );
+
+  if ( state == ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes ) {
+    /*
+     * Each helping scheduler of the new owner which is only available due to
+     * a priority inherited through the thread queue shall be added to the
+     * new owner.
+     */
+    T_rsc_success( status );
+
+    if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+      T_eq_u32( prio, PRIO_VERY_HIGH );
+    } else {
+      T_eq_u32( prio, PRIO_LOW );
+    }
+  } else {
+    /*
+     * No helping scheduler shall be added to the new owner.
+     */
+    #if defined(RTEMS_SMP)
+    T_rsc( status, RTEMS_NOT_DEFINED );
+    #else
+    T_rsc_success( status );
+    T_eq_u32( prio, PRIO_HIGH );
+    #endif
+  }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Check(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+  ScoreTqReqSurrenderPriorityInherit_Post_Suspended state
+)
+{
+  if ( state == ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes ) {
+    /*
+     * The new owner shall be suspended.
+     */
+    T_true( IsTaskSuspended( ctx->tq_ctx->worker_id[ NEW_OWNER ] ) );
+  } else if ( state == ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No ) {
+    /*
+     * The new owner shall not be suspended.
+     */
+    T_false( IsTaskSuspended( ctx->tq_ctx->worker_id[ NEW_OWNER ] ) );
+  }
+  /* The NA state needs no check */
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Setup(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+  /* Register the context with the request and reset the thread queue */
+  ctx->request.arg = ctx;
+  TQReset( ctx->tq_ctx );
+
+  /* Sticky enqueue variants need the dedicated sticky setup */
+  if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_STICKY ) {
+    Setup( ctx );
+  } else {
+    SetupSticky( ctx );
+  }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Setup_Wrap( void *arg )
+{
+  ScoreTqReqSurrenderPriorityInherit_Context *the_ctx = arg;
+
+  /* Leave the action loop before running the fixture setup */
+  the_ctx->Map.in_action_loop = false;
+  ScoreTqReqSurrenderPriorityInherit_Setup( the_ctx );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Teardown(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+  (void) ctx;
+
+  /* Restore the runner to scheduler A at normal priority */
+  SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqSurrenderPriorityInherit_Context *the_ctx = arg;
+
+  /* Leave the action loop before running the fixture teardown */
+  the_ctx->Map.in_action_loop = false;
+  ScoreTqReqSurrenderPriorityInherit_Teardown( the_ctx );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Prepare(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+  /* Clear all per-transition state before each action */
+  ctx->action_performed = false;
+  ctx->intend_to_block = false;
+  ctx->use_helping_scheduler = false;
+  ctx->gains_new_priority = false;
+  ctx->gains_new_helping_scheduler = false;
+  ctx->inherited_priorities_are_dispensible = true;
+  ctx->helping_schedules_are_dispensible = true;
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Action(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+  /* Dispatch to the action matching the enqueue variant under test */
+  if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_STICKY ) {
+    Action( ctx );
+  } else {
+    ActionSticky( ctx );
+  }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Cleanup(
+  ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+  /* Nothing to undo if the action was skipped */
+  if ( !ctx->action_performed ) {
+    return;
+  }
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    CleanupSticky( ctx );
+  } else {
+    Cleanup( ctx );
+  }
+}
+
+static const ScoreTqReqSurrenderPriorityInherit_Entry
+ScoreTqReqSurrenderPriorityInherit_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#endif
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA }
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No }
+#endif
+};
+
+/*
+ * Maps an index computed from the pre-condition state vector (see
+ * ScoreTqReqSurrenderPriorityInherit_PopEntry()) to an index into
+ * ScoreTqReqSurrenderPriorityInherit_Entries[].  This table is generated by
+ * spec2modules.py from the specification items; do not edit by hand.
+ */
+static const uint8_t
+ScoreTqReqSurrenderPriorityInherit_Map[] = {
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 4, 5, 6, 7, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 8, 9, 10, 11, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2,
+ 3, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 2, 3, 2, 3, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 2, 3, 2, 3, 24, 25, 26, 27, 28, 29,
+ 30, 31, 4, 5, 6, 7, 2, 3, 2, 3, 24, 25, 26, 27, 28, 29, 30, 31, 4, 5, 6, 7,
+ 2, 3, 2, 3, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 2, 3, 2, 3, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 2, 3, 2, 3, 44, 45, 46, 47, 48,
+ 49, 50, 51, 8, 9, 10, 11, 2, 3, 2, 3, 44, 45, 46, 47, 48, 49, 50, 51, 8, 9,
+ 10, 11, 52, 53, 54, 55, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 52,
+ 53, 54, 55, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 56, 57, 58, 59,
+ 24, 25, 26, 27, 28, 29, 30, 31, 4, 5, 6, 7, 56, 57, 58, 59, 24, 25, 26, 27,
+ 28, 29, 30, 31, 4, 5, 6, 7, 60, 61, 62, 63, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 60, 61, 62, 63, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 64, 65, 66, 67, 44, 45, 46, 47, 48, 49, 50, 51, 8, 9, 10, 11, 64, 65, 66,
+ 67, 44, 45, 46, 47, 48, 49, 50, 51, 8, 9, 10, 11
+};
+
+/*
+ * Produces the test scope description for the test report.
+ *
+ * While the action loop is running (Map.in_action_loop is set by
+ * ScoreTqReqSurrenderPriorityInherit_Run()), the scope is built by
+ * T_get_scope() from the pre-condition descriptions and the current
+ * pre-condition state vector; outside the action loop no scope text is
+ * produced and zero is returned.
+ */
+static size_t ScoreTqReqSurrenderPriorityInherit_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqSurrenderPriorityInherit_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+/*
+ * Test fixture: wires the generated setup and teardown wrappers and the
+ * scope provider to the singleton test context instance.  There is no stop
+ * handler for this test case.
+ */
+static T_fixture ScoreTqReqSurrenderPriorityInherit_Fixture = {
+ .setup = ScoreTqReqSurrenderPriorityInherit_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqSurrenderPriorityInherit_Teardown_Wrap,
+ .scope = ScoreTqReqSurrenderPriorityInherit_Scope,
+ .initial_context = &ScoreTqReqSurrenderPriorityInherit_Instance
+};
+
+/*
+ * Weight of each of the eight pre-conditions in the transition map index
+ * computation: the index is the weighted sum of the pre-condition state
+ * vector (see ScoreTqReqSurrenderPriorityInherit_PopEntry()).
+ */
+static const uint8_t ScoreTqReqSurrenderPriorityInherit_Weights[] = {
+ 128, 64, 32, 16, 8, 4, 2, 1
+};
+
+/*
+ * Fast-forwards the pre-condition state vector after a pre-condition prepare
+ * flagged the current variant as skipped.
+ *
+ * All pre-condition states following the one at the given index are set to
+ * their last valid state (the ``*_NA`` enumerator minus one), so that the
+ * nested loops in ScoreTqReqSurrenderPriorityInherit_Run() advance past the
+ * whole skipped sub-tree of state combinations.  The switch cases fall
+ * through deliberately to reset every subsequent pre-condition.
+ */
+static void ScoreTqReqSurrenderPriorityInherit_Skip(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pcs[ 3 ] = ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pcs[ 4 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pcs[ 5 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA - 1;
+ /* Fall through */
+ case 6:
+ ctx->Map.pcs[ 6 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA - 1;
+ /* Fall through */
+ case 7:
+ ctx->Map.pcs[ 7 ] = ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA - 1;
+ break;
+ }
+}
+
+/*
+ * Pops the transition map entry for the current pre-condition state vector.
+ *
+ * Normally the entries are consumed sequentially via Map.index.  If the
+ * previous variant was skipped, the index is recomputed as the weighted sum
+ * of the (fast-forwarded) pre-condition state vector, so consumption resumes
+ * at the first variant after the skipped sub-tree.
+ */
+static inline ScoreTqReqSurrenderPriorityInherit_Entry
+ScoreTqReqSurrenderPriorityInherit_PopEntry(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 8; ++i ) {
+ index += ScoreTqReqSurrenderPriorityInherit_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreTqReqSurrenderPriorityInherit_Entries[
+ ScoreTqReqSurrenderPriorityInherit_Map[ index ]
+ ];
+}
+
+/*
+ * Runs one test variant: prepares each pre-condition according to the
+ * current pre-condition state vector, performs the action, and checks all
+ * post-conditions against the popped transition map entry.
+ *
+ * If preparing the SchedulerCount pre-condition flags the variant as
+ * skipped, the remaining pre-condition states are fast-forwarded via
+ * ScoreTqReqSurrenderPriorityInherit_Skip() and the variant is abandoned
+ * before the action is performed.
+ */
+static void ScoreTqReqSurrenderPriorityInherit_TestVariant(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrenderPriorityInherit_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 3 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 4 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Prepare(
+ ctx,
+ ctx->Map.pcs[ 5 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Prepare(
+ ctx,
+ ctx->Map.pcs[ 6 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Prepare(
+ ctx,
+ ctx->Map.pcs[ 7 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Action( ctx );
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Check(
+ ctx,
+ ctx->Map.entry.Post_Dequeue
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Check(
+ ctx,
+ ctx->Map.entry.Post_Unblock
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_PreviousOwnerPriority
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_NewPriority
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_RemoveHelper
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_AddHelper
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Check(
+ ctx,
+ ctx->Map.entry.Post_Suspended
+ );
+}
+
+/* Fixture node used to push and pop this test case's fixture. */
+static T_fixture_node ScoreTqReqSurrenderPriorityInherit_Node;
+
+/*
+ * Entry point of the parameterized test case.
+ *
+ * Stores the caller-provided thread queue test context, pushes the test
+ * fixture, and iterates over the full cross product of all pre-condition
+ * states.  For each combination the matching transition map entry is popped;
+ * entries marked as skipped are bypassed, all others run the
+ * prepare/test-variant/cleanup sequence.  Finally the fixture is popped.
+ */
+void ScoreTqReqSurrenderPriorityInherit_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = &ScoreTqReqSurrenderPriorityInherit_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqSurrenderPriorityInherit_Node,
+ &ScoreTqReqSurrenderPriorityInherit_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_One;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Home;
+ ctx->Map.pcs[ 3 ] < ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Vital;
+ ctx->Map.pcs[ 4 ] < ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 5 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital;
+ ctx->Map.pcs[ 5 ] < ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA;
+ ++ctx->Map.pcs[ 5 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 6 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes;
+ ctx->Map.pcs[ 6 ] < ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA;
+ ++ctx->Map.pcs[ 6 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 7 ] = ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked;
+ ctx->Map.pcs[ 7 ] < ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA;
+ ++ctx->Map.pcs[ 7 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqSurrenderPriorityInherit_PopEntry(
+ ctx
+ );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqSurrenderPriorityInherit_Prepare( ctx );
+ ScoreTqReqSurrenderPriorityInherit_TestVariant( ctx );
+ ScoreTqReqSurrenderPriorityInherit_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-surrender-priority-inherit.h b/testsuites/validation/tr-tq-surrender-priority-inherit.h
new file mode 100644
index 0000000000..b792de1b2e
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-priority-inherit.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_SURRENDER_PRIORITY_INHERIT_H
+#define _TR_TQ_SURRENDER_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_One,
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_Two,
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_More,
+ ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_SchedulerCount;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Home,
+ ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_Helping,
+ ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_UsedScheduler;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_NewPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_No,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_Suspended;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_IntendToBlock,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Dequeue;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Unblock;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Raise,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_NewPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_NewPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_AddHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Suspended;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqSurrenderPriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_SURRENDER_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-surrender.c b/testsuites/validation/tr-tq-surrender.c
new file mode 100644
index 0000000000..60091c2d0c
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender.c
@@ -0,0 +1,684 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpbarrier.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-surrender.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqSurrender spec:/score/tq/req/surrender
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_HasOwner_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_WaitState_NA : 1;
+ uint8_t Post_Dequeue : 2;
+ uint8_t Post_Unblock : 2;
+} ScoreTqReqSurrender_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/surrender test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+  CallWithinISRRequest request;
+
+ /**
+ * @brief This member contains the barrier to synchronize the runner and the
+ * worker.
+ */
+ SMP_barrier_Control barrier;
+
+ /**
+ * @brief If this member is true, then the dequeued thread shall be in the
+ * intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
+   * @brief This member contains the expected counter of worker B.
+ */
+ uint32_t expected_blocker_b_counter;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqSurrender_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqSurrender_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqSurrender_Context;
+
+static ScoreTqReqSurrender_Context
+ ScoreTqReqSurrender_Instance;
+
+static const char * const ScoreTqReqSurrender_PreDesc_HasOwner[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrender_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrender_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqSurrender_PreDesc[] = {
+ ScoreTqReqSurrender_PreDesc_HasOwner,
+ ScoreTqReqSurrender_PreDesc_Discipline,
+ ScoreTqReqSurrender_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqSurrender_Context Context;
+
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+static void Surrender( void *arg )
+{
+ Context *ctx;
+ Status_Control status;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+#if defined(RTEMS_SMP)
+static void Delay( void *arg )
+{
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+}
+#endif
+
+static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+#if defined(RTEMS_SMP)
+ ctx->request.handler = Delay;
+#else
+ ctx->request.handler = Surrender;
+#endif
+ CallWithinISRSubmit( &ctx->request );
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_HasOwner_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_HasOwner state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_HasOwner_Yes: {
+ /*
+ * Where the thread queue has a previous owner thread.
+ */
+ if ( ctx->tq_ctx->get_owner == NULL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_HasOwner_No: {
+ /*
+ * Where the thread queue has no owner threads.
+ */
+ if ( ctx->tq_ctx->get_owner != NULL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_HasOwner_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_Discipline_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_WaitState_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_WaitState_Blocked: {
+ /*
+ * While the dequeued thread is in the blocked wait state.
+ */
+ ctx->intend_to_block = false;
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the dequeued thread is in the intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_WaitState_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Post_Dequeue_Check(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Post_Dequeue_FIFO: {
+ /*
+ * The first thread in FIFO order shall be dequeued from the thread
+ * queue.
+ */
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32(
+ TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ),
+ ctx->expected_blocker_b_counter
+ );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ), 2 );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Post_Unblock_Check(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Post_Unblock state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqSurrender_Post_Unblock_Yes: {
+ /*
+       * The dequeued thread shall be unblocked by the surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Unblock_No: {
+ /*
+       * The dequeued thread shall not be unblocked by the surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Unblock_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Setup( ScoreTqReqSurrender_Context *ctx )
+{
+ ctx->request.arg = ctx;
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+
+ #if defined(RTEMS_SMP)
+ /*
+ * For the mutexes with priority ceiling protocol, we need a scheduler with
+ * two processors to set up the intend to block wait state.
+ */
+ RemoveProcessor( SCHEDULER_B_ID, 1 );
+ AddProcessor( SCHEDULER_A_ID, 1 );
+ #endif
+}
+
+static void ScoreTqReqSurrender_Setup_Wrap( void *arg )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrender_Setup( ctx );
+}
+
+static void ScoreTqReqSurrender_Teardown( ScoreTqReqSurrender_Context *ctx )
+{
+ TQReset( ctx->tq_ctx );
+
+ #if defined(RTEMS_SMP)
+ RemoveProcessor( SCHEDULER_A_ID, 1 );
+ AddProcessor( SCHEDULER_B_ID, 1 );
+ #endif
+}
+
+static void ScoreTqReqSurrender_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrender_Teardown( ctx );
+}
+
+static void ScoreTqReqSurrender_Action( ScoreTqReqSurrender_Context *ctx )
+{
+ Status_Control status;
+
+ TQResetCounter( ctx->tq_ctx );
+ ctx->expected_blocker_b_counter = 0;
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ if ( ctx->intend_to_block ) {
+ #if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+ #endif
+
+ /*
+ * In uniprocessor configurations, it is impossible to dequeue a thread
+ * in FIFO order which is in the intend to block wait state. Run this
+ * test with just one worker.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ ctx->expected_blocker_b_counter = 2;
+ }
+
+
+ #if defined(RTEMS_SMP)
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+ #endif
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+ #if defined(RTEMS_SMP)
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ Surrender( ctx );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ #endif
+ } else {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );
+
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_B );
+ ctx->expected_blocker_b_counter = 2;
+
+ Surrender( ctx );
+ }
+
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->expected_blocker_b_counter != 0 ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static const ScoreTqReqSurrender_Entry
+ScoreTqReqSurrender_Entries[] = {
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_Yes },
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
+ ScoreTqReqSurrender_Post_Unblock_NA },
+#else
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
+ ScoreTqReqSurrender_Post_Unblock_NA },
+#else
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_No },
+#endif
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_No },
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_No }
+};
+
+static const uint8_t
+ScoreTqReqSurrender_Map[] = {
+ 0, 2, 1, 3, 0, 4, 1, 5
+};
+
+static size_t ScoreTqReqSurrender_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreTqReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqSurrender_Fixture = {
+ .setup = ScoreTqReqSurrender_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqSurrender_Teardown_Wrap,
+ .scope = ScoreTqReqSurrender_Scope,
+ .initial_context = &ScoreTqReqSurrender_Instance
+};
+
+static const uint8_t ScoreTqReqSurrender_Weights[] = {
+ 4, 2, 1
+};
+
+static void ScoreTqReqSurrender_Skip(
+ ScoreTqReqSurrender_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreTqReqSurrender_Entry ScoreTqReqSurrender_PopEntry(
+ ScoreTqReqSurrender_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 3; ++i ) {
+ index += ScoreTqReqSurrender_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreTqReqSurrender_Entries[
+ ScoreTqReqSurrender_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqSurrender_TestVariant( ScoreTqReqSurrender_Context *ctx )
+{
+ ScoreTqReqSurrender_Pre_HasOwner_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrender_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrender_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreTqReqSurrender_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreTqReqSurrender_Action( ctx );
+ ScoreTqReqSurrender_Post_Dequeue_Check( ctx, ctx->Map.entry.Post_Dequeue );
+ ScoreTqReqSurrender_Post_Unblock_Check( ctx, ctx->Map.entry.Post_Unblock );
+}
+
+static T_fixture_node ScoreTqReqSurrender_Node;
+
+void ScoreTqReqSurrender_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = &ScoreTqReqSurrender_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqSurrender_Node,
+ &ScoreTqReqSurrender_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqSurrender_Pre_HasOwner_Yes;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqSurrender_Pre_HasOwner_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqSurrender_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_Blocked;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqSurrender_Pre_WaitState_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqSurrender_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqSurrender_TestVariant( ctx );
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-surrender.h b/testsuites/validation/tr-tq-surrender.h
new file mode 100644
index 0000000000..fd64bbfa02
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_SURRENDER_H
+#define _TR_TQ_SURRENDER_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqSurrender
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_HasOwner_Yes,
+ ScoreTqReqSurrender_Pre_HasOwner_No,
+ ScoreTqReqSurrender_Pre_HasOwner_NA
+} ScoreTqReqSurrender_Pre_HasOwner;
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_Discipline_FIFO,
+ ScoreTqReqSurrender_Pre_Discipline_Priority,
+ ScoreTqReqSurrender_Pre_Discipline_NA
+} ScoreTqReqSurrender_Pre_Discipline;
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_WaitState_Blocked,
+ ScoreTqReqSurrender_Pre_WaitState_IntendToBlock,
+ ScoreTqReqSurrender_Pre_WaitState_NA
+} ScoreTqReqSurrender_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Dequeue_NA
+} ScoreTqReqSurrender_Post_Dequeue;
+
+typedef enum {
+ ScoreTqReqSurrender_Post_Unblock_Yes,
+ ScoreTqReqSurrender_Post_Unblock_No,
+ ScoreTqReqSurrender_Post_Unblock_NA
+} ScoreTqReqSurrender_Post_Unblock;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqSurrender_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_SURRENDER_H */
diff --git a/testsuites/validation/tr-tq-timeout-mrsp.c b/testsuites/validation/tr-tq-timeout-mrsp.c
new file mode 100644
index 0000000000..2ee4960495
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout-mrsp.c
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeoutMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-timeout-mrsp.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqTimeoutMrsp spec:/score/tq/req/timeout-mrsp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * Transition map entry: packed pre-condition applicability flags and the
+ * expected post-condition states for one test variant.  Post_Status and
+ * Post_Unblock hold ScoreTqReqTimeoutMrsp_Post_* enumerator values.
+ */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_Scheduler_NA : 1;
+  uint8_t Pre_WaitState_NA : 1;
+  uint8_t Post_Status : 2;
+  uint8_t Post_Unblock : 1;
+} ScoreTqReqTimeoutMrsp_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/timeout-mrsp test case.
+ */
+/**
+ * @brief Test context for spec:/score/tq/req/timeout-mrsp test case.
+ */
+typedef struct {
+  /**
+   * @brief If this member is true, then the enqueued thread shall use a home
+   *   scheduler other than the home scheduler of the owner.
+   */
+  bool other_scheduler;
+
+  /**
+   * @brief If this member is true, then the processor set of the schedulers
+   *   shall be restored.
+   */
+  bool restore_scheduler;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqTimeoutMrsp_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    /* Index 0: Scheduler pre-condition, index 1: WaitState pre-condition. */
+    size_t pcs[ 2 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqTimeoutMrsp_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqTimeoutMrsp_Context;
+
+static ScoreTqReqTimeoutMrsp_Context
+ ScoreTqReqTimeoutMrsp_Instance;
+
+static const char * const ScoreTqReqTimeoutMrsp_PreDesc_Scheduler[] = {
+ "Same",
+ "Other",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutMrsp_PreDesc_WaitState[] = {
+ "IntendToBlock",
+ "ReadyAgain",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqTimeoutMrsp_PreDesc[] = {
+ ScoreTqReqTimeoutMrsp_PreDesc_Scheduler,
+ ScoreTqReqTimeoutMrsp_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqTimeoutMrsp_Context Context;
+
+/* Returns the thread of the next recorded unblock scheduler event,
+ * advancing *index past that event. */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+/* Performs the final clock tick (which fires the enqueue timeout) while
+ * scheduler events are recorded.  The argument is the test context. */
+static void Tick( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  FinalClockTick();
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+static void ScoreTqReqTimeoutMrsp_Pre_Scheduler_Prepare(
+  ScoreTqReqTimeoutMrsp_Context *ctx,
+  ScoreTqReqTimeoutMrsp_Pre_Scheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutMrsp_Pre_Scheduler_Same: {
+      /*
+       * While the home scheduler of the thread is equal to the home scheduler
+       * of the thread queue owner.
+       */
+      ctx->other_scheduler = false;
+
+      TQSetScheduler(
+        ctx->tq_ctx,
+        TQ_BLOCKER_A,
+        SCHEDULER_A_ID,
+        PRIO_LOW
+      );
+      /*
+       * Move one processor from scheduler B to scheduler A — presumably so
+       * that the blocker and the owner can run concurrently under the same
+       * scheduler (confirm against tx-thread-queue.h).  Cleanup() reverses
+       * this when restore_scheduler is set.
+       */
+      RemoveProcessor( SCHEDULER_B_ID, 1 );
+      AddProcessor( SCHEDULER_A_ID, 1 );
+      ctx->restore_scheduler = true;
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Pre_Scheduler_Other: {
+      /*
+       * While the home scheduler of the thread is not equal to the home
+       * scheduler of the thread queue owner.
+       */
+      ctx->other_scheduler = true;
+
+      TQSetScheduler(
+        ctx->tq_ctx,
+        TQ_BLOCKER_A,
+        SCHEDULER_B_ID,
+        PRIO_NORMAL
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Pre_Scheduler_NA:
+      break;
+  }
+}
+
+static void ScoreTqReqTimeoutMrsp_Pre_WaitState_Prepare(
+  ScoreTqReqTimeoutMrsp_Context *ctx,
+  ScoreTqReqTimeoutMrsp_Pre_WaitState state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutMrsp_Pre_WaitState_IntendToBlock: {
+      /*
+       * While the thread of the timeout operation is in the intend to block
+       * wait state.
+       */
+      Per_CPU_Control *cpu;
+
+      TQEnqueuePrepare( ctx->tq_ctx );
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_A,
+        TQ_EVENT_ENQUEUE
+      );
+      cpu = _Thread_Get_CPU( ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ] );
+
+      /*
+       * We have to make sure that the worker thread inserted its thread
+       * timer. Checking the intend to block wait state is not enough to
+       * ensure this.
+       */
+      while ( cpu->thread_dispatch_disable_level != 0 ) {
+        /* Wait */
+      }
+
+      /* The timeout fires while the worker still intends to block. */
+      Tick( ctx );
+      WaitForExecutionStop( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+      TQEnqueueDone( ctx->tq_ctx );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Pre_WaitState_ReadyAgain: {
+      /*
+       * While the thread of the timeout operation is in the ready again wait
+       * state.
+       */
+      TQEnqueuePrepare( ctx->tq_ctx );
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_A,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_TIMEOUT | TQ_EVENT_SURRENDER |
+          TQ_EVENT_SCHEDULER_RECORD_STOP
+      );
+      /*
+       * The worker stopped the recording via
+       * TQ_EVENT_SCHEDULER_RECORD_STOP; restart it for the post-condition
+       * checks.
+       */
+      TQSchedulerRecordStart( ctx->tq_ctx );
+      TQEnqueueDone( ctx->tq_ctx );
+      WaitForExecutionStop( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Pre_WaitState_NA:
+      break;
+  }
+}
+
+/* Checks the recorded enqueue status of TQ_BLOCKER_A against the expected
+ * post-condition state. */
+static void ScoreTqReqTimeoutMrsp_Post_Status_Check(
+  ScoreTqReqTimeoutMrsp_Context *ctx,
+  ScoreTqReqTimeoutMrsp_Post_Status state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutMrsp_Post_Status_Ok: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_SUCCESSFUL.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ TQ_BLOCKER_A ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Post_Status_Timeout: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_TIMEOUT.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ TQ_BLOCKER_A ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_TIMEOUT )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Post_Status_NA:
+      break;
+  }
+}
+
+/* Checks that no unblock scheduler event was recorded, as expected for the
+ * MrsP intend-to-block/ready-again states. */
+static void ScoreTqReqTimeoutMrsp_Post_Unblock_Check(
+  ScoreTqReqTimeoutMrsp_Context *ctx,
+  ScoreTqReqTimeoutMrsp_Post_Unblock state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  switch ( state ) {
+    case ScoreTqReqTimeoutMrsp_Post_Unblock_No: {
+      /*
+       * The thread of the timeout operation shall not be unblocked by the
+       * timeout operation.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqTimeoutMrsp_Post_Unblock_NA:
+      break;
+  }
+}
+
+/* Fixture setup: resets the thread queue test context. */
+static void ScoreTqReqTimeoutMrsp_Setup( ScoreTqReqTimeoutMrsp_Context *ctx )
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture setup wrapper: disables the action-loop scope reporting and
+ * forwards to the typed setup routine. */
+static void ScoreTqReqTimeoutMrsp_Setup_Wrap( void *arg )
+{
+  ScoreTqReqTimeoutMrsp_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeoutMrsp_Setup( ctx );
+}
+
+/* Fixture teardown: resets the thread queue test context. */
+static void ScoreTqReqTimeoutMrsp_Teardown(
+  ScoreTqReqTimeoutMrsp_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture teardown wrapper: disables the action-loop scope reporting and
+ * forwards to the typed teardown routine. */
+static void ScoreTqReqTimeoutMrsp_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqTimeoutMrsp_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeoutMrsp_Teardown( ctx );
+}
+
+/* Per-variant preparation: clear the flag which tells Cleanup() whether to
+ * restore the scheduler processor assignment. */
+static void ScoreTqReqTimeoutMrsp_Prepare( ScoreTqReqTimeoutMrsp_Context *ctx )
+{
+  ctx->restore_scheduler = false;
+}
+
+static void ScoreTqReqTimeoutMrsp_Action( ScoreTqReqTimeoutMrsp_Context *ctx )
+{
+  /*
+   * The action is performed by the ``WaitState`` pre-condition preparation.
+   * The context parameter is deliberately unused here.
+   */
+}
+
+/* Per-variant cleanup: undo the processor move done by the ``Same``
+ * scheduler pre-condition preparation. */
+static void ScoreTqReqTimeoutMrsp_Cleanup( ScoreTqReqTimeoutMrsp_Context *ctx )
+{
+  if ( ctx->restore_scheduler ) {
+    RemoveProcessor( SCHEDULER_A_ID, 1 );
+    AddProcessor( SCHEDULER_B_ID, 1 );
+  }
+}
+
+static const ScoreTqReqTimeoutMrsp_Entry
+ScoreTqReqTimeoutMrsp_Entries[] = {
+ { 0, 0, 0, ScoreTqReqTimeoutMrsp_Post_Status_Timeout,
+ ScoreTqReqTimeoutMrsp_Post_Unblock_No },
+ { 0, 0, 0, ScoreTqReqTimeoutMrsp_Post_Status_Ok,
+ ScoreTqReqTimeoutMrsp_Post_Unblock_No }
+};
+
+static const uint8_t
+ScoreTqReqTimeoutMrsp_Map[] = {
+ 0, 1, 0, 1
+};
+
+/* Provides the pre-condition state description for test reports while the
+ * action loop is running; reports nothing outside the loop. */
+static size_t ScoreTqReqTimeoutMrsp_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqTimeoutMrsp_Context *ctx;
+
+  ctx = arg;
+
+  if ( ctx->Map.in_action_loop ) {
+    return T_get_scope( ScoreTqReqTimeoutMrsp_PreDesc, buf, n, ctx->Map.pcs );
+  }
+
+  return 0;
+}
+
+static T_fixture ScoreTqReqTimeoutMrsp_Fixture = {
+ .setup = ScoreTqReqTimeoutMrsp_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqTimeoutMrsp_Teardown_Wrap,
+ .scope = ScoreTqReqTimeoutMrsp_Scope,
+ .initial_context = &ScoreTqReqTimeoutMrsp_Instance
+};
+
+/*
+ * Fetches the transition map entry for the current variant and advances the
+ * transition map index for the next one.
+ */
+static inline ScoreTqReqTimeoutMrsp_Entry ScoreTqReqTimeoutMrsp_PopEntry(
+  ScoreTqReqTimeoutMrsp_Context *ctx
+)
+{
+  size_t current;
+
+  current = ctx->Map.index;
+  ++ctx->Map.index;
+
+  return ScoreTqReqTimeoutMrsp_Entries[
+    ScoreTqReqTimeoutMrsp_Map[ current ]
+  ];
+}
+
+/* Runs one variant: applies both pre-condition preparations, performs the
+ * action, and checks both post-conditions from the transition map entry. */
+static void ScoreTqReqTimeoutMrsp_TestVariant(
+  ScoreTqReqTimeoutMrsp_Context *ctx
+)
+{
+  ScoreTqReqTimeoutMrsp_Pre_Scheduler_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreTqReqTimeoutMrsp_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreTqReqTimeoutMrsp_Action( ctx );
+  ScoreTqReqTimeoutMrsp_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+  ScoreTqReqTimeoutMrsp_Post_Unblock_Check( ctx, ctx->Map.entry.Post_Unblock );
+}
+
+static T_fixture_node ScoreTqReqTimeoutMrsp_Node;
+
+void ScoreTqReqTimeoutMrsp_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqTimeoutMrsp_Context *ctx;
+
+  /* Bind the caller-provided thread queue context to the singleton. */
+  ctx = &ScoreTqReqTimeoutMrsp_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  ctx = T_push_fixture(
+    &ScoreTqReqTimeoutMrsp_Node,
+    &ScoreTqReqTimeoutMrsp_Fixture
+  );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  /* Iterate over the cartesian product of all pre-condition states. */
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqTimeoutMrsp_Pre_Scheduler_Same;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqTimeoutMrsp_Pre_Scheduler_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreTqReqTimeoutMrsp_Pre_WaitState_IntendToBlock;
+      ctx->Map.pcs[ 1 ] < ScoreTqReqTimeoutMrsp_Pre_WaitState_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      ctx->Map.entry = ScoreTqReqTimeoutMrsp_PopEntry( ctx );
+      ScoreTqReqTimeoutMrsp_Prepare( ctx );
+      ScoreTqReqTimeoutMrsp_TestVariant( ctx );
+      ScoreTqReqTimeoutMrsp_Cleanup( ctx );
+    }
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-timeout-mrsp.h b/testsuites/validation/tr-tq-timeout-mrsp.h
new file mode 100644
index 0000000000..a08ef170c5
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout-mrsp.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeoutMrsp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_TIMEOUT_MRSP_H
+#define _TR_TQ_TIMEOUT_MRSP_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqTimeoutMrsp
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqTimeoutMrsp_Pre_Scheduler_Same,
+ ScoreTqReqTimeoutMrsp_Pre_Scheduler_Other,
+ ScoreTqReqTimeoutMrsp_Pre_Scheduler_NA
+} ScoreTqReqTimeoutMrsp_Pre_Scheduler;
+
+typedef enum {
+ ScoreTqReqTimeoutMrsp_Pre_WaitState_IntendToBlock,
+ ScoreTqReqTimeoutMrsp_Pre_WaitState_ReadyAgain,
+ ScoreTqReqTimeoutMrsp_Pre_WaitState_NA
+} ScoreTqReqTimeoutMrsp_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqTimeoutMrsp_Post_Status_Ok,
+ ScoreTqReqTimeoutMrsp_Post_Status_Timeout,
+ ScoreTqReqTimeoutMrsp_Post_Status_NA
+} ScoreTqReqTimeoutMrsp_Post_Status;
+
+typedef enum {
+ ScoreTqReqTimeoutMrsp_Post_Unblock_No,
+ ScoreTqReqTimeoutMrsp_Post_Unblock_NA
+} ScoreTqReqTimeoutMrsp_Post_Unblock;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqTimeoutMrsp_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_TIMEOUT_MRSP_H */
diff --git a/testsuites/validation/tr-tq-timeout-priority-inherit.c b/testsuites/validation/tr-tq-timeout-priority-inherit.c
new file mode 100644
index 0000000000..6e744ba7d9
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout-priority-inherit.c
@@ -0,0 +1,2154 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeoutPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-timeout-priority-inherit.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqTimeoutPriorityInherit \
+ * spec:/score/tq/req/timeout-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * Transition map entry: packed pre-condition applicability flags and the
+ * expected post-condition states for one test variant.  The Post_* fields
+ * hold ScoreTqReqTimeoutPriorityInherit_Post_* enumerator values.
+ */
+typedef struct {
+  uint32_t Skip : 1;
+  uint32_t Pre_HomeScheduler_NA : 1;
+  uint32_t Pre_EligibleScheduler_NA : 1;
+  uint32_t Pre_Queue_NA : 1;
+  uint32_t Pre_OwnerPriority_NA : 1;
+  uint32_t Pre_OwnerState_NA : 1;
+  uint32_t Pre_OwnerQueue_NA : 1;
+  uint32_t Pre_OwnerOwnerPriority_NA : 1;
+  uint32_t Pre_WaitState_NA : 1;
+  uint32_t Post_Status : 2;
+  uint32_t Post_Unblock : 2;
+  uint32_t Post_OwnerPriority : 2;
+  uint32_t Post_OwnerOwnerPriority : 2;
+} ScoreTqReqTimeoutPriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/timeout-priority-inherit test
+ * case.
+ */
+/**
+ * @brief Test context for spec:/score/tq/req/timeout-priority-inherit test
+ *   case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  /* Fixed: a stray second semicolon after this member was an empty
+   * declaration, which is invalid in strict ISO C. */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief This member specifies the scheduler of the thread.
+   */
+  rtems_id scheduler_id;
+
+  /**
+   * @brief If this member is true, then the thread shall have at least two
+   *   eligible scheduler.
+   */
+  bool other_scheduler;
+
+  /**
+   * @brief This member specifies the queue node kind.
+   */
+  TQNodeKind queue_node;
+
+  /**
+   * @brief This member specifies the owner priority node kind.
+   */
+  TQNodeKind owner_node;
+
+  /**
+   * @brief This member specifies which mutex obtain event shall be used to
+   *   block the thread queue owner.
+   */
+  rtems_event_set owner_obtain;
+
+  /**
+   * @brief This member specifies which mutex release event shall be used to
+   *   unblock the thread queue owner.
+   */
+  rtems_event_set owner_release;
+
+  /**
+   * @brief This member specifies the owner queue node kind.
+   */
+  TQNodeKind owner_queue_node;
+
+  /**
+   * @brief This member specifies the kind of the priority node of the owner
+   *   of the thread queue on which the owner of the thread queue is blocked.
+   */
+  TQNodeKind owner_owner_node;
+
+  /**
+   * @brief This member specifies the wait state.
+   */
+  TQWaitState wait_state;
+
+  /**
+   * @brief This member contains the thread queue priority.
+   */
+  rtems_task_priority queue_priority;
+
+  /**
+   * @brief This member contains the owner priority.
+   */
+  rtems_task_priority owner_priority;
+
+  /**
+   * @brief This member contains the owner priority after the timeout or
+   *   surrender.
+   */
+  rtems_task_priority owner_priority_after;
+
+  /**
+   * @brief This member contains the priority of the thread queue on which
+   *   the owner is enqueued.
+   */
+  rtems_task_priority owner_queue_priority;
+
+  /**
+   * @brief This member contains the priority of the owner of the thread
+   *   queue on which the owner is enqueued.
+   */
+  rtems_task_priority owner_owner_priority;
+
+  /**
+   * @brief This member contains the priority after the timeout or surrender
+   *   of the owner of the thread queue on which the owner is enqueued.
+   */
+  rtems_task_priority owner_owner_priority_after;
+
+  /**
+   * @brief If this member is true, then the queue helper shall surrender the
+   *   thread queue.
+   */
+  bool queue_helper_surrender;
+
+  /**
+   * @brief If this member is true, then the owner helper shall release mutex
+   *   A.
+   */
+  bool owner_helper_release;
+
+  /**
+   * @brief If this member is true, then the owner queue helper shall release
+   *   the mutex on which the owner is blocked.
+   */
+  bool owner_queue_helper_release;
+
+  /**
+   * @brief If this member is true, then helper of the owner of the mutex
+   *   which the owner blocked shall release mutex B.
+   */
+  bool owner_owner_helper_release;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqTimeoutPriorityInherit_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition indices for the next
+     *   action.
+     */
+    size_t pci[ 8 ];
+
+    /**
+     * @brief This member defines the pre-condition states for the next
+     *   action.
+     */
+    size_t pcs[ 8 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqTimeoutPriorityInherit_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqTimeoutPriorityInherit_Context;
+
+static ScoreTqReqTimeoutPriorityInherit_Context
+ ScoreTqReqTimeoutPriorityInherit_Instance;
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_HomeScheduler[] = {
+ "Home",
+ "Helping",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_EligibleScheduler[] = {
+ "One",
+ "More",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_Queue[] = {
+ "Only",
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerPriority[] = {
+ "Only",
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerState[] = {
+ "NotEnqueued",
+ "FIFO",
+ "Priority",
+ "PriorityInherit",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerQueue[] = {
+ "Only",
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerOwnerPriority[] = {
+ "Only",
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqTimeoutPriorityInherit_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
+ "ReadyAgain",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqTimeoutPriorityInherit_PreDesc[] = {
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_HomeScheduler,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_EligibleScheduler,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_Queue,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerPriority,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerState,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerQueue,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_OwnerOwnerPriority,
+ ScoreTqReqTimeoutPriorityInherit_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqTimeoutPriorityInherit_Context Context;
+
+#define THREAD TQ_BLOCKER_A
+
+#define THREAD_HELPER_A TQ_HELPER_B
+
+#define THREAD_HELPER_B TQ_HELPER_C
+
+#define QUEUE_HELPER TQ_BLOCKER_B
+
+#define OWNER TQ_BLOCKER_C
+
+#define OWNER_HELPER TQ_BLOCKER_D
+
+#define OWNER_QUEUE_HELPER TQ_BLOCKER_E
+
+#define OWNER_OWNER TQ_WORKER_F
+
+#define OWNER_OWNER_HELPER TQ_HELPER_A
+
+/* Scans the recorded scheduler events starting at *index and returns true
+ * if an unblock event for the THREAD worker is found; returns false when
+ * the event stream is exhausted. */
+static bool GetUnblock( const Context *ctx, size_t *index )
+{
+  while ( true ) {
+    const T_scheduler_event *event;
+
+    event = TQGetNextUnblock( ctx->tq_ctx, index );
+
+    if ( event == &T_scheduler_event_null ) {
+      return false;
+    }
+
+    if ( event->thread == ctx->tq_ctx->worker_tcb[ THREAD ] ) {
+      return true;
+    }
+  }
+}
+
+/* ISR request handler (see SchedulerBlock()): performs the final clock tick
+ * while scheduler events are recorded. */
+static void Tick( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  FinalClockTick();
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+/* Scheduler event handler: immediately before the THREAD worker blocks,
+ * detach this handler and submit an ISR request which runs Tick(), so the
+ * timeout fires from interrupt context right at the block operation. */
+static void SchedulerBlock(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_BLOCK &&
+    event->thread == ctx->tq_ctx->worker_tcb[ THREAD ]
+  ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->request.handler = Tick;
+    CallWithinISRSubmit( &ctx->request );
+  }
+}
+
+/* Invokes _Thread_Timeout() directly on the THREAD worker's timer watchdog
+ * while scheduler events are recorded. */
+static void ThreadTimeout( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  _Thread_Timeout(
+    &ctx->tq_ctx->worker_tcb[ THREAD ]->Timer.Watchdog
+  );
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+/* Scheduler event handler: immediately before the THREAD worker is
+ * unblocked, detach this handler and run ThreadTimeout() — on processor 1
+ * via an SMP unicast action when scheduler B is in use (the timeout must
+ * run on another processor there), otherwise via an ISR request. */
+static void SchedulerUnblock(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_UNBLOCK &&
+    event->thread == ctx->tq_ctx->worker_tcb[ THREAD ]
+  ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+
+    if ( ctx->scheduler_id == SCHEDULER_B_ID ) {
+#if defined(RTEMS_SMP)
+      _SMP_Unicast_action( 1, ThreadTimeout, ctx );
+#else
+      T_unreachable();
+#endif
+    } else {
+      ctx->request.handler = ThreadTimeout;
+      CallWithinISRSubmit( &ctx->request );
+    }
+  }
+}
+
+/* Captures the post-action priorities of the OWNER and OWNER_OWNER workers
+ * under the configured scheduler for the post-condition checks. */
+static void GetPriorities( Context *ctx )
+{
+  ctx->owner_priority_after = GetPriorityByScheduler(
+    ctx->tq_ctx->worker_id[ OWNER ],
+    ctx->scheduler_id
+  );
+  ctx->owner_owner_priority_after = GetPriorityByScheduler(
+    ctx->tq_ctx->worker_id[ OWNER_OWNER ],
+    ctx->scheduler_id
+  );
+}
+
+/*
+ * Prepares the THREAD worker so that it has the configured number of
+ * eligible schedulers.  When other_scheduler is set, helper threads on the
+ * other schedulers obtain mutex D after the thread did — presumably giving
+ * the thread helping schedulers via priority inheritance (confirm against
+ * tx-thread-queue.h).
+ */
+static void PrepareThread( const Context *ctx )
+{
+  if ( ctx->other_scheduler ) {
+    rtems_id other_scheduler_id;
+
+    /*
+     * Select a scheduler other than the thread's scheduler.  The original
+     * code assigned SCHEDULER_B_ID in both branches, which is wrong when
+     * the thread's scheduler already is scheduler B.
+     */
+    if ( ctx->scheduler_id == SCHEDULER_A_ID ) {
+      other_scheduler_id = SCHEDULER_B_ID;
+    } else {
+      other_scheduler_id = SCHEDULER_A_ID;
+    }
+
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      THREAD,
+      TQ_EVENT_MUTEX_D_OBTAIN
+    );
+
+    TQSetScheduler(
+      ctx->tq_ctx,
+      THREAD_HELPER_A,
+      other_scheduler_id,
+      PRIO_NEARLY_IDLE - 1
+    );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      THREAD_HELPER_A,
+      TQ_EVENT_MUTEX_D_OBTAIN
+    );
+
+    /* With at least three processors, add a helper on scheduler C too. */
+    if ( rtems_scheduler_get_processor_maximum() >= 3 ) {
+      TQSetScheduler(
+        ctx->tq_ctx,
+        THREAD_HELPER_B,
+        SCHEDULER_C_ID,
+        PRIO_NORMAL
+      );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        THREAD_HELPER_B,
+        TQ_EVENT_MUTEX_D_OBTAIN
+      );
+    }
+  }
+}
+
+/*
+ * Prepares the priority queue of the thread queue according to the
+ * configured queue node kind and returns the (possibly adjusted) priority.
+ * For VITAL, a helper enqueues at a numerically higher (worse) priority;
+ * for DISPENSABLE, the helper enqueues at a better priority (--priority),
+ * so the thread's own node is not the decisive one — TODO confirm the
+ * intended semantics against the spec item.
+ */
+static rtems_task_priority PrepareQueue(
+  Context *ctx,
+  rtems_task_priority priority
+)
+{
+  switch ( ctx->queue_node ) {
+    case TQ_NODE_ONLY:
+      ctx->queue_helper_surrender = false;
+      break;
+    case TQ_NODE_VITAL:
+      ctx->queue_helper_surrender = true;
+      TQSetScheduler(
+        ctx->tq_ctx,
+        QUEUE_HELPER,
+        ctx->scheduler_id,
+        priority + 1
+      );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        QUEUE_HELPER,
+        TQ_EVENT_ENQUEUE
+      );
+      break;
+    case TQ_NODE_DISPENSABLE:
+      ctx->queue_helper_surrender = true;
+      --priority;
+      TQSetScheduler(
+        ctx->tq_ctx,
+        QUEUE_HELPER,
+        ctx->scheduler_id,
+        priority
+      );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        QUEUE_HELPER,
+        TQ_EVENT_ENQUEUE
+      );
+      break;
+  }
+
+  ctx->queue_priority = priority;
+
+  return priority;
+}
+
+/*
+ * Prepares the priority of the thread queue OWNER according to the
+ * configured owner node kind and returns the (possibly adjusted) priority.
+ * For helping schedulers (scheduler_id != SCHEDULER_A_ID) the priority
+ * contribution comes from OWNER_HELPER obtaining mutex A instead of the
+ * owner's real priority.
+ */
+static rtems_task_priority PrepareOwner(
+  Context *ctx,
+  rtems_task_priority priority
+)
+{
+  switch ( ctx->owner_node ) {
+    case TQ_NODE_ONLY:
+      ctx->owner_helper_release = false;
+      TQSetPriority( ctx->tq_ctx, OWNER, PRIO_FLEXIBLE );
+      break;
+    case TQ_NODE_VITAL:
+      if ( ctx->scheduler_id == SCHEDULER_A_ID ) {
+        ctx->owner_helper_release = false;
+        TQSetPriority( ctx->tq_ctx, OWNER, priority + 1 );
+      } else {
+        ctx->owner_helper_release = true;
+        TQSetPriority( ctx->tq_ctx, OWNER, PRIO_FLEXIBLE );
+        TQSetScheduler(
+          ctx->tq_ctx,
+          OWNER_HELPER,
+          ctx->scheduler_id,
+          priority + 1
+        );
+        TQSendAndWaitForExecutionStop(
+          ctx->tq_ctx,
+          OWNER_HELPER,
+          TQ_EVENT_MUTEX_A_OBTAIN
+        );
+      }
+      break;
+    case TQ_NODE_DISPENSABLE:
+      --priority;
+
+      if ( ctx->scheduler_id == SCHEDULER_A_ID ) {
+        ctx->owner_helper_release = false;
+        TQSetPriority( ctx->tq_ctx, OWNER, priority );
+      } else {
+        ctx->owner_helper_release = true;
+        TQSetPriority( ctx->tq_ctx, OWNER, PRIO_FLEXIBLE );
+        TQSetScheduler(
+          ctx->tq_ctx,
+          OWNER_HELPER,
+          ctx->scheduler_id,
+          priority
+        );
+        TQSendAndWaitForExecutionStop(
+          ctx->tq_ctx,
+          OWNER_HELPER,
+          TQ_EVENT_MUTEX_A_OBTAIN
+        );
+      }
+      break;
+  }
+
+  ctx->owner_priority = priority;
+
+  return priority;
+}
+
+/*
+ * Prepares the priority queue of the thread queue on which the OWNER is
+ * blocked (only when owner_obtain selects such a queue) and returns the
+ * (possibly adjusted) priority.  When the owner is not enqueued
+ * (owner_obtain == 0) the owner queue priority is marked invalid.
+ */
+static rtems_task_priority PrepareOwnerQueue(
+  Context *ctx,
+  rtems_task_priority priority
+)
+{
+  if ( ctx->owner_obtain != 0 ) {
+    switch ( ctx->owner_queue_node ) {
+      case TQ_NODE_ONLY:
+        ctx->owner_queue_helper_release = false;
+        break;
+      case TQ_NODE_VITAL:
+        ctx->owner_queue_helper_release = true;
+        TQSetScheduler(
+          ctx->tq_ctx,
+          OWNER_QUEUE_HELPER,
+          ctx->scheduler_id,
+          priority + 1
+        );
+        TQSendAndWaitForExecutionStop(
+          ctx->tq_ctx,
+          OWNER_QUEUE_HELPER,
+          ctx->owner_obtain
+        );
+        break;
+      case TQ_NODE_DISPENSABLE:
+        ctx->owner_queue_helper_release = true;
+        --priority;
+        TQSetScheduler(
+          ctx->tq_ctx,
+          OWNER_QUEUE_HELPER,
+          ctx->scheduler_id,
+          priority
+        );
+        TQSendAndWaitForExecutionStop(
+          ctx->tq_ctx,
+          OWNER_QUEUE_HELPER,
+          ctx->owner_obtain
+        );
+        break;
+    }
+
+    ctx->owner_queue_priority = priority;
+  } else {
+    ctx->owner_queue_helper_release = false;
+    ctx->owner_queue_priority = PRIO_INVALID;
+  }
+
+  return priority;
+}
+
+/*
+ * Prepares the priority of the OWNER_OWNER worker (the owner of the thread
+ * queue on which the OWNER is blocked), only when owner_obtain selects such
+ * a queue; otherwise the owner-owner priority is marked invalid.  Mirrors
+ * the structure of PrepareOwner() with mutex B instead of mutex A.
+ */
+static void PrepareOwnerOwner( Context *ctx, rtems_task_priority priority )
+{
+  if ( ctx->owner_obtain != 0 ) {
+    switch ( ctx->owner_owner_node ) {
+      case TQ_NODE_ONLY:
+        ctx->owner_owner_helper_release = false;
+        TQSetPriority( ctx->tq_ctx, OWNER_OWNER, PRIO_FLEXIBLE );
+        break;
+      case TQ_NODE_VITAL:
+        if ( ctx->scheduler_id == SCHEDULER_A_ID ) {
+          ctx->owner_owner_helper_release = false;
+          TQSetPriority( ctx->tq_ctx, OWNER_OWNER, priority + 1 );
+        } else {
+          ctx->owner_owner_helper_release = true;
+          TQSetPriority( ctx->tq_ctx, OWNER_OWNER, PRIO_FLEXIBLE );
+          TQSetScheduler(
+            ctx->tq_ctx,
+            OWNER_OWNER_HELPER,
+            ctx->scheduler_id,
+            priority + 1
+          );
+          TQSendAndWaitForExecutionStop(
+            ctx->tq_ctx,
+            OWNER_OWNER_HELPER,
+            TQ_EVENT_MUTEX_B_OBTAIN
+          );
+        }
+        break;
+      case TQ_NODE_DISPENSABLE:
+        --priority;
+
+        if ( ctx->scheduler_id == SCHEDULER_A_ID ) {
+          ctx->owner_owner_helper_release = false;
+          TQSetPriority( ctx->tq_ctx, OWNER_OWNER, priority );
+        } else {
+          ctx->owner_owner_helper_release = true;
+          TQSetPriority( ctx->tq_ctx, OWNER_OWNER, PRIO_FLEXIBLE );
+          TQSetScheduler(
+            ctx->tq_ctx,
+            OWNER_OWNER_HELPER,
+            ctx->scheduler_id,
+            priority
+          );
+          TQSendAndWaitForExecutionStop(
+            ctx->tq_ctx,
+            OWNER_OWNER_HELPER,
+            TQ_EVENT_MUTEX_B_OBTAIN
+          );
+        }
+        break;
+    }
+
+    ctx->owner_owner_priority = priority;
+  } else {
+    ctx->owner_owner_helper_release = false;
+    ctx->owner_owner_priority = PRIO_INVALID;
+  }
+}
+
+/*
+ * Selects the scheduler used for the enqueueing thread.  Scheduler A is the
+ * home scheduler of the thread queue owner, scheduler B is a helping
+ * scheduler of the thread queue owner.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler state
+)
+{
+  if ( state == ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Home ) {
+    /*
+     * While the home scheduler of the thread is the home scheduler of the
+     * thread queue owner.
+     */
+    ctx->scheduler_id = SCHEDULER_A_ID;
+  } else if (
+    state == ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Helping
+  ) {
+    /*
+     * While the home scheduler of the thread is a helping scheduler of the
+     * thread queue owner.
+     */
+    ctx->scheduler_id = SCHEDULER_B_ID;
+  }
+}
+
+/* Selects whether the thread uses one or more than one eligible scheduler. */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_One: {
+      /*
+       * While the thread has exactly one eligible scheduler.
+       */
+      ctx->other_scheduler = false;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_More: {
+      /*
+       * While the thread has at least two eligible schedulers.
+       */
+      ctx->other_scheduler = true;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_NA:
+      break;
+  }
+}
+
+/* Translates the Queue precondition state into the ctx->queue_node kind. */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_Queue state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Only: {
+      /*
+       * While the priority node of the thread is the only priority node in the
+       * priority queue associated with the scheduler of the thread queue.
+       */
+      ctx->queue_node = TQ_NODE_ONLY;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Vital: {
+      /*
+       * While the priority node of the thread is not the only priority node in
+       * the priority queue associated with the scheduler of the thread queue,
+       * while the priority node of the thread is the highest priority node in
+       * the priority queue.
+       */
+      ctx->queue_node = TQ_NODE_VITAL;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Dispensable: {
+      /*
+       * While the priority node of the thread is not the only priority node in
+       * the priority queue associated with the scheduler of the thread queue,
+       * while the priority node of the thread is not the highest priority node
+       * in the priority queue.
+       */
+      ctx->queue_node = TQ_NODE_DISPENSABLE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_Queue_NA:
+      break;
+  }
+}
+
+/* Translates the OwnerPriority precondition state into ctx->owner_node. */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Only: {
+      /*
+       * While the priority node of the thread queue is the only priority node
+       * associated with the scheduler available to the owner.
+       */
+      ctx->owner_node = TQ_NODE_ONLY;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Vital: {
+      /*
+       * While the priority node of the thread queue is not the only priority
+       * node associated with the scheduler available to the owner, while the
+       * priority node of the thread queue is the highest priority node
+       * available to the owner.
+       */
+      ctx->owner_node = TQ_NODE_VITAL;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Dispensable: {
+      /*
+       * While the priority node of the thread queue is not the only priority
+       * node associated with the scheduler available to the owner, while the
+       * priority node of the thread queue is not the highest priority node
+       * available to the owner.
+       */
+      ctx->owner_node = TQ_NODE_DISPENSABLE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_NA:
+      break;
+  }
+}
+
+/*
+ * Selects the mutex obtain/release event pair which brings the thread queue
+ * owner into the desired enqueued state (not enqueued, FIFO, priority, or
+ * priority with priority inheritance).
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NotEnqueued: {
+      /*
+       * While the owner of the thread queue is not enqueued on a thread queue.
+       */
+      ctx->owner_obtain = 0;
+      ctx->owner_release = 0;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_FIFO: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * FIFO order.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_FIFO_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_FIFO_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_Priority: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * priority order.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_PriorityInherit: {
+      /*
+       * While the owner of the thread queue is enqueued on a thread queue in
+       * priority order with priority inheritance.
+       */
+      ctx->owner_obtain = TQ_EVENT_MUTEX_C_OBTAIN;
+      ctx->owner_release = TQ_EVENT_MUTEX_C_RELEASE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NA:
+      break;
+  }
+}
+
+/* Translates the OwnerQueue precondition state into ctx->owner_queue_node. */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Only: {
+      /*
+       * While the priority node of the owner is the only priority node in the
+       * priority queue associated with the scheduler of the thread queue on
+       * which the owner is enqueued.
+       */
+      ctx->owner_queue_node = TQ_NODE_ONLY;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Vital: {
+      /*
+       * While the priority node of the owner is not the only priority node in
+       * the priority queue associated with the scheduler of the thread queue
+       * on which the owner is enqueued, while the priority node of the owner
+       * is the highest priority node in the priority queue.
+       */
+      ctx->owner_queue_node = TQ_NODE_VITAL;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Dispensable: {
+      /*
+       * While the priority node of the owner is not the only priority node in
+       * the priority queue associated with the scheduler of the thread queue
+       * on which the owner is enqueued, while the priority node of the owner
+       * is not the highest priority node in the priority queue.
+       */
+      ctx->owner_queue_node = TQ_NODE_DISPENSABLE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_NA:
+      break;
+  }
+}
+
+/*
+ * Translates the OwnerOwnerPriority precondition state into
+ * ctx->owner_owner_node.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Only: {
+      /*
+       * While the priority node of the thread queue on which the owner is
+       * enqueued is the only priority node associated with the scheduler
+       * available to the owner of the thread queue on which the owner is
+       * enqueued.
+       */
+      ctx->owner_owner_node = TQ_NODE_ONLY;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Vital: {
+      /*
+       * While the priority node of the thread queue on which the owner is
+       * enqueued is not the only priority node associated with the scheduler
+       * available to the owner of the thread queue on which the owner is
+       * enqueued, while the priority node of the thread queue on which the
+       * owner is enqueued is the highest priority node available to the owner
+       * of the thread queue on which the owner is enqueued.
+       */
+      ctx->owner_owner_node = TQ_NODE_VITAL;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Dispensable: {
+      /*
+       * While the priority node of the thread queue on which the owner is
+       * enqueued is not the only priority node associated with the scheduler
+       * available to the owner of the thread queue on which the owner is
+       * enqueued, while the priority node of the thread queue on which the
+       * owner is enqueued is not the highest priority node available to the
+       * owner of the thread queue on which the owner is enqueued.
+       */
+      ctx->owner_owner_node = TQ_NODE_DISPENSABLE;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_NA:
+      break;
+  }
+}
+
+/* Selects the wait state in which the timeout shall hit the thread. */
+static void ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Pre_WaitState state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_Blocked: {
+      /*
+       * While the thread is in the blocked wait state.
+       */
+      ctx->wait_state = TQ_WAIT_STATE_BLOCKED;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_IntendToBlock: {
+      /*
+       * While the thread is in the intend to block wait state.
+       */
+      ctx->wait_state = TQ_WAIT_STATE_INTEND_TO_BLOCK;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_ReadyAgain: {
+      /*
+       * While the thread is in the ready again wait state.
+       */
+      ctx->wait_state = TQ_WAIT_STATE_READY_AGAIN;
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_NA:
+      break;
+  }
+}
+
+/* Checks that the enqueue operation of THREAD ended with the expected status. */
+static void ScoreTqReqTimeoutPriorityInherit_Post_Status_Check(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Post_Status state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_SUCCESSFUL.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ THREAD ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_TIMEOUT.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ THREAD ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_TIMEOUT )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_Status_NA:
+      break;
+  }
+}
+
+/*
+ * Checks whether the recorded scheduler operations contain exactly one
+ * unblock of the thread (Yes) or none at all (No).
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Check(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Post_Unblock state
+)
+{
+  size_t index = 0;
+
+  if ( state == ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes ) {
+    /*
+     * The thread shall be unblocked by the timeout operation.
+     */
+    T_true( GetUnblock( ctx, &index ) );
+    T_false( GetUnblock( ctx, &index ) );
+  } else if ( state == ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No ) {
+    /*
+     * The thread shall not be unblocked by the timeout operation.
+     */
+    T_false( GetUnblock( ctx, &index ) );
+  }
+}
+
+/*
+ * Checks the priority of the thread queue owner after the timeout operation
+ * against the priority captured before it.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Check(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop: {
+      /*
+       * The priority of the owner with respect to the scheduler shall not
+       * change by the timeout operation.
+       */
+      T_eq_u32( ctx->owner_priority_after, ctx->owner_priority );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower: {
+      /*
+       * The priority of the owner with respect to the scheduler shall be
+       * lowered to the next highest priority.
+       */
+      T_eq_u32( ctx->owner_priority_after, ctx->owner_priority + 1 );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop: {
+      /*
+       * The owner shall not have a priority with respect to the scheduler.
+       */
+      T_eq_u32( ctx->owner_priority_after, PRIO_INVALID );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the priority of the owner of the thread queue on which the owner is
+ * enqueued after the timeout operation against the priority captured before
+ * it.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Check(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx,
+  ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop: {
+      /*
+       * The priority of the owner of the thread queue on which the owner is
+       * enqueued with respect to the scheduler shall not change by the timeout
+       * operation.
+       */
+      T_eq_u32( ctx->owner_owner_priority_after, ctx->owner_owner_priority );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower: {
+      /*
+       * The priority of the owner of the thread queue on which the owner is
+       * enqueued with respect to the scheduler shall be lowered to the next
+       * highest priority.
+       */
+      T_eq_u32( ctx->owner_owner_priority_after, ctx->owner_owner_priority + 1 );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Drop: {
+      /*
+       * The owner of the thread queue on which the owner is enqueued shall not
+       * have a priority with respect to the scheduler.
+       */
+      T_eq_u32( ctx->owner_owner_priority_after, PRIO_INVALID );
+      break;
+    }
+
+    case ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA:
+      break;
+  }
+}
+
+/* Test suite setup: initialize the harness once before the action loop. */
+static void ScoreTqReqTimeoutPriorityInherit_Setup(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+  /* Make the context reachable through the request argument. */
+  ctx->request.arg = ctx;
+  TQReset( ctx->tq_ctx );
+  /* Run the test runner nearly at idle priority. */
+  SetSelfPriority( PRIO_NEARLY_IDLE );
+}
+
+/* Fixture adapter: clears the action loop flag and forwards to Setup(). */
+static void ScoreTqReqTimeoutPriorityInherit_Setup_Wrap( void *arg )
+{
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeoutPriorityInherit_Setup( ctx );
+}
+
+/* Test suite teardown: restore the thread queue test harness. */
+static void ScoreTqReqTimeoutPriorityInherit_Teardown(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* Fixture adapter: clears the action loop flag and forwards to Teardown(). */
+static void ScoreTqReqTimeoutPriorityInherit_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeoutPriorityInherit_Teardown( ctx );
+}
+
+/*
+ * Per-variant preparation: assume no helper thread needs a release or
+ * surrender in the cleanup step; the prepare helpers of the action set these
+ * flags as required.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Prepare(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+  ctx->owner_owner_helper_release = false;
+  ctx->owner_queue_helper_release = false;
+  ctx->owner_helper_release = false;
+  ctx->queue_helper_surrender = false;
+}
+
+/*
+ * Performs one test variant: builds the thread/owner/owner-owner setup
+ * selected by the preconditions, lets the timed enqueue of THREAD run in the
+ * selected wait state, and captures the resulting priorities.  The statement
+ * order is significant throughout.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Action(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+  rtems_task_priority priority;
+
+  priority = PRIO_FLEXIBLE;
+  TQSetScheduler( ctx->tq_ctx, THREAD, ctx->scheduler_id, priority );
+
+  /* OWNER obtains mutex A and enqueues, becoming the thread queue owner. */
+  TQSend(
+    ctx->tq_ctx,
+    OWNER,
+    TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_ENQUEUE
+  );
+
+  if ( ctx->owner_obtain != 0 ) {
+    /*
+     * Enqueue OWNER on the thread queue selected by the OwnerState
+     * precondition, which is held by OWNER_OWNER (who also holds mutex B).
+     */
+    TQSend(
+      ctx->tq_ctx,
+      OWNER_OWNER,
+      TQ_EVENT_MUTEX_B_OBTAIN | ctx->owner_obtain
+    );
+    TQSend( ctx->tq_ctx, OWNER, ctx->owner_obtain | ctx->owner_release );
+  }
+
+  /*
+   * Each prepare step may consume one priority level; the chained value
+   * keeps the relative priority order of all involved nodes consistent.
+   */
+  PrepareThread( ctx );
+  priority = PrepareQueue( ctx, priority );
+  priority = PrepareOwner( ctx, priority );
+  priority = PrepareOwnerQueue( ctx, priority );
+  PrepareOwnerOwner( ctx, priority );
+
+  TQClearDone( ctx->tq_ctx, THREAD );
+
+  switch ( ctx->wait_state ) {
+    case TQ_WAIT_STATE_BLOCKED:
+      /* Let THREAD block, then fire the timeout through a clock tick. */
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        THREAD,
+        TQ_EVENT_ENQUEUE_TIMED
+      );
+      Tick( ctx );
+      GetPriorities( ctx );
+      TQSend( ctx->tq_ctx, OWNER, TQ_EVENT_SURRENDER );
+      break;
+    case TQ_WAIT_STATE_INTEND_TO_BLOCK:
+      /* Intercept the block operation via the scheduler event handler. */
+      T_scheduler_set_event_handler( SchedulerBlock, ctx );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        THREAD,
+        TQ_EVENT_ENQUEUE_TIMED
+      );
+      GetPriorities( ctx );
+      TQSend( ctx->tq_ctx, OWNER, TQ_EVENT_SURRENDER );
+      break;
+    case TQ_WAIT_STATE_READY_AGAIN:
+      /* Surrender first; the unblock handler runs during the surrender. */
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        THREAD,
+        TQ_EVENT_ENQUEUE_TIMED
+      );
+      T_scheduler_set_event_handler( SchedulerUnblock, ctx );
+      TQSend( ctx->tq_ctx, OWNER, TQ_EVENT_SURRENDER );
+      GetPriorities( ctx );
+      TQSend( ctx->tq_ctx, THREAD, TQ_EVENT_SURRENDER );
+      break;
+  }
+
+  TQWaitForDone( ctx->tq_ctx, THREAD );
+  TQWaitForExecutionStop( ctx->tq_ctx, THREAD );
+}
+
+/*
+ * Undoes the setup performed by the action in reverse dependency order:
+ * first the owner-owner level, then the owner level, then the queue level,
+ * and finally the extra eligible scheduler of THREAD.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_Cleanup(
+  ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+  if ( ctx->owner_obtain != 0 ) {
+    /* Release the mutexes which made OWNER wait on a thread queue. */
+    TQSend(
+      ctx->tq_ctx,
+      OWNER_OWNER,
+      TQ_EVENT_MUTEX_B_RELEASE | ctx->owner_release
+    );
+
+    if ( ctx->owner_queue_helper_release ) {
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        OWNER_QUEUE_HELPER,
+        ctx->owner_release
+      );
+    }
+
+    if ( ctx->owner_owner_helper_release ) {
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        OWNER_OWNER_HELPER,
+        TQ_EVENT_MUTEX_B_RELEASE
+      );
+    }
+  }
+
+  TQSend( ctx->tq_ctx, OWNER, TQ_EVENT_MUTEX_A_RELEASE );
+
+  if ( ctx->queue_helper_surrender ) {
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      QUEUE_HELPER,
+      TQ_EVENT_SURRENDER
+    );
+  }
+
+  if ( ctx->owner_helper_release ) {
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      OWNER_HELPER,
+      TQ_EVENT_MUTEX_A_RELEASE
+    );
+  }
+
+  if ( ctx->other_scheduler ) {
+    /* Drop the additional eligible scheduler gained through mutex D. */
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      THREAD,
+      TQ_EVENT_MUTEX_D_RELEASE
+    );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      THREAD_HELPER_A,
+      TQ_EVENT_MUTEX_D_RELEASE
+    );
+
+    /* THREAD_HELPER_B is only used if a third processor is available. */
+    if ( rtems_scheduler_get_processor_maximum() >= 3 ) {
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        THREAD_HELPER_B,
+        TQ_EVENT_MUTEX_D_RELEASE
+      );
+    }
+  }
+}
+
+static const ScoreTqReqTimeoutPriorityInherit_Entry
+ScoreTqReqTimeoutPriorityInherit_Entries[] = {
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+#endif
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 0, 1, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower },
+ { 0, 0, 0, 0, 0, 0, 1, 1, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Drop },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqTimeoutPriorityInherit_Post_Status_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA }
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Drop }
+#endif
+};
+
+static const uint8_t
+ScoreTqReqTimeoutPriorityInherit_Map[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 24, 25, 31, 24, 25, 31, 2, 2, 2, 24, 25, 31, 24, 25, 31,
+ 2, 2, 2, 1, 1, 1, 24, 25, 31, 2, 2, 2, 24, 25, 11, 24, 25, 11, 2, 2, 2, 24,
+ 25, 11, 24, 25, 11, 2, 2, 2, 1, 1, 1, 24, 25, 11, 2, 2, 2, 32, 33, 11, 32,
+ 33, 11, 2, 2, 2, 32, 33, 11, 32, 33, 11, 2, 2, 2, 1, 1, 1, 32, 33, 11, 2, 2,
+ 2, 44, 45, 11, 36, 37, 11, 2, 2, 2, 44, 45, 11, 36, 37, 11, 2, 2, 2, 1, 1, 1,
+ 36, 37, 11, 2, 2, 2, 1, 1, 1, 29, 30, 46, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 1, 1, 1, 29, 30, 46, 2, 2, 2, 1, 1, 1, 29, 30, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 1, 1, 1, 29, 30, 11, 2, 2, 2, 1, 1, 1, 38, 39, 11, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 1, 1, 1, 38, 39, 11, 2, 2, 2, 1, 1, 1, 40, 41, 11, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 40, 41, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 24, 25,
+ 31, 24, 25, 31, 2, 2, 2, 24, 25, 31, 24, 25, 31, 2, 2, 2, 1, 1, 1, 24, 25,
+ 31, 2, 2, 2, 24, 25, 11, 24, 25, 11, 2, 2, 2, 24, 25, 11, 24, 25, 11, 2, 2,
+ 2, 1, 1, 1, 24, 25, 11, 2, 2, 2, 32, 33, 11, 32, 33, 11, 2, 2, 2, 32, 33, 11,
+ 32, 33, 11, 2, 2, 2, 1, 1, 1, 32, 33, 11, 2, 2, 2, 44, 45, 11, 36, 37, 11, 2,
+ 2, 2, 44, 45, 11, 36, 37, 11, 2, 2, 2, 1, 1, 1, 36, 37, 11, 2, 2, 2, 1, 1, 1,
+ 29, 30, 46, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 29, 30, 46, 2, 2, 2,
+ 1, 1, 1, 29, 30, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 29, 30, 11,
+ 2, 2, 2, 1, 1, 1, 38, 39, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1,
+ 38, 39, 11, 2, 2, 2, 1, 1, 1, 40, 41, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 1, 1, 1, 40, 41, 11, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 29, 30, 11, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 29, 30, 11, 2, 2, 2, 1, 1, 1, 29,
+ 30, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 29, 30, 11, 2, 2, 2, 1,
+ 1, 1, 38, 39, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 38, 39, 11, 2,
+ 2, 2, 1, 1, 1, 40, 41, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 40,
+ 41, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0,
+ 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0,
+ 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 0, 0, 0, 3, 3, 3, 3, 3, 3, 5, 6, 12, 5, 6, 12, 3, 3, 3, 5, 6, 12, 5, 6,
+ 12, 3, 3, 3, 0, 0, 0, 5, 6, 12, 3, 3, 3, 5, 6, 4, 5, 6, 4, 3, 3, 3, 5, 6, 4,
+ 5, 6, 4, 3, 3, 3, 0, 0, 0, 5, 6, 4, 3, 3, 3, 9, 10, 4, 9, 10, 4, 3, 3, 3, 9,
+ 10, 4, 9, 10, 4, 3, 3, 3, 0, 0, 0, 9, 10, 4, 3, 3, 3, 22, 23, 4, 20, 21, 4,
+ 3, 3, 3, 22, 23, 4, 20, 21, 4, 3, 3, 3, 0, 0, 0, 20, 21, 4, 3, 3, 3, 0, 0, 0,
+ 7, 8, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 26, 3, 3, 3, 0,
+ 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 4, 3, 3, 3,
+ 0, 0, 0, 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 13, 14, 4,
+ 3, 3, 3, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 15,
+ 16, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0,
+ 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0,
+ 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 0, 0, 0, 3, 3, 3, 3, 3, 3, 5, 6, 12, 5, 6, 12, 3, 3, 3, 5, 6, 12, 5, 6,
+ 12, 3, 3, 3, 0, 0, 0, 5, 6, 12, 3, 3, 3, 5, 6, 4, 5, 6, 4, 3, 3, 3, 5, 6, 4,
+ 5, 6, 4, 3, 3, 3, 0, 0, 0, 5, 6, 4, 3, 3, 3, 9, 10, 4, 9, 10, 4, 3, 3, 3, 9,
+ 10, 4, 9, 10, 4, 3, 3, 3, 0, 0, 0, 9, 10, 4, 3, 3, 3, 22, 23, 4, 20, 21, 4,
+ 3, 3, 3, 22, 23, 4, 20, 21, 4, 3, 3, 3, 0, 0, 0, 20, 21, 4, 3, 3, 3, 0, 0, 0,
+ 7, 8, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 26, 3, 3, 3, 0,
+ 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 4, 3, 3, 3,
+ 0, 0, 0, 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 13, 14, 4,
+ 3, 3, 3, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 15,
+ 16, 4, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0,
+ 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0,
+ 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3,
+ 0, 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3,
+ 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 4, 3, 3, 3, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 7, 8, 4, 3, 3, 3, 0, 0, 0, 13, 14, 4, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 13, 14, 4, 3, 3, 3, 0, 0, 0, 15, 16, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 15, 16, 4, 17, 18, 19, 17, 18,
+ 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 0, 0, 0, 17,
+ 18, 19, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17,
+ 18, 4, 0, 0, 0, 17, 18, 4, 27, 28, 4, 27, 28, 4, 27, 28, 4, 27, 28, 4, 27,
+ 28, 4, 27, 28, 4, 27, 28, 4, 0, 0, 0, 27, 28, 4, 47, 48, 4, 42, 43, 4, 34,
+ 35, 4, 42, 43, 4, 42, 43, 4, 34, 35, 4, 34, 35, 4, 0, 0, 0, 34, 35, 4, 5, 6,
+ 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 0, 0, 0, 5,
+ 6, 12, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 0, 0,
+ 0, 5, 6, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9,
+ 10, 4, 0, 0, 0, 9, 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4, 22, 23,
+ 4, 20, 21, 4, 20, 21, 4, 0, 0, 0, 20, 21, 4, 7, 8, 26, 0, 0, 0, 7, 8, 26, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 26, 0, 0, 0, 7, 8, 26, 7, 8, 4, 0, 0, 0, 7, 8,
+ 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0,
+ 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16,
+ 4, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16,
+ 4, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 0,
+ 0, 0, 5, 6, 19, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6,
+ 4, 0, 0, 0, 5, 6, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10,
+ 4, 9, 10, 4, 0, 0, 0, 9, 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4,
+ 22, 23, 4, 20, 21, 4, 20, 21, 4, 0, 0, 0, 20, 21, 4, 5, 6, 12, 5, 6, 12, 5,
+ 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 0, 0, 0, 5, 6, 12, 5, 6, 4, 5,
+ 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 0, 0, 0, 5, 6, 4, 9, 10,
+ 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 0, 0, 0, 9,
+ 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 20,
+ 21, 4, 0, 0, 0, 20, 21, 4, 7, 8, 26, 0, 0, 0, 7, 8, 26, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 7, 8, 26, 0, 0, 0, 7, 8, 26, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0, 13, 14, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16, 4, 0, 0, 0, 15,
+ 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16, 4, 7, 8, 4, 0,
+ 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 7, 8, 4,
+ 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13,
+ 14, 4, 0, 0, 0, 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13,
+ 14, 4, 15, 16, 4, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4,
+ 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0, 13, 14, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16, 4, 0, 0, 0, 15,
+ 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16, 4, 17, 18, 19,
+ 17, 18, 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 17, 18, 19, 0, 0,
+ 0, 17, 18, 19, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18, 4, 17, 18,
+ 4, 17, 18, 4, 0, 0, 0, 17, 18, 4, 27, 28, 4, 27, 28, 4, 27, 28, 4, 27, 28, 4,
+ 27, 28, 4, 27, 28, 4, 27, 28, 4, 0, 0, 0, 27, 28, 4, 47, 48, 4, 42, 43, 4,
+ 34, 35, 4, 42, 43, 4, 42, 43, 4, 34, 35, 4, 34, 35, 4, 0, 0, 0, 34, 35, 4, 5,
+ 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 0, 0, 0,
+ 5, 6, 12, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 0,
+ 0, 0, 5, 6, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9,
+ 10, 4, 0, 0, 0, 9, 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4, 22, 23,
+ 4, 20, 21, 4, 20, 21, 4, 0, 0, 0, 20, 21, 4, 7, 8, 26, 0, 0, 0, 7, 8, 26, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 26, 0, 0, 0, 7, 8, 26, 7, 8, 4, 0, 0, 0, 7, 8,
+ 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0,
+ 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16,
+ 4, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16,
+ 4, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 5, 6, 19, 0,
+ 0, 0, 5, 6, 19, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6,
+ 4, 0, 0, 0, 5, 6, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10,
+ 4, 9, 10, 4, 0, 0, 0, 9, 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4,
+ 22, 23, 4, 20, 21, 4, 20, 21, 4, 0, 0, 0, 20, 21, 4, 5, 6, 12, 5, 6, 12, 5,
+ 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 5, 6, 12, 0, 0, 0, 5, 6, 12, 5, 6, 4, 5,
+ 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 0, 0, 0, 5, 6, 4, 9, 10,
+ 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 9, 10, 4, 0, 0, 0, 9,
+ 10, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 22, 23, 4, 22, 23, 4, 20, 21, 4, 20,
+ 21, 4, 0, 0, 0, 20, 21, 4, 7, 8, 26, 0, 0, 0, 7, 8, 26, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 7, 8, 26, 0, 0, 0, 7, 8, 26, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0, 13, 14, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16, 4, 0, 0, 0, 15,
+ 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16, 4, 7, 8, 4, 0,
+ 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 7, 8, 4,
+ 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13,
+ 14, 4, 0, 0, 0, 13, 14, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13,
+ 14, 4, 15, 16, 4, 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4,
+ 0, 0, 0, 15, 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 7, 8, 4, 0, 0, 0, 7, 8, 4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 7, 8, 4, 0, 0, 0, 7, 8, 4, 13, 14, 4, 0, 0, 0, 13, 14, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 13, 14, 4, 0, 0, 0, 13, 14, 4, 15, 16, 4, 0, 0, 0, 15,
+ 16, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 16, 4, 0, 0, 0, 15, 16, 4
+};
+
+/*
+ * Fixture scope callback: while the action loop is running, writes the
+ * textual description of the current pre-condition states (pcs) into buf
+ * (at most n bytes) via T_get_scope() for test failure reports; outside of
+ * the action loop it produces nothing and returns 0.
+ */
+static size_t ScoreTqReqTimeoutPriorityInherit_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ ScoreTqReqTimeoutPriorityInherit_Context *ctx;
+
+ ctx = arg;
+
+ /* Pre-condition states are only meaningful inside the action loop */
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqTimeoutPriorityInherit_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+/*
+ * Test fixture: wires the generated setup/teardown wrappers and the scope
+ * reporter to the single static context instance of this test case.
+ */
+static T_fixture ScoreTqReqTimeoutPriorityInherit_Fixture = {
+ .setup = ScoreTqReqTimeoutPriorityInherit_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqTimeoutPriorityInherit_Teardown_Wrap,
+ .scope = ScoreTqReqTimeoutPriorityInherit_Scope,
+ .initial_context = &ScoreTqReqTimeoutPriorityInherit_Instance
+};
+
+/*
+ * Returns the transition-map entry for the current pre-condition state
+ * combination and advances the map index to the next combination.  The
+ * Map[] table translates the combination index into an index of the
+ * (deduplicated) Entries[] table.
+ */
+static inline ScoreTqReqTimeoutPriorityInherit_Entry
+ScoreTqReqTimeoutPriorityInherit_PopEntry(
+ ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return ScoreTqReqTimeoutPriorityInherit_Entries[
+ ScoreTqReqTimeoutPriorityInherit_Map[ index ]
+ ];
+}
+
+/*
+ * Derives the effective pre-condition states (pcs) from the loop indices
+ * (pci) chosen by ScoreTqReqTimeoutPriorityInherit_Run().  Pre-conditions
+ * which the current entry marks as not applicable are forced to their
+ * _NA state; all others are taken over unchanged.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_SetPreConditionStates(
+ ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+
+ /* OwnerQueue is not applicable for some entries (e.g. no owner queue) */
+ if ( ctx->Map.entry.Pre_OwnerQueue_NA ) {
+ ctx->Map.pcs[ 5 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_NA;
+ } else {
+ ctx->Map.pcs[ 5 ] = ctx->Map.pci[ 5 ];
+ }
+
+ /* OwnerOwnerPriority is likewise entry-dependent */
+ if ( ctx->Map.entry.Pre_OwnerOwnerPriority_NA ) {
+ ctx->Map.pcs[ 6 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_NA;
+ } else {
+ ctx->Map.pcs[ 6 ] = ctx->Map.pci[ 6 ];
+ }
+
+ ctx->Map.pcs[ 7 ] = ctx->Map.pci[ 7 ];
+}
+
+/*
+ * Executes a single test variant: prepares all eight pre-conditions from
+ * the effective states, performs the test case action, and checks the four
+ * post-conditions against the expectations of the current map entry.
+ */
+static void ScoreTqReqTimeoutPriorityInherit_TestVariant(
+ ScoreTqReqTimeoutPriorityInherit_Context *ctx
+)
+{
+ /* Establish the pre-condition states in specification order */
+ ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 3 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_Prepare(
+ ctx,
+ ctx->Map.pcs[ 4 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Prepare(
+ ctx,
+ ctx->Map.pcs[ 5 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 6 ]
+ );
+ ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_Prepare(
+ ctx,
+ ctx->Map.pcs[ 7 ]
+ );
+ /* Perform the action of the test case */
+ ScoreTqReqTimeoutPriorityInherit_Action( ctx );
+ /* Verify the expected post-condition states */
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Check(
+ ctx,
+ ctx->Map.entry.Post_Unblock
+ );
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerPriority
+ );
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerOwnerPriority
+ );
+}
+
+/* Fixture node used to push and pop this test case's fixture */
+static T_fixture_node ScoreTqReqTimeoutPriorityInherit_Node;
+
+/*
+ * Runs the parameterized test case: iterates over the Cartesian product of
+ * all pre-condition states, pops the transition-map entry for each
+ * combination, and executes the variant unless the entry is marked Skip.
+ */
+void ScoreTqReqTimeoutPriorityInherit_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqTimeoutPriorityInherit_Context *ctx;
+
+ ctx = &ScoreTqReqTimeoutPriorityInherit_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ /* Use the context returned by T_push_fixture() from here on */
+ ctx = T_push_fixture(
+ &ScoreTqReqTimeoutPriorityInherit_Node,
+ &ScoreTqReqTimeoutPriorityInherit_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ /*
+ * One nested loop per pre-condition; the loop order matches the index
+ * order of the Map[] table and of the pci[]/pcs[] arrays.
+ */
+ for (
+ ctx->Map.pci[ 0 ] = ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Home;
+ ctx->Map.pci[ 0 ] < ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_One;
+ ctx->Map.pci[ 1 ] < ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Only;
+ ctx->Map.pci[ 2 ] < ScoreTqReqTimeoutPriorityInherit_Pre_Queue_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Only;
+ ctx->Map.pci[ 3 ] < ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NotEnqueued;
+ ctx->Map.pci[ 4 ] < ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ for (
+ ctx->Map.pci[ 5 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Only;
+ ctx->Map.pci[ 5 ] < ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_NA;
+ ++ctx->Map.pci[ 5 ]
+ ) {
+ for (
+ ctx->Map.pci[ 6 ] = ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Only;
+ ctx->Map.pci[ 6 ] < ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_NA;
+ ++ctx->Map.pci[ 6 ]
+ ) {
+ for (
+ ctx->Map.pci[ 7 ] = ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_Blocked;
+ ctx->Map.pci[ 7 ] < ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_NA;
+ ++ctx->Map.pci[ 7 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqTimeoutPriorityInherit_PopEntry(
+ ctx
+ );
+
+ /* Infeasible combinations are marked Skip in the entry table */
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqTimeoutPriorityInherit_SetPreConditionStates(
+ ctx
+ );
+ ScoreTqReqTimeoutPriorityInherit_Prepare( ctx );
+ ScoreTqReqTimeoutPriorityInherit_TestVariant( ctx );
+ ScoreTqReqTimeoutPriorityInherit_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-timeout-priority-inherit.h b/testsuites/validation/tr-tq-timeout-priority-inherit.h
new file mode 100644
index 0000000000..a75665f659
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout-priority-inherit.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeoutPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_TIMEOUT_PRIORITY_INHERIT_H
+#define _TR_TQ_TIMEOUT_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqTimeoutPriorityInherit
+ *
+ * @{
+ */
+
+/*
+ * States of the HomeScheduler pre-condition.  In each of the following
+ * enums the trailing _NA member marks "not applicable" and also serves as
+ * the state count / loop bound in the generated test runner.
+ */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Home,
+ ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_Helping,
+ ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_HomeScheduler;
+
+/* States of the EligibleScheduler pre-condition (one or more schedulers). */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_One,
+ ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_More,
+ ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_EligibleScheduler;
+
+/* States of the Queue pre-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Only,
+ ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Vital,
+ ScoreTqReqTimeoutPriorityInherit_Pre_Queue_Dispensable,
+ ScoreTqReqTimeoutPriorityInherit_Pre_Queue_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_Queue;
+
+/* States of the OwnerPriority pre-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Only,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Vital,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_Dispensable,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_OwnerPriority;
+
+/* States of the OwnerState pre-condition (enqueue state of the owner). */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NotEnqueued,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_FIFO,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_Priority,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_PriorityInherit,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_OwnerState;
+
+/* States of the OwnerQueue pre-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Only,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Vital,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_Dispensable,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_OwnerQueue;
+
+/* States of the OwnerOwnerPriority pre-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Only,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Vital,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_Dispensable,
+ ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_OwnerOwnerPriority;
+
+/* States of the WaitState pre-condition (wait state of the thread). */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_Blocked,
+ ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_IntendToBlock,
+ ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_ReadyAgain,
+ ScoreTqReqTimeoutPriorityInherit_Pre_WaitState_NA
+} ScoreTqReqTimeoutPriorityInherit_Pre_WaitState;
+
+/*
+ * Expected outcomes of the Status post-condition.  In each of the
+ * following enums the trailing _NA member marks "not applicable".
+ */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Ok,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_Timeout,
+ ScoreTqReqTimeoutPriorityInherit_Post_Status_NA
+} ScoreTqReqTimeoutPriorityInherit_Post_Status;
+
+/* Expected outcomes of the Unblock post-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_No,
+ ScoreTqReqTimeoutPriorityInherit_Post_Unblock_NA
+} ScoreTqReqTimeoutPriorityInherit_Post_Unblock;
+
+/* Expected outcomes of the OwnerPriority post-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority_NA
+} ScoreTqReqTimeoutPriorityInherit_Post_OwnerPriority;
+
+/* Expected outcomes of the OwnerOwnerPriority post-condition. */
+typedef enum {
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Nop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Lower,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_Drop,
+ ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority_NA
+} ScoreTqReqTimeoutPriorityInherit_Post_OwnerOwnerPriority;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqTimeoutPriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_TIMEOUT_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-timeout.c b/testsuites/validation/tr-tq-timeout.c
new file mode 100644
index 0000000000..5575abdd57
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout.c
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeout
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-timeout.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqTimeout spec:/score/tq/req/timeout
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * One row of the transition map: the Skip and NA flags plus the expected
+ * post-condition states, encoded as bit-fields holding values of the
+ * corresponding Post enumerations declared in tr-tq-timeout.h.
+ */
+typedef struct {
+  uint8_t Skip : 1;
+  uint8_t Pre_WaitState_NA : 1;
+  uint8_t Post_Status : 2;
+  uint8_t Post_Unblock : 2;
+} ScoreTqReqTimeout_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/timeout test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief This member contains a copy of the corresponding
+   *   ScoreTqReqTimeout_Run() parameter.
+   */
+  TQContext *tq_ctx;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 1 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreTqReqTimeout_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreTqReqTimeout_Context;
+
+/* Singleton context instance; installed as the fixture's initial context. */
+static ScoreTqReqTimeout_Context
+  ScoreTqReqTimeout_Instance;
+
+/* Human-readable names of the WaitState pre-condition states, in enum order. */
+static const char * const ScoreTqReqTimeout_PreDesc_WaitState[] = {
+  "Blocked",
+  "IntendToBlock",
+  "ReadyAgain",
+  "NA"
+};
+
+/* NULL-terminated list of pre-condition descriptions used by T_get_scope(). */
+static const char * const * const ScoreTqReqTimeout_PreDesc[] = {
+  ScoreTqReqTimeout_PreDesc_WaitState,
+  NULL
+};
+
+/* Shorthand for the context type inside this translation unit. */
+typedef ScoreTqReqTimeout_Context Context;
+
+/*
+ * Returns the thread of the next recorded unblock scheduler event, advancing
+ * *index; NULL-thread sentinel semantics follow TQGetNextUnblock().
+ */
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+  return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+/* Returns the task control block of the specified worker thread. */
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+  const TQContext *tq_ctx;
+
+  tq_ctx = ctx->tq_ctx;
+  return tq_ctx->worker_tcb[ worker ];
+}
+
+/*
+ * Performs the final clock tick — which expires the enqueue timeout of
+ * TQ_BLOCKER_A — while scheduler operations are recorded.  Called directly
+ * for the Blocked state and as a call-within-ISR handler for IntendToBlock
+ * (see SchedulerBlock()).
+ */
+static void Tick( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  FinalClockTick();
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+/*
+ * Scheduler event handler which fires just BEFORE the enqueueing thread
+ * blocks: it uninstalls itself and submits a request to run Tick() in
+ * interrupt context, so the timeout occurs while the thread is still in the
+ * intend-to-block wait state.
+ */
+static void SchedulerBlock(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_BLOCK
+  ) {
+    /* One-shot: remove the handler before submitting the ISR request. */
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->request.handler = Tick;
+    CallWithinISRSubmit( &ctx->request );
+  }
+}
+
+/*
+ * Invokes _Thread_Timeout() directly on the watchdog of TQ_BLOCKER_A with
+ * scheduler recording enabled.  Used by SchedulerUnblock() to exercise a
+ * timeout racing with the surrender-driven unblock (ready-again state).
+ */
+static void ThreadTimeout( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  _Thread_Timeout(
+    &ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ]->Timer.Watchdog
+  );
+  TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
+/*
+ * Scheduler event handler which fires just BEFORE the blocked thread is
+ * unblocked: it uninstalls itself and submits a request to run
+ * ThreadTimeout() in interrupt context, so the timeout happens while the
+ * thread is in the ready-again wait state.
+ */
+static void SchedulerUnblock(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_UNBLOCK
+  ) {
+    /* One-shot: remove the handler before submitting the ISR request. */
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->request.handler = ThreadTimeout;
+    CallWithinISRSubmit( &ctx->request );
+  }
+}
+
+/*
+ * Prepares the WaitState pre-condition.  NOTE: the preparation also performs
+ * the test action itself — it drives TQ_BLOCKER_A into the requested wait
+ * state and triggers the timeout there; the exact call ordering is what is
+ * under test, so do not reorder these statements.
+ */
+static void ScoreTqReqTimeout_Pre_WaitState_Prepare(
+  ScoreTqReqTimeout_Context *ctx,
+  ScoreTqReqTimeout_Pre_WaitState state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeout_Pre_WaitState_Blocked: {
+      /*
+       * While the thread of the timeout operation is in the blocked wait
+       * state.  Tick() expires the timeout after the blocker has enqueued
+       * and fully blocked.
+       */
+      TQEnqueuePrepare( ctx->tq_ctx );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      Yield();
+      Tick( ctx );
+      TQEnqueueDone( ctx->tq_ctx );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Pre_WaitState_IntendToBlock: {
+      /*
+       * While the thread of the timeout operation is in the intend to block
+       * wait state.  SchedulerBlock() injects the tick just before the
+       * blocker would block.
+       */
+      TQEnqueuePrepare( ctx->tq_ctx );
+      T_scheduler_set_event_handler( SchedulerBlock, ctx );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      Yield();
+      TQEnqueueDone( ctx->tq_ctx );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Pre_WaitState_ReadyAgain: {
+      /*
+       * While the thread of the timeout operation is in the ready again wait
+       * state.  SchedulerUnblock() injects _Thread_Timeout() just before the
+       * surrender unblocks the blocker.
+       */
+      TQEnqueuePrepare( ctx->tq_ctx );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      Yield();
+      T_scheduler_set_event_handler( SchedulerUnblock, ctx );
+      TQEnqueueDone( ctx->tq_ctx );
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Pre_WaitState_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Status post-condition: the status recorded for TQ_BLOCKER_A is
+ * compared against the expected generic status converted via
+ * TQConvertStatus() to the API-specific value.
+ */
+static void ScoreTqReqTimeout_Post_Status_Check(
+  ScoreTqReqTimeout_Context *ctx,
+  ScoreTqReqTimeout_Post_Status state
+)
+{
+  switch ( state ) {
+    case ScoreTqReqTimeout_Post_Status_Ok: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_SUCCESSFUL.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ TQ_BLOCKER_A ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Post_Status_Timeout: {
+      /*
+       * The return status of the directive call shall be derived from
+       * STATUS_TIMEOUT.
+       */
+      T_eq_int(
+        ctx->tq_ctx->status[ TQ_BLOCKER_A ],
+        TQConvertStatus( ctx->tq_ctx, STATUS_TIMEOUT )
+      );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Post_Status_NA:
+      break;
+  }
+}
+
+/*
+ * Checks the Unblock post-condition against the scheduler event recording:
+ * either exactly one unblock of TQ_BLOCKER_A was recorded, or none at all.
+ */
+static void ScoreTqReqTimeout_Post_Unblock_Check(
+  ScoreTqReqTimeout_Context *ctx,
+  ScoreTqReqTimeout_Post_Unblock state
+)
+{
+  size_t i;
+
+  i = 0;
+
+  switch ( state ) {
+    case ScoreTqReqTimeout_Post_Unblock_Yes: {
+      /*
+       * The thread of the timeout operation shall be unblocked by the timeout
+       * operation.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Post_Unblock_No: {
+      /*
+       * The thread of the timeout operation shall not be unblocked by the
+       * timeout operation.
+       */
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+      break;
+    }
+
+    case ScoreTqReqTimeout_Post_Unblock_NA:
+      break;
+  }
+}
+
+/*
+ * Fixture setup: wires the ISR request argument to the context and resets
+ * the thread queue context.  Sticky enqueue variants place TQ_BLOCKER_A on
+ * scheduler B (SMP configuration); otherwise the blocker gets a high
+ * priority on the home scheduler.
+ */
+static void ScoreTqReqTimeout_Setup( ScoreTqReqTimeout_Context *ctx )
+{
+  ctx->request.arg = ctx;
+  TQReset( ctx->tq_ctx );
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+  } else {
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+  }
+}
+
+/* T_fixture setup adapter: clears the action-loop flag and runs the setup. */
+static void ScoreTqReqTimeout_Setup_Wrap( void *arg )
+{
+  ScoreTqReqTimeout_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeout_Setup( ctx );
+}
+
+/* Fixture teardown: resets the thread queue context to its default state. */
+static void ScoreTqReqTimeout_Teardown( ScoreTqReqTimeout_Context *ctx )
+{
+  TQReset( ctx->tq_ctx );
+}
+
+/* T_fixture teardown adapter: clears the action-loop flag, then tears down. */
+static void ScoreTqReqTimeout_Teardown_Wrap( void *arg )
+{
+  ScoreTqReqTimeout_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  ScoreTqReqTimeout_Teardown( ctx );
+}
+
+static void ScoreTqReqTimeout_Action( ScoreTqReqTimeout_Context *ctx )
+{
+  /*
+   * The action is performed by the ``WaitState`` pre-condition preparation.
+   * This function is intentionally empty and the context parameter is
+   * deliberately unused; it exists to keep the generated variant structure
+   * (prepare, action, check) uniform across test cases.
+   */
+}
+
+/*
+ * Transition map entries: expected post-conditions for each variant.
+ * Entry 0: Blocked -> Timeout status, unblocked by the timeout.
+ * Entry 1: IntendToBlock -> Timeout status, no unblock.
+ * Entry 2: ReadyAgain -> successful status, no unblock.
+ */
+static const ScoreTqReqTimeout_Entry
+ScoreTqReqTimeout_Entries[] = {
+  { 0, 0, ScoreTqReqTimeout_Post_Status_Timeout,
+    ScoreTqReqTimeout_Post_Unblock_Yes },
+  { 0, 0, ScoreTqReqTimeout_Post_Status_Timeout,
+    ScoreTqReqTimeout_Post_Unblock_No },
+  { 0, 0, ScoreTqReqTimeout_Post_Status_Ok, ScoreTqReqTimeout_Post_Unblock_No }
+};
+
+/* Maps each WaitState pre-condition state (in enum order) to its entry. */
+static const uint8_t
+ScoreTqReqTimeout_Map[] = {
+  0, 1, 2
+};
+
+/*
+ * Provides the pre-condition scope string for test reports, but only while
+ * the action loop is running; outside the loop no scope is contributed.
+ */
+static size_t ScoreTqReqTimeout_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreTqReqTimeout_Context *ctx;
+
+  ctx = arg;
+
+  if ( ctx->Map.in_action_loop ) {
+    return T_get_scope( ScoreTqReqTimeout_PreDesc, buf, n, ctx->Map.pcs );
+  }
+
+  return 0;
+}
+
+/* Test fixture pushed by ScoreTqReqTimeout_Run() around the action loop. */
+static T_fixture ScoreTqReqTimeout_Fixture = {
+  .setup = ScoreTqReqTimeout_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreTqReqTimeout_Teardown_Wrap,
+  .scope = ScoreTqReqTimeout_Scope,
+  .initial_context = &ScoreTqReqTimeout_Instance
+};
+
+/* Returns the current transition map entry and advances the map index. */
+static inline ScoreTqReqTimeout_Entry ScoreTqReqTimeout_PopEntry(
+  ScoreTqReqTimeout_Context *ctx
+)
+{
+  return ScoreTqReqTimeout_Entries[
+    ScoreTqReqTimeout_Map[ ctx->Map.index++ ]
+  ];
+}
+
+/*
+ * Executes one variant: the pre-condition preparation (which performs the
+ * action, see ScoreTqReqTimeout_Action()) followed by the post-condition
+ * checks of the current transition map entry.
+ */
+static void ScoreTqReqTimeout_TestVariant( ScoreTqReqTimeout_Context *ctx )
+{
+  ScoreTqReqTimeout_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreTqReqTimeout_Action( ctx );
+  ScoreTqReqTimeout_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+  ScoreTqReqTimeout_Post_Unblock_Check( ctx, ctx->Map.entry.Post_Unblock );
+}
+
+/* Fixture node used to push/pop the test fixture around the action loop. */
+static T_fixture_node ScoreTqReqTimeout_Node;
+
+void ScoreTqReqTimeout_Run( TQContext *tq_ctx )
+{
+  ScoreTqReqTimeout_Context *ctx;
+
+  /* Store the caller's thread queue context before the fixture setup runs. */
+  ctx = &ScoreTqReqTimeout_Instance;
+  ctx->tq_ctx = tq_ctx;
+
+  /* T_push_fixture() returns the fixture's initial context (the instance). */
+  ctx = T_push_fixture( &ScoreTqReqTimeout_Node, &ScoreTqReqTimeout_Fixture );
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  /* Iterate over all WaitState pre-condition states except NA. */
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreTqReqTimeout_Pre_WaitState_Blocked;
+    ctx->Map.pcs[ 0 ] < ScoreTqReqTimeout_Pre_WaitState_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    ctx->Map.entry = ScoreTqReqTimeout_PopEntry( ctx );
+    ScoreTqReqTimeout_TestVariant( ctx );
+  }
+
+  T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-timeout.h b/testsuites/validation/tr-tq-timeout.h
new file mode 100644
index 0000000000..487c32743d
--- /dev/null
+++ b/testsuites/validation/tr-tq-timeout.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqTimeout
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_TIMEOUT_H
+#define _TR_TQ_TIMEOUT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqTimeout
+ *
+ * @{
+ */
+
+/* Wait state of the thread subject to the timeout operation. */
+typedef enum {
+  ScoreTqReqTimeout_Pre_WaitState_Blocked,
+  ScoreTqReqTimeout_Pre_WaitState_IntendToBlock,
+  ScoreTqReqTimeout_Pre_WaitState_ReadyAgain,
+  ScoreTqReqTimeout_Pre_WaitState_NA
+} ScoreTqReqTimeout_Pre_WaitState;
+
+/* Expected status of the directive call after the timeout. */
+typedef enum {
+  ScoreTqReqTimeout_Post_Status_Ok,
+  ScoreTqReqTimeout_Post_Status_Timeout,
+  ScoreTqReqTimeout_Post_Status_NA
+} ScoreTqReqTimeout_Post_Status;
+
+/* Whether the timeout operation unblocks the thread. */
+typedef enum {
+  ScoreTqReqTimeout_Post_Unblock_Yes,
+  ScoreTqReqTimeout_Post_Unblock_No,
+  ScoreTqReqTimeout_Post_Unblock_NA
+} ScoreTqReqTimeout_Post_Unblock;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqTimeout_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_TIMEOUT_H */