author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-12-09 16:05:27 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-03-24 11:10:48 +0100
commit     a4b771844a23aff751ba00a6b34b6e407c3b1fc3 (patch)
tree       4d19aa6c64ca1bfe4ac3605829c5a333e94858d2
parent     validation: Test semaphore operations (diff)
download   rtems-a4b771844a23aff751ba00a6b34b6e407c3b1fc3.tar.bz2
validation: Test mutex operations
The test source code is generated from specification items by the "./spec2modules.py" script contained in the git://git.rtems.org/rtems-central.git Git repository.

Please read the "How-To" section in the "Software Requirements Engineering" chapter of the RTEMS Software Engineering manual to get more information about the process.

Update #3716.
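For orientation, the sketch below shows how a concrete validation test case might drive one of the parameterized runners added by this patch. The helper name is hypothetical and the context setup is only indicated; the actual TQMtxContext configuration (thread queue operations, worker tasks, locking protocol, priority ceiling) is provided by the calling test case and is not part of this commit.

/* Hypothetical caller of the parameterized runner added by this patch. */
#include "tr-mtx-seize-try.h"

static void ExampleMtxSeizeTryCase( TQMtxContext *mtx_ctx )
{
  /*
   * mtx_ctx->base (thread queue operations, workers, discipline) and the
   * mutex-specific members (locking protocol, recursion behaviour, priority
   * ceiling) are assumed to be set up by the calling test case.
   */
  ScoreMtxReqSeizeTry_Run( mtx_ctx );
}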
-rw-r--r--  spec/build/testsuites/validation/validation-no-clock-0.yml |    3
-rw-r--r--  testsuites/validation/tr-mtx-seize-try.c                    |  866
-rw-r--r--  testsuites/validation/tr-mtx-seize-try.h                    |  134
-rw-r--r--  testsuites/validation/tr-mtx-seize-wait.c                   | 1139
-rw-r--r--  testsuites/validation/tr-mtx-seize-wait.h                   |  151
-rw-r--r--  testsuites/validation/tr-mtx-surrender.c                    | 1233
-rw-r--r--  testsuites/validation/tr-mtx-surrender.h                    |  160
7 files changed, 3686 insertions(+), 0 deletions(-)
diff --git a/spec/build/testsuites/validation/validation-no-clock-0.yml b/spec/build/testsuites/validation/validation-no-clock-0.yml
index e4b3205e02..8f2f12cdb3 100644
--- a/spec/build/testsuites/validation/validation-no-clock-0.yml
+++ b/spec/build/testsuites/validation/validation-no-clock-0.yml
@@ -12,6 +12,9 @@ ldflags: []
links: []
source:
- testsuites/validation/tc-score-fatal.c
+- testsuites/validation/tr-mtx-seize-try.c
+- testsuites/validation/tr-mtx-seize-wait.c
+- testsuites/validation/tr-mtx-surrender.c
- testsuites/validation/tr-sem-seize-try.c
- testsuites/validation/tr-sem-seize-wait.c
- testsuites/validation/tr-sem-surrender.c
diff --git a/testsuites/validation/tr-mtx-seize-try.c b/testsuites/validation/tr-mtx-seize-try.c
new file mode 100644
index 0000000000..8279780bff
--- /dev/null
+++ b/testsuites/validation/tr-mtx-seize-try.c
@@ -0,0 +1,866 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSeizeTry
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-mtx-seize-try.h"
+#include "tr-tq-enqueue-ceiling.h"
+#include "tr-tq-enqueue-fifo.h"
+#include "tr-tq-enqueue-mrsp.h"
+#include "tr-tq-enqueue-priority.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreMtxReqSeizeTry spec:/score/mtx/req/seize-try
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
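+
+/*
+ * Each transition map entry records whether the variant is skipped, which
+ * pre-conditions are not applicable for it, and the indices of the expected
+ * post-condition states checked after the test action.
+ */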
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Protocol_NA : 1;
+ uint16_t Pre_Discipline_NA : 1;
+ uint16_t Pre_Recursive_NA : 1;
+ uint16_t Pre_Owner_NA : 1;
+ uint16_t Pre_Priority_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_Owner : 2;
+ uint16_t Post_Priority : 2;
+} ScoreMtxReqSeizeTry_Entry;
+
+/**
+ * @brief Test context for spec:/score/mtx/req/seize-try test case.
+ */
+typedef struct {
+ /**
+ * @brief If this member is true, then the calling thread shall be the owner
+ * of the mutex.
+ */
+  bool owner_caller;
+
+ /**
+ * @brief If this member is true, then a thread other than the calling thread
+ * shall be the owner of the mutex.
+ */
+  bool owner_other;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * before the directive call.
+ */
+  rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the owner of the mutex after the directive
+ * call.
+ */
+ const rtems_tcb *owner_after;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * after the directive call.
+ */
+  rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreMtxReqSeizeTry_Run() parameter.
+ */
+ TQMtxContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 5 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 5 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreMtxReqSeizeTry_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreMtxReqSeizeTry_Context;
+
+static ScoreMtxReqSeizeTry_Context
+ ScoreMtxReqSeizeTry_Instance;
+
+static const char * const ScoreMtxReqSeizeTry_PreDesc_Protocol[] = {
+ "Ceiling",
+ "MrsP",
+ "Other",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeTry_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeTry_PreDesc_Recursive[] = {
+ "Allowed",
+ "Unavailable",
+ "Deadlock",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeTry_PreDesc_Owner[] = {
+ "None",
+ "Caller",
+ "Other",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeTry_PreDesc_Priority[] = {
+ "High",
+ "Equal",
+ "Low",
+ "NA"
+};
+
+static const char * const * const ScoreMtxReqSeizeTry_PreDesc[] = {
+ ScoreMtxReqSeizeTry_PreDesc_Protocol,
+ ScoreMtxReqSeizeTry_PreDesc_Discipline,
+ ScoreMtxReqSeizeTry_PreDesc_Recursive,
+ ScoreMtxReqSeizeTry_PreDesc_Owner,
+ ScoreMtxReqSeizeTry_PreDesc_Priority,
+ NULL
+};
+
+typedef ScoreMtxReqSeizeTry_Context Context;
+
+static Status_Control Status( const Context *ctx, Status_Control status )
+{
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+}
+
+static bool IsEnqueueStatus( const Context *ctx, Status_Control expected )
+{
+ return ctx->tq_ctx->base.status[ TQ_BLOCKER_A ] == Status( ctx, expected );
+}
+
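+/*
+ * Performs the test action for the non-sticky enqueue variants: establish
+ * the requested owner (the caller or another thread), set the caller's
+ * priority, let TQ_BLOCKER_A try to seize the mutex, record the resulting
+ * owner and priority, and finally release the mutex again.
+ */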
+static void Action( Context *ctx )
+{
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ } else if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
+ }
+
+ TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( ctx->owner_caller ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ } else if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_SURRENDER );
+ }
+
+ if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ }
+}
+
+static void ActionSticky( Context *ctx )
+{
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ } else if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+ TQClearDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+ TQSendAndWaitForExecutionStopOrIntendToBlock(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( ctx->owner_caller ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ } else if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_SURRENDER
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ }
+
+ TQWaitForDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Pre_Protocol_Prepare(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Pre_Protocol state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Pre_Protocol_Ceiling: {
+ /*
+ * Where the mutex uses the priority ceiling locking protocol.
+ */
+ if (
+ ctx->tq_ctx->priority_ceiling == PRIO_INVALID ||
+ ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY
+ ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Protocol_MrsP: {
+ /*
+ * Where the mutex uses the MrsP locking protocol.
+ */
+ if (
+ ctx->tq_ctx->priority_ceiling == PRIO_INVALID ||
+ ctx->tq_ctx->base.enqueue_variant != TQ_ENQUEUE_STICKY
+ ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Protocol_Other: {
+ /*
+ * Where the mutex does not use the priority ceiling or MrsP locking
+ * protocol.
+ */
+ if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Protocol_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Pre_Discipline_Prepare(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue of the mutex uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue of the mutex uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Pre_Recursive_Prepare(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Pre_Recursive state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Pre_Recursive_Allowed: {
+ /*
+ * Where a recursive seize of the mutex is allowed.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Recursive_Unavailable: {
+ /*
+ * Where a recursive seize of the mutex results in an unavailable status.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_UNAVAILABLE ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Recursive_Deadlock: {
+ /*
+ * Where a recursive seize of the mutex results in a deadlock status.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_DEADLOCK ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Recursive_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Pre_Owner_Prepare(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Pre_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Pre_Owner_None: {
+ /*
+ * While the mutex has no owner.
+ */
+ /* This is the default */
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Owner_Caller: {
+ /*
+ * While the owner of the mutex is the calling thread.
+ */
+ ctx->owner_caller = true;
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Owner_Other: {
+ /*
+ * While the owner of the mutex is a thread other than the calling
+ * thread.
+ */
+ ctx->owner_other = true;
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Pre_Priority_Prepare(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Pre_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Pre_Priority_High: {
+ /*
+ * While the calling thread has a current priority higher than the
+ * priority ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling - 1;
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Priority_Equal: {
+ /*
+ * While the calling thread has a current priority equal to the priority
+ * ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling;
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Priority_Low: {
+ /*
+ * While the calling thread has a current priority lower than the
+ * priority ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling + 1;
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Pre_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Post_Status_Check(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Post_Status state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Post_Status_Ok: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_SUCCESSFUL.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Status_MutexCeilingViolated: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_MUTEX_CEILING_VIOLATED.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_MUTEX_CEILING_VIOLATED ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Status_Deadlock: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_DEADLOCK.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_DEADLOCK ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Status_Unavailable: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_UNAVAILABLE.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_UNAVAILABLE ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Status_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Post_Owner_Check(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Post_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Post_Owner_Other: {
+ /*
+ * The owner of the mutex shall not be modified.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_B ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Owner_Caller: {
+ /*
+ * The owner of the mutex shall be the calling thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Owner_None: {
+ /*
+ * The mutex shall have no owner.
+ */
+ T_null( ctx->owner_after );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Post_Priority_Check(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ ScoreMtxReqSeizeTry_Post_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeTry_Post_Priority_Nop: {
+ /*
+ * The priorities of the calling thread shall not be modified.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Priority_Ceiling: {
+ /*
+ * The calling thread shall use the priority ceiling of the mutex.
+ */
+ T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
+ break;
+ }
+
+ case ScoreMtxReqSeizeTry_Post_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeTry_Prepare( ScoreMtxReqSeizeTry_Context *ctx )
+{
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ ctx->priority_before = PRIO_VERY_HIGH;
+}
+
+static void ScoreMtxReqSeizeTry_Action( ScoreMtxReqSeizeTry_Context *ctx )
+{
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+}
+
+static const ScoreMtxReqSeizeTry_Entry
+ScoreMtxReqSeizeTry_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_NA,
+ ScoreMtxReqSeizeTry_Post_Owner_NA, ScoreMtxReqSeizeTry_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeTry_Post_Status_Ok,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_Unavailable,
+ ScoreMtxReqSeizeTry_Post_Owner_Other, ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeTry_Post_Status_Unavailable,
+ ScoreMtxReqSeizeTry_Post_Owner_Other, ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_Ok,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Ceiling },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_MutexCeilingViolated,
+ ScoreMtxReqSeizeTry_Post_Owner_None, ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 1, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_NA,
+ ScoreMtxReqSeizeTry_Post_Owner_NA, ScoreMtxReqSeizeTry_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeTry_Post_Status_Unavailable,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeTry_Post_Status_Deadlock,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_Ok,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_Unavailable,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeTry_Post_Status_Deadlock,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Priority_Nop }
+};
+
+static const uint8_t
+ScoreMtxReqSeizeTry_Map[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 5, 4, 4, 9, 9, 6, 2, 2, 2, 5, 4, 4, 10, 10, 6, 2, 2, 2, 5, 4, 4, 11, 11,
+ 6, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 5, 4, 4, 9, 9, 6, 2, 2, 2, 5, 4, 4, 10, 10, 6, 2, 2, 2, 5, 4,
+ 4, 11, 11, 6, 2, 2, 2, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 1, 7, 7, 7, 3, 3, 3,
+ 1, 1, 1, 8, 8, 8, 3, 3, 3, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 1, 7, 7, 7, 3, 3,
+ 3, 1, 1, 1, 8, 8, 8, 3, 3, 3
+};
+
+static size_t ScoreMtxReqSeizeTry_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreMtxReqSeizeTry_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreMtxReqSeizeTry_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreMtxReqSeizeTry_Fixture = {
+ .setup = NULL,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = ScoreMtxReqSeizeTry_Scope,
+ .initial_context = &ScoreMtxReqSeizeTry_Instance
+};
+
+static const uint8_t ScoreMtxReqSeizeTry_Weights[] = {
+ 54, 27, 9, 3, 1
+};
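+
+/*
+ * The weights are mixed-radix place values: each weight is the product of
+ * the state counts of the less significant pre-conditions (Priority 3,
+ * Owner 3, Recursive 3, Discipline 2).  Together with the fall-through
+ * resets in ScoreMtxReqSeizeTry_Skip(), they allow
+ * ScoreMtxReqSeizeTry_PopEntry() to turn the pre-condition indices into the
+ * next relevant transition map index after a variant was skipped.
+ */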
+
+static void ScoreMtxReqSeizeTry_Skip(
+ ScoreMtxReqSeizeTry_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSeizeTry_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSeizeTry_Pre_Recursive_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSeizeTry_Pre_Owner_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSeizeTry_Pre_Priority_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreMtxReqSeizeTry_Entry ScoreMtxReqSeizeTry_PopEntry(
+ ScoreMtxReqSeizeTry_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 5; ++i ) {
+ index += ScoreMtxReqSeizeTry_Weights[ i ] * ctx->Map.pci[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreMtxReqSeizeTry_Entries[
+ ScoreMtxReqSeizeTry_Map[ index ]
+ ];
+}
+
+static void ScoreMtxReqSeizeTry_SetPreConditionStates(
+ ScoreMtxReqSeizeTry_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+
+ if ( ctx->Map.entry.Pre_Priority_NA ) {
+ ctx->Map.pcs[ 4 ] = ScoreMtxReqSeizeTry_Pre_Priority_NA;
+ } else {
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+ }
+}
+
+static void ScoreMtxReqSeizeTry_TestVariant( ScoreMtxReqSeizeTry_Context *ctx )
+{
+ ScoreMtxReqSeizeTry_Pre_Protocol_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeTry_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreMtxReqSeizeTry_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeTry_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreMtxReqSeizeTry_Pre_Recursive_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeTry_Skip( ctx, 2 );
+ return;
+ }
+
+ ScoreMtxReqSeizeTry_Pre_Owner_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ ScoreMtxReqSeizeTry_Pre_Priority_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ ScoreMtxReqSeizeTry_Action( ctx );
+ ScoreMtxReqSeizeTry_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ ScoreMtxReqSeizeTry_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
+ ScoreMtxReqSeizeTry_Post_Priority_Check( ctx, ctx->Map.entry.Post_Priority );
+}
+
+static T_fixture_node ScoreMtxReqSeizeTry_Node;
+
+void ScoreMtxReqSeizeTry_Run( TQMtxContext *tq_ctx )
+{
+ ScoreMtxReqSeizeTry_Context *ctx;
+
+ ctx = &ScoreMtxReqSeizeTry_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreMtxReqSeizeTry_Node,
+ &ScoreMtxReqSeizeTry_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pci[ 0 ] = ScoreMtxReqSeizeTry_Pre_Protocol_Ceiling;
+ ctx->Map.pci[ 0 ] < ScoreMtxReqSeizeTry_Pre_Protocol_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSeizeTry_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < ScoreMtxReqSeizeTry_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSeizeTry_Pre_Recursive_Allowed;
+ ctx->Map.pci[ 2 ] < ScoreMtxReqSeizeTry_Pre_Recursive_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSeizeTry_Pre_Owner_None;
+ ctx->Map.pci[ 3 ] < ScoreMtxReqSeizeTry_Pre_Owner_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSeizeTry_Pre_Priority_High;
+ ctx->Map.pci[ 4 ] < ScoreMtxReqSeizeTry_Pre_Priority_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ ctx->Map.entry = ScoreMtxReqSeizeTry_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreMtxReqSeizeTry_SetPreConditionStates( ctx );
+ ScoreMtxReqSeizeTry_Prepare( ctx );
+ ScoreMtxReqSeizeTry_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-mtx-seize-try.h b/testsuites/validation/tr-mtx-seize-try.h
new file mode 100644
index 0000000000..84e7cd6849
--- /dev/null
+++ b/testsuites/validation/tr-mtx-seize-try.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSeizeTry
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_MTX_SEIZE_TRY_H
+#define _TR_MTX_SEIZE_TRY_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreMtxReqSeizeTry
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Pre_Protocol_Ceiling,
+ ScoreMtxReqSeizeTry_Pre_Protocol_MrsP,
+ ScoreMtxReqSeizeTry_Pre_Protocol_Other,
+ ScoreMtxReqSeizeTry_Pre_Protocol_NA
+} ScoreMtxReqSeizeTry_Pre_Protocol;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Pre_Discipline_FIFO,
+ ScoreMtxReqSeizeTry_Pre_Discipline_Priority,
+ ScoreMtxReqSeizeTry_Pre_Discipline_NA
+} ScoreMtxReqSeizeTry_Pre_Discipline;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Pre_Recursive_Allowed,
+ ScoreMtxReqSeizeTry_Pre_Recursive_Unavailable,
+ ScoreMtxReqSeizeTry_Pre_Recursive_Deadlock,
+ ScoreMtxReqSeizeTry_Pre_Recursive_NA
+} ScoreMtxReqSeizeTry_Pre_Recursive;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Pre_Owner_None,
+ ScoreMtxReqSeizeTry_Pre_Owner_Caller,
+ ScoreMtxReqSeizeTry_Pre_Owner_Other,
+ ScoreMtxReqSeizeTry_Pre_Owner_NA
+} ScoreMtxReqSeizeTry_Pre_Owner;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Pre_Priority_High,
+ ScoreMtxReqSeizeTry_Pre_Priority_Equal,
+ ScoreMtxReqSeizeTry_Pre_Priority_Low,
+ ScoreMtxReqSeizeTry_Pre_Priority_NA
+} ScoreMtxReqSeizeTry_Pre_Priority;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Post_Status_Ok,
+ ScoreMtxReqSeizeTry_Post_Status_MutexCeilingViolated,
+ ScoreMtxReqSeizeTry_Post_Status_Deadlock,
+ ScoreMtxReqSeizeTry_Post_Status_Unavailable,
+ ScoreMtxReqSeizeTry_Post_Status_NA
+} ScoreMtxReqSeizeTry_Post_Status;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Post_Owner_Other,
+ ScoreMtxReqSeizeTry_Post_Owner_Caller,
+ ScoreMtxReqSeizeTry_Post_Owner_None,
+ ScoreMtxReqSeizeTry_Post_Owner_NA
+} ScoreMtxReqSeizeTry_Post_Owner;
+
+typedef enum {
+ ScoreMtxReqSeizeTry_Post_Priority_Nop,
+ ScoreMtxReqSeizeTry_Post_Priority_Ceiling,
+ ScoreMtxReqSeizeTry_Post_Priority_NA
+} ScoreMtxReqSeizeTry_Post_Priority;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreMtxReqSeizeTry_Run( TQMtxContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_MTX_SEIZE_TRY_H */
diff --git a/testsuites/validation/tr-mtx-seize-wait.c b/testsuites/validation/tr-mtx-seize-wait.c
new file mode 100644
index 0000000000..367ca0da32
--- /dev/null
+++ b/testsuites/validation/tr-mtx-seize-wait.c
@@ -0,0 +1,1139 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSeizeWait
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-mtx-seize-wait.h"
+#include "tr-tq-enqueue-ceiling.h"
+#include "tr-tq-enqueue-deadlock.h"
+#include "tr-tq-enqueue-fifo.h"
+#include "tr-tq-enqueue-mrsp.h"
+#include "tr-tq-enqueue-priority-inherit.h"
+#include "tr-tq-enqueue-priority.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreMtxReqSeizeWait spec:/score/mtx/req/seize-wait
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_Protocol_NA : 1;
+ uint32_t Pre_Discipline_NA : 1;
+ uint32_t Pre_DeadlockResult_NA : 1;
+ uint32_t Pre_Recursive_NA : 1;
+ uint32_t Pre_Owner_NA : 1;
+ uint32_t Pre_Priority_NA : 1;
+ uint32_t Post_Status : 3;
+ uint32_t Post_Enqueued : 3;
+ uint32_t Post_Owner : 2;
+ uint32_t Post_Priority : 2;
+} ScoreMtxReqSeizeWait_Entry;
+
+/**
+ * @brief Test context for spec:/score/mtx/req/seize-wait test case.
+ */
+typedef struct {
+ /**
+ * @brief If this member is true, then the calling thread shall be the owner
+ * of the mutex.
+ */
+  bool owner_caller;
+
+ /**
+ * @brief If this member is true, then a thread other than the calling thread
+ * shall be the owner of the mutex.
+ */
+  bool owner_other;
+
+ /**
+ * @brief If this member is true, then a deadlock shall occur.
+ */
+  bool deadlock;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * before the directive call.
+ */
+  rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the owner of the mutex after the directive
+ * call.
+ */
+ const rtems_tcb *owner_after;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * after the directive call.
+ */
+  rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreMtxReqSeizeWait_Run() parameter.
+ */
+ TQMtxContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 6 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 6 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreMtxReqSeizeWait_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreMtxReqSeizeWait_Context;
+
+static ScoreMtxReqSeizeWait_Context
+ ScoreMtxReqSeizeWait_Instance;
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_Protocol[] = {
+ "None",
+ "Inherit",
+ "Ceiling",
+ "MrsP",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_DeadlockResult[] = {
+ "Status",
+ "Fatal",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_Recursive[] = {
+ "Allowed",
+ "Deadlock",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_Owner[] = {
+ "None",
+ "Caller",
+ "Other",
+ "Deadlock",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSeizeWait_PreDesc_Priority[] = {
+ "High",
+ "Equal",
+ "Low",
+ "NA"
+};
+
+static const char * const * const ScoreMtxReqSeizeWait_PreDesc[] = {
+ ScoreMtxReqSeizeWait_PreDesc_Protocol,
+ ScoreMtxReqSeizeWait_PreDesc_Discipline,
+ ScoreMtxReqSeizeWait_PreDesc_DeadlockResult,
+ ScoreMtxReqSeizeWait_PreDesc_Recursive,
+ ScoreMtxReqSeizeWait_PreDesc_Owner,
+ ScoreMtxReqSeizeWait_PreDesc_Priority,
+ NULL
+};
+
+typedef ScoreMtxReqSeizeWait_Context Context;
+
+static Status_Control Status( const Context *ctx, Status_Control status )
+{
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+}
+
+static bool IsEnqueueStatus( const Context *ctx, Status_Control expected )
+{
+ return ctx->tq_ctx->base.status[ TQ_BLOCKER_A ] == Status( ctx, expected );
+}
+
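+/*
+ * For the deadlock pre-condition, the action closes a wait cycle with a
+ * second mutex (obtained and released via the
+ * TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN/_RELEASE events): TQ_BLOCKER_A first
+ * obtains that mutex, TQ_BLOCKER_B owns the mutex under test and then
+ * blocks on the second mutex, so the subsequent seize of the mutex under
+ * test by TQ_BLOCKER_A completes the deadlock.
+ */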
+static void Action( Context *ctx )
+{
+ TQEvent enqueue;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ } else if ( ctx->owner_other ) {
+ if ( ctx->deadlock ) {
+ TQSend(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+ );
+ }
+
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
+
+ if ( ctx->deadlock ) {
+ TQSend(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+ );
+ }
+ }
+
+ TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+
+ if ( ctx->tq_ctx->base.deadlock == TQ_DEADLOCK_FATAL ) {
+ enqueue = TQ_EVENT_ENQUEUE_FATAL;
+ } else {
+ enqueue = TQ_EVENT_ENQUEUE;
+ }
+
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, enqueue );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( ctx->owner_caller ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ } else if ( ctx->owner_other ) {
+ if ( ctx->deadlock ) {
+ TQSend(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
+ );
+ }
+
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_SURRENDER );
+
+ if ( ctx->deadlock ) {
+ TQSend(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
+ );
+ }
+ }
+
+ if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ }
+}
+
+static void ActionSticky( Context *ctx )
+{
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ } else if ( ctx->owner_other ) {
+ if ( ctx->deadlock ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+ );
+ }
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+
+ if ( ctx->deadlock ) {
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+ );
+ }
+
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+ TQClearDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+ TQSendAndWaitForExecutionStopOrIntendToBlock(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( ctx->owner_caller ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ } else if ( ctx->owner_other ) {
+ if ( ctx->deadlock ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
+ );
+ }
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->deadlock ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
+ );
+ }
+
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ }
+
+ TQWaitForDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+ if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_Protocol_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_Protocol state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_Protocol_None: {
+ /*
+ * Where the mutex does not use a locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_NO_PROTOCOL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Protocol_Inherit: {
+ /*
+ * Where the mutex uses the priority inheritance locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_INHERIT ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Protocol_Ceiling: {
+ /*
+ * Where the mutex uses the priority ceiling locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_CEILING ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Protocol_MrsP: {
+ /*
+ * Where the mutex uses the MrsP locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_MRSP ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Protocol_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_Discipline_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue of the mutex uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue of the mutex uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_DeadlockResult_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_DeadlockResult state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_DeadlockResult_Status: {
+ /*
+ * Where a detected deadlock results in a return with a status code.
+ */
+ if ( ctx->tq_ctx->base.deadlock != TQ_DEADLOCK_STATUS ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_DeadlockResult_Fatal: {
+ /*
+ * Where a detected deadlock results in a fatal error.
+ */
+ if ( ctx->tq_ctx->base.deadlock != TQ_DEADLOCK_FATAL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_DeadlockResult_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_Recursive_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_Recursive state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_Recursive_Allowed: {
+ /*
+ * Where a recursive seize of the mutex is allowed.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Recursive_Deadlock: {
+ /*
+ * Where a recursive seize of the mutex results in a deadlock.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_DEADLOCK ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Recursive_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_Owner_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_Owner_None: {
+ /*
+ * While the mutex has no owner.
+ */
+ /* This is the default */
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Owner_Caller: {
+ /*
+ * While the owner of the mutex is the calling thread.
+ */
+ ctx->owner_caller = true;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Owner_Other: {
+ /*
+ * While the owner of the mutex is a thread other than the calling
+ * thread.
+ */
+ ctx->owner_other = true;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Owner_Deadlock: {
+ /*
+ * While the attempt to seize the mutex results in a deadlock.
+ */
+ ctx->owner_other = true;
+ ctx->deadlock = true;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Pre_Priority_Prepare(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Pre_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Pre_Priority_High: {
+ /*
+ * While the calling thread has a current priority higher than the
+ * priority ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling - 1;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Priority_Equal: {
+ /*
+ * While the calling thread has a current priority equal to the priority
+ * ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Priority_Low: {
+ /*
+ * While the calling thread has a current priority lower than the
+ * priority ceiling.
+ */
+ ctx->priority_before = ctx->tq_ctx->priority_ceiling + 1;
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Pre_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Post_Status_Check(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Post_Status state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Post_Status_Ok: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_SUCCESSFUL.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Status_MutexCeilingViolated: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_MUTEX_CEILING_VIOLATED.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_MUTEX_CEILING_VIOLATED ) );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_DEADLOCK.
+ */
+ T_true( IsEnqueueStatus( ctx, STATUS_DEADLOCK ) );
+ ScoreTqReqEnqueueDeadlock_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal: {
+ /*
+ * The system shall terminate with the INTERNAL_ERROR_CORE fatal source
+ * and the INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal code.
+ */
+ T_eq_int( ctx->tq_ctx->base.status[ TQ_BLOCKER_A ], STATUS_DEADLOCK );
+ ScoreTqReqEnqueueDeadlock_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Status_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Post_Enqueued_Check(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Post_Enqueued state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Post_Enqueued_No: {
+ /*
+ * The calling thread shall not be enqueued on the thread queue of the
+ * mutex.
+ */
+ /* The test runner would block if the worker is enqueued */
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_FIFO: {
+ /*
+ * The calling thread shall be enqueued in FIFO order.
+ */
+ ScoreTqReqEnqueueFifo_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_Priority: {
+ /*
+ * The calling thread shall be enqueued in priority order.
+ */
+ ScoreTqReqEnqueuePriority_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_PriorityInherit: {
+ /*
+       * The calling thread shall be enqueued in priority order with
+       * priority inheritance.
+ */
+ ScoreTqReqEnqueuePriorityInherit_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_PriorityCeiling: {
+ /*
+ * The calling thread shall be enqueued in priority order according to
+ * the priority ceiling locking protocol.
+ */
+ ScoreTqReqEnqueueCeiling_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_PriorityMrsP: {
+ /*
+ * The calling thread shall be enqueued in priority order according to
+ * the MrsP locking protocol.
+ */
+ ScoreTqReqEnqueueMrsp_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Enqueued_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Post_Owner_Check(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Post_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Post_Owner_Other: {
+ /*
+ * The owner of the mutex shall not be modified.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_B ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Owner_Caller: {
+ /*
+ * The owner of the mutex shall be the calling thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Owner_None: {
+ /*
+ * The mutex shall have no owner.
+ */
+ T_null( ctx->owner_after );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Post_Priority_Check(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ ScoreMtxReqSeizeWait_Post_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSeizeWait_Post_Priority_Nop: {
+ /*
+ * The priorities of the calling thread shall not be modified.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Priority_Ceiling: {
+ /*
+ * The calling thread shall use the priority ceiling of the mutex.
+ */
+ T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
+ break;
+ }
+
+ case ScoreMtxReqSeizeWait_Post_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSeizeWait_Prepare( ScoreMtxReqSeizeWait_Context *ctx )
+{
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ ctx->deadlock = false;
+ ctx->priority_before = PRIO_VERY_HIGH;
+}
+
+static void ScoreMtxReqSeizeWait_Action( ScoreMtxReqSeizeWait_Context *ctx )
+{
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_B,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+}
+
+static const ScoreMtxReqSeizeWait_Entry
+ScoreMtxReqSeizeWait_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_NA, ScoreMtxReqSeizeWait_Post_Owner_NA,
+ ScoreMtxReqSeizeWait_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_Ok,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_Ok,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Ceiling },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_FIFO,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_Priority,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityInherit,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityCeiling,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_MutexCeilingViolated,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_None,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_Ok,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 1, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_NA, ScoreMtxReqSeizeWait_Post_Owner_NA,
+ ScoreMtxReqSeizeWait_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_MutexCeilingViolated,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_NA,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityMrsP,
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Priority_Ceiling },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal,
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Priority_Nop }
+};
+
+static const uint8_t
+ScoreMtxReqSeizeWait_Map[] = {
+ 1, 1, 1, 1, 1, 1, 5, 5, 5, 2, 2, 2, 1, 1, 1, 11, 11, 11, 5, 5, 5, 2, 2, 2, 1,
+ 1, 1, 1, 1, 1, 5, 5, 5, 3, 3, 3, 1, 1, 1, 12, 12, 12, 5, 5, 5, 3, 3, 3, 1, 1,
+ 1, 1, 1, 1, 6, 6, 6, 2, 2, 2, 1, 1, 1, 11, 11, 11, 6, 6, 6, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 6, 6, 6, 3, 3, 3, 1, 1, 1, 12, 12, 12, 6, 6, 6, 3, 3, 3, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 7, 7,
+ 7, 2, 2, 2, 1, 1, 1, 11, 11, 11, 7, 7, 7, 2, 2, 2, 1, 1, 1, 1, 1, 1, 7, 7, 7,
+ 3, 3, 3, 1, 1, 1, 12, 12, 12, 7, 7, 7, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 4, 4, 14, 14, 15, 8, 8, 8, 9, 9, 9,
+ 13, 4, 4, 18, 18, 15, 8, 8, 8, 9, 9, 9, 13, 4, 4, 14, 14, 15, 8, 8, 8, 10,
+ 10, 10, 13, 4, 4, 19, 19, 15, 8, 8, 8, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 4, 4, 14, 14, 15, 16, 17, 17, 16,
+ 9, 9, 13, 4, 4, 18, 18, 15, 16, 17, 17, 16, 9, 9, 13, 4, 4, 14, 14, 15, 16,
+ 17, 17, 16, 10, 10, 13, 4, 4, 19, 19, 15, 16, 17, 17, 16, 10, 10
+};
+
+static size_t ScoreMtxReqSeizeWait_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreMtxReqSeizeWait_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreMtxReqSeizeWait_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreMtxReqSeizeWait_Fixture = {
+ .setup = NULL,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = ScoreMtxReqSeizeWait_Scope,
+ .initial_context = &ScoreMtxReqSeizeWait_Instance
+};
+
+static const uint8_t ScoreMtxReqSeizeWait_Weights[] = {
+ 96, 48, 24, 12, 3, 1
+};
+
+static void ScoreMtxReqSeizeWait_Skip(
+ ScoreMtxReqSeizeWait_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSeizeWait_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSeizeWait_Pre_DeadlockResult_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSeizeWait_Pre_Recursive_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSeizeWait_Pre_Owner_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSeizeWait_Pre_Priority_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreMtxReqSeizeWait_Entry ScoreMtxReqSeizeWait_PopEntry(
+ ScoreMtxReqSeizeWait_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 6; ++i ) {
+ index += ScoreMtxReqSeizeWait_Weights[ i ] * ctx->Map.pci[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreMtxReqSeizeWait_Entries[
+ ScoreMtxReqSeizeWait_Map[ index ]
+ ];
+}
+
+static void ScoreMtxReqSeizeWait_SetPreConditionStates(
+ ScoreMtxReqSeizeWait_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+
+ if ( ctx->Map.entry.Pre_Priority_NA ) {
+ ctx->Map.pcs[ 5 ] = ScoreMtxReqSeizeWait_Pre_Priority_NA;
+ } else {
+ ctx->Map.pcs[ 5 ] = ctx->Map.pci[ 5 ];
+ }
+}
+
+static void ScoreMtxReqSeizeWait_TestVariant(
+ ScoreMtxReqSeizeWait_Context *ctx
+)
+{
+ ScoreMtxReqSeizeWait_Pre_Protocol_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeWait_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreMtxReqSeizeWait_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeWait_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreMtxReqSeizeWait_Pre_DeadlockResult_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeWait_Skip( ctx, 2 );
+ return;
+ }
+
+ ScoreMtxReqSeizeWait_Pre_Recursive_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSeizeWait_Skip( ctx, 3 );
+ return;
+ }
+
+ ScoreMtxReqSeizeWait_Pre_Owner_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ ScoreMtxReqSeizeWait_Pre_Priority_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+ ScoreMtxReqSeizeWait_Action( ctx );
+ ScoreMtxReqSeizeWait_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ ScoreMtxReqSeizeWait_Post_Enqueued_Check(
+ ctx,
+ ctx->Map.entry.Post_Enqueued
+ );
+ ScoreMtxReqSeizeWait_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
+ ScoreMtxReqSeizeWait_Post_Priority_Check(
+ ctx,
+ ctx->Map.entry.Post_Priority
+ );
+}
+
+static T_fixture_node ScoreMtxReqSeizeWait_Node;
+
+void ScoreMtxReqSeizeWait_Run( TQMtxContext *tq_ctx )
+{
+ ScoreMtxReqSeizeWait_Context *ctx;
+
+ ctx = &ScoreMtxReqSeizeWait_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreMtxReqSeizeWait_Node,
+ &ScoreMtxReqSeizeWait_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pci[ 0 ] = ScoreMtxReqSeizeWait_Pre_Protocol_None;
+ ctx->Map.pci[ 0 ] < ScoreMtxReqSeizeWait_Pre_Protocol_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSeizeWait_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < ScoreMtxReqSeizeWait_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSeizeWait_Pre_DeadlockResult_Status;
+ ctx->Map.pci[ 2 ] < ScoreMtxReqSeizeWait_Pre_DeadlockResult_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSeizeWait_Pre_Recursive_Allowed;
+ ctx->Map.pci[ 3 ] < ScoreMtxReqSeizeWait_Pre_Recursive_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSeizeWait_Pre_Owner_None;
+ ctx->Map.pci[ 4 ] < ScoreMtxReqSeizeWait_Pre_Owner_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ for (
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSeizeWait_Pre_Priority_High;
+ ctx->Map.pci[ 5 ] < ScoreMtxReqSeizeWait_Pre_Priority_NA;
+ ++ctx->Map.pci[ 5 ]
+ ) {
+ ctx->Map.entry = ScoreMtxReqSeizeWait_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreMtxReqSeizeWait_SetPreConditionStates( ctx );
+ ScoreMtxReqSeizeWait_Prepare( ctx );
+ ScoreMtxReqSeizeWait_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-mtx-seize-wait.h b/testsuites/validation/tr-mtx-seize-wait.h
new file mode 100644
index 0000000000..1ac532b1ce
--- /dev/null
+++ b/testsuites/validation/tr-mtx-seize-wait.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSeizeWait
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_MTX_SEIZE_WAIT_H
+#define _TR_MTX_SEIZE_WAIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreMtxReqSeizeWait
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_Protocol_None,
+ ScoreMtxReqSeizeWait_Pre_Protocol_Inherit,
+ ScoreMtxReqSeizeWait_Pre_Protocol_Ceiling,
+ ScoreMtxReqSeizeWait_Pre_Protocol_MrsP,
+ ScoreMtxReqSeizeWait_Pre_Protocol_NA
+} ScoreMtxReqSeizeWait_Pre_Protocol;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_Discipline_FIFO,
+ ScoreMtxReqSeizeWait_Pre_Discipline_Priority,
+ ScoreMtxReqSeizeWait_Pre_Discipline_NA
+} ScoreMtxReqSeizeWait_Pre_Discipline;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_DeadlockResult_Status,
+ ScoreMtxReqSeizeWait_Pre_DeadlockResult_Fatal,
+ ScoreMtxReqSeizeWait_Pre_DeadlockResult_NA
+} ScoreMtxReqSeizeWait_Pre_DeadlockResult;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_Recursive_Allowed,
+ ScoreMtxReqSeizeWait_Pre_Recursive_Deadlock,
+ ScoreMtxReqSeizeWait_Pre_Recursive_NA
+} ScoreMtxReqSeizeWait_Pre_Recursive;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_Owner_None,
+ ScoreMtxReqSeizeWait_Pre_Owner_Caller,
+ ScoreMtxReqSeizeWait_Pre_Owner_Other,
+ ScoreMtxReqSeizeWait_Pre_Owner_Deadlock,
+ ScoreMtxReqSeizeWait_Pre_Owner_NA
+} ScoreMtxReqSeizeWait_Pre_Owner;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Pre_Priority_High,
+ ScoreMtxReqSeizeWait_Pre_Priority_Equal,
+ ScoreMtxReqSeizeWait_Pre_Priority_Low,
+ ScoreMtxReqSeizeWait_Pre_Priority_NA
+} ScoreMtxReqSeizeWait_Pre_Priority;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Post_Status_Ok,
+ ScoreMtxReqSeizeWait_Post_Status_MutexCeilingViolated,
+ ScoreMtxReqSeizeWait_Post_Status_DeadlockStatus,
+ ScoreMtxReqSeizeWait_Post_Status_DeadlockFatal,
+ ScoreMtxReqSeizeWait_Post_Status_NA
+} ScoreMtxReqSeizeWait_Post_Status;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Post_Enqueued_No,
+ ScoreMtxReqSeizeWait_Post_Enqueued_FIFO,
+ ScoreMtxReqSeizeWait_Post_Enqueued_Priority,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityInherit,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityCeiling,
+ ScoreMtxReqSeizeWait_Post_Enqueued_PriorityMrsP,
+ ScoreMtxReqSeizeWait_Post_Enqueued_NA
+} ScoreMtxReqSeizeWait_Post_Enqueued;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Post_Owner_Other,
+ ScoreMtxReqSeizeWait_Post_Owner_Caller,
+ ScoreMtxReqSeizeWait_Post_Owner_None,
+ ScoreMtxReqSeizeWait_Post_Owner_NA
+} ScoreMtxReqSeizeWait_Post_Owner;
+
+typedef enum {
+ ScoreMtxReqSeizeWait_Post_Priority_Nop,
+ ScoreMtxReqSeizeWait_Post_Priority_Ceiling,
+ ScoreMtxReqSeizeWait_Post_Priority_NA
+} ScoreMtxReqSeizeWait_Post_Priority;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreMtxReqSeizeWait_Run( TQMtxContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_MTX_SEIZE_WAIT_H */
diff --git a/testsuites/validation/tr-mtx-surrender.c b/testsuites/validation/tr-mtx-surrender.c
new file mode 100644
index 0000000000..6e27ff2bc9
--- /dev/null
+++ b/testsuites/validation/tr-mtx-surrender.c
@@ -0,0 +1,1233 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-mtx-surrender.h"
+#include "tr-tq-surrender-mrsp.h"
+#include "tr-tq-surrender-priority-inherit.h"
+#include "tr-tq-surrender.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreMtxReqSurrender spec:/score/mtx/req/surrender
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
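+/**
+ * @brief One entry of the transition map.
+ *
+ * The entry contains the skip flag, the not-applicable flags of the
+ * pre-conditions, and the expected post-condition states.
+ */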
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_Protocol_NA : 1;
+ uint32_t Pre_Discipline_NA : 1;
+ uint32_t Pre_Recursive_NA : 1;
+ uint32_t Pre_OwnerCheck_NA : 1;
+ uint32_t Pre_Owner_NA : 1;
+ uint32_t Pre_Nested_NA : 1;
+ uint32_t Pre_Blocked_NA : 1;
+ uint32_t Pre_Priority_NA : 1;
+ uint32_t Post_Status : 2;
+ uint32_t Post_Owner : 3;
+ uint32_t Post_Surrender : 3;
+ uint32_t Post_Priority : 2;
+} ScoreMtxReqSurrender_Entry;
+
+/**
+ * @brief Test context for spec:/score/mtx/req/surrender test case.
+ */
+typedef struct {
+ /**
+ * @brief If this member is true, then the calling thread shall be the owner
+ * of the mutex.
+ */
+ bool owner_caller;
+
+ /**
+ * @brief If this member is true, then a thread other than the calling thread
+ * shall be the owner of the mutex.
+ */
+ bool owner_other;
+
+ /**
+ * @brief If this member is true, then the calling thread shall have seized
+ * the mutex recursively.
+ */
+ bool nested;
+
+ /**
+ * @brief If this member is true, then there shall be a thread blocked
+ * waiting for the mutex.
+ */
+ bool blocked;
+
+ /**
+ * @brief This member contains the real priority of the calling thread.
+ */
+ rtems_task_priority priority_real;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * before the directive call.
+ */
+ rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the return status of the directive call.
+ */
+ Status_Control status;
+
+ /**
+ * @brief This member contains the owner of the mutex after the directive
+ * call.
+ */
+ const rtems_tcb *owner_after;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * after the directive call.
+ */
+ rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains the counter snapshot after the directive call.
+ */
+ uint32_t counter;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreMtxReqSurrender_Run() parameter.
+ */
+ TQMtxContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 8 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 8 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreMtxReqSurrender_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreMtxReqSurrender_Context;
+
+static ScoreMtxReqSurrender_Context
+ ScoreMtxReqSurrender_Instance;
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Protocol[] = {
+ "None",
+ "Inherit",
+ "Ceiling",
+ "MrsP",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Recursive[] = {
+ "Allowed",
+ "NotAllowed",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_OwnerCheck[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Owner[] = {
+ "None",
+ "Caller",
+ "Other",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Nested[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Blocked[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Priority[] = {
+ "High",
+ "Equal",
+ "Low",
+ "NA"
+};
+
+static const char * const * const ScoreMtxReqSurrender_PreDesc[] = {
+ ScoreMtxReqSurrender_PreDesc_Protocol,
+ ScoreMtxReqSurrender_PreDesc_Discipline,
+ ScoreMtxReqSurrender_PreDesc_Recursive,
+ ScoreMtxReqSurrender_PreDesc_OwnerCheck,
+ ScoreMtxReqSurrender_PreDesc_Owner,
+ ScoreMtxReqSurrender_PreDesc_Nested,
+ ScoreMtxReqSurrender_PreDesc_Blocked,
+ ScoreMtxReqSurrender_PreDesc_Priority,
+ NULL
+};
+
+typedef ScoreMtxReqSurrender_Context Context;
+
+static Status_Control Status( const Context *ctx, Status_Control status )
+{
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+}
+
+static void Action( Context *ctx )
+{
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_ENQUEUE );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ Yield();
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+ SetSelfPriority( PRIO_NORMAL );
+ Yield();
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_SURRENDER );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ }
+}
+
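+/*
+ * Variant of Action() for thread queues which use the sticky enqueue
+ * variant.  Here the blocking worker runs under a second scheduler, so the
+ * runner synchronizes with it instead of yielding.
+ */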
+static void ActionSticky( Context *ctx )
+{
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndWaitForIntendToBlock(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+
+ if ( ctx->status == Status( ctx, STATUS_SUCCESSFUL ) ) {
+ TQWaitForExecutionStop( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+ }
+
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_SURRENDER
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ } else {
+ SetSelfPriority( PRIO_NORMAL );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Protocol_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Protocol state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Protocol_None: {
+ /*
+ * Where the mutex does not use a locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_NO_PROTOCOL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_Inherit: {
+ /*
+ * Where the mutex uses the priority inheritance locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_INHERIT ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_Ceiling: {
+ /*
+ * Where the mutex uses the priority ceiling locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_CEILING ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_MrsP: {
+ /*
+ * Where the mutex uses the MrsP locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_MRSP ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Discipline_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue of the mutex uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue of the mutex uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Recursive_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Recursive state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Recursive_Allowed: {
+ /*
+ * Where a recursive seize of the mutex is allowed.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Recursive_NotAllowed: {
+ /*
+ * Where a recursive seize of the mutex is not allowed.
+ */
+ if ( ctx->tq_ctx->recursive == TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Recursive_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_OwnerCheck_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_OwnerCheck state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_Yes: {
+ /*
+ * Where the surrender checks that the mutex owner is the calling thread.
+ */
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_CHECKS_OWNER ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_No: {
+ /*
+ * Where the surrender does not check that the mutex owner is the calling
+ * thread.
+ */
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_NO_OWNER_CHECK ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Owner_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Owner_None: {
+ /*
+ * While the mutex has no owner.
+ */
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_Caller: {
+ /*
+ * While the owner of the mutex is the calling thread.
+ */
+ ctx->owner_caller = true;
+ ctx->owner_other = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_Other: {
+ /*
+ * While the owner of the mutex is a thread other than the calling
+ * thread.
+ */
+ ctx->owner_caller = false;
+ ctx->owner_other = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Nested_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Nested state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Nested_Yes: {
+ /*
+ * While the calling thread seized the mutex recursively.
+ */
+ ctx->nested = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Nested_No: {
+ /*
+ * While the calling thread did not seize the mutex recursively.
+ */
+ ctx->nested = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Nested_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Blocked_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Blocked state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Blocked_Yes: {
+ /*
+ * While threads are blocked on the mutex.
+ */
+ ctx->blocked = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Blocked_No: {
+ /*
+ * While no threads are blocked on the mutex.
+ */
+ ctx->blocked = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Blocked_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Priority_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Priority_High: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be higher than the
+ * highest priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_Equal: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be equal to the highest
+ * priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_VERY_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_Low: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be lower than the highest
+ * priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Status_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Status state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Status_Ok: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_SUCCESSFUL.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Status_NotOwner: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_NOT_OWNER.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_NOT_OWNER ) );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Status_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Owner_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Owner_None: {
+ /*
+ * The mutex shall have no owner.
+ */
+ T_null( ctx->owner_after );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_Caller: {
+ /*
+ * The owner of the mutex shall be the calling thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.runner_tcb
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_Other: {
+ /*
+ * The owner of the mutex shall not be modified.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_HELPER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_First: {
+ /*
+ * The owner of the mutex shall be the dequeued thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Surrender_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Surrender state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Surrender_Nop: {
+ /*
+ * The thread queue of the mutex shall not be surrendered to a thread.
+ */
+ T_eq_u32( ctx->counter, 0 );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_FIFO: {
+ /*
+ * The thread queue of the mutex shall be surrendered in FIFO order.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrender_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_Priority: {
+ /*
+ * The thread queue of the mutex shall be surrendered in priority order.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrender_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_PriorityInherit: {
+ /*
+ * The thread queue of the mutex shall be surrendered in priority order
+ * with priority inheritance.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrenderPriorityInherit_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_MrsP: {
+ /*
+ * The thread queue of the mutex shall be surrendered in priority order
+ * with MrsP.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrenderMrsp_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Priority_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Priority_Nop: {
+ /*
+ * The current priority of the calling thread shall not be modified.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Priority_Low: {
+ /*
+ * The current priority of the calling thread shall be lowered to reflect
+ * the removal of the priorities available through the mutex.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_real );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Prepare( ScoreMtxReqSurrender_Context *ctx )
+{
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ ctx->nested = false;
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ } else {
+ ctx->priority_real = PRIO_NORMAL;
+ }
+}
+
+static void ScoreMtxReqSurrender_Action( ScoreMtxReqSurrender_Context *ctx )
+{
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+}
+
+static const ScoreMtxReqSurrender_Entry
+ScoreMtxReqSurrender_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Owner_Other,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 1, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_FIFO,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Low },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_MrsP,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Priority_Low },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Low },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_MrsP,
+ ScoreMtxReqSurrender_Post_Priority_Low }
+};
+
+static const uint8_t
+ScoreMtxReqSurrender_Map[] = {
+ 4, 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 7, 7, 7, 7, 7, 7, 10, 10, 10, 6, 6, 6, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7, 7,
+ 7, 7, 7, 10, 10, 10, 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 10, 10, 10, 6, 6, 6, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 10, 10, 10, 6, 6, 6, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 4, 4, 4, 5, 5, 5, 4,
+ 4, 4, 5, 5, 5, 7, 7, 7, 7, 7, 7, 11, 11, 11, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 11, 11, 11,
+ 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5,
+ 5, 2, 2, 2, 2, 2, 2, 11, 11, 11, 6, 6, 6, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 11, 11, 11, 6, 6, 6, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 8, 8, 8, 7, 7, 7, 12, 12, 16, 6, 6, 6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8, 8, 7,
+ 7, 7, 12, 12, 16, 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 12, 12, 16, 6, 6, 6, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 12,
+ 12, 16, 6, 6, 6, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 4, 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 8, 8, 8, 8, 8, 8, 13,
+ 13, 17, 9, 9, 14, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 8, 8, 8, 8, 8, 8, 13, 13, 17, 9, 9, 14, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 13, 13, 17,
+ 9, 9, 14, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 13, 13, 17, 9, 9, 14, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 5, 5, 5, 4, 4, 4, 5,
+ 5, 5, 8, 8, 8, 8, 8, 8, 15, 15, 18, 9, 9, 14, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8, 8, 8, 8, 8, 15, 15, 18, 9, 9,
+ 14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5, 5,
+ 2, 2, 2, 2, 2, 2, 15, 15, 18, 9, 9, 14, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 15, 15, 18, 9, 9, 14,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1
+};
+
+static size_t ScoreMtxReqSurrender_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreMtxReqSurrender_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreMtxReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreMtxReqSurrender_Fixture = {
+ .setup = NULL,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = ScoreMtxReqSurrender_Scope,
+ .initial_context = &ScoreMtxReqSurrender_Instance
+};
+
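+/*
+ * Weights used by ScoreMtxReqSurrender_PopEntry() to translate the
+ * pre-condition indices into a transition map index.  Each weight is the
+ * product of the state counts of all following pre-conditions, for example
+ * 288 = 2 (Discipline) * 2 (Recursive) * 2 (OwnerCheck) * 3 (Owner) *
+ * 2 (Nested) * 2 (Blocked) * 3 (Priority).
+ */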
+static const uint16_t ScoreMtxReqSurrender_Weights[] = {
+ 288, 144, 72, 36, 12, 6, 3, 1
+};
+
+static void ScoreMtxReqSurrender_Skip(
+ ScoreMtxReqSurrender_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSurrender_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSurrender_Pre_Recursive_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSurrender_Pre_OwnerCheck_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSurrender_Pre_Owner_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_NA - 1;
+ /* Fall through */
+ case 6:
+ ctx->Map.pci[ 6 ] = ScoreMtxReqSurrender_Pre_Blocked_NA - 1;
+ /* Fall through */
+ case 7:
+ ctx->Map.pci[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreMtxReqSurrender_Entry ScoreMtxReqSurrender_PopEntry(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 8; ++i ) {
+ index += ScoreMtxReqSurrender_Weights[ i ] * ctx->Map.pci[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreMtxReqSurrender_Entries[
+ ScoreMtxReqSurrender_Map[ index ]
+ ];
+}
+
+static void ScoreMtxReqSurrender_SetPreConditionStates(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+
+ if ( ctx->Map.entry.Pre_Nested_NA ) {
+ ctx->Map.pcs[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_NA;
+ } else {
+ ctx->Map.pcs[ 5 ] = ctx->Map.pci[ 5 ];
+ }
+
+ ctx->Map.pcs[ 6 ] = ctx->Map.pci[ 6 ];
+
+ if ( ctx->Map.entry.Pre_Priority_NA ) {
+ ctx->Map.pcs[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_NA;
+ } else {
+ ctx->Map.pcs[ 7 ] = ctx->Map.pci[ 7 ];
+ }
+}
+
+static void ScoreMtxReqSurrender_TestVariant(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ ScoreMtxReqSurrender_Pre_Protocol_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Recursive_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 2 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_OwnerCheck_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 3 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Owner_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ ScoreMtxReqSurrender_Pre_Nested_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+ ScoreMtxReqSurrender_Pre_Blocked_Prepare( ctx, ctx->Map.pcs[ 6 ] );
+ ScoreMtxReqSurrender_Pre_Priority_Prepare( ctx, ctx->Map.pcs[ 7 ] );
+ ScoreMtxReqSurrender_Action( ctx );
+ ScoreMtxReqSurrender_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ ScoreMtxReqSurrender_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
+ ScoreMtxReqSurrender_Post_Surrender_Check(
+ ctx,
+ ctx->Map.entry.Post_Surrender
+ );
+ ScoreMtxReqSurrender_Post_Priority_Check(
+ ctx,
+ ctx->Map.entry.Post_Priority
+ );
+}
+
+static T_fixture_node ScoreMtxReqSurrender_Node;
+
+void ScoreMtxReqSurrender_Run( TQMtxContext *tq_ctx )
+{
+ ScoreMtxReqSurrender_Context *ctx;
+
+ ctx = &ScoreMtxReqSurrender_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreMtxReqSurrender_Node,
+ &ScoreMtxReqSurrender_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pci[ 0 ] = ScoreMtxReqSurrender_Pre_Protocol_None;
+ ctx->Map.pci[ 0 ] < ScoreMtxReqSurrender_Pre_Protocol_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSurrender_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < ScoreMtxReqSurrender_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSurrender_Pre_Recursive_Allowed;
+ ctx->Map.pci[ 2 ] < ScoreMtxReqSurrender_Pre_Recursive_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSurrender_Pre_OwnerCheck_Yes;
+ ctx->Map.pci[ 3 ] < ScoreMtxReqSurrender_Pre_OwnerCheck_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSurrender_Pre_Owner_None;
+ ctx->Map.pci[ 4 ] < ScoreMtxReqSurrender_Pre_Owner_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ for (
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_Yes;
+ ctx->Map.pci[ 5 ] < ScoreMtxReqSurrender_Pre_Nested_NA;
+ ++ctx->Map.pci[ 5 ]
+ ) {
+ for (
+ ctx->Map.pci[ 6 ] = ScoreMtxReqSurrender_Pre_Blocked_Yes;
+ ctx->Map.pci[ 6 ] < ScoreMtxReqSurrender_Pre_Blocked_NA;
+ ++ctx->Map.pci[ 6 ]
+ ) {
+ for (
+ ctx->Map.pci[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_High;
+ ctx->Map.pci[ 7 ] < ScoreMtxReqSurrender_Pre_Priority_NA;
+ ++ctx->Map.pci[ 7 ]
+ ) {
+ ctx->Map.entry = ScoreMtxReqSurrender_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreMtxReqSurrender_SetPreConditionStates( ctx );
+ ScoreMtxReqSurrender_Prepare( ctx );
+ ScoreMtxReqSurrender_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-mtx-surrender.h b/testsuites/validation/tr-mtx-surrender.h
new file mode 100644
index 0000000000..79b044d184
--- /dev/null
+++ b/testsuites/validation/tr-mtx-surrender.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_MTX_SURRENDER_H
+#define _TR_MTX_SURRENDER_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreMtxReqSurrender
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Protocol_None,
+ ScoreMtxReqSurrender_Pre_Protocol_Inherit,
+ ScoreMtxReqSurrender_Pre_Protocol_Ceiling,
+ ScoreMtxReqSurrender_Pre_Protocol_MrsP,
+ ScoreMtxReqSurrender_Pre_Protocol_NA
+} ScoreMtxReqSurrender_Pre_Protocol;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Discipline_FIFO,
+ ScoreMtxReqSurrender_Pre_Discipline_Priority,
+ ScoreMtxReqSurrender_Pre_Discipline_NA
+} ScoreMtxReqSurrender_Pre_Discipline;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Recursive_Allowed,
+ ScoreMtxReqSurrender_Pre_Recursive_NotAllowed,
+ ScoreMtxReqSurrender_Pre_Recursive_NA
+} ScoreMtxReqSurrender_Pre_Recursive;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_OwnerCheck_Yes,
+ ScoreMtxReqSurrender_Pre_OwnerCheck_No,
+ ScoreMtxReqSurrender_Pre_OwnerCheck_NA
+} ScoreMtxReqSurrender_Pre_OwnerCheck;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Owner_None,
+ ScoreMtxReqSurrender_Pre_Owner_Caller,
+ ScoreMtxReqSurrender_Pre_Owner_Other,
+ ScoreMtxReqSurrender_Pre_Owner_NA
+} ScoreMtxReqSurrender_Pre_Owner;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Nested_Yes,
+ ScoreMtxReqSurrender_Pre_Nested_No,
+ ScoreMtxReqSurrender_Pre_Nested_NA
+} ScoreMtxReqSurrender_Pre_Nested;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Blocked_Yes,
+ ScoreMtxReqSurrender_Pre_Blocked_No,
+ ScoreMtxReqSurrender_Pre_Blocked_NA
+} ScoreMtxReqSurrender_Pre_Blocked;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Priority_High,
+ ScoreMtxReqSurrender_Pre_Priority_Equal,
+ ScoreMtxReqSurrender_Pre_Priority_Low,
+ ScoreMtxReqSurrender_Pre_Priority_NA
+} ScoreMtxReqSurrender_Pre_Priority;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Status_NA
+} ScoreMtxReqSurrender_Post_Status;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Owner_Other,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Owner_NA
+} ScoreMtxReqSurrender_Post_Owner;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Surrender_FIFO,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Surrender_MrsP,
+ ScoreMtxReqSurrender_Post_Surrender_NA
+} ScoreMtxReqSurrender_Post_Surrender;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Priority_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Low,
+ ScoreMtxReqSurrender_Post_Priority_NA
+} ScoreMtxReqSurrender_Post_Priority;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreMtxReqSurrender_Run( TQMtxContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_MTX_SURRENDER_H */