author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-12-09 16:16:34 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-03-24 11:10:49 +0100
commit    43adf2ed59175fcdd1201730f0b17e739d8032e4 (patch)
tree      6a844eaab596887f9a5e40547175f9780b18c320
parent    1c87bb585f781e2ebd42af7f1bf045f97d065c5c (diff)
validation: Test Semaphore Manager
The test source code is generated from specification items by the
"./spec2modules.py" script contained in the
git://git.rtems.org/rtems-central.git Git repository.

Please read the "How-To" section in the "Software Requirements Engineering"
chapter of the RTEMS Software Engineering manual to get more information
about the process.

Update #3716.
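As a minimal regeneration sketch, assuming the script is invoked from the
top-level directory of an rtems-central checkout with the RTEMS sources set
up as described in the manual:

    cd rtems-central
    ./spec2modules.py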
-rw-r--r--  spec/build/testsuites/validation/performance-no-clock-0.yml     1
-rw-r--r--  spec/build/testsuites/validation/validation-no-clock-0.yml      7
-rw-r--r--  spec/build/testsuites/validation/validation-one-cpu-0.yml       2
-rw-r--r--  spec/build/testsuites/validation/validation-smp-only-0.yml      2
-rw-r--r--  testsuites/validation/tc-sem-create.c                         1559
-rw-r--r--  testsuites/validation/tc-sem-delete.c                          759
-rw-r--r--  testsuites/validation/tc-sem-flush.c                           623
-rw-r--r--  testsuites/validation/tc-sem-ident.c                           118
-rw-r--r--  testsuites/validation/tc-sem-mrsp-obtain.c                    1202
-rw-r--r--  testsuites/validation/tc-sem-obtain.c                          728
-rw-r--r--  testsuites/validation/tc-sem-performance.c                     890
-rw-r--r--  testsuites/validation/tc-sem-release.c                         613
-rw-r--r--  testsuites/validation/tc-sem-set-priority.c                   1137
-rw-r--r--  testsuites/validation/tc-sem-smp.c                             478
-rw-r--r--  testsuites/validation/tc-sem-timeout.c                         461
-rw-r--r--  testsuites/validation/tc-sem-uni.c                             226
16 files changed, 8806 insertions, 0 deletions
diff --git a/spec/build/testsuites/validation/performance-no-clock-0.yml b/spec/build/testsuites/validation/performance-no-clock-0.yml
index 02ca873f20..c8821d5cd4 100644
--- a/spec/build/testsuites/validation/performance-no-clock-0.yml
+++ b/spec/build/testsuites/validation/performance-no-clock-0.yml
@@ -15,6 +15,7 @@ source:
- testsuites/validation/tc-event-performance.c
- testsuites/validation/tc-message-performance.c
- testsuites/validation/tc-part-performance.c
+- testsuites/validation/tc-sem-performance.c
- testsuites/validation/ts-performance-no-clock-0.c
stlib: []
target: testsuites/validation/ts-performance-no-clock-0.exe
diff --git a/spec/build/testsuites/validation/validation-no-clock-0.yml b/spec/build/testsuites/validation/validation-no-clock-0.yml
index b82fa35f44..0910669521 100644
--- a/spec/build/testsuites/validation/validation-no-clock-0.yml
+++ b/spec/build/testsuites/validation/validation-no-clock-0.yml
@@ -63,6 +63,13 @@ source:
- testsuites/validation/tc-scheduler-ident.c
- testsuites/validation/tc-scheduler-remove-processor.c
- testsuites/validation/tc-score-fatal.c
+- testsuites/validation/tc-sem-create.c
+- testsuites/validation/tc-sem-flush.c
+- testsuites/validation/tc-sem-ident.c
+- testsuites/validation/tc-sem-obtain.c
+- testsuites/validation/tc-sem-release.c
+- testsuites/validation/tc-sem-set-priority.c
+- testsuites/validation/tc-sem-timeout.c
- testsuites/validation/tr-event-constant.c
- testsuites/validation/tr-mtx-seize-try.c
- testsuites/validation/tr-mtx-seize-wait.c
diff --git a/spec/build/testsuites/validation/validation-one-cpu-0.yml b/spec/build/testsuites/validation/validation-one-cpu-0.yml
index 52e28f4d98..d6518c33cd 100644
--- a/spec/build/testsuites/validation/validation-one-cpu-0.yml
+++ b/spec/build/testsuites/validation/validation-one-cpu-0.yml
@@ -17,6 +17,8 @@ source:
- testsuites/validation/tc-ratemon-get-status.c
- testsuites/validation/tc-ratemon-period.c
- testsuites/validation/tc-ratemon-timeout.c
+- testsuites/validation/tc-sem-delete.c
+- testsuites/validation/tc-sem-uni.c
- testsuites/validation/tc-score-fatal.c
- testsuites/validation/ts-validation-one-cpu-0.c
stlib: []
diff --git a/spec/build/testsuites/validation/validation-smp-only-0.yml b/spec/build/testsuites/validation/validation-smp-only-0.yml
index 545fa9d77f..4bef002802 100644
--- a/spec/build/testsuites/validation/validation-smp-only-0.yml
+++ b/spec/build/testsuites/validation/validation-smp-only-0.yml
@@ -18,6 +18,8 @@ source:
- testsuites/validation/tc-intr-smp-only.c
- testsuites/validation/tc-scheduler-smp-only.c
- testsuites/validation/tc-score-tq-smp.c
+- testsuites/validation/tc-sem-smp.c
+- testsuites/validation/tc-sem-mrsp-obtain.c
- testsuites/validation/ts-validation-smp-only-0.c
stlib: []
target: testsuites/validation/ts-validation-smp-only-0.exe
diff --git a/testsuites/validation/tc-sem-create.c b/testsuites/validation/tc-sem-create.c
new file mode 100644
index 0000000000..7012c28275
--- /dev/null
+++ b/testsuites/validation/tc-sem-create.c
@@ -0,0 +1,1559 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqCreate
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <limits.h>
+#include <rtems.h>
+#include <string.h>
+#include <rtems/rtems/semimpl.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqCreate spec:/rtems/sem/req/create
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Name_Valid,
+ RtemsSemReqCreate_Pre_Name_Invalid,
+ RtemsSemReqCreate_Pre_Name_NA
+} RtemsSemReqCreate_Pre_Name;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Id_Valid,
+ RtemsSemReqCreate_Pre_Id_Null,
+ RtemsSemReqCreate_Pre_Id_NA
+} RtemsSemReqCreate_Pre_Id;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Count_Zero,
+ RtemsSemReqCreate_Pre_Count_One,
+ RtemsSemReqCreate_Pre_Count_GtOne,
+ RtemsSemReqCreate_Pre_Count_NA
+} RtemsSemReqCreate_Pre_Count;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Binary_Yes,
+ RtemsSemReqCreate_Pre_Binary_No,
+ RtemsSemReqCreate_Pre_Binary_NA
+} RtemsSemReqCreate_Pre_Binary;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Simple_Yes,
+ RtemsSemReqCreate_Pre_Simple_No,
+ RtemsSemReqCreate_Pre_Simple_NA
+} RtemsSemReqCreate_Pre_Simple;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Inherit_Yes,
+ RtemsSemReqCreate_Pre_Inherit_No,
+ RtemsSemReqCreate_Pre_Inherit_NA
+} RtemsSemReqCreate_Pre_Inherit;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Ceiling_Yes,
+ RtemsSemReqCreate_Pre_Ceiling_No,
+ RtemsSemReqCreate_Pre_Ceiling_NA
+} RtemsSemReqCreate_Pre_Ceiling;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_MrsP_Yes,
+ RtemsSemReqCreate_Pre_MrsP_No,
+ RtemsSemReqCreate_Pre_MrsP_NA
+} RtemsSemReqCreate_Pre_MrsP;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Disc_FIFO,
+ RtemsSemReqCreate_Pre_Disc_Prio,
+ RtemsSemReqCreate_Pre_Disc_NA
+} RtemsSemReqCreate_Pre_Disc;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Prio_LeCur,
+ RtemsSemReqCreate_Pre_Prio_GtCur,
+ RtemsSemReqCreate_Pre_Prio_Invalid,
+ RtemsSemReqCreate_Pre_Prio_NA
+} RtemsSemReqCreate_Pre_Prio;
+
+typedef enum {
+ RtemsSemReqCreate_Pre_Free_Yes,
+ RtemsSemReqCreate_Pre_Free_No,
+ RtemsSemReqCreate_Pre_Free_NA
+} RtemsSemReqCreate_Pre_Free;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Status_InvName,
+ RtemsSemReqCreate_Post_Status_InvAddr,
+ RtemsSemReqCreate_Post_Status_InvNum,
+ RtemsSemReqCreate_Post_Status_InvPrio,
+ RtemsSemReqCreate_Post_Status_NotDef,
+ RtemsSemReqCreate_Post_Status_TooMany,
+ RtemsSemReqCreate_Post_Status_NA
+} RtemsSemReqCreate_Post_Status;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Name_Valid,
+ RtemsSemReqCreate_Post_Name_Invalid,
+ RtemsSemReqCreate_Post_Name_NA
+} RtemsSemReqCreate_Post_Name;
+
+typedef enum {
+ RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_IdVar_NA
+} RtemsSemReqCreate_Post_IdVar;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Variant_Cnt,
+ RtemsSemReqCreate_Post_Variant_Bin,
+ RtemsSemReqCreate_Post_Variant_PI,
+ RtemsSemReqCreate_Post_Variant_PC,
+ RtemsSemReqCreate_Post_Variant_SB,
+ RtemsSemReqCreate_Post_Variant_MrsP,
+ RtemsSemReqCreate_Post_Variant_NA
+} RtemsSemReqCreate_Post_Variant;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Disc_FIFO,
+ RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Disc_NA
+} RtemsSemReqCreate_Post_Disc;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Count_Initial,
+ RtemsSemReqCreate_Post_Count_NA
+} RtemsSemReqCreate_Post_Count;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Owner_NA
+} RtemsSemReqCreate_Post_Owner;
+
+typedef enum {
+ RtemsSemReqCreate_Post_Prio_Ceiling,
+ RtemsSemReqCreate_Post_Prio_Nop,
+ RtemsSemReqCreate_Post_Prio_NA
+} RtemsSemReqCreate_Post_Prio;
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_Name_NA : 1;
+ uint32_t Pre_Id_NA : 1;
+ uint32_t Pre_Count_NA : 1;
+ uint32_t Pre_Binary_NA : 1;
+ uint32_t Pre_Simple_NA : 1;
+ uint32_t Pre_Inherit_NA : 1;
+ uint32_t Pre_Ceiling_NA : 1;
+ uint32_t Pre_MrsP_NA : 1;
+ uint32_t Pre_Disc_NA : 1;
+ uint32_t Pre_Prio_NA : 1;
+ uint32_t Pre_Free_NA : 1;
+ uint32_t Post_Status : 3;
+ uint32_t Post_Name : 2;
+ uint32_t Post_IdVar : 2;
+ uint32_t Post_Variant : 3;
+ uint32_t Post_Disc : 2;
+ uint32_t Post_Count : 1;
+ uint32_t Post_Owner : 2;
+ uint32_t Post_Prio : 2;
+} RtemsSemReqCreate_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/create test case.
+ */
+typedef struct {
+ void *seized_objects;
+
+ rtems_status_code status;
+
+ Semaphore_Variant variant;
+
+ Semaphore_Discipline discipline;
+
+ uint32_t sem_count;
+
+ Thread_Control *executing;
+
+ Thread_Control *owner;
+
+ rtems_name name;
+
+ uint32_t count;
+
+ rtems_attribute attribute_set;
+
+ rtems_task_priority priority_ceiling;
+
+ rtems_id *id;
+
+ rtems_id id_value;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 11 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqCreate_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqCreate_Context;
+
+static RtemsSemReqCreate_Context
+ RtemsSemReqCreate_Instance;
+
+static const char * const RtemsSemReqCreate_PreDesc_Name[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Id[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Count[] = {
+ "Zero",
+ "One",
+ "GtOne",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Binary[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Simple[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Inherit[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Ceiling[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_MrsP[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Disc[] = {
+ "FIFO",
+ "Prio",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Prio[] = {
+ "LeCur",
+ "GtCur",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSemReqCreate_PreDesc_Free[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqCreate_PreDesc[] = {
+ RtemsSemReqCreate_PreDesc_Name,
+ RtemsSemReqCreate_PreDesc_Id,
+ RtemsSemReqCreate_PreDesc_Count,
+ RtemsSemReqCreate_PreDesc_Binary,
+ RtemsSemReqCreate_PreDesc_Simple,
+ RtemsSemReqCreate_PreDesc_Inherit,
+ RtemsSemReqCreate_PreDesc_Ceiling,
+ RtemsSemReqCreate_PreDesc_MrsP,
+ RtemsSemReqCreate_PreDesc_Disc,
+ RtemsSemReqCreate_PreDesc_Prio,
+ RtemsSemReqCreate_PreDesc_Free,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqCreate_Context Context;
+
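+/*
+ * Helper passed to T_seize_objects(): each call creates one semaphore, so
+ * that the Pre_Free_No state can exhaust all inactive semaphore objects
+ * before the action is performed.
+ */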
+static rtems_status_code Create( void *arg, uint32_t *id )
+{
+ (void) arg;
+
+ return rtems_semaphore_create(
+ rtems_build_name( 'S', 'I', 'Z', 'E' ),
+ 1,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ 0,
+ id
+ );
+}
+
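+/*
+ * Records the variant, discipline, count, and owner of the semaphore
+ * identified by ctx->id_value for the post-condition checks.  Marker values
+ * are recorded if no semaphore was created.
+ */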
+static void GetSemAttributes( Context *ctx )
+{
+ if ( ctx->id_value != INVALID_ID ) {
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+ uintptr_t flags;
+
+ semaphore = _Semaphore_Get( ctx->id_value, &queue_context );
+ T_assert_not_null( semaphore );
+ ctx->sem_count = semaphore->Core_control.Semaphore.count;
+ ctx->owner = semaphore->Core_control.Wait_queue.Queue.owner;
+ flags = _Semaphore_Get_flags( semaphore );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ ctx->variant = _Semaphore_Get_variant( flags );
+ ctx->discipline = _Semaphore_Get_discipline( flags );
+ } else {
+ ctx->sem_count = 123;
+ ctx->owner = (void *)(uintptr_t) 1;
+ ctx->variant = INT_MAX;
+ ctx->discipline = INT_MAX;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Name_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Name state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Name_Valid: {
+ /*
+ * While the ``name`` parameter is valid.
+ */
+ ctx->name = NAME;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Name_Invalid: {
+ /*
+ * While the ``name`` parameter is invalid.
+ */
+ ctx->name = 0;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Name_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Id_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter references an object of type rtems_id.
+ */
+ ctx->id = &ctx->id_value;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Id_Null: {
+ /*
+ * While the ``id`` parameter is NULL.
+ */
+ ctx->id = NULL;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Count_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Count state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Count_Zero: {
+ /*
+ * While the ``count`` parameter is zero.
+ */
+ ctx->count = 0;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Count_One: {
+ /*
+ * While the ``count`` parameter is one.
+ */
+ ctx->count = 1;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Count_GtOne: {
+ /*
+ * While the ``count`` parameter is greater than one.
+ */
+ ctx->count = UINT32_MAX;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Count_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Binary_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Binary state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Binary_Yes: {
+ /*
+ * While the ``attribute_set`` parameter specifies the binary semaphore
+ * class.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Binary_No: {
+ /*
+ * While the ``attribute_set`` parameter does not specify the binary
+ * semaphore class.
+ */
+ /* Use default */
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Binary_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Simple_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Simple state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Simple_Yes: {
+ /*
+ * While the ``attribute_set`` parameter specifies the simple binary
+ * semaphore class.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Simple_No: {
+ /*
+ * While the ``attribute_set`` parameter does not specify the simple
+ * binary semaphore class.
+ */
+ /* Use default */
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Simple_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Inherit_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Inherit state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Inherit_Yes: {
+ /*
+ * While the ``attribute_set`` parameter specifies the priority
+ * inheritance locking protocol.
+ */
+ ctx->attribute_set |= RTEMS_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Inherit_No: {
+ /*
+ * While the ``attribute_set`` parameter does not specify the priority
+ * inheritance locking protocol.
+ */
+ ctx->attribute_set |= RTEMS_NO_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Inherit_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Ceiling_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Ceiling state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Ceiling_Yes: {
+ /*
+ * While the ``attribute_set`` parameter specifies the priority ceiling
+ * locking protocol.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Ceiling_No: {
+ /*
+ * While the ``attribute_set`` parameter does not specify the priority
+ * ceiling locking protocol.
+ */
+ ctx->attribute_set |= RTEMS_NO_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Ceiling_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_MrsP_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_MrsP state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_MrsP_Yes: {
+ /*
+ * While the ``attribute_set`` parameter specifies the MrsP locking
+ * protocol.
+ */
+ ctx->attribute_set |= RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_MrsP_No: {
+ /*
+ * While the ``attribute_set`` parameter does not specify the MrsP
+ * locking protocol.
+ */
+ ctx->attribute_set |= RTEMS_NO_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_MrsP_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Disc_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Disc state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Disc_FIFO: {
+ /*
+ * While the ``attribute_set`` parameter specifies the FIFO task wait
+ * queue discipline or the default task wait queue discipline.
+ */
+ RTEMS_STATIC_ASSERT( RTEMS_DEFAULT_ATTRIBUTES == RTEMS_FIFO, RTEMS_FIFO );
+ ctx->attribute_set |= RTEMS_FIFO;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Disc_Prio: {
+ /*
+ * While the ``attribute_set`` parameter specifies the priority task wait
+ * queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Disc_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Prio_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Prio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Prio_LeCur: {
+ /*
+ * While the ``priority_ceiling`` parameter is a valid task priority less
+ * than or equal to the current priority of the calling task with respect
+ * to the scheduler of the calling task at some point during the
+ * directive call.
+ */
+ ctx->priority_ceiling = 0;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Prio_GtCur: {
+ /*
+ * While the ``priority_ceiling`` parameter is a valid task priority
+ * greater than the current priority of the calling task with respect to
+ * the scheduler of the calling task at some point during the directive
+ * call.
+ */
+ ctx->priority_ceiling = 2;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Prio_Invalid: {
+ /*
+ * The ``priority_ceiling`` parameter shall not be a valid task priority
+ * with respect to the scheduler of the calling task at some point during
+ * the directive call.
+ */
+ ctx->priority_ceiling = UINT32_MAX;
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Prio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Pre_Free_Prepare(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Pre_Free state
+)
+{
+ size_t i;
+
+ switch ( state ) {
+ case RtemsSemReqCreate_Pre_Free_Yes: {
+ /*
+ * While the system has at least one inactive semaphore object available.
+ */
+ /* Nothing to do */
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Free_No: {
+ /*
+ * While the system has no inactive semaphore object available.
+ */
+ i = 0;
+ ctx->seized_objects = T_seize_objects( Create, &i );
+ break;
+ }
+
+ case RtemsSemReqCreate_Pre_Free_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Status_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Status_Ok: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_InvName: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_INVALID_NAME.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_InvNum: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_INVALID_NUMBER.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_NUMBER );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_InvPrio: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_INVALID_PRIORITY.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_PRIORITY );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_NotDef: {
+ /*
+ * The return status of rtems_semaphore_create() shall be
+ * RTEMS_NOT_DEFINED.
+ */
+ T_rsc( ctx->status, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_TooMany: {
+ /*
+ * The return status of rtems_semaphore_create() shall be RTEMS_TOO_MANY.
+ */
+ T_rsc( ctx->status, RTEMS_TOO_MANY );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Name_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Name state
+)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Name_Valid: {
+ /*
+ * The unique object name shall identify the semaphore created by the
+ * rtems_semaphore_create() call.
+ */
+ id = 0;
+ sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
+ T_rsc_success( sc );
+ T_eq_u32( id, ctx->id_value );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Name_Invalid: {
+ /*
+ * The unique object name shall not identify a semaphore.
+ */
+ sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
+ T_rsc( sc, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Name_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_IdVar_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_IdVar state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_IdVar_Set: {
+ /*
+ * The value of the object referenced by the ``id`` parameter shall be
+ * set to the object identifier of the created semaphore after the return
+ * of the rtems_semaphore_create() call.
+ */
+ T_eq_ptr( ctx->id, &ctx->id_value );
+ T_ne_u32( ctx->id_value, INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_IdVar_Nop: {
+ /*
+ * Objects referenced by the ``id`` parameter in past calls to
+ * rtems_semaphore_create() shall not be accessed by the
+ * rtems_semaphore_create() call.
+ */
+ T_eq_u32( ctx->id_value, INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_IdVar_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Variant_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Variant state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Variant_Cnt: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * counting semaphore.
+ */
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_COUNTING );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_Bin: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * binary semaphore not using a locking protocol.
+ */
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_PI: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * binary semaphore using the priority inheritance locking protocol.
+ */
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_PC: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * binary semaphore using the priority ceiling locking protocol.
+ */
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_SB: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * simple binary semaphore.
+ */
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_SIMPLE_BINARY );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_MrsP: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be a
+ * binary semaphore using the MrsP locking protocol.
+ */
+ #if defined(RTEMS_SMP)
+ T_eq_int( ctx->variant, SEMAPHORE_VARIANT_MRSP );
+ #else
+ T_true( false );
+ #endif
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Variant_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Disc_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Disc state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Disc_FIFO: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall use
+ * the FIFO task wait queue discipline.
+ */
+ T_eq_int( ctx->discipline, SEMAPHORE_DISCIPLINE_FIFO );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Disc_Prio: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall use
+ * the priority task wait queue discipline.
+ */
+ T_eq_int( ctx->discipline, SEMAPHORE_DISCIPLINE_PRIORITY );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Disc_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Count_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Count state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Count_Initial: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall have
+ * an initial count equal to the value of the ``count`` parameter.
+ */
+ T_eq_u32( ctx->sem_count, ctx->count );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Count_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Owner_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Owner state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Owner_Caller: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall be
+ * initially owned by the calling task.
+ */
+ T_eq_ptr( ctx->owner, ctx->executing );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Owner_No: {
+ /*
+ * The semaphore created by the rtems_semaphore_create() call shall not
+ * initially have an owner.
+ */
+ T_null( ctx->owner );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Owner_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Post_Prio_Check(
+ RtemsSemReqCreate_Context *ctx,
+ RtemsSemReqCreate_Post_Prio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqCreate_Post_Prio_Ceiling: {
+ /*
+ * The current priority of the task which called rtems_semaphore_create()
+ * shall be equal to the value of the ``priority_ceiling`` parameter.
+ */
+ T_eq_u32( GetSelfPriority(), ctx->priority_ceiling );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Prio_Nop: {
+ /*
+ * The current priority of the task which called rtems_semaphore_create()
+ * shall not be modified by the rtems_semaphore_create() call.
+ */
+ T_eq_u32( GetSelfPriority(), 1 );
+ break;
+ }
+
+ case RtemsSemReqCreate_Post_Prio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqCreate_Setup( RtemsSemReqCreate_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->executing = _Thread_Get_executing();
+}
+
+static void RtemsSemReqCreate_Setup_Wrap( void *arg )
+{
+ RtemsSemReqCreate_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqCreate_Setup( ctx );
+}
+
+static void RtemsSemReqCreate_Prepare( RtemsSemReqCreate_Context *ctx )
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ ctx->id_value = INVALID_ID;
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+
+ id = INVALID_ID;
+ sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
+ T_rsc( sc, RTEMS_INVALID_NAME );
+ T_eq_u32( id, INVALID_ID );
+}
+
+static void RtemsSemReqCreate_Action( RtemsSemReqCreate_Context *ctx )
+{
+ ctx->status = rtems_semaphore_create(
+ ctx->name,
+ ctx->count,
+ ctx->attribute_set,
+ ctx->priority_ceiling,
+ ctx->id
+ );
+
+ GetSemAttributes( ctx );
+}
+
+static void RtemsSemReqCreate_Cleanup( RtemsSemReqCreate_Context *ctx )
+{
+ rtems_status_code sc;
+
+ if ( ctx->id_value != INVALID_ID ) {
+ if ( ctx->count == 0 ) {
+ sc = rtems_semaphore_release( ctx->id_value );
+ T_rsc_success( sc );
+ }
+
+ sc = rtems_semaphore_delete( ctx->id_value );
+ T_rsc_success( sc );
+ }
+
+ T_surrender_objects( &ctx->seized_objects, rtems_semaphore_delete );
+}
+
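+/*
+ * Each entry below encodes the expected post-condition states for one class
+ * of pre-condition state combinations.  The leading bit-fields mark skipped
+ * variants and not-applicable pre-conditions.
+ */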
+static const RtemsSemReqCreate_Entry
+RtemsSemReqCreate_Entries[] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_InvName,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_InvAddr,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_NotDef,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_TooMany,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_InvNum,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Cnt, RtemsSemReqCreate_Post_Disc_FIFO,
+ RtemsSemReqCreate_Post_Count_Initial, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Cnt, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_Initial, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_InvPrio,
+ RtemsSemReqCreate_Post_Name_Invalid, RtemsSemReqCreate_Post_IdVar_Nop,
+ RtemsSemReqCreate_Post_Variant_NA, RtemsSemReqCreate_Post_Disc_NA,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_NA,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_SB, RtemsSemReqCreate_Post_Disc_FIFO,
+ RtemsSemReqCreate_Post_Count_Initial, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_SB, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_Initial, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PI, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Bin, RtemsSemReqCreate_Post_Disc_FIFO,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Bin, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PI, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Bin, RtemsSemReqCreate_Post_Disc_FIFO,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_Bin, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PC, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_MrsP, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PC, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_No,
+ RtemsSemReqCreate_Post_Prio_Nop },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PC, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Ceiling },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_MrsP, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Ceiling }
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RtemsSemReqCreate_Post_Status_Ok,
+ RtemsSemReqCreate_Post_Name_Valid, RtemsSemReqCreate_Post_IdVar_Set,
+ RtemsSemReqCreate_Post_Variant_PC, RtemsSemReqCreate_Post_Disc_Prio,
+ RtemsSemReqCreate_Post_Count_NA, RtemsSemReqCreate_Post_Owner_Caller,
+ RtemsSemReqCreate_Post_Prio_Ceiling }
+#endif
+};
+
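+/*
+ * This table maps the flattened index of each pre-condition state
+ * combination, as enumerated by the nested loops in the test case body, to
+ * an index into RtemsSemReqCreate_Entries[].
+ */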
+static const uint8_t
+RtemsSemReqCreate_Map[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 10, 3, 10, 3, 10, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 18, 3, 7, 3, 7, 3, 2, 2, 2, 2, 2, 2, 19, 3, 7, 3, 7, 3,
+ 11, 3, 11, 3, 11, 3, 12, 3, 12, 3, 12, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 8, 3, 8, 3, 8, 3,
+ 9, 3, 9, 3, 9, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 13, 3, 13, 3, 13, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 16, 3, 16, 3, 7, 3, 2, 2, 2, 2, 2, 2, 17, 3, 17, 3, 7, 3, 14,
+ 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 8, 3, 8, 3, 8, 3, 9,
+ 3, 9, 3, 9, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static size_t RtemsSemReqCreate_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqCreate_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqCreate_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqCreate_Fixture = {
+ .setup = RtemsSemReqCreate_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSemReqCreate_Scope,
+ .initial_context = &RtemsSemReqCreate_Instance
+};
+
+static inline RtemsSemReqCreate_Entry RtemsSemReqCreate_PopEntry(
+ RtemsSemReqCreate_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqCreate_Entries[
+ RtemsSemReqCreate_Map[ index ]
+ ];
+}
+
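+/*
+ * Prepares the pre-condition states selected by ctx->Map.pcs, performs the
+ * rtems_semaphore_create() action, and checks the post-condition states of
+ * the current transition map entry.
+ */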
+static void RtemsSemReqCreate_TestVariant( RtemsSemReqCreate_Context *ctx )
+{
+ RtemsSemReqCreate_Pre_Name_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqCreate_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqCreate_Pre_Count_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqCreate_Pre_Binary_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ RtemsSemReqCreate_Pre_Simple_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ RtemsSemReqCreate_Pre_Inherit_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+ RtemsSemReqCreate_Pre_Ceiling_Prepare( ctx, ctx->Map.pcs[ 6 ] );
+ RtemsSemReqCreate_Pre_MrsP_Prepare( ctx, ctx->Map.pcs[ 7 ] );
+ RtemsSemReqCreate_Pre_Disc_Prepare( ctx, ctx->Map.pcs[ 8 ] );
+ RtemsSemReqCreate_Pre_Prio_Prepare( ctx, ctx->Map.pcs[ 9 ] );
+ RtemsSemReqCreate_Pre_Free_Prepare( ctx, ctx->Map.pcs[ 10 ] );
+ RtemsSemReqCreate_Action( ctx );
+ RtemsSemReqCreate_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsSemReqCreate_Post_Name_Check( ctx, ctx->Map.entry.Post_Name );
+ RtemsSemReqCreate_Post_IdVar_Check( ctx, ctx->Map.entry.Post_IdVar );
+ RtemsSemReqCreate_Post_Variant_Check( ctx, ctx->Map.entry.Post_Variant );
+ RtemsSemReqCreate_Post_Disc_Check( ctx, ctx->Map.entry.Post_Disc );
+ RtemsSemReqCreate_Post_Count_Check( ctx, ctx->Map.entry.Post_Count );
+ RtemsSemReqCreate_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
+ RtemsSemReqCreate_Post_Prio_Check( ctx, ctx->Map.entry.Post_Prio );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqCreate( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqCreate, &RtemsSemReqCreate_Fixture )
+{
+ RtemsSemReqCreate_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqCreate_Pre_Name_Valid;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqCreate_Pre_Name_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqCreate_Pre_Id_Valid;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqCreate_Pre_Id_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqCreate_Pre_Count_Zero;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqCreate_Pre_Count_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = RtemsSemReqCreate_Pre_Binary_Yes;
+ ctx->Map.pcs[ 3 ] < RtemsSemReqCreate_Pre_Binary_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = RtemsSemReqCreate_Pre_Simple_Yes;
+ ctx->Map.pcs[ 4 ] < RtemsSemReqCreate_Pre_Simple_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 5 ] = RtemsSemReqCreate_Pre_Inherit_Yes;
+ ctx->Map.pcs[ 5 ] < RtemsSemReqCreate_Pre_Inherit_NA;
+ ++ctx->Map.pcs[ 5 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 6 ] = RtemsSemReqCreate_Pre_Ceiling_Yes;
+ ctx->Map.pcs[ 6 ] < RtemsSemReqCreate_Pre_Ceiling_NA;
+ ++ctx->Map.pcs[ 6 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 7 ] = RtemsSemReqCreate_Pre_MrsP_Yes;
+ ctx->Map.pcs[ 7 ] < RtemsSemReqCreate_Pre_MrsP_NA;
+ ++ctx->Map.pcs[ 7 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 8 ] = RtemsSemReqCreate_Pre_Disc_FIFO;
+ ctx->Map.pcs[ 8 ] < RtemsSemReqCreate_Pre_Disc_NA;
+ ++ctx->Map.pcs[ 8 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 9 ] = RtemsSemReqCreate_Pre_Prio_LeCur;
+ ctx->Map.pcs[ 9 ] < RtemsSemReqCreate_Pre_Prio_NA;
+ ++ctx->Map.pcs[ 9 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 10 ] = RtemsSemReqCreate_Pre_Free_Yes;
+ ctx->Map.pcs[ 10 ] < RtemsSemReqCreate_Pre_Free_NA;
+ ++ctx->Map.pcs[ 10 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqCreate_PopEntry( ctx );
+ RtemsSemReqCreate_Prepare( ctx );
+ RtemsSemReqCreate_TestVariant( ctx );
+ RtemsSemReqCreate_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-delete.c b/testsuites/validation/tc-sem-delete.c
new file mode 100644
index 0000000000..7c31547eba
--- /dev/null
+++ b/testsuites/validation/tc-sem-delete.c
@@ -0,0 +1,759 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqDelete
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqDelete spec:/rtems/sem/req/delete
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationOneCpu0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqDelete_Pre_Id_NoObj,
+ RtemsSemReqDelete_Pre_Id_Counting,
+ RtemsSemReqDelete_Pre_Id_Simple,
+ RtemsSemReqDelete_Pre_Id_Binary,
+ RtemsSemReqDelete_Pre_Id_PrioCeiling,
+ RtemsSemReqDelete_Pre_Id_PrioInherit,
+ RtemsSemReqDelete_Pre_Id_MrsP,
+ RtemsSemReqDelete_Pre_Id_NA
+} RtemsSemReqDelete_Pre_Id;
+
+typedef enum {
+ RtemsSemReqDelete_Pre_Discipline_FIFO,
+ RtemsSemReqDelete_Pre_Discipline_Priority,
+ RtemsSemReqDelete_Pre_Discipline_NA
+} RtemsSemReqDelete_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner,
+ RtemsSemReqDelete_Pre_State_Zero,
+ RtemsSemReqDelete_Pre_State_Blocked,
+ RtemsSemReqDelete_Pre_State_NA
+} RtemsSemReqDelete_Pre_State;
+
+typedef enum {
+ RtemsSemReqDelete_Post_Status_Ok,
+ RtemsSemReqDelete_Post_Status_InvId,
+ RtemsSemReqDelete_Post_Status_InUse,
+ RtemsSemReqDelete_Post_Status_NA
+} RtemsSemReqDelete_Post_Status;
+
+typedef enum {
+ RtemsSemReqDelete_Post_Name_Valid,
+ RtemsSemReqDelete_Post_Name_Invalid,
+ RtemsSemReqDelete_Post_Name_NA
+} RtemsSemReqDelete_Post_Name;
+
+typedef enum {
+ RtemsSemReqDelete_Post_Flush_FIFO,
+ RtemsSemReqDelete_Post_Flush_Priority,
+ RtemsSemReqDelete_Post_Flush_No,
+ RtemsSemReqDelete_Post_Flush_NA
+} RtemsSemReqDelete_Post_Flush;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Pre_Discipline_NA : 1;
+ uint16_t Pre_State_NA : 1;
+ uint16_t Post_Status : 2;
+ uint16_t Post_Name : 2;
+ uint16_t Post_Flush : 2;
+} RtemsSemReqDelete_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/delete test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the worker task identifiers.
+ */
+ rtems_id worker_id[ 2 ];
+
+ /**
+ * @brief This member contains the worker activity counter.
+ */
+ uint32_t counter;
+
+ /**
+ * @brief This member contains the worker activity counter of a specific
+ * worker.
+ */
+ uint32_t worker_counter[ 2 ];
+
+ /**
+ * @brief This member specifies the expected rtems_semaphore_obtain() status.
+ */
+ rtems_status_code obtain_status;
+
+ /**
+   * @brief This member specifies the initial count of the semaphore.
+ */
+ uint32_t count;
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ /**
+ * @brief This member contains the semaphore identifier.
+ */
+ rtems_id semaphore_id;
+
+ /**
+   * @brief If this member is true, then the ``id`` parameter shall be valid,
+   *   otherwise it shall be set to an invalid object identifier.
+ */
+ bool valid_id;
+
+ /**
+ * @brief If this member is true, then tasks shall be blocked on the
+ * semaphore, otherwise no tasks shall be blocked on the semaphore.
+ */
+ bool blocked;
+
+ /**
+   * @brief This member specifies the ``id`` parameter for the
+ * rtems_semaphore_delete() call.
+ */
+ rtems_id id;
+
+ /**
+   * @brief This member contains the return status of the
+   *   rtems_semaphore_delete() call.
+ */
+ rtems_status_code delete_status;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqDelete_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqDelete_Context;
+
+static RtemsSemReqDelete_Context
+ RtemsSemReqDelete_Instance;
+
+static const char * const RtemsSemReqDelete_PreDesc_Id[] = {
+ "NoObj",
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqDelete_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const RtemsSemReqDelete_PreDesc_State[] = {
+ "GtZeroOrNoOwner",
+ "Zero",
+ "Blocked",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqDelete_PreDesc[] = {
+ RtemsSemReqDelete_PreDesc_Id,
+ RtemsSemReqDelete_PreDesc_Discipline,
+ RtemsSemReqDelete_PreDesc_State,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+#define EVENT_OBTAIN RTEMS_EVENT_0
+
+typedef RtemsSemReqDelete_Context Context;
+
+static void WakeUp( Context *ctx, size_t index )
+{
+ SendEvents( ctx->worker_id[ index ], RTEMS_EVENT_0 );
+}
+
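+/*
+ * A worker blocks on the semaphore and records the order in which the
+ * workers were unblocked.  If the semaphore is deleted while a worker
+ * waits, then the obtain status is checked against the expected status of
+ * the pre-condition variant.
+ */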
+static void Worker( rtems_task_argument arg, size_t index )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+
+ /*
+ * In order to test the flush in FIFO order, we have to use the no-preempt
+ * mode.
+ */
+ SetMode( RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK );
+
+ while ( true ) {
+ rtems_status_code sc;
+ rtems_event_set events;
+ uint32_t counter;
+
+ events = ReceiveAnyEvents();
+ T_eq_u32( events, RTEMS_EVENT_0 );
+
+ sc = rtems_semaphore_obtain(
+ ctx->semaphore_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc( sc, ctx->obtain_status );
+
+ counter = ctx->counter;
+ ++counter;
+ ctx->counter = counter;
+ ctx->worker_counter[ index ] = counter;
+
+ if ( sc == RTEMS_SUCCESSFUL ) {
+ sc = rtems_semaphore_release( ctx->semaphore_id );
+ T_rsc_success( sc );
+ }
+ }
+}
+
+static void WorkerZero( rtems_task_argument arg )
+{
+ Worker( arg, 0 );
+}
+
+static void WorkerOne( rtems_task_argument arg )
+{
+ Worker( arg, 1 );
+}
+
+static void RtemsSemReqDelete_Pre_Id_Prepare(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqDelete_Pre_Id_NoObj: {
+ /*
+ * While the ``id`` parameter is not associated with a semaphore.
+ */
+ ctx->valid_id = false;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_Counting: {
+ /*
+ * While the ``id`` parameter is associated with a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ ctx->obtain_status = RTEMS_OBJECT_WAS_DELETED;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_Simple: {
+ /*
+ * While the ``id`` parameter is associated with a simple binary
+ * semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ ctx->obtain_status = RTEMS_OBJECT_WAS_DELETED;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_Binary: {
+ /*
+ * While the ``id`` parameter is associated with a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_PrioCeiling: {
+ /*
+ * While the ``id`` parameter is associated with a priority ceiling
+ * semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_PrioInherit: {
+ /*
+ * While the ``id`` parameter is associated with a priority inheritance
+ * semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_MrsP: {
+ /*
+ * While the ``id`` parameter is associated with a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Pre_Discipline_Prepare(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqDelete_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Pre_State_Prepare(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Pre_State state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner: {
+ /*
+ * While the semaphore has a count greater than zero or no owner.
+ */
+ ctx->blocked = false;
+ ctx->count = 1;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_State_Zero: {
+ /*
+ * While the semaphore has a count of zero or an owner.
+ */
+ ctx->blocked = false;
+ ctx->count = 0;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_State_Blocked: {
+ /*
+ * While the semaphore has tasks blocked on the semaphore.
+ */
+ ctx->blocked = true;
+ ctx->count = 0;
+ break;
+ }
+
+ case RtemsSemReqDelete_Pre_State_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Post_Status_Check(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqDelete_Post_Status_Ok: {
+ /*
+ * The return status of rtems_semaphore_delete() shall be
+ * RTEMS_SUCCESSFUL.
+ */
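+      /* Invalidate the identifier so that Cleanup() does not delete again */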
+ ctx->semaphore_id = 0;
+ T_rsc_success( ctx->delete_status );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Status_InvId: {
+ /*
+ * The return status of rtems_semaphore_delete() shall be
+ * RTEMS_INVALID_ID.
+ */
+ T_rsc( ctx->delete_status, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Status_InUse: {
+ /*
+ * The return status of rtems_semaphore_delete() shall be
+ * RTEMS_RESOURCE_IN_USE.
+ */
+ T_rsc( ctx->delete_status, RTEMS_RESOURCE_IN_USE );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Post_Name_Check(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Post_Name state
+)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ switch ( state ) {
+ case RtemsSemReqDelete_Post_Name_Valid: {
+ /*
+ * The unique object name shall identify a semaphore.
+ */
+ id = 0;
+ sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
+ T_rsc_success( sc );
+ T_eq_u32( id, ctx->semaphore_id );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Name_Invalid: {
+ /*
+ * The unique object name shall not identify a semaphore.
+ */
+ sc = rtems_semaphore_ident( NAME, RTEMS_SEARCH_LOCAL_NODE, &id );
+ T_rsc( sc, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Name_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Post_Flush_Check(
+ RtemsSemReqDelete_Context *ctx,
+ RtemsSemReqDelete_Post_Flush state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqDelete_Post_Flush_FIFO: {
+ /*
+ * Tasks waiting at the semaphore shall be unblocked in FIFO order.
+ */
+ T_eq_u32( ctx->worker_counter[ 0 ], 1 );
+ T_eq_u32( ctx->worker_counter[ 1 ], 2 );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Flush_Priority: {
+ /*
+ * Tasks waiting at the semaphore shall be unblocked in priority order.
+ */
+ T_eq_u32( ctx->worker_counter[ 0 ], 2 );
+ T_eq_u32( ctx->worker_counter[ 1 ], 1 );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Flush_No: {
+ /*
+ * Tasks waiting at the semaphore shall remain blocked.
+ */
+ T_eq_u32( ctx->worker_counter[ 0 ], 0 );
+ T_eq_u32( ctx->worker_counter[ 1 ], 0 );
+ break;
+ }
+
+ case RtemsSemReqDelete_Post_Flush_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqDelete_Setup( RtemsSemReqDelete_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ SetSelfPriority( PRIO_NORMAL );
+ ctx->worker_id[ 0 ] = CreateTask( "WRK0", PRIO_HIGH );
+ StartTask( ctx->worker_id[ 0 ], WorkerZero, ctx );
+ ctx->worker_id[ 1 ] = CreateTask( "WRK1", PRIO_VERY_HIGH );
+ StartTask( ctx->worker_id[ 1 ], WorkerOne, ctx );
+}
+
+static void RtemsSemReqDelete_Setup_Wrap( void *arg )
+{
+ RtemsSemReqDelete_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqDelete_Setup( ctx );
+}
+
+static void RtemsSemReqDelete_Teardown( RtemsSemReqDelete_Context *ctx )
+{
+ size_t i;
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
+ DeleteTask( ctx->worker_id[ i ] );
+ }
+
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemReqDelete_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqDelete_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqDelete_Teardown( ctx );
+}
+
+static void RtemsSemReqDelete_Prepare( RtemsSemReqDelete_Context *ctx )
+{
+ ctx->counter = 0;
+ ctx->worker_counter[ 0 ] = 0;
+ ctx->worker_counter[ 1 ] = 0;
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+ ctx->valid_id = true;
+ ctx->obtain_status = RTEMS_SUCCESSFUL;
+}
+
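+/*
+ * Create the semaphore according to the pre-condition states, optionally
+ * block both workers on it, and carry out the rtems_semaphore_delete() call
+ * under validation.
+ */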
+static void RtemsSemReqDelete_Action( RtemsSemReqDelete_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ ctx->count,
+ ctx->attribute_set,
+ PRIO_ULTRA_HIGH,
+ &ctx->semaphore_id
+ );
+ T_rsc_success( sc );
+
+ if ( ctx->blocked ) {
+ WakeUp( ctx, 0 );
+ WakeUp( ctx, 1 );
+ }
+
+ if ( ctx->valid_id ) {
+ ctx->id = ctx->semaphore_id;
+ } else {
+ ctx->id = 0;
+ }
+
+ ctx->delete_status = rtems_semaphore_delete( ctx->id );
+}
+
+static void RtemsSemReqDelete_Cleanup( RtemsSemReqDelete_Context *ctx )
+{
+ if ( ctx->semaphore_id != 0 ) {
+ rtems_status_code sc;
+
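+    /*
+     * A binary semaphore created with a count of zero is owned by the
+     * runner.  Release it, otherwise the delete below would fail with
+     * RTEMS_RESOURCE_IN_USE.
+     */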
+ if ( ctx->count == 0 ) {
+ sc = rtems_semaphore_release( ctx->semaphore_id );
+ T_rsc_success( sc );
+ }
+
+ sc = rtems_semaphore_delete( ctx->semaphore_id );
+ T_rsc_success( sc );
+
+ ctx->semaphore_id = 0;
+ }
+}
+
+static const RtemsSemReqDelete_Entry
+RtemsSemReqDelete_Entries[] = {
+ { 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
+ RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_No },
+ { 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_InUse,
+ RtemsSemReqDelete_Post_Name_Valid, RtemsSemReqDelete_Post_Flush_No },
+ { 1, 0, 0, 0, RtemsSemReqDelete_Post_Status_NA,
+ RtemsSemReqDelete_Post_Name_NA, RtemsSemReqDelete_Post_Flush_NA },
+ { 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_InvId,
+ RtemsSemReqDelete_Post_Name_Valid, RtemsSemReqDelete_Post_Flush_No },
+ { 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
+ RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_FIFO },
+ { 0, 0, 0, 0, RtemsSemReqDelete_Post_Status_Ok,
+ RtemsSemReqDelete_Post_Name_Invalid, RtemsSemReqDelete_Post_Flush_Priority }
+};
+
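+/*
+ * This map contains for each of the 7 * 2 * 3 == 42 pre-condition variants
+ * an index into RtemsSemReqDelete_Entries[] which yields the expected
+ * post-condition states.
+ */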
+static const uint8_t
+RtemsSemReqDelete_Map[] = {
+ 3, 3, 3, 3, 3, 3, 0, 0, 4, 0, 0, 5, 0, 0, 4, 0, 0, 5, 0, 1, 1, 0, 1, 1, 2, 2,
+ 2, 0, 1, 1, 2, 2, 2, 0, 1, 1, 2, 2, 2, 0, 1, 1
+};
+
+static size_t RtemsSemReqDelete_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqDelete_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqDelete_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqDelete_Fixture = {
+ .setup = RtemsSemReqDelete_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqDelete_Teardown_Wrap,
+ .scope = RtemsSemReqDelete_Scope,
+ .initial_context = &RtemsSemReqDelete_Instance
+};
+
+static inline RtemsSemReqDelete_Entry RtemsSemReqDelete_PopEntry(
+ RtemsSemReqDelete_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqDelete_Entries[
+ RtemsSemReqDelete_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqDelete_TestVariant( RtemsSemReqDelete_Context *ctx )
+{
+ RtemsSemReqDelete_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqDelete_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqDelete_Pre_State_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqDelete_Action( ctx );
+ RtemsSemReqDelete_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsSemReqDelete_Post_Name_Check( ctx, ctx->Map.entry.Post_Name );
+ RtemsSemReqDelete_Post_Flush_Check( ctx, ctx->Map.entry.Post_Flush );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqDelete( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqDelete, &RtemsSemReqDelete_Fixture )
+{
+ RtemsSemReqDelete_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqDelete_Pre_Id_NoObj;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqDelete_Pre_Id_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqDelete_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqDelete_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqDelete_Pre_State_GtZeroOrNoOwner;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqDelete_Pre_State_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqDelete_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqDelete_Prepare( ctx );
+ RtemsSemReqDelete_TestVariant( ctx );
+ RtemsSemReqDelete_Cleanup( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-flush.c b/testsuites/validation/tc-sem-flush.c
new file mode 100644
index 0000000000..af8f5407cd
--- /dev/null
+++ b/testsuites/validation/tc-sem-flush.c
@@ -0,0 +1,623 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqFlush
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tr-tq-flush-fifo.h"
+#include "tr-tq-flush-priority-inherit.h"
+#include "tr-tq-flush-priority.h"
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqFlush spec:/rtems/sem/req/flush
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Class_Counting,
+ RtemsSemReqFlush_Pre_Class_Simple,
+ RtemsSemReqFlush_Pre_Class_Binary,
+ RtemsSemReqFlush_Pre_Class_PrioCeiling,
+ RtemsSemReqFlush_Pre_Class_PrioInherit,
+ RtemsSemReqFlush_Pre_Class_MrsP,
+ RtemsSemReqFlush_Pre_Class_NA
+} RtemsSemReqFlush_Pre_Class;
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Discipline_FIFO,
+ RtemsSemReqFlush_Pre_Discipline_Priority,
+ RtemsSemReqFlush_Pre_Discipline_NA
+} RtemsSemReqFlush_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Id_Valid,
+ RtemsSemReqFlush_Pre_Id_Invalid,
+ RtemsSemReqFlush_Pre_Id_NA
+} RtemsSemReqFlush_Pre_Id;
+
+typedef enum {
+ RtemsSemReqFlush_Post_Action_InvId,
+ RtemsSemReqFlush_Post_Action_NotDef,
+ RtemsSemReqFlush_Post_Action_FlushFIFO,
+ RtemsSemReqFlush_Post_Action_FlushPriority,
+ RtemsSemReqFlush_Post_Action_FlushPriorityCeiling,
+ RtemsSemReqFlush_Post_Action_FlushPriorityInherit,
+ RtemsSemReqFlush_Post_Action_NA
+} RtemsSemReqFlush_Post_Action;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Class_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Post_Action : 3;
+} RtemsSemReqFlush_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/flush test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
+ */
+  TQContext tq_ctx;
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ /**
+   * @brief This member specifies the initial count of the semaphore.
+ */
+ uint32_t initial_count;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 3 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqFlush_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqFlush_Context;
+
+static RtemsSemReqFlush_Context
+ RtemsSemReqFlush_Instance;
+
+static const char * const RtemsSemReqFlush_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqFlush_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const RtemsSemReqFlush_PreDesc_Id[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqFlush_PreDesc[] = {
+ RtemsSemReqFlush_PreDesc_Class,
+ RtemsSemReqFlush_PreDesc_Discipline,
+ RtemsSemReqFlush_PreDesc_Id,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqFlush_Context Context;
+
+static void EnqueuePrepare( TQContext *tq_ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+}
+
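+/*
+ * Block the calling worker on the semaphore.  A flush unblocks the worker
+ * and the obtain call returns RTEMS_UNSATISFIED.
+ */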
+static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
+{
+ rtems_status_code sc;
+
+ (void) wait;
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc( sc, RTEMS_UNSATISFIED );
+
+ return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
+}
+
+static void Flush( TQContext *tq_ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
+ T_rsc_success( sc );
+}
+
+static void RtemsSemReqFlush_Pre_Class_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_PrioCeiling: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Pre_Discipline_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Pre_Id_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter is associated with the semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Id_Invalid: {
+ /*
+ * While the ``id`` parameter is not associated with a semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Post_Action_Check(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Post_Action state
+)
+{
+ rtems_status_code sc;
+
+ switch ( state ) {
+ case RtemsSemReqFlush_Post_Action_InvId: {
+ /*
+ * The return status of rtems_semaphore_flush() shall be
+ * RTEMS_INVALID_ID.
+ */
+ sc = rtems_semaphore_flush( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_NotDef: {
+ /*
+ * The return status of rtems_semaphore_flush() shall be
+ * RTEMS_NOT_DEFINED.
+ */
+ sc = rtems_semaphore_flush( ctx->tq_ctx.thread_queue_id );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushFIFO: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-fifo.
+ */
+ ScoreTqReqFlushFifo_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriority: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority.
+ */
+ ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, true );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriorityCeiling: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority.
+ */
+ ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, false );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriorityInherit: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority-inherit.
+ */
+ ScoreTqReqFlushPriorityInherit_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Setup( RtemsSemReqFlush_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.enqueue = Enqueue;
+ ctx->tq_ctx.flush = Flush;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqFlush_Setup_Wrap( void *arg )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqFlush_Setup( ctx );
+}
+
+static void RtemsSemReqFlush_Teardown( RtemsSemReqFlush_Context *ctx )
+{
+ TQDestroy( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqFlush_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqFlush_Teardown( ctx );
+}
+
+static void RtemsSemReqFlush_Prepare( RtemsSemReqFlush_Context *ctx )
+{
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+}
+
+static void RtemsSemReqFlush_Action( RtemsSemReqFlush_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ ctx->initial_count,
+ ctx->attribute_set,
+ PRIO_ULTRA_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+}
+
+static void RtemsSemReqFlush_Cleanup( RtemsSemReqFlush_Context *ctx )
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+  T_rsc_success( sc );
+}
+
+static const RtemsSemReqFlush_Entry
+RtemsSemReqFlush_Entries[] = {
+ { 0, 1, 1, 0, RtemsSemReqFlush_Post_Action_InvId },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushFIFO },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriority },
+ { 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityCeiling },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityInherit },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_NotDef }
+#else
+ { 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA }
+#endif
+};
+
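+/*
+ * This map contains for each of the 6 * 2 * 2 == 24 pre-condition variants
+ * an index into RtemsSemReqFlush_Entries[].
+ */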
+static const uint8_t
+RtemsSemReqFlush_Map[] = {
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 3, 0, 4, 0, 3, 0, 5, 0, 3, 0, 6, 0
+};
+
+static size_t RtemsSemReqFlush_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqFlush_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqFlush_Fixture = {
+ .setup = RtemsSemReqFlush_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqFlush_Teardown_Wrap,
+ .scope = RtemsSemReqFlush_Scope,
+ .initial_context = &RtemsSemReqFlush_Instance
+};
+
+static inline RtemsSemReqFlush_Entry RtemsSemReqFlush_PopEntry(
+ RtemsSemReqFlush_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqFlush_Entries[
+ RtemsSemReqFlush_Map[ index ]
+ ];
+}
+
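+/*
+ * Convert the pre-condition indices into pre-condition states.  A
+ * pre-condition marked as not applicable by the current entry is set to the
+ * corresponding NA state.
+ */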
+static void RtemsSemReqFlush_SetPreConditionStates(
+ RtemsSemReqFlush_Context *ctx
+)
+{
+ if ( ctx->Map.entry.Pre_Class_NA ) {
+ ctx->Map.pcs[ 0 ] = RtemsSemReqFlush_Pre_Class_NA;
+ } else {
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ }
+
+ if ( ctx->Map.entry.Pre_Discipline_NA ) {
+ ctx->Map.pcs[ 1 ] = RtemsSemReqFlush_Pre_Discipline_NA;
+ } else {
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ }
+
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+}
+
+static void RtemsSemReqFlush_TestVariant( RtemsSemReqFlush_Context *ctx )
+{
+ RtemsSemReqFlush_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqFlush_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqFlush_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqFlush_Action( ctx );
+ RtemsSemReqFlush_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqFlush( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqFlush, &RtemsSemReqFlush_Fixture )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pci[ 0 ] = RtemsSemReqFlush_Pre_Class_Counting;
+ ctx->Map.pci[ 0 ] < RtemsSemReqFlush_Pre_Class_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = RtemsSemReqFlush_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < RtemsSemReqFlush_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = RtemsSemReqFlush_Pre_Id_Valid;
+ ctx->Map.pci[ 2 ] < RtemsSemReqFlush_Pre_Id_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqFlush_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqFlush_SetPreConditionStates( ctx );
+ RtemsSemReqFlush_Prepare( ctx );
+ RtemsSemReqFlush_TestVariant( ctx );
+ RtemsSemReqFlush_Cleanup( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-ident.c b/testsuites/validation/tc-sem-ident.c
new file mode 100644
index 0000000000..183680fee0
--- /dev/null
+++ b/testsuites/validation/tc-sem-ident.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemValIdent
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-object-ident.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemValIdent spec:/rtems/sem/val/ident
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @brief Test the rtems_semaphore_ident() directive.
+ *
+ * This test case performs the following actions:
+ *
+ * - Run the generic object identification tests for Classic API semaphore
+ * class objects defined by /rtems/req/ident.
+ *
+ * @{
+ */
+
+static rtems_status_code ClassicSemIdentAction(
+ rtems_name name,
+ uint32_t node,
+ rtems_id *id
+)
+{
+ return rtems_semaphore_ident( name, node, id );
+}
+
+/**
+ * @brief Run the generic object identification tests for Classic API semaphore
+ * class objects defined by /rtems/req/ident.
+ */
+static void RtemsSemValIdent_Action_0( void )
+{
+ rtems_status_code sc;
+ rtems_id id_local_object;
+
+ sc = rtems_semaphore_create(
+ ClassicObjectIdentName,
+ 0,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ 0,
+ &id_local_object
+ );
+ T_assert_rsc_success( sc );
+
+ RtemsReqIdent_Run(
+ id_local_object,
+ ClassicSemIdentAction
+ );
+
+ sc = rtems_semaphore_delete( id_local_object );
+ T_rsc_success( sc );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemValIdent( void )
+ */
+T_TEST_CASE( RtemsSemValIdent )
+{
+ RtemsSemValIdent_Action_0();
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-mrsp-obtain.c b/testsuites/validation/tc-sem-mrsp-obtain.c
new file mode 100644
index 0000000000..dcbf7655ab
--- /dev/null
+++ b/testsuites/validation/tc-sem-mrsp-obtain.c
@@ -0,0 +1,1202 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqMrspObtain
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqMrspObtain spec:/rtems/sem/req/mrsp-obtain
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqMrspObtain_Pre_Home_Idle,
+ RtemsSemReqMrspObtain_Pre_Home_Task,
+ RtemsSemReqMrspObtain_Pre_Home_TaskIdle,
+ RtemsSemReqMrspObtain_Pre_Home_Second,
+ RtemsSemReqMrspObtain_Pre_Home_SecondIdle,
+ RtemsSemReqMrspObtain_Pre_Home_NA
+} RtemsSemReqMrspObtain_Pre_Home;
+
+typedef enum {
+ RtemsSemReqMrspObtain_Pre_Helping_Idle,
+ RtemsSemReqMrspObtain_Pre_Helping_Task,
+ RtemsSemReqMrspObtain_Pre_Helping_Helping,
+ RtemsSemReqMrspObtain_Pre_Helping_HelpingIdle,
+ RtemsSemReqMrspObtain_Pre_Helping_Third,
+ RtemsSemReqMrspObtain_Pre_Helping_ThirdIdle,
+ RtemsSemReqMrspObtain_Pre_Helping_NA
+} RtemsSemReqMrspObtain_Pre_Helping;
+
+typedef enum {
+ RtemsSemReqMrspObtain_Pre_PriorityHome_None,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_NewHigh,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_NewEqual,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_SecondHigh,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_SecondEqual,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_SecondLow,
+ RtemsSemReqMrspObtain_Pre_PriorityHome_NA
+} RtemsSemReqMrspObtain_Pre_PriorityHome;
+
+typedef enum {
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_None,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_Helping,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdHigh,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdEqual,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdLow,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_NA
+} RtemsSemReqMrspObtain_Pre_PriorityHelping;
+
+typedef enum {
+ RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Home_TaskIdle,
+ RtemsSemReqMrspObtain_Post_Home_Second,
+ RtemsSemReqMrspObtain_Post_Home_SecondIdle,
+ RtemsSemReqMrspObtain_Post_Home_NA
+} RtemsSemReqMrspObtain_Post_Home;
+
+typedef enum {
+ RtemsSemReqMrspObtain_Post_Helping_Idle,
+ RtemsSemReqMrspObtain_Post_Helping_Task,
+ RtemsSemReqMrspObtain_Post_Helping_TaskIdle,
+ RtemsSemReqMrspObtain_Post_Helping_Helping,
+ RtemsSemReqMrspObtain_Post_Helping_HelpingIdle,
+ RtemsSemReqMrspObtain_Post_Helping_Third,
+ RtemsSemReqMrspObtain_Post_Helping_ThirdIdle,
+ RtemsSemReqMrspObtain_Post_Helping_NA
+} RtemsSemReqMrspObtain_Post_Helping;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Home_NA : 1;
+ uint16_t Pre_Helping_NA : 1;
+ uint16_t Pre_PriorityHome_NA : 1;
+ uint16_t Pre_PriorityHelping_NA : 1;
+ uint16_t Post_Home : 3;
+ uint16_t Post_Helping : 3;
+} RtemsSemReqMrspObtain_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/mrsp-obtain test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
+ */
+ TQContext tq_ctx;
+
+ /**
+ * @brief This member contains the MrsP semaphore to obtain.
+ */
+ rtems_id sema_id;
+
+ /**
+ * @brief This member specifies the scheduler on which the task executes.
+ */
+ rtems_id task_scheduler;
+
+ /**
+ * @brief If this member is true, then the task shall already own a MrsP
+ * semaphore.
+ */
+ bool task_owns_mrsp_semaphore;
+
+ /**
+ * @brief If this member is true, then an idle task shall execute on
+ * scheduler A.
+ */
+ bool scheduler_a_idle;
+
+ /**
+ * @brief If this member is true, then an idle task shall execute on
+ * scheduler B.
+ */
+ bool scheduler_b_idle;
+
+ /**
+ * @brief If this member is true, then the second task shall be active.
+ */
+ bool second_active;
+
+ /**
+ * @brief This member specifies the priority of the second task.
+ */
+ rtems_task_priority second_priority;
+
+ /**
+ * @brief If this member is true, then the third task shall be active.
+ */
+ bool third_active;
+
+ /**
+ * @brief This member specifies the priority of the third task.
+ */
+ rtems_task_priority third_priority;
+
+ /**
+ * @brief If this member is true, then the helping task shall be active.
+ */
+ bool helping_active;
+
+ /**
+ * @brief This member specifies the priority of the MrsP semaphore with
+ * respect to scheduler A.
+ */
+ rtems_task_priority sema_priority_scheduler_a;
+
+ /**
+ * @brief This member specifies the priority of the MrsP semaphore with
+ * respect to scheduler B.
+ */
+ rtems_task_priority sema_priority_scheduler_b;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 4 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqMrspObtain_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqMrspObtain_Context;
+
+static RtemsSemReqMrspObtain_Context
+ RtemsSemReqMrspObtain_Instance;
+
+static const char * const RtemsSemReqMrspObtain_PreDesc_Home[] = {
+ "Idle",
+ "Task",
+ "TaskIdle",
+ "Second",
+ "SecondIdle",
+ "NA"
+};
+
+static const char * const RtemsSemReqMrspObtain_PreDesc_Helping[] = {
+ "Idle",
+ "Task",
+ "Helping",
+ "HelpingIdle",
+ "Third",
+ "ThirdIdle",
+ "NA"
+};
+
+static const char * const RtemsSemReqMrspObtain_PreDesc_PriorityHome[] = {
+ "None",
+ "NewHigh",
+ "NewEqual",
+ "SecondHigh",
+ "SecondEqual",
+ "SecondLow",
+ "NA"
+};
+
+static const char * const RtemsSemReqMrspObtain_PreDesc_PriorityHelping[] = {
+ "None",
+ "Helping",
+ "ThirdHigh",
+ "ThirdEqual",
+ "ThirdLow",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqMrspObtain_PreDesc[] = {
+ RtemsSemReqMrspObtain_PreDesc_Home,
+ RtemsSemReqMrspObtain_PreDesc_Helping,
+ RtemsSemReqMrspObtain_PreDesc_PriorityHome,
+ RtemsSemReqMrspObtain_PreDesc_PriorityHelping,
+ NULL
+};
+
+#define HELPING TQ_BLOCKER_A
+
+#define SECOND TQ_BLOCKER_B
+
+#define THIRD TQ_BLOCKER_C
+
+#define ASSISTANT TQ_BLOCKER_D
+
+#define MOVER TQ_BLOCKER_E
+
+typedef RtemsSemReqMrspObtain_Context Context;
+
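+/*
+ * Set the ceiling priorities of the MrsP semaphore with respect to scheduler
+ * A and scheduler B.
+ */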
+static void SetSemaphorePriority(
+ rtems_id id,
+ rtems_task_priority priority_a,
+ rtems_task_priority priority_b
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ sc = rtems_semaphore_set_priority(
+ id,
+ SCHEDULER_A_ID,
+ priority_a,
+ &priority
+ );
+ T_rsc_success( sc );
+
+ sc = rtems_semaphore_set_priority(
+ id,
+ SCHEDULER_B_ID,
+ priority_b,
+ &priority
+ );
+ T_rsc_success( sc );
+}
+
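+/*
+ * Move the runner to the processor owned by the specified scheduler.  The
+ * mover task busy waits with a very high priority on the processor of the
+ * other scheduler and thereby forces the runner to migrate to the target
+ * processor.
+ */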
+static void MoveToScheduler( Context *ctx, rtems_id scheduler_id )
+{
+ rtems_id other_scheduler_id;
+ uint32_t cpu;
+
+ if ( scheduler_id == SCHEDULER_A_ID ) {
+ other_scheduler_id = SCHEDULER_B_ID;
+ cpu = 0;
+ } else {
+ other_scheduler_id = SCHEDULER_A_ID;
+ cpu = 1;
+ }
+
+ TQSetScheduler( &ctx->tq_ctx, MOVER, other_scheduler_id, PRIO_VERY_HIGH );
+ ctx->tq_ctx.busy_wait[ MOVER ] = true;
+ TQSend( &ctx->tq_ctx, MOVER, TQ_EVENT_BUSY_WAIT );
+ TQWaitForEventsReceived( &ctx->tq_ctx, MOVER );
+ T_eq_u32( rtems_scheduler_get_processor(), cpu );
+ ctx->tq_ctx.busy_wait[ MOVER ] = false;
+ TQWaitForExecutionStop( &ctx->tq_ctx, MOVER );
+}
+
+static void RtemsSemReqMrspObtain_Pre_Home_Prepare(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Pre_Home state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Pre_Home_Idle: {
+ /*
+ * While an idle task executes on the processor owned by the home
+ * scheduler of the obtaining task.
+ */
+ ctx->scheduler_a_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Home_Task: {
+ /*
+ * While the obtaining task executes on the processor owned by the home
+ * scheduler of the obtaining task.
+ */
+ ctx->task_scheduler = SCHEDULER_A_ID;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Home_TaskIdle: {
+ /*
+ * While an idle task on behalf of the obtaining task executes on the
+ * processor owned by the home scheduler of the obtaining task.
+ */
+ ctx->scheduler_a_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Home_Second: {
+ /*
+ * While the second task executes on the processor owned by the home
+ * scheduler of the obtaining task.
+ */
+ ctx->second_active = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Home_SecondIdle: {
+ /*
+ * While an idle task on behalf of the second task executes on the
+ * processor owned by the home scheduler of the obtaining task.
+ */
+ ctx->second_active = true;
+ ctx->scheduler_a_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Home_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqMrspObtain_Pre_Helping_Prepare(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Pre_Helping state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Pre_Helping_Idle: {
+ /*
+ * While an idle task executes on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ ctx->scheduler_b_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_Task: {
+ /*
+ * While the obtaining task executes on the processor owned by the
+ * helping scheduler of the obtaining task.
+ */
+ ctx->task_scheduler = SCHEDULER_B_ID;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_Helping: {
+ /*
+ * While a helping task of the obtaining task executes on the processor
+ * owned by the helping scheduler of the obtaining task.
+ */
+ ctx->task_owns_mrsp_semaphore = true;
+ ctx->helping_active = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_HelpingIdle: {
+ /*
+ * While an idle task on behalf of a helping task of the obtaining task
+ * executes on the processor owned by the helping scheduler of the
+ * obtaining task.
+ */
+ ctx->task_owns_mrsp_semaphore = true;
+ ctx->helping_active = true;
+ ctx->scheduler_b_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_Third: {
+ /*
+ * While the third task executes on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ ctx->third_active = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_ThirdIdle: {
+ /*
+ * While an idle task on behalf of the third task executes on the
+ * processor owned by the helping scheduler of the obtaining task.
+ */
+ ctx->third_active = true;
+ ctx->scheduler_b_idle = true;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_Helping_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqMrspObtain_Pre_PriorityHome_Prepare(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Pre_PriorityHome state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_None: {
+ /*
+ * While no ceiling priority with respect to the home scheduler of the
+ * obtaining task is already available to the task.
+ */
+ ctx->second_priority = PRIO_HIGH;
+ ctx->sema_priority_scheduler_a = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_NewHigh: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the home
+ * scheduler of the obtaining task is higher than the ceiling priorities
+ * already available to the task.
+ */
+ ctx->task_owns_mrsp_semaphore = true;
+ ctx->sema_priority_scheduler_a = PRIO_HIGH;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_NewEqual: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the home
+ * scheduler of the obtaining task is equal to the ceiling priorities
+ * already available to the task.
+ */
+ ctx->task_owns_mrsp_semaphore = true;
+ ctx->sema_priority_scheduler_a = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondHigh: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the home
+ * scheduler of the obtaining task is higher than the priority of the
+ * second task.
+ */
+ ctx->second_priority = PRIO_HIGH;
+ ctx->sema_priority_scheduler_a = PRIO_VERY_HIGH;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondEqual: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the home
+ * scheduler of the obtaining task is equal to the priority of the second
+ * task.
+ */
+ ctx->second_priority = PRIO_HIGH;
+ ctx->sema_priority_scheduler_a = PRIO_HIGH;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_SecondLow: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the home
+ * scheduler of the obtaining task is lower than the priority of the
+ * second task.
+ */
+ ctx->second_priority = PRIO_HIGH;
+ ctx->sema_priority_scheduler_a = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHome_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqMrspObtain_Pre_PriorityHelping_Prepare(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Pre_PriorityHelping state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_None: {
+ /*
+ * While no ceiling priority with respect to the helping scheduler of the
+ * obtaining task is already available to the task.
+ */
+ ctx->sema_priority_scheduler_b = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_Helping: {
+ /*
+ * While ceiling priorities with respect to the helping scheduler of the
+ * obtaining task are already available to the task.
+ */
+ ctx->helping_active = true;
+ ctx->task_owns_mrsp_semaphore = true;
+ ctx->sema_priority_scheduler_b = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdHigh: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the
+ * helping scheduler of the obtaining task is higher than the priority of
+ * the third task.
+ */
+ ctx->third_priority = PRIO_LOW;
+ ctx->sema_priority_scheduler_b = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdEqual: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the
+ * helping scheduler of the obtaining task is equal to the priority of
+ * the third task.
+ */
+ ctx->third_priority = PRIO_NORMAL;
+ ctx->sema_priority_scheduler_b = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_ThirdLow: {
+ /*
+ * While the ceiling priority of the semaphore with respect to the
+ * helping scheduler of the obtaining task is lower than the priority of
+ * the third task.
+ */
+ ctx->third_priority = PRIO_HIGH;
+ ctx->sema_priority_scheduler_b = PRIO_NORMAL;
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Pre_PriorityHelping_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqMrspObtain_Post_Home_Check(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Post_Home state
+)
+{
+ const Per_CPU_Control *cpu;
+ const Thread_Control *scheduled;
+ const Scheduler_Node *scheduler_node;
+ uint32_t task_cpu_index;
+
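+  /*
+   * Processor 0 is owned by the home scheduler (scheduler A).  Its heir is
+   * the thread selected to execute on this processor.
+   */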
+ cpu = _Per_CPU_Get_by_index( 0 );
+ scheduled = cpu->heir;
+ task_cpu_index = rtems_scheduler_get_processor();
+
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Post_Home_Task: {
+ /*
+ * The obtaining task shall execute on the processor owned by the home
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_eq_ptr( scheduled, ctx->tq_ctx.runner_tcb );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Home_TaskIdle: {
+ /*
+ * An idle task on behalf of the obtaining task shall execute on the
+ * processor owned by the home scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 1 );
+ T_true( scheduled->is_idle );
+ scheduler_node = _Thread_Scheduler_get_node_by_index(
+ ctx->tq_ctx.runner_tcb,
+ 0
+ );
+ T_eq_ptr( scheduler_node->user, scheduled );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Home_Second: {
+ /*
+ * The second task shall execute on the processor owned by the home
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 1 );
+ T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ SECOND ] );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Home_SecondIdle: {
+ /*
+ * An idle task on behalf of the second task shall execute on the
+ * processor owned by the home scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 1 );
+ T_true( scheduled->is_idle );
+ scheduler_node = _Thread_Scheduler_get_node_by_index(
+ ctx->tq_ctx.worker_tcb[ SECOND ],
+ 0
+ );
+ T_eq_ptr( scheduler_node->user, scheduled );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Home_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqMrspObtain_Post_Helping_Check(
+ RtemsSemReqMrspObtain_Context *ctx,
+ RtemsSemReqMrspObtain_Post_Helping state
+)
+{
+ const Per_CPU_Control *cpu;
+ const Thread_Control *scheduled;
+ const Scheduler_Node *scheduler_node;
+ uint32_t task_cpu_index;
+
+ cpu = _Per_CPU_Get_by_index( 1 );
+ scheduled = cpu->heir;
+ task_cpu_index = rtems_scheduler_get_processor();
+
+ switch ( state ) {
+ case RtemsSemReqMrspObtain_Post_Helping_Idle: {
+ /*
+ * An idle task shall execute on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_true( scheduled->is_idle );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_Task: {
+ /*
+ * The obtaining task shall execute on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 1 );
+ T_eq_ptr( scheduled, ctx->tq_ctx.runner_tcb );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_TaskIdle: {
+ /*
+ * An idle task on behalf of the obtaining task shall execute on the
+ * processor owned by the helping scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_true( scheduled->is_idle );
+ scheduler_node = _Thread_Scheduler_get_node_by_index(
+ ctx->tq_ctx.runner_tcb,
+ 1
+ );
+ T_eq_ptr( scheduler_node->user, ctx->tq_ctx.runner_tcb );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_Helping: {
+ /*
+ * The helping task shall execute on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ HELPING ] );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_HelpingIdle: {
+ /*
+ * An idle task on behalf of the helping task shall execute on the
+ * processor owned by the helping scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_true( scheduled->is_idle );
+ scheduler_node = _Thread_Scheduler_get_node_by_index(
+ ctx->tq_ctx.worker_tcb[ HELPING ],
+ 1
+ );
+ T_eq_ptr( scheduler_node->user, scheduled );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_Third: {
+ /*
+ * The third task shall execute on the processor owned by the helping
+ * scheduler of the obtaining task.
+ */
+ T_eq_u32( task_cpu_index, 0 );
+ T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ THIRD ] );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_ThirdIdle: {
+ /*
+ * An idle task on behalf of the third task shall execute on the
+ * processor owned by the helping scheduler of the obtaining task.
+ */
+      T_eq_u32( task_cpu_index, 0 );
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_node_by_index(
+ ctx->tq_ctx.worker_tcb[ THIRD ],
+ 1
+ );
+ T_eq_ptr( scheduler_node->user, scheduled );
+ break;
+ }
+
+ case RtemsSemReqMrspObtain_Post_Helping_NA:
+ break;
+ }
+}
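+
+/*
+ * The post-condition checks above follow one pattern, sketched here in
+ * isolation: pick the per-processor control of the scheduler's processor,
+ * look at its heir thread, and compare it against the expected task or an
+ * idle thread.  The cpu_index parameter is a placeholder for the processor
+ * under inspection.
+ */
+static bool RtemsSemReqMrspObtain_IsIdleScheduledOn( uint32_t cpu_index )
+{
+  const Per_CPU_Control *cpu;
+
+  cpu = _Per_CPU_Get_by_index( cpu_index );
+
+  /*
+   * The heir's is_idle flag is true when an idle thread is scheduled on
+   * this processor, possibly on behalf of a sticky MrsP owner which is
+   * blocked.
+   */
+  return cpu->heir->is_idle;
+}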
+
+static void RtemsSemReqMrspObtain_Setup( RtemsSemReqMrspObtain_Context *ctx )
+{
+ rtems_status_code sc;
+ rtems_id mutex_b;
+ rtems_id mutex_c;
+
+ memset( ctx, 0, sizeof( *ctx ) );
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'S', 'E', 'M', 'A' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &ctx->sema_id
+ );
+ T_rsc_success( sc );
+
+ ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+
+ DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] );
+ DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] );
+
+ mutex_b = 0;
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', 'B' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &mutex_b
+ );
+ T_rsc_success( sc );
+
+ mutex_c = 0;
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', 'C' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &mutex_c
+ );
+ T_rsc_success( sc );
+
+ ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] = mutex_b;
+ ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] = mutex_c;
+
+ TQSetScheduler( &ctx->tq_ctx, HELPING, SCHEDULER_B_ID, PRIO_VERY_LOW );
+ TQSetScheduler( &ctx->tq_ctx, THIRD, SCHEDULER_B_ID, PRIO_NORMAL );
+
+ TQMutexObtain( &ctx->tq_ctx, TQ_MUTEX_A );
+ TQSetScheduler( &ctx->tq_ctx, ASSISTANT, SCHEDULER_B_ID, PRIO_VERY_LOW );
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ ASSISTANT,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+
+ SetSemaphorePriority(
+ ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ],
+ PRIO_NORMAL,
+ PRIO_VERY_LOW
+ );
+}
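+
+/*
+ * A minimal sketch of the MrsP configuration pattern used in the setup
+ * above: an MrsP semaphore gets one ceiling priority per scheduler, set
+ * with rtems_semaphore_set_priority().  The parameter names and the chosen
+ * ceiling values are hypothetical.
+ */
+static void RtemsSemReqMrspObtain_ConfigureCeilings(
+  rtems_id sema_id,
+  rtems_id scheduler_a_id,
+  rtems_id scheduler_b_id
+)
+{
+  rtems_status_code   sc;
+  rtems_task_priority old_priority;
+
+  /* ceiling in effect while the owner runs under scheduler A */
+  sc = rtems_semaphore_set_priority(
+    sema_id,
+    scheduler_a_id,
+    PRIO_HIGH,
+    &old_priority
+  );
+  T_rsc_success( sc );
+
+  /* ceiling in effect while the owner runs under scheduler B */
+  sc = rtems_semaphore_set_priority(
+    sema_id,
+    scheduler_b_id,
+    PRIO_NORMAL,
+    &old_priority
+  );
+  T_rsc_success( sc );
+}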
+
+static void RtemsSemReqMrspObtain_Setup_Wrap( void *arg )
+{
+ RtemsSemReqMrspObtain_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqMrspObtain_Setup( ctx );
+}
+
+static void RtemsSemReqMrspObtain_Teardown(
+ RtemsSemReqMrspObtain_Context *ctx
+)
+{
+ TQMutexRelease( &ctx->tq_ctx, TQ_MUTEX_A );
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ ASSISTANT,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ TQDestroy( &ctx->tq_ctx );
+ DeleteMutex( ctx->sema_id );
+}
+
+static void RtemsSemReqMrspObtain_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqMrspObtain_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqMrspObtain_Teardown( ctx );
+}
+
+static void RtemsSemReqMrspObtain_Prepare( RtemsSemReqMrspObtain_Context *ctx )
+{
+ ctx->task_scheduler = INVALID_ID;
+ ctx->task_owns_mrsp_semaphore = false;
+ ctx->scheduler_a_idle = false;
+ ctx->scheduler_b_idle = false;
+ ctx->helping_active = false;
+ ctx->second_active = false;
+ ctx->third_active = false;
+}
+
+static void RtemsSemReqMrspObtain_Action( RtemsSemReqMrspObtain_Context *ctx )
+{
+ if ( ctx->task_owns_mrsp_semaphore ) {
+ TQMutexObtain( &ctx->tq_ctx, TQ_MUTEX_B );
+ }
+
+ if ( ctx->helping_active ) {
+ T_true( ctx->task_owns_mrsp_semaphore );
+
+ TQSendAndWaitForIntendToBlock(
+ &ctx->tq_ctx,
+ HELPING,
+ TQ_EVENT_MUTEX_B_OBTAIN
+ );
+
+ if ( ctx->scheduler_b_idle ) {
+ SuspendTask( ctx->tq_ctx.worker_id[ HELPING ] );
+ }
+ }
+
+ if ( ctx->scheduler_a_idle || ctx->second_active ) {
+ MoveToScheduler( ctx, SCHEDULER_B_ID );
+ }
+
+ if ( ctx->second_active ) {
+ T_false( ctx->third_active );
+
+ TQSetPriority( &ctx->tq_ctx, SECOND, ctx->second_priority );
+
+ if ( ctx->scheduler_a_idle ) {
+ SetSemaphorePriority(
+ ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ],
+ ctx->second_priority,
+ ctx->second_priority
+ );
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ SECOND,
+ TQ_EVENT_MUTEX_C_OBTAIN
+ );
+ } else {
+ ctx->tq_ctx.busy_wait[ SECOND ] = true;
+ TQSend( &ctx->tq_ctx, SECOND, TQ_EVENT_BUSY_WAIT );
+ TQWaitForEventsReceived( &ctx->tq_ctx, SECOND );
+ }
+ }
+
+ if ( ctx->third_active ) {
+ T_false( ctx->second_active );
+
+ TQSetPriority( &ctx->tq_ctx, THIRD, ctx->third_priority );
+
+ if ( ctx->scheduler_b_idle ) {
+ SetSemaphorePriority(
+ ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ],
+ ctx->third_priority,
+ ctx->third_priority
+ );
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ THIRD,
+ TQ_EVENT_MUTEX_C_OBTAIN
+ );
+ } else {
+ ctx->tq_ctx.busy_wait[ THIRD ] = true;
+ TQSend( &ctx->tq_ctx, THIRD, TQ_EVENT_BUSY_WAIT );
+ TQWaitForEventsReceived( &ctx->tq_ctx, THIRD );
+ }
+ }
+
+ SetSemaphorePriority(
+ ctx->sema_id,
+ ctx->sema_priority_scheduler_a,
+ ctx->sema_priority_scheduler_b
+ );
+ ObtainMutex( ctx->sema_id );
+}
+
+static void RtemsSemReqMrspObtain_Cleanup( RtemsSemReqMrspObtain_Context *ctx )
+{
+ ReleaseMutex( ctx->sema_id );
+
+ if ( ctx->task_owns_mrsp_semaphore ) {
+ TQMutexRelease( &ctx->tq_ctx, TQ_MUTEX_B );
+ }
+
+ if ( ctx->second_active ) {
+ MoveToScheduler( ctx, SCHEDULER_B_ID );
+
+ if ( ctx->scheduler_a_idle ) {
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ SECOND,
+ TQ_EVENT_MUTEX_C_RELEASE
+ );
+ } else {
+ ctx->tq_ctx.busy_wait[ SECOND ] = false;
+ TQWaitForExecutionStop( &ctx->tq_ctx, SECOND );
+ }
+ }
+
+ if ( ctx->third_active ) {
+ MoveToScheduler( ctx, SCHEDULER_A_ID );
+
+ if ( ctx->scheduler_b_idle ) {
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ THIRD,
+ TQ_EVENT_MUTEX_C_RELEASE
+ );
+ } else {
+ ctx->tq_ctx.busy_wait[ THIRD ] = false;
+ TQWaitForExecutionStop( &ctx->tq_ctx, THIRD );
+ }
+ }
+
+ if ( ctx->helping_active ) {
+ MoveToScheduler( ctx, SCHEDULER_A_ID );
+
+ if ( ctx->scheduler_b_idle ) {
+ ResumeTask( ctx->tq_ctx.worker_id[ HELPING ] );
+ }
+
+ TQSendAndWaitForExecutionStop(
+ &ctx->tq_ctx,
+ HELPING,
+ TQ_EVENT_MUTEX_B_RELEASE
+ );
+ }
+
+ MoveToScheduler( ctx, SCHEDULER_A_ID );
+}
+
+static const RtemsSemReqMrspObtain_Entry
+RtemsSemReqMrspObtain_Entries[] = {
+ { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
+ RtemsSemReqMrspObtain_Post_Helping_NA },
+ { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
+ RtemsSemReqMrspObtain_Post_Helping_NA },
+ { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
+ RtemsSemReqMrspObtain_Post_Helping_NA },
+ { 1, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_NA,
+ RtemsSemReqMrspObtain_Post_Helping_NA },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Helping_Third },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Helping_ThirdIdle },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_TaskIdle,
+ RtemsSemReqMrspObtain_Post_Helping_Task },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Second,
+ RtemsSemReqMrspObtain_Post_Helping_Task },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_SecondIdle,
+ RtemsSemReqMrspObtain_Post_Helping_Task },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Helping_Helping },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Helping_HelpingIdle },
+ { 0, 0, 0, 0, 0, RtemsSemReqMrspObtain_Post_Home_Task,
+ RtemsSemReqMrspObtain_Post_Helping_Idle }
+};
+
+static const uint8_t
+RtemsSemReqMrspObtain_Map[] = {
+ 2, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 1, 3, 0, 1, 1, 1, 3,
+ 0, 1, 1, 1, 6, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 1, 1, 1, 3, 3,
+ 1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 1,
+ 1, 1, 3, 3, 1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11,
+ 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 1, 3, 0, 1, 1, 1, 3, 0,
+ 1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 3, 3, 1, 1, 1, 3, 3, 1,
+ 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 9, 1, 1, 1, 0, 9, 1, 1, 1, 3, 3, 1, 1,
+ 1, 3, 3, 1, 1, 1, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 10, 1, 1, 1, 0, 10, 1, 1,
+ 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 4, 0, 4, 4, 4, 0, 4, 4, 4, 4,
+ 0, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 0, 5, 5, 5, 0,
+ 5, 5, 5, 5, 0, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0, 6, 1, 1, 1, 0, 6, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1,
+ 1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 0, 3, 1, 1, 1,
+ 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 0,
+ 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 2,
+ 2, 2, 2, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 2, 2,
+ 2, 2, 0, 2, 2, 2, 2, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 3, 3, 3, 3, 2, 0, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1,
+ 7, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 1, 1, 1, 7, 7, 1, 1, 1, 7,
+ 7, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2,
+ 1, 1, 1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1,
+ 1, 1, 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 2, 0, 1, 1, 1, 8, 0,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 1, 1, 1, 8, 8, 1, 1, 1, 8, 8, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 1, 1, 2, 2, 1, 1,
+ 1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 1,
+ 0, 2, 1, 1, 1, 0, 2, 1, 1, 1, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+};
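+
+/*
+ * A sketch of the transition map layout, assuming the row-major order
+ * implied by the nested pre-condition loops in the test case body below
+ * and that each _NA enumerator equals the number of tested states: the map
+ * stores one byte per variant, and that byte selects one of the
+ * deduplicated entries above.
+ */
+static size_t RtemsSemReqMrspObtain_VariantIndex(
+  size_t home,
+  size_t helping,
+  size_t priority_home,
+  size_t priority_helping
+)
+{
+  return ( ( home * RtemsSemReqMrspObtain_Pre_Helping_NA + helping )
+      * RtemsSemReqMrspObtain_Pre_PriorityHome_NA + priority_home )
+    * RtemsSemReqMrspObtain_Pre_PriorityHelping_NA + priority_helping;
+}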
+
+static size_t RtemsSemReqMrspObtain_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqMrspObtain_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqMrspObtain_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqMrspObtain_Fixture = {
+ .setup = RtemsSemReqMrspObtain_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqMrspObtain_Teardown_Wrap,
+ .scope = RtemsSemReqMrspObtain_Scope,
+ .initial_context = &RtemsSemReqMrspObtain_Instance
+};
+
+static inline RtemsSemReqMrspObtain_Entry RtemsSemReqMrspObtain_PopEntry(
+ RtemsSemReqMrspObtain_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqMrspObtain_Entries[
+ RtemsSemReqMrspObtain_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqMrspObtain_TestVariant(
+ RtemsSemReqMrspObtain_Context *ctx
+)
+{
+ RtemsSemReqMrspObtain_Pre_Home_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqMrspObtain_Pre_Helping_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqMrspObtain_Pre_PriorityHome_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqMrspObtain_Pre_PriorityHelping_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ RtemsSemReqMrspObtain_Action( ctx );
+ RtemsSemReqMrspObtain_Post_Home_Check( ctx, ctx->Map.entry.Post_Home );
+ RtemsSemReqMrspObtain_Post_Helping_Check( ctx, ctx->Map.entry.Post_Helping );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqMrspObtain( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqMrspObtain, &RtemsSemReqMrspObtain_Fixture )
+{
+ RtemsSemReqMrspObtain_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqMrspObtain_Pre_Home_Idle;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqMrspObtain_Pre_Home_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqMrspObtain_Pre_Helping_Idle;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqMrspObtain_Pre_Helping_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqMrspObtain_Pre_PriorityHome_None;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqMrspObtain_Pre_PriorityHome_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = RtemsSemReqMrspObtain_Pre_PriorityHelping_None;
+ ctx->Map.pcs[ 3 ] < RtemsSemReqMrspObtain_Pre_PriorityHelping_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqMrspObtain_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqMrspObtain_Prepare( ctx );
+ RtemsSemReqMrspObtain_TestVariant( ctx );
+ RtemsSemReqMrspObtain_Cleanup( ctx );
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-obtain.c b/testsuites/validation/tc-sem-obtain.c
new file mode 100644
index 0000000000..b07421f186
--- /dev/null
+++ b/testsuites/validation/tc-sem-obtain.c
@@ -0,0 +1,728 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqObtain
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tr-mtx-seize-try.h"
+#include "tr-mtx-seize-wait.h"
+#include "tr-sem-seize-try.h"
+#include "tr-sem-seize-wait.h"
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqObtain spec:/rtems/sem/req/obtain
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqObtain_Pre_Class_Counting,
+ RtemsSemReqObtain_Pre_Class_Simple,
+ RtemsSemReqObtain_Pre_Class_Binary,
+ RtemsSemReqObtain_Pre_Class_PrioCeiling,
+ RtemsSemReqObtain_Pre_Class_PrioInherit,
+ RtemsSemReqObtain_Pre_Class_MrsP,
+ RtemsSemReqObtain_Pre_Class_NA
+} RtemsSemReqObtain_Pre_Class;
+
+typedef enum {
+ RtemsSemReqObtain_Pre_Discipline_FIFO,
+ RtemsSemReqObtain_Pre_Discipline_Priority,
+ RtemsSemReqObtain_Pre_Discipline_NA
+} RtemsSemReqObtain_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqObtain_Pre_Id_Valid,
+ RtemsSemReqObtain_Pre_Id_Invalid,
+ RtemsSemReqObtain_Pre_Id_NA
+} RtemsSemReqObtain_Pre_Id;
+
+typedef enum {
+ RtemsSemReqObtain_Pre_Wait_No,
+ RtemsSemReqObtain_Pre_Wait_Timeout,
+ RtemsSemReqObtain_Pre_Wait_Forever,
+ RtemsSemReqObtain_Pre_Wait_NA
+} RtemsSemReqObtain_Pre_Wait;
+
+typedef enum {
+ RtemsSemReqObtain_Post_Action_InvId,
+ RtemsSemReqObtain_Post_Action_SemSeizeTry,
+ RtemsSemReqObtain_Post_Action_SemSeizeWait,
+ RtemsSemReqObtain_Post_Action_MtxSeizeTry,
+ RtemsSemReqObtain_Post_Action_MtxSeizeWait,
+ RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry,
+ RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait,
+ RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry,
+ RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait,
+ RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry,
+ RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait,
+ RtemsSemReqObtain_Post_Action_NA
+} RtemsSemReqObtain_Post_Action;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Class_NA : 1;
+ uint16_t Pre_Discipline_NA : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Pre_Wait_NA : 1;
+ uint16_t Post_Action : 4;
+} RtemsSemReqObtain_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/obtain test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
+ */
+ union {
+ TQContext tq_ctx;
+ TQMtxContext tq_mtx_ctx;
+ TQSemContext tq_sem_ctx;
+ };
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 4 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqObtain_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqObtain_Context;
+
+static RtemsSemReqObtain_Context
+ RtemsSemReqObtain_Instance;
+
+static const char * const RtemsSemReqObtain_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqObtain_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const RtemsSemReqObtain_PreDesc_Id[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSemReqObtain_PreDesc_Wait[] = {
+ "No",
+ "Timeout",
+ "Forever",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqObtain_PreDesc[] = {
+ RtemsSemReqObtain_PreDesc_Class,
+ RtemsSemReqObtain_PreDesc_Discipline,
+ RtemsSemReqObtain_PreDesc_Id,
+ RtemsSemReqObtain_PreDesc_Wait,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqObtain_Context Context;
+
+static void RtemsSemReqObtain_Pre_Class_Prepare(
+ RtemsSemReqObtain_Context *ctx,
+ RtemsSemReqObtain_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqObtain_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_PrioCeiling: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Class_NA:
+ break;
+ }
+}
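+
+/*
+ * A sketch of how one of the classes prepared above turns into a concrete
+ * object: the accumulated attribute set is passed to
+ * rtems_semaphore_create().  The object name and the initial count are
+ * hypothetical.
+ */
+static rtems_id RtemsSemReqObtain_CreatePrioInheritMutex( void )
+{
+  rtems_status_code sc;
+  rtems_id          id;
+
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'D', 'E', 'M', 'O' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
+    0,
+    &id
+  );
+  T_rsc_success( sc );
+
+  return id;
+}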
+
+static void RtemsSemReqObtain_Pre_Discipline_Prepare(
+ RtemsSemReqObtain_Context *ctx,
+ RtemsSemReqObtain_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqObtain_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqObtain_Pre_Id_Prepare(
+ RtemsSemReqObtain_Context *ctx,
+ RtemsSemReqObtain_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqObtain_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter is associated with the semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Id_Invalid: {
+ /*
+ * While the ``id`` parameter is not associated with a semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqObtain_Pre_Wait_Prepare(
+ RtemsSemReqObtain_Context *ctx,
+ RtemsSemReqObtain_Pre_Wait state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqObtain_Pre_Wait_No: {
+ /*
+ * While the ``option_set`` parameter indicates the RTEMS_NO_WAIT option.
+ */
+ ctx->tq_ctx.wait = TQ_NO_WAIT;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Wait_Timeout: {
+ /*
+ * While the ``option_set`` parameter indicates the RTEMS_WAIT option,
+ * while the ``timeout`` parameter is not equal to RTEMS_NO_TIMEOUT.
+ */
+ ctx->tq_ctx.wait = TQ_WAIT_TIMED;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Wait_Forever: {
+ /*
+ * While the ``option_set`` parameter indicates the RTEMS_WAIT option,
+ * while the ``timeout`` parameter is equal to RTEMS_NO_TIMEOUT.
+ */
+ ctx->tq_ctx.wait = TQ_WAIT_FOREVER;
+ break;
+ }
+
+ case RtemsSemReqObtain_Pre_Wait_NA:
+ break;
+ }
+}
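+
+/*
+ * The three wait states prepared above correspond to the following
+ * rtems_semaphore_obtain() calls; the id parameter and the 100 tick
+ * timeout are hypothetical.
+ */
+static void RtemsSemReqObtain_WaitModes( rtems_id id )
+{
+  rtems_status_code sc;
+
+  /* no wait: returns RTEMS_UNSATISFIED if the semaphore is unavailable */
+  sc = rtems_semaphore_obtain( id, RTEMS_NO_WAIT, 0 );
+  (void) sc;
+
+  /* timed wait: returns RTEMS_TIMEOUT after 100 clock ticks */
+  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, 100 );
+  (void) sc;
+
+  /* wait forever */
+  sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
+  (void) sc;
+}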
+
+static void RtemsSemReqObtain_Post_Action_Check(
+ RtemsSemReqObtain_Context *ctx,
+ RtemsSemReqObtain_Post_Action state
+)
+{
+ rtems_status_code sc;
+
+ switch ( state ) {
+ case RtemsSemReqObtain_Post_Action_InvId: {
+ /*
+ * The return status of rtems_semaphore_obtain() shall be
+ * RTEMS_INVALID_ID.
+ */
+ sc = rtems_semaphore_obtain( 0xffffffff, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
+ T_rsc( sc, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_SemSeizeTry: {
+ /*
+ * The calling task shall try to seize the semaphore as specified by
+ * /score/sem/req/seize-try.
+ */
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSeizeTry_Run( &ctx->tq_sem_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_SemSeizeWait: {
+ /*
+ * The calling task shall wait to seize the semaphore as specified by
+ * /score/sem/req/seize-wait.
+ */
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSeizeWait_Run( &ctx->tq_sem_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_MtxSeizeTry: {
+ /*
+ * The calling task shall try to seize the mutex as specified by
+ * /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
+ * allowed, and no locking protocol is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_MtxSeizeWait: {
+ /*
+ * The calling task shall wait to seize the mutex as specified by
+ * /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
+ * is allowed, and no locking protocol is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry: {
+ /*
+ * The calling task shall try to seize the mutex as specified by
+ * /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
+ * allowed, and a priority inheritance protocol is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait: {
+ /*
+ * The calling task shall wait to seize the mutex as specified by
+ * /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
+ * is allowed, and a priority inheritance protocol is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry: {
+ /*
+ * The calling task shall try to seize the mutex as specified by
+ * /score/mtx/req/seize-try where an enqueue blocks, a recursive seize is
+ * allowed, and a priority ceiling is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait: {
+ /*
+ * The calling task shall wait to seize the mutex as specified by
+ * /score/mtx/req/seize-wait where an enqueue blocks, a recursive seize
+ * is allowed, and a priority ceiling is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry: {
+ /*
+ * The calling task shall try to seize the mutex as specified by
+ * /score/mtx/req/seize-try where an enqueue is sticky, a recursive seize
+ * returns an error status, and a priority ceiling is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSeizeTry_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait: {
+ /*
+ * The calling task shall wait to seize the mutex as specified by
+ * /score/mtx/req/seize-wait where an enqueue is sticky, a recursive
+ * seize returns an error status, and a priority ceiling is used.
+ */
+ ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSeizeWait_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqObtain_Post_Action_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqObtain_Setup( RtemsSemReqObtain_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqObtain_Setup_Wrap( void *arg )
+{
+ RtemsSemReqObtain_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqObtain_Setup( ctx );
+}
+
+static void RtemsSemReqObtain_Teardown( RtemsSemReqObtain_Context *ctx )
+{
+ TQDestroy( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqObtain_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqObtain_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqObtain_Teardown( ctx );
+}
+
+static void RtemsSemReqObtain_Prepare( RtemsSemReqObtain_Context *ctx )
+{
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+}
+
+static void RtemsSemReqObtain_Action( RtemsSemReqObtain_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ 1,
+ ctx->attribute_set,
+ PRIO_VERY_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ rtems_task_priority prio;
+
+ sc = rtems_semaphore_set_priority(
+ ctx->tq_ctx.thread_queue_id,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH,
+ &prio
+ );
+ T_rsc_success( sc );
+ }
+}
+
+static void RtemsSemReqObtain_Cleanup( RtemsSemReqObtain_Context *ctx )
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+  T_rsc_success( sc );
+}
+
+static const RtemsSemReqObtain_Entry
+RtemsSemReqObtain_Entries[] = {
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InvId },
+ { 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_SemSeizeWait },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_SemSeizeTry },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MtxSeizeWait },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InvId },
+#else
+ { 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
+#endif
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MtxSeizeTry },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_CeilingMtxSeizeWait },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InheritMtxSeizeWait },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MrsPMtxSeizeWait },
+#else
+ { 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA },
+#endif
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_CeilingMtxSeizeTry },
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_InheritMtxSeizeTry },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_MrsPMtxSeizeTry }
+#else
+ { 1, 0, 0, 0, 0, RtemsSemReqObtain_Post_Action_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSemReqObtain_Map[] = {
+ 3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 3, 2, 2, 0, 0, 0, 6, 4,
+ 4, 0, 0, 0, 6, 4, 4, 0, 0, 0, 1, 1, 1, 1, 1, 1, 10, 7, 7, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 11, 8, 8, 0, 0, 0, 1, 1, 1, 1, 1, 1, 12, 9, 9, 5, 5, 5
+};
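+
+/*
+ * A consistency sketch: the transition map provides exactly one entry per
+ * variant enumerated by the nested pre-condition loops below, here 6
+ * classes times 2 disciplines times 2 id states times 3 wait states.
+ */
+RTEMS_STATIC_ASSERT(
+  RTEMS_ARRAY_SIZE( RtemsSemReqObtain_Map ) == 6 * 2 * 2 * 3,
+  RtemsSemReqObtain_Map_size
+);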
+
+static size_t RtemsSemReqObtain_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqObtain_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqObtain_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqObtain_Fixture = {
+ .setup = RtemsSemReqObtain_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqObtain_Teardown_Wrap,
+ .scope = RtemsSemReqObtain_Scope,
+ .initial_context = &RtemsSemReqObtain_Instance
+};
+
+static inline RtemsSemReqObtain_Entry RtemsSemReqObtain_PopEntry(
+ RtemsSemReqObtain_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqObtain_Entries[
+ RtemsSemReqObtain_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqObtain_TestVariant( RtemsSemReqObtain_Context *ctx )
+{
+ RtemsSemReqObtain_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqObtain_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqObtain_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqObtain_Pre_Wait_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ RtemsSemReqObtain_Action( ctx );
+ RtemsSemReqObtain_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqObtain( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqObtain, &RtemsSemReqObtain_Fixture )
+{
+ RtemsSemReqObtain_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqObtain_Pre_Class_Counting;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqObtain_Pre_Class_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqObtain_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqObtain_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqObtain_Pre_Id_Valid;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqObtain_Pre_Id_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = RtemsSemReqObtain_Pre_Wait_No;
+ ctx->Map.pcs[ 3 ] < RtemsSemReqObtain_Pre_Wait_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqObtain_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqObtain_Prepare( ctx );
+ RtemsSemReqObtain_TestVariant( ctx );
+ RtemsSemReqObtain_Cleanup( ctx );
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-performance.c b/testsuites/validation/tc-sem-performance.c
new file mode 100644
index 0000000000..5bbff6a587
--- /dev/null
+++ b/testsuites/validation/tc-sem-performance.c
@@ -0,0 +1,890 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemValPerf
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemValPerf spec:/rtems/sem/val/perf
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesPerformanceNoClock0
+ *
+ * @brief This test case provides a context to run @ref RTEMSAPIClassicSem
+ * performance tests.
+ *
+ * @{
+ */
+
+/**
+ * @brief Test context for spec:/rtems/sem/val/perf test case.
+ */
+typedef struct {
+ /**
+ * @brief This member provides a mutex identifier.
+ */
+ rtems_id mutex_id;
+
+ /**
+ * @brief This member provides a worker identifier.
+ */
+ rtems_id worker_id;
+
+ /**
+ * @brief This member provides a status code.
+ */
+ rtems_status_code status;
+
+ /**
+ * @brief This member references the measure runtime context.
+ */
+ T_measure_runtime_context *context;
+
+ /**
+ * @brief This member provides the measure runtime request.
+ */
+ T_measure_runtime_request request;
+
+ /**
+ * @brief This member provides an optional measurement begin time point.
+ */
+ T_ticks begin;
+
+ /**
+ * @brief This member provides an optional measurement end time point.
+ */
+ T_ticks end;
+} RtemsSemValPerf_Context;
+
+static RtemsSemValPerf_Context
+ RtemsSemValPerf_Instance;
+
+typedef RtemsSemValPerf_Context Context;
+
+typedef enum {
+ EVENT_END = RTEMS_EVENT_0,
+ EVENT_OBTAIN = RTEMS_EVENT_1,
+ EVENT_OBTAIN_END = RTEMS_EVENT_2,
+ EVENT_RELEASE = RTEMS_EVENT_3,
+ EVENT_RELEASE_END = RTEMS_EVENT_4
+} Event;
+
+static void Send( const Context *ctx, rtems_event_set events )
+{
+ SendEvents( ctx->worker_id, events );
+}
+
+static void Worker( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+
+ while ( true ) {
+ rtems_event_set events;
+ rtems_status_code sc;
+ T_ticks ticks;
+
+ sc = rtems_event_receive(
+ RTEMS_ALL_EVENTS,
+ RTEMS_EVENT_ANY | RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT,
+ &events
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+
+ if ( ( events & EVENT_OBTAIN ) != 0 ) {
+ sc = rtems_semaphore_obtain(
+ ctx->mutex_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_OBTAIN_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+ }
+
+ if ( ( events & EVENT_RELEASE ) != 0 ) {
+ sc = rtems_semaphore_release( ctx->mutex_id );
+ ticks = T_tick();
+ T_quiet_rsc_success( sc );
+
+ if ( ( events & EVENT_RELEASE_END ) != 0 ) {
+ ctx->end = ticks;
+ }
+ }
+ }
+}
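+
+/*
+ * A sketch of the handshake driving the worker above, with a hypothetical
+ * caller: pairing an action event with the matching *_END event makes the
+ * worker store in ctx->end the T_tick() time point taken right after the
+ * action, so the caller can compute a cross-task duration.
+ */
+static void SketchMeasureObtain( Context *ctx )
+{
+  ctx->begin = T_tick();
+  Send( ctx, EVENT_OBTAIN | EVENT_OBTAIN_END );
+
+  /*
+   * The higher priority worker preempted us, obtained the mutex, and
+   * recorded ctx->end; let it release the mutex again.
+   */
+  Send( ctx, EVENT_RELEASE );
+}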
+
+static void RtemsSemValPerf_Setup_Context( RtemsSemValPerf_Context *ctx )
+{
+ T_measure_runtime_config config;
+
+ memset( &config, 0, sizeof( config ) );
+ config.sample_count = 100;
+ ctx->request.arg = ctx;
+ ctx->request.flags = T_MEASURE_RUNTIME_REPORT_SAMPLES;
+ ctx->context = T_measure_runtime_create( &config );
+ T_assert_not_null( ctx->context );
+}
+
+/**
+ * @brief Create a mutex and a worker task.
+ */
+static void RtemsSemValPerf_Setup( RtemsSemValPerf_Context *ctx )
+{
+ SetSelfPriority( PRIO_NORMAL );
+ ctx->mutex_id = CreateMutex();
+ ctx->worker_id = CreateTask( "WORK", PRIO_HIGH );
+ StartTask( ctx->worker_id, Worker, ctx );
+}
+
+static void RtemsSemValPerf_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValPerf_Setup_Context( ctx );
+ RtemsSemValPerf_Setup( ctx );
+}
+
+/**
+ * @brief Delete the worker task and the mutex.
+ */
+static void RtemsSemValPerf_Teardown( RtemsSemValPerf_Context *ctx )
+{
+ DeleteTask( ctx->worker_id );
+ DeleteMutex( ctx->mutex_id );
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemValPerf_Teardown_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValPerf_Teardown( ctx );
+}
+
+static T_fixture RtemsSemValPerf_Fixture = {
+ .setup = RtemsSemValPerf_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemValPerf_Teardown_Wrap,
+ .scope = NULL,
+ .initial_context = &RtemsSemValPerf_Instance
+};
+
+/**
+ * @brief Obtain the available mutex.
+ */
+static void RtemsSemReqPerfMtxPiObtain_Body( RtemsSemValPerf_Context *ctx )
+{
+ ctx->status = rtems_semaphore_obtain(
+ ctx->mutex_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+}
+
+static void RtemsSemReqPerfMtxPiObtain_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiObtain_Body( ctx );
+}
+
+/**
+ * @brief Release the mutex. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiObtain_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ ReleaseMutex( ctx->mutex_id );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiObtain_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiObtain_Teardown( ctx, delta, tic, toc, retry );
+}
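+
+/*
+ * A sketch of the sample-validity rule used by all teardown hooks in this
+ * file: tic and toc are the clock tick counts before and after the body,
+ * so returning tic == toc accepts a sample only if no clock tick handler
+ * ran during the measurement.  MyBody and MyTeardown are hypothetical and
+ * would be registered via ctx->request.body and ctx->request.teardown.
+ */
+static void MyBody( void *arg )
+{
+  (void) arg; /* the operation under measurement goes here */
+}
+
+static bool MyTeardown(
+  void *arg,
+  T_ticks *delta,
+  uint32_t tic,
+  uint32_t toc,
+  unsigned int retry
+)
+{
+  (void) arg;
+  (void) delta;
+  (void) retry;
+  return tic == toc;
+}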
+
+/**
+ * @brief Obtain the mutex.
+ */
+static void RtemsSemReqPerfMtxPiRelease_Setup( RtemsSemValPerf_Context *ctx )
+{
+ ObtainMutex( ctx->mutex_id );
+}
+
+static void RtemsSemReqPerfMtxPiRelease_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiRelease_Setup( ctx );
+}
+
+/**
+ * @brief Release the mutex.
+ */
+static void RtemsSemReqPerfMtxPiRelease_Body( RtemsSemValPerf_Context *ctx )
+{
+ ctx->status = rtems_semaphore_release( ctx->mutex_id );
+}
+
+static void RtemsSemReqPerfMtxPiRelease_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiRelease_Body( ctx );
+}
+
+/**
+ * @brief Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiRelease_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc_success( ctx->status );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiRelease_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiRelease_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Let one task wait on the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOne_Setup(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ObtainMutex( ctx->mutex_id );
+ Send( ctx, EVENT_OBTAIN );
+ SetPriority( ctx->worker_id, PRIO_LOW );
+ Send( ctx, EVENT_RELEASE );
+}
+
+static void RtemsSemReqPerfMtxPiReleaseOne_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleaseOne_Setup( ctx );
+}
+
+/**
+ * @brief Release the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOne_Body( RtemsSemValPerf_Context *ctx )
+{
+ ctx->status = rtems_semaphore_release( ctx->mutex_id );
+}
+
+static void RtemsSemReqPerfMtxPiReleaseOne_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleaseOne_Body( ctx );
+}
+
+/**
+ * @brief Restore the worker priority. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsSemReqPerfMtxPiReleaseOne_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
+
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiReleaseOne_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiReleaseOne_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+#if defined(RTEMS_SMP)
+/**
+ * @brief Move worker to scheduler B.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Prepare(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_B_ID, PRIO_NORMAL );
+}
+
+/**
+ * @brief Let one task wait on the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ObtainMutex( ctx->mutex_id );
+ Send( ctx, EVENT_OBTAIN | EVENT_OBTAIN_END | EVENT_RELEASE );
+ WaitForNextTask( 1, ctx->worker_id );
+}
+
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup( ctx );
+}
+
+/**
+ * @brief Release the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Body(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_semaphore_release( ctx->mutex_id );
+}
+
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleaseOtherCpu_Body( ctx );
+}
+
+/**
+ * @brief Make sure the worker waits for the next event. Set the measured
+ * runtime. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
+
+ WaitForNextTask( 1, ctx->worker_id );
+ *delta = ctx->end - ctx->begin;
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Move worker to scheduler A.
+ */
+static void RtemsSemReqPerfMtxPiReleaseOtherCpu_Cleanup(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ SetScheduler( ctx->worker_id, SCHEDULER_A_ID, PRIO_HIGH );
+}
+#endif
+
+/**
+ * @brief Let one task wait on the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleasePreempt_Setup(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ObtainMutex( ctx->mutex_id );
+ Send( ctx, EVENT_OBTAIN | EVENT_OBTAIN_END | EVENT_RELEASE );
+}
+
+static void RtemsSemReqPerfMtxPiReleasePreempt_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleasePreempt_Setup( ctx );
+}
+
+/**
+ * @brief Release the mutex.
+ */
+static void RtemsSemReqPerfMtxPiReleasePreempt_Body(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_semaphore_release( ctx->mutex_id );
+}
+
+static void RtemsSemReqPerfMtxPiReleasePreempt_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiReleasePreempt_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Discard samples interrupted by a clock
+ * tick.
+ */
+static bool RtemsSemReqPerfMtxPiReleasePreempt_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
+
+ *delta = ctx->end - ctx->begin;
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiReleasePreempt_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiReleasePreempt_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Make the mutex unavailable.
+ */
+static void RtemsSemReqPerfMtxPiTry_Prepare( RtemsSemValPerf_Context *ctx )
+{
+ Send( ctx, EVENT_OBTAIN );
+}
+
+/**
+ * @brief Try to obtain the unavailable mutex.
+ */
+static void RtemsSemReqPerfMtxPiTry_Body( RtemsSemValPerf_Context *ctx )
+{
+ ctx->status = rtems_semaphore_obtain( ctx->mutex_id, RTEMS_NO_WAIT, 0 );
+}
+
+static void RtemsSemReqPerfMtxPiTry_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiTry_Body( ctx );
+}
+
+/**
+ * @brief Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiTry_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_UNSATISFIED );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiTry_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiTry_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @brief Make the mutex available.
+ */
+static void RtemsSemReqPerfMtxPiTry_Cleanup( RtemsSemValPerf_Context *ctx )
+{
+ Send( ctx, EVENT_RELEASE );
+}
+
+/**
+ * @brief Make the mutex unavailable.
+ */
+static void RtemsSemReqPerfMtxPiWaitForever_Setup(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ Send( ctx, EVENT_OBTAIN );
+ SetPriority( ctx->worker_id, PRIO_LOW );
+ Send( ctx, EVENT_END | EVENT_RELEASE );
+}
+
+static void RtemsSemReqPerfMtxPiWaitForever_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiWaitForever_Setup( ctx );
+}
+
+/**
+ * @brief Obtain the unavailable mutex and wait forever.
+ */
+static void RtemsSemReqPerfMtxPiWaitForever_Body(
+ RtemsSemValPerf_Context *ctx
+)
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_semaphore_obtain(
+ ctx->mutex_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+}
+
+static void RtemsSemReqPerfMtxPiWaitForever_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiWaitForever_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Restore the worker priority. Release the
+ * mutex. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiWaitForever_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
+
+ *delta = ctx->end - ctx->begin;
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+ ReleaseMutex( ctx->mutex_id );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiWaitForever_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiWaitForever_Teardown(
+ ctx,
+ delta,
+ tic,
+ toc,
+ retry
+ );
+}
+
+/**
+ * @brief Make the mutex unavailable.
+ */
+static void RtemsSemReqPerfMtxPiWaitTimed_Setup( RtemsSemValPerf_Context *ctx )
+{
+ Send( ctx, EVENT_OBTAIN );
+ SetPriority( ctx->worker_id, PRIO_LOW );
+ Send( ctx, EVENT_END | EVENT_RELEASE );
+}
+
+static void RtemsSemReqPerfMtxPiWaitTimed_Setup_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiWaitTimed_Setup( ctx );
+}
+
+/**
+ * @brief Obtain the unavailable mutex and wait with a timeout.
+ */
+static void RtemsSemReqPerfMtxPiWaitTimed_Body( RtemsSemValPerf_Context *ctx )
+{
+ ctx->begin = T_tick();
+ ctx->status = rtems_semaphore_obtain(
+ ctx->mutex_id,
+ RTEMS_WAIT,
+ UINT32_MAX
+ );
+}
+
+static void RtemsSemReqPerfMtxPiWaitTimed_Body_Wrap( void *arg )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ RtemsSemReqPerfMtxPiWaitTimed_Body( ctx );
+}
+
+/**
+ * @brief Set the measured runtime. Restore the worker priority. Release the
+ * mutex. Discard samples interrupted by a clock tick.
+ */
+static bool RtemsSemReqPerfMtxPiWaitTimed_Teardown(
+ RtemsSemValPerf_Context *ctx,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ T_quiet_rsc( ctx->status, RTEMS_SUCCESSFUL );
+
+ *delta = ctx->end - ctx->begin;
+ SetPriority( ctx->worker_id, PRIO_HIGH );
+ ReleaseMutex( ctx->mutex_id );
+
+ return tic == toc;
+}
+
+static bool RtemsSemReqPerfMtxPiWaitTimed_Teardown_Wrap(
+ void *arg,
+ T_ticks *delta,
+ uint32_t tic,
+ uint32_t toc,
+ unsigned int retry
+)
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = arg;
+ return RtemsSemReqPerfMtxPiWaitTimed_Teardown( ctx, delta, tic, toc, retry );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemValPerf( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemValPerf, &RtemsSemValPerf_Fixture )
+{
+ RtemsSemValPerf_Context *ctx;
+
+ ctx = T_fixture_context();
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiObtain";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsSemReqPerfMtxPiObtain_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiObtain_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiRelease";
+ ctx->request.setup = RtemsSemReqPerfMtxPiRelease_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiRelease_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiRelease_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiReleaseOne";
+ ctx->request.setup = RtemsSemReqPerfMtxPiReleaseOne_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiReleaseOne_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiReleaseOne_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ #if defined(RTEMS_SMP)
+ RtemsSemReqPerfMtxPiReleaseOtherCpu_Prepare( ctx );
+ ctx->request.name = "RtemsSemReqPerfMtxPiReleaseOtherCpu";
+ ctx->request.setup = RtemsSemReqPerfMtxPiReleaseOtherCpu_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiReleaseOtherCpu_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiReleaseOtherCpu_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+ RtemsSemReqPerfMtxPiReleaseOtherCpu_Cleanup( ctx );
+ #endif
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiReleasePreempt";
+ ctx->request.setup = RtemsSemReqPerfMtxPiReleasePreempt_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiReleasePreempt_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiReleasePreempt_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ RtemsSemReqPerfMtxPiTry_Prepare( ctx );
+ ctx->request.name = "RtemsSemReqPerfMtxPiTry";
+ ctx->request.setup = NULL;
+ ctx->request.body = RtemsSemReqPerfMtxPiTry_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiTry_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+ RtemsSemReqPerfMtxPiTry_Cleanup( ctx );
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiWaitForever";
+ ctx->request.setup = RtemsSemReqPerfMtxPiWaitForever_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiWaitForever_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiWaitForever_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+
+ ctx->request.name = "RtemsSemReqPerfMtxPiWaitTimed";
+ ctx->request.setup = RtemsSemReqPerfMtxPiWaitTimed_Setup_Wrap;
+ ctx->request.body = RtemsSemReqPerfMtxPiWaitTimed_Body_Wrap;
+ ctx->request.teardown = RtemsSemReqPerfMtxPiWaitTimed_Teardown_Wrap;
+ T_measure_runtime( ctx->context, &ctx->request );
+}
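+
+/*
+ * Each measurement above follows the same pattern: fill in the request and
+ * hand it to T_measure_runtime().  A minimal sketch with hypothetical
+ * ExampleBody() and ExampleTeardown() hooks (illustrative names only),
+ * where the teardown discards samples interrupted by a clock tick:
+ *
+ *   static void ExampleBody( void *arg )
+ *   {
+ *     (void) arg;
+ *   }
+ *
+ *   static bool ExampleTeardown(
+ *     void *arg,
+ *     T_ticks *delta,
+ *     uint32_t tic,
+ *     uint32_t toc,
+ *     unsigned int retry
+ *   )
+ *   {
+ *     (void) arg;
+ *     (void) delta;
+ *     (void) retry;
+ *     return tic == toc;
+ *   }
+ *
+ *   ctx->request.name = "Example";
+ *   ctx->request.setup = NULL;
+ *   ctx->request.body = ExampleBody;
+ *   ctx->request.teardown = ExampleTeardown;
+ *   T_measure_runtime( ctx->context, &ctx->request );
+ */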
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-release.c b/testsuites/validation/tc-sem-release.c
new file mode 100644
index 0000000000..cfb5315a2a
--- /dev/null
+++ b/testsuites/validation/tc-sem-release.c
@@ -0,0 +1,613 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqRelease
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tr-mtx-surrender.h"
+#include "tr-sem-surrender.h"
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqRelease spec:/rtems/sem/req/release
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqRelease_Pre_Class_Counting,
+ RtemsSemReqRelease_Pre_Class_Simple,
+ RtemsSemReqRelease_Pre_Class_Binary,
+ RtemsSemReqRelease_Pre_Class_PrioCeiling,
+ RtemsSemReqRelease_Pre_Class_PrioInherit,
+ RtemsSemReqRelease_Pre_Class_MrsP,
+ RtemsSemReqRelease_Pre_Class_NA
+} RtemsSemReqRelease_Pre_Class;
+
+typedef enum {
+ RtemsSemReqRelease_Pre_Discipline_FIFO,
+ RtemsSemReqRelease_Pre_Discipline_Priority,
+ RtemsSemReqRelease_Pre_Discipline_NA
+} RtemsSemReqRelease_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqRelease_Pre_Id_Valid,
+ RtemsSemReqRelease_Pre_Id_Invalid,
+ RtemsSemReqRelease_Pre_Id_NA
+} RtemsSemReqRelease_Pre_Id;
+
+typedef enum {
+ RtemsSemReqRelease_Post_Action_InvId,
+ RtemsSemReqRelease_Post_Action_BinarySurrender,
+ RtemsSemReqRelease_Post_Action_CountingSurrender,
+ RtemsSemReqRelease_Post_Action_MtxSurrender,
+ RtemsSemReqRelease_Post_Action_InheritMtxSurrender,
+ RtemsSemReqRelease_Post_Action_CeilingMtxSurrender,
+ RtemsSemReqRelease_Post_Action_MrsPMtxSurrender,
+ RtemsSemReqRelease_Post_Action_NA
+} RtemsSemReqRelease_Post_Action;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Class_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Post_Action : 3;
+} RtemsSemReqRelease_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/release test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
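+   *
+   * The specialized views work because TQMtxContext and TQSemContext are
+   * expected to embed the general TQContext as their first member, so all
+   * three union members alias the same object.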
+ */
+ union {
+ TQContext tq_ctx;
+ TQMtxContext tq_mtx_ctx;
+ TQSemContext tq_sem_ctx;
+ };
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqRelease_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqRelease_Context;
+
+static RtemsSemReqRelease_Context
+ RtemsSemReqRelease_Instance;
+
+static const char * const RtemsSemReqRelease_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqRelease_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const RtemsSemReqRelease_PreDesc_Id[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqRelease_PreDesc[] = {
+ RtemsSemReqRelease_PreDesc_Class,
+ RtemsSemReqRelease_PreDesc_Discipline,
+ RtemsSemReqRelease_PreDesc_Id,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqRelease_Context Context;
+
+static void RtemsSemReqRelease_Pre_Class_Prepare(
+ RtemsSemReqRelease_Context *ctx,
+ RtemsSemReqRelease_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqRelease_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_PrioCeiling: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Class_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqRelease_Pre_Discipline_Prepare(
+ RtemsSemReqRelease_Context *ctx,
+ RtemsSemReqRelease_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqRelease_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqRelease_Pre_Id_Prepare(
+ RtemsSemReqRelease_Context *ctx,
+ RtemsSemReqRelease_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqRelease_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter is associated with the semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Id_Invalid: {
+ /*
+ * While the ``id`` parameter is not associated with a semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqRelease_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqRelease_Post_Action_Check(
+ RtemsSemReqRelease_Context *ctx,
+ RtemsSemReqRelease_Post_Action state
+)
+{
+ rtems_status_code sc;
+
+ switch ( state ) {
+ case RtemsSemReqRelease_Post_Action_InvId: {
+ /*
+ * The return status of rtems_semaphore_release() shall be
+ * RTEMS_INVALID_ID.
+ */
+ sc = rtems_semaphore_release( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_BinarySurrender: {
+ /*
+ * The calling task shall surrender the binary semaphore as specified by
+ * /score/sem/req/surrender.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_BINARY;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_CountingSurrender: {
+ /*
+ * The calling task shall surrender the counting semaphore as specified
+ * by /score/sem/req/surrender.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_COUNTING;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_MtxSurrender: {
+ /*
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
+ * allowed, the owner is checked, and no locking protocol is used.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_InheritMtxSurrender: {
+ /*
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
+ * allowed, the owner is checked, and a priority inheritance protocol is
+ * used.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_INHERIT;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_CeilingMtxSurrender: {
+ /*
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
+ * allowed, the owner is checked, and a priority ceiling is used.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_MrsPMtxSurrender: {
+ /*
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue is sticky, a recursive seize
+ * returns an error status, the owner is checked, and a priority ceiling
+ * is used.
+ */
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
+ break;
+ }
+
+ case RtemsSemReqRelease_Post_Action_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqRelease_Setup( RtemsSemReqRelease_Context *ctx )
+{
+ SetSelfPriority( PRIO_NORMAL );
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqRelease_Setup_Wrap( void *arg )
+{
+ RtemsSemReqRelease_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqRelease_Setup( ctx );
+}
+
+static void RtemsSemReqRelease_Teardown( RtemsSemReqRelease_Context *ctx )
+{
+ TQDestroy( &ctx->tq_ctx );
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemReqRelease_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqRelease_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqRelease_Teardown( ctx );
+}
+
+static void RtemsSemReqRelease_Prepare( RtemsSemReqRelease_Context *ctx )
+{
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+}
+
+static void RtemsSemReqRelease_Action( RtemsSemReqRelease_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ 1,
+ ctx->attribute_set,
+ PRIO_VERY_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ rtems_task_priority prio;
+
+ sc = rtems_semaphore_set_priority(
+ ctx->tq_ctx.thread_queue_id,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH,
+ &prio
+ );
+ T_rsc_success( sc );
+ }
+}
+
+static void RtemsSemReqRelease_Cleanup( RtemsSemReqRelease_Context *ctx )
+{
+ rtems_status_code sc;
+
+  sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+  T_rsc_success( sc );
+}
+
+static const RtemsSemReqRelease_Entry
+RtemsSemReqRelease_Entries[] = {
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId },
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CountingSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_BinarySurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MtxSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CeilingMtxSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InheritMtxSurrender },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MrsPMtxSurrender },
+#else
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId }
+#else
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSemReqRelease_Map[] = {
+ 2, 0, 2, 0, 3, 0, 3, 0, 4, 0, 4, 0, 1, 1, 5, 0, 1, 1, 6, 0, 1, 1, 7, 8
+};
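+
+/*
+ * One map entry exists for each pre-condition combination, consumed in the
+ * iteration order of the test case body below: Class (6 states), Discipline
+ * (2 states), and Id (2 states) yield the 24 entries above.  A sketch of
+ * the equivalent flattened index:
+ *
+ *   index = ( class * 2 + discipline ) * 2 + id;
+ *   entry = RtemsSemReqRelease_Entries[ RtemsSemReqRelease_Map[ index ] ];
+ */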
+
+static size_t RtemsSemReqRelease_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqRelease_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqRelease_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqRelease_Fixture = {
+ .setup = RtemsSemReqRelease_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqRelease_Teardown_Wrap,
+ .scope = RtemsSemReqRelease_Scope,
+ .initial_context = &RtemsSemReqRelease_Instance
+};
+
+static inline RtemsSemReqRelease_Entry RtemsSemReqRelease_PopEntry(
+ RtemsSemReqRelease_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqRelease_Entries[
+ RtemsSemReqRelease_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqRelease_TestVariant( RtemsSemReqRelease_Context *ctx )
+{
+ RtemsSemReqRelease_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqRelease_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqRelease_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqRelease_Action( ctx );
+ RtemsSemReqRelease_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqRelease( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqRelease, &RtemsSemReqRelease_Fixture )
+{
+ RtemsSemReqRelease_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqRelease_Pre_Class_Counting;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqRelease_Pre_Class_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqRelease_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqRelease_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqRelease_Pre_Id_Valid;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqRelease_Pre_Id_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqRelease_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqRelease_Prepare( ctx );
+ RtemsSemReqRelease_TestVariant( ctx );
+ RtemsSemReqRelease_Cleanup( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-set-priority.c b/testsuites/validation/tc-sem-set-priority.c
new file mode 100644
index 0000000000..d4418ad4de
--- /dev/null
+++ b/testsuites/validation/tc-sem-set-priority.c
@@ -0,0 +1,1137 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqSetPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqSetPriority \
+ * spec:/rtems/sem/req/set-priority
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqSetPriority_Pre_Class_Counting,
+ RtemsSemReqSetPriority_Pre_Class_Simple,
+ RtemsSemReqSetPriority_Pre_Class_Binary,
+ RtemsSemReqSetPriority_Pre_Class_PrioCeilingNoOwner,
+ RtemsSemReqSetPriority_Pre_Class_PrioCeilingOwner,
+ RtemsSemReqSetPriority_Pre_Class_PrioInherit,
+ RtemsSemReqSetPriority_Pre_Class_MrsP,
+ RtemsSemReqSetPriority_Pre_Class_NA
+} RtemsSemReqSetPriority_Pre_Class;
+
+typedef enum {
+ RtemsSemReqSetPriority_Pre_SemId_Valid,
+ RtemsSemReqSetPriority_Pre_SemId_Invalid,
+ RtemsSemReqSetPriority_Pre_SemId_NA
+} RtemsSemReqSetPriority_Pre_SemId;
+
+typedef enum {
+ RtemsSemReqSetPriority_Pre_SchedId_Invalid,
+ RtemsSemReqSetPriority_Pre_SchedId_Create,
+ RtemsSemReqSetPriority_Pre_SchedId_Other,
+ RtemsSemReqSetPriority_Pre_SchedId_NA
+} RtemsSemReqSetPriority_Pre_SchedId;
+
+typedef enum {
+ RtemsSemReqSetPriority_Pre_NewPrio_Current,
+ RtemsSemReqSetPriority_Pre_NewPrio_Valid,
+ RtemsSemReqSetPriority_Pre_NewPrio_Invalid,
+ RtemsSemReqSetPriority_Pre_NewPrio_NA
+} RtemsSemReqSetPriority_Pre_NewPrio;
+
+typedef enum {
+ RtemsSemReqSetPriority_Pre_OldPrio_Valid,
+ RtemsSemReqSetPriority_Pre_OldPrio_Null,
+ RtemsSemReqSetPriority_Pre_OldPrio_NA
+} RtemsSemReqSetPriority_Pre_OldPrio;
+
+typedef enum {
+ RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_Status_InvAddr,
+ RtemsSemReqSetPriority_Post_Status_InvId,
+ RtemsSemReqSetPriority_Post_Status_InvPrio,
+ RtemsSemReqSetPriority_Post_Status_NotDef,
+ RtemsSemReqSetPriority_Post_Status_NA
+} RtemsSemReqSetPriority_Post_Status;
+
+typedef enum {
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_OwnerPrio_New,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA
+} RtemsSemReqSetPriority_Post_OwnerPrio;
+
+typedef enum {
+ RtemsSemReqSetPriority_Post_SemPrio_Set,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_NA
+} RtemsSemReqSetPriority_Post_SemPrio;
+
+typedef enum {
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA
+} RtemsSemReqSetPriority_Post_OldPrioVar;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Class_NA : 1;
+ uint16_t Pre_SemId_NA : 1;
+ uint16_t Pre_SchedId_NA : 1;
+ uint16_t Pre_NewPrio_NA : 1;
+ uint16_t Pre_OldPrio_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_OwnerPrio : 2;
+ uint16_t Post_SemPrio : 2;
+ uint16_t Post_OldPrioVar : 2;
+} RtemsSemReqSetPriority_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/set-priority test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the scheduler identifier of the runner task.
+ */
+ rtems_id runner_scheduler_id;
+
+ /**
+ * @brief This member contains the scheduler identifier of a scheduler not
+ * used by the runner task.
+ */
+ rtems_id other_scheduler_id;
+
+ /**
+ * @brief This member specifies the initial count of the semaphore.
+ */
+ uint32_t count;
+
+ /**
+ * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ /**
+ * @brief This member contains the semaphore identifier.
+ */
+ rtems_id the_semaphore_id;
+
+ /**
+ * @brief If this member is true, then the ``semaphore_id`` parameter value
+   *   shall be associated with the semaphore, otherwise it shall not be
+   *   associated with a semaphore.
+ */
+ bool valid_id;
+
+ /**
+ * @brief This member may contain the task priority returned by
+ * rtems_semaphore_set_priority().
+ */
+ rtems_task_priority old_priority_value;
+
+ /**
+ * @brief This member specifies the ``semaphore_id`` parameter for the
+ * rtems_semaphore_set_priority() call.
+ */
+ rtems_id semaphore_id;
+
+ /**
+ * @brief This member specifies the ``scheduler_id`` parameter for the
+ * rtems_semaphore_set_priority() call.
+ */
+ rtems_id scheduler_id;
+
+ /**
+ * @brief This member specifies the ``new_priority`` parameter for the
+ * rtems_semaphore_set_priority() call.
+ */
+ rtems_task_priority new_priority;
+
+ /**
+ * @brief This member specifies the ``old_priority`` parameter for the
+ * rtems_semaphore_set_priority() call.
+ */
+ rtems_task_priority *old_priority;
+
+ /**
+ * @brief This member contains the status of the
+ * rtems_semaphore_set_priority() call.
+ */
+ rtems_status_code status;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 5 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqSetPriority_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqSetPriority_Context;
+
+static RtemsSemReqSetPriority_Context
+ RtemsSemReqSetPriority_Instance;
+
+static const char * const RtemsSemReqSetPriority_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeilingNoOwner",
+ "PrioCeilingOwner",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqSetPriority_PreDesc_SemId[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSemReqSetPriority_PreDesc_SchedId[] = {
+ "Invalid",
+ "Create",
+ "Other",
+ "NA"
+};
+
+static const char * const RtemsSemReqSetPriority_PreDesc_NewPrio[] = {
+ "Current",
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSemReqSetPriority_PreDesc_OldPrio[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqSetPriority_PreDesc[] = {
+ RtemsSemReqSetPriority_PreDesc_Class,
+ RtemsSemReqSetPriority_PreDesc_SemId,
+ RtemsSemReqSetPriority_PreDesc_SchedId,
+ RtemsSemReqSetPriority_PreDesc_NewPrio,
+ RtemsSemReqSetPriority_PreDesc_OldPrio,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqSetPriority_Context Context;
+
+static void ReleaseSemaphore( const Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( ctx->the_semaphore_id );
+ T_rsc_success( sc );
+}
+
+static void ChangeScheduler( rtems_id scheduler_id )
+{
+#if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ sc = rtems_task_set_scheduler( RTEMS_SELF, scheduler_id, PRIO_NORMAL );
+ T_rsc_success( sc );
+#else
+ (void) scheduler_id;
+#endif
+}
+
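+/*
+ * Observe the semaphore priority indirectly: obtaining the semaphore under
+ * the given scheduler raises the calling task to the configured ceiling
+ * priority, which is then compared against the expected value.
+ */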
+static void CheckPriority(
+ const Context *ctx,
+ rtems_id scheduler_id,
+ rtems_task_priority priority
+)
+{
+ rtems_status_code sc;
+
+ ChangeScheduler( scheduler_id );
+
+ sc = rtems_semaphore_obtain(
+ ctx->the_semaphore_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+
+ T_eq_u32( GetSelfPriority(), priority );
+
+ ReleaseSemaphore( ctx );
+ ChangeScheduler( ctx->runner_scheduler_id );
+}
+
+static void CheckNotDefined(
+ const Context *ctx,
+ rtems_id scheduler_id
+)
+{
+#if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ ChangeScheduler( scheduler_id );
+
+ sc = rtems_semaphore_obtain(
+ ctx->the_semaphore_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+
+ ChangeScheduler( ctx->runner_scheduler_id );
+#else
+ (void) ctx;
+ (void) scheduler_id;
+#endif
+}
+
+static void RtemsSemReqSetPriority_Pre_Class_Prepare(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_PrioCeilingNoOwner: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore, while the
+ * semaphore has no owner.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_PrioCeilingOwner: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore, while the
+ * semaphore has an owner.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ ctx->count = 0;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_Class_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Pre_SemId_Prepare(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Pre_SemId state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Pre_SemId_Valid: {
+ /*
+ * While the ``semaphore_id`` parameter is associated with the semaphore.
+ */
+ ctx->valid_id = true;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_SemId_Invalid: {
+ /*
+ * While the ``semaphore_id`` parameter is not associated with a
+ * semaphore.
+ */
+ ctx->valid_id = false;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_SemId_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Pre_SchedId_Prepare(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Pre_SchedId state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Pre_SchedId_Invalid: {
+ /*
+ * While the ``scheduler_id`` parameter is not associated with a
+ * scheduler.
+ */
+ ctx->scheduler_id = INVALID_ID;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_SchedId_Create: {
+ /*
+ * While the ``scheduler_id`` parameter is associated with the scheduler
+ * used to create the semaphore.
+ */
+ ctx->scheduler_id = ctx->runner_scheduler_id;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_SchedId_Other: {
+ /*
+ * While the ``scheduler_id`` parameter is associated with a scheduler
+ * other than the one used to create the semaphore.
+ */
+ ctx->scheduler_id = ctx->other_scheduler_id;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_SchedId_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Pre_NewPrio_Prepare(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Pre_NewPrio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Pre_NewPrio_Current: {
+ /*
+ * While the ``new_priority`` parameter is equal to
+ * RTEMS_CURRENT_PRIORITY.
+ */
+ ctx->new_priority = RTEMS_CURRENT_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_NewPrio_Valid: {
+ /*
+ * While the ``new_priority`` parameter is not equal to
+ * RTEMS_CURRENT_PRIORITY and valid with respect to the scheduler
+ * specified by the ``scheduler_id`` parameter.
+ */
+ ctx->new_priority = PRIO_VERY_HIGH;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_NewPrio_Invalid: {
+ /*
+ * While the ``new_priority`` parameter is invalid with respect to the
+ * scheduler specified by the ``scheduler_id`` parameter.
+ */
+ ctx->new_priority = PRIO_INVALID;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_NewPrio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Pre_OldPrio_Prepare(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Pre_OldPrio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Pre_OldPrio_Valid: {
+ /*
+ * While the ``old_priority`` parameter references an object of type
+ * rtems_task_priority.
+ */
+ ctx->old_priority = &ctx->old_priority_value;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_OldPrio_Null: {
+ /*
+ * While the ``old_priority`` parameter is NULL.
+ */
+ ctx->old_priority = NULL;
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Pre_OldPrio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Post_Status_Check(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Post_Status_Ok: {
+ /*
+ * The return status of rtems_semaphore_set_priority() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_semaphore_set_priority() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_Status_InvId: {
+ /*
+ * The return status of rtems_semaphore_set_priority() shall be
+ * RTEMS_INVALID_ID.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_Status_InvPrio: {
+ /*
+ * The return status of rtems_semaphore_set_priority() shall be
+ * RTEMS_INVALID_PRIORITY.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_PRIORITY );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_Status_NotDef: {
+ /*
+ * The return status of rtems_semaphore_set_priority() shall be
+ * RTEMS_NOT_DEFINED.
+ */
+ T_rsc( ctx->status, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Post_OwnerPrio_Check(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Post_OwnerPrio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Post_OwnerPrio_Nop: {
+ /*
+ * The current priority of the owner task of the semaphore for the
+ * scheduler specified by the ``scheduler_id`` parameter shall not be
+ * modified by the rtems_semaphore_set_priority() call.
+ */
+ T_eq_u32( GetSelfPriority(), PRIO_HIGH );
+ ReleaseSemaphore( ctx );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_OwnerPrio_New: {
+ /*
+ * The current priority of the owner task of the semaphore for the
+ * scheduler specified by the ``scheduler_id`` parameter shall be less
+ * than or equal to the value of the ``new_priority`` parameter.
+ */
+ T_eq_u32( GetSelfPriority(), PRIO_VERY_HIGH );
+ ReleaseSemaphore( ctx );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_OwnerPrio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Post_SemPrio_Check(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Post_SemPrio state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Post_SemPrio_Set: {
+ /*
+ * The priority used for the scheduler specified by the ``scheduler_id``
+ * parameter of the semaphore associated with the identifier specified by
+     * the ``semaphore_id`` parameter shall be set to the priority specified
+ * by the ``new_priority`` parameter during the
+ * rtems_semaphore_set_priority() call.
+ */
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ if ( ctx->scheduler_id == ctx->other_scheduler_id ) {
+ CheckPriority( ctx, ctx->runner_scheduler_id, PRIO_HIGH );
+ CheckPriority( ctx, ctx->other_scheduler_id, PRIO_VERY_HIGH );
+ } else {
+ CheckPriority( ctx, ctx->runner_scheduler_id, PRIO_VERY_HIGH );
+ #if defined(RTEMS_SMP)
+ CheckPriority( ctx, ctx->other_scheduler_id, 0 );
+ #endif
+ }
+ } else if ( ( ctx->attribute_set & RTEMS_PRIORITY_CEILING ) != 0 ) {
+ CheckPriority( ctx, ctx->runner_scheduler_id, PRIO_VERY_HIGH );
+ CheckNotDefined( ctx, ctx->other_scheduler_id );
+ }
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_SemPrio_Nop: {
+ /*
+ * Priorities used by semaphores shall not be modified by the
+ * rtems_semaphore_set_priority() call.
+ */
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ CheckPriority( ctx, ctx->runner_scheduler_id, PRIO_HIGH );
+ #if defined(RTEMS_SMP)
+ CheckPriority( ctx, ctx->other_scheduler_id, 0 );
+ #endif
+ } else if ( ( ctx->attribute_set & RTEMS_PRIORITY_CEILING ) != 0 ) {
+ CheckPriority( ctx, ctx->runner_scheduler_id, PRIO_HIGH );
+ CheckNotDefined( ctx, ctx->other_scheduler_id );
+ }
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_SemPrio_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Post_OldPrioVar_Check(
+ RtemsSemReqSetPriority_Context *ctx,
+ RtemsSemReqSetPriority_Post_OldPrioVar state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqSetPriority_Post_OldPrioVar_Set: {
+ /*
+ * The value of the object referenced by the ``old_priority`` parameter
+ * shall be set to the priority used for the scheduler specified by the
+ * ``scheduler_id`` parameter of the semaphore associated with the
+ * identifier specified by the ``semaphore_id`` parameter right before
+ * the priority is set by the rtems_semaphore_set_priority() call.
+ */
+ T_eq_ptr( ctx->old_priority, &ctx->old_priority_value );
+
+ if ( ctx->scheduler_id == ctx->other_scheduler_id ) {
+ T_eq_u32( ctx->old_priority_value, 0 );
+ } else {
+ T_eq_u32( ctx->old_priority_value, PRIO_HIGH );
+ }
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_OldPrioVar_Nop: {
+ /*
+ * Objects referenced by the ``old_priority`` parameter in past calls to
+ * rtems_semaphore_set_priority() shall not be accessed by the
+ * rtems_semaphore_set_priority() call.
+ */
+ T_eq_u32( ctx->old_priority_value, PRIO_INVALID );
+ break;
+ }
+
+ case RtemsSemReqSetPriority_Post_OldPrioVar_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqSetPriority_Setup( RtemsSemReqSetPriority_Context *ctx )
+{
+ rtems_status_code sc;
+
+ memset( ctx, 0, sizeof( *ctx ) );
+ SetSelfPriority( PRIO_NORMAL );
+
+ sc = rtems_task_get_scheduler( RTEMS_SELF, &ctx->runner_scheduler_id );
+ T_rsc_success( sc );
+
+ #if defined(RTEMS_SMP)
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_B_NAME,
+ &ctx->other_scheduler_id
+ );
+ T_rsc_success( sc );
+ #else
+ ctx->other_scheduler_id = INVALID_ID;
+ #endif
+}
+
+static void RtemsSemReqSetPriority_Setup_Wrap( void *arg )
+{
+ RtemsSemReqSetPriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqSetPriority_Setup( ctx );
+}
+
+static void RtemsSemReqSetPriority_Teardown(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemReqSetPriority_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqSetPriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqSetPriority_Teardown( ctx );
+}
+
+static void RtemsSemReqSetPriority_Prepare(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ ctx->old_priority_value = PRIO_INVALID;
+ ctx->count = 1;
+ ctx->attribute_set = RTEMS_PRIORITY;
+ ctx->valid_id = true;
+}
+
+static void RtemsSemReqSetPriority_Action(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ ctx->count,
+ ctx->attribute_set,
+ PRIO_HIGH,
+ &ctx->the_semaphore_id
+ );
+ T_rsc_success( sc );
+
+ if ( ctx->valid_id ) {
+ ctx->semaphore_id = ctx->the_semaphore_id;
+ } else {
+ ctx->semaphore_id = INVALID_ID;
+ }
+
+ ctx->status = rtems_semaphore_set_priority(
+ ctx->semaphore_id,
+ ctx->scheduler_id,
+ ctx->new_priority,
+ ctx->old_priority
+ );
+}
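+
+/*
+ * A sketch of a plain priority query, assuming a valid semaphore identifier
+ * in ctx->semaphore_id: passing RTEMS_CURRENT_PRIORITY as the new priority
+ * yields the priority used for the scheduler without changing it.
+ *
+ *   rtems_task_priority current;
+ *
+ *   sc = rtems_semaphore_set_priority(
+ *     ctx->semaphore_id,
+ *     ctx->scheduler_id,
+ *     RTEMS_CURRENT_PRIORITY,
+ *     &current
+ *   );
+ */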
+
+static void RtemsSemReqSetPriority_Cleanup(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ rtems_status_code sc;
+
+ T_eq_u32( GetSelfPriority(), PRIO_NORMAL );
+
+ sc = rtems_semaphore_delete( ctx->the_semaphore_id );
+ T_rsc_success( sc );
+}
+
+static const RtemsSemReqSetPriority_Entry
+RtemsSemReqSetPriority_Entries[] = {
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvAddr,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvId,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvAddr,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvId,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvAddr,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NotDef,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvId,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NotDef,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvPrio,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvPrio,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvAddr,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvId,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Set,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NotDef,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_New,
+ RtemsSemReqSetPriority_Post_SemPrio_Set,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set },
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvPrio,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_InvPrio,
+ RtemsSemReqSetPriority_Post_OwnerPrio_Nop,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Nop,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_Ok,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_Set,
+ RtemsSemReqSetPriority_Post_OldPrioVar_Set }
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSemReqSetPriority_Post_Status_NA,
+ RtemsSemReqSetPriority_Post_OwnerPrio_NA,
+ RtemsSemReqSetPriority_Post_SemPrio_NA,
+ RtemsSemReqSetPriority_Post_OldPrioVar_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSemReqSetPriority_Map[] = {
+ 1, 0, 1, 0, 1, 0, 7, 0, 7, 0, 8, 0, 5, 2, 5, 2, 9, 2, 1, 0, 1, 0, 1, 0, 1, 0,
+ 1, 0, 1, 0, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0, 7, 0, 7, 0, 8, 0, 5, 2, 5, 2,
+ 9, 2, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0,
+ 7, 0, 7, 0, 8, 0, 5, 2, 5, 2, 9, 2, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 2,
+ 3, 2, 3, 2, 1, 0, 1, 0, 1, 0, 12, 0, 13, 0, 8, 0, 5, 2, 5, 2, 9, 2, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 2, 3, 2, 3, 2, 6, 4, 6, 4, 6, 4, 15, 4, 16, 4,
+ 17, 4, 14, 10, 14, 10, 18, 10, 6, 4, 6, 4, 6, 4, 6, 4, 6, 4, 6, 4, 11, 10,
+ 11, 10, 11, 10, 1, 0, 1, 0, 1, 0, 7, 0, 7, 0, 8, 0, 5, 2, 5, 2, 9, 2, 1, 0,
+ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 2, 3, 2, 3, 2, 1, 0, 1, 0, 1, 0, 12, 0, 13,
+ 0, 8, 0, 19, 2, 20, 2, 9, 2, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 3, 2, 3, 2,
+ 3, 2
+};
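+
+/*
+ * One map entry exists for each pre-condition combination, consumed in the
+ * loop order of the test case body below: Class (7 states), SemId (2),
+ * SchedId (3), NewPrio (3), and OldPrio (2) give 7 * 2 * 3 * 3 * 2 == 252
+ * entries.
+ */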
+
+static size_t RtemsSemReqSetPriority_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqSetPriority_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqSetPriority_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqSetPriority_Fixture = {
+ .setup = RtemsSemReqSetPriority_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqSetPriority_Teardown_Wrap,
+ .scope = RtemsSemReqSetPriority_Scope,
+ .initial_context = &RtemsSemReqSetPriority_Instance
+};
+
+static inline RtemsSemReqSetPriority_Entry RtemsSemReqSetPriority_PopEntry(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqSetPriority_Entries[
+ RtemsSemReqSetPriority_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqSetPriority_TestVariant(
+ RtemsSemReqSetPriority_Context *ctx
+)
+{
+ RtemsSemReqSetPriority_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqSetPriority_Pre_SemId_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqSetPriority_Pre_SchedId_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqSetPriority_Pre_NewPrio_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ RtemsSemReqSetPriority_Pre_OldPrio_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ RtemsSemReqSetPriority_Action( ctx );
+ RtemsSemReqSetPriority_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsSemReqSetPriority_Post_OwnerPrio_Check(
+ ctx,
+ ctx->Map.entry.Post_OwnerPrio
+ );
+ RtemsSemReqSetPriority_Post_SemPrio_Check(
+ ctx,
+ ctx->Map.entry.Post_SemPrio
+ );
+ RtemsSemReqSetPriority_Post_OldPrioVar_Check(
+ ctx,
+ ctx->Map.entry.Post_OldPrioVar
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqSetPriority( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqSetPriority, &RtemsSemReqSetPriority_Fixture )
+{
+ RtemsSemReqSetPriority_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqSetPriority_Pre_Class_Counting;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqSetPriority_Pre_Class_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqSetPriority_Pre_SemId_Valid;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqSetPriority_Pre_SemId_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSemReqSetPriority_Pre_SchedId_Invalid;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqSetPriority_Pre_SchedId_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = RtemsSemReqSetPriority_Pre_NewPrio_Current;
+ ctx->Map.pcs[ 3 ] < RtemsSemReqSetPriority_Pre_NewPrio_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = RtemsSemReqSetPriority_Pre_OldPrio_Valid;
+ ctx->Map.pcs[ 4 ] < RtemsSemReqSetPriority_Pre_OldPrio_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqSetPriority_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqSetPriority_Prepare( ctx );
+ RtemsSemReqSetPriority_TestVariant( ctx );
+ RtemsSemReqSetPriority_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-smp.c b/testsuites/validation/tc-sem-smp.c
new file mode 100644
index 0000000000..cfe82b3982
--- /dev/null
+++ b/testsuites/validation/tc-sem-smp.c
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemValSmp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/score/threaddispatch.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemValSmp spec:/rtems/sem/val/smp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief Tests SMP-specific semaphore behaviour.
+ *
+ * This test case performs the following actions:
+ *
+ * - Create a worker thread and a MrsP mutex. Use the mutex and the worker to
+ * perform a bad sticky thread queue enqueue.
+ *
+ * - Create two worker threads, a MrsP mutex, and a priority inheritance mutex.
+ * Use the mutexes and the workers to raise the current priority to a higher
+ * priority than the ceiling priority of the mutex while one of the workers
+ * waits on the mutex.
+ *
+ * - Let the first worker try to obtain the MrsP mutex. Check that it
+ * acquired the ceiling priority.
+ *
+ * - Let the second worker try to obtain the priority inheritance mutex.
+ * Check that the first worker inherited the priority from the second
+ * worker.
+ *
+ * - Set the real priority of the first worker. Check that it defines the
+ * current priority.
+ *
+ * - Release the MrsP mutex so that the first worker can obtain it. It
+ * will replace a temporary priority node which is the maximum priority
+ * node. This is the first scenario we want to test.
+ *
+ * - Obtain the MrsP mutex for the runner thread to start the second scenario
+ * we would like to test.
+ *
+ * - Let the first worker try to obtain the MrsP mutex. Check that it
+ * acquired the ceiling priority.
+ *
+ * - Let the second worker try to obtain the priority inheritance mutex.
+ * Check that the first worker inherited the priority from the second
+ * worker.
+ *
+ * - Lower the priority of the second worker. Check that the inherited
+ * priority of the first worker reflects this priority change.
+ *
+ * - Change the real priority of the first worker so that it defines its
+ * current priority.
+ *
+ * - Release the MrsP mutex so that the first worker can obtain it. It
+ * will replace a temporary priority node which is between the minimum and
+ * maximum priority node. This is the second scenario we want to test.
+ *
+ * - Clean up all used resources.
+ *
+ * @{
+ */
+
+/**
+ * @brief Test context for spec:/rtems/sem/val/smp test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the mutex identifier.
+ */
+  rtems_id mutex_id;
+
+ /**
+ * @brief This member contains the second mutex identifier.
+ */
+  rtems_id mutex_2_id;
+
+ /**
+ * @brief If this member is true, then the worker is done.
+ */
+ volatile bool done;
+
+ /**
+ * @brief If this member is true, then the second worker is done.
+ */
+ volatile bool done_2;
+} RtemsSemValSmp_Context;
+
+static RtemsSemValSmp_Context
+ RtemsSemValSmp_Instance;
+
+typedef RtemsSemValSmp_Context Context;
+
+static void BadEnqueueFatal(
+ rtems_fatal_source source,
+ rtems_fatal_code code,
+ void *arg
+)
+{
+ Per_CPU_Control *cpu_self;
+ Context *ctx;
+
+ T_eq_int( source, INTERNAL_ERROR_CORE );
+ T_eq_ulong(
+ code,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
+ );
+
+ SetFatalHandler( NULL, NULL );
+
+ cpu_self = _Per_CPU_Get();
+ _Thread_Dispatch_unnest( cpu_self );
+ _Thread_Dispatch_unnest( cpu_self );
+
+ ctx = arg;
+ ctx->done = true;
+ SuspendSelf();
+}
+
+static void BadEnqueueTask( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
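+
+  /*
+   * Obtaining the sticky MrsP mutex with thread dispatching disabled is the
+   * bad state which triggers the fatal error checked in BadEnqueueFatal().
+   */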
+ (void) _Thread_Dispatch_disable();
+ ObtainMutex( ctx->mutex_id );
+}
+
+static void ObtainReleaseMrsPTask( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+ ObtainMutex( ctx->mutex_2_id );
+ ctx->done = true;
+ ObtainMutex( ctx->mutex_id );
+ ReleaseMutex( ctx->mutex_id );
+ ReleaseMutex( ctx->mutex_2_id );
+ ctx->done = true;
+ SuspendSelf();
+}
+
+static void ObtainRelease2Task( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
+ ctx->done_2 = true;
+ ObtainMutex( ctx->mutex_2_id );
+ ReleaseMutex( ctx->mutex_2_id );
+ ctx->done_2 = true;
+ SuspendSelf();
+}
+
+static void RtemsSemValSmp_Setup( RtemsSemValSmp_Context *ctx )
+{
+ SetSelfPriority( PRIO_NORMAL );
+}
+
+static void RtemsSemValSmp_Setup_Wrap( void *arg )
+{
+ RtemsSemValSmp_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValSmp_Setup( ctx );
+}
+
+static void RtemsSemValSmp_Teardown( RtemsSemValSmp_Context *ctx )
+{
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemValSmp_Teardown_Wrap( void *arg )
+{
+ RtemsSemValSmp_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValSmp_Teardown( ctx );
+}
+
+static T_fixture RtemsSemValSmp_Fixture = {
+ .setup = RtemsSemValSmp_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemValSmp_Teardown_Wrap,
+ .scope = NULL,
+ .initial_context = &RtemsSemValSmp_Instance
+};
+
+/**
+ * @brief Create a worker thread and a MrsP mutex. Use the mutex and the
+ * worker to perform a bad sticky thread queue enqueue.
+ */
+static void RtemsSemValSmp_Action_0( RtemsSemValSmp_Context *ctx )
+{
+ rtems_status_code sc;
+ rtems_id worker_id;
+ rtems_id scheduler_b_id;
+
+ ctx->done = false;
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &scheduler_b_id );
+ T_rsc_success( sc );
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'U', 'T', 'X' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_HIGH,
+ &ctx->mutex_id
+ );
+ T_rsc_success( sc );
+
+ worker_id = CreateTask( "WORK", PRIO_NORMAL );
+ SetScheduler( worker_id, scheduler_b_id, PRIO_NORMAL );
+
+ ObtainMutex( ctx->mutex_id );
+ SetFatalHandler( BadEnqueueFatal, ctx );
+ StartTask( worker_id, BadEnqueueTask, ctx );
+
+ while ( !ctx->done ) {
+ /* Wait */
+ }
+
+ DeleteTask( worker_id );
+ ReleaseMutex( ctx->mutex_id );
+ DeleteMutex( ctx->mutex_id );
+}
+
+/**
+ * @brief Create two worker threads, a MrsP mutex, and a priority inheritance
+ * mutex. Use the mutexes and the workers to raise the current priority to a
+ * higher priority than the ceiling priority of the mutex while one of the
+ * workers waits on the mutex.
+ */
+static void RtemsSemValSmp_Action_1( RtemsSemValSmp_Context *ctx )
+{
+ rtems_status_code sc;
+ rtems_id worker_id;
+ rtems_id worker_2_id;
+ rtems_id scheduler_b_id;
+ rtems_task_priority prio;
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &scheduler_b_id );
+ T_rsc_success( sc );
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'U', 'T', 'X' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_HIGH,
+ &ctx->mutex_id
+ );
+ T_rsc_success( sc );
+
+ sc = rtems_semaphore_set_priority(
+ ctx->mutex_id,
+ scheduler_b_id,
+ PRIO_HIGH,
+ &prio
+ );
+ T_rsc_success( sc );
+
+ ctx->mutex_2_id = CreateMutex();
+
+ worker_id = CreateTask( "WORK", PRIO_NORMAL );
+ SetScheduler( worker_id, scheduler_b_id, PRIO_NORMAL );
+
+ worker_2_id = CreateTask( "WRK2", PRIO_NORMAL );
+ SetScheduler( worker_2_id, scheduler_b_id, PRIO_VERY_HIGH );
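+
+  /*
+   * Both workers execute under scheduler B, so the priority checks below
+   * query the worker priority by scheduler B.
+   */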
+
+ /*
+ * Let the first worker try to obtain the MrsP mutex. Check that it acquired
+ * the ceiling priority.
+ */
+ ObtainMutex( ctx->mutex_id );
+ ctx->done = false;
+ StartTask( worker_id, ObtainReleaseMrsPTask, ctx );
+
+ while ( !ctx->done ) {
+ /* Wait */
+ }
+
+ ctx->done = false;
+ WaitForIntendToBlock( worker_id );
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_HIGH );
+
+ /*
+ * Let the second worker try to obtain the priority inheritance mutex. Check
+ * that the first worker inherited the priority from the second worker.
+ */
+ ctx->done_2 = false;
+ StartTask( worker_2_id, ObtainRelease2Task, ctx );
+
+ while ( !ctx->done_2 ) {
+ /* Wait */
+ }
+
+ ctx->done_2 = false;
+ WaitForExecutionStop( worker_2_id );
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_VERY_HIGH );
+
+ /*
+ * Set the real priority of the first worker. Check that it defines the
+ * current priority.
+ */
+ SetPriority( worker_id, PRIO_ULTRA_HIGH );
+
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_ULTRA_HIGH );
+
+ /*
+ * Release the MrsP mutex so that the first worker can obtain it. It will
+ * replace a temporary priority node which is the maximum priority node.
+ * This is the first scenario we want to test.
+ */
+ ReleaseMutex( ctx->mutex_id );
+
+ while ( !ctx->done || !ctx->done_2 ) {
+ /* Wait */
+ }
+
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_ULTRA_HIGH );
+
+ /*
+ * Obtain the MrsP mutex for the runner thread to start the second scenario
+ * we would like to test.
+ */
+ ObtainMutex( ctx->mutex_id );
+
+ /*
+ * Let the first worker try to obtain the MrsP mutex. Check that it acquired
+ * the ceiling priority.
+ */
+ ctx->done = false;
+ sc = rtems_task_restart( worker_id, (rtems_task_argument) ctx );
+ T_rsc_success( sc );
+
+ while ( !ctx->done ) {
+ /* Wait */
+ }
+
+ ctx->done = false;
+ WaitForIntendToBlock( worker_id );
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_HIGH );
+
+ /*
+ * Let the second worker try to obtain the priority inheritance mutex. Check
+ * that the first worker inherited the priority from the second worker.
+ */
+ ctx->done_2 = false;
+ sc = rtems_task_restart( worker_2_id, (rtems_task_argument) ctx );
+ T_rsc_success( sc );
+
+ while ( !ctx->done_2 ) {
+ /* Wait */
+ }
+
+ ctx->done_2 = false;
+ WaitForExecutionStop( worker_2_id );
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_VERY_HIGH );
+
+ /*
+ * Lower the priority of the second worker. Check that the inherited
+ * priority of the first worker reflects this priority change.
+ */
+ SetPriority( worker_2_id, PRIO_LOW );
+
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_HIGH );
+
+ /*
+ * Change the real priority of the first worker so that it defines its
+ * current priority.
+ */
+ SetPriority( worker_id, PRIO_ULTRA_HIGH );
+
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_ULTRA_HIGH );
+
+ /*
+ * Release the MrsP mutex so that the first worker can obtain it. It will
+ * replace a temporary priority node which is between the minimum and maximum
+ * priority node. This is the second scenario we want to test.
+ */
+ ReleaseMutex( ctx->mutex_id );
+
+ while ( !ctx->done || !ctx->done_2 ) {
+ /* Wait */
+ }
+
+ prio = GetPriorityByScheduler( worker_id, scheduler_b_id );
+ T_eq_u32( prio, PRIO_ULTRA_HIGH );
+
+ /*
+ * Clean up all used resources.
+ */
+ DeleteTask( worker_id );
+ DeleteTask( worker_2_id );
+ DeleteMutex( ctx->mutex_id );
+ DeleteMutex( ctx->mutex_2_id );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemValSmp( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemValSmp, &RtemsSemValSmp_Fixture )
+{
+ RtemsSemValSmp_Context *ctx;
+
+ ctx = T_fixture_context();
+
+ RtemsSemValSmp_Action_0( ctx );
+ RtemsSemValSmp_Action_1( ctx );
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-timeout.c b/testsuites/validation/tc-sem-timeout.c
new file mode 100644
index 0000000000..254a6be5db
--- /dev/null
+++ b/testsuites/validation/tc-sem-timeout.c
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqTimeout
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tr-tq-timeout-mrsp.h"
+#include "tr-tq-timeout-priority-inherit.h"
+#include "tr-tq-timeout.h"
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqTimeout spec:/rtems/sem/req/timeout
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqTimeout_Pre_Class_Counting,
+ RtemsSemReqTimeout_Pre_Class_Simple,
+ RtemsSemReqTimeout_Pre_Class_Binary,
+ RtemsSemReqTimeout_Pre_Class_PrioCeiling,
+ RtemsSemReqTimeout_Pre_Class_PrioInherit,
+ RtemsSemReqTimeout_Pre_Class_MrsP,
+ RtemsSemReqTimeout_Pre_Class_NA
+} RtemsSemReqTimeout_Pre_Class;
+
+typedef enum {
+ RtemsSemReqTimeout_Pre_Discipline_FIFO,
+ RtemsSemReqTimeout_Pre_Discipline_Priority,
+ RtemsSemReqTimeout_Pre_Discipline_NA
+} RtemsSemReqTimeout_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqTimeout_Post_Action_Timeout,
+ RtemsSemReqTimeout_Post_Action_TimeoutMrsP,
+ RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit,
+ RtemsSemReqTimeout_Post_Action_NA
+} RtemsSemReqTimeout_Post_Action;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Class_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Post_Action : 2;
+} RtemsSemReqTimeout_Entry;
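+
+/*
+ * An entry encodes whether the transition variant is skipped, whether a
+ * pre-condition state is not applicable, and which post-condition state to
+ * check.
+ */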
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/timeout test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
+ */
+  TQContext tq_ctx;
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 2 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqTimeout_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqTimeout_Context;
+
+static RtemsSemReqTimeout_Context
+ RtemsSemReqTimeout_Instance;
+
+static const char * const RtemsSemReqTimeout_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqTimeout_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqTimeout_PreDesc[] = {
+ RtemsSemReqTimeout_PreDesc_Class,
+ RtemsSemReqTimeout_PreDesc_Discipline,
+ NULL
+};
+
+static void RtemsSemReqTimeout_Pre_Class_Prepare(
+ RtemsSemReqTimeout_Context *ctx,
+ RtemsSemReqTimeout_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqTimeout_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_PrioCeiling: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Class_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqTimeout_Pre_Discipline_Prepare(
+ RtemsSemReqTimeout_Context *ctx,
+ RtemsSemReqTimeout_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqTimeout_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqTimeout_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqTimeout_Post_Action_Check(
+ RtemsSemReqTimeout_Context *ctx,
+ RtemsSemReqTimeout_Post_Action state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqTimeout_Post_Action_Timeout: {
+ /*
+ * The semaphore obtain timeout actions shall be done as specified by
+ * /score/tq/req/timeout.
+ */
+ ctx->tq_ctx.wait = TQ_WAIT_TIMED;
+ ScoreTqReqTimeout_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqTimeout_Post_Action_TimeoutMrsP: {
+ /*
+ * The semaphore obtain timeout actions shall be done as specified by
+ * /score/tq/req/timeout-mrsp.
+ */
+ ctx->tq_ctx.wait = TQ_WAIT_TIMED;
+ ScoreTqReqTimeoutMrsp_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit: {
+ /*
+ * The semaphore obtain timeout actions shall be done as specified by
+ * /score/tq/req/timeout-priority-inherit.
+ */
+ ctx->tq_ctx.wait = TQ_WAIT_FOREVER;
+ ScoreTqReqTimeoutPriorityInherit_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqTimeout_Post_Action_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqTimeout_Setup( RtemsSemReqTimeout_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqTimeout_Setup_Wrap( void *arg )
+{
+ RtemsSemReqTimeout_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqTimeout_Setup( ctx );
+}
+
+static void RtemsSemReqTimeout_Teardown( RtemsSemReqTimeout_Context *ctx )
+{
+ TQDestroy( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqTimeout_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqTimeout_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqTimeout_Teardown( ctx );
+}
+
+static void RtemsSemReqTimeout_Prepare( RtemsSemReqTimeout_Context *ctx )
+{
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+ ctx->tq_ctx.thread_queue_id = 0;
+}
+
+static void RtemsSemReqTimeout_Action( RtemsSemReqTimeout_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ OBJECT_NAME,
+ 1,
+ ctx->attribute_set,
+ PRIO_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+}
+
+static void RtemsSemReqTimeout_Cleanup( RtemsSemReqTimeout_Context *ctx )
+{
+  if ( ctx->tq_ctx.thread_queue_id != 0 ) {
+    rtems_status_code sc;
+
+    sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+    T_rsc_success( sc );
+  }
+}
+
+static const RtemsSemReqTimeout_Entry
+RtemsSemReqTimeout_Entries[] = {
+ { 0, 0, 0, RtemsSemReqTimeout_Post_Action_Timeout },
+ { 1, 0, 0, RtemsSemReqTimeout_Post_Action_NA },
+ { 0, 0, 0, RtemsSemReqTimeout_Post_Action_TimeoutPriorityInherit },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, RtemsSemReqTimeout_Post_Action_NA }
+#else
+ { 0, 0, 0, RtemsSemReqTimeout_Post_Action_TimeoutMrsP }
+#endif
+};
+
+static const uint8_t
+RtemsSemReqTimeout_Map[] = {
+ 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 1, 3
+};
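+
+/*
+ * The transition map is indexed by the class state (six values) times the
+ * discipline state (two values). Entry 1 has the skip flag set and covers
+ * the FIFO discipline combined with the priority ceiling, priority
+ * inheritance, and MrsP classes.
+ */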
+
+static size_t RtemsSemReqTimeout_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqTimeout_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqTimeout_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqTimeout_Fixture = {
+ .setup = RtemsSemReqTimeout_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqTimeout_Teardown_Wrap,
+ .scope = RtemsSemReqTimeout_Scope,
+ .initial_context = &RtemsSemReqTimeout_Instance
+};
+
+static inline RtemsSemReqTimeout_Entry RtemsSemReqTimeout_PopEntry(
+ RtemsSemReqTimeout_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqTimeout_Entries[
+ RtemsSemReqTimeout_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqTimeout_TestVariant( RtemsSemReqTimeout_Context *ctx )
+{
+ RtemsSemReqTimeout_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqTimeout_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqTimeout_Action( ctx );
+ RtemsSemReqTimeout_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqTimeout( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqTimeout, &RtemsSemReqTimeout_Fixture )
+{
+ RtemsSemReqTimeout_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSemReqTimeout_Pre_Class_Counting;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqTimeout_Pre_Class_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSemReqTimeout_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqTimeout_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqTimeout_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqTimeout_Prepare( ctx );
+ RtemsSemReqTimeout_TestVariant( ctx );
+ RtemsSemReqTimeout_Cleanup( ctx );
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sem-uni.c b/testsuites/validation/tc-sem-uni.c
new file mode 100644
index 0000000000..08c38adb6c
--- /dev/null
+++ b/testsuites/validation/tc-sem-uni.c
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemValUni
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemValUni spec:/rtems/sem/val/uni
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationOneCpu0
+ *
+ * @brief Tests uniprocessor-specific semaphore behaviour.
+ *
+ * This test case performs the following actions:
+ *
+ * - Create a worker thread and two MrsP mutexes. Obtain the MrsP mutexes and
+ * check that a task yield works (owner is not sticky). We need two mutexes
+ *   since the uniprocessor schedulers do not increment the sticky level in the
+ * scheduler unblock operation.
+ *
+ * - Yield and let the worker obtain the MrsP mutexes.
+ *
+ * - Yield and let the worker release the MrsP mutexes.
+ *
+ * - Clean up all used resources.
+ *
+ * @{
+ */
+
+/**
+ * @brief Test context for spec:/rtems/sem/val/uni test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the mutex identifier.
+ */
+ rtems_id mutex_id;
+
+ /**
+ * @brief This member contains the second mutex identifier.
+ */
+ rtems_id mutex_2_id;
+
+ /**
+ * @brief This member contains a progress counter.
+ */
+ uint32_t counter;
+} RtemsSemValUni_Context;
+
+static RtemsSemValUni_Context
+ RtemsSemValUni_Instance;
+
+typedef RtemsSemValUni_Context Context;
+
+static void ObtainReleaseMrsPTask( rtems_task_argument arg )
+{
+ Context *ctx;
+
+ ctx = (Context *) arg;
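+
+  /*
+   * Since the uniprocessor schedulers do not make the MrsP mutex owner
+   * sticky, the Yield() below hands the processor back to the runner even
+   * while both mutexes are held.
+   */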
+ ObtainMutex( ctx->mutex_id );
+ ObtainMutex( ctx->mutex_2_id );
+ ctx->counter = 1;
+ Yield();
+ ReleaseMutex( ctx->mutex_2_id );
+ ReleaseMutex( ctx->mutex_id );
+ ctx->counter = 2;
+ (void) ReceiveAnyEvents();
+}
+
+static void RtemsSemValUni_Setup( RtemsSemValUni_Context *ctx )
+{
+ SetSelfPriority( PRIO_NORMAL );
+}
+
+static void RtemsSemValUni_Setup_Wrap( void *arg )
+{
+ RtemsSemValUni_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValUni_Setup( ctx );
+}
+
+static void RtemsSemValUni_Teardown( RtemsSemValUni_Context *ctx )
+{
+ RestoreRunnerPriority();
+}
+
+static void RtemsSemValUni_Teardown_Wrap( void *arg )
+{
+ RtemsSemValUni_Context *ctx;
+
+ ctx = arg;
+ RtemsSemValUni_Teardown( ctx );
+}
+
+static T_fixture RtemsSemValUni_Fixture = {
+ .setup = RtemsSemValUni_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemValUni_Teardown_Wrap,
+ .scope = NULL,
+ .initial_context = &RtemsSemValUni_Instance
+};
+
+/**
+ * @brief Create a worker thread and two MrsP mutexes. Obtain the MrsP mutexes
+ * and check that a task yield works (owner is not sticky). We need two
+ * mutexes since the uniprocessor schedulers do not increment the sticky level
+ * in the scheduler unblock operation.
+ */
+static void RtemsSemValUni_Action_0( RtemsSemValUni_Context *ctx )
+{
+ rtems_status_code sc;
+ rtems_id worker_id;
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', '1' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &ctx->mutex_id
+ );
+ T_rsc_success( sc );
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', '2' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &ctx->mutex_2_id
+ );
+ T_rsc_success( sc );
+
+ ctx->counter = 0;
+
+ worker_id = CreateTask( "WORK", PRIO_NORMAL );
+ StartTask( worker_id, ObtainReleaseMrsPTask, ctx );
+
+ /*
+ * Yield and let the worker obtain the MrsP mutexes.
+ */
+ Yield();
+ T_eq_u32( ctx->counter, 1 );
+
+ /*
+ * Yield and let the worker release the MrsP mutexes.
+ */
+ Yield();
+ T_eq_u32( ctx->counter, 2 );
+
+ /*
+ * Clean up all used resources.
+ */
+ DeleteTask( worker_id );
+ DeleteMutex( ctx->mutex_2_id );
+ DeleteMutex( ctx->mutex_id );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemValUni( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemValUni, &RtemsSemValUni_Fixture )
+{
+ RtemsSemValUni_Context *ctx;
+
+ ctx = T_fixture_context();
+
+ RtemsSemValUni_Action_0( ctx );
+}
+
+/** @} */