author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2021-09-07 09:27:27 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2021-09-15 07:50:50 +0200
commit     a2ec94667c40beac0ae90c57a30c2784897e5272 (patch)
tree       726ef02c6e9dfd8bc4643b33ce2cfdf2bff393b5
parent     cc63ab1da7b40d7e286ada0deeeb5f190cb007a1 (diff)
validation: Test rtems_semaphore_flush()
-rw-r--r--   spec/build/testsuites/validation/validation-0.yml      |   3
-rw-r--r--   testsuites/validation/tc-sem-flush.c                    | 623
-rw-r--r--   testsuites/validation/tr-tq-flush-fifo.c                |   6
-rw-r--r--   testsuites/validation/tr-tq-flush-priority-inherit.c    | 578
-rw-r--r--   testsuites/validation/tr-tq-flush-priority-inherit.h    | 103
-rw-r--r--   testsuites/validation/tr-tq-flush-priority.c            | 416
-rw-r--r--   testsuites/validation/tr-tq-flush-priority.h            |  97
7 files changed, 1823 insertions, 3 deletions
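
The new test case exercises rtems_semaphore_flush() for all semaphore classes
and wait queue disciplines.  As a rough illustrative sketch (not part of the
patch; the task entry, the name "FLSH", and FlushExample() are made up), the
API-level behavior validated here is that a flush unblocks every task waiting
on the semaphore and the pending rtems_semaphore_obtain() call of each flushed
task returns RTEMS_UNSATISFIED:

    #include <rtems.h>
    #include <assert.h>

    static rtems_id sem_id;

    static rtems_task Waiter( rtems_task_argument arg )
    {
      rtems_status_code sc;

      (void) arg;

      /* Blocks, since the semaphore is created with a count of zero */
      sc = rtems_semaphore_obtain( sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

      /* A flush unblocks the waiter; the obtain reports RTEMS_UNSATISFIED */
      assert( sc == RTEMS_UNSATISFIED );

      rtems_task_exit();
    }

    static void FlushExample( void )
    {
      rtems_status_code sc;

      sc = rtems_semaphore_create(
        rtems_build_name( 'F', 'L', 'S', 'H' ),
        0,
        RTEMS_SIMPLE_BINARY_SEMAPHORE | RTEMS_FIFO,
        0,
        &sem_id
      );
      assert( sc == RTEMS_SUCCESSFUL );

      /* ... create and start one or more Waiter tasks which block here ... */

      /* Unblock all waiting tasks without making the semaphore available */
      sc = rtems_semaphore_flush( sem_id );
      assert( sc == RTEMS_SUCCESSFUL );
    }
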
diff --git a/spec/build/testsuites/validation/validation-0.yml b/spec/build/testsuites/validation/validation-0.yml
index 23947c4577..f8eb5bf79d 100644
--- a/spec/build/testsuites/validation/validation-0.yml
+++ b/spec/build/testsuites/validation/validation-0.yml
@@ -71,6 +71,7 @@ source:
- testsuites/validation/tc-score-fatal.c
- testsuites/validation/tc-score-thread.c
- testsuites/validation/tc-sem-create.c
+- testsuites/validation/tc-sem-flush.c
- testsuites/validation/tc-sem-ident.c
- testsuites/validation/tc-sem-obtain.c
- testsuites/validation/tc-sem-release.c
@@ -121,6 +122,8 @@ source:
- testsuites/validation/tr-tq-enqueue-mrsp.c
- testsuites/validation/tr-tq-enqueue-priority.c
- testsuites/validation/tr-tq-flush-fifo.c
+- testsuites/validation/tr-tq-flush-priority.c
+- testsuites/validation/tr-tq-flush-priority-inherit.c
- testsuites/validation/ts-validation-0.c
stlib: []
target: testsuites/validation/ts-validation-0.exe
diff --git a/testsuites/validation/tc-sem-flush.c b/testsuites/validation/tc-sem-flush.c
new file mode 100644
index 0000000000..f352916351
--- /dev/null
+++ b/testsuites/validation/tc-sem-flush.c
@@ -0,0 +1,623 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSemReqFlush
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <string.h>
+
+#include "tr-tq-flush-fifo.h"
+#include "tr-tq-flush-priority-inherit.h"
+#include "tr-tq-flush-priority.h"
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSemReqFlush spec:/rtems/sem/req/flush
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Class_Counting,
+ RtemsSemReqFlush_Pre_Class_Simple,
+ RtemsSemReqFlush_Pre_Class_Binary,
+ RtemsSemReqFlush_Pre_Class_PrioCeiling,
+ RtemsSemReqFlush_Pre_Class_PrioInherit,
+ RtemsSemReqFlush_Pre_Class_MrsP,
+ RtemsSemReqFlush_Pre_Class_NA
+} RtemsSemReqFlush_Pre_Class;
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Discipline_FIFO,
+ RtemsSemReqFlush_Pre_Discipline_Priority,
+ RtemsSemReqFlush_Pre_Discipline_NA
+} RtemsSemReqFlush_Pre_Discipline;
+
+typedef enum {
+ RtemsSemReqFlush_Pre_Id_Valid,
+ RtemsSemReqFlush_Pre_Id_Invalid,
+ RtemsSemReqFlush_Pre_Id_NA
+} RtemsSemReqFlush_Pre_Id;
+
+typedef enum {
+ RtemsSemReqFlush_Post_Action_InvId,
+ RtemsSemReqFlush_Post_Action_NotDef,
+ RtemsSemReqFlush_Post_Action_FlushFIFO,
+ RtemsSemReqFlush_Post_Action_FlushPriority,
+ RtemsSemReqFlush_Post_Action_FlushPriorityCeiling,
+ RtemsSemReqFlush_Post_Action_FlushPriorityInherit,
+ RtemsSemReqFlush_Post_Action_NA
+} RtemsSemReqFlush_Post_Action;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Class_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Post_Action : 3;
+} RtemsSemReqFlush_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/sem/req/flush test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the thread queue test context.
+ */
+  TQContext tq_ctx;
+
+ /**
+   * @brief This member specifies the attribute set of the semaphore.
+ */
+ rtems_attribute attribute_set;
+
+ /**
+   * @brief This member specifies the initial count of the semaphore.
+ */
+ uint32_t initial_count;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 3 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSemReqFlush_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSemReqFlush_Context;
+
+static RtemsSemReqFlush_Context
+ RtemsSemReqFlush_Instance;
+
+static const char * const RtemsSemReqFlush_PreDesc_Class[] = {
+ "Counting",
+ "Simple",
+ "Binary",
+ "PrioCeiling",
+ "PrioInherit",
+ "MrsP",
+ "NA"
+};
+
+static const char * const RtemsSemReqFlush_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const RtemsSemReqFlush_PreDesc_Id[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const * const RtemsSemReqFlush_PreDesc[] = {
+ RtemsSemReqFlush_PreDesc_Class,
+ RtemsSemReqFlush_PreDesc_Discipline,
+ RtemsSemReqFlush_PreDesc_Id,
+ NULL
+};
+
+#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+typedef RtemsSemReqFlush_Context Context;
+
+static void EnqueuePrepare( TQContext *tq_ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+}
+
+static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
+{
+ rtems_status_code sc;
+
+ (void) wait;
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc( sc, RTEMS_UNSATISFIED );
+
+ return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
+}
+
+static void Flush( TQContext *tq_ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
+ T_rsc_success( sc );
+}
+
+static void RtemsSemReqFlush_Pre_Class_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Class state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Class_Counting: {
+ /*
+ * While the semaphore object is a counting semaphore.
+ */
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_Simple: {
+ /*
+ * While the semaphore object is a simple binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_Binary: {
+ /*
+ * While the semaphore object is a binary semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_PrioCeiling: {
+ /*
+ * While the semaphore object is a priority ceiling semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_PrioInherit: {
+ /*
+ * While the semaphore object is a priority inheritance semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_MrsP: {
+ /*
+ * While the semaphore object is a MrsP semaphore.
+ */
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Class_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Pre_Discipline_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Discipline_FIFO: {
+ /*
+ * While the semaphore uses the FIFO task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Discipline_Priority: {
+ /*
+ * While the semaphore uses the priority task wait queue discipline.
+ */
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Pre_Id_Prepare(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSemReqFlush_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter is associated with the semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Id_Invalid: {
+ /*
+ * While the ``id`` parameter is not associated with a semaphore.
+ */
+ /* Nothing to prepare */
+ break;
+ }
+
+ case RtemsSemReqFlush_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Post_Action_Check(
+ RtemsSemReqFlush_Context *ctx,
+ RtemsSemReqFlush_Post_Action state
+)
+{
+ rtems_status_code sc;
+
+ switch ( state ) {
+ case RtemsSemReqFlush_Post_Action_InvId: {
+ /*
+ * The return status of rtems_semaphore_flush() shall be
+ * RTEMS_INVALID_ID.
+ */
+ sc = rtems_semaphore_flush( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_NotDef: {
+ /*
+ * The return status of rtems_semaphore_flush() shall be
+ * RTEMS_NOT_DEFINED.
+ */
+ sc = rtems_semaphore_flush( ctx->tq_ctx.thread_queue_id );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushFIFO: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-fifo.
+ */
+ ScoreTqReqFlushFifo_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriority: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority.
+ */
+ ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, true );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriorityCeiling: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority.
+ */
+ ScoreTqReqFlushPriority_Run( &ctx->tq_ctx, false );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_FlushPriorityInherit: {
+ /*
+ * The calling task shall flush the semaphore as specified by
+ * /score/tq/req/flush-priority-inherit.
+ */
+ ScoreTqReqFlushPriorityInherit_Run( &ctx->tq_ctx );
+ break;
+ }
+
+ case RtemsSemReqFlush_Post_Action_NA:
+ break;
+ }
+}
+
+static void RtemsSemReqFlush_Setup( RtemsSemReqFlush_Context *ctx )
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.enqueue = Enqueue;
+ ctx->tq_ctx.flush = Flush;
+ ctx->tq_ctx.surrender = TQDoNothing;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqFlush_Setup_Wrap( void *arg )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqFlush_Setup( ctx );
+}
+
+static void RtemsSemReqFlush_Teardown( RtemsSemReqFlush_Context *ctx )
+{
+ TQDestroy( &ctx->tq_ctx );
+}
+
+static void RtemsSemReqFlush_Teardown_Wrap( void *arg )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSemReqFlush_Teardown( ctx );
+}
+
+static void RtemsSemReqFlush_Prepare( RtemsSemReqFlush_Context *ctx )
+{
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+}
+
+static void RtemsSemReqFlush_Action( RtemsSemReqFlush_Context *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ ctx->initial_count,
+ ctx->attribute_set,
+ PRIO_ULTRA_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+}
+
+static void RtemsSemReqFlush_Cleanup( RtemsSemReqFlush_Context *ctx )
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+  T_rsc_success( sc );
+}
+
+static const RtemsSemReqFlush_Entry
+RtemsSemReqFlush_Entries[] = {
+ { 0, 1, 1, 0, RtemsSemReqFlush_Post_Action_InvId },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushFIFO },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriority },
+ { 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityCeiling },
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_FlushPriorityInherit },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSemReqFlush_Post_Action_NotDef }
+#else
+ { 1, 0, 0, 0, RtemsSemReqFlush_Post_Action_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSemReqFlush_Map[] = {
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 3, 0, 4, 0, 3, 0, 5, 0, 3, 0, 6, 0
+};
+
+static size_t RtemsSemReqFlush_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSemReqFlush_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSemReqFlush_Fixture = {
+ .setup = RtemsSemReqFlush_Setup_Wrap,
+ .stop = NULL,
+ .teardown = RtemsSemReqFlush_Teardown_Wrap,
+ .scope = RtemsSemReqFlush_Scope,
+ .initial_context = &RtemsSemReqFlush_Instance
+};
+
+static inline RtemsSemReqFlush_Entry RtemsSemReqFlush_PopEntry(
+ RtemsSemReqFlush_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSemReqFlush_Entries[
+ RtemsSemReqFlush_Map[ index ]
+ ];
+}
+
+static void RtemsSemReqFlush_SetPreConditionStates(
+ RtemsSemReqFlush_Context *ctx
+)
+{
+ if ( ctx->Map.entry.Pre_Class_NA ) {
+ ctx->Map.pcs[ 0 ] = RtemsSemReqFlush_Pre_Class_NA;
+ } else {
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ }
+
+ if ( ctx->Map.entry.Pre_Discipline_NA ) {
+ ctx->Map.pcs[ 1 ] = RtemsSemReqFlush_Pre_Discipline_NA;
+ } else {
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ }
+
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+}
+
+static void RtemsSemReqFlush_TestVariant( RtemsSemReqFlush_Context *ctx )
+{
+ RtemsSemReqFlush_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSemReqFlush_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSemReqFlush_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSemReqFlush_Action( ctx );
+ RtemsSemReqFlush_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
+}
+
+/**
+ * @fn void T_case_body_RtemsSemReqFlush( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSemReqFlush, &RtemsSemReqFlush_Fixture )
+{
+ RtemsSemReqFlush_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pci[ 0 ] = RtemsSemReqFlush_Pre_Class_Counting;
+ ctx->Map.pci[ 0 ] < RtemsSemReqFlush_Pre_Class_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = RtemsSemReqFlush_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < RtemsSemReqFlush_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = RtemsSemReqFlush_Pre_Id_Valid;
+ ctx->Map.pci[ 2 ] < RtemsSemReqFlush_Pre_Id_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSemReqFlush_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSemReqFlush_SetPreConditionStates( ctx );
+ RtemsSemReqFlush_Prepare( ctx );
+ RtemsSemReqFlush_TestVariant( ctx );
+ RtemsSemReqFlush_Cleanup( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-fifo.c b/testsuites/validation/tr-tq-flush-fifo.c
index e6f35f1937..27aa25ee75 100644
--- a/testsuites/validation/tr-tq-flush-fifo.c
+++ b/testsuites/validation/tr-tq-flush-fifo.c
@@ -275,10 +275,10 @@ static void ScoreTqReqFlushFifo_Action( ScoreTqReqFlushFifo_Context *ctx )
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
if ( ctx->tq_ctx->how_many > 0 ) {
- TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
- TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
T_scheduler_set_event_handler( SchedulerEvent, ctx );
- TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
} else {
TQSchedulerRecordStart( ctx->tq_ctx );
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
diff --git a/testsuites/validation/tr-tq-flush-priority-inherit.c b/testsuites/validation/tr-tq-flush-priority-inherit.c
new file mode 100644
index 0000000000..7d2befa676
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority-inherit.c
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-flush-priority-inherit.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqFlushPriorityInherit \
+ * spec:/score/tq/req/flush-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Priority_NA : 1;
+ uint8_t Pre_Queue_NA : 1;
+ uint8_t Post_Extract : 2;
+ uint8_t Post_PriorityUpdate : 2;
+} ScoreTqReqFlushPriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/flush-priority-inherit test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+  CallWithinISRRequest request;
+
+ /**
+ * @brief If this member is true, then a minimum priority of the owner of the
+ * thread queue shall be inherited from a thread enqueued on the thread
+ * queue.
+ */
+ bool minimum;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqFlushPriorityInherit_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 2 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 2 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqFlushPriorityInherit_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqFlushPriorityInherit_Context;
+
+static ScoreTqReqFlushPriorityInherit_Context
+ ScoreTqReqFlushPriorityInherit_Instance;
+
+static const char * const ScoreTqReqFlushPriorityInherit_PreDesc_Priority[] = {
+ "Minimum",
+ "NotMinimum",
+ "NA"
+};
+
+static const char * const ScoreTqReqFlushPriorityInherit_PreDesc_Queue[] = {
+ "Empty",
+ "NonEmpty",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqFlushPriorityInherit_PreDesc[] = {
+ ScoreTqReqFlushPriorityInherit_PreDesc_Priority,
+ ScoreTqReqFlushPriorityInherit_PreDesc_Queue,
+ NULL
+};
+
+typedef ScoreTqReqFlushPriorityInherit_Context Context;
+
+static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index );
+}
+
+static const T_scheduler_event *GetPriorityUpdate( Context *ctx, size_t *index )
+{
+ return T_scheduler_next(
+ &ctx->tq_ctx->scheduler_log.header,
+ T_SCHEDULER_UPDATE_PRIORITY,
+ index
+ );
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+static void Flush( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQFlush( ctx->tq_ctx );
+}
+
+static void SchedulerEvent( void *arg, const T_scheduler_event *event )
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if ( event->operation == T_SCHEDULER_BLOCK ) {
+ ctx->request.handler = Flush;
+ ctx->request.arg = ctx;
+ CallWithinISRSubmit( &ctx->request );
+ T_scheduler_set_event_handler( NULL, NULL );
+ }
+}
+
+static void ScoreTqReqFlushPriorityInherit_Pre_Priority_Prepare(
+ ScoreTqReqFlushPriorityInherit_Context *ctx,
+ ScoreTqReqFlushPriorityInherit_Pre_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum: {
+ /*
+ * While a minimum priority of the owner of the thread queue is inherited
+ * from a thread enqueued on the thread queue.
+ */
+ ctx->minimum = true;
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Pre_Priority_NotMinimum: {
+ /*
+ * While no minimum priority of the owner of the thread queue is
+ * inherited from a thread enqueued on the thread queue.
+ */
+ ctx->minimum = false;
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Pre_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriorityInherit_Pre_Queue_Prepare(
+ ScoreTqReqFlushPriorityInherit_Context *ctx,
+ ScoreTqReqFlushPriorityInherit_Pre_Queue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty: {
+ /*
+ * While the thread queue is empty.
+ */
+ ctx->tq_ctx->how_many = 0;
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Pre_Queue_NonEmpty: {
+ /*
+ * While the thread queue has at least one enqueued thread.
+ */
+ ctx->tq_ctx->how_many = 3;
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Pre_Queue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriorityInherit_Post_Extract_Check(
+ ScoreTqReqFlushPriorityInherit_Context *ctx,
+ ScoreTqReqFlushPriorityInherit_Post_Extract state
+)
+{
+ size_t i;
+ const T_scheduler_event *event;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqFlushPriorityInherit_Post_Extract_Nop: {
+ /*
+ * No operation shall be performed.
+ */
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetPriorityUpdate( ctx, &i ), &T_scheduler_event_null );
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Post_Extract_All: {
+ /*
+ * The enqueued threads of the thread queue shall be extracted in
+ * priority order for each priority queue associated with a scheduler.
+ * The priority queues of the thread queue shall be accessed in FIFO
+ * order.
+ */
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+ if ( ctx->minimum ) {
+ /*
+ * This priority update is carried out by
+ * _Thread_queue_Flush_critical().
+ */
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+ if ( ctx->minimum && rtems_configuration_get_maximum_processors() > 1 ) {
+ /*
+ * This superfluous priority update is carried out by
+ * _Thread_queue_Enqueue() since TQ_BLOCKER_B would have inherited its
+ * priority for scheduler B to TQ_BLOCKER_A if it were not flushed from
+ * the thread queue.
+ */
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event, &T_scheduler_event_null );
+
+ T_eq_u32(
+ GetPriority( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ),
+ PRIO_HIGH
+ );
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Post_Extract_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Check(
+ ScoreTqReqFlushPriorityInherit_Context *ctx,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No: {
+ /*
+ * The current priority of the owner of the thread queue shall not be
+ * updated by the thread queue flush operation.
+ */
+ /* Checked by ``Extract`` post-condition state ``Nop`` */
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes: {
+ /*
+ * The current priority of the owner of the thread queue shall be updated
+ * by the thread queue flush operation to reflect the loss of inherited
+ * priorities of the flushed threads.
+ */
+ /* Checked by ``Extract`` post-condition state ``All`` */
+ break;
+ }
+
+ case ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriorityInherit_Setup(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+}
+
+static void ScoreTqReqFlushPriorityInherit_Setup_Wrap( void *arg )
+{
+ ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushPriorityInherit_Setup( ctx );
+}
+
+static void ScoreTqReqFlushPriorityInherit_Teardown(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+}
+
+static void ScoreTqReqFlushPriorityInherit_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushPriorityInherit_Teardown( ctx );
+}
+
+static void ScoreTqReqFlushPriorityInherit_Action(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+ if ( ctx->tq_ctx->how_many > 0 ) {
+ if ( ctx->minimum ) {
+ if ( rtems_configuration_get_maximum_processors() > 1 ) {
+ TQSetScheduler(
+ ctx->tq_ctx, TQ_BLOCKER_B,
+ ctx->tq_ctx->other_scheduler_id,
+ PRIO_HIGH
+ );
+ } else {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ ctx->tq_ctx->runner_scheduler_id,
+ PRIO_HIGH
+ );
+ }
+
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ } else {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ ctx->tq_ctx->runner_scheduler_id,
+ PRIO_HIGH
+ );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_HIGH );
+
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ }
+
+ T_scheduler_set_event_handler( SchedulerEvent, ctx );
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ } else {
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+ }
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+}
+
+static const ScoreTqReqFlushPriorityInherit_Entry
+ScoreTqReqFlushPriorityInherit_Entries[] = {
+ { 0, 1, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_Nop,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No },
+ { 0, 0, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes },
+ { 0, 0, 0, ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No }
+};
+
+static const uint8_t
+ScoreTqReqFlushPriorityInherit_Map[] = {
+ 0, 1, 0, 2
+};
+
+static size_t ScoreTqReqFlushPriorityInherit_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqFlushPriorityInherit_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqFlushPriorityInherit_Fixture = {
+ .setup = ScoreTqReqFlushPriorityInherit_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqFlushPriorityInherit_Teardown_Wrap,
+ .scope = ScoreTqReqFlushPriorityInherit_Scope,
+ .initial_context = &ScoreTqReqFlushPriorityInherit_Instance
+};
+
+static inline ScoreTqReqFlushPriorityInherit_Entry
+ScoreTqReqFlushPriorityInherit_PopEntry(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return ScoreTqReqFlushPriorityInherit_Entries[
+ ScoreTqReqFlushPriorityInherit_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqFlushPriorityInherit_SetPreConditionStates(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ if ( ctx->Map.entry.Pre_Priority_NA ) {
+ ctx->Map.pcs[ 0 ] = ScoreTqReqFlushPriorityInherit_Pre_Priority_NA;
+ } else {
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ }
+
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+}
+
+static void ScoreTqReqFlushPriorityInherit_TestVariant(
+ ScoreTqReqFlushPriorityInherit_Context *ctx
+)
+{
+ ScoreTqReqFlushPriorityInherit_Pre_Priority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ ScoreTqReqFlushPriorityInherit_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ ScoreTqReqFlushPriorityInherit_Action( ctx );
+ ScoreTqReqFlushPriorityInherit_Post_Extract_Check(
+ ctx,
+ ctx->Map.entry.Post_Extract
+ );
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Check(
+ ctx,
+ ctx->Map.entry.Post_PriorityUpdate
+ );
+}
+
+static T_fixture_node ScoreTqReqFlushPriorityInherit_Node;
+
+void ScoreTqReqFlushPriorityInherit_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqFlushPriorityInherit_Context *ctx;
+
+ ctx = &ScoreTqReqFlushPriorityInherit_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqFlushPriorityInherit_Node,
+ &ScoreTqReqFlushPriorityInherit_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pci[ 0 ] = ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum;
+ ctx->Map.pci[ 0 ] < ScoreTqReqFlushPriorityInherit_Pre_Priority_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty;
+ ctx->Map.pci[ 1 ] < ScoreTqReqFlushPriorityInherit_Pre_Queue_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqFlushPriorityInherit_PopEntry( ctx );
+ ScoreTqReqFlushPriorityInherit_SetPreConditionStates( ctx );
+ ScoreTqReqFlushPriorityInherit_TestVariant( ctx );
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-priority-inherit.h b/testsuites/validation/tr-tq-flush-priority-inherit.h
new file mode 100644
index 0000000000..1b12f1afb0
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority-inherit.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_FLUSH_PRIORITY_INHERIT_H
+#define _TR_TQ_FLUSH_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqFlushPriorityInherit
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqFlushPriorityInherit_Pre_Priority_Minimum,
+ ScoreTqReqFlushPriorityInherit_Pre_Priority_NotMinimum,
+ ScoreTqReqFlushPriorityInherit_Pre_Priority_NA
+} ScoreTqReqFlushPriorityInherit_Pre_Priority;
+
+typedef enum {
+ ScoreTqReqFlushPriorityInherit_Pre_Queue_Empty,
+ ScoreTqReqFlushPriorityInherit_Pre_Queue_NonEmpty,
+ ScoreTqReqFlushPriorityInherit_Pre_Queue_NA
+} ScoreTqReqFlushPriorityInherit_Pre_Queue;
+
+typedef enum {
+ ScoreTqReqFlushPriorityInherit_Post_Extract_Nop,
+ ScoreTqReqFlushPriorityInherit_Post_Extract_All,
+ ScoreTqReqFlushPriorityInherit_Post_Extract_NA
+} ScoreTqReqFlushPriorityInherit_Post_Extract;
+
+typedef enum {
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_No,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_Yes,
+ ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate_NA
+} ScoreTqReqFlushPriorityInherit_Post_PriorityUpdate;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqFlushPriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_FLUSH_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-flush-priority.c b/testsuites/validation/tr-tq-flush-priority.c
new file mode 100644
index 0000000000..432c65d8c5
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority.c
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-tq-flush-priority.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqFlushPriority \
+ * spec:/score/tq/req/flush-priority
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Queue_NA : 1;
+ uint8_t Post_Operation : 2;
+} ScoreTqReqFlushPriority_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/flush-priority test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+  CallWithinISRRequest request;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqFlushPriority_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqFlushPriority_Run() parameter.
+ */
+ bool supports_multiple_priority_queues;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 1 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqFlushPriority_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqFlushPriority_Context;
+
+static ScoreTqReqFlushPriority_Context
+ ScoreTqReqFlushPriority_Instance;
+
+static const char * const ScoreTqReqFlushPriority_PreDesc_Queue[] = {
+ "Empty",
+ "NonEmpty",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqFlushPriority_PreDesc[] = {
+ ScoreTqReqFlushPriority_PreDesc_Queue,
+ NULL
+};
+
+typedef ScoreTqReqFlushPriority_Context Context;
+
+static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index );
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+static void Flush( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQFlush( ctx->tq_ctx );
+}
+
+static void SchedulerEvent( void *arg, const T_scheduler_event *event )
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if ( event->operation == T_SCHEDULER_BLOCK ) {
+ ctx->request.handler = Flush;
+ ctx->request.arg = ctx;
+ CallWithinISRSubmit( &ctx->request );
+ T_scheduler_set_event_handler( NULL, NULL );
+ }
+}
+
+static void ScoreTqReqFlushPriority_Pre_Queue_Prepare(
+ ScoreTqReqFlushPriority_Context *ctx,
+ ScoreTqReqFlushPriority_Pre_Queue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushPriority_Pre_Queue_Empty: {
+ /*
+ * While the thread queue is empty.
+ */
+ ctx->tq_ctx->how_many = 0;
+ break;
+ }
+
+ case ScoreTqReqFlushPriority_Pre_Queue_NonEmpty: {
+ /*
+ * While the thread queue has at least one enqueued thread.
+ */
+ ctx->tq_ctx->how_many = 3;
+ break;
+ }
+
+ case ScoreTqReqFlushPriority_Pre_Queue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriority_Post_Operation_Check(
+ ScoreTqReqFlushPriority_Context *ctx,
+ ScoreTqReqFlushPriority_Post_Operation state
+)
+{
+ size_t i;
+ const T_scheduler_event *event;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqFlushPriority_Post_Operation_Nop: {
+ /*
+ * No operation shall be performed.
+ */
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ break;
+ }
+
+ case ScoreTqReqFlushPriority_Post_Operation_TryExtract: {
+ /*
+ * The enqueued threads of the thread queue may be extracted in priority
+ * order for each priority queue associated with a scheduler. The
+ * priority queues of the thread queue shall be accessed in FIFO order.
+ */
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ break;
+ }
+
+ case ScoreTqReqFlushPriority_Post_Operation_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushPriority_Setup(
+ ScoreTqReqFlushPriority_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+
+ if (
+ ctx->supports_multiple_priority_queues &&
+ rtems_configuration_get_maximum_processors() > 1
+ ) {
+ TQSetScheduler(
+ ctx->tq_ctx, TQ_BLOCKER_B,
+ ctx->tq_ctx->other_scheduler_id,
+ PRIO_HIGH
+ );
+ } else {
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+ }
+
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+}
+
+static void ScoreTqReqFlushPriority_Setup_Wrap( void *arg )
+{
+ ScoreTqReqFlushPriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushPriority_Setup( ctx );
+}
+
+static void ScoreTqReqFlushPriority_Teardown(
+ ScoreTqReqFlushPriority_Context *ctx
+)
+{
+ TQReset( ctx->tq_ctx );
+}
+
+static void ScoreTqReqFlushPriority_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqFlushPriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqFlushPriority_Teardown( ctx );
+}
+
+static void ScoreTqReqFlushPriority_Action(
+ ScoreTqReqFlushPriority_Context *ctx
+)
+{
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+ if ( ctx->tq_ctx->how_many > 0 ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ T_scheduler_set_event_handler( SchedulerEvent, ctx );
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ } else {
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+ }
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+}
+
+static const ScoreTqReqFlushPriority_Entry
+ScoreTqReqFlushPriority_Entries[] = {
+ { 0, 0, ScoreTqReqFlushPriority_Post_Operation_Nop },
+ { 0, 0, ScoreTqReqFlushPriority_Post_Operation_TryExtract }
+};
+
+static const uint8_t
+ScoreTqReqFlushPriority_Map[] = {
+ 0, 1
+};
+
+static size_t ScoreTqReqFlushPriority_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqFlushPriority_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqFlushPriority_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqFlushPriority_Fixture = {
+ .setup = ScoreTqReqFlushPriority_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqFlushPriority_Teardown_Wrap,
+ .scope = ScoreTqReqFlushPriority_Scope,
+ .initial_context = &ScoreTqReqFlushPriority_Instance
+};
+
+static inline ScoreTqReqFlushPriority_Entry ScoreTqReqFlushPriority_PopEntry(
+ ScoreTqReqFlushPriority_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return ScoreTqReqFlushPriority_Entries[
+ ScoreTqReqFlushPriority_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqFlushPriority_TestVariant(
+ ScoreTqReqFlushPriority_Context *ctx
+)
+{
+ ScoreTqReqFlushPriority_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ ScoreTqReqFlushPriority_Action( ctx );
+ ScoreTqReqFlushPriority_Post_Operation_Check(
+ ctx,
+ ctx->Map.entry.Post_Operation
+ );
+}
+
+static T_fixture_node ScoreTqReqFlushPriority_Node;
+
+void ScoreTqReqFlushPriority_Run(
+ TQContext *tq_ctx,
+ bool supports_multiple_priority_queues
+)
+{
+ ScoreTqReqFlushPriority_Context *ctx;
+
+ ctx = &ScoreTqReqFlushPriority_Instance;
+ ctx->tq_ctx = tq_ctx;
+ ctx->supports_multiple_priority_queues = supports_multiple_priority_queues;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqFlushPriority_Node,
+ &ScoreTqReqFlushPriority_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqFlushPriority_Pre_Queue_Empty;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqFlushPriority_Pre_Queue_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqFlushPriority_PopEntry( ctx );
+ ScoreTqReqFlushPriority_TestVariant( ctx );
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-flush-priority.h b/testsuites/validation/tr-tq-flush-priority.h
new file mode 100644
index 0000000000..72bc372ca1
--- /dev/null
+++ b/testsuites/validation/tr-tq-flush-priority.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqFlushPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_FLUSH_PRIORITY_H
+#define _TR_TQ_FLUSH_PRIORITY_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqFlushPriority
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqFlushPriority_Pre_Queue_Empty,
+ ScoreTqReqFlushPriority_Pre_Queue_NonEmpty,
+ ScoreTqReqFlushPriority_Pre_Queue_NA
+} ScoreTqReqFlushPriority_Pre_Queue;
+
+typedef enum {
+ ScoreTqReqFlushPriority_Post_Operation_Nop,
+ ScoreTqReqFlushPriority_Post_Operation_TryExtract,
+ ScoreTqReqFlushPriority_Post_Operation_NA
+} ScoreTqReqFlushPriority_Post_Operation;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ *
+ * @param supports_multiple_priority_queues is true, if the object using the
+ * thread queue supports multiple priority queues, otherwise it is false.
+ */
+void ScoreTqReqFlushPriority_Run(
+ TQContext *tq_ctx,
+ bool supports_multiple_priority_queues
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_FLUSH_PRIORITY_H */