author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-08-31 11:26:49 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-08-31 11:29:56 +0200
commit    c4472cef3e1c916ddb708ab3d4a46178b78f88a4 (patch)
tree      5436465be5dc391142eaed2abeca5a9e14f6ee58 /testsuites
parent    validation: Support a partial thread queue flush (diff)
validation: Move flush filter stop validation
This gets rid of a cyclic dependency in the specification graph. Update #3716.
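As the first hunk below shows, tc-futex-wake.c previously validated the partial flush (filter stop) case in its own Flush() helper and ran the generic FIFO flush test only for the full flush case. Moving the stop validation into tr-tq-flush-fifo.c, behind a new may_stop parameter of ScoreTqReqFlushFifo_Run(), breaks the cycle. The two call sites after this patch:

  /* The futex wake operation supports a partial flush (_Futex_Wake()
     with a count of 1), so the stop pre-condition states are run. */
  ScoreTqReqFlushFifo_Run( &ctx->tq_ctx, true );

  /* rtems_semaphore_flush() always flushes all waiting tasks, so the
     stop-related variants are skipped. */
  ScoreTqReqFlushFifo_Run( &ctx->tq_ctx, false );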
Diffstat (limited to 'testsuites')
 -rw-r--r--  testsuites/validation/tc-futex-wake.c    |  70
 -rw-r--r--  testsuites/validation/tc-sem-flush.c     |   2
 -rw-r--r--  testsuites/validation/tr-tq-flush-fifo.c | 409
 -rw-r--r--  testsuites/validation/tr-tq-flush-fifo.h |  33
4 files changed, 408 insertions, 106 deletions
diff --git a/testsuites/validation/tc-futex-wake.c b/testsuites/validation/tc-futex-wake.c
index 75364e6976..aa0a93a3ef 100644
--- a/testsuites/validation/tc-futex-wake.c
+++ b/testsuites/validation/tc-futex-wake.c
@@ -70,9 +70,8 @@
*/
typedef enum {
- NewlibReqFutexWake_Pre_Count_Negative,
- NewlibReqFutexWake_Pre_Count_Partial,
- NewlibReqFutexWake_Pre_Count_All,
+ NewlibReqFutexWake_Pre_Count_NegativeOrZero,
+ NewlibReqFutexWake_Pre_Count_Positive,
NewlibReqFutexWake_Pre_Count_NA
} NewlibReqFutexWake_Pre_Count;
@@ -83,8 +82,7 @@ typedef enum {
typedef enum {
NewlibReqFutexWake_Post_Flush_No,
- NewlibReqFutexWake_Post_Flush_Partial,
- NewlibReqFutexWake_Post_Flush_All,
+ NewlibReqFutexWake_Post_Flush_Yes,
NewlibReqFutexWake_Post_Flush_NA
} NewlibReqFutexWake_Post_Flush;
@@ -147,9 +145,8 @@ static NewlibReqFutexWake_Context
NewlibReqFutexWake_Instance;
static const char * const NewlibReqFutexWake_PreDesc_Count[] = {
- "Negative",
- "Partial",
- "All",
+ "NegativeOrZero",
+ "Positive",
"NA"
};
@@ -189,18 +186,18 @@ static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
{
Context *ctx;
int count;
- int how_many;
- ctx = ToContext( tq_ctx );
- how_many = (int) ctx->tq_ctx.how_many;
+ (void) thread_count;
- count = _Futex_Wake( &ctx->futex, 1 );
- T_eq_int( count, how_many > 0 ? 1 : 0 );
+ ctx = ToContext( tq_ctx );
- count = _Futex_Wake( &ctx->futex, INT_MAX );
- T_eq_int( count, how_many > 1 ? how_many - 1 : 0 );
+ if ( all ) {
+ count = _Futex_Wake( &ctx->futex, INT_MAX );
+ } else {
+ count = _Futex_Wake( &ctx->futex, 1 );
+ }
- return thread_count;
+ return (uint32_t) count;
}
static void NewlibReqFutexWake_Pre_Count_Prepare(
@@ -209,29 +206,17 @@ static void NewlibReqFutexWake_Pre_Count_Prepare(
)
{
switch ( state ) {
- case NewlibReqFutexWake_Pre_Count_Negative: {
+ case NewlibReqFutexWake_Pre_Count_NegativeOrZero: {
/*
- * While the ``count`` parameter is less than zero.
+ * While the ``count`` parameter is less than or equal to zero.
*/
/* This state is prepared by Enqueue() */
break;
}
- case NewlibReqFutexWake_Pre_Count_Partial: {
+ case NewlibReqFutexWake_Pre_Count_Positive: {
/*
- * While the ``count`` parameter is greater than or equal to zero, while
- * the ``count`` parameter is less than the count of threads enqueued on
- * the thread queue of the futex object.
- */
- /* This state is prepared by Flush() */
- break;
- }
-
- case NewlibReqFutexWake_Pre_Count_All: {
- /*
- * While the ``count`` parameter is greater than or equal to zero, while
- * the ``count`` parameter is greater than or equal to the count of
- * threads enqueued on the thread queue of the futex object.
+ * While the ``count`` parameter is greater than zero.
*/
/* This state is prepared by Flush() */
break;
@@ -277,21 +262,12 @@ static void NewlibReqFutexWake_Post_Flush_Check(
break;
}
- case NewlibReqFutexWake_Post_Flush_Partial: {
+ case NewlibReqFutexWake_Post_Flush_Yes: {
/*
* The first count threads specified by the ``count`` parameter shall be
* extracted from the thread queue of the futex object in FIFO order.
*/
- /* This state is checked by Flush() */
- break;
- }
-
- case NewlibReqFutexWake_Post_Flush_All: {
- /*
- * All threads shall be extracted from the thread queue of the futex
- * object in FIFO order.
- */
- ScoreTqReqFlushFifo_Run( &ctx->tq_ctx );
+ ScoreTqReqFlushFifo_Run( &ctx->tq_ctx, true );
break;
}
@@ -357,14 +333,12 @@ NewlibReqFutexWake_Entries[] = {
{ 0, 0, NewlibReqFutexWake_Post_Result_Count,
NewlibReqFutexWake_Post_Flush_No },
{ 0, 0, NewlibReqFutexWake_Post_Result_Count,
- NewlibReqFutexWake_Post_Flush_Partial },
- { 0, 0, NewlibReqFutexWake_Post_Result_Count,
- NewlibReqFutexWake_Post_Flush_All }
+ NewlibReqFutexWake_Post_Flush_Yes }
};
static const uint8_t
NewlibReqFutexWake_Map[] = {
- 0, 1, 2
+ 0, 1
};
static size_t NewlibReqFutexWake_Scope( void *arg, char *buf, size_t n )
@@ -421,7 +395,7 @@ T_TEST_CASE_FIXTURE( NewlibReqFutexWake, &NewlibReqFutexWake_Fixture )
ctx->Map.index = 0;
for (
- ctx->Map.pcs[ 0 ] = NewlibReqFutexWake_Pre_Count_Negative;
+ ctx->Map.pcs[ 0 ] = NewlibReqFutexWake_Pre_Count_NegativeOrZero;
ctx->Map.pcs[ 0 ] < NewlibReqFutexWake_Pre_Count_NA;
++ctx->Map.pcs[ 0 ]
) {
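Note on the reworked Flush() helper above: _Futex_Wake() extracts up to the given count of threads from the futex thread queue in FIFO order and returns the number of threads actually extracted, which is why the helper can now return the wake count directly instead of echoing thread_count. A hypothetical assertion in the style of the surrounding test code (not part of this patch):

  /* With more than one thread enqueued, a partial wake with a count
     of 1 shall extract exactly one thread. */
  count = _Futex_Wake( &ctx->futex, 1 );
  T_eq_int( count, 1 );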
diff --git a/testsuites/validation/tc-sem-flush.c b/testsuites/validation/tc-sem-flush.c
index 59b836dba8..9a5d00b570 100644
--- a/testsuites/validation/tc-sem-flush.c
+++ b/testsuites/validation/tc-sem-flush.c
@@ -407,7 +407,7 @@ static void RtemsSemReqFlush_Post_Action_Check(
* The calling task shall flush the semaphore as specified by
* /score/tq/req/flush-fifo.
*/
- ScoreTqReqFlushFifo_Run( &ctx->tq_ctx );
+ ScoreTqReqFlushFifo_Run( &ctx->tq_ctx, false );
break;
}
diff --git a/testsuites/validation/tr-tq-flush-fifo.c b/testsuites/validation/tr-tq-flush-fifo.c
index a50e41a66e..2a52065fe8 100644
--- a/testsuites/validation/tr-tq-flush-fifo.c
+++ b/testsuites/validation/tr-tq-flush-fifo.c
@@ -67,7 +67,10 @@
typedef struct {
uint8_t Skip : 1;
- uint8_t Pre_Queue_NA : 1;
+ uint8_t Pre_MayStop_NA : 1;
+ uint8_t Pre_QueueEmpty_NA : 1;
+ uint8_t Pre_Stop_NA : 1;
+ uint8_t Pre_WaitState_NA : 1;
uint8_t Post_Operation : 2;
} ScoreTqReqFlushFifo_Entry;
@@ -76,9 +79,20 @@ typedef struct {
*/
typedef struct {
/**
+ * @brief If this member is true, then the flush filter shall return NULL.
+ */
+ bool stop;
+
+ /**
+ * @brief If this member is true, then the least recently enqueued thread
+ * shall be in the intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
* @brief This member contains the call within ISR request.
*/
- CallWithinISRRequest request;;
+ CallWithinISRRequest request;
/**
* @brief This member contains a copy of the corresponding
@@ -86,11 +100,23 @@ typedef struct {
*/
TQContext *tq_ctx;
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqFlushFifo_Run() parameter.
+ */
+ bool may_stop;
+
struct {
/**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 4 ];
+
+ /**
* @brief This member defines the pre-condition states for the next action.
*/
- size_t pcs[ 1 ];
+ size_t pcs[ 4 ];
/**
* @brief If this member is true, then the test action loop is executed.
@@ -118,14 +144,35 @@ typedef struct {
static ScoreTqReqFlushFifo_Context
ScoreTqReqFlushFifo_Instance;
-static const char * const ScoreTqReqFlushFifo_PreDesc_Queue[] = {
- "Empty",
- "NonEmpty",
+static const char * const ScoreTqReqFlushFifo_PreDesc_MayStop[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqFlushFifo_PreDesc_QueueEmpty[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqFlushFifo_PreDesc_Stop[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqFlushFifo_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
"NA"
};
static const char * const * const ScoreTqReqFlushFifo_PreDesc[] = {
- ScoreTqReqFlushFifo_PreDesc_Queue,
+ ScoreTqReqFlushFifo_PreDesc_MayStop,
+ ScoreTqReqFlushFifo_PreDesc_QueueEmpty,
+ ScoreTqReqFlushFifo_PreDesc_Stop,
+ ScoreTqReqFlushFifo_PreDesc_WaitState,
NULL
};
@@ -141,13 +188,24 @@ static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
return ctx->tq_ctx->worker_tcb[ worker ];
}
-static void Flush( void *arg )
+static void BlockerAFlush( Context *ctx )
+{
+ TQSchedulerRecordStart( ctx->tq_ctx );
+
+ if ( ctx->stop ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_PARTIAL );
+ } else {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
+ }
+}
+
+static void InterruptFlush( void *arg )
{
Context *ctx;
ctx = arg;
TQSchedulerRecordStart( ctx->tq_ctx );
- TQFlush( ctx->tq_ctx, true );
+ TQFlush( ctx->tq_ctx, !ctx->stop );
}
static void SchedulerEvent(
@@ -164,20 +222,109 @@ static void SchedulerEvent(
when == T_SCHEDULER_BEFORE &&
event->operation == T_SCHEDULER_BLOCK
) {
- ctx->request.handler = Flush;
- ctx->request.arg = ctx;
- CallWithinISRSubmit( &ctx->request );
T_scheduler_set_event_handler( NULL, NULL );
+ ctx->request.handler = InterruptFlush;
+ CallWithinISRSubmit( &ctx->request );
}
}
-static void ScoreTqReqFlushFifo_Pre_Queue_Prepare(
- ScoreTqReqFlushFifo_Context *ctx,
- ScoreTqReqFlushFifo_Pre_Queue state
+static uint32_t CheckExtractions( Context *ctx )
+{
+ uint32_t extracted_threads;
+ size_t i;
+ const T_scheduler_event *event;
+
+ extracted_threads = 0;
+ i = 0;
+
+ if ( !ctx->intend_to_block ) {
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ event = GetUnblock( ctx, &i );
+
+ if ( event != &T_scheduler_event_null ) {
+ if ( ctx->intend_to_block ) {
+ T_eq_ptr( event->executing, NULL );
+ } else {
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+ ++extracted_threads;
+ }
+
+ event = GetUnblock( ctx, &i );
+
+ if ( event != &T_scheduler_event_null ) {
+ if ( ctx->intend_to_block ) {
+ T_eq_ptr( event->executing, NULL );
+ } else {
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+ ++extracted_threads;
+ }
+
+ event = GetUnblock( ctx, &i );
+
+ if ( event != &T_scheduler_event_null ) {
+ if ( ctx->intend_to_block ) {
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_D ) );
+ } else {
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+ ++extracted_threads;
+ }
+
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ T_eq_u32( extracted_threads, ctx->tq_ctx->flush_count );
+
+ return extracted_threads;
+}
+
+static void ScoreTqReqFlushFifo_Pre_MayStop_Prepare(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Pre_MayStop state
)
{
switch ( state ) {
- case ScoreTqReqFlushFifo_Pre_Queue_Empty: {
+ case ScoreTqReqFlushFifo_Pre_MayStop_Yes: {
+ /*
+ * Where the flush filter may return NULL.
+ */
+ if ( !ctx->may_stop ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_MayStop_No: {
+ /*
+ * Where the flush filter does not return NULL.
+ */
+ if ( ctx->may_stop ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_MayStop_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushFifo_Pre_QueueEmpty_Prepare(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Pre_QueueEmpty state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushFifo_Pre_QueueEmpty_Yes: {
/*
* While the thread queue is empty.
*/
@@ -185,7 +332,7 @@ static void ScoreTqReqFlushFifo_Pre_Queue_Prepare(
break;
}
- case ScoreTqReqFlushFifo_Pre_Queue_NonEmpty: {
+ case ScoreTqReqFlushFifo_Pre_QueueEmpty_No: {
/*
* While the thread queue has at least one enqueued thread.
*/
@@ -193,7 +340,63 @@ static void ScoreTqReqFlushFifo_Pre_Queue_Prepare(
break;
}
- case ScoreTqReqFlushFifo_Pre_Queue_NA:
+ case ScoreTqReqFlushFifo_Pre_QueueEmpty_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushFifo_Pre_Stop_Prepare(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Pre_Stop state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushFifo_Pre_Stop_Yes: {
+ /*
+ * While the flush filter returns NULL for an enqueued thread.
+ */
+ ctx->stop = true;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_Stop_No: {
+ /*
+ * While the flush filter does not return NULL for an enqueued thread.
+ */
+ ctx->stop = false;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_Stop_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqFlushFifo_Pre_WaitState_Prepare(
+ ScoreTqReqFlushFifo_Context *ctx,
+ ScoreTqReqFlushFifo_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqFlushFifo_Pre_WaitState_Blocked: {
+ /*
+ * While the least recently enqueued thread on the thread queue is in the
+ * blocked wait state.
+ */
+ ctx->intend_to_block = false;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the least recently enqueued thread on the thread queue is in the
+ * intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqFlushFifo_Pre_WaitState_NA:
break;
}
}
@@ -203,40 +406,39 @@ static void ScoreTqReqFlushFifo_Post_Operation_Check(
ScoreTqReqFlushFifo_Post_Operation state
)
{
- size_t i;
- const T_scheduler_event *event;
-
- i = 0;
+ size_t i;
+ uint32_t extracted_threads;
switch ( state ) {
case ScoreTqReqFlushFifo_Post_Operation_Nop: {
/*
- * No operation shall be performed.
+ * No thread queue extraction operation shall be performed.
*/
/* Event receive */
+ i = 0;
T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
break;
}
- case ScoreTqReqFlushFifo_Post_Operation_TryExtract: {
+ case ScoreTqReqFlushFifo_Post_Operation_ExtractAll: {
/*
- * The enqueued threads of the thread queue may be extracted in FIFO
+ * The enqueued threads shall be extracted from the thread queue in FIFO
* order.
*/
- event = GetUnblock( ctx, &i );
- T_eq_ptr( event->executing, NULL );
- T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
-
- event = GetUnblock( ctx, &i );
- T_eq_ptr( event->executing, NULL );
- T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
-
- event = GetUnblock( ctx, &i );
- T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_D ) );
- T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+ extracted_threads = CheckExtractions( ctx );
+ T_eq_sz( extracted_threads, ctx->tq_ctx->how_many );
+ break;
+ }
- T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ case ScoreTqReqFlushFifo_Post_Operation_ExtractPartial: {
+ /*
+ * The enqueued threads which precede in FIFO order the enqueued thread
+ * for which the flush filter returned NULL shall be extracted from the
+ * thread queue in FIFO order.
+ */
+ extracted_threads = CheckExtractions( ctx );
+ T_lt_sz( extracted_threads, ctx->tq_ctx->how_many );
break;
}
@@ -247,6 +449,7 @@ static void ScoreTqReqFlushFifo_Post_Operation_Check(
static void ScoreTqReqFlushFifo_Setup( ScoreTqReqFlushFifo_Context *ctx )
{
+ ctx->request.arg = ctx;
TQReset( ctx->tq_ctx );
TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
@@ -279,31 +482,45 @@ static void ScoreTqReqFlushFifo_Teardown_Wrap( void *arg )
static void ScoreTqReqFlushFifo_Action( ScoreTqReqFlushFifo_Context *ctx )
{
+ uint32_t flush_count;
+
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
if ( ctx->tq_ctx->how_many > 0 ) {
TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
- T_scheduler_set_event_handler( SchedulerEvent, ctx );
+
+ if ( ctx->intend_to_block ) {
+ T_scheduler_set_event_handler( SchedulerEvent, ctx );
+ }
+
TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+
+ if ( !ctx->intend_to_block ) {
+ BlockerAFlush( ctx );
+ }
} else {
- TQSchedulerRecordStart( ctx->tq_ctx );
- TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
+ BlockerAFlush( ctx );
}
+ flush_count = ctx->tq_ctx->flush_count;
TQSchedulerRecordStop( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+ ctx->tq_ctx->flush_count = flush_count;
}
static const ScoreTqReqFlushFifo_Entry
ScoreTqReqFlushFifo_Entries[] = {
- { 0, 0, ScoreTqReqFlushFifo_Post_Operation_Nop },
- { 0, 0, ScoreTqReqFlushFifo_Post_Operation_TryExtract }
+ { 0, 0, 0, 1, 1, ScoreTqReqFlushFifo_Post_Operation_Nop },
+ { 0, 0, 0, 0, 0, ScoreTqReqFlushFifo_Post_Operation_ExtractAll },
+ { 0, 0, 0, 0, 0, ScoreTqReqFlushFifo_Post_Operation_ExtractPartial },
+ { 1, 0, 0, 0, 0, ScoreTqReqFlushFifo_Post_Operation_NA }
};
static const uint8_t
ScoreTqReqFlushFifo_Map[] = {
- 0, 1
+ 0, 0, 0, 0, 2, 2, 1, 1, 0, 0, 0, 0, 3, 3, 1, 1
};
static size_t ScoreTqReqFlushFifo_Scope( void *arg, char *buf, size_t n )
@@ -327,22 +544,86 @@ static T_fixture ScoreTqReqFlushFifo_Fixture = {
.initial_context = &ScoreTqReqFlushFifo_Instance
};
+static const uint8_t ScoreTqReqFlushFifo_Weights[] = {
+ 8, 4, 2, 1
+};
+
+static void ScoreTqReqFlushFifo_Skip(
+ ScoreTqReqFlushFifo_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pci[ 1 ] = ScoreTqReqFlushFifo_Pre_QueueEmpty_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pci[ 2 ] = ScoreTqReqFlushFifo_Pre_Stop_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pci[ 3 ] = ScoreTqReqFlushFifo_Pre_WaitState_NA - 1;
+ break;
+ }
+}
+
static inline ScoreTqReqFlushFifo_Entry ScoreTqReqFlushFifo_PopEntry(
ScoreTqReqFlushFifo_Context *ctx
)
{
size_t index;
- index = ctx->Map.index;
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 4; ++i ) {
+ index += ScoreTqReqFlushFifo_Weights[ i ] * ctx->Map.pci[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
ctx->Map.index = index + 1;
+
return ScoreTqReqFlushFifo_Entries[
ScoreTqReqFlushFifo_Map[ index ]
];
}
+static void ScoreTqReqFlushFifo_SetPreConditionStates(
+ ScoreTqReqFlushFifo_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+
+ if ( ctx->Map.entry.Pre_Stop_NA ) {
+ ctx->Map.pcs[ 2 ] = ScoreTqReqFlushFifo_Pre_Stop_NA;
+ } else {
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ }
+
+ if ( ctx->Map.entry.Pre_WaitState_NA ) {
+ ctx->Map.pcs[ 3 ] = ScoreTqReqFlushFifo_Pre_WaitState_NA;
+ } else {
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ }
+}
+
static void ScoreTqReqFlushFifo_TestVariant( ScoreTqReqFlushFifo_Context *ctx )
{
- ScoreTqReqFlushFifo_Pre_Queue_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ ScoreTqReqFlushFifo_Pre_MayStop_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqFlushFifo_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqFlushFifo_Pre_QueueEmpty_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ ScoreTqReqFlushFifo_Pre_Stop_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreTqReqFlushFifo_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 3 ] );
ScoreTqReqFlushFifo_Action( ctx );
ScoreTqReqFlushFifo_Post_Operation_Check(
ctx,
@@ -352,12 +633,13 @@ static void ScoreTqReqFlushFifo_TestVariant( ScoreTqReqFlushFifo_Context *ctx )
static T_fixture_node ScoreTqReqFlushFifo_Node;
-void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx )
+void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx, bool may_stop )
{
ScoreTqReqFlushFifo_Context *ctx;
ctx = &ScoreTqReqFlushFifo_Instance;
ctx->tq_ctx = tq_ctx;
+ ctx->may_stop = may_stop;
ctx = T_push_fixture(
&ScoreTqReqFlushFifo_Node,
@@ -365,14 +647,39 @@ void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx )
);
ctx->Map.in_action_loop = true;
ctx->Map.index = 0;
+ ctx->Map.skip = false;
for (
- ctx->Map.pcs[ 0 ] = ScoreTqReqFlushFifo_Pre_Queue_Empty;
- ctx->Map.pcs[ 0 ] < ScoreTqReqFlushFifo_Pre_Queue_NA;
- ++ctx->Map.pcs[ 0 ]
+ ctx->Map.pci[ 0 ] = ScoreTqReqFlushFifo_Pre_MayStop_Yes;
+ ctx->Map.pci[ 0 ] < ScoreTqReqFlushFifo_Pre_MayStop_NA;
+ ++ctx->Map.pci[ 0 ]
) {
- ctx->Map.entry = ScoreTqReqFlushFifo_PopEntry( ctx );
- ScoreTqReqFlushFifo_TestVariant( ctx );
+ for (
+ ctx->Map.pci[ 1 ] = ScoreTqReqFlushFifo_Pre_QueueEmpty_Yes;
+ ctx->Map.pci[ 1 ] < ScoreTqReqFlushFifo_Pre_QueueEmpty_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreTqReqFlushFifo_Pre_Stop_Yes;
+ ctx->Map.pci[ 2 ] < ScoreTqReqFlushFifo_Pre_Stop_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreTqReqFlushFifo_Pre_WaitState_Blocked;
+ ctx->Map.pci[ 3 ] < ScoreTqReqFlushFifo_Pre_WaitState_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqFlushFifo_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqFlushFifo_SetPreConditionStates( ctx );
+ ScoreTqReqFlushFifo_TestVariant( ctx );
+ }
+ }
+ }
}
T_pop_fixture();
diff --git a/testsuites/validation/tr-tq-flush-fifo.h b/testsuites/validation/tr-tq-flush-fifo.h
index dca1444891..8e05972816 100644
--- a/testsuites/validation/tr-tq-flush-fifo.h
+++ b/testsuites/validation/tr-tq-flush-fifo.h
@@ -64,14 +64,33 @@ extern "C" {
*/
typedef enum {
- ScoreTqReqFlushFifo_Pre_Queue_Empty,
- ScoreTqReqFlushFifo_Pre_Queue_NonEmpty,
- ScoreTqReqFlushFifo_Pre_Queue_NA
-} ScoreTqReqFlushFifo_Pre_Queue;
+ ScoreTqReqFlushFifo_Pre_MayStop_Yes,
+ ScoreTqReqFlushFifo_Pre_MayStop_No,
+ ScoreTqReqFlushFifo_Pre_MayStop_NA
+} ScoreTqReqFlushFifo_Pre_MayStop;
+
+typedef enum {
+ ScoreTqReqFlushFifo_Pre_QueueEmpty_Yes,
+ ScoreTqReqFlushFifo_Pre_QueueEmpty_No,
+ ScoreTqReqFlushFifo_Pre_QueueEmpty_NA
+} ScoreTqReqFlushFifo_Pre_QueueEmpty;
+
+typedef enum {
+ ScoreTqReqFlushFifo_Pre_Stop_Yes,
+ ScoreTqReqFlushFifo_Pre_Stop_No,
+ ScoreTqReqFlushFifo_Pre_Stop_NA
+} ScoreTqReqFlushFifo_Pre_Stop;
+
+typedef enum {
+ ScoreTqReqFlushFifo_Pre_WaitState_Blocked,
+ ScoreTqReqFlushFifo_Pre_WaitState_IntendToBlock,
+ ScoreTqReqFlushFifo_Pre_WaitState_NA
+} ScoreTqReqFlushFifo_Pre_WaitState;
typedef enum {
ScoreTqReqFlushFifo_Post_Operation_Nop,
- ScoreTqReqFlushFifo_Post_Operation_TryExtract,
+ ScoreTqReqFlushFifo_Post_Operation_ExtractAll,
+ ScoreTqReqFlushFifo_Post_Operation_ExtractPartial,
ScoreTqReqFlushFifo_Post_Operation_NA
} ScoreTqReqFlushFifo_Post_Operation;
@@ -79,8 +98,10 @@ typedef enum {
* @brief Runs the parameterized test case.
*
* @param[in,out] tq_ctx is the thread queue test context.
+ *
+ * @param may_stop is true if a partial flush is supported.
*/
-void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx );
+void ScoreTqReqFlushFifo_Run( TQContext *tq_ctx, bool may_stop );
/** @} */
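A note for reading the new transition map in tr-tq-flush-fifo.c: the four pre-conditions span 2 * 2 * 2 * 2 = 16 combinations, and ScoreTqReqFlushFifo_Weights[] = { 8, 4, 2, 1 } lets ScoreTqReqFlushFifo_PopEntry() recompute the map index positionally after a skipped variant. A worked example using the arrays from the hunks above:

  /* pci = { MayStop_No = 1, QueueEmpty_Yes = 0, Stop_Yes = 0,
     WaitState_Blocked = 0 } gives index 8*1 + 4*0 + 2*0 + 1*0 = 8;
     ScoreTqReqFlushFifo_Map[ 8 ] == 0 selects the Nop entry, since
     flushing an empty thread queue extracts no threads. */
  index = 0;

  for ( i = 0; i < 4; ++i ) {
    index += ScoreTqReqFlushFifo_Weights[ i ] * ctx->Map.pci[ i ];
  }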