author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-09-21 08:29:09 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-10-28 18:59:27 +0200
commit    e432887c3dd6e64c34516e6430b1419c89e39187 (patch)
tree      0cb506c50ec501c85a0152bdb6b83ccce040b805
parent    1349720073d327b7ac9b31b1d4b5cd5b2f59ed69 (diff)
validation: Test rtems_semaphore_release()
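Replace the previous monolithic test case for rtems_semaphore_release()
with runs of reusable surrender test routines.  The semaphore, mutex, and
thread queue surrender behaviour moves to the new tr-sem-surrender,
tr-mtx-surrender, tr-tq-surrender, and tr-tq-surrender-priority-inherit
files.  The pre-condition space of tc-sem-release.c shrinks from
{ Class, Discipline, Count, Owner, Id } to { Class, Discipline, Id } and
each post-condition action delegates to the corresponding
/score/sem/req/surrender or /score/mtx/req/surrender run.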
-rw-r--r--  spec/build/testsuites/validation/validation-0.yml        |    4
-rw-r--r--  testsuites/validation/tc-sem-release.c                   | 1581
-rw-r--r--  testsuites/validation/tr-mtx-surrender.c                 | 1215
-rw-r--r--  testsuites/validation/tr-mtx-surrender.h                 |  159
-rw-r--r--  testsuites/validation/tr-sem-surrender.c                 |  566
-rw-r--r--  testsuites/validation/tr-sem-surrender.h                 |  118
-rw-r--r--  testsuites/validation/tr-tq-surrender-priority-inherit.c | 1798
-rw-r--r--  testsuites/validation/tr-tq-surrender-priority-inherit.h |  156
-rw-r--r--  testsuites/validation/tr-tq-surrender.c                  |  685
-rw-r--r--  testsuites/validation/tr-tq-surrender.h                  |  109
10 files changed, 4975 insertions, 1416 deletions
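
For context, the Classic API semantics exercised by the reworked test case
can be sketched as follows.  This is a hypothetical stand-alone example,
not part of the patch; it only uses directives which also appear in the
diff below.

#include <rtems.h>

/* Hypothetical example, not part of this patch: a task seizes and then
 * surrenders a binary semaphore, which is the behaviour validated by
 * tc-sem-release.c via the surrender test runners.
 */
static void ExampleSurrender( void )
{
  rtems_status_code sc;
  rtems_id          semaphore_id;

  sc = rtems_semaphore_create(
    rtems_build_name( 'E', 'X', 'A', 'M' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY,
    0,
    &semaphore_id
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  /* Seize the semaphore */
  sc = rtems_semaphore_obtain( semaphore_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  if ( sc == RTEMS_SUCCESSFUL ) {
    /* Surrender it again; the owner check accepts the calling task */
    sc = rtems_semaphore_release( semaphore_id );
  }

  /* A release with an identifier not associated with a semaphore returns
   * RTEMS_INVALID_ID, see the Pre_Id_Invalid transitions below.
   */
  sc = rtems_semaphore_release( 0xffffffff );
  (void) sc;

  (void) rtems_semaphore_delete( semaphore_id );
}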
diff --git a/spec/build/testsuites/validation/validation-0.yml b/spec/build/testsuites/validation/validation-0.yml
index 81200e640b..c09934af3e 100644
--- a/spec/build/testsuites/validation/validation-0.yml
+++ b/spec/build/testsuites/validation/validation-0.yml
@@ -111,10 +111,12 @@ source:
- testsuites/validation/tr-event-send-receive.c
- testsuites/validation/tr-mtx-seize-try.c
- testsuites/validation/tr-mtx-seize-wait.c
+- testsuites/validation/tr-mtx-surrender.c
- testsuites/validation/tr-object-ident.c
- testsuites/validation/tr-object-ident-local.c
- testsuites/validation/tr-sem-seize-try.c
- testsuites/validation/tr-sem-seize-wait.c
+- testsuites/validation/tr-sem-surrender.c
- testsuites/validation/tr-tq-enqueue-ceiling.c
- testsuites/validation/tr-tq-enqueue-deadlock.c
- testsuites/validation/tr-tq-enqueue-fifo.c
@@ -123,6 +125,8 @@ source:
- testsuites/validation/tr-tq-flush-fifo.c
- testsuites/validation/tr-tq-flush-priority.c
- testsuites/validation/tr-tq-flush-priority-inherit.c
+- testsuites/validation/tr-tq-surrender.c
+- testsuites/validation/tr-tq-surrender-priority-inherit.c
- testsuites/validation/ts-validation-0.c
stlib: []
target: testsuites/validation/ts-validation-0.exe
diff --git a/testsuites/validation/tc-sem-release.c b/testsuites/validation/tc-sem-release.c
index d5c50ae74e..33cecd4777 100644
--- a/testsuites/validation/tc-sem-release.c
+++ b/testsuites/validation/tc-sem-release.c
@@ -54,10 +54,11 @@
#include <rtems.h>
#include <string.h>
-#include <rtems/rtems/semimpl.h>
-#include "ts-config.h"
+#include "tr-mtx-surrender.h"
+#include "tr-sem-surrender.h"
#include "tx-support.h"
+#include "tx-thread-queue.h"
#include <rtems/test.h>
@@ -86,242 +87,53 @@ typedef enum {
} RtemsSemReqRelease_Pre_Discipline;
typedef enum {
- RtemsSemReqRelease_Pre_Count_LessMax,
- RtemsSemReqRelease_Pre_Count_Max,
- RtemsSemReqRelease_Pre_Count_Blocked,
- RtemsSemReqRelease_Pre_Count_NA
-} RtemsSemReqRelease_Pre_Count;
-
-typedef enum {
- RtemsSemReqRelease_Pre_Owner_No,
- RtemsSemReqRelease_Pre_Owner_Self,
- RtemsSemReqRelease_Pre_Owner_Other,
- RtemsSemReqRelease_Pre_Owner_Blocked,
- RtemsSemReqRelease_Pre_Owner_Nested,
- RtemsSemReqRelease_Pre_Owner_BlockedNested,
- RtemsSemReqRelease_Pre_Owner_BlockedOther,
- RtemsSemReqRelease_Pre_Owner_BlockedNestedOther,
- RtemsSemReqRelease_Pre_Owner_NA
-} RtemsSemReqRelease_Pre_Owner;
-
-typedef enum {
RtemsSemReqRelease_Pre_Id_Valid,
RtemsSemReqRelease_Pre_Id_Invalid,
RtemsSemReqRelease_Pre_Id_NA
} RtemsSemReqRelease_Pre_Id;
typedef enum {
- RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Status_NotOwner,
- RtemsSemReqRelease_Post_Status_Unsat,
- RtemsSemReqRelease_Post_Status_NA
-} RtemsSemReqRelease_Post_Status;
-
-typedef enum {
- RtemsSemReqRelease_Post_Count_Zero,
- RtemsSemReqRelease_Post_Count_One,
- RtemsSemReqRelease_Post_Count_PlusOne,
- RtemsSemReqRelease_Post_Count_Max,
- RtemsSemReqRelease_Post_Count_Nop,
- RtemsSemReqRelease_Post_Count_NA
-} RtemsSemReqRelease_Post_Count;
-
-typedef enum {
- RtemsSemReqRelease_Post_Owner_No,
- RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Owner_Other,
- RtemsSemReqRelease_Post_Owner_FIFO,
- RtemsSemReqRelease_Post_Owner_Priority,
- RtemsSemReqRelease_Post_Owner_MrsP,
- RtemsSemReqRelease_Post_Owner_NA
-} RtemsSemReqRelease_Post_Owner;
-
-typedef enum {
- RtemsSemReqRelease_Post_Next_FIFO,
- RtemsSemReqRelease_Post_Next_Priority,
- RtemsSemReqRelease_Post_Next_MrsP,
- RtemsSemReqRelease_Post_Next_NA
-} RtemsSemReqRelease_Post_Next;
-
-typedef enum {
- RtemsSemReqRelease_Post_CallerPrio_Inherit,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerPrio_NA
-} RtemsSemReqRelease_Post_CallerPrio;
-
-typedef enum {
- RtemsSemReqRelease_Post_CallerCPU_Home,
- RtemsSemReqRelease_Post_CallerCPU_Other,
- RtemsSemReqRelease_Post_CallerCPU_NA
-} RtemsSemReqRelease_Post_CallerCPU;
+ RtemsSemReqRelease_Post_Action_InvId,
+ RtemsSemReqRelease_Post_Action_BinarySurrender,
+ RtemsSemReqRelease_Post_Action_CountingSurrender,
+ RtemsSemReqRelease_Post_Action_MtxSurrender,
+ RtemsSemReqRelease_Post_Action_InheritMtxSurrender,
+ RtemsSemReqRelease_Post_Action_CeilingMtxSurrender,
+ RtemsSemReqRelease_Post_Action_MrsPMtxSurrender,
+ RtemsSemReqRelease_Post_Action_NA
+} RtemsSemReqRelease_Post_Action;
typedef struct {
- uint32_t Skip : 1;
- uint32_t Pre_Class_NA : 1;
- uint32_t Pre_Discipline_NA : 1;
- uint32_t Pre_Count_NA : 1;
- uint32_t Pre_Owner_NA : 1;
- uint32_t Pre_Id_NA : 1;
- uint32_t Post_Status : 3;
- uint32_t Post_Count : 3;
- uint32_t Post_Owner : 3;
- uint32_t Post_Next : 2;
- uint32_t Post_CallerPrio : 2;
- uint32_t Post_CallerCPU : 2;
+ uint8_t Skip : 1;
+ uint8_t Pre_Class_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Post_Action : 3;
} RtemsSemReqRelease_Entry;
-typedef enum {
- BLOCKER_A,
- BLOCKER_B,
- BLOCKER_C,
- HELPER_HOME,
- HELPER_OTHER,
- WORKER_COUNT
-} WorkerKind;
-
/**
* @brief Test context for spec:/rtems/sem/req/release test case.
*/
typedef struct {
/**
- * @brief This member contains the identifier of the runner home scheduler.
- */
- rtems_id runner_scheduler_id;
-
- /**
- * @brief This member contains the identifier of another scheduler.
- */
- rtems_id other_scheduler_id;
-
- /**
- * @brief This member contains the identifier of a third scheduler.
- */
- rtems_id third_scheduler_id;
-
- /**
- * @brief This member contains the identifier of the scheduler owning the
- * processor of the calling task after the rtems_semaphore_release() call.
- */
- rtems_id after_release_scheduler_id;
-
- /**
- * @brief This member contains the current priority of the calling task after
- * the rtems_semaphore_release() call.
- */
- rtems_id after_release_priority;
-
- /**
- * @brief This member contains the runner task identifier.
- */
- rtems_id runner_id;
-
- /**
- * @brief This member contains the worker task identifiers.
- */
- rtems_id worker_id[ WORKER_COUNT ];
-
- /**
- * @brief If this member is true, then the worker shall busy wait.
- */
- volatile bool busy_wait;
-
- /**
- * @brief This member contains the worker activity counter.
- */
- uint32_t counter;
-
- /**
- * @brief This member contains the semaphore obtain counter of a specific
- * worker.
- */
- uint32_t obtain_counter[ WORKER_COUNT ];
-
- /**
- * @brief This member contains the count of the semaphore after the
- * rtems_semaphore_release() call.
- */
- uint32_t sem_count;
-
- /**
- * @brief This member contains identifier of the owner of the semaphore after
- * the rtems_semaphore_release() call or zero if it had no owner.
+ * @brief This member contains the thread queue test context.
*/
- rtems_id owner;
+ union {
+ TQContext tq_ctx;
+ TQMtxContext tq_mtx_ctx;
+ TQSemContext tq_sem_ctx;
+ };
/**
* @brief This member specifies the attribute set of the semaphore.
*/
rtems_attribute attribute_set;
- /**
- * @brief This member specifies if the initial count of the semaphore.
- */
- uint32_t count;
-
- /**
- * @brief This member contains the semaphore identifier.
- */
- rtems_id semaphore_id;
-
- /**
- * @brief If this member is true, then the ``id`` parameter shall be valid.
- */
- bool valid_id;
-
- /**
- * @brief If this member is true, then other tasks shall be blocked on the
- * semaphore.
- */
- bool blocked;
-
- /**
- * @brief If this member is true, then a task other than the runner task
- * shall be the owner of the semaphore.
- */
- bool owner_other;
-
- /**
- * @brief If this member is true, then the runner task shall obtain the
- * semaphore recursively.
- */
- bool nested;
-
- /**
- * @brief If this member is true, then the runner task shall migrate to
- * another scheduler due to the locking protocol used by the semaphore.
- */
- bool other_scheduler;
-
- /**
- * @brief If this member is true, then the properties of the semaphore shall
- * be obtained.
- */
- bool need_properties;
-
- /**
- * @brief This member specifies the ``id`` parameter for the
- * rtems_semaphore_release() call.
- */
- rtems_id id;
-
- /**
- * @brief This member contains the rtems_semaphore_release() return status.
- */
- rtems_status_code status;
-
struct {
/**
- * @brief This member defines the pre-condition indices for the next
- * action.
- */
- size_t pci[ 5 ];
-
- /**
* @brief This member defines the pre-condition states for the next action.
*/
- size_t pcs[ 5 ];
+ size_t pcs[ 3 ];
/**
* @brief If this member is true, then the test action loop is executed.
@@ -365,25 +177,6 @@ static const char * const RtemsSemReqRelease_PreDesc_Discipline[] = {
"NA"
};
-static const char * const RtemsSemReqRelease_PreDesc_Count[] = {
- "LessMax",
- "Max",
- "Blocked",
- "NA"
-};
-
-static const char * const RtemsSemReqRelease_PreDesc_Owner[] = {
- "No",
- "Self",
- "Other",
- "Blocked",
- "Nested",
- "BlockedNested",
- "BlockedOther",
- "BlockedNestedOther",
- "NA"
-};
-
static const char * const RtemsSemReqRelease_PreDesc_Id[] = {
"Valid",
"Invalid",
@@ -393,8 +186,6 @@ static const char * const RtemsSemReqRelease_PreDesc_Id[] = {
static const char * const * const RtemsSemReqRelease_PreDesc[] = {
RtemsSemReqRelease_PreDesc_Class,
RtemsSemReqRelease_PreDesc_Discipline,
- RtemsSemReqRelease_PreDesc_Count,
- RtemsSemReqRelease_PreDesc_Owner,
RtemsSemReqRelease_PreDesc_Id,
NULL
};
@@ -403,459 +194,6 @@ static const char * const * const RtemsSemReqRelease_PreDesc[] = {
typedef RtemsSemReqRelease_Context Context;
-typedef enum {
- EVENT_HELPER_SYNC = RTEMS_EVENT_0,
- EVENT_OBTAIN = RTEMS_EVENT_1,
- EVENT_GET_PROPERTIES = RTEMS_EVENT_2,
- EVENT_OBTAIN_SYNC = RTEMS_EVENT_3,
- EVENT_RELEASE = RTEMS_EVENT_4,
- EVENT_RUNNER_SYNC = RTEMS_EVENT_5,
- EVENT_BUSY_WAIT = RTEMS_EVENT_6
-} Event;
-
-static void SynchronizeRunner( void )
-{
- rtems_event_set events;
-
- events = ReceiveAnyEvents();
- T_eq_u32( events, EVENT_RUNNER_SYNC );
-}
-
-static void Send(
- const Context *ctx,
- WorkerKind worker,
- rtems_event_set events
-)
-{
- SendEvents( ctx->worker_id[ worker ], events );
-}
-
-static void MoveBackHome( Context *ctx )
-{
-#if defined(RTEMS_SMP)
- rtems_task_priority priority;
-
- /* Move us back to a processor of our home scheduler */
- ctx->busy_wait = true;
- Send( ctx, HELPER_OTHER, EVENT_BUSY_WAIT );
- priority = SetPriority( ctx->worker_id[ HELPER_OTHER ], PRIO_VERY_ULTRA_HIGH );
- SetPriority( ctx->worker_id[ HELPER_OTHER ], priority );
- ctx->busy_wait = false;
-#else
- (void) ctx;
-#endif
-}
-
-static bool CanUseThirdScheduler( void )
-{
- return rtems_scheduler_get_processor_maximum() >= 4;
-}
-
-static bool IsFIFO( const Context *ctx )
-{
- return ( ctx->attribute_set & RTEMS_PRIORITY ) == 0;
-}
-
-static bool IsMrsP( const Context *ctx )
-{
- return ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0;
-}
-
-static bool IsPrioCeiling( const Context *ctx )
-{
- return ( ctx->attribute_set & RTEMS_PRIORITY_CEILING ) != 0;
-}
-
-#if defined(RTEMS_SMP)
-static void SetWorkerScheduler(
- const Context *ctx,
- WorkerKind worker,
- rtems_id scheduler_id,
- Priority priority
-)
-{
- rtems_status_code sc;
-
- sc = rtems_task_set_scheduler(
- ctx->worker_id[ worker ],
- scheduler_id,
- priority
- );
- T_rsc_success( sc );
-}
-
-static void SendAndWaitForIntendToBlock(
- const Context *ctx,
- WorkerKind worker,
- rtems_event_set events
-)
-{
- Thread_Control *the_thread;
- Thread_Wait_flags intend_to_block;
-
- Send( ctx, worker, events );
- the_thread = GetThread( ctx->worker_id[ worker ] );
- T_assert_not_null( the_thread );
- intend_to_block = THREAD_WAIT_CLASS_OBJECT |
- THREAD_WAIT_STATE_INTEND_TO_BLOCK;
-
- while ( _Thread_Wait_flags_get_acquire( the_thread ) != intend_to_block ) {
- /* Wait */
- }
-}
-
-static void BlockMrsP( Context *ctx )
-{
- if ( CanUseThirdScheduler() ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_A,
- ctx->third_scheduler_id,
- PRIO_HIGH
- );
- SetWorkerScheduler(
- ctx,
- BLOCKER_C,
- ctx->third_scheduler_id,
- PRIO_ULTRA_HIGH
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_A,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_B,
- EVENT_OBTAIN | EVENT_RELEASE
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_C,
- EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE
- );
- } else {
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_B,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_OBTAIN_SYNC | EVENT_RELEASE
- );
- }
-}
-#endif
-
-static void Obtain( const Context *ctx )
-{
- rtems_status_code sc;
-
- sc = rtems_semaphore_obtain(
- ctx->semaphore_id,
- RTEMS_WAIT,
- RTEMS_NO_TIMEOUT
- );
- T_rsc_success( sc );
-}
-
-static void Release( const Context *ctx )
-{
- rtems_status_code sc;
-
- sc = rtems_semaphore_release( ctx->semaphore_id );
- T_rsc_success( sc );
-}
-
-static void BlockNormal( Context *ctx )
-{
- rtems_event_set first;
- rtems_event_set last;
-
- first = EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE;
- last = EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE;
-
- if ( IsFIFO( ctx ) ) {
- Send( ctx, BLOCKER_A, first );
- } else {
- Send( ctx, BLOCKER_A, last );
- }
-
-#if defined(RTEMS_SMP)
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE | EVENT_HELPER_SYNC );
- SynchronizeRunner();
-#else
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE );
-#endif
-
- if ( IsFIFO( ctx ) ) {
- Send( ctx, BLOCKER_C, last );
- } else {
- Send( ctx, BLOCKER_C, first );
- }
-
- MoveBackHome( ctx );
-}
-
-static void BlockPrioCeiling( const Context *ctx )
-{
- SetPriority( ctx->worker_id[ BLOCKER_A ], PRIO_ULTRA_HIGH );
- Send( ctx, BLOCKER_A, EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE );
- Yield();
- SetPriority( ctx->worker_id[ BLOCKER_A ], PRIO_HIGH );
-
- SetPriority( ctx->worker_id[ BLOCKER_B ], PRIO_ULTRA_HIGH );
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE );
- Yield();
- SetPriority( ctx->worker_id[ BLOCKER_B ], PRIO_VERY_HIGH );
-
- Send(
- ctx,
- BLOCKER_C,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE
- );
- Yield();
-}
-
-static void PrepareForAction( Context *ctx )
-{
- rtems_status_code sc;
-
- sc = rtems_semaphore_create(
- NAME,
- ctx->count,
- ctx->attribute_set,
- PRIO_ULTRA_HIGH,
- &ctx->semaphore_id
- );
- T_rsc_success( sc );
-
- if ( ctx->valid_id ) {
- ctx->id = ctx->semaphore_id;
- } else {
- ctx->id = 0;
- }
-
-#if defined(RTEMS_SMP)
- if ( !IsPrioCeiling( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_B,
- ctx->other_scheduler_id,
- PRIO_LOW
- );
- }
-#endif
-
- if ( ctx->owner_other ) {
- Event event;
-
- event = EVENT_OBTAIN;
-#if defined(RTEMS_SMP)
- event |= EVENT_OBTAIN_SYNC;
-#endif
-
- Send( ctx, BLOCKER_B, event );
-#if defined(RTEMS_SMP)
- SynchronizeRunner();
-#endif
- }
-
- if ( ctx->nested ) {
- Obtain( ctx );
- }
-
- if ( ctx->blocked ) {
-#if defined(RTEMS_SMP)
- if ( IsMrsP( ctx ) ) {
- BlockMrsP( ctx );
- } else if ( IsPrioCeiling( ctx ) ) {
- BlockPrioCeiling( ctx );
- } else {
- BlockNormal( ctx );
- }
-#else
- if ( IsPrioCeiling( ctx ) || IsMrsP( ctx ) ) {
- BlockPrioCeiling( ctx );
- } else {
- BlockNormal( ctx );
- }
-#endif
- }
-
- if ( ctx->other_scheduler ) {
- ctx->busy_wait = true;
- Send( ctx, HELPER_HOME, EVENT_BUSY_WAIT );
- ctx->busy_wait = false;
- }
-}
-
-static void GetSemaphoreProperties( Context *ctx )
-{
- Semaphore_Control *semaphore;
- Thread_queue_Context queue_context;
- Thread_Control *owner;
-
- if ( !ctx->need_properties ) {
- return;
- }
-
- ctx->need_properties = false;
-
- semaphore = _Semaphore_Get( ctx->semaphore_id, &queue_context );
- T_assert_not_null( semaphore );
- ctx->sem_count = semaphore->Core_control.Semaphore.count;
- owner = semaphore->Core_control.Wait_queue.Queue.owner;
- _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
-
- if ( owner != NULL ) {
- ctx->owner = owner->Object.id;
- } else {
- ctx->owner = 0;
- }
-}
-
-static void CleanupAfterAction( Context *ctx )
-{
- rtems_status_code sc;
-
- sc = rtems_scheduler_ident_by_processor(
- rtems_scheduler_get_processor(),
- &ctx->after_release_scheduler_id
- );
- T_rsc_success( sc );
-
- ctx->after_release_priority = GetSelfPriority();
-
- if ( ctx->nested ) {
- Release( ctx );
- }
-
- if ( ctx->count == 0 && ctx->status != RTEMS_SUCCESSFUL ) {
- Release( ctx );
- }
-
- if ( ctx->owner_other ) {
- Send( ctx, BLOCKER_B, EVENT_RELEASE );
- }
-
- if ( ctx->blocked ) {
- SynchronizeRunner();
-
-#if defined(RTEMS_SMP)
- if ( IsMrsP( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_A,
- ctx->runner_scheduler_id,
- PRIO_HIGH
- );
- SetWorkerScheduler(
- ctx,
- BLOCKER_C,
- ctx->runner_scheduler_id,
- PRIO_ULTRA_HIGH
- );
- }
-#endif
- }
-
- Obtain( ctx );
- Release( ctx );
-
-#if defined(RTEMS_SMP)
- if ( !IsPrioCeiling( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_B,
- ctx->runner_scheduler_id,
- PRIO_VERY_HIGH
- );
- }
-#endif
-
- sc = rtems_semaphore_delete( ctx->semaphore_id );
- T_rsc_success( sc );
-}
-
-static void Worker( rtems_task_argument arg, WorkerKind worker )
-{
- Context *ctx;
-
- ctx = (Context *) arg;
-
- while ( true ) {
- rtems_event_set events;
-
- events = ReceiveAnyEvents();
-
-#if defined(RTEMS_SMP)
- if ( ( events & EVENT_HELPER_SYNC ) != 0 ) {
- SendEvents( ctx->worker_id[ HELPER_OTHER ], EVENT_RUNNER_SYNC );
- }
-#endif
-
- if ( ( events & EVENT_OBTAIN ) != 0 ) {
- uint32_t counter;
-
- Obtain( ctx );
-
- counter = ctx->counter;
- ++counter;
- ctx->counter = counter;
- ctx->obtain_counter[ worker ] = counter;
- }
-
- if ( ( events & EVENT_GET_PROPERTIES ) != 0 ) {
- GetSemaphoreProperties( ctx );
- }
-
- if ( ( events & EVENT_OBTAIN_SYNC ) != 0 ) {
- SendEvents( ctx->runner_id, EVENT_RUNNER_SYNC );
- }
-
-#if defined(RTEMS_SMP)
- if ( ( events & EVENT_BUSY_WAIT ) != 0 ) {
- while ( ctx->busy_wait ) {
- /* Wait */
- }
- }
-#endif
-
- if ( ( events & EVENT_RELEASE ) != 0 ) {
- Release( ctx );
- }
-
- if ( ( events & EVENT_RUNNER_SYNC ) != 0 ) {
- SendEvents( ctx->runner_id, EVENT_RUNNER_SYNC );
- }
- }
-}
-
-static void BlockerA( rtems_task_argument arg )
-{
- Worker( arg, BLOCKER_A );
-}
-
-static void BlockerB( rtems_task_argument arg )
-{
- Worker( arg, BLOCKER_B );
-}
-
-static void BlockerC( rtems_task_argument arg )
-{
- Worker( arg, BLOCKER_C );
-}
-
-#if defined(RTEMS_SMP)
-static void HelperHome( rtems_task_argument arg )
-{
- Worker( arg, HELPER_HOME );
-}
-
-static void HelperOther( rtems_task_argument arg )
-{
- Worker( arg, HELPER_OTHER );
-}
-#endif
-
static void RtemsSemReqRelease_Pre_Class_Prepare(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Pre_Class state
@@ -927,6 +265,7 @@ static void RtemsSemReqRelease_Pre_Discipline_Prepare(
* While the semaphore uses the FIFO task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
break;
}
@@ -935,6 +274,7 @@ static void RtemsSemReqRelease_Pre_Discipline_Prepare(
* While the semaphore uses the priority task wait queue discipline.
*/
ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
break;
}
@@ -943,150 +283,6 @@ static void RtemsSemReqRelease_Pre_Discipline_Prepare(
}
}
-static void RtemsSemReqRelease_Pre_Count_Prepare(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Pre_Count state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Pre_Count_LessMax: {
- /*
- * While the count of the semaphore is less than the maximum count.
- */
- if ( ( ctx->attribute_set & RTEMS_SIMPLE_BINARY_SEMAPHORE ) != 0 ) {
- ctx->count = 0;
- } else {
- ctx->count = UINT32_MAX - 1;
- }
- break;
- }
-
- case RtemsSemReqRelease_Pre_Count_Max: {
- /*
- * While the count of the semaphore is equal to the maximum count.
- */
- if ( ( ctx->attribute_set & RTEMS_SIMPLE_BINARY_SEMAPHORE ) != 0 ) {
- ctx->count = 1;
- } else {
- ctx->count = UINT32_MAX;
- }
- break;
- }
-
- case RtemsSemReqRelease_Pre_Count_Blocked: {
- /*
- * While the semaphore has tasks blocked on the semaphore.
- */
- ctx->blocked = true;
- ctx->count = 0;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Count_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Pre_Owner_Prepare(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Pre_Owner state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Pre_Owner_No: {
- /*
- * While the semaphore has no owner.
- */
- ctx->count = 1;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_Self: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did not recursively obtain the semaphore.
- */
- ctx->count = 0;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_Other: {
- /*
- * While a task other than the calling task is the owner of the
- * semaphore.
- */
- ctx->count = 1;
- ctx->owner_other = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_Blocked: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did not recursively obtain the semaphore, while tasks are
- * blocked on the semaphore.
- */
- ctx->count = 0;
- ctx->blocked = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_Nested: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did recursively obtain the semaphore.
- */
- ctx->count = 0;
- ctx->nested = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_BlockedNested: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did recursively obtain the semaphore, while tasks are
- * blocked on the semaphore.
- */
- ctx->count = 0;
- ctx->blocked = true;
- ctx->nested = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_BlockedOther: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did not recursively obtain the semaphore, while tasks are
- * blocked on the semaphore, while the calling task executes on a
- * processor owned by a scheduler other than its home scheduler due to a
- * locking protocol mechanism provided by the semaphore.
- */
- ctx->count = 0;
- ctx->blocked = true;
- ctx->other_scheduler = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_BlockedNestedOther: {
- /*
- * While the calling task is the owner of the semaphore, while the
- * calling task did recursively obtain the semaphore, while tasks are
- * blocked on the semaphore, while the calling task executes on a
- * processor owned by a scheduler other than its home scheduler due to a
- * locking protocol mechanism provided by the semaphore.
- */
- ctx->count = 0;
- ctx->blocked = true;
- ctx->nested = true;
- ctx->other_scheduler = true;
- break;
- }
-
- case RtemsSemReqRelease_Pre_Owner_NA:
- break;
- }
-}
-
static void RtemsSemReqRelease_Pre_Id_Prepare(
RtemsSemReqRelease_Context *ctx,
RtemsSemReqRelease_Pre_Id state
@@ -1097,7 +293,7 @@ static void RtemsSemReqRelease_Pre_Id_Prepare(
/*
* While the ``id`` parameter is associated with the semaphore.
*/
- ctx->valid_id = true;
+ /* Nothing to prepare */
break;
}
@@ -1105,7 +301,7 @@ static void RtemsSemReqRelease_Pre_Id_Prepare(
/*
* While the ``id`` parameter is not associated with a semaphore.
*/
- ctx->valid_id = false;
+ /* Nothing to prepare */
break;
}
@@ -1114,335 +310,132 @@ static void RtemsSemReqRelease_Pre_Id_Prepare(
}
}
-static void RtemsSemReqRelease_Post_Status_Check(
+static void RtemsSemReqRelease_Post_Action_Check(
RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_Status state
+ RtemsSemReqRelease_Post_Action state
)
{
- switch ( state ) {
- case RtemsSemReqRelease_Post_Status_Ok: {
- /*
- * The return status of rtems_semaphore_release() shall be
- * RTEMS_SUCCESSFUL.
- */
- T_rsc_success( ctx->status );
- break;
- }
+ rtems_status_code sc;
- case RtemsSemReqRelease_Post_Status_InvId: {
+ switch ( state ) {
+ case RtemsSemReqRelease_Post_Action_InvId: {
/*
* The return status of rtems_semaphore_release() shall be
* RTEMS_INVALID_ID.
*/
- T_rsc( ctx->status, RTEMS_INVALID_ID );
+ sc = rtems_semaphore_release( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
break;
}
- case RtemsSemReqRelease_Post_Status_NotOwner: {
+ case RtemsSemReqRelease_Post_Action_BinarySurrender: {
/*
- * The return status of rtems_semaphore_release() shall be
- * RTEMS_NOT_OWNER_OF_RESOURCE.
+ * The calling task shall surrender the binary semaphore as specified by
+ * /score/sem/req/surrender.
*/
- T_rsc( ctx->status, RTEMS_NOT_OWNER_OF_RESOURCE );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_BINARY;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
break;
}
- case RtemsSemReqRelease_Post_Status_Unsat: {
- /*
- * The return status of rtems_semaphore_release() shall be
- * RTEMS_UNSATISFIED.
- */
- T_rsc( ctx->status, RTEMS_UNSATISFIED );
- break;
- }
-
- case RtemsSemReqRelease_Post_Status_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Post_Count_Check(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_Count state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Post_Count_Zero: {
+ case RtemsSemReqRelease_Post_Action_CountingSurrender: {
/*
- * The count of the semaphore shall be zero.
+ * The calling task shall surrender the counting semaphore as specified
+ * by /score/sem/req/surrender.
*/
- T_eq_u32( ctx->sem_count, 0 );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_COUNTING;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ScoreSemReqSurrender_Run( &ctx->tq_sem_ctx );
break;
}
- case RtemsSemReqRelease_Post_Count_One: {
+ case RtemsSemReqRelease_Post_Action_MtxSurrender: {
/*
- * The count of the semaphore shall be one.
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue blocks and a recursive seize
+ * is allowed.
*/
- T_eq_u32( ctx->sem_count, 1 );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
- case RtemsSemReqRelease_Post_Count_PlusOne: {
+ case RtemsSemReqRelease_Post_Action_InheritMtxSurrender: {
/*
- * The count of the semaphore shall be incremented by one.
+     * The calling task shall surrender the mutex as specified by
+     * /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
+     * allowed, and the priority inheritance locking protocol is used.
*/
- T_eq_u32( ctx->sem_count, ctx->count + 1 );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_INHERIT;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
- case RtemsSemReqRelease_Post_Count_Max: {
+ case RtemsSemReqRelease_Post_Action_CeilingMtxSurrender: {
/*
- * The count of the semaphore shall be the maximum count.
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue blocks, a recursive seize is
+ * allowed, and a priority ceiling is used.
*/
- T_eq_u32( ctx->sem_count, UINT32_MAX );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
- case RtemsSemReqRelease_Post_Count_Nop: {
+ case RtemsSemReqRelease_Post_Action_MrsPMtxSurrender: {
/*
- * The count of the semaphore shall not be modified.
+ * The calling task shall surrender the mutex as specified by
+ * /score/mtx/req/surrender where an enqueue is sticky, a recursive seize
+ * returns an error status, and a priority ceiling is used.
*/
- T_eq_u32( ctx->sem_count, ctx->count );
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ScoreMtxReqSurrender_Run( &ctx->tq_mtx_ctx );
break;
}
- case RtemsSemReqRelease_Post_Count_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Post_Owner_Check(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_Owner state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Post_Owner_No: {
- /*
- * The semaphore shall not have an owner.
- */
- T_eq_u32( ctx->owner, 0 );
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_Self: {
- /*
- * The owner of the semaphore shall be the calling task.
- */
- T_eq_u32( ctx->owner, ctx->runner_id );
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_Other: {
- /*
- * The owner of the semaphore shall be the other task.
- */
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_B ] );
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_FIFO: {
- /*
- * The owner of the semaphore shall be the first task unblocked in FIFO
- * order.
- */
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_A ] );
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_Priority: {
- /*
- * The owner of the semaphore shall be the first task unblocked in
- * priority order.
- */
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_C ] );
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_MrsP: {
- /*
- * The owner of the semaphore shall be the first task unblocked in MrsP
- * priority order.
- */
- if ( CanUseThirdScheduler() ) {
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_A ] );
- } else {
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_B ] );
- }
- break;
- }
-
- case RtemsSemReqRelease_Post_Owner_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Post_Next_Check(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_Next state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Post_Next_FIFO: {
- /*
- * The first blocked task in FIFO order shall be made ready.
- */
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 3 );
- break;
- }
-
- case RtemsSemReqRelease_Post_Next_Priority: {
- /*
- * The first blocked task in priority order shall be made ready.
- */
- if ( ctx->owner_other ) {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 0 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 0 );
- } else {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 3 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 1 );
- }
- break;
- }
-
- case RtemsSemReqRelease_Post_Next_MrsP: {
- /*
- * The first blocked task in MrsP priority order shall be made ready.
- */
- if ( CanUseThirdScheduler() ) {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 3 );
- } else {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 0 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 0 );
- }
- break;
- }
-
- case RtemsSemReqRelease_Post_Next_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Post_CallerPrio_Check(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_CallerPrio state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Post_CallerPrio_Inherit: {
- /*
- * The current priority of the calling task shall be the inherited
- * priority of the semaphore.
- */
- T_eq_u32( ctx->after_release_priority, PRIO_ULTRA_HIGH );
- break;
- }
-
- case RtemsSemReqRelease_Post_CallerPrio_Ceiling: {
- /*
- * The current priority of the calling task shall be the ceiling priority
- * of the semaphore.
- */
- T_eq_u32( ctx->after_release_priority, PRIO_ULTRA_HIGH );
- break;
- }
-
- case RtemsSemReqRelease_Post_CallerPrio_Real: {
- /*
- * The current priority of the calling task shall be its real priority.
- */
- T_eq_u32( ctx->after_release_priority, PRIO_NORMAL );
- break;
- }
-
- case RtemsSemReqRelease_Post_CallerPrio_NA:
- break;
- }
-}
-
-static void RtemsSemReqRelease_Post_CallerCPU_Check(
- RtemsSemReqRelease_Context *ctx,
- RtemsSemReqRelease_Post_CallerCPU state
-)
-{
- switch ( state ) {
- case RtemsSemReqRelease_Post_CallerCPU_Home: {
- /*
- * The calling task shall execute on a processor owned by its home
- * scheduler.
- */
- T_eq_u32( ctx->after_release_scheduler_id, ctx->runner_scheduler_id );
- break;
- }
-
- case RtemsSemReqRelease_Post_CallerCPU_Other: {
- /*
- * The calling task shall execute on a processor not owned by its home
- * scheduler.
- */
- if ( IsMrsP( ctx ) && CanUseThirdScheduler() ) {
- T_eq_u32( ctx->after_release_scheduler_id, ctx->third_scheduler_id );
- } else {
- T_eq_u32( ctx->after_release_scheduler_id, ctx->other_scheduler_id );
- }
- break;
- }
-
- case RtemsSemReqRelease_Post_CallerCPU_NA:
+ case RtemsSemReqRelease_Post_Action_NA:
break;
}
}
static void RtemsSemReqRelease_Setup( RtemsSemReqRelease_Context *ctx )
{
- rtems_status_code sc;
-
- memset( ctx, 0, sizeof( *ctx ) );
- ctx->runner_id = rtems_task_self();
SetSelfPriority( PRIO_NORMAL );
- ctx->worker_id[ BLOCKER_A ] = CreateTask( "BLKA", PRIO_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_A ], BlockerA, ctx );
- ctx->worker_id[ BLOCKER_B ] = CreateTask( "BLKB", PRIO_VERY_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_B ], BlockerB, ctx );
- ctx->worker_id[ BLOCKER_C ] = CreateTask( "BLKC", PRIO_ULTRA_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_C ], BlockerC, ctx );
-
- sc = rtems_task_get_scheduler( RTEMS_SELF, &ctx->runner_scheduler_id );
- T_rsc_success( sc );
-
- #if defined(RTEMS_SMP)
- ctx->worker_id[ HELPER_HOME ] = CreateTask( "HLPH", PRIO_VERY_ULTRA_HIGH );
- StartTask( ctx->worker_id[ HELPER_HOME ], HelperHome, ctx );
- ctx->worker_id[ HELPER_OTHER ] = CreateTask( "HLPO", PRIO_VERY_LOW );
- StartTask( ctx->worker_id[ HELPER_OTHER ], HelperOther, ctx );
-
- sc = rtems_scheduler_ident(
- TEST_SCHEDULER_B_NAME,
- &ctx->other_scheduler_id
- );
- T_rsc_success( sc );
-
- sc = rtems_scheduler_ident(
- TEST_SCHEDULER_C_NAME,
- &ctx->third_scheduler_id
- );
- T_rsc_success( sc );
-
- SetWorkerScheduler(
- ctx,
- HELPER_OTHER,
- ctx->other_scheduler_id,
- PRIO_VERY_LOW
- );
- #endif
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
}
static void RtemsSemReqRelease_Setup_Wrap( void *arg )
@@ -1456,12 +449,7 @@ static void RtemsSemReqRelease_Setup_Wrap( void *arg )
static void RtemsSemReqRelease_Teardown( RtemsSemReqRelease_Context *ctx )
{
- size_t i;
-
- for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
- DeleteTask( ctx->worker_id[ i ] );
- }
-
+ TQDestroy( &ctx->tq_ctx );
RestoreRunnerPriority();
}
@@ -1476,257 +464,65 @@ static void RtemsSemReqRelease_Teardown_Wrap( void *arg )
static void RtemsSemReqRelease_Prepare( RtemsSemReqRelease_Context *ctx )
{
- size_t i;
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+}
- ctx->counter = 0;
+static void RtemsSemReqRelease_Action( RtemsSemReqRelease_Context *ctx )
+{
+ rtems_status_code sc;
- for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
- ctx->obtain_counter[ i ] = 0;
- }
+ sc = rtems_semaphore_create(
+ NAME,
+ 1,
+ ctx->attribute_set,
+ PRIO_VERY_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
- ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
- ctx->blocked = false;
- ctx->owner_other = false;
- ctx->nested = false;
- ctx->other_scheduler = false;
- ctx->need_properties = true;
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ rtems_task_priority prio;
+
+ sc = rtems_semaphore_set_priority(
+ ctx->tq_ctx.thread_queue_id,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH,
+ &prio
+ );
+ T_rsc_success( sc );
+ }
}
-static void RtemsSemReqRelease_Action( RtemsSemReqRelease_Context *ctx )
+static void RtemsSemReqRelease_Cleanup( RtemsSemReqRelease_Context *ctx )
{
- PrepareForAction( ctx );
- ctx->status = rtems_semaphore_release( ctx->id );
- GetSemaphoreProperties( ctx );
- CleanupAfterAction( ctx );
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+  T_rsc_success( sc );
}
static const RtemsSemReqRelease_Entry
RtemsSemReqRelease_Entries[] = {
- { 1, 0, 0, 0, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_Nop, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_One, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_PlusOne, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_Unsat,
- RtemsSemReqRelease_Post_Count_Max, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_Zero, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_FIFO, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 0, 1, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_Zero, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_Priority,
- RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NotOwner,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_No,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_No,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_No,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NotOwner,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Other,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Other,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Priority,
- RtemsSemReqRelease_Post_Next_Priority,
- RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Inherit,
- RtemsSemReqRelease_Post_CallerCPU_Home },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId },
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CountingSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_BinarySurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MtxSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_CeilingMtxSurrender },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InheritMtxSurrender },
#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Inherit,
- RtemsSemReqRelease_Post_CallerCPU_Other },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_MrsPMtxSurrender },
#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA },
#endif
#if defined(RTEMS_SMP)
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
+ { 0, 0, 0, 0, RtemsSemReqRelease_Post_Action_InvId }
#else
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#endif
-#if defined(RTEMS_SMP)
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#else
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#endif
-#if defined(RTEMS_SMP)
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#endif
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_FIFO,
- RtemsSemReqRelease_Post_Next_FIFO, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Inherit,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Priority,
- RtemsSemReqRelease_Post_Next_Priority,
- RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#endif
-#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Inherit,
- RtemsSemReqRelease_Post_CallerCPU_Other },
-#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#endif
-#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_MrsP,
- RtemsSemReqRelease_Post_Next_MrsP, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#else
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Priority,
- RtemsSemReqRelease_Post_Next_Priority,
- RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#endif
-#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_Ok,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_MrsP,
- RtemsSemReqRelease_Post_Next_MrsP, RtemsSemReqRelease_Post_CallerPrio_Real,
- RtemsSemReqRelease_Post_CallerCPU_Home },
-#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA },
-#endif
-#if defined(RTEMS_SMP)
- { 0, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_InvId,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_Self,
- RtemsSemReqRelease_Post_Next_NA,
- RtemsSemReqRelease_Post_CallerPrio_Ceiling,
- RtemsSemReqRelease_Post_CallerCPU_Other }
-#else
- { 1, 0, 0, 1, 0, 0, RtemsSemReqRelease_Post_Status_NA,
- RtemsSemReqRelease_Post_Count_NA, RtemsSemReqRelease_Post_Owner_NA,
- RtemsSemReqRelease_Post_Next_NA, RtemsSemReqRelease_Post_CallerPrio_NA,
- RtemsSemReqRelease_Post_CallerCPU_NA }
+ { 1, 0, 0, 0, RtemsSemReqRelease_Post_Action_NA }
#endif
};
static const uint8_t
RtemsSemReqRelease_Map[] = {
- 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1,
- 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 6, 1, 6, 1,
- 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1,
- 7, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 3, 1, 3, 1, 3, 1, 3, 1,
- 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 8, 1,
- 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1,
- 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 9, 1, 9, 1, 9, 1,
- 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 11, 12, 4, 13, 14, 23, 4, 15, 4, 15, 4, 2,
- 2, 2, 2, 10, 11, 12, 4, 13, 14, 23, 4, 15, 4, 15, 4, 2, 2, 2, 2, 10, 11, 12,
- 4, 13, 14, 23, 4, 15, 4, 15, 4, 2, 2, 2, 2, 10, 11, 12, 4, 13, 14, 16, 4, 15,
- 4, 15, 4, 2, 2, 2, 2, 10, 11, 12, 4, 13, 14, 16, 4, 15, 4, 15, 4, 2, 2, 2, 2,
- 10, 11, 12, 4, 13, 14, 16, 4, 15, 4, 15, 4, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 11, 12, 5, 13, 14, 16, 5,
- 17, 5, 17, 5, 2, 2, 2, 2, 10, 11, 12, 5, 13, 14, 16, 5, 17, 5, 17, 5, 2, 2,
- 2, 2, 10, 11, 12, 5, 13, 14, 16, 5, 17, 5, 17, 5, 2, 2, 2, 2, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 11, 12, 4, 13, 14, 16,
- 18, 15, 4, 24, 18, 25, 19, 26, 19, 10, 11, 12, 4, 13, 14, 16, 18, 15, 4, 24,
- 18, 25, 19, 26, 19, 10, 11, 12, 4, 13, 14, 16, 18, 15, 4, 24, 18, 25, 19, 26,
- 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
- 11, 12, 5, 13, 14, 27, 5, 20, 21, 20, 21, 28, 29, 22, 22, 10, 11, 12, 5, 13,
- 14, 27, 5, 20, 21, 20, 21, 28, 29, 22, 22, 10, 11, 12, 5, 13, 14, 27, 5, 20,
- 21, 20, 21, 28, 29, 22, 22
+ 2, 0, 2, 0, 3, 0, 3, 0, 4, 0, 4, 0, 1, 1, 5, 0, 1, 1, 6, 0, 1, 1, 7, 8
};
static size_t RtemsSemReqRelease_Scope( void *arg, char *buf, size_t n )
@@ -1763,48 +559,13 @@ static inline RtemsSemReqRelease_Entry RtemsSemReqRelease_PopEntry(
];
}
-static void RtemsSemReqRelease_SetPreConditionStates(
- RtemsSemReqRelease_Context *ctx
-)
-{
- ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
- ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
-
- if ( ctx->Map.entry.Pre_Count_NA ) {
- ctx->Map.pcs[ 2 ] = RtemsSemReqRelease_Pre_Count_NA;
- } else {
- ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
- }
-
- if ( ctx->Map.entry.Pre_Owner_NA ) {
- ctx->Map.pcs[ 3 ] = RtemsSemReqRelease_Pre_Owner_NA;
- } else {
- ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
- }
-
- ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
-}
-
static void RtemsSemReqRelease_TestVariant( RtemsSemReqRelease_Context *ctx )
{
RtemsSemReqRelease_Pre_Class_Prepare( ctx, ctx->Map.pcs[ 0 ] );
RtemsSemReqRelease_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
- RtemsSemReqRelease_Pre_Count_Prepare( ctx, ctx->Map.pcs[ 2 ] );
- RtemsSemReqRelease_Pre_Owner_Prepare( ctx, ctx->Map.pcs[ 3 ] );
- RtemsSemReqRelease_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ RtemsSemReqRelease_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
RtemsSemReqRelease_Action( ctx );
- RtemsSemReqRelease_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
- RtemsSemReqRelease_Post_Count_Check( ctx, ctx->Map.entry.Post_Count );
- RtemsSemReqRelease_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
- RtemsSemReqRelease_Post_Next_Check( ctx, ctx->Map.entry.Post_Next );
- RtemsSemReqRelease_Post_CallerPrio_Check(
- ctx,
- ctx->Map.entry.Post_CallerPrio
- );
- RtemsSemReqRelease_Post_CallerCPU_Check(
- ctx,
- ctx->Map.entry.Post_CallerCPU
- );
+ RtemsSemReqRelease_Post_Action_Check( ctx, ctx->Map.entry.Post_Action );
}
/**
@@ -1819,41 +580,29 @@ T_TEST_CASE_FIXTURE( RtemsSemReqRelease, &RtemsSemReqRelease_Fixture )
ctx->Map.index = 0;
for (
- ctx->Map.pci[ 0 ] = RtemsSemReqRelease_Pre_Class_Counting;
- ctx->Map.pci[ 0 ] < RtemsSemReqRelease_Pre_Class_NA;
- ++ctx->Map.pci[ 0 ]
+ ctx->Map.pcs[ 0 ] = RtemsSemReqRelease_Pre_Class_Counting;
+ ctx->Map.pcs[ 0 ] < RtemsSemReqRelease_Pre_Class_NA;
+ ++ctx->Map.pcs[ 0 ]
) {
for (
- ctx->Map.pci[ 1 ] = RtemsSemReqRelease_Pre_Discipline_FIFO;
- ctx->Map.pci[ 1 ] < RtemsSemReqRelease_Pre_Discipline_NA;
- ++ctx->Map.pci[ 1 ]
+ ctx->Map.pcs[ 1 ] = RtemsSemReqRelease_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < RtemsSemReqRelease_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
) {
for (
- ctx->Map.pci[ 2 ] = RtemsSemReqRelease_Pre_Count_LessMax;
- ctx->Map.pci[ 2 ] < RtemsSemReqRelease_Pre_Count_NA;
- ++ctx->Map.pci[ 2 ]
+ ctx->Map.pcs[ 2 ] = RtemsSemReqRelease_Pre_Id_Valid;
+ ctx->Map.pcs[ 2 ] < RtemsSemReqRelease_Pre_Id_NA;
+ ++ctx->Map.pcs[ 2 ]
) {
- for (
- ctx->Map.pci[ 3 ] = RtemsSemReqRelease_Pre_Owner_No;
- ctx->Map.pci[ 3 ] < RtemsSemReqRelease_Pre_Owner_NA;
- ++ctx->Map.pci[ 3 ]
- ) {
- for (
- ctx->Map.pci[ 4 ] = RtemsSemReqRelease_Pre_Id_Valid;
- ctx->Map.pci[ 4 ] < RtemsSemReqRelease_Pre_Id_NA;
- ++ctx->Map.pci[ 4 ]
- ) {
- ctx->Map.entry = RtemsSemReqRelease_PopEntry( ctx );
-
- if ( ctx->Map.entry.Skip ) {
- continue;
- }
-
- RtemsSemReqRelease_SetPreConditionStates( ctx );
- RtemsSemReqRelease_Prepare( ctx );
- RtemsSemReqRelease_TestVariant( ctx );
- }
+ ctx->Map.entry = RtemsSemReqRelease_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
}
+
+ RtemsSemReqRelease_Prepare( ctx );
+ RtemsSemReqRelease_TestVariant( ctx );
+ RtemsSemReqRelease_Cleanup( ctx );
}
}
}
diff --git a/testsuites/validation/tr-mtx-surrender.c b/testsuites/validation/tr-mtx-surrender.c
new file mode 100644
index 0000000000..64fcaa9b1b
--- /dev/null
+++ b/testsuites/validation/tr-mtx-surrender.c
@@ -0,0 +1,1215 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-mtx-surrender.h"
+#include "tr-tq-surrender-priority-inherit.h"
+#include "tr-tq-surrender.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreMtxReqSurrender spec:/score/mtx/req/surrender
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_Protocol_NA : 1;
+ uint32_t Pre_Discipline_NA : 1;
+ uint32_t Pre_Recursive_NA : 1;
+ uint32_t Pre_OwnerCheck_NA : 1;
+ uint32_t Pre_Owner_NA : 1;
+ uint32_t Pre_Nested_NA : 1;
+ uint32_t Pre_Blocked_NA : 1;
+ uint32_t Pre_Priority_NA : 1;
+ uint32_t Post_Status : 2;
+ uint32_t Post_Owner : 3;
+ uint32_t Post_Surrender : 3;
+ uint32_t Post_Priority : 2;
+} ScoreMtxReqSurrender_Entry;
+
+/**
+ * @brief Test context for spec:/score/mtx/req/surrender test case.
+ */
+typedef struct {
+ /**
+ * @brief If this member is true, then the calling thread shall be the owner
+ * of the mutex.
+ */
+ bool owner_caller;
+
+ /**
+ * @brief If this member is true, then a thread other than the calling thread
+ * shall be the owner of the mutex.
+ */
+ bool owner_other;
+
+ /**
+ * @brief If this member is true, then the calling thread shall have seized
+ * the mutex recursively.
+ */
+ bool nested;
+
+ /**
+ * @brief If this member is true, then there shall be a thread blocked
+ * waiting for the mutex.
+ */
+ bool blocked;
+
+ /**
+ * @brief This member contains the real priority of the calling thread.
+ */
+ rtems_task_priority priority_real;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * before the directive call.
+ */
+ rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the return status of the directive call.
+ */
+ Status_Control status;
+
+ /**
+ * @brief This member contains the owner of the mutex after the directive
+ * call.
+ */
+ const rtems_tcb *owner_after;
+
+ /**
+ * @brief This member contains the current priority of the calling thread
+ * after the directive call.
+ */
+ rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains the counter snapshot after the directive call.
+ */
+ uint32_t counter;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreMtxReqSurrender_Run() parameter.
+ */
+ TQMtxContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 8 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 8 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreMtxReqSurrender_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreMtxReqSurrender_Context;
+
+static ScoreMtxReqSurrender_Context
+ ScoreMtxReqSurrender_Instance;
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Protocol[] = {
+ "None",
+ "Inherit",
+ "Ceiling",
+ "MrsP",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Recursive[] = {
+ "Allowed",
+ "NotAllowed",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_OwnerCheck[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Owner[] = {
+ "None",
+ "Caller",
+ "Other",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Nested[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Blocked[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreMtxReqSurrender_PreDesc_Priority[] = {
+ "High",
+ "Equal",
+ "Low",
+ "NA"
+};
+
+static const char * const * const ScoreMtxReqSurrender_PreDesc[] = {
+ ScoreMtxReqSurrender_PreDesc_Protocol,
+ ScoreMtxReqSurrender_PreDesc_Discipline,
+ ScoreMtxReqSurrender_PreDesc_Recursive,
+ ScoreMtxReqSurrender_PreDesc_OwnerCheck,
+ ScoreMtxReqSurrender_PreDesc_Owner,
+ ScoreMtxReqSurrender_PreDesc_Nested,
+ ScoreMtxReqSurrender_PreDesc_Blocked,
+ ScoreMtxReqSurrender_PreDesc_Priority,
+ NULL
+};
+
+typedef ScoreMtxReqSurrender_Context Context;
+
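+/*
+ * Converts the generic status code into the status domain of the thread
+ * queue under test.
+ */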
+static Status_Control Status( const Context *ctx, Status_Control status )
+{
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+}
+
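+/*
+ * Prepares the mutex state according to the pre-conditions (owner, recursive
+ * seize, blocked thread), surrenders the mutex while the scheduler
+ * operations are recorded, and finally releases the remaining ownerships.
+ */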
+static void Action( Context *ctx )
+{
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_ENQUEUE );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ Yield();
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+ SetSelfPriority( PRIO_NORMAL );
+ Yield();
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_SURRENDER );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ }
+}
+
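+/*
+ * Variant of Action() for the sticky enqueue variant used by SMP locking
+ * protocols: the blocker runs on a second scheduler and the runner
+ * temporarily migrates to it so that the helper thread can seize the mutex.
+ */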
+static void ActionSticky( Context *ctx )
+{
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndWaitForIntendToBlock(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+
+ if ( ctx->status == Status( ctx, STATUS_SUCCESSFUL ) ) {
+ TQWaitForExecutionStop( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+ }
+
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_SURRENDER
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ } else {
+ SetSelfPriority( PRIO_NORMAL );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Protocol_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Protocol state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Protocol_None: {
+ /*
+ * Where the mutex does not use a locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_NO_PROTOCOL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_Inherit: {
+ /*
+ * Where the mutex uses the priority inheritance locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_INHERIT ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_Ceiling: {
+ /*
+ * Where the mutex uses the priority ceiling locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_CEILING ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_MrsP: {
+ /*
+ * Where the mutex uses the MrsP locking protocol.
+ */
+ if ( ctx->tq_ctx->protocol != TQ_MTX_MRSP ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Protocol_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Discipline_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue of the mutex uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue of the mutex uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Recursive_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Recursive state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Recursive_Allowed: {
+ /*
+ * Where a recursive seize of the mutex is allowed.
+ */
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Recursive_NotAllowed: {
+ /*
+ * Where a recursive seize of the mutex is not allowed.
+ */
+ if ( ctx->tq_ctx->recursive == TQ_MTX_RECURSIVE_ALLOWED ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Recursive_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_OwnerCheck_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_OwnerCheck state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_Yes: {
+ /*
+ * Where the surrender checks that the mutex owner is the calling thread.
+ */
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_CHECKS_OWNER ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_No: {
+ /*
+ * Where the surrender does not check that the mutex owner is the calling
+ * thread.
+ */
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_NO_OWNER_CHECK ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_OwnerCheck_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Owner_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Owner_None: {
+ /*
+ * While the mutex has no owner.
+ */
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_Caller: {
+ /*
+ * While the owner of the mutex is the calling thread.
+ */
+ ctx->owner_caller = true;
+ ctx->owner_other = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_Other: {
+ /*
+ * While the owner of the mutex is a thread other than the calling
+ * thread.
+ */
+ ctx->owner_caller = false;
+ ctx->owner_other = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Nested_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Nested state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Nested_Yes: {
+ /*
+       * While the calling thread seized the mutex recursively.
+ */
+ ctx->nested = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Nested_No: {
+ /*
+       * While the calling thread did not seize the mutex recursively.
+ */
+ ctx->nested = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Nested_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Blocked_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Blocked state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Blocked_Yes: {
+ /*
+       * While threads are blocked on the mutex.
+ */
+ ctx->blocked = true;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Blocked_No: {
+ /*
+ * While no threads are blocked on the mutex.
+ */
+ ctx->blocked = false;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Blocked_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Pre_Priority_Prepare(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Pre_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Pre_Priority_High: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be higher than the
+ * highest priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_Equal: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be equal to the highest
+ * priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_VERY_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_Low: {
+ /*
+ * While the current priority of the calling thread without the
+ * priorities available through the mutex would be lower than the highest
+ * priority of the priorities available through the mutex.
+ */
+ ctx->priority_real = PRIO_HIGH;
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Pre_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Status_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Status state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Status_Ok: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_SUCCESSFUL.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Status_NotOwner: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_NOT_OWNER.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_NOT_OWNER ) );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Status_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Owner_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Owner state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Owner_None: {
+ /*
+ * The mutex shall have no owner.
+ */
+ T_null( ctx->owner_after );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_Caller: {
+ /*
+ * The owner of the mutex shall be the calling thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.runner_tcb
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_Other: {
+ /*
+ * The owner of the mutex shall not be modified.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_HELPER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_First: {
+ /*
+       * The owner of the mutex shall be the dequeued thread.
+ */
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+ );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Owner_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Surrender_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Surrender state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Surrender_Nop: {
+ /*
+ * The thread queue of the mutex shall not be surrendered to a thread.
+ */
+ T_eq_u32( ctx->counter, 0 );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_FIFO: {
+ /*
+ * The thread queue of the mutex shall be surrendered in FIFO order.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrender_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_Priority: {
+ /*
+ * The thread queue of the mutex shall be surrendered in priority order.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrender_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_PriorityInherit: {
+ /*
+       * The thread queue of the mutex shall be surrendered in priority order
+       * with priority inheritance.
+ */
+ T_eq_u32( ctx->counter, 1 );
+ ScoreTqReqSurrenderPriorityInherit_Run( &ctx->tq_ctx->base );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Surrender_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Post_Priority_Check(
+ ScoreMtxReqSurrender_Context *ctx,
+ ScoreMtxReqSurrender_Post_Priority state
+)
+{
+ switch ( state ) {
+ case ScoreMtxReqSurrender_Post_Priority_Nop: {
+ /*
+       * The current priority of the calling thread shall not be modified.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Priority_Low: {
+ /*
+ * The current priority of the calling thread shall be lowered to reflect
+ * the removal of the priorities available through the mutex.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_real );
+ break;
+ }
+
+ case ScoreMtxReqSurrender_Post_Priority_NA:
+ break;
+ }
+}
+
+static void ScoreMtxReqSurrender_Prepare( ScoreMtxReqSurrender_Context *ctx )
+{
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ ctx->nested = false;
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ } else {
+ ctx->priority_real = PRIO_NORMAL;
+ }
+}
+
+static void ScoreMtxReqSurrender_Action( ScoreMtxReqSurrender_Context *ctx )
+{
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+}
+
+static const ScoreMtxReqSurrender_Entry
+ScoreMtxReqSurrender_Entries[] = {
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Owner_Other,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 1, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NA,
+ ScoreMtxReqSurrender_Post_Owner_NA, ScoreMtxReqSurrender_Post_Surrender_NA,
+ ScoreMtxReqSurrender_Post_Priority_NA },
+ { 0, 0, 0, 0, 0, 0, 1, 0, 1, ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_FIFO,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Priority_Low },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Nop },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Low },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Priority_Low }
+};
+
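+/*
+ * Maps each pre-condition state tuple to an index into the entries table
+ * above.  With four protocols, two disciplines, two recursive modes, two
+ * owner checks, three owners, two nesting states, two blocking states, and
+ * three priorities there are 4 * 2 * 2 * 2 * 3 * 2 * 2 * 3 == 1152 entries.
+ */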
+static const uint8_t
+ScoreMtxReqSurrender_Map[] = {
+ 4, 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 7, 7, 7, 7, 7, 7, 11, 11, 11, 6, 6, 6, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7, 7,
+ 7, 7, 7, 11, 11, 11, 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 11, 11, 11, 6, 6, 6, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 11, 11, 11, 6, 6, 6, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 4, 4, 4, 5, 5, 5, 4,
+ 4, 4, 5, 5, 5, 7, 7, 7, 7, 7, 7, 12, 12, 12, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 12, 12, 12,
+ 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5,
+ 5, 2, 2, 2, 2, 2, 2, 12, 12, 12, 6, 6, 6, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 12, 12, 12, 6, 6, 6, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 8, 8, 8, 7, 7, 7, 9, 9, 13, 6, 6, 6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8, 8, 7,
+ 7, 7, 9, 9, 13, 6, 6, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 9, 9, 13, 6, 6, 6, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 9, 9,
+ 13, 6, 6, 6, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 4, 4, 4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 8, 8, 8, 8, 8, 8, 14, 14,
+ 16, 10, 10, 15, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 8, 8, 8, 8, 8, 8, 14, 14, 16, 10, 10, 15, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5, 5, 2, 2, 2, 2, 2, 2, 14, 14, 16,
+ 10, 10, 15, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 14, 14, 16, 10, 10, 15, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 5, 5, 5, 4, 4, 4, 5,
+ 5, 5, 8, 8, 8, 8, 8, 8, 9, 9, 13, 10, 10, 15, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 8, 8, 8, 8, 8, 9, 9, 13, 10, 10,
+ 15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4, 5, 5, 5,
+ 2, 2, 2, 2, 2, 2, 9, 9, 13, 10, 10, 15, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 9, 9, 13, 10, 10, 15,
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1
+};
+
+static size_t ScoreMtxReqSurrender_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreMtxReqSurrender_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreMtxReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreMtxReqSurrender_Fixture = {
+ .setup = NULL,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = ScoreMtxReqSurrender_Scope,
+ .initial_context = &ScoreMtxReqSurrender_Instance
+};
+
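+/*
+ * Each weight is the product of the state counts of all following
+ * pre-conditions (for example, 288 == 2 * 2 * 2 * 3 * 2 * 2 * 3), so the
+ * weights are the place values of a mixed-radix encoding of the
+ * pre-condition indices.
+ */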
+static const uint16_t ScoreMtxReqSurrender_Weights[] = {
+ 288, 144, 72, 36, 12, 6, 3, 1
+};
+
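+/*
+ * Moves all pre-condition indices which follow the skipped pre-condition to
+ * their last valid state.  The fall-through in the switch is intentional and
+ * lets ScoreMtxReqSurrender_PopEntry() step over the whole skipped block of
+ * variants at once.
+ */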
+static void ScoreMtxReqSurrender_Skip(
+ ScoreMtxReqSurrender_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSurrender_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSurrender_Pre_Recursive_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSurrender_Pre_OwnerCheck_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSurrender_Pre_Owner_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_NA - 1;
+ /* Fall through */
+ case 6:
+ ctx->Map.pci[ 6 ] = ScoreMtxReqSurrender_Pre_Blocked_NA - 1;
+ /* Fall through */
+ case 7:
+ ctx->Map.pci[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_NA - 1;
+ break;
+ }
+}
+
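+/*
+ * Pops the next transition map entry.  After a skipped variant, the index is
+ * recomputed from the weighted pre-condition indices so that it matches the
+ * current pre-condition state tuple again.
+ */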
+static inline ScoreMtxReqSurrender_Entry ScoreMtxReqSurrender_PopEntry(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 8; ++i ) {
+ index += ScoreMtxReqSurrender_Weights[ i ] * ctx->Map.pci[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreMtxReqSurrender_Entries[
+ ScoreMtxReqSurrender_Map[ index ]
+ ];
+}
+
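+/*
+ * Copies the pre-condition indices to the pre-condition states and replaces
+ * the states with NA where the transition map entry marks the pre-condition
+ * as not applicable.
+ */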
+static void ScoreMtxReqSurrender_SetPreConditionStates(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+
+ if ( ctx->Map.entry.Pre_Nested_NA ) {
+ ctx->Map.pcs[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_NA;
+ } else {
+ ctx->Map.pcs[ 5 ] = ctx->Map.pci[ 5 ];
+ }
+
+ ctx->Map.pcs[ 6 ] = ctx->Map.pci[ 6 ];
+
+ if ( ctx->Map.entry.Pre_Priority_NA ) {
+ ctx->Map.pcs[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_NA;
+ } else {
+ ctx->Map.pcs[ 7 ] = ctx->Map.pci[ 7 ];
+ }
+}
+
+static void ScoreMtxReqSurrender_TestVariant(
+ ScoreMtxReqSurrender_Context *ctx
+)
+{
+ ScoreMtxReqSurrender_Pre_Protocol_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Recursive_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 2 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_OwnerCheck_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreMtxReqSurrender_Skip( ctx, 3 );
+ return;
+ }
+
+ ScoreMtxReqSurrender_Pre_Owner_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+ ScoreMtxReqSurrender_Pre_Nested_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+ ScoreMtxReqSurrender_Pre_Blocked_Prepare( ctx, ctx->Map.pcs[ 6 ] );
+ ScoreMtxReqSurrender_Pre_Priority_Prepare( ctx, ctx->Map.pcs[ 7 ] );
+ ScoreMtxReqSurrender_Action( ctx );
+ ScoreMtxReqSurrender_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ ScoreMtxReqSurrender_Post_Owner_Check( ctx, ctx->Map.entry.Post_Owner );
+ ScoreMtxReqSurrender_Post_Surrender_Check(
+ ctx,
+ ctx->Map.entry.Post_Surrender
+ );
+ ScoreMtxReqSurrender_Post_Priority_Check(
+ ctx,
+ ctx->Map.entry.Post_Priority
+ );
+}
+
+static T_fixture_node ScoreMtxReqSurrender_Node;
+
+void ScoreMtxReqSurrender_Run( TQMtxContext *tq_ctx )
+{
+ ScoreMtxReqSurrender_Context *ctx;
+
+ ctx = &ScoreMtxReqSurrender_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreMtxReqSurrender_Node,
+ &ScoreMtxReqSurrender_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pci[ 0 ] = ScoreMtxReqSurrender_Pre_Protocol_None;
+ ctx->Map.pci[ 0 ] < ScoreMtxReqSurrender_Pre_Protocol_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = ScoreMtxReqSurrender_Pre_Discipline_FIFO;
+ ctx->Map.pci[ 1 ] < ScoreMtxReqSurrender_Pre_Discipline_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = ScoreMtxReqSurrender_Pre_Recursive_Allowed;
+ ctx->Map.pci[ 2 ] < ScoreMtxReqSurrender_Pre_Recursive_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = ScoreMtxReqSurrender_Pre_OwnerCheck_Yes;
+ ctx->Map.pci[ 3 ] < ScoreMtxReqSurrender_Pre_OwnerCheck_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = ScoreMtxReqSurrender_Pre_Owner_None;
+ ctx->Map.pci[ 4 ] < ScoreMtxReqSurrender_Pre_Owner_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ for (
+ ctx->Map.pci[ 5 ] = ScoreMtxReqSurrender_Pre_Nested_Yes;
+ ctx->Map.pci[ 5 ] < ScoreMtxReqSurrender_Pre_Nested_NA;
+ ++ctx->Map.pci[ 5 ]
+ ) {
+ for (
+ ctx->Map.pci[ 6 ] = ScoreMtxReqSurrender_Pre_Blocked_Yes;
+ ctx->Map.pci[ 6 ] < ScoreMtxReqSurrender_Pre_Blocked_NA;
+ ++ctx->Map.pci[ 6 ]
+ ) {
+ for (
+ ctx->Map.pci[ 7 ] = ScoreMtxReqSurrender_Pre_Priority_High;
+ ctx->Map.pci[ 7 ] < ScoreMtxReqSurrender_Pre_Priority_NA;
+ ++ctx->Map.pci[ 7 ]
+ ) {
+ ctx->Map.entry = ScoreMtxReqSurrender_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreMtxReqSurrender_SetPreConditionStates( ctx );
+ ScoreMtxReqSurrender_Prepare( ctx );
+ ScoreMtxReqSurrender_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-mtx-surrender.h b/testsuites/validation/tr-mtx-surrender.h
new file mode 100644
index 0000000000..ff8670ba72
--- /dev/null
+++ b/testsuites/validation/tr-mtx-surrender.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreMtxReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_MTX_SURRENDER_H
+#define _TR_MTX_SURRENDER_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreMtxReqSurrender
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Protocol_None,
+ ScoreMtxReqSurrender_Pre_Protocol_Inherit,
+ ScoreMtxReqSurrender_Pre_Protocol_Ceiling,
+ ScoreMtxReqSurrender_Pre_Protocol_MrsP,
+ ScoreMtxReqSurrender_Pre_Protocol_NA
+} ScoreMtxReqSurrender_Pre_Protocol;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Discipline_FIFO,
+ ScoreMtxReqSurrender_Pre_Discipline_Priority,
+ ScoreMtxReqSurrender_Pre_Discipline_NA
+} ScoreMtxReqSurrender_Pre_Discipline;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Recursive_Allowed,
+ ScoreMtxReqSurrender_Pre_Recursive_NotAllowed,
+ ScoreMtxReqSurrender_Pre_Recursive_NA
+} ScoreMtxReqSurrender_Pre_Recursive;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_OwnerCheck_Yes,
+ ScoreMtxReqSurrender_Pre_OwnerCheck_No,
+ ScoreMtxReqSurrender_Pre_OwnerCheck_NA
+} ScoreMtxReqSurrender_Pre_OwnerCheck;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Owner_None,
+ ScoreMtxReqSurrender_Pre_Owner_Caller,
+ ScoreMtxReqSurrender_Pre_Owner_Other,
+ ScoreMtxReqSurrender_Pre_Owner_NA
+} ScoreMtxReqSurrender_Pre_Owner;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Nested_Yes,
+ ScoreMtxReqSurrender_Pre_Nested_No,
+ ScoreMtxReqSurrender_Pre_Nested_NA
+} ScoreMtxReqSurrender_Pre_Nested;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Blocked_Yes,
+ ScoreMtxReqSurrender_Pre_Blocked_No,
+ ScoreMtxReqSurrender_Pre_Blocked_NA
+} ScoreMtxReqSurrender_Pre_Blocked;
+
+typedef enum {
+ ScoreMtxReqSurrender_Pre_Priority_High,
+ ScoreMtxReqSurrender_Pre_Priority_Equal,
+ ScoreMtxReqSurrender_Pre_Priority_Low,
+ ScoreMtxReqSurrender_Pre_Priority_NA
+} ScoreMtxReqSurrender_Pre_Priority;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Status_Ok,
+ ScoreMtxReqSurrender_Post_Status_NotOwner,
+ ScoreMtxReqSurrender_Post_Status_NA
+} ScoreMtxReqSurrender_Post_Status;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Owner_None,
+ ScoreMtxReqSurrender_Post_Owner_Caller,
+ ScoreMtxReqSurrender_Post_Owner_Other,
+ ScoreMtxReqSurrender_Post_Owner_First,
+ ScoreMtxReqSurrender_Post_Owner_NA
+} ScoreMtxReqSurrender_Post_Owner;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Surrender_Nop,
+ ScoreMtxReqSurrender_Post_Surrender_FIFO,
+ ScoreMtxReqSurrender_Post_Surrender_Priority,
+ ScoreMtxReqSurrender_Post_Surrender_PriorityInherit,
+ ScoreMtxReqSurrender_Post_Surrender_NA
+} ScoreMtxReqSurrender_Post_Surrender;
+
+typedef enum {
+ ScoreMtxReqSurrender_Post_Priority_Nop,
+ ScoreMtxReqSurrender_Post_Priority_Low,
+ ScoreMtxReqSurrender_Post_Priority_NA
+} ScoreMtxReqSurrender_Post_Priority;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreMtxReqSurrender_Run( TQMtxContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_MTX_SURRENDER_H */
diff --git a/testsuites/validation/tr-sem-surrender.c b/testsuites/validation/tr-sem-surrender.c
new file mode 100644
index 0000000000..754b88f1e1
--- /dev/null
+++ b/testsuites/validation/tr-sem-surrender.c
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSemReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tr-sem-surrender.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSemReqSurrender spec:/score/sem/req/surrender
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Variant_NA : 1;
+ uint16_t Pre_Discipline_NA : 1;
+ uint16_t Pre_Count_NA : 1;
+ uint16_t Post_Status : 2;
+ uint16_t Post_Dequeue : 2;
+ uint16_t Post_Count : 3;
+} ScoreSemReqSurrender_Entry;
+
+/**
+ * @brief Test context for spec:/score/sem/req/surrender test case.
+ */
+typedef struct {
+ /**
+ * @brief This member specifies the semaphore count before the directive
+ * call.
+ */
+ uint32_t count_before;
+
+ /**
+ * @brief This member contains the return status of the directive call.
+ */
+ Status_Control status;
+
+ /**
+ * @brief This member contains the semaphore count after the directive call.
+ */
+ uint32_t count_after;
+
+ /**
+ * @brief If this member is true, then there shall be threads blocked on the
+ * semaphore.
+ */
+ bool blocked;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreSemReqSurrender_Run() parameter.
+ */
+ TQSemContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreSemReqSurrender_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreSemReqSurrender_Context;
+
+static ScoreSemReqSurrender_Context
+ ScoreSemReqSurrender_Instance;
+
+static const char * const ScoreSemReqSurrender_PreDesc_Variant[] = {
+ "Binary",
+ "Counting",
+ "NA"
+};
+
+static const char * const ScoreSemReqSurrender_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreSemReqSurrender_PreDesc_Count[] = {
+ "LessMax",
+ "Max",
+ "Blocked",
+ "NA"
+};
+
+static const char * const * const ScoreSemReqSurrender_PreDesc[] = {
+ ScoreSemReqSurrender_PreDesc_Variant,
+ ScoreSemReqSurrender_PreDesc_Discipline,
+ ScoreSemReqSurrender_PreDesc_Count,
+ NULL
+};
+
+typedef ScoreSemReqSurrender_Context Context;
+
+static Status_Control Status( const Context *ctx, Status_Control status )
+{
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+}
+
+static void ScoreSemReqSurrender_Pre_Variant_Prepare(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Pre_Variant state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Pre_Variant_Binary: {
+ /*
+ * Where the semaphore is a binary semaphore.
+ */
+ if ( ctx->tq_ctx->variant != TQ_SEM_BINARY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Variant_Counting: {
+ /*
+ * Where the semaphore is a counting semaphore.
+ */
+ if ( ctx->tq_ctx->variant != TQ_SEM_COUNTING ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Variant_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Pre_Discipline_Prepare(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue of the semaphore uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue of the semaphore uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Pre_Count_Prepare(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Pre_Count state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Pre_Count_LessMax: {
+ /*
+ * While the count of the semaphore is less than the maximum count.
+ */
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->variant == TQ_SEM_BINARY ) {
+ ctx->count_before = 0;
+ } else {
+ ctx->count_before = UINT32_MAX - 1;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Count_Max: {
+ /*
+ * While the count of the semaphore is equal to the maximum count.
+ */
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->variant == TQ_SEM_BINARY ) {
+ ctx->count_before = 1;
+ } else {
+ ctx->count_before = UINT32_MAX;
+ }
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Count_Blocked: {
+ /*
+       * While threads are blocked on the semaphore.
+ */
+ ctx->blocked = true;
+ ctx->count_before = 0;
+ break;
+ }
+
+ case ScoreSemReqSurrender_Pre_Count_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Post_Status_Check(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Post_Status state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Post_Status_Ok: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_SUCCESSFUL.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Status_MaxCountExceeded: {
+ /*
+ * The return status of the directive call shall be derived from
+ * STATUS_MAXIMUM_COUNT_EXCEEDED.
+ */
+ T_eq_int( ctx->status, Status( ctx, STATUS_MAXIMUM_COUNT_EXCEEDED ) );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Status_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Post_Dequeue_Check(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Post_Dequeue_FIFO: {
+ /*
+ * The first thread in FIFO order shall be dequeued from the thread
+ * queue.
+ */
+ /* TODO */
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ /* TODO */
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Post_Count_Check(
+ ScoreSemReqSurrender_Context *ctx,
+ ScoreSemReqSurrender_Post_Count state
+)
+{
+ switch ( state ) {
+ case ScoreSemReqSurrender_Post_Count_Zero: {
+ /*
+ * The count of the semaphore shall be zero.
+ */
+ T_eq_u32( ctx->count_after, 0 );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Count_One: {
+ /*
+ * The count of the semaphore shall be one.
+ */
+ T_eq_u32( ctx->count_after, 1 );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Count_PlusOne: {
+ /*
+ * The count of the semaphore shall be incremented by one.
+ */
+ T_eq_u32( ctx->count_after, ctx->count_before + 1 );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Count_Nop: {
+ /*
+ * The count of the semaphore shall not be modified.
+ */
+ T_eq_u32( ctx->count_after, ctx->count_before );
+ break;
+ }
+
+ case ScoreSemReqSurrender_Post_Count_NA:
+ break;
+ }
+}
+
+static void ScoreSemReqSurrender_Setup( ScoreSemReqSurrender_Context *ctx )
+{
+ ctx->tq_ctx->base.wait = TQ_WAIT_FOREVER;
+ TQReset( &ctx->tq_ctx->base );
+}
+
+static void ScoreSemReqSurrender_Setup_Wrap( void *arg )
+{
+ ScoreSemReqSurrender_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreSemReqSurrender_Setup( ctx );
+}
+
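+/*
+ * Sets the semaphore count, optionally lets a worker block on the semaphore,
+ * and then surrenders the semaphore, capturing the status and the resulting
+ * count.
+ */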
+static void ScoreSemReqSurrender_Action( ScoreSemReqSurrender_Context *ctx )
+{
+ TQSemSetCount( ctx->tq_ctx, ctx->count_before );
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ }
+
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ ctx->count_after = TQSemGetCount( ctx->tq_ctx );
+}
+
+static const ScoreSemReqSurrender_Entry
+ScoreSemReqSurrender_Entries[] = {
+ { 0, 0, 0, 0, ScoreSemReqSurrender_Post_Status_Ok,
+ ScoreSemReqSurrender_Post_Dequeue_NA, ScoreSemReqSurrender_Post_Count_One },
+ { 0, 0, 0, 0, ScoreSemReqSurrender_Post_Status_Ok,
+ ScoreSemReqSurrender_Post_Dequeue_FIFO,
+ ScoreSemReqSurrender_Post_Count_Zero },
+ { 0, 0, 0, 0, ScoreSemReqSurrender_Post_Status_Ok,
+ ScoreSemReqSurrender_Post_Dequeue_Priority,
+ ScoreSemReqSurrender_Post_Count_Zero },
+ { 0, 0, 0, 0, ScoreSemReqSurrender_Post_Status_Ok,
+ ScoreSemReqSurrender_Post_Dequeue_NA,
+ ScoreSemReqSurrender_Post_Count_PlusOne },
+ { 0, 0, 0, 0, ScoreSemReqSurrender_Post_Status_MaxCountExceeded,
+ ScoreSemReqSurrender_Post_Dequeue_NA, ScoreSemReqSurrender_Post_Count_Nop }
+};
+
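+/*
+ * Maps each of the 2 * 2 * 3 == 12 pre-condition state tuples to an index
+ * into the entries table above.
+ */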
+static const uint8_t
+ScoreSemReqSurrender_Map[] = {
+ 0, 0, 1, 0, 0, 2, 3, 4, 1, 3, 4, 2
+};
+
+static size_t ScoreSemReqSurrender_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreSemReqSurrender_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreSemReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreSemReqSurrender_Fixture = {
+ .setup = ScoreSemReqSurrender_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = ScoreSemReqSurrender_Scope,
+ .initial_context = &ScoreSemReqSurrender_Instance
+};
+
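+/*
+ * The place values of the pre-condition indices: the count pre-condition has
+ * three states and the discipline pre-condition two, hence the weights
+ * 6 == 2 * 3, 3, and 1.
+ */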
+static const uint8_t ScoreSemReqSurrender_Weights[] = {
+ 6, 3, 1
+};
+
+static void ScoreSemReqSurrender_Skip(
+ ScoreSemReqSurrender_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreSemReqSurrender_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreSemReqSurrender_Pre_Count_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreSemReqSurrender_Entry ScoreSemReqSurrender_PopEntry(
+ ScoreSemReqSurrender_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 3; ++i ) {
+ index += ScoreSemReqSurrender_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreSemReqSurrender_Entries[
+ ScoreSemReqSurrender_Map[ index ]
+ ];
+}
+
+static void ScoreSemReqSurrender_TestVariant(
+ ScoreSemReqSurrender_Context *ctx
+)
+{
+ ScoreSemReqSurrender_Pre_Variant_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreSemReqSurrender_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreSemReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreSemReqSurrender_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreSemReqSurrender_Pre_Count_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreSemReqSurrender_Action( ctx );
+ ScoreSemReqSurrender_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ ScoreSemReqSurrender_Post_Dequeue_Check( ctx, ctx->Map.entry.Post_Dequeue );
+ ScoreSemReqSurrender_Post_Count_Check( ctx, ctx->Map.entry.Post_Count );
+}
+
+static T_fixture_node ScoreSemReqSurrender_Node;
+
+void ScoreSemReqSurrender_Run( TQSemContext *tq_ctx )
+{
+ ScoreSemReqSurrender_Context *ctx;
+
+ ctx = &ScoreSemReqSurrender_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreSemReqSurrender_Node,
+ &ScoreSemReqSurrender_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreSemReqSurrender_Pre_Variant_Binary;
+ ctx->Map.pcs[ 0 ] < ScoreSemReqSurrender_Pre_Variant_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreSemReqSurrender_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < ScoreSemReqSurrender_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreSemReqSurrender_Pre_Count_LessMax;
+ ctx->Map.pcs[ 2 ] < ScoreSemReqSurrender_Pre_Count_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = ScoreSemReqSurrender_PopEntry( ctx );
+ ScoreSemReqSurrender_TestVariant( ctx );
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-sem-surrender.h b/testsuites/validation/tr-sem-surrender.h
new file mode 100644
index 0000000000..b1958c1cc1
--- /dev/null
+++ b/testsuites/validation/tr-sem-surrender.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSemReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_SEM_SURRENDER_H
+#define _TR_SEM_SURRENDER_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreSemReqSurrender
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreSemReqSurrender_Pre_Variant_Binary,
+ ScoreSemReqSurrender_Pre_Variant_Counting,
+ ScoreSemReqSurrender_Pre_Variant_NA
+} ScoreSemReqSurrender_Pre_Variant;
+
+typedef enum {
+ ScoreSemReqSurrender_Pre_Discipline_FIFO,
+ ScoreSemReqSurrender_Pre_Discipline_Priority,
+ ScoreSemReqSurrender_Pre_Discipline_NA
+} ScoreSemReqSurrender_Pre_Discipline;
+
+typedef enum {
+ ScoreSemReqSurrender_Pre_Count_LessMax,
+ ScoreSemReqSurrender_Pre_Count_Max,
+ ScoreSemReqSurrender_Pre_Count_Blocked,
+ ScoreSemReqSurrender_Pre_Count_NA
+} ScoreSemReqSurrender_Pre_Count;
+
+typedef enum {
+ ScoreSemReqSurrender_Post_Status_Ok,
+ ScoreSemReqSurrender_Post_Status_MaxCountExceeded,
+ ScoreSemReqSurrender_Post_Status_NA
+} ScoreSemReqSurrender_Post_Status;
+
+typedef enum {
+ ScoreSemReqSurrender_Post_Dequeue_FIFO,
+ ScoreSemReqSurrender_Post_Dequeue_Priority,
+ ScoreSemReqSurrender_Post_Dequeue_NA
+} ScoreSemReqSurrender_Post_Dequeue;
+
+typedef enum {
+ ScoreSemReqSurrender_Post_Count_Zero,
+ ScoreSemReqSurrender_Post_Count_One,
+ ScoreSemReqSurrender_Post_Count_PlusOne,
+ ScoreSemReqSurrender_Post_Count_Nop,
+ ScoreSemReqSurrender_Post_Count_NA
+} ScoreSemReqSurrender_Post_Count;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue context.
+ */
+void ScoreSemReqSurrender_Run( TQSemContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_SEM_SURRENDER_H */
diff --git a/testsuites/validation/tr-tq-surrender-priority-inherit.c b/testsuites/validation/tr-tq-surrender-priority-inherit.c
new file mode 100644
index 0000000000..69ba973762
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-priority-inherit.c
@@ -0,0 +1,1798 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpbarrier.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-surrender-priority-inherit.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit \
+ * spec:/score/tq/req/surrender-priority-inherit
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef struct {
+ uint32_t Skip : 1;
+ uint32_t Pre_EnqueueVariant_NA : 1;
+ uint32_t Pre_InheritedPriority_NA : 1;
+ uint32_t Pre_PreviousHelping_NA : 1;
+ uint32_t Pre_Scheduler_NA : 1;
+ uint32_t Pre_NewHelping_NA : 1;
+ uint32_t Pre_Suspended_NA : 1;
+ uint32_t Pre_WaitState_NA : 1;
+ uint32_t Post_Dequeue : 1;
+ uint32_t Post_Unblock : 2;
+ uint32_t Post_PreviousOwnerPriority : 2;
+ uint32_t Post_RemoveHelper : 2;
+ uint32_t Post_AddHelper : 2;
+ uint32_t Post_Suspended : 2;
+} ScoreTqReqSurrenderPriorityInherit_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/surrender-priority-inherit test
+ * case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+  CallWithinISRRequest request;
+
+ /**
+ * @brief This member contains the barrier to synchronize the runner and the
+ * worker.
+ */
+ SMP_barrier_Control barrier;
+
+ /**
+ * @brief If this member is true, then all priorities of the previous owner
+   *   inherited through the thread queue shall be dispensable.
+ */
+ bool inherited_priorities_are_dispensible;
+
+ /**
+ * @brief If this member is true, then all helping schedulers of the previous
+ * owner thread gained through the thread queue shall be dispensable.
+ */
+ bool helping_schedules_are_dispensible;
+
+ /**
+ * @brief If this member is true, then the previous owner thread shall use
+   *   a helping scheduler.
+ */
+ bool use_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall gain a
+ * vital helping scheduler.
+ */
+ bool gains_new_helping_scheduler;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be
+ * suspended.
+ */
+ bool suspended;
+
+ /**
+ * @brief If this member is true, then the new owner thread shall be in the
+ * intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
+ * @brief If this member is true, then the action was performed.
+ */
+ bool action_performed;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread before the thread queue surrender operation.
+ */
+ rtems_task_priority priority_before;
+
+ /**
+ * @brief This member contains the current priority of the previous owner
+ * thread after the thread queue surrender operation.
+ */
+ rtems_task_priority priority_after;
+
+ /**
+ * @brief This member contains the identifier of the previous owner thread.
+ */
+ rtems_id previous_owner;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqSurrenderPriorityInherit_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 7 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqSurrenderPriorityInherit_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqSurrenderPriorityInherit_Context;
+
+static ScoreTqReqSurrenderPriorityInherit_Context
+ ScoreTqReqSurrenderPriorityInherit_Instance;
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_EnqueueVariant[] = {
+ "Blocking",
+ "Sticky",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_InheritedPriority[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_PreviousHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_Scheduler[] = {
+ "Home",
+ "Helping",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_NewHelping[] = {
+ "Vital",
+ "Dispensable",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_Suspended[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrenderPriorityInherit_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqSurrenderPriorityInherit_PreDesc[] = {
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_EnqueueVariant,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_InheritedPriority,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_PreviousHelping,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_Scheduler,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_NewHelping,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_Suspended,
+ ScoreTqReqSurrenderPriorityInherit_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqSurrenderPriorityInherit_Context Context;
+
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
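+/*
+ * Surrender() below captures the runner priority before and after the
+ * surrender operation so that the PreviousOwnerPriority post-condition can
+ * decide whether a priority inherited through the thread queue was dropped
+ * or left unchanged.
+ */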
+static void Surrender( Context *ctx )
+{
+ Status_Control status;
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr( TQGetOwner( ctx->tq_ctx ), ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ] );
+ ctx->priority_after = GetSelfPriority();
+}
+
+#if defined(RTEMS_SMP)
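+/*
+ * Delay() and SchedulerBlock() below realize the intend to block wait state:
+ * SchedulerBlock() intercepts the scheduler block operation of the new owner
+ * thread and submits a call within ISR request which busy waits on the B0
+ * and B1 barriers.  The runner can therefore surrender the thread queue
+ * while the new owner is enqueued but did not yet complete the block
+ * operation.
+ */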
+static void Delay( void *arg )
+{
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+}
+
+static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+ ctx->request.handler = Delay;
+ CallWithinISRSubmit( &ctx->request );
+ }
+}
+#endif
+
+static void Setup( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_B_ID, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_LOW );
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_ULTRA_HIGH );
+#else
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+#endif
+
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_B, PRIO_VERY_LOW );
+}
+
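+/*
+ * Action() drives one test variant for the blocking enqueue variant: it
+ * optionally makes the priorities inherited through the thread queue and the
+ * helping schedulers of the runner dispensable via TQ_MUTEX_A, enqueues the
+ * new owner (either blocked or intending to block), optionally lets the new
+ * owner gain a vital helping scheduler through TQ_BLOCKER_D, and finally
+ * surrenders the thread queue.
+ */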
+static void Action( Context *ctx )
+{
+ Status_Control status;
+#if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+#endif
+
+ ctx->action_performed = true;
+ ctx->previous_owner = ctx->tq_ctx->runner_id;
+
+ if (
+ ctx->inherited_priorities_are_dispensible
+#if defined(RTEMS_SMP)
+ || ctx->helping_schedules_are_dispensible
+#endif
+ ) {
+ TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+#if defined(RTEMS_SMP)
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+#endif
+ }
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+#if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ } else {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_HELPER_B_SYNC
+ );
+ TQSynchronizeRunner();
+ }
+
+ /*
+ * In order to run the validation test variant also for the intend to block
+ * wait state, we would need at least three processors. Skip it for now.
+ */
+ if ( ctx->use_helping_scheduler && !ctx->intend_to_block ) {
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = true;
+ TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_BUSY_WAIT );
+
+ while ( rtems_scheduler_get_processor() != 1 ) {
+ /* Wait */
+ }
+
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = false;
+ }
+#else
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_HELPER_B_SYNC
+ );
+ TQSynchronizeRunner();
+#endif
+
+ Surrender( ctx );
+
+#if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ }
+#endif
+}
+
+static void Cleanup( Context *ctx )
+{
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );
+
+#if defined(RTEMS_SMP)
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_SURRENDER
+ );
+ }
+#endif
+
+ if (
+ ctx->inherited_priorities_are_dispensible
+#if defined(RTEMS_SMP)
+ || ctx->helping_schedules_are_dispensible
+#endif
+ ) {
+ TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
+ }
+
+#if defined(RTEMS_SMP)
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+#endif
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+}
+
+static void SetupSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_B_ID, PRIO_LOW );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_NORMAL );
+#endif
+}
+
+static void ActionSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ ctx->action_performed = true;
+ ctx->previous_owner = ctx->tq_ctx->worker_id[ TQ_HELPER_A ];
+
+ SetSelfPriority( PRIO_LOW );
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_MUTEX_A_OBTAIN );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+ }
+
+ /*
+ * Take only the priorities into account which are inherited from the
+ * priority inheritance mutex. This avoids having to deal with the ceiling
+ * priority.
+ */
+ ctx->priority_before = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = true;
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_BUSY_WAIT
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ TQSendAndWaitForIntendToBlock(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_ENQUEUE
+ );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_HELPER_A ] );
+ TQWaitForEventsReceived( ctx->tq_ctx, TQ_BLOCKER_D );
+ TQWaitForIntendToBlock( ctx->tq_ctx, TQ_BLOCKER_D );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_D ] );
+ }
+
+ if ( ctx->use_helping_scheduler ) {
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = false;
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_SCHEDULER_RECORD_START |
+ TQ_EVENT_SURRENDER
+ );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr(
+ TQGetOwner( ctx->tq_ctx ),
+ ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ]
+ );
+ ctx->priority_after = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+#endif
+}
+
+static void CleanupSticky( Context *ctx )
+{
+#if defined(RTEMS_SMP)
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_SURRENDER
+ );
+ }
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+#endif
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Blocking: {
+ /*
+ * Where the thread queue enqueue operation is blocking.
+ */
+ if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_BLOCKS ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Sticky: {
+ /*
+ * Where the thread queue enqueue operation is sticky.
+ */
+ if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_STICKY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital: {
+ /*
+ * While at least one priority inherited through the thread queue for the
+ * previous owner is the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Dispensable: {
+ /*
+ * While all priorities inherited through the thread queue for the
+ * previous owner are not the highest priority of the previous owner.
+ */
+ ctx->inherited_priorities_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the previous owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Dispensable: {
+ /*
+     * While all helping schedulers of the previous owner are not only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->helping_schedules_are_dispensible = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Home: {
+ /*
+ * While the previous owner executes in its home scheduler.
+ */
+ ctx->use_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Helping: {
+ /*
+ * While the previous owner executes in a helping scheduler which is
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->use_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital: {
+ /*
+ * While at least one helping scheduler of the new owner is only
+ * available due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Dispensable: {
+ /*
+     * While all helping schedulers of the new owner are not only available
+ * due to a priority inherited through the thread queue.
+ */
+ ctx->gains_new_helping_scheduler = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes: {
+ /*
+     * While the new owner is suspended.
+ */
+ ctx->suspended = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_No: {
+ /*
+     * While the new owner is not suspended.
+ */
+ ctx->suspended = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked: {
+ /*
+ * While the new owner is in the blocked wait state.
+ */
+ ctx->intend_to_block = false;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the new owner is in the intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ /* Validation is done by spec:/score/tq/req/enqueue-priority */
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes: {
+ /*
+ * The dequeued thread shall be unblocked by the thread queue surrender
+ * operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No: {
+ /*
+ * The dequeued thread shall not be unblocked by the thread queue
+ * surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA:
+ break;
+ }
+}
+
+static void
+ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop: {
+ /*
+ * Each eligible priority of the previous owner which had the highest
+ * priority inherited through the thread queue shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, PRIO_NORMAL );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop: {
+ /*
+ * No eligible priority of the previous owner shall be updated.
+ */
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper state
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes: {
+ /*
+ * Each helping scheduler of the previous owner which was only available
+ * due to a priority inherited through the thread queue shall be removed
+ * from the previous owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No: {
+ /*
+ * No helping scheduler shall be removed from the previous owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_LOW );
+ } else {
+ T_eq_u32( priority, PRIO_HIGH );
+ }
+ #else
+ T_rsc( sc, RTEMS_INVALID_ID );
+ #endif
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper state
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes: {
+ /*
+ * Each helping scheduler of the new owner which is only available due to
+ * a priority inherited through the thread queue shall be added to the
+ * new owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_VERY_HIGH );
+ } else {
+ T_eq_u32( priority, PRIO_LOW );
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No: {
+ /*
+     * No helping scheduler shall be added to the new owner.
+ */
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ #else
+ T_rsc_success( sc );
+ T_eq_u32( priority, PRIO_HIGH );
+ #endif
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Check(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes: {
+ /*
+ * The new owner shall be suspended.
+ */
+ T_true( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No: {
+ /*
+     * The new owner shall not be suspended.
+ */
+ T_false( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ break;
+ }
+
+ case ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Setup(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ ctx->request.arg = ctx;
+ TQReset( ctx->tq_ctx );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ SetupSticky( ctx );
+ } else {
+ Setup( ctx );
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Setup_Wrap( void *arg )
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrenderPriorityInherit_Setup( ctx );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Teardown(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrenderPriorityInherit_Teardown( ctx );
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Prepare(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ ctx->action_performed = false;
+ ctx->inherited_priorities_are_dispensible = true;
+ ctx->helping_schedules_are_dispensible = true;
+ ctx->use_helping_scheduler = false;
+ ctx->gains_new_helping_scheduler = false;
+ ctx->intend_to_block = false;
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Action(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_Cleanup(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ if ( ctx->action_performed ) {
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ CleanupSticky( ctx );
+ } else {
+ Cleanup( ctx );
+ }
+ }
+}
+
+static const ScoreTqReqSurrenderPriorityInherit_Entry
+ScoreTqReqSurrenderPriorityInherit_Entries[] = {
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA },
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#endif
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes },
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, 0, 0, 0, 0, ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA }
+#else
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No }
+#endif
+};
+
+static const uint8_t
+ScoreTqReqSurrenderPriorityInherit_Map[] = {
+ 1, 1, 18, 9, 2, 2, 19, 10, 1, 1, 18, 9, 2, 2, 19, 10, 3, 3, 20, 11, 24, 7,
+ 25, 12, 3, 3, 20, 11, 7, 7, 26, 12, 4, 4, 21, 13, 5, 5, 22, 14, 4, 4, 21, 13,
+ 5, 5, 22, 14, 6, 6, 23, 15, 27, 8, 28, 16, 6, 6, 23, 15, 8, 8, 29, 16, 0, 1,
+ 0, 9, 0, 2, 0, 10, 0, 1, 0, 9, 0, 2, 0, 10, 0, 3, 0, 11, 17, 7, 17, 12, 0, 3,
+ 0, 11, 0, 7, 0, 12, 0, 4, 0, 13, 0, 5, 0, 14, 0, 4, 0, 13, 0, 5, 0, 14, 0, 6,
+ 0, 15, 17, 8, 17, 16, 0, 6, 0, 15, 0, 8, 0, 16
+};
+
+static size_t ScoreTqReqSurrenderPriorityInherit_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ ScoreTqReqSurrenderPriorityInherit_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqSurrenderPriorityInherit_Fixture = {
+ .setup = ScoreTqReqSurrenderPriorityInherit_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqSurrenderPriorityInherit_Teardown_Wrap,
+ .scope = ScoreTqReqSurrenderPriorityInherit_Scope,
+ .initial_context = &ScoreTqReqSurrenderPriorityInherit_Instance
+};
+
+static const uint8_t ScoreTqReqSurrenderPriorityInherit_Weights[] = {
+ 64, 32, 16, 8, 4, 2, 1
+};
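+
+/*
+ * A short note on the indexing scheme (a sketch, not part of the generated
+ * comments): each of the seven preconditions has exactly two non-NA states,
+ * so the weights above are the powers of two and the precondition state
+ * vector pcs[] selects one of the 128 transition map entries.  For example,
+ * the variant { Sticky, Dispensable, Vital, Home, Vital, Yes, Blocked }
+ * yields the index 1 * 64 + 1 * 32 + 0 * 16 + 0 * 8 + 0 * 4 + 0 * 2 +
+ * 0 * 1 == 96.  ScoreTqReqSurrenderPriorityInherit_PopEntry() uses this sum
+ * to resynchronize the map index after a variant was skipped.
+ */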
+
+static void ScoreTqReqSurrenderPriorityInherit_Skip(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA - 1;
+ /* Fall through */
+ case 3:
+ ctx->Map.pcs[ 3 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_NA - 1;
+ /* Fall through */
+ case 4:
+ ctx->Map.pcs[ 4 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA - 1;
+ /* Fall through */
+ case 5:
+ ctx->Map.pcs[ 5 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA - 1;
+ /* Fall through */
+ case 6:
+ ctx->Map.pcs[ 6 ] = ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA - 1;
+ break;
+ }
+}
+
+static inline ScoreTqReqSurrenderPriorityInherit_Entry
+ScoreTqReqSurrenderPriorityInherit_PopEntry(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 7; ++i ) {
+ index += ScoreTqReqSurrenderPriorityInherit_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreTqReqSurrenderPriorityInherit_Entries[
+ ScoreTqReqSurrenderPriorityInherit_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqSurrenderPriorityInherit_TestVariant(
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx
+)
+{
+ ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrenderPriorityInherit_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 3 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Prepare(
+ ctx,
+ ctx->Map.pcs[ 4 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Prepare(
+ ctx,
+ ctx->Map.pcs[ 5 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Prepare(
+ ctx,
+ ctx->Map.pcs[ 6 ]
+ );
+ ScoreTqReqSurrenderPriorityInherit_Action( ctx );
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Check(
+ ctx,
+ ctx->Map.entry.Post_Dequeue
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Check(
+ ctx,
+ ctx->Map.entry.Post_Unblock
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Check(
+ ctx,
+ ctx->Map.entry.Post_PreviousOwnerPriority
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_RemoveHelper
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Check(
+ ctx,
+ ctx->Map.entry.Post_AddHelper
+ );
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Check(
+ ctx,
+ ctx->Map.entry.Post_Suspended
+ );
+}
+
+static T_fixture_node ScoreTqReqSurrenderPriorityInherit_Node;
+
+void ScoreTqReqSurrenderPriorityInherit_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqSurrenderPriorityInherit_Context *ctx;
+
+ ctx = &ScoreTqReqSurrenderPriorityInherit_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqSurrenderPriorityInherit_Node,
+ &ScoreTqReqSurrenderPriorityInherit_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Blocking;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 3 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Home;
+ ctx->Map.pcs[ 3 ] < ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_NA;
+ ++ctx->Map.pcs[ 3 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 4 ] = ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital;
+ ctx->Map.pcs[ 4 ] < ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA;
+ ++ctx->Map.pcs[ 4 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 5 ] = ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes;
+ ctx->Map.pcs[ 5 ] < ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA;
+ ++ctx->Map.pcs[ 5 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 6 ] = ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked;
+ ctx->Map.pcs[ 6 ] < ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA;
+ ++ctx->Map.pcs[ 6 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqSurrenderPriorityInherit_PopEntry(
+ ctx
+ );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqSurrenderPriorityInherit_Prepare( ctx );
+ ScoreTqReqSurrenderPriorityInherit_TestVariant( ctx );
+ ScoreTqReqSurrenderPriorityInherit_Cleanup( ctx );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-surrender-priority-inherit.h b/testsuites/validation/tr-tq-surrender-priority-inherit.h
new file mode 100644
index 0000000000..57129a30eb
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender-priority-inherit.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_SURRENDER_PRIORITY_INHERIT_H
+#define _TR_TQ_SURRENDER_PRIORITY_INHERIT_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqSurrenderPriorityInherit
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Blocking,
+ ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_Sticky,
+ ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_EnqueueVariant;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_InheritedPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_PreviousHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Home,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_Helping,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_Scheduler;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Vital,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_Dispensable,
+ ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_NewHelping;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_No,
+ ScoreTqReqSurrenderPriorityInherit_Pre_Suspended_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_Suspended;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_Blocked,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_IntendToBlock,
+ ScoreTqReqSurrenderPriorityInherit_Pre_WaitState_NA
+} ScoreTqReqSurrenderPriorityInherit_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_Priority,
+ ScoreTqReqSurrenderPriorityInherit_Post_Dequeue_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Dequeue;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Unblock_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Unblock;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Drop,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_Nop,
+ ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_PreviousOwnerPriority;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_RemoveHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_AddHelper_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_AddHelper;
+
+typedef enum {
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_Yes,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_No,
+ ScoreTqReqSurrenderPriorityInherit_Post_Suspended_NA
+} ScoreTqReqSurrenderPriorityInherit_Post_Suspended;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
+ */
+void ScoreTqReqSurrenderPriorityInherit_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_SURRENDER_PRIORITY_INHERIT_H */
diff --git a/testsuites/validation/tr-tq-surrender.c b/testsuites/validation/tr-tq-surrender.c
new file mode 100644
index 0000000000..c7b6b4e9b3
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/smpbarrier.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tr-tq-surrender.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreTqReqSurrender spec:/score/tq/req/surrender
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidation0
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
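+/*
+ * A transition map entry encodes in bit-fields whether the variant is
+ * skipped, which pre-conditions are not applicable, and the expected
+ * post-condition states.
+ */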
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_HasOwner_NA : 1;
+ uint8_t Pre_Discipline_NA : 1;
+ uint8_t Pre_WaitState_NA : 1;
+ uint8_t Post_Dequeue : 2;
+ uint8_t Post_Unblock : 2;
+} ScoreTqReqSurrender_Entry;
+
+/**
+ * @brief Test context for spec:/score/tq/req/surrender test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the call within ISR request.
+ */
+  CallWithinISRRequest request;
+
+ /**
+ * @brief This member contains the barrier to synchronize the runner and the
+ * worker.
+ */
+ SMP_barrier_Control barrier;
+
+ /**
+ * @brief If this member is true, then the dequeued thread shall be in the
+ * intend to block wait state.
+ */
+ bool intend_to_block;
+
+ /**
+   * @brief This member contains the expected counter of worker B.
+ */
+ uint32_t expected_blocker_b_counter;
+
+ /**
+ * @brief This member contains a copy of the corresponding
+ * ScoreTqReqSurrender_Run() parameter.
+ */
+ TQContext *tq_ctx;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ ScoreTqReqSurrender_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} ScoreTqReqSurrender_Context;
+
+static ScoreTqReqSurrender_Context
+ ScoreTqReqSurrender_Instance;
+
+static const char * const ScoreTqReqSurrender_PreDesc_HasOwner[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrender_PreDesc_Discipline[] = {
+ "FIFO",
+ "Priority",
+ "NA"
+};
+
+static const char * const ScoreTqReqSurrender_PreDesc_WaitState[] = {
+ "Blocked",
+ "IntendToBlock",
+ "NA"
+};
+
+static const char * const * const ScoreTqReqSurrender_PreDesc[] = {
+ ScoreTqReqSurrender_PreDesc_HasOwner,
+ ScoreTqReqSurrender_PreDesc_Discipline,
+ ScoreTqReqSurrender_PreDesc_WaitState,
+ NULL
+};
+
+typedef ScoreTqReqSurrender_Context Context;
+
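+/*
+ * Get the thread control block of the next thread which was unblocked
+ * according to the scheduler event log of the thread queue context.
+ */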
+static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+{
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+}
+
+static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+{
+ return ctx->tq_ctx->worker_tcb[ worker ];
+}
+
+static void Surrender( void *arg )
+{
+ Context *ctx;
+ Status_Control status;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+}
+
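+/*
+ * On SMP, delay the blocking worker within interrupt context at the two
+ * barriers so that the runner can surrender the thread queue while the
+ * worker is still in the intend to block wait state.
+ */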
+#if defined(RTEMS_SMP)
+static void Delay( void *arg )
+{
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+}
+#endif
+
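+/*
+ * This scheduler event handler fires right before the enqueueing worker is
+ * blocked by the scheduler.  It submits a call within ISR request so that
+ * the worker is still in the intend to block wait state when the thread
+ * queue is surrendered: on SMP, the Delay handler parks the interrupted
+ * processor at the barrier while the runner surrenders on another
+ * processor; on uniprocessor, the Surrender handler performs the surrender
+ * directly within interrupt context.
+ */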
+static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+#if defined(RTEMS_SMP)
+ ctx->request.handler = Delay;
+#else
+ ctx->request.handler = Surrender;
+#endif
+ CallWithinISRSubmit( &ctx->request );
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_HasOwner_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_HasOwner state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_HasOwner_Yes: {
+ /*
+ * Where the thread queue has a previous owner thread.
+ */
+ if ( ctx->tq_ctx->get_owner == NULL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_HasOwner_No: {
+ /*
+       * Where the thread queue has no owner thread.
+ */
+ if ( ctx->tq_ctx->get_owner != NULL ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_HasOwner_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_Discipline_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_Discipline state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_Discipline_FIFO: {
+ /*
+ * Where the thread queue uses the FIFO discipline.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_Discipline_Priority: {
+ /*
+ * Where the thread queue uses the priority discipline.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_PRIORITY ) {
+ ctx->Map.skip = true;
+ }
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_Discipline_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Pre_WaitState_Prepare(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Pre_WaitState state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Pre_WaitState_Blocked: {
+ /*
+ * While the dequeued thread is in the blocked wait state.
+ */
+ ctx->intend_to_block = false;
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_WaitState_IntendToBlock: {
+ /*
+ * While the dequeued thread is in the intend to block wait state.
+ */
+ ctx->intend_to_block = true;
+ break;
+ }
+
+ case ScoreTqReqSurrender_Pre_WaitState_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Post_Dequeue_Check(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Post_Dequeue state
+)
+{
+ switch ( state ) {
+ case ScoreTqReqSurrender_Post_Dequeue_FIFO: {
+ /*
+ * The first thread in FIFO order shall be dequeued from the thread
+ * queue.
+ */
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32(
+ TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ),
+ ctx->expected_blocker_b_counter
+ );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Dequeue_Priority: {
+ /*
+ * The first thread in priority order shall be dequeued from the thread
+ * queue.
+ */
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ), 2 );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Dequeue_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Post_Unblock_Check(
+ ScoreTqReqSurrender_Context *ctx,
+ ScoreTqReqSurrender_Post_Unblock state
+)
+{
+ size_t i;
+
+ i = 0;
+
+ switch ( state ) {
+ case ScoreTqReqSurrender_Post_Unblock_Yes: {
+ /*
+       * The dequeued thread shall be unblocked by the surrender operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Unblock_No: {
+ /*
+       * The dequeued thread shall not be unblocked by the surrender
+       * operation.
+ */
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ break;
+ }
+
+ case ScoreTqReqSurrender_Post_Unblock_NA:
+ break;
+ }
+}
+
+static void ScoreTqReqSurrender_Setup( ScoreTqReqSurrender_Context *ctx )
+{
+ ctx->request.arg = ctx;
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+
+ #if defined(RTEMS_SMP)
+ /*
+   * For mutexes with the priority ceiling protocol, we need a scheduler
+   * with two processors to set up the intend to block wait state.
+ */
+ RemoveProcessor( SCHEDULER_B_ID, 1 );
+ AddProcessor( SCHEDULER_A_ID, 1 );
+ #endif
+}
+
+static void ScoreTqReqSurrender_Setup_Wrap( void *arg )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrender_Setup( ctx );
+}
+
+static void ScoreTqReqSurrender_Teardown( ScoreTqReqSurrender_Context *ctx )
+{
+ TQReset( ctx->tq_ctx );
+
+ #if defined(RTEMS_SMP)
+ RemoveProcessor( SCHEDULER_A_ID, 1 );
+ AddProcessor( SCHEDULER_B_ID, 1 );
+ #endif
+}
+
+static void ScoreTqReqSurrender_Teardown_Wrap( void *arg )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ ScoreTqReqSurrender_Teardown( ctx );
+}
+
+static void ScoreTqReqSurrender_Action( ScoreTqReqSurrender_Context *ctx )
+{
+ Status_Control status;
+
+ TQResetCounter( ctx->tq_ctx );
+ ctx->expected_blocker_b_counter = 0;
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ if ( ctx->intend_to_block ) {
+ #if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+ #endif
+
+ /*
+ * In uniprocessor configurations, it is impossible to dequeue a thread
+     * in FIFO order which is in the intend to block wait state, so for
+     * the FIFO discipline this variant runs with just one worker.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ ctx->expected_blocker_b_counter = 2;
+ }
+
+ #if defined(RTEMS_SMP)
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+ #endif
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+ #if defined(RTEMS_SMP)
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ Surrender( ctx );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ #endif
+ } else {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );
+
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_B );
+ ctx->expected_blocker_b_counter = 2;
+
+ Surrender( ctx );
+ }
+
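+  /*
+   * Let the dequeued workers surrender the thread queue again so that the
+   * next variant starts with an empty thread queue.
+   */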
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->expected_blocker_b_counter != 0 ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_SURRENDER
+ );
+ }
+}
+
+static const ScoreTqReqSurrender_Entry
+ScoreTqReqSurrender_Entries[] = {
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_Yes },
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_Yes },
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
+ ScoreTqReqSurrender_Post_Unblock_NA },
+#else
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_No },
+#endif
+#if !defined(RTEMS_SMP)
+ { 1, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_NA,
+ ScoreTqReqSurrender_Post_Unblock_NA },
+#else
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_No },
+#endif
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Unblock_No },
+ { 0, 0, 0, 0, ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Unblock_No }
+};
+
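+/*
+ * Map each combination of pre-condition states to its entry in the
+ * transition table above.
+ */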
+static const uint8_t
+ScoreTqReqSurrender_Map[] = {
+ 0, 2, 1, 3, 0, 4, 1, 5
+};
+
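+/*
+ * While the action loop executes, report the current pre-condition states
+ * as the test scope so that failing assertions identify the variant.
+ */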
+static size_t ScoreTqReqSurrender_Scope( void *arg, char *buf, size_t n )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( ScoreTqReqSurrender_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture ScoreTqReqSurrender_Fixture = {
+ .setup = ScoreTqReqSurrender_Setup_Wrap,
+ .stop = NULL,
+ .teardown = ScoreTqReqSurrender_Teardown_Wrap,
+ .scope = ScoreTqReqSurrender_Scope,
+ .initial_context = &ScoreTqReqSurrender_Instance
+};
+
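+/*
+ * The weight of a pre-condition is the number of variants spanned by the
+ * pre-conditions which follow it.  The weighted sum of the pre-condition
+ * state indices yields the transition map index of a variant.
+ */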
+static const uint8_t ScoreTqReqSurrender_Weights[] = {
+ 4, 2, 1
+};
+
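+/*
+ * Fast-forward the pre-condition states following the rejected one to
+ * their last state so that the action loops advance past all variants
+ * which share the rejected state.
+ */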
+static void ScoreTqReqSurrender_Skip(
+ ScoreTqReqSurrender_Context *ctx,
+ size_t index
+)
+{
+ switch ( index + 1 ) {
+ case 1:
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_NA - 1;
+ /* Fall through */
+ case 2:
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_NA - 1;
+ break;
+ }
+}
+
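+/*
+ * Pop the next transition map entry.  If the previous variant was skipped
+ * by a pre-condition filter, recompute the map index from the weighted
+ * pre-condition states instead of continuing sequentially.
+ */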
+static inline ScoreTqReqSurrender_Entry ScoreTqReqSurrender_PopEntry(
+ ScoreTqReqSurrender_Context *ctx
+)
+{
+ size_t index;
+
+ if ( ctx->Map.skip ) {
+ size_t i;
+
+ ctx->Map.skip = false;
+ index = 0;
+
+ for ( i = 0; i < 3; ++i ) {
+ index += ScoreTqReqSurrender_Weights[ i ] * ctx->Map.pcs[ i ];
+ }
+ } else {
+ index = ctx->Map.index;
+ }
+
+ ctx->Map.index = index + 1;
+
+ return ScoreTqReqSurrender_Entries[
+ ScoreTqReqSurrender_Map[ index ]
+ ];
+}
+
+static void ScoreTqReqSurrender_TestVariant( ScoreTqReqSurrender_Context *ctx )
+{
+ ScoreTqReqSurrender_Pre_HasOwner_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrender_Skip( ctx, 0 );
+ return;
+ }
+
+ ScoreTqReqSurrender_Pre_Discipline_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+
+ if ( ctx->Map.skip ) {
+ ScoreTqReqSurrender_Skip( ctx, 1 );
+ return;
+ }
+
+ ScoreTqReqSurrender_Pre_WaitState_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ ScoreTqReqSurrender_Action( ctx );
+ ScoreTqReqSurrender_Post_Dequeue_Check( ctx, ctx->Map.entry.Post_Dequeue );
+ ScoreTqReqSurrender_Post_Unblock_Check( ctx, ctx->Map.entry.Post_Unblock );
+}
+
+static T_fixture_node ScoreTqReqSurrender_Node;
+
+void ScoreTqReqSurrender_Run( TQContext *tq_ctx )
+{
+ ScoreTqReqSurrender_Context *ctx;
+
+ ctx = &ScoreTqReqSurrender_Instance;
+ ctx->tq_ctx = tq_ctx;
+
+ ctx = T_push_fixture(
+ &ScoreTqReqSurrender_Node,
+ &ScoreTqReqSurrender_Fixture
+ );
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+ ctx->Map.skip = false;
+
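+  /*
+   * Iterate over the cartesian product of the pre-condition states and
+   * execute one test variant for each combination which is not skipped.
+   */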
+ for (
+ ctx->Map.pcs[ 0 ] = ScoreTqReqSurrender_Pre_HasOwner_Yes;
+ ctx->Map.pcs[ 0 ] < ScoreTqReqSurrender_Pre_HasOwner_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = ScoreTqReqSurrender_Pre_Discipline_FIFO;
+ ctx->Map.pcs[ 1 ] < ScoreTqReqSurrender_Pre_Discipline_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = ScoreTqReqSurrender_Pre_WaitState_Blocked;
+ ctx->Map.pcs[ 2 ] < ScoreTqReqSurrender_Pre_WaitState_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = ScoreTqReqSurrender_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ ScoreTqReqSurrender_TestVariant( ctx );
+ }
+ }
+ }
+
+ T_pop_fixture();
+}
+
+/** @} */
diff --git a/testsuites/validation/tr-tq-surrender.h b/testsuites/validation/tr-tq-surrender.h
new file mode 100644
index 0000000000..fd64bbfa02
--- /dev/null
+++ b/testsuites/validation/tr-tq-surrender.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreTqReqSurrender
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifndef _TR_TQ_SURRENDER_H
+#define _TR_TQ_SURRENDER_H
+
+#include "tx-thread-queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestCaseScoreTqReqSurrender
+ *
+ * @{
+ */
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_HasOwner_Yes,
+ ScoreTqReqSurrender_Pre_HasOwner_No,
+ ScoreTqReqSurrender_Pre_HasOwner_NA
+} ScoreTqReqSurrender_Pre_HasOwner;
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_Discipline_FIFO,
+ ScoreTqReqSurrender_Pre_Discipline_Priority,
+ ScoreTqReqSurrender_Pre_Discipline_NA
+} ScoreTqReqSurrender_Pre_Discipline;
+
+typedef enum {
+ ScoreTqReqSurrender_Pre_WaitState_Blocked,
+ ScoreTqReqSurrender_Pre_WaitState_IntendToBlock,
+ ScoreTqReqSurrender_Pre_WaitState_NA
+} ScoreTqReqSurrender_Pre_WaitState;
+
+typedef enum {
+ ScoreTqReqSurrender_Post_Dequeue_FIFO,
+ ScoreTqReqSurrender_Post_Dequeue_Priority,
+ ScoreTqReqSurrender_Post_Dequeue_NA
+} ScoreTqReqSurrender_Post_Dequeue;
+
+typedef enum {
+ ScoreTqReqSurrender_Post_Unblock_Yes,
+ ScoreTqReqSurrender_Post_Unblock_No,
+ ScoreTqReqSurrender_Post_Unblock_NA
+} ScoreTqReqSurrender_Post_Unblock;
+
+/**
+ * @brief Runs the parameterized test case.
+ *
+ * @param[in,out] tq_ctx is the thread queue test context.
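+ *
+ * A minimal usage sketch (assuming a TQContext, here named queue_ctx, which
+ * the calling test case has already initialized):
+ *
+ * @code
+ * ScoreTqReqSurrender_Run( &queue_ctx );
+ * @endcode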
+ */
+void ScoreTqReqSurrender_Run( TQContext *tq_ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TR_TQ_SURRENDER_H */