author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-09-20 15:48:41 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-09-24 13:13:51 +0200
commit     3c60360142c7adc84e6835eb548d6237e3f5a360
tree       e4aeca53e5d60206bcf121117c8c080c755bb021
parent     spec: Move futex specification
download   rtems-central-3c60360142c7adc84e6835eb548d6237e3f5a360.tar.bz2
spec: Generalize semaphore release
-rw-r--r--  spec/rtems/sem/req/release.yml                    1456
-rw-r--r--  spec/score/mtx/req/surrender.yml                   753
-rw-r--r--  spec/score/sem/req/surrender.yml                   267
-rw-r--r--  spec/score/status/if/maximum-count-exceeded.yml     12
-rw-r--r--  spec/score/status/if/not-owner.yml                  12
-rw-r--r--  spec/score/tq/req/surrender-priority-inherit.yml   888
-rw-r--r--  spec/score/tq/req/surrender.yml                    356
7 files changed, 2438 insertions, 1306 deletions
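
For orientation (not part of the commit): the specifications in this change validate the
classic API directive rtems_semaphore_release(). The following minimal sketch shows the
directive together with two of the error cases covered below, using only documented
classic API calls; the semaphore name and attribute choices are illustrative.

    #include <rtems.h>

    /* Illustrative sketch only: the directive validated by the
     * specifications below and two of its error cases. */
    static void semaphore_release_orientation( void )
    {
      rtems_status_code sc;
      rtems_id          id;

      /* An identifier that is not associated with a semaphore is
       * rejected with RTEMS_INVALID_ID (Action: InvId below). */
      sc = rtems_semaphore_release( 0xffffffff );
      /* sc == RTEMS_INVALID_ID */

      /* A binary semaphore (mutex) checks the owner: releasing it
       * without having obtained it fails (Action: MtxSurrender below). */
      sc = rtems_semaphore_create(
        rtems_build_name( 'M', 'U', 'T', 'X' ),   /* name is illustrative */
        1,                                        /* created unlocked */
        RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY,
        0,
        &id
      );

      if ( sc == RTEMS_SUCCESSFUL ) {
        sc = rtems_semaphore_release( id );
        /* sc == RTEMS_NOT_OWNER_OF_RESOURCE */
        (void) rtems_semaphore_delete( id );
      }
    }
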
diff --git a/spec/rtems/sem/req/release.yml b/spec/rtems/sem/req/release.yml
index 0fdf801d..09ad3df6 100644
--- a/spec/rtems/sem/req/release.yml
+++ b/spec/rtems/sem/req/release.yml
@@ -7,182 +7,92 @@ links:
- role: interface-function
uid: ../if/release
post-conditions:
-- name: Status
+- name: Action
states:
- - name: Ok
- test-code: |
- T_rsc_success( ctx->status );
- text: |
- The return status of ${../if/release:/name} shall be
- ${../../status/if/successful:/name}.
- name: InvId
test-code: |
- T_rsc( ctx->status, RTEMS_INVALID_ID );
+ sc = rtems_semaphore_release( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
text: |
The return status of ${../if/release:/name} shall be
${../../status/if/invalid-id:/name}.
- - name: NotOwner
- test-code: |
- T_rsc( ctx->status, RTEMS_NOT_OWNER_OF_RESOURCE );
- text: |
- The return status of ${../if/release:/name} shall be
- ${../../status/if/not-owner-of-resource:/name}.
- - name: Unsat
- test-code: |
- T_rsc( ctx->status, RTEMS_UNSATISFIED );
- text: |
- The return status of ${../if/release:/name} shall be
- ${../../status/if/unsatisfied:/name}.
- test-epilogue: null
- test-prologue: null
-- name: Count
- states:
- - name: Zero
- test-code: |
- T_eq_u32( ctx->sem_count, 0 );
- text: |
- The count of the semaphore shall be zero.
- - name: One
- test-code: |
- T_eq_u32( ctx->sem_count, 1 );
- text: |
- The count of the semaphore shall be one.
- - name: PlusOne
- test-code: |
- T_eq_u32( ctx->sem_count, ctx->count + 1 );
- text: |
- The count of the semaphore shall be incremented by one.
- - name: Max
- test-code: |
- T_eq_u32( ctx->sem_count, UINT32_MAX );
- text: |
- The count of the semaphore shall be the maximum count.
- - name: Nop
- test-code: |
- T_eq_u32( ctx->sem_count, ctx->count );
- text: |
- The count of the semaphore shall not be modified.
+ - name: BinarySurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_BINARY;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ${/score/sem/req/surrender:/test-run}( &ctx->tq_sem_ctx );
+ text: |
+ The calling task shall surrender the binary semaphore as specified by
+ ${/score/sem/req/surrender}.
+ - name: CountingSurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = NULL;
+ ctx->tq_sem_ctx.variant = TQ_SEM_COUNTING;
+ ctx->tq_sem_ctx.get_count = TQSemGetCountClassic;
+ ctx->tq_sem_ctx.set_count = TQSemSetCountClassic;
+ ${/score/sem/req/surrender:/test-run}( &ctx->tq_sem_ctx );
+ text: |
+ The calling task shall surrender the counting semaphore as specified by
+ ${/score/sem/req/surrender}.
+ - name: MtxSurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_NO_PROTOCOL;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ${/score/mtx/req/surrender:/test-run}( &ctx->tq_mtx_ctx );
+ text: |
+ The calling task shall surrender the mutex as specified by
+ ${/score/mtx/req/surrender} where an enqueue blocks and a recursive seize
+ is allowed.
+ - name: InheritMtxSurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_INHERIT;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+ ${/score/mtx/req/surrender:/test-run}( &ctx->tq_mtx_ctx );
+ text: |
+ The calling task shall surrender the mutex as specified by
+      ${/score/mtx/req/surrender} where an enqueue blocks, a recursive seize is
+      allowed, and the priority inheritance locking protocol is used.
+ - name: CeilingMtxSurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_PRIORITY_CEILING;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ${/score/mtx/req/surrender:/test-run}( &ctx->tq_mtx_ctx );
+ text: |
+ The calling task shall surrender the mutex as specified by
+ ${/score/mtx/req/surrender} where an enqueue blocks, a recursive seize is
+ allowed, and a priority ceiling is used.
+ - name: MrsPMtxSurrender
+ test-code: |
+ ctx->tq_ctx.enqueue_variant = TQ_ENQUEUE_STICKY;
+ ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+ ctx->tq_mtx_ctx.protocol = TQ_MTX_MRSP;
+ ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
+ ctx->tq_mtx_ctx.owner_check = TQ_MTX_CHECKS_OWNER;
+ ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+ ${/score/mtx/req/surrender:/test-run}( &ctx->tq_mtx_ctx );
+ text: |
+ The calling task shall surrender the mutex as specified by
+ ${/score/mtx/req/surrender} where an enqueue is sticky, a recursive seize
+ returns an error status, and a priority ceiling is used.
test-epilogue: null
- test-prologue: null
-- name: Owner
- states:
- - name: 'No'
- test-code: |
- T_eq_u32( ctx->owner, 0 );
- text: |
- The semaphore shall not have an owner.
- - name: Self
- test-code: |
- T_eq_u32( ctx->owner, ctx->runner_id );
- text: |
- The owner of the semaphore shall be the calling task.
- - name: Other
- test-code: |
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_B ] );
- text: |
- The owner of the semaphore shall be the other task.
- - name: FIFO
- test-code: |
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_A ] );
- text: |
- The owner of the semaphore shall be the first task unblocked in FIFO
- order.
- - name: Priority
- test-code: |
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_C ] );
- text: |
- The owner of the semaphore shall be the first task unblocked in priority
- order.
- - name: MrsP
- test-code: |
- if ( CanUseThirdScheduler() ) {
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_A ] );
- } else {
- T_eq_u32( ctx->owner, ctx->worker_id[ BLOCKER_B ] );
- }
- text: |
- The owner of the semaphore shall be the first task unblocked in MrsP
- priority order.
- test-epilogue: null
- test-prologue: null
-- name: Next
- states:
- - name: FIFO
- test-code: |
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 3 );
- text: |
- The first blocked task in FIFO order shall be made ready.
- - name: Priority
- test-code: |
- if ( ctx->owner_other ) {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 0 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 0 );
- } else {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 3 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 1 );
- }
- text: |
- The first blocked task in priority order shall be made ready.
- - name: MrsP
- test-code: |
- if ( CanUseThirdScheduler() ) {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 2 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 3 );
- } else {
- T_eq_u32( ctx->obtain_counter[ BLOCKER_A ], 0 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_B ], 1 );
- T_eq_u32( ctx->obtain_counter[ BLOCKER_C ], 0 );
- }
- text: |
- The first blocked task in MrsP priority order shall be made ready.
- test-epilogue: null
- test-prologue: null
-- name: CallerPrio
- states:
- - name: Inherit
- test-code: |
- T_eq_u32( ctx->after_release_priority, PRIO_ULTRA_HIGH );
- text: |
- The current priority of the calling task shall be the inherited priority
- of the semaphore.
- - name: Ceiling
- test-code: |
- T_eq_u32( ctx->after_release_priority, PRIO_ULTRA_HIGH );
- text: |
- The current priority of the calling task shall be the ceiling priority of
- the semaphore.
- - name: Real
- test-code: |
- T_eq_u32( ctx->after_release_priority, PRIO_NORMAL );
- text: |
- The current priority of the calling task shall be its real priority.
- test-epilogue: null
- test-prologue: null
-- name: CallerCPU
- states:
- - name: Home
- test-code: |
- T_eq_u32( ctx->after_release_scheduler_id, ctx->runner_scheduler_id );
- text: |
- The calling task shall execute on a processor owned by its home
- scheduler.
- - name: Other
- test-code: |
- if ( IsMrsP( ctx ) && CanUseThirdScheduler() ) {
- T_eq_u32( ctx->after_release_scheduler_id, ctx->third_scheduler_id );
- } else {
- T_eq_u32( ctx->after_release_scheduler_id, ctx->other_scheduler_id );
- }
- text: |
- The calling task shall execute on a processor not owned by its home
- scheduler.
- test-epilogue: null
- test-prologue: null
+ test-prologue: |
+ rtems_status_code sc;
pre-conditions:
- name: Class
states:
@@ -224,122 +134,28 @@ pre-conditions:
- name: FIFO
test-code: |
ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
text: |
While the semaphore uses the FIFO task wait queue discipline.
- name: Priority
test-code: |
ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
text: |
While the semaphore uses the priority task wait queue discipline.
test-epilogue: null
test-prologue: null
-- name: Count
- states:
- - name: LessMax
- test-code: |
- if ( ( ctx->attribute_set & RTEMS_SIMPLE_BINARY_SEMAPHORE ) != 0 ) {
- ctx->count = 0;
- } else {
- ctx->count = UINT32_MAX - 1;
- }
- text: |
- While the count of the semaphore is less than the maximum count.
- - name: Max
- test-code: |
- if ( ( ctx->attribute_set & RTEMS_SIMPLE_BINARY_SEMAPHORE ) != 0 ) {
- ctx->count = 1;
- } else {
- ctx->count = UINT32_MAX;
- }
- text: |
- While the count of the semaphore is equal to the maximum count.
- - name: Blocked
- test-code: |
- ctx->blocked = true;
- ctx->count = 0;
- text: |
- While the semaphore has tasks blocked on the semaphore.
- test-epilogue: null
- test-prologue: null
-- name: Owner
- states:
- - name: 'No'
- test-code: |
- ctx->count = 1;
- text: |
- While the semaphore has no owner.
- - name: Self
- test-code: |
- ctx->count = 0;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did not recursively obtain the semaphore.
- - name: Other
- test-code: |
- ctx->count = 1;
- ctx->owner_other = true;
- text: |
- While a task other than the calling task is the owner of the semaphore.
- - name: Blocked
- test-code: |
- ctx->count = 0;
- ctx->blocked = true;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did not recursively obtain the semaphore, while tasks are blocked on
- the semaphore.
- - name: Nested
- test-code: |
- ctx->count = 0;
- ctx->nested = true;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did recursively obtain the semaphore.
- - name: BlockedNested
- test-code: |
- ctx->count = 0;
- ctx->blocked = true;
- ctx->nested = true;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did recursively obtain the semaphore, while tasks are blocked on the
- semaphore.
- - name: BlockedOther
- test-code: |
- ctx->count = 0;
- ctx->blocked = true;
- ctx->other_scheduler = true;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did not recursively obtain the semaphore, while tasks are blocked on
- the semaphore, while the calling task executes on a processor owned by a
- scheduler other than its home scheduler due to a locking protocol
- mechanism provided by the semaphore.
- - name: BlockedNestedOther
- test-code: |
- ctx->count = 0;
- ctx->blocked = true;
- ctx->nested = true;
- ctx->other_scheduler = true;
- text: |
- While the calling task is the owner of the semaphore, while the calling
- task did recursively obtain the semaphore, while tasks are blocked on the
- semaphore, while the calling task executes on a processor owned by a
- scheduler other than its home scheduler due to a locking protocol
- mechanism provided by the semaphore.
- test-epilogue: null
- test-prologue: null
- name: Id
states:
- name: Valid
test-code: |
- ctx->valid_id = true;
+ /* Nothing to prepare */
text: |
While the ${../if/release:/params[0]/name} parameter is associated with
the semaphore.
- name: Invalid
test-code: |
- ctx->valid_id = false;
+ /* Nothing to prepare */
text: |
While the ${../if/release:/params[0]/name} parameter is not associated
with a semaphore.
@@ -352,1156 +168,184 @@ skip-reasons:
NeedsPriorityDiscipline: |
Binary semaphores with a locking protocol are required to use the priority
task wait queue discipline.
- NoNestedMrsP: |
- Semaphores using the MrsP locking protocol cannot be recursively obtained.
- NoOtherScheduler: |
- Where the system was built with SMP support disabled, exactly one scheduler
- is present in an application.
+ NoMrsP: |
+    Where the system is built with SMP support disabled, the MrsP locking
+ protocol is not available.
test-action: |
- PrepareForAction( ctx );
- ctx->status = rtems_semaphore_release( ctx->id );
- GetSemaphoreProperties( ctx );
- CleanupAfterAction( ctx );
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ 1,
+ ctx->attribute_set,
+ PRIO_VERY_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+
+ if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+ rtems_task_priority prio;
+
+ sc = rtems_semaphore_set_priority(
+ ctx->tq_ctx.thread_queue_id,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH,
+ &prio
+ );
+ T_rsc_success( sc );
+ }
test-brief: null
-test-cleanup: null
+test-cleanup: |
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+ T_rsc_success( sc );
test-context:
- brief: |
- This member contains the identifier of the runner home scheduler.
- description: null
- member: |
- rtems_id runner_scheduler_id
-- brief: |
- This member contains the identifier of another scheduler.
+ This member contains the thread queue test context.
description: null
member: |
- rtems_id other_scheduler_id
-- brief: |
- This member contains the identifier of a third scheduler.
- description: null
- member: |
- rtems_id third_scheduler_id
-- brief: |
- This member contains the identifier of the scheduler owning the processor
- of the calling task after the ${../if/release:/name} call.
- description: null
- member: |
- rtems_id after_release_scheduler_id
-- brief: |
- This member contains the current priority of the calling task after the
- ${../if/release:/name} call.
- description: null
- member: |
- rtems_id after_release_priority
-- brief: |
- This member contains the runner task identifier.
- description: null
- member: |
- rtems_id runner_id
-- brief: |
- This member contains the worker task identifiers.
- description: null
- member: |
- rtems_id worker_id[ WORKER_COUNT ]
-- brief: |
- If this member is true, then the worker shall busy wait.
- description: null
- member: |
- volatile bool busy_wait
-- brief: |
- This member contains the worker activity counter.
- description: null
- member: |
- uint32_t counter
-- brief: |
- This member contains the semaphore obtain counter of a specific worker.
- description: null
- member: |
- uint32_t obtain_counter[ WORKER_COUNT ]
-- brief: |
- This member contains the count of the semaphore after the
- ${../if/release:/name} call.
- description: null
- member: |
- uint32_t sem_count
-- brief: |
- This member contains identifier of the owner of the semaphore after the
- ${../if/release:/name} call or zero if it had no owner.
- description: null
- member: |
- rtems_id owner
+ union {
+ TQContext tq_ctx;
+ TQMtxContext tq_mtx_ctx;
+ TQSemContext tq_sem_ctx;
+ }
- brief: |
This member specifies if the attribute set of the semaphore.
description: null
member: |
rtems_attribute attribute_set
-- brief: |
- This member specifies if the initial count of the semaphore.
- description: null
- member: |
- uint32_t count
-- brief: |
- This member contains the semaphore identifier.
- description: null
- member: |
- rtems_id semaphore_id
-- brief: |
- If this member is true, then the ${../if/release:/params[0]/name}
- parameter shall be valid.
- description: null
- member: |
- bool valid_id
-- brief: |
- If this member is true, then other tasks shall be blocked on the semaphore.
- description: null
- member: |
- bool blocked
-- brief: |
- If this member is true, then a task other than the runner task shall be the
- owner of the semaphore.
- description: null
- member: |
- bool owner_other
-- brief: |
- If this member is true, then the runner task shall obtain the semaphore
- recursively.
- description: null
- member: |
- bool nested
-- brief: |
- If this member is true, then the runner task shall migrate to another
- scheduler due to the locking protocol used by the semaphore.
- description: null
- member: |
- bool other_scheduler
-- brief: |
- If this member is true, then the properties of the semaphore
- shall be obtained.
- description: null
- member: |
- bool need_properties
-- brief: |
- This member specifies the ${../if/release:/params[0]/name} parameter for
- the ${../if/release:/name} call.
- description: null
- member: |
- rtems_id id
-- brief: |
- This member contains the ${../if/release:/name} return status.
- description: null
- member: |
- rtems_status_code status
-test-context-support: |
- typedef enum {
- BLOCKER_A,
- BLOCKER_B,
- BLOCKER_C,
- HELPER_HOME,
- HELPER_OTHER,
- WORKER_COUNT
- } WorkerKind;
+test-context-support: null
test-description: null
test-header: null
test-includes:
- rtems.h
- string.h
-- rtems/rtems/semimpl.h
test-local-includes:
- tx-support.h
-- ts-config.h
+- tr-mtx-surrender.h
+- tr-sem-surrender.h
+- tx-thread-queue.h
test-prepare: |
- size_t i;
-
- ctx->counter = 0;
-
- for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
- ctx->obtain_counter[ i ] = 0;
- }
-
ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
- ctx->blocked = false;
- ctx->owner_other = false;
- ctx->nested = false;
- ctx->other_scheduler = false;
- ctx->need_properties = true;
test-setup:
brief: null
code: |
- rtems_status_code sc;
-
- memset( ctx, 0, sizeof( *ctx ) );
- ctx->runner_id = rtems_task_self();
SetSelfPriority( PRIO_NORMAL );
- ctx->worker_id[ BLOCKER_A ] = CreateTask( "BLKA", PRIO_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_A ], BlockerA, ctx );
- ctx->worker_id[ BLOCKER_B ] = CreateTask( "BLKB", PRIO_VERY_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_B ], BlockerB, ctx );
- ctx->worker_id[ BLOCKER_C ] = CreateTask( "BLKC", PRIO_ULTRA_HIGH );
- StartTask( ctx->worker_id[ BLOCKER_C ], BlockerC, ctx );
-
- sc = rtems_task_get_scheduler( RTEMS_SELF, &ctx->runner_scheduler_id );
- T_rsc_success( sc );
-
- #if defined(RTEMS_SMP)
- ctx->worker_id[ HELPER_HOME ] = CreateTask( "HLPH", PRIO_VERY_ULTRA_HIGH );
- StartTask( ctx->worker_id[ HELPER_HOME ], HelperHome, ctx );
- ctx->worker_id[ HELPER_OTHER ] = CreateTask( "HLPO", PRIO_VERY_LOW );
- StartTask( ctx->worker_id[ HELPER_OTHER ], HelperOther, ctx );
-
- sc = rtems_scheduler_ident(
- TEST_SCHEDULER_B_NAME,
- &ctx->other_scheduler_id
- );
- T_rsc_success( sc );
-
- sc = rtems_scheduler_ident(
- TEST_SCHEDULER_C_NAME,
- &ctx->third_scheduler_id
- );
- T_rsc_success( sc );
-
- SetWorkerScheduler(
- ctx,
- HELPER_OTHER,
- ctx->other_scheduler_id,
- PRIO_VERY_LOW
- );
- #endif
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+ ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+ ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+ ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+ ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
description: null
test-stop: null
test-support: |
#define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
typedef ${.:/test-context-type} Context;
-
- typedef enum {
- EVENT_HELPER_SYNC = RTEMS_EVENT_0,
- EVENT_OBTAIN = RTEMS_EVENT_1,
- EVENT_GET_PROPERTIES = RTEMS_EVENT_2,
- EVENT_OBTAIN_SYNC = RTEMS_EVENT_3,
- EVENT_RELEASE = RTEMS_EVENT_4,
- EVENT_RUNNER_SYNC = RTEMS_EVENT_5,
- EVENT_BUSY_WAIT = RTEMS_EVENT_6
- } Event;
-
- static void SynchronizeRunner( void )
- {
- rtems_event_set events;
-
- events = ReceiveAnyEvents();
- T_eq_u32( events, EVENT_RUNNER_SYNC );
- }
-
- static void Send(
- const Context *ctx,
- WorkerKind worker,
- rtems_event_set events
- )
- {
- SendEvents( ctx->worker_id[ worker ], events );
- }
-
- static void MoveBackHome( Context *ctx )
- {
- #if defined(RTEMS_SMP)
- rtems_task_priority priority;
-
- /* Move us back to a processor of our home scheduler */
- ctx->busy_wait = true;
- Send( ctx, HELPER_OTHER, EVENT_BUSY_WAIT );
- priority = SetPriority( ctx->worker_id[ HELPER_OTHER ], PRIO_VERY_ULTRA_HIGH );
- SetPriority( ctx->worker_id[ HELPER_OTHER ], priority );
- ctx->busy_wait = false;
- #else
- (void) ctx;
- #endif
- }
-
- static bool CanUseThirdScheduler( void )
- {
- return rtems_scheduler_get_processor_maximum() >= 4;
- }
-
- static bool IsFIFO( const Context *ctx )
- {
- return ( ctx->attribute_set & RTEMS_PRIORITY ) == 0;
- }
-
- static bool IsMrsP( const Context *ctx )
- {
- return ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0;
- }
-
- static bool IsPrioCeiling( const Context *ctx )
- {
- return ( ctx->attribute_set & RTEMS_PRIORITY_CEILING ) != 0;
- }
-
- #if defined(RTEMS_SMP)
- static void SetWorkerScheduler(
- const Context *ctx,
- WorkerKind worker,
- rtems_id scheduler_id,
- Priority priority
- )
- {
- rtems_status_code sc;
-
- sc = rtems_task_set_scheduler(
- ctx->worker_id[ worker ],
- scheduler_id,
- priority
- );
- T_rsc_success( sc );
- }
-
- static void SendAndWaitForIntendToBlock(
- const Context *ctx,
- WorkerKind worker,
- rtems_event_set events
- )
- {
- Thread_Control *the_thread;
- Thread_Wait_flags intend_to_block;
-
- Send( ctx, worker, events );
- the_thread = GetThread( ctx->worker_id[ worker ] );
- T_assert_not_null( the_thread );
- intend_to_block = THREAD_WAIT_CLASS_OBJECT |
- THREAD_WAIT_STATE_INTEND_TO_BLOCK;
-
- while ( _Thread_Wait_flags_get_acquire( the_thread ) != intend_to_block ) {
- /* Wait */
- }
- }
-
- static void BlockMrsP( Context *ctx )
- {
- if ( CanUseThirdScheduler() ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_A,
- ctx->third_scheduler_id,
- PRIO_HIGH
- );
- SetWorkerScheduler(
- ctx,
- BLOCKER_C,
- ctx->third_scheduler_id,
- PRIO_ULTRA_HIGH
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_A,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_B,
- EVENT_OBTAIN | EVENT_RELEASE
- );
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_C,
- EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE
- );
- } else {
- SendAndWaitForIntendToBlock(
- ctx,
- BLOCKER_B,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_OBTAIN_SYNC | EVENT_RELEASE
- );
- }
- }
- #endif
-
- static void Obtain( const Context *ctx )
- {
- rtems_status_code sc;
-
- sc = rtems_semaphore_obtain(
- ctx->semaphore_id,
- RTEMS_WAIT,
- RTEMS_NO_TIMEOUT
- );
- T_rsc_success( sc );
- }
-
- static void Release( const Context *ctx )
- {
- rtems_status_code sc;
-
- sc = rtems_semaphore_release( ctx->semaphore_id );
- T_rsc_success( sc );
- }
-
- static void BlockNormal( Context *ctx )
- {
- rtems_event_set first;
- rtems_event_set last;
-
- first = EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE;
- last = EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE;
-
- if ( IsFIFO( ctx ) ) {
- Send( ctx, BLOCKER_A, first );
- } else {
- Send( ctx, BLOCKER_A, last );
- }
-
- #if defined(RTEMS_SMP)
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE | EVENT_HELPER_SYNC );
- SynchronizeRunner();
- #else
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE );
- #endif
-
- if ( IsFIFO( ctx ) ) {
- Send( ctx, BLOCKER_C, last );
- } else {
- Send( ctx, BLOCKER_C, first );
- }
-
- MoveBackHome( ctx );
- }
-
- static void BlockPrioCeiling( const Context *ctx )
- {
- SetPriority( ctx->worker_id[ BLOCKER_A ], PRIO_ULTRA_HIGH );
- Send( ctx, BLOCKER_A, EVENT_OBTAIN | EVENT_OBTAIN_SYNC | EVENT_RELEASE );
- Yield();
- SetPriority( ctx->worker_id[ BLOCKER_A ], PRIO_HIGH );
-
- SetPriority( ctx->worker_id[ BLOCKER_B ], PRIO_ULTRA_HIGH );
- Send( ctx, BLOCKER_B, EVENT_OBTAIN | EVENT_RELEASE );
- Yield();
- SetPriority( ctx->worker_id[ BLOCKER_B ], PRIO_VERY_HIGH );
-
- Send(
- ctx,
- BLOCKER_C,
- EVENT_OBTAIN | EVENT_GET_PROPERTIES | EVENT_RELEASE
- );
- Yield();
- }
-
- static void PrepareForAction( Context *ctx )
- {
- rtems_status_code sc;
-
- sc = rtems_semaphore_create(
- NAME,
- ctx->count,
- ctx->attribute_set,
- PRIO_ULTRA_HIGH,
- &ctx->semaphore_id
- );
- T_rsc_success( sc );
-
- if ( ctx->valid_id ) {
- ctx->id = ctx->semaphore_id;
- } else {
- ctx->id = 0;
- }
-
- #if defined(RTEMS_SMP)
- if ( !IsPrioCeiling( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_B,
- ctx->other_scheduler_id,
- PRIO_LOW
- );
- }
- #endif
-
- if ( ctx->owner_other ) {
- Event event;
-
- event = EVENT_OBTAIN;
- #if defined(RTEMS_SMP)
- event |= EVENT_OBTAIN_SYNC;
- #endif
-
- Send( ctx, BLOCKER_B, event );
- #if defined(RTEMS_SMP)
- SynchronizeRunner();
- #endif
- }
-
- if ( ctx->nested ) {
- Obtain( ctx );
- }
-
- if ( ctx->blocked ) {
- #if defined(RTEMS_SMP)
- if ( IsMrsP( ctx ) ) {
- BlockMrsP( ctx );
- } else if ( IsPrioCeiling( ctx ) ) {
- BlockPrioCeiling( ctx );
- } else {
- BlockNormal( ctx );
- }
- #else
- if ( IsPrioCeiling( ctx ) || IsMrsP( ctx ) ) {
- BlockPrioCeiling( ctx );
- } else {
- BlockNormal( ctx );
- }
- #endif
- }
-
- if ( ctx->other_scheduler ) {
- ctx->busy_wait = true;
- Send( ctx, HELPER_HOME, EVENT_BUSY_WAIT );
- ctx->busy_wait = false;
- }
- }
-
- static void GetSemaphoreProperties( Context *ctx )
- {
- Semaphore_Control *semaphore;
- Thread_queue_Context queue_context;
- Thread_Control *owner;
-
- if ( !ctx->need_properties ) {
- return;
- }
-
- ctx->need_properties = false;
-
- semaphore = _Semaphore_Get( ctx->semaphore_id, &queue_context );
- T_assert_not_null( semaphore );
- ctx->sem_count = semaphore->Core_control.Semaphore.count;
- owner = semaphore->Core_control.Wait_queue.Queue.owner;
- _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
-
- if ( owner != NULL ) {
- ctx->owner = owner->Object.id;
- } else {
- ctx->owner = 0;
- }
- }
-
- static void CleanupAfterAction( Context *ctx )
- {
- rtems_status_code sc;
-
- sc = rtems_scheduler_ident_by_processor(
- rtems_scheduler_get_processor(),
- &ctx->after_release_scheduler_id
- );
- T_rsc_success( sc );
-
- ctx->after_release_priority = GetSelfPriority();
-
- if ( ctx->nested ) {
- Release( ctx );
- }
-
- if ( ctx->count == 0 && ctx->status != RTEMS_SUCCESSFUL ) {
- Release( ctx );
- }
-
- if ( ctx->owner_other ) {
- Send( ctx, BLOCKER_B, EVENT_RELEASE );
- }
-
- if ( ctx->blocked ) {
- SynchronizeRunner();
-
- #if defined(RTEMS_SMP)
- if ( IsMrsP( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_A,
- ctx->runner_scheduler_id,
- PRIO_HIGH
- );
- SetWorkerScheduler(
- ctx,
- BLOCKER_C,
- ctx->runner_scheduler_id,
- PRIO_ULTRA_HIGH
- );
- }
- #endif
- }
-
- Obtain( ctx );
- Release( ctx );
-
- #if defined(RTEMS_SMP)
- if ( !IsPrioCeiling( ctx ) ) {
- SetWorkerScheduler(
- ctx,
- BLOCKER_B,
- ctx->runner_scheduler_id,
- PRIO_VERY_HIGH
- );
- }
- #endif
-
- sc = rtems_semaphore_delete( ctx->semaphore_id );
- T_rsc_success( sc );
- }
-
- static void Worker( rtems_task_argument arg, WorkerKind worker )
- {
- Context *ctx;
-
- ctx = (Context *) arg;
-
- while ( true ) {
- rtems_event_set events;
-
- events = ReceiveAnyEvents();
-
- #if defined(RTEMS_SMP)
- if ( ( events & EVENT_HELPER_SYNC ) != 0 ) {
- SendEvents( ctx->worker_id[ HELPER_OTHER ], EVENT_RUNNER_SYNC );
- }
- #endif
-
- if ( ( events & EVENT_OBTAIN ) != 0 ) {
- uint32_t counter;
-
- Obtain( ctx );
-
- counter = ctx->counter;
- ++counter;
- ctx->counter = counter;
- ctx->obtain_counter[ worker ] = counter;
- }
-
- if ( ( events & EVENT_GET_PROPERTIES ) != 0 ) {
- GetSemaphoreProperties( ctx );
- }
-
- if ( ( events & EVENT_OBTAIN_SYNC ) != 0 ) {
- SendEvents( ctx->runner_id, EVENT_RUNNER_SYNC );
- }
-
- #if defined(RTEMS_SMP)
- if ( ( events & EVENT_BUSY_WAIT ) != 0 ) {
- while ( ctx->busy_wait ) {
- /* Wait */
- }
- }
- #endif
-
- if ( ( events & EVENT_RELEASE ) != 0 ) {
- Release( ctx );
- }
-
- if ( ( events & EVENT_RUNNER_SYNC ) != 0 ) {
- SendEvents( ctx->runner_id, EVENT_RUNNER_SYNC );
- }
- }
- }
-
- static void BlockerA( rtems_task_argument arg )
- {
- Worker( arg, BLOCKER_A );
- }
-
- static void BlockerB( rtems_task_argument arg )
- {
- Worker( arg, BLOCKER_B );
- }
-
- static void BlockerC( rtems_task_argument arg )
- {
- Worker( arg, BLOCKER_C );
- }
-
- #if defined(RTEMS_SMP)
- static void HelperHome( rtems_task_argument arg )
- {
- Worker( arg, HELPER_HOME );
- }
-
- static void HelperOther( rtems_task_argument arg )
- {
- Worker( arg, HELPER_OTHER );
- }
- #endif
test-target: testsuites/validation/tc-sem-release.c
test-teardown:
brief: null
code: |
- size_t i;
-
- for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
- DeleteTask( ctx->worker_id[ i ] );
- }
-
+ TQDestroy( &ctx->tq_ctx );
RestoreRunnerPriority();
description: null
text: ${.:text-template}
transition-map:
- enabled-by: true
post-conditions:
- Status: InvId
- Count: Nop
- Owner: N/A
- Next: N/A
- CallerPrio: Real
- CallerCPU: Home
+ Action: InvId
pre-conditions:
- Id:
- - Invalid
- Class:
- - Counting
- - Simple
+ Class: all
Discipline: all
- Count: all
- Owner: N/A
-- enabled-by: true
- post-conditions:
- Status: InvId
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: 'No'
- then: 'No'
- - if:
- pre-conditions:
- Owner: Other
- then: Other
- - else: Self
- Next: N/A
- CallerPrio: Real
- CallerCPU: Home
- pre-conditions:
Id:
- Invalid
- Class:
- - Binary
- Discipline:
- - FIFO
- Count: N/A
- Owner:
- - 'No'
- - Self
- - Other
- - Blocked
- - Nested
- - BlockedNested
- enabled-by: true
post-conditions:
- Status: InvId
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: 'No'
- then: 'No'
- - if:
- pre-conditions:
- Owner: Other
- then: Other
- - else: Self
- Next: N/A
- CallerPrio:
- - if:
- and:
- - pre-conditions:
- Class:
- - PrioCeiling
- - MrsP
- - post-conditions:
- Owner: Self
- then: Ceiling
- - if:
- pre-conditions:
- Class: PrioInherit
- Owner:
- - Blocked
- - BlockedNested
- then: Inherit
- - else: Real
- CallerCPU: Home
+ Action: BinarySurrender
pre-conditions:
- Id:
- - Invalid
Class:
- - Binary
- - PrioCeiling
- - PrioInherit
- - MrsP
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - 'No'
- - Self
- - Other
- - Blocked
- - Nested
- - BlockedNested
-- enabled-by: true
- post-conditions:
- Status: Unsat
- Count: Max
- Owner: N/A
- Next: N/A
- CallerPrio: Real
- CallerCPU: Home
- pre-conditions:
+ - Simple
+ Discipline: all
Id:
- Valid
- Class:
- - Counting
- Discipline: all
- Count:
- - Max
- Owner: N/A
- enabled-by: true
post-conditions:
- Status: Ok
- Count:
- - if:
- pre-conditions:
- Count: Blocked
- then: Zero
- - else: PlusOne
- Owner: N/A
- Next:
- - if:
- pre-conditions:
- Count: Blocked
- then-specified-by: Discipline
- - else: N/A
- CallerPrio: Real
- CallerCPU: Home
+ Action: CountingSurrender
pre-conditions:
- Id:
- - Valid
Class:
- Counting
Discipline: all
- Count:
- - LessMax
- - Blocked
- Owner: N/A
-- enabled-by: true
- post-conditions:
- Status: Ok
- Count:
- - if:
- pre-conditions:
- Count: Blocked
- then: Zero
- - else: One
- Owner: N/A
- Next:
- - if:
- pre-conditions:
- Count: Blocked
- then-specified-by: Discipline
- - else: N/A
- CallerPrio: Real
- CallerCPU: Home
- pre-conditions:
Id:
- Valid
- Class:
- - Simple
- Discipline: all
- Count:
- - LessMax
- - Max
- - Blocked
- Owner: N/A
- enabled-by: true
post-conditions:
- Status: NotOwner
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: Other
- then: Other
- - else: 'No'
- Next: N/A
- CallerPrio: Real
- CallerCPU: Home
+ Action: MtxSurrender
pre-conditions:
- Id:
- - Valid
Class:
- Binary
Discipline: all
- Count: N/A
- Owner:
- - 'No'
- - Other
-- enabled-by: true
- post-conditions:
- Status: Ok
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: Blocked
- then-specified-by: Discipline
- - if:
- pre-conditions:
- Owner:
- - Nested
- - BlockedNested
- then: Self
- - else: 'No'
- Next:
- - if:
- pre-conditions:
- Owner: Blocked
- then-specified-by: Discipline
- - else: N/A
- CallerPrio: Real
- CallerCPU: Home
- pre-conditions:
Id:
- Valid
- Class:
- - Binary
- Discipline: all
- Count: N/A
- Owner:
- - Self
- - Blocked
- - Nested
- - BlockedNested
- enabled-by: true
post-conditions:
- Status: NotOwner
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: Other
- then: Other
- - else: 'No'
- Next: N/A
- CallerPrio: Real
- CallerCPU: Home
+ Action: CeilingMtxSurrender
pre-conditions:
- Id:
- - Valid
Class:
- PrioCeiling
- - PrioInherit
- - MrsP
Discipline:
- Priority
- Count: N/A
- Owner:
- - 'No'
- - Other
+ Id:
+ - Valid
- enabled-by: true
post-conditions:
- Status: Ok
- Count: N/A
- Owner:
- - if:
- pre-conditions:
- Owner: Blocked
- then: Priority
- - else: 'No'
- Next:
- - if:
- pre-conditions:
- Owner: Blocked
- then: Priority
- - else: N/A
- CallerPrio: Real
- CallerCPU: Home
+ Action: InheritMtxSurrender
pre-conditions:
- Id:
- - Valid
Class:
- - PrioCeiling
- PrioInherit
- - MrsP
Discipline:
- Priority
- Count: N/A
- Owner:
- - Self
- - Blocked
-- enabled-by: true
- post-conditions:
- Status: Ok
- Count: N/A
- Owner: Self
- Next: N/A
- CallerPrio:
- - if:
- pre-conditions:
- Class: PrioInherit
- Owner: BlockedNested
- then: Inherit
- - if:
- pre-conditions:
- Class: PrioInherit
- Owner: Nested
- then: Real
- - else: Ceiling
- CallerCPU: Home
- pre-conditions:
Id:
- Valid
+- enabled-by: true
+ post-conditions: NoMrsP
+ pre-conditions:
Class:
- - PrioCeiling
- - PrioInherit
- MrsP
Discipline:
- Priority
- Count: N/A
- Owner:
- - Nested
- - BlockedNested
+ Id: all
- enabled-by: true
post-conditions: NeedsPriorityDiscipline
pre-conditions:
- Id: all
Class:
- PrioCeiling
- PrioInherit
- MrsP
Discipline:
- FIFO
- Count: all
- Owner: all
-- enabled-by: true
- post-conditions: NoOtherScheduler
- pre-conditions:
- Id: all
- Class:
- - Binary
- Discipline: all
- Count: N/A
- Owner:
- - BlockedOther
- - BlockedNestedOther
-- enabled-by: true
- post-conditions: NoOtherScheduler
- pre-conditions:
Id: all
- Class:
- - PrioCeiling
- - PrioInherit
- - MrsP
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - BlockedOther
- - BlockedNestedOther
-- enabled-by: RTEMS_SMP
- post-conditions:
- Status: InvId
- Count: N/A
- Owner: Self
- Next: N/A
- CallerPrio: Inherit
- CallerCPU: Other
- pre-conditions:
- Id:
- - Invalid
- Class:
- - PrioInherit
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - BlockedOther
- - BlockedNestedOther
- enabled-by: RTEMS_SMP
post-conditions:
- Status: InvId
- Count: N/A
- Owner: Self
- Next: N/A
- CallerPrio: Ceiling
- CallerCPU: Other
+ Action: InvId
pre-conditions:
- Id:
- - Invalid
Class:
- MrsP
Discipline:
- Priority
- Count: N/A
- Owner:
- - BlockedOther
-- enabled-by: RTEMS_SMP
- post-conditions:
- Status: Ok
- Count: N/A
- Owner: Priority
- Next: Priority
- CallerPrio: Real
- CallerCPU: Home
- pre-conditions:
Id:
- - Valid
- Class:
- - PrioInherit
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - BlockedOther
+ - Invalid
- enabled-by: RTEMS_SMP
post-conditions:
- Status: Ok
- Count: N/A
- Owner: MrsP
- Next: MrsP
- CallerPrio: Real
- CallerCPU: Home
+ Action: MrsPMtxSurrender
pre-conditions:
- Id:
- - Valid
Class:
- MrsP
Discipline:
- Priority
- Count: N/A
- Owner:
- - Blocked
- - BlockedOther
-- enabled-by: RTEMS_SMP
- post-conditions:
- Status: Ok
- Count: N/A
- Owner: Self
- Next: N/A
- CallerPrio: Inherit
- CallerCPU: Other
- pre-conditions:
Id:
- Valid
- Class:
- - PrioInherit
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - BlockedNestedOther
-- enabled-by: RTEMS_SMP
- post-conditions: NoNestedMrsP
- pre-conditions:
- Id: all
- Class:
- - MrsP
- Discipline:
- - Priority
- Count: N/A
- Owner:
- - Nested
- - BlockedNested
- - BlockedNestedOther
type: requirement
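
With the generalization above, spec/rtems/sem/req/release.yml no longer enumerates
semaphore counts and owners itself; the removed Count and Owner handling moves to the
score-level surrender requirements added below. As a reminder of one case that moved
(not part of the commit): releasing a counting semaphore that is already at the maximum
count is rejected, which the classic API reports as RTEMS_UNSATISFIED (the removed Unsat
post-condition above, MaxCountExceeded in spec/score/sem/req/surrender.yml below). A
minimal sketch, with an illustrative semaphore name:

    #include <rtems.h>
    #include <stdint.h>

    /* Illustrative sketch only: the maximum count case of a counting
     * semaphore surrender. */
    static void counting_semaphore_max_count( void )
    {
      rtems_status_code sc;
      rtems_id          id;

      sc = rtems_semaphore_create(
        rtems_build_name( 'C', 'N', 'T', 'G' ),   /* name is illustrative */
        UINT32_MAX,                               /* start at the maximum count */
        RTEMS_COUNTING_SEMAPHORE | RTEMS_PRIORITY,
        0,
        &id
      );

      if ( sc == RTEMS_SUCCESSFUL ) {
        /* The count cannot grow beyond the maximum count; the status
         * maps to RTEMS_UNSATISFIED in the classic API. */
        sc = rtems_semaphore_release( id );
        /* sc == RTEMS_UNSATISFIED */
        (void) rtems_semaphore_delete( id );
      }
    }
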
diff --git a/spec/score/mtx/req/surrender.yml b/spec/score/mtx/req/surrender.yml
new file mode 100644
index 00000000..94f6e4fb
--- /dev/null
+++ b/spec/score/mtx/req/surrender.yml
@@ -0,0 +1,753 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+post-conditions:
+- name: Status
+ states:
+ - name: Ok
+ test-code: |
+ T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+ text: |
+ The return status of the directive call shall be derived from
+ ${../../status/if/successful:/name}.
+ - name: NotOwner
+ test-code: |
+ T_eq_int( ctx->status, Status( ctx, STATUS_NOT_OWNER ) );
+ text: |
+ The return status of the directive call shall be derived from
+ ${../../status/if/not-owner:/name}.
+ test-epilogue: null
+ test-prologue: null
+- name: Owner
+ states:
+ - name: None
+ test-code: |
+ T_null( ctx->owner_after );
+ text: |
+ The mutex shall have no owner.
+ - name: Caller
+ test-code: |
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.runner_tcb
+ );
+ text: |
+ The owner of the mutex shall be the calling thread.
+ - name: Other
+ test-code: |
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_HELPER_A ]
+ );
+ text: |
+ The owner of the mutex shall not be modified.
+ - name: First
+ test-code: |
+ T_eq_ptr(
+ ctx->owner_after,
+ ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+ );
+ text: |
+      The owner of the mutex shall be the dequeued thread.
+ test-epilogue: null
+ test-prologue: null
+- name: Surrender
+ states:
+ - name: Nop
+ test-code: |
+ T_eq_u32( ctx->counter, 0 );
+ text: |
+ The thread queue of the mutex shall not be surrendered to a thread.
+ - name: FIFO
+ test-code: |
+ T_eq_u32( ctx->counter, 1 );
+ ${../../tq/req/surrender:/test-run}( &ctx->tq_ctx->base );
+ text: |
+ The thread queue of the mutex shall be surrendered in FIFO order.
+ - name: Priority
+ test-code: |
+ T_eq_u32( ctx->counter, 1 );
+ ${../../tq/req/surrender:/test-run}( &ctx->tq_ctx->base );
+ text: |
+ The thread queue of the mutex shall be surrendered in priority order.
+ - name: PriorityInherit
+ test-code: |
+ T_eq_u32( ctx->counter, 1 );
+ ${../../tq/req/surrender-priority-inherit:/test-run}( &ctx->tq_ctx->base );
+ text: |
+ The thread queue of the mutex shall be surrendered in priority order
+      with priority inheritance.
+ test-epilogue: null
+ test-prologue: null
+- name: Priority
+ states:
+ - name: Nop
+ test-code: |
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ text: |
+      The ${/glossary/priority-current:/term} of the calling thread shall
+      not be modified.
+ - name: Low
+ test-code: |
+ T_eq_u32( ctx->priority_after, ctx->priority_real );
+ text: |
+ The ${/glossary/priority-current:/term} of the calling thread shall be
+ lowered to reflect the removal of the priorities available through the
+ mutex.
+ test-epilogue: null
+ test-prologue: null
+pre-conditions:
+- name: Protocol
+ states:
+ - name: None
+ test-code: |
+ if ( ctx->tq_ctx->protocol != TQ_MTX_NO_PROTOCOL ) {
+ ${.:skip}
+ }
+ text: |
+ Where the mutex does not use a locking protocol.
+ - name: Inherit
+ test-code: |
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_INHERIT ) {
+ ${.:skip}
+ }
+ text: |
+ Where the mutex uses the priority inheritance locking protocol.
+ - name: Ceiling
+ test-code: |
+ if ( ctx->tq_ctx->protocol != TQ_MTX_PRIORITY_CEILING ) {
+ ${.:skip}
+ }
+ text: |
+ Where the mutex uses the priority ceiling locking protocol.
+ - name: MrsP
+ test-code: |
+ if ( ctx->tq_ctx->protocol != TQ_MTX_MRSP ) {
+ ${.:skip}
+ }
+ text: |
+ Where the mutex uses the MrsP locking protocol.
+ test-epilogue: null
+ test-prologue: null
+- name: Discipline
+ states:
+ - name: FIFO
+ test-code: |
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue of the mutex uses the FIFO discipline.
+ - name: Priority
+ test-code: |
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue of the mutex uses the priority discipline.
+ test-epilogue: null
+ test-prologue: null
+- name: Recursive
+ states:
+ - name: Allowed
+ test-code: |
+ if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+ ${.:skip}
+ }
+ text: |
+ Where a recursive seize of the mutex is allowed.
+ - name: NotAllowed
+ test-code: |
+ if ( ctx->tq_ctx->recursive == TQ_MTX_RECURSIVE_ALLOWED ) {
+ ${.:skip}
+ }
+ text: |
+ Where a recursive seize of the mutex is not allowed.
+ test-epilogue: null
+ test-prologue: null
+- name: OwnerCheck
+ states:
+ - name: 'Yes'
+ test-code: |
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_CHECKS_OWNER ) {
+ ${.:skip}
+ }
+ text: |
+ Where the surrender checks that the mutex owner is the calling thread.
+ - name: 'No'
+ test-code: |
+ if ( ctx->tq_ctx->owner_check != TQ_MTX_NO_OWNER_CHECK ) {
+ ${.:skip}
+ }
+ text: |
+ Where the surrender does not check that the mutex owner is the calling
+ thread.
+ test-epilogue: null
+ test-prologue: null
+- name: Owner
+ states:
+ - name: None
+ test-code: |
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ text: |
+ While the mutex has no owner.
+ - name: Caller
+ test-code: |
+ ctx->owner_caller = true;
+ ctx->owner_other = false;
+ text: |
+ While the owner of the mutex is the calling thread.
+ - name: Other
+ test-code: |
+ ctx->owner_caller = false;
+ ctx->owner_other = true;
+ text: |
+ While the owner of the mutex is a thread other than the calling thread.
+ test-epilogue: null
+ test-prologue: null
+- name: Nested
+ states:
+ - name: 'Yes'
+ test-code: |
+ ctx->nested = true;
+ text: |
+      While the calling thread seized the mutex recursively.
+ - name: 'No'
+ test-code: |
+ ctx->nested = false;
+ text: |
+      While the calling thread did not seize the mutex recursively.
+ test-epilogue: null
+ test-prologue: null
+- name: Blocked
+ states:
+ - name: 'Yes'
+ test-code: |
+ ctx->blocked = true;
+ text: |
+ While the mutex has threads blocked on the mutex.
+ - name: 'No'
+ test-code: |
+ ctx->blocked = false;
+ text: |
+ While no threads are blocked on the mutex.
+ test-epilogue: null
+ test-prologue: null
+- name: Priority
+ states:
+ - name: High
+ test-code: |
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ text: |
+ While the ${/glossary/priority-current:/term} of the calling thread
+ without the priorities available through the mutex would be higher than
+ the highest priority of the priorities available through the mutex.
+ - name: Equal
+ test-code: |
+ ctx->priority_real = PRIO_VERY_HIGH;
+ text: |
+ While the ${/glossary/priority-current:/term} of the calling thread
+ without the priorities available through the mutex would be equal to the
+ highest priority of the priorities available through the mutex.
+ - name: Low
+ test-code: |
+ ctx->priority_real = PRIO_HIGH;
+ text: |
+ While the ${/glossary/priority-current:/term} of the calling thread
+ without the priorities available through the mutex would be lower than
+ the highest priority of the priorities available through the mutex.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+ BlockedNeedsOwner: |
+    Blocked threads only exist if the mutex has an owner.
+ NestedNeedsRecursive: |
+ Where the mutex does not allow a recursive seize, the mutex cannot be
+ recursively seized.
+ MustBeOwner: |
+ Where the mutex does not check that the mutex owner is the calling thread,
+    the mutex owner must be the calling thread; otherwise, the system behaviour
+    is undefined.
+ PriorityDisciplineByProtocol: |
+    The locking protocols require a priority discipline.
+test-action: |
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+ If this member is true, then the calling thread shall be the owner of the
+ mutex.
+ description: null
+ member: |
+ bool owner_caller
+- brief: |
+ If this member is true, then a thread other than the calling thread shall
+ be the owner of the mutex.
+ description: null
+ member: |
+ bool owner_other
+- brief: |
+ If this member is true, then the calling thread shall have seized the mutex
+ recursively.
+ description: null
+ member: |
+ bool nested
+- brief: |
+ If this member is true, then there shall be a thread blocked waiting for
+ the mutex.
+ description: null
+ member: |
+ bool blocked
+- brief: |
+ This member contains the real priority of the calling thread.
+ description: null
+ member: |
+ rtems_task_priority priority_real
+- brief: |
+ This member contains the current priority of the calling thread before the
+ directive call.
+ description: null
+ member: |
+ rtems_task_priority priority_before
+- brief: |
+ This member contains the return status of the directive call.
+ description: null
+ member: |
+ Status_Control status
+- brief: |
+ This member contains the owner of the mutex after the directive call.
+ description: null
+ member: |
+ const rtems_tcb *owner_after
+- brief: |
+ This member contains the current priority of the calling thread after the
+ directive call.
+ description: null
+ member: |
+ rtems_task_priority priority_after
+- brief: |
+ This member contains the counter snapshot after the directive call.
+ description: null
+ member: |
+ uint32_t counter
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+ is the thread queue context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQMtxContext *${.:name}
+ target: testsuites/validation/tr-mtx-surrender.h
+test-includes: []
+test-local-includes:
+- tr-mtx-surrender.h
+- tr-tq-surrender.h
+- tr-tq-surrender-priority-inherit.h
+test-prepare: |
+ ctx->owner_caller = false;
+ ctx->owner_other = false;
+ ctx->nested = false;
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ctx->priority_real = PRIO_ULTRA_HIGH;
+ } else {
+ ctx->priority_real = PRIO_NORMAL;
+ }
+test-setup: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ static Status_Control Status( const Context *ctx, Status_Control status )
+ {
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+ }
+
+ static void Action( Context *ctx )
+ {
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_ENQUEUE );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ Yield();
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+ SetSelfPriority( PRIO_NORMAL );
+ Yield();
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ TQSend( &ctx->tq_ctx->base, TQ_HELPER_A, TQ_EVENT_SURRENDER );
+ }
+
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+ }
+ }
+
+ static void ActionSticky( Context *ctx )
+ {
+ Status_Control status;
+
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ SCHEDULER_A_ID,
+ PRIO_VERY_HIGH
+ );
+ TQSetScheduler(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ SCHEDULER_B_ID,
+ PRIO_VERY_HIGH
+ );
+
+ if ( ctx->owner_caller ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ } else if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->nested ) {
+ status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndWaitForIntendToBlock(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ TQResetCounter( &ctx->tq_ctx->base );
+ SetSelfPriority( ctx->priority_real );
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( &ctx->tq_ctx->base );
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ TQSchedulerRecordStop( &ctx->tq_ctx->base );
+ ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+ ctx->priority_after = GetSelfPriority();
+
+ if ( ctx->status == Status( ctx, STATUS_SUCCESSFUL ) ) {
+ TQWaitForExecutionStop( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+ }
+
+ ctx->counter = TQGetCounter( &ctx->tq_ctx->base );
+
+ if ( ctx->nested ) {
+ status = TQSurrender( &ctx->tq_ctx->base );
+ T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ctx->owner_other ) {
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_HELPER_A,
+ TQ_EVENT_SURRENDER
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ } else {
+ SetSelfPriority( PRIO_NORMAL );
+ }
+
+ if ( ctx->blocked ) {
+ TQSendAndSynchronizeRunner(
+ &ctx->tq_ctx->base,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ }
+ }
+test-target: testsuites/validation/tr-mtx-surrender.c
+test-teardown: null
+text: |
+ When the calling thread surrenders the mutex.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Owner:
+ - if:
+ pre-conditions:
+ Nested: 'Yes'
+ then: Caller
+ - if:
+ pre-conditions:
+ Blocked: 'Yes'
+ then: First
+ - else: None
+ Surrender:
+ - if:
+ post-conditions:
+ Owner: First
+ then-specified-by: Discipline
+ - else: Nop
+ Priority: Nop
+ pre-conditions:
+ Protocol:
+ - None
+ Discipline: all
+ Recursive: all
+ OwnerCheck: all
+ Owner:
+ - Caller
+ Nested: all
+ Blocked: all
+ Priority: N/A
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Owner:
+ - if:
+ pre-conditions:
+ Nested: 'Yes'
+ then: Caller
+ - if:
+ pre-conditions:
+ Blocked: 'Yes'
+ then: First
+ - else: None
+ Surrender:
+ - if:
+ and:
+ - post-conditions:
+ Owner: First
+ - pre-conditions:
+ Protocol: MrsP
+ then: PriorityInherit
+ - if:
+ post-conditions:
+ Owner: First
+ then: Priority
+ - else: Nop
+ Priority:
+ - if:
+ pre-conditions:
+ Nested: 'No'
+ Priority: Low
+ then: Low
+ - else: Nop
+ pre-conditions:
+ Protocol:
+ - Ceiling
+ - MrsP
+ Discipline: all
+ Recursive: all
+ OwnerCheck: all
+ Owner:
+ - Caller
+ Nested: all
+ Blocked: all
+ Priority: all
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Owner:
+ - if:
+ pre-conditions:
+ Nested: 'Yes'
+ then: Caller
+ - else: First
+ Surrender:
+ - if:
+ post-conditions:
+ Owner: First
+ then: PriorityInherit
+ - else: Nop
+ Priority:
+ - if:
+ pre-conditions:
+ Nested: 'No'
+ Priority: Low
+ then: Low
+ - else: Nop
+ pre-conditions:
+ Protocol:
+ - Inherit
+ Discipline: all
+ Recursive: all
+ OwnerCheck: all
+ Owner:
+ - Caller
+ Nested: all
+ Blocked:
+ - 'Yes'
+ Priority: all
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Owner:
+ - if:
+ pre-conditions:
+ Nested: 'Yes'
+ then: Caller
+ - if:
+ pre-conditions:
+ Blocked: 'Yes'
+ then: First
+ - else: None
+ Surrender:
+ - if:
+ post-conditions:
+ Owner: First
+ then: PriorityInherit
+ - else: Nop
+ Priority: Nop
+ pre-conditions:
+ Protocol:
+ - Inherit
+ Discipline: all
+ Recursive: all
+ OwnerCheck: all
+ Owner:
+ - Caller
+ Nested: all
+ Blocked:
+ - 'No'
+ Priority: N/A
+- enabled-by: true
+ post-conditions:
+ Status: NotOwner
+ Owner:
+ - specified-by: Owner
+ Surrender: Nop
+ Priority: Nop
+ pre-conditions:
+ Protocol: all
+ Discipline: all
+ Recursive: all
+ OwnerCheck:
+ - 'Yes'
+ Owner:
+ - None
+ - Other
+ Nested: N/A
+ Blocked: all
+ Priority: N/A
+- enabled-by: true
+ post-conditions: BlockedNeedsOwner
+ pre-conditions:
+ Protocol: all
+ Discipline: all
+ Recursive: all
+ OwnerCheck: all
+ Owner:
+ - None
+ Nested: N/A
+ Blocked:
+ - 'Yes'
+ Priority: N/A
+- enabled-by: true
+ post-conditions: MustBeOwner
+ pre-conditions:
+ Protocol: all
+ Discipline: all
+ Recursive: all
+ OwnerCheck:
+ - 'No'
+ Owner:
+ - None
+ - Other
+ Nested: all
+ Blocked: all
+ Priority: all
+- enabled-by: true
+ post-conditions: NestedNeedsRecursive
+ pre-conditions:
+ Protocol: all
+ Discipline: all
+ Recursive:
+ - NotAllowed
+ OwnerCheck: all
+ Owner: all
+ Nested:
+ - 'Yes'
+ Blocked: all
+ Priority: all
+- enabled-by: true
+ post-conditions: PriorityDisciplineByProtocol
+ pre-conditions:
+ Protocol:
+ - Inherit
+ - Ceiling
+ - MrsP
+ Discipline:
+ - FIFO
+ Recursive: all
+ OwnerCheck: all
+ Owner: all
+ Nested: all
+ Blocked: all
+ Priority: all
+type: requirement
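
The Priority post-condition of the mutex surrender requirement above states that
surrendering may lower the caller's current priority once the priorities made available
through the mutex are removed. A minimal classic API sketch of that effect for a
priority ceiling mutex (not part of the commit; the name, ceiling value, and use of
rtems_task_set_priority() to read the current priority are illustrative):

    #include <rtems.h>

    /* Illustrative sketch only: surrendering a priority ceiling mutex
     * lowers the caller's current priority back to its real priority. */
    static void ceiling_mutex_priority_effect( void )
    {
      rtems_status_code   sc;
      rtems_id            id;
      rtems_task_priority before;
      rtems_task_priority during;
      rtems_task_priority after;

      sc = rtems_semaphore_create(
        rtems_build_name( 'C', 'E', 'I', 'L' ),   /* name is illustrative */
        1,
        RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
        1,                                        /* ceiling priority */
        &id
      );

      if ( sc != RTEMS_SUCCESSFUL ) {
        return;
      }

      (void) rtems_task_set_priority( RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &before );

      (void) rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
      (void) rtems_task_set_priority( RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &during );
      /* during is the ceiling priority while the mutex is owned */

      (void) rtems_semaphore_release( id );
      (void) rtems_task_set_priority( RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &after );
      /* after == before: the priority available through the mutex is gone */

      (void) rtems_semaphore_delete( id );
    }
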
diff --git a/spec/score/sem/req/surrender.yml b/spec/score/sem/req/surrender.yml
new file mode 100644
index 00000000..fc767941
--- /dev/null
+++ b/spec/score/sem/req/surrender.yml
@@ -0,0 +1,267 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+post-conditions:
+- name: Status
+ states:
+ - name: Ok
+ test-code: |
+ T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+ text: |
+ The return status of the directive call shall be derived from
+ ${../../status/if/successful:/name}.
+ - name: MaxCountExceeded
+ test-code: |
+ T_eq_int( ctx->status, Status( ctx, STATUS_MAXIMUM_COUNT_EXCEEDED ) );
+ text: |
+ The return status of the directive call shall be derived from
+ ${../../status/if/maximum-count-exceeded:/name}.
+ test-epilogue: null
+ test-prologue: null
+- name: Dequeue
+ states:
+ - name: FIFO
+ test-code: |
+ /* TODO */
+ text: |
+ The first thread in FIFO order shall be dequeued from the thread queue.
+ - name: Priority
+ test-code: |
+ /* TODO */
+ text: |
+ The first thread in priority order shall be dequeued from the thread
+ queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Count
+ states:
+ - name: Zero
+ test-code: |
+ T_eq_u32( ctx->count_after, 0 );
+ text: |
+ The count of the semaphore shall be zero.
+ - name: One
+ test-code: |
+ T_eq_u32( ctx->count_after, 1 );
+ text: |
+ The count of the semaphore shall be one.
+ - name: PlusOne
+ test-code: |
+ T_eq_u32( ctx->count_after, ctx->count_before + 1 );
+ text: |
+ The count of the semaphore shall be incremented by one.
+ - name: Nop
+ test-code: |
+ T_eq_u32( ctx->count_after, ctx->count_before );
+ text: |
+ The count of the semaphore shall not be modified.
+ test-epilogue: null
+ test-prologue: null
+pre-conditions:
+- name: Variant
+ states:
+ - name: Binary
+ test-code: |
+ if ( ctx->tq_ctx->variant != TQ_SEM_BINARY ) {
+ ${.:skip}
+ }
+ text: |
+ Where the semaphore is a binary semaphore.
+ - name: Counting
+ test-code: |
+ if ( ctx->tq_ctx->variant != TQ_SEM_COUNTING ) {
+ ${.:skip}
+ }
+ text: |
+ Where the semaphore is a counting semaphore.
+ test-epilogue: null
+ test-prologue: null
+- name: Discipline
+ states:
+ - name: FIFO
+ test-code: |
+ if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue of the semaphore uses the FIFO discipline.
+ - name: Priority
+ test-code: |
+ if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue of the semaphore uses the priority discipline.
+ test-epilogue: null
+ test-prologue: null
+- name: Count
+ states:
+ - name: LessMax
+ test-code: |
+ ctx->blocked = false;
+
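+      /* The maximum count is one (binary) or UINT32_MAX (counting) */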
+ if ( ctx->tq_ctx->variant == TQ_SEM_BINARY ) {
+ ctx->count_before = 0;
+ } else {
+ ctx->count_before = UINT32_MAX - 1;
+ }
+ text: |
+ While the count of the semaphore is less than the maximum count.
+ - name: Max
+ test-code: |
+ ctx->blocked = false;
+
+ if ( ctx->tq_ctx->variant == TQ_SEM_BINARY ) {
+ ctx->count_before = 1;
+ } else {
+ ctx->count_before = UINT32_MAX;
+ }
+ text: |
+ While the count of the semaphore is equal to the maximum count.
+ - name: Blocked
+ test-code: |
+ ctx->blocked = true;
+ ctx->count_before = 0;
+ text: |
+      While threads are blocked on the semaphore.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+ TQSemSetCount( ctx->tq_ctx, ctx->count_before );
+
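+  /* Block a worker on the semaphore for the Blocked pre-condition */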
+ if ( ctx->blocked ) {
+ TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+ }
+
+ ctx->status = TQSurrender( &ctx->tq_ctx->base );
+ ctx->count_after = TQSemGetCount( ctx->tq_ctx );
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+ This member specifies the semaphore count before the directive call.
+ description: null
+ member: |
+ uint32_t count_before
+- brief: |
+ This member contains the return status of the directive call.
+ description: null
+ member: |
+ Status_Control status
+- brief: |
+ This member contains the semaphore count after the directive call.
+ description: null
+ member: |
+ uint32_t count_after
+- brief: |
+ If this member is true, then there shall be threads blocked on the
+ semaphore.
+ description: null
+ member: |
+ bool blocked
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+      is the semaphore thread queue context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQSemContext *${.:name}
+ target: testsuites/validation/tr-sem-surrender.h
+test-includes: []
+test-local-includes:
+- tr-sem-surrender.h
+test-prepare: null
+test-setup:
+ brief: null
+ code: |
+ ctx->tq_ctx->base.wait = TQ_WAIT_FOREVER;
+ TQReset( &ctx->tq_ctx->base );
+ description: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
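+  /* Convert the status code for the thread queue under test */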
+ static Status_Control Status( const Context *ctx, Status_Control status )
+ {
+ return TQConvertStatus( &ctx->tq_ctx->base, status );
+ }
+test-target: testsuites/validation/tr-sem-surrender.c
+test-teardown: null
+text: |
+  When the calling task surrenders the semaphore.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Dequeue: N/A
+ Count: One
+ pre-conditions:
+ Variant:
+ - Binary
+ Discipline: all
+ Count:
+ - LessMax
+ - Max
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Dequeue:
+ - specified-by: Discipline
+ Count: Zero
+ pre-conditions:
+ Variant:
+ - Binary
+ Discipline: all
+ Count:
+ - Blocked
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Dequeue: N/A
+ Count: PlusOne
+ pre-conditions:
+ Variant:
+ - Counting
+ Discipline: all
+ Count:
+ - LessMax
+- enabled-by: true
+ post-conditions:
+ Status: MaxCountExceeded
+ Dequeue: N/A
+ Count: Nop
+ pre-conditions:
+ Variant:
+ - Counting
+ Discipline: all
+ Count:
+ - Max
+- enabled-by: true
+ post-conditions:
+ Status: Ok
+ Dequeue:
+ - specified-by: Discipline
+ Count: Zero
+ pre-conditions:
+ Variant:
+ - Counting
+ Discipline: all
+ Count:
+ - Blocked
+type: requirement
diff --git a/spec/score/status/if/maximum-count-exceeded.yml b/spec/score/status/if/maximum-count-exceeded.yml
new file mode 100644
index 00000000..f8381351
--- /dev/null
+++ b/spec/score/status/if/maximum-count-exceeded.yml
@@ -0,0 +1,12 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+index-entries: []
+interface-type: unspecified
+links:
+- role: interface-placement
+ uid: header
+name: STATUS_MAXIMUM_COUNT_EXCEEDED
+references: []
+type: interface
diff --git a/spec/score/status/if/not-owner.yml b/spec/score/status/if/not-owner.yml
new file mode 100644
index 00000000..2d4c831f
--- /dev/null
+++ b/spec/score/status/if/not-owner.yml
@@ -0,0 +1,12 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+index-entries: []
+interface-type: unspecified
+links:
+- role: interface-placement
+ uid: header
+name: STATUS_NOT_OWNER
+references: []
+type: interface
diff --git a/spec/score/tq/req/surrender-priority-inherit.yml b/spec/score/tq/req/surrender-priority-inherit.yml
new file mode 100644
index 00000000..2ec5871a
--- /dev/null
+++ b/spec/score/tq/req/surrender-priority-inherit.yml
@@ -0,0 +1,888 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+post-conditions:
+- name: Dequeue
+ states:
+ - name: Priority
+ test-code: |
+ /* Validation is done by spec:/score/tq/req/enqueue-priority */
+ text: |
+ The first thread in priority order shall be dequeued from the thread
+ queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Unblock
+ states:
+ - name: 'Yes'
+ test-code: |
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ text: |
+ The dequeued thread shall be unblocked by the thread queue surrender
+ operation.
+ - name: 'No'
+ test-code: |
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ text: |
+ The dequeued thread shall not be unblocked by the thread queue surrender
+ operation.
+ test-epilogue: null
+ test-prologue: |
+ size_t i;
+
+ i = 0;
+- name: PreviousOwnerPriority
+ states:
+ - name: Drop
+ test-code: |
+ T_eq_u32( ctx->priority_after, PRIO_NORMAL );
+ text: |
+ Each ${/glossary/priority-eligible:/term} of the previous owner which had
+ the highest priority inherited through the thread queue shall be updated.
+ - name: Nop
+ test-code: |
+ T_eq_u32( ctx->priority_after, ctx->priority_before );
+ text: |
+ No ${/glossary/priority-eligible:/term} of the previous owner shall be
+ updated.
+ test-epilogue: null
+ test-prologue: null
+- name: RemoveHelper
+ states:
+ - name: 'Yes'
+ test-code: |
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ text: |
+ Each ${/glossary/scheduler-helping:/term} of the previous owner which was
+ only available due to a priority inherited through the thread queue shall
+ be removed from the previous owner.
+ - name: 'No'
+ test-code: |
+ sc = rtems_task_get_priority(
+ ctx->previous_owner,
+ SCHEDULER_B_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_LOW );
+ } else {
+ T_eq_u32( priority, PRIO_HIGH );
+ }
+ #else
+ T_rsc( sc, RTEMS_INVALID_ID );
+ #endif
+ text: |
+ No ${/glossary/scheduler-helping:/term} shall be removed from the
+ previous owner.
+ test-epilogue: null
+ test-prologue: |
+ rtems_status_code sc;
+ rtems_task_priority priority;
+- name: AddHelper
+ states:
+ - name: 'Yes'
+ test-code: |
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ T_rsc_success( sc );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ T_eq_u32( priority, PRIO_VERY_HIGH );
+ } else {
+ T_eq_u32( priority, PRIO_LOW );
+ }
+ text: |
+ Each ${/glossary/scheduler-helping:/term} of the new owner which is only
+ available due to a priority inherited through the thread queue shall be
+ added to the new owner.
+ - name: 'No'
+ test-code: |
+ sc = rtems_task_get_priority(
+ ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ],
+ SCHEDULER_A_ID,
+ &priority
+ );
+ #if defined(RTEMS_SMP)
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ #else
+ T_rsc_success( sc );
+ T_eq_u32( priority, PRIO_HIGH );
+ #endif
+ text: |
+      No ${/glossary/scheduler-helping:/term} shall be added to the new owner.
+ test-epilogue: null
+ test-prologue: |
+ rtems_status_code sc;
+ rtems_task_priority priority;
+- name: Suspended
+ states:
+ - name: 'Yes'
+ test-code: |
+ T_true( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ text: |
+ The new owner shall be suspended.
+ - name: 'No'
+ test-code: |
+ T_false( IsTaskSuspended( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ) );
+ text: |
+      The new owner shall not be suspended.
+ test-epilogue: null
+ test-prologue: null
+pre-conditions:
+- name: EnqueueVariant
+ states:
+ - name: Blocking
+ test-code: |
+ if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_BLOCKS ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue enqueue operation is blocking.
+ - name: Sticky
+ test-code: |
+ if ( ctx->tq_ctx->enqueue_variant != TQ_ENQUEUE_STICKY ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue enqueue operation is sticky.
+ test-epilogue: null
+ test-prologue: null
+- name: InheritedPriority
+ states:
+ - name: Vital
+ test-code: |
+ ctx->inherited_priorities_are_dispensible = false;
+ text: |
+ While at least one priority inherited through the thread queue for the
+ previous owner is the highest priority of the previous owner.
+ - name: Dispensable
+ test-code: |
+ ctx->inherited_priorities_are_dispensible = true;
+ text: |
+ While all priorities inherited through the thread queue for the previous
+ owner are not the highest priority of the previous owner.
+ test-epilogue: null
+ test-prologue: null
+- name: PreviousHelping
+ states:
+ - name: Vital
+ test-code: |
+ ctx->helping_schedules_are_dispensible = false;
+ text: |
+ While at least one ${/glossary/scheduler-helping:/term} of the previous
+ owner is only available due to a priority inherited through the thread
+ queue.
+ - name: Dispensable
+ test-code: |
+ ctx->helping_schedules_are_dispensible = true;
+ text: |
+ While all ${/glossary/scheduler-helping:/term} of the previous owner are
+ not only available due to a priority inherited through the thread queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Scheduler
+ states:
+ - name: Home
+ test-code: |
+ ctx->use_helping_scheduler = false;
+ text: |
+ While the previous owner executes in its
+ ${/glossary/scheduler-home:/term}.
+ - name: Helping
+ test-code: |
+ ctx->use_helping_scheduler = true;
+ text: |
+ While the previous owner executes in a
+ ${/glossary/scheduler-helping:/term} which is available due to a priority
+ inherited through the thread queue.
+ test-epilogue: null
+ test-prologue: null
+- name: NewHelping
+ states:
+ - name: Vital
+ test-code: |
+ ctx->gains_new_helping_scheduler = true;
+ text: |
+ While at least one ${/glossary/scheduler-helping:/term} of the new owner
+ is only available due to a priority inherited through the thread queue.
+ - name: Dispensable
+ test-code: |
+ ctx->gains_new_helping_scheduler = false;
+ text: |
+ While all ${/glossary/scheduler-helping:/term} of the new owner are not
+ only available due to a priority inherited through the thread queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Suspended
+ states:
+ - name: 'Yes'
+ test-code: |
+ ctx->suspended = true;
+ text: |
+      While the new owner is suspended.
+ - name: 'No'
+ test-code: |
+ ctx->suspended = false;
+ text: |
+      While the new owner is not suspended.
+ test-epilogue: null
+ test-prologue: null
+- name: WaitState
+ states:
+ - name: Blocked
+ test-code: |
+ ctx->intend_to_block = false;
+ text: |
+ While the new owner is in the blocked wait state.
+ - name: IntendToBlock
+ test-code: |
+ ctx->intend_to_block = true;
+ text: |
+      While the new owner is in the intend to block wait state.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+ StickyHasNoBlocking: |
+ When a sticky thread queue enqueue operation is performed, the blocked wait
+ state cannot occur.
+ OnlyOneCPU: |
+ Where the system was built with SMP support disabled, exactly one scheduler
+ is present in an application using exactly one processor. There is at most
+ one executing thread. Thread queues with an owner can only be surrendered
+ by the previous owner. Thus, the new owner of the thread queue cannot be
+ in the intend to block wait state.
+test-action: |
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ ActionSticky( ctx );
+ } else {
+ Action( ctx );
+ }
+test-brief: null
+test-cleanup: |
+ if ( ctx->action_performed ) {
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ CleanupSticky( ctx );
+ } else {
+ Cleanup( ctx );
+ }
+ }
+test-context:
+- brief: |
+ This member contains the call within ISR request.
+ description: null
+ member: |
+ CallWithinISRRequest request;
+- brief: |
+ This member contains the barrier to synchronize the runner and the worker.
+ description: null
+ member: |
+ SMP_barrier_Control barrier
+- brief: |
+    If this member is true, then all priorities of the previous owner thread
+    inherited through the thread queue shall be dispensable.
+ description: null
+ member: |
+ bool inherited_priorities_are_dispensible
+- brief: |
+ If this member is true, then all helping schedulers of the previous owner
+ thread gained through the thread queue shall be dispensable.
+ description: null
+ member: |
+ bool helping_schedules_are_dispensible
+- brief: |
+    If this member is true, then the previous owner thread shall use a
+    helping scheduler.
+ description: null
+ member: |
+ bool use_helping_scheduler
+- brief: |
+ If this member is true, then the new owner thread shall gain a vital
+ helping scheduler.
+ description: null
+ member: |
+ bool gains_new_helping_scheduler
+- brief: |
+ If this member is true, then the new owner thread shall be suspended.
+ description: null
+ member: |
+ bool suspended
+- brief: |
+ If this member is true, then the new owner thread shall be in the intend to
+ block wait state.
+ description: null
+ member: |
+ bool intend_to_block
+- brief: |
+ If this member is true, then the action was performed.
+ description: null
+ member: |
+ bool action_performed
+- brief: |
+ This member contains the current priority of the previous owner thread
+ before the thread queue surrender operation.
+ description: null
+ member: |
+ rtems_task_priority priority_before
+- brief: |
+ This member contains the current priority of the previous owner thread
+ after the thread queue surrender operation.
+ description: null
+ member: |
+ rtems_task_priority priority_after
+- brief: |
+ This member contains the identifier of the previous owner thread.
+ description: null
+ member: |
+ rtems_id previous_owner
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+ is the thread queue test context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQContext *${.:name}
+ target: testsuites/validation/tr-tq-surrender-priority-inherit.h
+test-includes:
+- rtems/score/smpbarrier.h
+- rtems/score/threadimpl.h
+test-local-includes:
+- tx-support.h
+- tr-tq-surrender-priority-inherit.h
+test-prepare: |
+ ctx->action_performed = false;
+ ctx->inherited_priorities_are_dispensible = true;
+ ctx->helping_schedules_are_dispensible = true;
+ ctx->use_helping_scheduler = false;
+ ctx->gains_new_helping_scheduler = false;
+ ctx->intend_to_block = false;
+test-setup:
+ brief: null
+ code: |
+ ctx->request.arg = ctx;
+ TQReset( ctx->tq_ctx );
+
+ if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+ SetupSticky( ctx );
+ } else {
+ Setup( ctx );
+ }
+ description: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+ {
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+ }
+
+ static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+ {
+ return ctx->tq_ctx->worker_tcb[ worker ];
+ }
+
+ static void Surrender( Context *ctx )
+ {
+ Status_Control status;
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ ctx->priority_before = GetSelfPriority();
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr( TQGetOwner( ctx->tq_ctx ), ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ] );
+ ctx->priority_after = GetSelfPriority();
+ }
+
+ #if defined(RTEMS_SMP)
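+  /*
+   * This ISR handler delays the thread dispatch of the blocking worker so
+   * that it stays in the intend to block wait state while the runner
+   * performs the surrender (synchronized through barriers B0 and B1).
+   */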
+ static void Delay( void *arg )
+ {
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ }
+
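+  /*
+   * Submit the Delay request from within an interrupt just before the
+   * enqueued worker would block on the thread queue.
+   */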
+ static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+ )
+ {
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+ ctx->request.handler = Delay;
+ CallWithinISRSubmit( &ctx->request );
+ }
+ }
+ #endif
+
+ static void Setup( Context *ctx )
+ {
+ #if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_B_ID, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_LOW );
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_ULTRA_HIGH );
+ #else
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+ #endif
+
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_B, PRIO_VERY_LOW );
+ }
+
+ static void Action( Context *ctx )
+ {
+ Status_Control status;
+ #if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+ #endif
+
+ ctx->action_performed = true;
+ ctx->previous_owner = ctx->tq_ctx->runner_id;
+
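+    /*
+     * Use mutex A to make the inherited priorities and helping schedulers
+     * of the previous owner dispensable as selected by the pre-conditions.
+     */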
+ if (
+ ctx->inherited_priorities_are_dispensible
+ #if defined(RTEMS_SMP)
+ || ctx->helping_schedules_are_dispensible
+ #endif
+ ) {
+ TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+ #if defined(RTEMS_SMP)
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+ #endif
+ }
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ #if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ } else {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+ }
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_HELPER_B_SYNC
+ );
+ TQSynchronizeRunner();
+ }
+
+ /*
+ * In order to run the validation test variant also for the intend to block
+ * wait state, we would need at least three processors. Skip it for now.
+ */
+ if ( ctx->use_helping_scheduler && !ctx->intend_to_block ) {
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = true;
+ TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_BUSY_WAIT );
+
+ while ( rtems_scheduler_get_processor() != 1 ) {
+ /* Wait */
+ }
+
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = false;
+ }
+ #else
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_HELPER_B_SYNC
+ );
+ TQSynchronizeRunner();
+ #endif
+
+ Surrender( ctx );
+
+ #if defined(RTEMS_SMP)
+ if ( ctx->intend_to_block ) {
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ }
+ #endif
+ }
+
+ static void Cleanup( Context *ctx )
+ {
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );
+
+ #if defined(RTEMS_SMP)
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_SURRENDER
+ );
+ }
+ #endif
+
+ if (
+ ctx->inherited_priorities_are_dispensible
+ #if defined(RTEMS_SMP)
+ || ctx->helping_schedules_are_dispensible
+ #endif
+ ) {
+ TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
+ }
+
+ #if defined(RTEMS_SMP)
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ #endif
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+ }
+
+ static void SetupSticky( Context *ctx )
+ {
+ #if defined(RTEMS_SMP)
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, SCHEDULER_B_ID, PRIO_NORMAL );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, SCHEDULER_B_ID, PRIO_LOW );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_HELPER_A, PRIO_NORMAL );
+ #endif
+ }
+
+ static void ActionSticky( Context *ctx )
+ {
+ #if defined(RTEMS_SMP)
+ ctx->action_performed = true;
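+    /* In the sticky variant, worker TQ_HELPER_A is the previous owner */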
+ ctx->previous_owner = ctx->tq_ctx->worker_id[ TQ_HELPER_A ];
+
+ SetSelfPriority( PRIO_LOW );
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSend( ctx->tq_ctx, TQ_HELPER_A, TQ_EVENT_MUTEX_A_OBTAIN );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_OBTAIN
+ );
+ }
+ }
+
+ /*
+ * Take only the priorities into account which are inherited from the
+ * priority inheritance mutex. This avoids having to deal with the ceiling
+ * priority.
+ */
+ ctx->priority_before = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = true;
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_ENQUEUE | TQ_EVENT_BUSY_WAIT
+ );
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ TQSendAndWaitForIntendToBlock(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_ENQUEUE
+ );
+
+ SetSelfScheduler( SCHEDULER_B_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_ENQUEUE
+ );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_HELPER_A ] );
+ TQWaitForEventsReceived( ctx->tq_ctx, TQ_BLOCKER_D );
+ TQWaitForIntendToBlock( ctx->tq_ctx, TQ_BLOCKER_D );
+ YieldTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_D ] );
+ }
+
+ if ( ctx->use_helping_scheduler ) {
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+ }
+
+ if ( ctx->suspended ) {
+ SuspendTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ ctx->tq_ctx->busy_wait[ TQ_HELPER_A ] = false;
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_SCHEDULER_RECORD_START |
+ TQ_EVENT_SURRENDER
+ );
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ T_eq_ptr(
+ TQGetOwner( ctx->tq_ctx ),
+ ctx->tq_ctx->worker_tcb[ TQ_BLOCKER_A ]
+ );
+ ctx->priority_after = TQGetPriority( ctx->tq_ctx, TQ_HELPER_A );
+ #endif
+ }
+
+ static void CleanupSticky( Context *ctx )
+ {
+ #if defined(RTEMS_SMP)
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_ULTRA_HIGH );
+
+ if ( ctx->suspended ) {
+ ResumeTask( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] );
+ }
+
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->gains_new_helping_scheduler ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_D,
+ TQ_EVENT_SURRENDER
+ );
+ }
+
+ if (
+ ctx->inherited_priorities_are_dispensible ||
+ ctx->helping_schedules_are_dispensible
+ ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_HELPER_A,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+
+ if ( ctx->inherited_priorities_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+
+ if ( ctx->helping_schedules_are_dispensible ) {
+ TQSendAndSynchronizeRunner(
+ ctx->tq_ctx,
+ TQ_BLOCKER_C,
+ TQ_EVENT_MUTEX_A_RELEASE
+ );
+ }
+ }
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+ #endif
+ }
+test-target: testsuites/validation/tr-tq-surrender-priority-inherit.c
+test-teardown:
+ brief: null
+ code: |
+ SetSelfScheduler( SCHEDULER_A_ID, PRIO_NORMAL );
+ description: null
+text: |
+  When the thread queue is surrendered using the priority inheritance locking
+  protocol.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Dequeue: Priority
+ Unblock:
+ - if:
+ pre-conditions:
+ Suspended: 'Yes'
+ then: 'No'
+ - if:
+ pre-conditions:
+ WaitState: IntendToBlock
+ then: 'No'
+ - else: 'Yes'
+ RemoveHelper:
+ - if:
+ pre-conditions:
+ PreviousHelping: Dispensable
+ then: 'No'
+ - else: 'Yes'
+ AddHelper:
+ - if:
+ pre-conditions:
+ NewHelping: Dispensable
+ then: 'No'
+ - else: 'Yes'
+ PreviousOwnerPriority:
+ - if:
+ pre-conditions:
+ InheritedPriority: Vital
+ then: Drop
+ - else: Nop
+ Suspended:
+ - specified-by: Suspended
+ pre-conditions:
+ EnqueueVariant: all
+ InheritedPriority: all
+ PreviousHelping: all
+ Scheduler: all
+ NewHelping: all
+ Suspended: all
+ WaitState: all
+- enabled-by: true
+ post-conditions: StickyHasNoBlocking
+ pre-conditions:
+ EnqueueVariant:
+ - Sticky
+ InheritedPriority: all
+ PreviousHelping: all
+ Scheduler: all
+ NewHelping: all
+ Suspended: all
+ WaitState:
+ - Blocked
+- enabled-by:
+ not: RTEMS_SMP
+ post-conditions: OnlyOneCPU
+ pre-conditions:
+ EnqueueVariant: all
+ InheritedPriority: all
+ PreviousHelping: all
+ Scheduler: all
+ NewHelping: all
+ Suspended: all
+ WaitState:
+ - IntendToBlock
+- enabled-by:
+ not: RTEMS_SMP
+ post-conditions: OnlyOneCPU
+ pre-conditions:
+ EnqueueVariant: all
+ InheritedPriority: all
+ PreviousHelping:
+ - Vital
+ Scheduler: all
+ NewHelping: all
+ Suspended: all
+ WaitState: all
+- enabled-by:
+ not: RTEMS_SMP
+ post-conditions: OnlyOneCPU
+ pre-conditions:
+ EnqueueVariant: all
+ InheritedPriority: all
+ PreviousHelping: all
+ Scheduler:
+ - Helping
+ NewHelping: all
+ Suspended: all
+ WaitState: all
+- enabled-by:
+ not: RTEMS_SMP
+ post-conditions: OnlyOneCPU
+ pre-conditions:
+ EnqueueVariant: all
+ InheritedPriority: all
+ PreviousHelping: all
+ Scheduler: all
+ NewHelping:
+ - Vital
+ Suspended: all
+ WaitState: all
+type: requirement
diff --git a/spec/score/tq/req/surrender.yml b/spec/score/tq/req/surrender.yml
new file mode 100644
index 00000000..7f77ab93
--- /dev/null
+++ b/spec/score/tq/req/surrender.yml
@@ -0,0 +1,356 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+post-conditions:
+- name: Dequeue
+ states:
+ - name: FIFO
+ test-code: |
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32(
+ TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ),
+ ctx->expected_blocker_b_counter
+ );
+ text: |
+ The first thread in FIFO order shall be dequeued from the thread queue.
+ - name: Priority
+ test-code: |
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ), 1 );
+ T_eq_u32( TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ), 2 );
+ text: |
+ The first thread in priority order shall be dequeued from the thread
+ queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Unblock
+ states:
+ - name: 'Yes'
+ test-code: |
+ T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ text: |
+      The dequeued thread shall be unblocked by the thread queue surrender
+      operation.
+ - name: 'No'
+ test-code: |
+ T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+ text: |
+      The dequeued thread shall not be unblocked by the thread queue
+      surrender operation.
+ test-epilogue: null
+ test-prologue: |
+ size_t i;
+
+ i = 0;
+pre-conditions:
+- name: HasOwner
+ states:
+ - name: 'Yes'
+ test-code: |
+ if ( ctx->tq_ctx->get_owner == NULL ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue has a previous owner thread.
+ - name: 'No'
+ test-code: |
+ if ( ctx->tq_ctx->get_owner != NULL ) {
+ ${.:skip}
+ }
+ text: |
+      Where the thread queue has no owner thread.
+ test-epilogue: null
+ test-prologue: null
+- name: Discipline
+ states:
+ - name: FIFO
+ test-code: |
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue uses the FIFO discipline.
+ - name: Priority
+ test-code: |
+ if ( ctx->tq_ctx->discipline != TQ_PRIORITY ) {
+ ${.:skip}
+ }
+ text: |
+ Where the thread queue uses the priority discipline.
+ test-epilogue: null
+ test-prologue: null
+- name: WaitState
+ states:
+ - name: Blocked
+ test-code: |
+ ctx->intend_to_block = false;
+ text: |
+ While the dequeued thread is in the blocked wait state.
+ - name: IntendToBlock
+ test-code: |
+ ctx->intend_to_block = true;
+ text: |
+ While the dequeued thread is in the intend to block wait state.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+ OnlyOneExecutingThread: |
+ Where the system was built with SMP support disabled, there is at most one
+ executing thread. Thread queues with an owner can only be surrendered by
+ the previous owner thread. Thus, the dequeued thread cannot be in the
+ intend to block wait state.
+test-action: |
+ Status_Control status;
+
+ TQResetCounter( ctx->tq_ctx );
+ ctx->expected_blocker_b_counter = 0;
+
+ status = TQEnqueue( ctx->tq_ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
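+  /*
+   * For the intend to block wait state, surrender the thread queue while
+   * the worker did not yet complete the blocking operation.
+   */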
+ if ( ctx->intend_to_block ) {
+ #if defined(RTEMS_SMP)
+ SMP_barrier_State state;
+ #endif
+
+ /*
+ * In uniprocessor configurations, it is impossible to dequeue a thread
+ * in FIFO order which is in the intend to block wait state. Run this
+ * test with just one worker.
+ */
+ if ( ctx->tq_ctx->discipline != TQ_FIFO ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ ctx->expected_blocker_b_counter = 2;
+ }
+
+ #if defined(RTEMS_SMP)
+ _SMP_barrier_Control_initialize( &ctx->barrier );
+ _SMP_barrier_State_initialize( &state );
+ #endif
+
+ T_scheduler_set_event_handler( SchedulerBlock, ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+ #if defined(RTEMS_SMP)
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ Surrender( ctx );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ #endif
+ } else {
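+    /*
+     * Let the workers block on the thread queue before the surrender, so
+     * that the dequeued thread is in the blocked wait state.
+     */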
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A );
+
+ TQSend(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_HELPER_A_SYNC | TQ_EVENT_ENQUEUE
+ );
+ TQSynchronizeRunner();
+ TQWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_B );
+ ctx->expected_blocker_b_counter = 2;
+
+ Surrender( ctx );
+ }
+
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_A,
+ TQ_EVENT_SURRENDER
+ );
+
+ if ( ctx->expected_blocker_b_counter != 0 ) {
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_SURRENDER
+ );
+ }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+ This member contains the call within ISR request.
+ description: null
+ member: |
+ CallWithinISRRequest request;
+- brief: |
+ This member contains the barrier to synchronize the runner and the worker.
+ description: null
+ member: |
+ SMP_barrier_Control barrier
+- brief: |
+ If this member is true, then the dequeued thread shall be in the intend to
+ block wait state.
+ description: null
+ member: |
+ bool intend_to_block
+- brief: |
+    This member contains the expected counter of worker B.
+ description: null
+ member: |
+ uint32_t expected_blocker_b_counter
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+ is the thread queue test context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQContext *${.:name}
+ target: testsuites/validation/tr-tq-surrender.h
+test-includes:
+- rtems/score/smpbarrier.h
+- rtems/score/threadimpl.h
+test-local-includes:
+- tx-support.h
+- tr-tq-surrender.h
+test-prepare: null
+test-setup:
+ brief: null
+ code: |
+ ctx->request.arg = ctx;
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+
+ #if defined(RTEMS_SMP)
+ /*
+ * For the mutexes with priority ceiling protocol, we need a scheduler with
+ * two processors to set up the intend to block wait state.
+ */
+ RemoveProcessor( SCHEDULER_B_ID, 1 );
+ AddProcessor( SCHEDULER_A_ID, 1 );
+ #endif
+ description: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+ {
+ return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+ }
+
+ static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+ {
+ return ctx->tq_ctx->worker_tcb[ worker ];
+ }
+
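+  /*
+   * Surrender the thread queue and record the scheduler operations.  In
+   * uniprocessor configurations, this function runs within an interrupt
+   * (see SchedulerBlock()).
+   */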
+ static void Surrender( void *arg )
+ {
+ Context *ctx;
+ Status_Control status;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+
+ status = TQSurrender( ctx->tq_ctx );
+ T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ }
+
+ #if defined(RTEMS_SMP)
+ static void Delay( void *arg )
+ {
+ Context *ctx;
+ SMP_barrier_State state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &state );
+
+ /* B0 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+ /* B1 */
+ _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+ }
+ #endif
+
+ static void SchedulerBlock(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+ )
+ {
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_BLOCK
+ ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+ #if defined(RTEMS_SMP)
+ ctx->request.handler = Delay;
+ #else
+ ctx->request.handler = Surrender;
+ #endif
+ CallWithinISRSubmit( &ctx->request );
+ }
+ }
+test-target: testsuites/validation/tr-tq-surrender.c
+test-teardown:
+ brief: null
+ code: |
+ TQReset( ctx->tq_ctx );
+
+ #if defined(RTEMS_SMP)
+ RemoveProcessor( SCHEDULER_A_ID, 1 );
+ AddProcessor( SCHEDULER_B_ID, 1 );
+ #endif
+ description: null
+text: |
+  When the thread queue is surrendered.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Dequeue:
+ - specified-by: Discipline
+ Unblock:
+ - if:
+ pre-conditions:
+ WaitState: IntendToBlock
+ then: 'No'
+ - else: 'Yes'
+ pre-conditions:
+ HasOwner: all
+ Discipline: all
+ WaitState: all
+- enabled-by:
+ not: RTEMS_SMP
+ post-conditions: OnlyOneExecutingThread
+ pre-conditions:
+ HasOwner:
+ - 'Yes'
+ Discipline: all
+ WaitState:
+ - IntendToBlock
+type: requirement