summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-09-07 09:31:38 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2021-09-07 19:58:14 +0200
commitf37d7e9fde6a318a317b29eaca96add3d22adfcc (patch)
tree37ec9f310841a25a99bc3fb1f078259e9f35ab83
parentspec: Fix fatal error specification (diff)
downloadrtems-central-f37d7e9fde6a318a317b29eaca96add3d22adfcc.tar.bz2
spec: Specify rtems_semaphore_flush()
-rw-r--r--spec/rtems/sem/req/flush.yml328
-rw-r--r--spec/score/tq/req/flush-fifo.yml6
-rw-r--r--spec/score/tq/req/flush-priority-inherit.yml303
-rw-r--r--spec/score/tq/req/flush-priority.yml202
4 files changed, 836 insertions, 3 deletions
diff --git a/spec/rtems/sem/req/flush.yml b/spec/rtems/sem/req/flush.yml
new file mode 100644
index 00000000..76a3d37c
--- /dev/null
+++ b/spec/rtems/sem/req/flush.yml
@@ -0,0 +1,328 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: interface-function
+ uid: ../if/flush
+post-conditions:
+- name: Action
+ states:
+ - name: InvId
+ test-code: |
+ sc = rtems_semaphore_flush( 0xffffffff );
+ T_rsc( sc, RTEMS_INVALID_ID );
+ text: |
+ The return status of ${../if/flush:/name} shall be
+ ${../../status/if/invalid-id:/name}.
+ - name: NotDef
+ test-code: |
+ sc = rtems_semaphore_flush( ctx->tq_ctx.thread_queue_id );
+ T_rsc( sc, RTEMS_NOT_DEFINED );
+ text: |
+ The return status of ${../if/flush:/name} shall be
+ ${../../status/if/not-defined:/name}.
+ - name: FlushFIFO
+ test-code: |
+ ${/score/tq/req/flush-fifo:/test-run}( &ctx->tq_ctx );
+ text: |
+ The calling task shall flush the semaphore as specified by
+ ${/score/tq/req/flush-fifo}.
+ - name: FlushPriority
+ test-code: |
+ ${/score/tq/req/flush-priority:/test-run}( &ctx->tq_ctx, true );
+ text: |
+ The calling task shall flush the semaphore as specified by
+ ${/score/tq/req/flush-priority}.
+ - name: FlushPriorityCeiling
+ test-code: |
+ ${/score/tq/req/flush-priority:/test-run}( &ctx->tq_ctx, false );
+ text: |
+ The calling task shall flush the semaphore as specified by
+ ${/score/tq/req/flush-priority}.
+ - name: FlushPriorityInherit
+ test-code: |
+ ${/score/tq/req/flush-priority-inherit:/test-run}( &ctx->tq_ctx );
+ text: |
+ The calling task shall flush the semaphore as specified by
+ ${/score/tq/req/flush-priority-inherit}.
+ test-epilogue: null
+ test-prologue: |
+ rtems_status_code sc;
+pre-conditions:
+- name: Class
+ states:
+ - name: Counting
+ test-code: |
+ ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ text: |
+ While the semaphore object is a counting semaphore.
+ - name: Simple
+ test-code: |
+ ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
+ ctx->initial_count = 0;
+ ctx->tq_ctx.enqueue_prepare = TQDoNothing;
+ ctx->tq_ctx.enqueue_done = TQDoNothing;
+ text: |
+ While the semaphore object is a simple binary semaphore.
+ - name: Binary
+ test-code: |
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ text: |
+ While the semaphore object is a binary semaphore.
+ - name: PrioCeiling
+ test-code: |
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ text: |
+ While the semaphore object is a priority ceiling semaphore.
+ - name: PrioInherit
+ test-code: |
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ text: |
+ While the semaphore object is a priority inheritance semaphore.
+ - name: MrsP
+ test-code: |
+ ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+ ctx->initial_count = 1;
+ ctx->tq_ctx.enqueue_prepare = EnqueuePrepare;
+ ctx->tq_ctx.enqueue_done = TQSurrenderClassicSem;
+ text: |
+ While the semaphore object is a MrsP semaphore.
+ test-epilogue: null
+ test-prologue: null
+- name: Discipline
+ states:
+ - name: FIFO
+ test-code: |
+ ctx->attribute_set |= RTEMS_FIFO;
+ ctx->tq_ctx.discipline = TQ_FIFO;
+ text: |
+ While the semaphore uses the FIFO task wait queue discipline.
+ - name: Priority
+ test-code: |
+ ctx->attribute_set |= RTEMS_PRIORITY;
+ ctx->tq_ctx.discipline = TQ_PRIORITY;
+ text: |
+ While the semaphore uses the priority task wait queue discipline.
+ test-epilogue: null
+ test-prologue: null
+- name: Id
+ states:
+ - name: Valid
+ test-code: |
+ /* Nothing to prepare */
+ text: |
+ While the ${../if/flush:/params[0]/name} parameter is associated with
+ the semaphore.
+ - name: Invalid
+ test-code: |
+ /* Nothing to prepare */
+ text: |
+ While the ${../if/flush:/params[0]/name} parameter is not associated
+ with a semaphore.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+ NeedsPriorityDiscipline: |
+ Binary semaphores with a locking protocol are required to use the priority
+ task wait queue discipline.
+ NoMrsP: |
+ Where the system is built with SMP support disabled, the MrsP locking
+ protocol is not available.
+test-action: |
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_create(
+ NAME,
+ ctx->initial_count,
+ ctx->attribute_set,
+ PRIO_ULTRA_HIGH,
+ &ctx->tq_ctx.thread_queue_id
+ );
+ T_rsc_success( sc );
+test-brief: null
+test-cleanup: |
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_delete( ctx->tq_ctx.thread_queue_id );
+ T_rsc_success( sc );
+test-context:
+- brief: |
+ This member contains the thread queue test context.
+ description: null
+ member: |
+ TQContext tq_ctx;
+- brief: |
+ This member specifies the attribute set of the semaphore.
+ description: null
+ member: |
+ rtems_attribute attribute_set;
+- brief: |
+ This member specifies the initial count of the semaphore.
+ description: null
+ member: |
+ uint32_t initial_count;
+test-context-support: null
+test-description: null
+test-header: null
+test-includes:
+- rtems.h
+- string.h
+test-local-includes:
+- tr-tq-flush-fifo.h
+- tr-tq-flush-priority.h
+- tr-tq-flush-priority-inherit.h
+- tx-thread-queue.h
+- tx-support.h
+test-prepare: |
+ ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
+test-setup:
+ brief: null
+ code: |
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->tq_ctx.enqueue = Enqueue;
+ ctx->tq_ctx.flush = Flush;
+ ctx->tq_ctx.surrender = TQDoNothing;
+ ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+ TQInitialize( &ctx->tq_ctx );
+ description: null
+test-stop: null
+test-support: |
+ #define NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
+ typedef ${.:/test-context-type} Context;
+
+ static void EnqueuePrepare( TQContext *tq_ctx )
+ {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+ }
+
+ static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
+ {
+ rtems_status_code sc;
+
+ (void) wait;
+ sc = rtems_semaphore_obtain(
+ tq_ctx->thread_queue_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc( sc, RTEMS_UNSATISFIED );
+
+ return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
+ }
+
+ static void Flush( TQContext *tq_ctx )
+ {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
+ T_rsc_success( sc );
+ }
+test-target: testsuites/validation/tc-sem-flush.c
+test-teardown:
+ brief: null
+ code: |
+ TQDestroy( &ctx->tq_ctx );
+ description: null
+text: ${.:text-template}
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Action: InvId
+ pre-conditions:
+ Class: N/A
+ Discipline: N/A
+ Id:
+ - Invalid
+- enabled-by: true
+ post-conditions:
+ Action:
+ - if:
+ pre-conditions:
+ Discipline: Priority
+ then: FlushPriority
+ - else: FlushFIFO
+ pre-conditions:
+ Class:
+ - Binary
+ - Counting
+ - Simple
+ Discipline: all
+ Id:
+ - Valid
+- enabled-by: true
+ post-conditions:
+ Action: FlushPriorityCeiling
+ pre-conditions:
+ Class:
+ - PrioCeiling
+ Discipline:
+ - Priority
+ Id:
+ - Valid
+- enabled-by: true
+ post-conditions:
+ Action: FlushPriorityInherit
+ pre-conditions:
+ Class:
+ - PrioInherit
+ Discipline:
+ - Priority
+ Id:
+ - Valid
+- enabled-by: true
+ post-conditions: NoMrsP
+ pre-conditions:
+ Class:
+ - MrsP
+ Discipline:
+ - Priority
+ Id:
+ - Valid
+- enabled-by: true
+ post-conditions: NeedsPriorityDiscipline
+ pre-conditions:
+ Class:
+ - PrioCeiling
+ - PrioInherit
+ - MrsP
+ Discipline:
+ - FIFO
+ Id:
+ - Valid
+- enabled-by: RTEMS_SMP
+ post-conditions:
+ Action: NotDef
+ pre-conditions:
+ Class:
+ - MrsP
+ Discipline:
+ - Priority
+ Id:
+ - Valid
+type: requirement
diff --git a/spec/score/tq/req/flush-fifo.yml b/spec/score/tq/req/flush-fifo.yml
index 636288bc..f7842175 100644
--- a/spec/score/tq/req/flush-fifo.yml
+++ b/spec/score/tq/req/flush-fifo.yml
@@ -69,10 +69,10 @@ test-action: |
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
if ( ctx->tq_ctx->how_many > 0 ) {
- TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
- TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
T_scheduler_set_event_handler( SchedulerEvent, ctx );
- TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
} else {
TQSchedulerRecordStart( ctx->tq_ctx );
TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
diff --git a/spec/score/tq/req/flush-priority-inherit.yml b/spec/score/tq/req/flush-priority-inherit.yml
new file mode 100644
index 00000000..16dbf03e
--- /dev/null
+++ b/spec/score/tq/req/flush-priority-inherit.yml
@@ -0,0 +1,303 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+- role: validation
+ uid: flush-filter
+- role: validation
+ uid: flush-remove-timer
+- role: validation
+ uid: flush-unblock
+post-conditions:
+- name: Extract
+ states:
+ - name: Nop
+ test-code: |
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetPriorityUpdate( ctx, &i ), &T_scheduler_event_null );
+ text: |
+ No operation shall be performed.
+ - name: All
+ test-code: |
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+ if ( ctx->minimum ) {
+ /*
+ * This priority update is carried out by
+ * _Thread_queue_Flush_critical().
+ */
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+ if ( ctx->minimum ) {
+ /*
+ * This superfluous priority update is carried out by
+ * _Thread_queue_Enqueue() since TQ_BLOCKER_B would have inherited its
+ * priority to TQ_BLOCKER_A if it were not flushed from the thread
+ * queue.
+ */
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ }
+
+ event = GetPriorityUpdate( ctx, &i );
+ T_eq_ptr( event, &T_scheduler_event_null );
+
+ T_eq_u32(
+ GetPriority( ctx->tq_ctx->worker_id[ TQ_BLOCKER_A ] ),
+ PRIO_HIGH
+ );
+ text: |
+ The enqueued threads of the thread queue shall be extracted in
+ ${/glossary/priority:/term} order for each priority queue associated with
+ a scheduler. The priority queues of the thread queue shall be accessed
+ in ${/glossary/fifo:/term} order.
+ test-epilogue: null
+ test-prologue: |
+ size_t i;
+ const T_scheduler_event *event;
+
+ i = 0;
+- name: PriorityUpdate
+ states:
+ - name: 'No'
+ test-code: |
+ /* Checked by ``Extract`` post-condition state ``Nop`` */
+ text: |
+ The ${/glossary/priority-current:/term} of the owner of the thread queue
+ shall not be updated by the thread queue flush operation.
+ - name: 'Yes'
+ test-code: |
+ /* Checked by ``Extract`` post-condition state ``All`` */
+ text: |
+ The ${/glossary/priority-current:/term} of the owner of the thread queue
+ shall be updated by the thread queue flush operation to reflect the loss
+ of inherited priorities of the flushed threads.
+ test-epilogue: null
+ test-prologue: null
+pre-conditions:
+- name: Priority
+ states:
+ - name: Minimum
+ test-code: |
+ ctx->minimum = true;
+ text: |
+ While a minimum priority of the owner of the thread queue is inherited
+ from a thread enqueued on the thread queue.
+ - name: NotMinimum
+ test-code: |
+ ctx->minimum = false;
+ text: |
+ While no minimum priority of the owner of the thread queue is inherited
+ from a thread enqueued on the thread queue.
+ test-epilogue: null
+ test-prologue: null
+- name: Queue
+ states:
+ - name: Empty
+ test-code: |
+ ctx->tq_ctx->how_many = 0;
+ text: |
+ While the thread queue is empty.
+ - name: NonEmpty
+ test-code: |
+ ctx->tq_ctx->how_many = 3;
+ text: |
+ While the thread queue has at least one enqueued thread.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+ if ( ctx->tq_ctx->how_many > 0 ) {
+ if ( ctx->minimum ) {
+ if ( rtems_configuration_get_maximum_processors() > 1 ) {
+ TQSetScheduler(
+ ctx->tq_ctx, TQ_BLOCKER_B,
+ ctx->tq_ctx->other_scheduler_id,
+ PRIO_HIGH
+ );
+ } else {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ ctx->tq_ctx->runner_scheduler_id,
+ PRIO_HIGH
+ );
+ }
+
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ } else {
+ TQSetScheduler(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ ctx->tq_ctx->runner_scheduler_id,
+ PRIO_HIGH
+ );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_HIGH );
+
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ }
+
+ T_scheduler_set_event_handler( SchedulerEvent, ctx );
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ } else {
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+ }
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+ This member contains the call within ISR request.
+ description: null
+ member: |
+ CallWithinISRRequest request;
+- brief: |
+ If this member is true, then a minimum priority of the owner of the thread
+ queue shall be inherited from a thread enqueued on the thread queue.
+ description: null
+ member: |
+ bool minimum;
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+ is the thread queue test context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQContext *${.:name}
+ target: testsuites/validation/tr-tq-flush-priority-inherit.h
+test-includes: []
+test-local-includes:
+- tx-support.h
+- tr-tq-flush-priority-inherit.h
+test-prepare: null
+test-setup:
+ brief: null
+ code: |
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_HIGH );
+ description: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+ {
+ return TQGetNextUnblock( ctx->tq_ctx, index );
+ }
+
+ static const T_scheduler_event *GetPriorityUpdate( Context *ctx, size_t *index )
+ {
+ return T_scheduler_next(
+ &ctx->tq_ctx->scheduler_log.header,
+ T_SCHEDULER_UPDATE_PRIORITY,
+ index
+ );
+ }
+
+ static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+ {
+ return ctx->tq_ctx->worker_tcb[ worker ];
+ }
+
+ static void Flush( void *arg )
+ {
+ Context *ctx;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQFlush( ctx->tq_ctx );
+ }
+
+ static void SchedulerEvent( void *arg, const T_scheduler_event *event )
+ {
+ Context *ctx;
+
+ ctx = arg;
+
+ if ( event->operation == T_SCHEDULER_BLOCK ) {
+ ctx->request.handler = Flush;
+ ctx->request.arg = ctx;
+ CallWithinISRSubmit( &ctx->request );
+ T_scheduler_set_event_handler( NULL, NULL );
+ }
+ }
+test-target: testsuites/validation/tr-tq-flush-priority-inherit.c
+test-teardown:
+ brief: null
+ code: |
+ TQReset( ctx->tq_ctx );
+ description: null
+text: |
+ When the ${/glossary/priority:/term} thread queue with support for
+ ${/glossary/priorityinheritance:/term} is flushed.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Extract: Nop
+ PriorityUpdate: 'No'
+ pre-conditions:
+ Queue:
+ - Empty
+ Priority: N/A
+- enabled-by: true
+ post-conditions:
+ Extract: All
+ PriorityUpdate: 'No'
+ pre-conditions:
+ Queue:
+ - NonEmpty
+ Priority:
+ - NotMinimum
+- enabled-by: true
+ post-conditions:
+ Extract: All
+ PriorityUpdate: 'Yes'
+ pre-conditions:
+ Queue:
+ - NonEmpty
+ Priority:
+ - Minimum
+type: requirement
diff --git a/spec/score/tq/req/flush-priority.yml b/spec/score/tq/req/flush-priority.yml
new file mode 100644
index 00000000..322d81dd
--- /dev/null
+++ b/spec/score/tq/req/flush-priority.yml
@@ -0,0 +1,202 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links:
+- role: requirement-refinement
+ uid: ../if/group
+- role: validation
+ uid: flush-filter
+- role: validation
+ uid: flush-remove-timer
+- role: validation
+ uid: flush-unblock
+post-conditions:
+- name: Operation
+ states:
+ - name: Nop
+ test-code: |
+ /* Event receive */
+ T_eq_ptr( GetUnblock( ctx, &i )->thread, GetTCB( ctx, TQ_BLOCKER_A ) );
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ text: |
+ No operation shall be performed.
+ - name: TryExtract
+ test-code: |
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_D ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, NULL );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_C ) );
+
+ event = GetUnblock( ctx, &i );
+ T_eq_ptr( event->executing, GetTCB( ctx, TQ_BLOCKER_B ) );
+ T_eq_ptr( event->thread, GetTCB( ctx, TQ_BLOCKER_B ) );
+
+ T_eq_ptr( GetUnblock( ctx, &i ), &T_scheduler_event_null );
+ text: |
+ The enqueued threads of the thread queue may be extracted in
+ ${/glossary/priority:/term} order for each priority queue associated with
+ a scheduler. The priority queues of the thread queue shall be accessed
+ in ${/glossary/fifo:/term} order.
+ test-epilogue: null
+ test-prologue: |
+ size_t i;
+ const T_scheduler_event *event;
+
+ i = 0;
+pre-conditions:
+- name: Queue
+ states:
+ - name: Empty
+ test-code: |
+ ctx->tq_ctx->how_many = 0;
+ text: |
+ While the thread queue is empty.
+ - name: NonEmpty
+ test-code: |
+ ctx->tq_ctx->how_many = 3;
+ text: |
+ While the thread queue has at least one enqueued thread.
+ test-epilogue: null
+ test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+ if ( ctx->tq_ctx->how_many > 0 ) {
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
+ T_scheduler_set_event_handler( SchedulerEvent, ctx );
+ TQSendAndWaitForExecutionStop(
+ ctx->tq_ctx,
+ TQ_BLOCKER_B,
+ TQ_EVENT_ENQUEUE
+ );
+ } else {
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+ }
+
+ TQSchedulerRecordStop( ctx->tq_ctx );
+ TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+ This member contains the call within ISR request.
+ description: null
+ member: |
+ CallWithinISRRequest request;
+test-context-support: null
+test-description: null
+test-header:
+ code: null
+ freestanding: false
+ includes: []
+ local-includes:
+ - tx-thread-queue.h
+ run-params:
+ - description: |
+ is the thread queue test context.
+ dir: inout
+ name: tq_ctx
+ specifier: TQContext *${.:name}
+ - description: |
+ is true, if the object using the thread queue supports multiple priority
+ queues, otherwise it is false.
+ dir: null
+ name: supports_multiple_priority_queues
+ specifier: bool ${.:name}
+ target: testsuites/validation/tr-tq-flush-priority.h
+test-includes: []
+test-local-includes:
+- tx-support.h
+- tr-tq-flush-priority.h
+test-prepare: null
+test-setup:
+ brief: null
+ code: |
+ TQReset( ctx->tq_ctx );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+
+ if (
+ ctx->supports_multiple_priority_queues &&
+ rtems_configuration_get_maximum_processors() > 1
+ ) {
+ TQSetScheduler(
+ ctx->tq_ctx, TQ_BLOCKER_B,
+ ctx->tq_ctx->other_scheduler_id,
+ PRIO_HIGH
+ );
+ } else {
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_HIGH );
+ }
+
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
+ TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_D, PRIO_ULTRA_HIGH );
+ description: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ static const T_scheduler_event *GetUnblock( Context *ctx, size_t *index )
+ {
+ return TQGetNextUnblock( ctx->tq_ctx, index );
+ }
+
+ static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+ {
+ return ctx->tq_ctx->worker_tcb[ worker ];
+ }
+
+ static void Flush( void *arg )
+ {
+ Context *ctx;
+
+ ctx = arg;
+ TQSchedulerRecordStart( ctx->tq_ctx );
+ TQFlush( ctx->tq_ctx );
+ }
+
+ static void SchedulerEvent( void *arg, const T_scheduler_event *event )
+ {
+ Context *ctx;
+
+ ctx = arg;
+
+ if ( event->operation == T_SCHEDULER_BLOCK ) {
+ ctx->request.handler = Flush;
+ ctx->request.arg = ctx;
+ CallWithinISRSubmit( &ctx->request );
+ T_scheduler_set_event_handler( NULL, NULL );
+ }
+ }
+test-target: testsuites/validation/tr-tq-flush-priority.c
+test-teardown:
+ brief: null
+ code: |
+ TQReset( ctx->tq_ctx );
+ description: null
+text: |
+ When the ${/glossary/priority:/term} thread queue is flushed.
+transition-map:
+- enabled-by: true
+ post-conditions:
+ Operation: Nop
+ pre-conditions:
+ Queue:
+ - Empty
+- enabled-by: true
+ post-conditions:
+ Operation: TryExtract
+ pre-conditions:
+ Queue:
+ - NonEmpty
+type: requirement