summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-04-08 15:19:30 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2021-07-12 14:01:24 +0200
commit16e8e2d8e32bd86f84d9cc028e30b25a5e579e13 (patch)
treeb35dbdedd0343c4526aabfa58cc576fe36da3028
parent634b1ed868611ccfea1b72abf74bcf3311661196 (diff)
validation: Add thread queue test support
-rw-r--r--spec/build/testsuites/validation/libvalidation.yml1
-rw-r--r--testsuites/validation/tx-thread-queue.c584
-rw-r--r--testsuites/validation/tx-thread-queue.h392
3 files changed, 977 insertions, 0 deletions
diff --git a/spec/build/testsuites/validation/libvalidation.yml b/spec/build/testsuites/validation/libvalidation.yml
index 77a36f420b..cd661e16e0 100644
--- a/spec/build/testsuites/validation/libvalidation.yml
+++ b/spec/build/testsuites/validation/libvalidation.yml
@@ -15,5 +15,6 @@ source:
- testsuites/validation/tx-interrupt.c
- testsuites/validation/tx-memory-alloc.c
- testsuites/validation/tx-support.c
+- testsuites/validation/tx-thread-queue.c
target: validation
type: build
diff --git a/testsuites/validation/tx-thread-queue.c b/testsuites/validation/tx-thread-queue.c
new file mode 100644
index 0000000000..870814b126
--- /dev/null
+++ b/testsuites/validation/tx-thread-queue.c
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of the thread queue test
+ * support.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-thread-queue.h"
+#include "tx-support.h"
+#include "ts-config.h"
+
+#include <rtems/rtems/semimpl.h>
+
+void TQSend(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+ SendEvents( ctx->worker_id[ worker ], events );
+}
+
+void TQSendAndWaitForExecutionStop(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+#if defined( RTEMS_SMP )
+ ctx->event_received[ worker ] = false;
+#endif
+
+ SendEvents( ctx->worker_id[ worker ], events );
+
+#if defined( RTEMS_SMP )
+ while ( !ctx->event_received[ worker ] ) {
+ /* Wait */
+ }
+
+ WaitForExecutionStop( ctx->worker_id[ worker ] );
+#endif
+}
+
+void TQWaitForIntendToBlock( const TQContext *ctx, TQWorkerKind worker )
+{
+ const rtems_tcb *thread;
+ Thread_Wait_flags intend_to_block;
+
+ thread = GetThread( ctx->worker_id[ worker ] );
+ T_assert_not_null( thread );
+ intend_to_block = THREAD_WAIT_CLASS_OBJECT |
+ THREAD_WAIT_STATE_INTEND_TO_BLOCK;
+
+ while ( _Thread_Wait_flags_get_acquire( thread ) != intend_to_block ) {
+ /* Wait */
+ }
+}
+
+void TQSendAndWaitForIntendToBlock(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+ TQSend( ctx, worker, events );
+ TQWaitForIntendToBlock( ctx, worker );
+}
+
+void TQClearDone( TQContext *ctx, TQWorkerKind worker )
+{
+ ctx->done[ worker ] = false;
+}
+
+void TQWaitForDone( const TQContext *ctx, TQWorkerKind worker )
+{
+ while ( !ctx->done[ worker ] ) {
+ /* Wait */
+ }
+}
+
+void TQSynchronizeRunner( void )
+{
+ ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC );
+}
+
+void TQSynchronizeRunner2( void )
+{
+ ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC | TQ_EVENT_RUNNER_SYNC_2 );
+}
+
+void TQResetCounter( TQContext *ctx )
+{
+ ctx->counter = 0;
+ memset( &ctx->worker_counter, 0, sizeof( ctx->worker_counter ) );
+}
+
+uint32_t TQGetCounter( const TQContext *ctx )
+{
+ return ctx->counter;
+}
+
+uint32_t TQGetWorkerCounter( const TQContext *ctx, TQWorkerKind worker )
+{
+ return ctx->worker_counter[ worker ];
+}
+
+void TQMutexObtain( const TQContext *ctx, TQMutex mutex )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(
+ ctx->mutex_id[ mutex ],
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+}
+
+void TQMutexRelease( const TQContext *ctx, TQMutex mutex )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( ctx->mutex_id[ mutex ] );
+ T_rsc_success( sc );
+}
+
+void TQSetPriority(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ Priority priority
+)
+{
+ SetPriority( ctx->worker_id[ worker ], priority );
+}
+
+void TQSetScheduler(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_id scheduler_id,
+ Priority priority
+)
+{
+#if defined( RTEMS_SMP )
+ rtems_status_code sc;
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_id[ worker ],
+ scheduler_id,
+ priority
+ );
+ T_rsc_success( sc );
+#else
+ (void) scheduler_id;
+ SetPriority( ctx->worker_id[ worker ], priority );
+#endif
+}
+
+static void Worker( rtems_task_argument arg, TQWorkerKind worker )
+{
+ TQContext *ctx;
+
+ ctx = (TQContext *) arg;
+
+ while ( true ) {
+ rtems_event_set events;
+
+ events = ReceiveAnyEvents();
+ ctx->event_received[ worker ] = true;
+
+ if ( ( events & TQ_EVENT_HELPER_OTHER_SYNC ) != 0 ) {
+ SendEvents( ctx->worker_id[ TQ_HELPER_OTHER ], TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_HELPER_THIRD_SYNC ) != 0 ) {
+ SendEvents( ctx->worker_id[ TQ_HELPER_THIRD ], TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_PREPARE ) != 0 ) {
+ TQEnqueuePrepare( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE ) != 0 ) {
+ uint32_t counter;
+
+ TQEnqueue( ctx, ctx->wait );
+
+ counter = ctx->counter;
+ ++counter;
+ ctx->counter = counter;
+ ctx->worker_counter[ worker ] = counter;
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_DONE ) != 0 ) {
+ TQEnqueueDone( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_SURRENDER ) != 0 ) {
+ TQSurrender( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_A_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_A );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_A_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_A );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_B_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_B );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_B_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_B );
+ }
+
+ if ( ( events & TQ_EVENT_BUSY_WAIT ) != 0 ) {
+ while ( ctx->busy_wait[ worker ] ) {
+ /* Wait */
+ }
+ }
+
+ if ( ( events & TQ_EVENT_RUNNER_SYNC ) != 0 ) {
+ SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_RUNNER_SYNC_2 ) != 0 ) {
+ SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC_2 );
+ }
+
+ ctx->done[ worker ] = true;
+ }
+}
+
+static void BlockerA( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_A );
+}
+
+static void BlockerB( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_B );
+}
+
+static void BlockerC( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_C );
+}
+
+static void BlockerD( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_D );
+}
+
+static void BlockerE( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_E );
+}
+
+#if defined( RTEMS_SMP )
+static void HelperHome( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_HOME );
+}
+
+static void HelperOther( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_OTHER );
+}
+
+static void HelperThird( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_THIRD );
+}
+#endif
+
+void TQInitialize( TQContext *ctx )
+{
+ rtems_status_code sc;
+ size_t i;
+
+ ctx->runner_id = rtems_task_self();
+ ctx->runner_tcb = GetThread( RTEMS_SELF );
+ SetSelfPriority( PRIO_NORMAL );
+ ctx->worker_id[ TQ_BLOCKER_A ] = CreateTask( "BLKA", PRIO_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_A ], BlockerA, ctx );
+ ctx->worker_id[ TQ_BLOCKER_B ] = CreateTask( "BLKB", PRIO_VERY_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_B ], BlockerB, ctx );
+ ctx->worker_id[ TQ_BLOCKER_C ] = CreateTask( "BLKC", PRIO_ULTRA_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_C ], BlockerC, ctx );
+ ctx->worker_id[ TQ_BLOCKER_D ] = CreateTask( "BLKD", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_D ], BlockerD, ctx );
+ ctx->worker_id[ TQ_BLOCKER_E ] = CreateTask( "BLKE", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_E ], BlockerE, ctx );
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', 'A' + i ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
+ 0,
+ &ctx->mutex_id[ i ]
+ );
+ T_rsc_success( sc );
+ }
+
+ sc = rtems_task_get_scheduler( RTEMS_SELF, &ctx->runner_scheduler_id );
+ T_rsc_success( sc );
+
+#if defined( RTEMS_SMP )
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_B_NAME,
+ &ctx->other_scheduler_id
+ );
+ T_rsc_success( sc );
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_C_NAME,
+ &ctx->third_scheduler_id
+ );
+ T_rsc_success( sc );
+
+ ctx->worker_id[ TQ_HELPER_HOME ] = CreateTask( "HLPH", PRIO_VERY_ULTRA_HIGH );
+ StartTask( ctx->worker_id[ TQ_HELPER_HOME ], HelperHome, ctx );
+
+ ctx->worker_id[ TQ_HELPER_OTHER ] = CreateTask( "HLPO", PRIO_VERY_LOW );
+ StartTask( ctx->worker_id[ TQ_HELPER_OTHER ], HelperOther, ctx );
+
+ TQSetScheduler(
+ ctx,
+ TQ_HELPER_OTHER,
+ ctx->other_scheduler_id,
+ PRIO_VERY_LOW
+ );
+
+ if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+ ctx->worker_id[ TQ_HELPER_THIRD ] = CreateTask( "HLPT", PRIO_VERY_LOW );
+ StartTask( ctx->worker_id[ TQ_HELPER_THIRD ], HelperThird, ctx );
+
+ TQSetScheduler(
+ ctx,
+ TQ_HELPER_THIRD,
+ ctx->third_scheduler_id,
+ PRIO_VERY_LOW
+ );
+ }
+#endif
+
+ for (i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_tcb ); ++i) {
+ ctx->worker_tcb[ i ] = GetThread( ctx->worker_id[ i ] );
+ }
+}
+
+void TQDestroy( TQContext *ctx )
+{
+ size_t i;
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
+ DeleteTask( ctx->worker_id[ i ] );
+ }
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
+ if ( ctx->mutex_id[ i ] != 0 ) {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_delete( ctx->mutex_id[ i ] );
+ T_rsc_success( sc );
+ }
+ }
+
+ RestoreRunnerPriority();
+}
+
+void TQReset( TQContext *ctx )
+{
+ rtems_id scheduler_id;
+
+ scheduler_id = ctx->runner_scheduler_id;
+ TQSetScheduler( ctx, TQ_BLOCKER_A, scheduler_id, PRIO_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_B, scheduler_id, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_C, scheduler_id, PRIO_ULTRA_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_D, scheduler_id, PRIO_LOW );
+ TQSetScheduler( ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+}
+
+void TQGetProperties( TQContext *ctx, TQWorkerKind enqueued_worker )
+{
+ ( *ctx->get_properties )( ctx, enqueued_worker );
+}
+
+Status_Control TQConvertStatus( TQContext *ctx, Status_Control status )
+{
+ return ( *ctx->convert_status )( status );
+}
+
+void TQEnqueuePrepare( TQContext *ctx )
+{
+ ( *ctx->enqueue_prepare )( ctx, ctx->how_many );
+}
+
+Status_Control TQEnqueue( TQContext *ctx, TQWait wait )
+{
+ return ( *ctx->enqueue )( ctx, wait );
+}
+
+void TQEnqueueDone( TQContext *ctx )
+{
+ ( *ctx->enqueue_done )( ctx );
+}
+
+void TQSurrender( TQContext *ctx )
+{
+ ( *ctx->surrender )( ctx );
+}
+
+void TQSchedulerRecordStart( TQContext *ctx )
+{
+ T_scheduler_log *log;
+
+ log = T_scheduler_record_40( &ctx->scheduler_log );
+ T_null( log );
+}
+
+void TQSchedulerRecordStop( TQContext *ctx )
+{
+ T_scheduler_log *log;
+
+ log = T_scheduler_record( NULL );
+ T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+}
+
+const T_scheduler_event *TQGetNextBlock( TQContext *ctx, size_t *index )
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_BLOCK,
+ index
+ );
+}
+
+const T_scheduler_event *TQGetNextUnblock( TQContext *ctx, size_t *index )
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_UNBLOCK,
+ index
+ );
+}
+
+Status_Control TQConvertStatusClassic( Status_Control status )
+{
+ return STATUS_BUILD( STATUS_GET_CLASSIC( status ), 0 );
+}
+
+void TQEnqueuePrepareClassicSem( TQContext *ctx, uint32_t how_many )
+{
+ Status_Control status;
+
+ (void) how_many;
+
+ status = TQEnqueue( ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
+}
+
+Status_Control TQEnqueueClassicSem( TQContext *ctx, TQWait wait )
+{
+ rtems_status_code sc;
+ rtems_option option;
+ rtems_option timeout;
+
+ switch ( wait ) {
+ case TQ_WAIT_FOREVER:
+ option = RTEMS_WAIT;
+ timeout = RTEMS_NO_TIMEOUT;
+ break;
+ case TQ_WAIT_TICKS:
+ option = RTEMS_WAIT;
+ timeout = UINT32_MAX;
+ break;
+ default:
+ option = RTEMS_NO_WAIT;
+ timeout = 0;
+ break;
+ }
+
+ sc = rtems_semaphore_obtain( ctx->thread_queue_id, option, timeout );
+
+ return STATUS_BUILD( sc, 0 );
+}
+
+void TQSurrenderClassicSem( TQContext *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( ctx->thread_queue_id );
+ T_rsc_success( sc );
+}
+
+uint32_t TQSemGetCount( TQSemContext *ctx )
+{
+ return ( *ctx->get_count )( ctx );
+}
+
+void TQSemSetCount( TQSemContext *ctx, uint32_t count )
+{
+ ( *ctx->set_count )( ctx, count );
+}
+
+uint32_t TQSemGetCountClassic( TQSemContext *ctx )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+ uint32_t count;
+
+ semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ count = semaphore->Core_control.Semaphore.count;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+
+ return count;
+}
+
+void TQSemSetCountClassic( TQSemContext *ctx, uint32_t count )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+
+ semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ semaphore->Core_control.Semaphore.count = count;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+}
+
+rtems_tcb *TQMtxGetOwner( TQMtxContext *ctx )
+{
+ return ( *ctx->get_owner )( ctx );
+}
+
+rtems_tcb *TQMtxGetOwnerClassic( TQMtxContext *ctx )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+ rtems_tcb *thread;
+
+ semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ thread = semaphore->Core_control.Wait_queue.Queue.owner;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+
+ return thread;
+}
diff --git a/testsuites/validation/tx-thread-queue.h b/testsuites/validation/tx-thread-queue.h
new file mode 100644
index 0000000000..73ad49e6e2
--- /dev/null
+++ b/testsuites/validation/tx-thread-queue.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This header file provides the functions to test the
+ * @ref RTEMSScoreThreadQueue.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TX_THREAD_QUEUE_H
+#define _TX_THREAD_QUEUE_H
+
+#include "tx-support.h"
+
+#include <rtems/test-scheduler.h>
+#include <rtems/score/status.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestSuites
+ *
+ * @{
+ */
+
+typedef enum {
+ TQ_BLOCKER_A,
+ TQ_BLOCKER_B,
+ TQ_BLOCKER_C,
+ TQ_BLOCKER_D,
+ TQ_BLOCKER_E,
+ TQ_HELPER_HOME,
+ TQ_HELPER_OTHER,
+ TQ_HELPER_THIRD,
+ TQ_WORKER_COUNT
+} TQWorkerKind;
+
+typedef enum {
+ TQ_MUTEX_A,
+ TQ_MUTEX_B,
+ TQ_MUTEX_COUNT
+} TQMutex;
+
+typedef enum {
+ TQ_FIFO,
+ TQ_PRIORITY
+} TQDiscipline;
+
+typedef enum {
+ TQ_NO_WAIT,
+ TQ_WAIT_FOREVER,
+ TQ_WAIT_TICKS
+} TQWait;
+
+typedef enum {
+ TQ_EVENT_ENQUEUE_PREPARE = RTEMS_EVENT_0,
+ TQ_EVENT_ENQUEUE = RTEMS_EVENT_1,
+ TQ_EVENT_ENQUEUE_DONE = RTEMS_EVENT_2,
+ TQ_EVENT_SURRENDER = RTEMS_EVENT_3,
+ TQ_EVENT_RUNNER_SYNC = RTEMS_EVENT_4,
+ TQ_EVENT_RUNNER_SYNC_2 = RTEMS_EVENT_5,
+ TQ_EVENT_HELPER_OTHER_SYNC = RTEMS_EVENT_6,
+ TQ_EVENT_HELPER_THIRD_SYNC = RTEMS_EVENT_7,
+ TQ_EVENT_MUTEX_A_OBTAIN = RTEMS_EVENT_8,
+ TQ_EVENT_MUTEX_A_RELEASE = RTEMS_EVENT_9,
+ TQ_EVENT_MUTEX_B_OBTAIN = RTEMS_EVENT_10,
+ TQ_EVENT_MUTEX_B_RELEASE = RTEMS_EVENT_11,
+ TQ_EVENT_BUSY_WAIT = RTEMS_EVENT_12
+} TQEvent;
+
+typedef enum {
+ TQ_ENQUEUE_BLOCKS,
+ TQ_ENQUEUE_STICKY
+} TQEnqueueVariant;
+
+typedef struct TQContext {
+ /**
+ * @brief This member defines the thread queue discipline.
+ */
+ TQDiscipline discipline;
+
+ /**
+ * @brief This member defines the enqueue wait behaviour.
+ *
+   * If TQ_NO_WAIT is used, then no thread queue enqueue shall be
+   * performed.
+ */
+ TQWait wait;
+
+ /**
+ * @brief This member defines the enqueue variant.
+ */
+ TQEnqueueVariant enqueue_variant;
+
+ /**
+ * @brief This member contains the identifier of the runner home scheduler.
+ */
+ rtems_id runner_scheduler_id;
+
+ /**
+ * @brief This member contains the identifier of another scheduler.
+ */
+ rtems_id other_scheduler_id;
+
+ /**
+ * @brief This member contains the identifier of a third scheduler.
+ */
+ rtems_id third_scheduler_id;
+
+ /**
+ * @brief This member contains the runner task identifier.
+ */
+ rtems_id runner_id;
+
+ /**
+ * @brief This member contains a reference to the runner task control block.
+ */
+ rtems_tcb *runner_tcb;
+
+ /**
+ * @brief This member contains the worker task identifiers.
+ */
+ rtems_id worker_id[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief This member contains references to the worker task control
+ * blocks.
+ */
+ rtems_tcb *worker_tcb[ TQ_WORKER_COUNT ];
+
+ /**
+   * @brief When a worker receives an event, the corresponding element shall be
+   * set to true.
+ */
+ volatile bool event_received[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief If this member is true, then the worker shall busy wait on request.
+ */
+ volatile bool busy_wait[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief When a worker is done processing its current event set, the
+ * corresponding element shall be set to true.
+ */
+ volatile bool done[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief This member provides the counter used for the worker counters.
+ */
+ uint32_t counter;
+
+ /**
+   * @brief When a worker returns from TQEnqueue(), the counter is incremented
+   * and stored in this member.
+ */
+ uint32_t worker_counter[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief This member contains the identifier of an object providing the
+ * thread queue under test.
+ */
+ rtems_id thread_queue_id;
+
+ /**
+   * @brief This member contains the identifiers of the priority inheritance
+   * mutexes.
+ */
+ rtems_id mutex_id[ TQ_MUTEX_COUNT ];
+
+ /**
+ * @brief This member provides the scheduler log.
+ */
+ T_scheduler_log_40 scheduler_log;
+
+ /**
+ * @brief This member provides the get properties handler.
+ */
+ void ( *get_properties )( struct TQContext *, TQWorkerKind );
+
+ /**
+ * @brief This member provides the status convert handler.
+ */
+ Status_Control ( *convert_status )( Status_Control );
+
+ /**
+   * @brief This member specifies how many threads shall be enqueued.
+ */
+ uint32_t how_many;
+
+ /**
+ * @brief This member provides the thread queue enqueue prepare handler.
+ */
+ void ( *enqueue_prepare )( struct TQContext *, uint32_t );
+
+ /**
+ * @brief This member provides the thread queue enqueue handler.
+ */
+ Status_Control ( *enqueue )( struct TQContext *, TQWait );
+
+ /**
+ * @brief This member provides the thread queue enqueue done handler.
+ */
+ void ( *enqueue_done )( struct TQContext * );
+
+ /**
+ * @brief This member provides the thread queue surrender handler.
+ */
+ void ( *surrender )( struct TQContext * );
+} TQContext;
+
+void TQSend(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQSendAndWaitForExecutionStop(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQWaitForIntendToBlock( const TQContext *ctx, TQWorkerKind worker );
+
+void TQSendAndWaitForIntendToBlock(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQClearDone( TQContext *ctx, TQWorkerKind worker );
+
+void TQWaitForDone( const TQContext *ctx, TQWorkerKind worker );
+
+void TQSynchronizeRunner( void );
+
+void TQSynchronizeRunner2( void );
+
+void TQResetCounter( TQContext *ctx );
+
+uint32_t TQGetCounter( const TQContext *ctx );
+
+uint32_t TQGetWorkerCounter( const TQContext *ctx, TQWorkerKind worker );
+
+void TQMutexObtain( const TQContext *ctx, TQMutex mutex );
+
+void TQMutexRelease( const TQContext *ctx, TQMutex mutex );
+
+void TQSetPriority(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ Priority priority
+);
+
+void TQSetScheduler(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_id scheduler_id,
+ Priority priority
+);
+
+void TQInitialize( TQContext *ctx );
+
+void TQDestroy( TQContext *ctx );
+
+void TQReset( TQContext *ctx );
+
+void TQGetProperties( TQContext *ctx, TQWorkerKind enqueued_worker );
+
+Status_Control TQConvertStatus( TQContext *ctx, Status_Control status );
+
+void TQEnqueuePrepare( TQContext *ctx );
+
+Status_Control TQEnqueue( TQContext *ctx, TQWait wait );
+
+void TQEnqueueDone( TQContext *ctx );
+
+void TQSurrender( TQContext *ctx );
+
+void TQSchedulerRecordStart( TQContext *ctx );
+
+void TQSchedulerRecordStop( TQContext *ctx );
+
+const T_scheduler_event *TQGetNextBlock( TQContext *ctx, size_t *index );
+
+const T_scheduler_event *TQGetNextUnblock( TQContext *ctx, size_t *index );
+
+Status_Control TQConvertStatusClassic( Status_Control status );
+
+void TQEnqueuePrepareClassicSem( TQContext *ctx, uint32_t how_many );
+
+Status_Control TQEnqueueClassicSem( TQContext *ctx, TQWait wait );
+
+void TQSurrenderClassicSem( TQContext *ctx );
+
+typedef struct TQSemContext {
+ /**
+ * @brief This member contains the base thread queue test context.
+ */
+ TQContext base;
+
+ /**
+ * @brief This member provides the semaphore get count handler.
+ */
+ uint32_t ( *get_count )( struct TQSemContext * );
+
+ /**
+ * @brief This member provides the semaphore set count handler.
+ */
+ void ( *set_count )( struct TQSemContext *, uint32_t );
+} TQSemContext;
+
+uint32_t TQSemGetCount( TQSemContext *ctx );
+
+void TQSemSetCount( TQSemContext *ctx, uint32_t count );
+
+uint32_t TQSemGetCountClassic( TQSemContext *ctx );
+
+void TQSemSetCountClassic( TQSemContext *ctx, uint32_t count );
+
+typedef enum {
+ TQ_MTX_RECURSIVE_YES,
+ TQ_MTX_RECURSIVE_NO_STATUS,
+ TQ_MTX_RECURSIVE_NO_FATAL
+} TQMtxRecursive;
+
+typedef struct TQMtxContext {
+ /**
+ * @brief This member contains the base thread queue test context.
+ */
+ TQContext base;
+
+ /**
+ * @brief This member defines the recursive seize behaviour.
+ */
+ TQMtxRecursive recursive;
+
+ /**
+ * @brief This member defines the priority ceiling of the mutex.
+ *
+ * Use PRIO_INVALID to indicate that the mutex does not provide a priority
+ * ceiling.
+ */
+ rtems_task_priority priority_ceiling;
+
+ /**
+ * @brief This member provides the mutex get owner handler.
+ */
+ rtems_tcb *( *get_owner )( struct TQMtxContext * );
+} TQMtxContext;
+
+rtems_tcb *TQMtxGetOwner( TQMtxContext *ctx );
+
+rtems_tcb *TQMtxGetOwnerClassic( TQMtxContext *ctx );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TX_THREAD_QUEUE_H */