author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-12-09 15:49:53 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-03-24 11:10:48 +0100
commit    40cc4f5009dd248d803ea25a0d1277951e9abb0b (patch)
tree      34aa5a9d9354e1124ffc7d09b2f2e40820a0e47c /testsuites/validation
parent    libtest: Increase line length (diff)
validation: Improve support library
Update #3716.
Diffstat (limited to 'testsuites/validation')
-rw-r--r--  testsuites/validation/tc-intr-entry-install.c          2
-rw-r--r--  testsuites/validation/tc-intr-entry-remove.c           2
-rw-r--r--  testsuites/validation/tc-intr-handler-iterate.c        2
-rw-r--r--  testsuites/validation/ts-config.h                    126
-rw-r--r--  testsuites/validation/tx-interrupt.c                  38
-rw-r--r--  testsuites/validation/tx-io-relax.c                   67
-rw-r--r--  testsuites/validation/tx-memory-alloc.c               79
-rw-r--r--  testsuites/validation/tx-preemption-intervention.c   155
-rw-r--r--  testsuites/validation/tx-support.c                   889
-rw-r--r--  testsuites/validation/tx-support.h                   470
-rw-r--r--  testsuites/validation/tx-thread-queue.c              854
-rw-r--r--  testsuites/validation/tx-thread-queue.h              525
-rw-r--r--  testsuites/validation/tx-timecounter.c               160
-rw-r--r--  testsuites/validation/tx-timer-server.c              150
-rw-r--r--  testsuites/validation/tx-wrap-thread-queue.c         144
15 files changed, 3637 insertions, 26 deletions
diff --git a/testsuites/validation/tc-intr-entry-install.c b/testsuites/validation/tc-intr-entry-install.c
index 37583249aa..f5280c1e73 100644
--- a/testsuites/validation/tc-intr-entry-install.c
+++ b/testsuites/validation/tc-intr-entry-install.c
@@ -1041,7 +1041,7 @@ static void RtemsIntrReqEntryInstall_Setup(
rtems_status_code sc;
ctx->initialized_during_setup = bsp_interrupt_is_initialized();
- ctx->test_vector = GetTestableInterruptVector();
+ ctx->test_vector = GetTestableInterruptVector( NULL );
sc = rtems_interrupt_get_attributes( ctx->test_vector, &ctx->attributes );
T_rsc_success( sc );
}
diff --git a/testsuites/validation/tc-intr-entry-remove.c b/testsuites/validation/tc-intr-entry-remove.c
index 65cbbb28d3..ad07464de6 100644
--- a/testsuites/validation/tc-intr-entry-remove.c
+++ b/testsuites/validation/tc-intr-entry-remove.c
@@ -1081,7 +1081,7 @@ static void RtemsIntrReqEntryRemove_Setup(
rtems_status_code sc;
ctx->initialized_during_setup = bsp_interrupt_is_initialized();
- ctx->test_vector = GetTestableInterruptVector();
+ ctx->test_vector = GetTestableInterruptVector( NULL );
sc = rtems_interrupt_get_attributes( ctx->test_vector, &ctx->attributes );
T_rsc_success( sc );
}
diff --git a/testsuites/validation/tc-intr-handler-iterate.c b/testsuites/validation/tc-intr-handler-iterate.c
index 2014e829bf..e643eb5e50 100644
--- a/testsuites/validation/tc-intr-handler-iterate.c
+++ b/testsuites/validation/tc-intr-handler-iterate.c
@@ -505,7 +505,7 @@ static void RtemsIntrReqHandlerIterate_Setup(
rtems_status_code sc;
ctx->initialized_during_setup = bsp_interrupt_is_initialized();
- ctx->test_vector = GetTestableInterruptVector();
+ ctx->test_vector = GetTestableInterruptVector( NULL );
rtems_interrupt_entry_initialize(
&ctx->entry,
EntryRoutine,
diff --git a/testsuites/validation/ts-config.h b/testsuites/validation/ts-config.h
new file mode 100644
index 0000000000..ecdd3b8f8d
--- /dev/null
+++ b/testsuites/validation/ts-config.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This header file provides the constants used by the test suite
+ * configuration.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TS_CONFIG_H
+#define _TS_CONFIG_H
+
+#include <rtems.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestSuites
+ *
+ * @{
+ */
+
+#define TEST_MICROSECONDS_PER_TICK 1000
+
+#define TEST_RUNNER_NAME rtems_build_name( 'R', 'U', 'N', ' ' )
+
+#define TEST_RUNNER_ARGUMENT 123456789
+
+#define TEST_RUNNER_INITIAL_MODES RTEMS_NO_ASR
+
+#define TEST_SCHEDULER_A_NAME rtems_build_name( 'A', ' ', ' ', ' ' )
+
+#define TEST_SCHEDULER_B_NAME rtems_build_name( 'B', ' ', ' ', ' ' )
+
+#define TEST_SCHEDULER_C_NAME rtems_build_name( 'C', ' ', ' ', ' ' )
+
+#define TEST_SCHEDULER_D_NAME rtems_build_name( 'D', ' ', ' ', ' ' )
+
+#if defined( __OPTIMIZE__ ) && !defined( RTEMS_COVERAGE )
+#define TEST_BASE_STACK_SIZE RTEMS_MINIMUM_STACK_SIZE
+#else
+#define TEST_BASE_STACK_SIZE ( 4 * RTEMS_MINIMUM_STACK_SIZE )
+#endif
+
+#define TEST_MAXIMUM_TLS_SIZE \
+ RTEMS_ALIGN_UP( 64, RTEMS_TASK_STORAGE_ALIGNMENT )
+
+#define TEST_MINIMUM_STACK_SIZE \
+ ( TEST_BASE_STACK_SIZE + CPU_STACK_ALIGNMENT )
+
+#define TEST_IDLE_STACK_SIZE \
+ ( TEST_BASE_STACK_SIZE + 2 * CPU_STACK_ALIGNMENT )
+
+#define TEST_INTERRUPT_STACK_SIZE \
+ ( TEST_BASE_STACK_SIZE + 4 * CPU_INTERRUPT_STACK_ALIGNMENT )
+
+#define TEST_MAXIMUM_BARRIERS 7
+
+#define TEST_MAXIMUM_MESSAGE_QUEUES 3
+
+#define TEST_MAXIMUM_PARTITIONS 4
+
+#define TEST_MAXIMUM_PERIODS 2
+
+#define TEST_MAXIMUM_SEMAPHORES 7
+
+#define TEST_MAXIMUM_TASKS 32
+
+#define TEST_MAXIMUM_TIMERS 10
+
+#define TEST_MAXIMUM_USER_EXTENSIONS 5
+
+/*
+ * Use at least two so that the CPU time budget decrement in
+ * _Scheduler_default_Tick() does not always result in a zero.
+ */
+#define TEST_TICKS_PER_TIMESLICE 2
+
+void *test_task_stack_allocate( size_t size );
+
+void test_task_stack_deallocate( void *stack );
+
+void *test_idle_task_stack_allocate( uint32_t cpu_index, size_t size );
+
+extern rtems_task_argument test_runner_argument;
+
+extern rtems_task_priority test_runner_initial_priority;
+
+extern rtems_mode test_runner_initial_modes;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TS_CONFIG_H */
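As an illustration only (not part of this commit), a test suite configuration could map these constants onto the usual confdefs.h options roughly as sketched below; the actual wiring lives in the test suite configuration sources.
  #define CONFIGURE_MICROSECONDS_PER_TICK TEST_MICROSECONDS_PER_TICK
  #define CONFIGURE_TICKS_PER_TIMESLICE TEST_TICKS_PER_TIMESLICE
  #define CONFIGURE_MAXIMUM_TASKS TEST_MAXIMUM_TASKS
  #define CONFIGURE_MAXIMUM_SEMAPHORES TEST_MAXIMUM_SEMAPHORES
  #define CONFIGURE_MAXIMUM_TIMERS TEST_MAXIMUM_TIMERS
  #define CONFIGURE_INTERRUPT_STACK_SIZE TEST_INTERRUPT_STACK_SIZE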
diff --git a/testsuites/validation/tx-interrupt.c b/testsuites/validation/tx-interrupt.c
index e75c7a2aa0..16f1e18c47 100644
--- a/testsuites/validation/tx-interrupt.c
+++ b/testsuites/validation/tx-interrupt.c
@@ -45,6 +45,30 @@
#include <bsp/irq-generic.h>
+static bool HasRequiredAttributes(
+ const rtems_interrupt_attributes *required,
+ const rtems_interrupt_attributes *actual
+)
+{
+ if ( required == NULL ) {
+ return true;
+ }
+
+ if ( required->can_get_affinity && !actual->can_get_affinity ) {
+ return false;
+ }
+
+ if ( required->can_raise && !actual->can_raise ) {
+ return false;
+ }
+
+ if ( required->can_raise_on && !actual->can_raise_on ) {
+ return false;
+ }
+
+ return true;
+}
+
rtems_vector_number GetValidInterruptVectorNumber(
const rtems_interrupt_attributes *required
)
@@ -57,11 +81,7 @@ rtems_vector_number GetValidInterruptVectorNumber(
sc = rtems_interrupt_get_attributes( vector, &attr );
- if (
- sc == RTEMS_SUCCESSFUL &&
- ( required == NULL ||
- !required->can_get_affinity || attr.can_get_affinity )
- ) {
+ if ( sc == RTEMS_SUCCESSFUL && HasRequiredAttributes( required, &attr ) ) {
break;
}
}
@@ -69,7 +89,9 @@ rtems_vector_number GetValidInterruptVectorNumber(
return vector;
}
-rtems_vector_number GetTestableInterruptVector( void )
+rtems_vector_number GetTestableInterruptVector(
+ const rtems_interrupt_attributes *required
+)
{
rtems_vector_number vector;
@@ -87,6 +109,10 @@ rtems_vector_number GetTestableInterruptVector( void )
continue;
}
+ if ( !HasRequiredAttributes( required, &attr ) ) {
+ continue;
+ }
+
if ( HasInterruptVectorEntriesInstalled( vector ) ) {
continue;
}
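As an illustrative caller sketch (not part of this commit), the new parameter lets a test request a testable vector with specific capabilities, for example one which can also be raised from software:
  rtems_interrupt_attributes required = { .can_raise = true };
  rtems_vector_number        vector;

  /* Returns a testable vector which additionally supports raising it */
  vector = GetTestableInterruptVector( &required );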
diff --git a/testsuites/validation/tx-io-relax.c b/testsuites/validation/tx-io-relax.c
new file mode 100644
index 0000000000..1930c66b19
--- /dev/null
+++ b/testsuites/validation/tx-io-relax.c
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of SetIORelaxHandler().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+void __real__IO_Relax( void );
+
+void __wrap__IO_Relax( void );
+
+static void ( *io_relax_handler )( void * );
+
+static void *io_relax_arg;
+
+void __wrap__IO_Relax( void )
+{
+ void ( *handler )( void * );
+
+ handler = io_relax_handler;
+
+ if ( handler != NULL ) {
+ ( *handler )( io_relax_arg );
+ }
+
+ __real__IO_Relax();
+}
+
+void SetIORelaxHandler( void ( *handler )( void * ), void *arg )
+{
+ io_relax_handler = handler;
+ io_relax_arg = arg;
+}
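An illustrative usage sketch (not part of this commit): it assumes the test executable is linked with -Wl,--wrap=_IO_Relax so that the wrapper above is active; the names below are made up.
  static void CountRelax( void *arg )
  {
    uint32_t *counter;

    counter = arg;
    ++( *counter );
  }

  static void Example( void )
  {
    uint32_t relax_count = 0;

    SetIORelaxHandler( CountRelax, &relax_count );
    /* ... exercise code which busy waits on I/O ... */
    SetIORelaxHandler( NULL, NULL );
  }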
diff --git a/testsuites/validation/tx-memory-alloc.c b/testsuites/validation/tx-memory-alloc.c
new file mode 100644
index 0000000000..82b79bc558
--- /dev/null
+++ b/testsuites/validation/tx-memory-alloc.c
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of
+ * MemoryAllocationFailWhen() and __wrap_rtems_malloc().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+void *__real_rtems_malloc( size_t );
+
+void *__wrap_rtems_malloc( size_t );
+
+static uint32_t fail_when_counter;
+
+void MemoryAllocationFailWhen( uint32_t counter )
+{
+ fail_when_counter = counter;
+}
+
+static bool IsFail( void )
+{
+ uint32_t counter;
+
+ counter = fail_when_counter;
+
+ if ( counter == 1 ) {
+ fail_when_counter = 0;
+ return true;
+ }
+
+ if ( counter > 1 ) {
+ fail_when_counter = counter - 1;
+ }
+
+ return false;
+}
+
+void *__wrap_rtems_malloc( size_t size )
+{
+ if ( IsFail() ) {
+ return NULL;
+ }
+
+ return __real_rtems_malloc( size );
+}
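An illustrative usage sketch (not part of this commit): it assumes linking with -Wl,--wrap=rtems_malloc so that the wrapper above intercepts the allocations.
  void *first;
  void *second;

  MemoryAllocationFailWhen( 2 );
  first = rtems_malloc( 16 );  /* forwarded to the real allocator */
  second = rtems_malloc( 16 ); /* counter reaches zero, returns NULL */
  T_not_null( first );
  T_null( second );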
diff --git a/testsuites/validation/tx-preemption-intervention.c b/testsuites/validation/tx-preemption-intervention.c
new file mode 100644
index 0000000000..612190567e
--- /dev/null
+++ b/testsuites/validation/tx-preemption-intervention.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of
+ * SetPreemptionIntervention().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+#include <rtems/sysinit.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/threadimpl.h>
+
+#include <rtems/test.h>
+
+#if defined(RTEMS_SMP)
+typedef struct {
+ void ( *handler )( void * );
+ void *arg;
+ Scheduler_Context scheduler_context;
+ Scheduler_Node scheduler_node;
+ Thread_Control thread;
+} PreemptionInterventionContext;
+
+static PreemptionInterventionContext preemption_intervention_instance;
+
+static bool PreemptionInterventionAskForHelp(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+)
+{
+ PreemptionInterventionContext *ctx;
+ void ( *handler )( void * );
+ void *arg;
+
+ (void) scheduler;
+ (void) thread;
+ (void) node;
+
+ ctx = &preemption_intervention_instance;
+ handler = ctx->handler;
+ arg = ctx->arg;
+ ctx->handler = NULL;
+ ctx->arg = NULL;
+ ( *handler )( arg );
+
+ return true;
+}
+
+static const Scheduler_Control preemption_intervention_scheduler = {
+ .context = &preemption_intervention_instance.scheduler_context,
+ .Operations = {
+ .ask_for_help = PreemptionInterventionAskForHelp
+ }
+};
+
+static void PreemptionInterventionInitialize( void )
+{
+ PreemptionInterventionContext *ctx;
+
+ ctx = &preemption_intervention_instance;
+
+ _Chain_Initialize_node( &ctx->thread.Scheduler.Help_node );
+ _Thread_queue_Initialize(
+ &ctx->thread.Join_queue,
+ "Preemption Intervention"
+ );
+ _ISR_lock_Initialize(
+ &ctx->scheduler_context.Lock,
+ "Preemption Intervention"
+ );
+ _Scheduler_Node_do_initialize(
+ &preemption_intervention_scheduler,
+ &ctx->scheduler_node,
+ &ctx->thread,
+ 0
+ );
+ _Chain_Initialize_one(
+ &ctx->thread.Scheduler.Scheduler_nodes,
+ &ctx->scheduler_node.Thread.Scheduler_node.Chain
+ );
+}
+
+RTEMS_SYSINIT_ITEM(
+ PreemptionInterventionInitialize,
+ RTEMS_SYSINIT_DEVICE_DRIVERS,
+ RTEMS_SYSINIT_ORDER_MIDDLE
+);
+#endif
+
+void SetPreemptionIntervention(
+ struct Per_CPU_Control *cpu,
+ void ( *handler )( void * ),
+ void *arg
+)
+{
+#if defined(RTEMS_SMP)
+ PreemptionInterventionContext *ctx;
+ rtems_interrupt_level level;
+ ISR_lock_Context lock_context;
+
+ ctx = &preemption_intervention_instance;
+ T_quiet_assert_null( ctx->handler );
+ ctx->handler = handler;
+ ctx->arg = arg;
+
+ rtems_interrupt_local_disable( level );
+ _Per_CPU_Acquire( cpu, &lock_context );
+ _Chain_Append_unprotected(
+ &cpu->Threads_in_need_for_help,
+ &ctx->thread.Scheduler.Help_node
+ );
+ _Per_CPU_Release( cpu, &lock_context );
+ rtems_interrupt_local_enable( level );
+#else
+ (void) cpu;
+ (void) handler;
+ (void) arg;
+#endif
+}
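An illustrative usage sketch (not part of this commit, effective on SMP configurations only; the names below are made up):
  static void Intervention( void *arg )
  {
    *(bool *) arg = true;
  }

  /* Arrange for Intervention() to run from the thread dispatch path of
   * processor 0 through the ask for help mechanism set up above. */
  static void Example( void )
  {
    bool ran = false;

    SetPreemptionIntervention( _Per_CPU_Get_by_index( 0 ), Intervention, &ran );
  }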
diff --git a/testsuites/validation/tx-support.c b/testsuites/validation/tx-support.c
index dcb7603832..b09d3cc3cd 100644
--- a/testsuites/validation/tx-support.c
+++ b/testsuites/validation/tx-support.c
@@ -5,11 +5,8 @@
*
* @ingroup RTEMSTestSuites
*
- * @brief This source file contains the definition of DeleteTask(),
- * DoCreateTask(), GetMode(), GetPriority(), GetSelfPriority(),
- * ReceiveAnyEvents(), RestoreRunnerASR(), RestoreRunnerMode(),
- * RestoreRunnerPriority(), SendEvents(), SetMode(), SetSelfPriority(),
- * SetPriority(), and StartTask().
+ * @brief This source file contains the implementation of support functions for
+ * the validation test cases.
*/
/*
@@ -42,8 +39,16 @@
#endif
#include "tx-support.h"
+#include "ts-config.h"
#include <rtems/test.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/threaddispatch.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/rtems/semimpl.h>
+
+#include <string.h>
rtems_id DoCreateTask( rtems_name name, rtems_task_priority priority )
{
@@ -53,7 +58,7 @@ rtems_id DoCreateTask( rtems_name name, rtems_task_priority priority )
sc = rtems_task_create(
name,
priority,
- RTEMS_MINIMUM_STACK_SIZE,
+ TEST_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
&id
@@ -77,33 +82,115 @@ void DeleteTask( rtems_id id )
rtems_status_code sc;
sc = rtems_task_delete( id );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
}
}
-rtems_event_set ReceiveAnyEvents( void )
+void SuspendTask( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_suspend( id );
+ T_quiet_rsc_success( sc );
+}
+
+void SuspendSelf( void )
+{
+ SuspendTask( RTEMS_SELF );
+}
+
+void ResumeTask( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_resume( id );
+ T_quiet_rsc_success( sc );
+}
+
+bool IsTaskSuspended( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_is_suspended( id );
+ T_quiet_true( sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED );
+
+ return sc == RTEMS_ALREADY_SUSPENDED;
+}
+
+rtems_event_set QueryPendingEvents( void )
{
rtems_status_code sc;
rtems_event_set events;
events = 0;
sc = rtems_event_receive(
+ RTEMS_PENDING_EVENTS,
+ RTEMS_EVENT_ALL | RTEMS_NO_WAIT,
+ 0,
+ &events
+ );
+ T_quiet_rsc_success( sc );
+
+ return events;
+}
+
+rtems_event_set PollAnyEvents( void )
+{
+ rtems_event_set events;
+
+ events = 0;
+ (void) rtems_event_receive(
+ RTEMS_ALL_EVENTS,
+ RTEMS_EVENT_ANY | RTEMS_NO_WAIT,
+ 0,
+ &events
+ );
+
+ return events;
+}
+
+rtems_event_set ReceiveAnyEvents( void )
+{
+ return ReceiveAnyEventsTimed( RTEMS_NO_TIMEOUT );
+}
+
+rtems_event_set ReceiveAnyEventsTimed( rtems_interval ticks )
+{
+ rtems_event_set events;
+
+ events = 0;
+ (void) rtems_event_receive(
RTEMS_ALL_EVENTS,
RTEMS_EVENT_ANY | RTEMS_WAIT,
- RTEMS_NO_TIMEOUT,
+ ticks,
&events
);
- T_rsc_success( sc );
return events;
}
+void ReceiveAllEvents( rtems_event_set events )
+{
+ rtems_status_code sc;
+ rtems_event_set received;
+
+ received = 0;
+ sc = rtems_event_receive(
+ events,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT,
+ &received
+ );
+ T_quiet_rsc_success( sc );
+ T_quiet_eq_u32( received, events );
+}
+
void SendEvents( rtems_id id, rtems_event_set events )
{
rtems_status_code sc;
sc = rtems_event_send( id, events );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
}
rtems_mode GetMode( void )
@@ -117,7 +204,7 @@ rtems_mode SetMode( rtems_mode set, rtems_mode mask )
rtems_mode previous;
sc = rtems_task_mode( set, mask, &previous );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
return previous;
}
@@ -127,13 +214,32 @@ rtems_task_priority GetPriority( rtems_id id )
return SetPriority( id, RTEMS_CURRENT_PRIORITY );
}
+rtems_task_priority GetPriorityByScheduler(
+ rtems_id task_id,
+ rtems_id scheduler_id
+)
+{
+ rtems_status_code sc;
+ rtems_task_priority priority;
+
+ priority = PRIO_INVALID;
+ sc = rtems_task_get_priority( task_id, scheduler_id, &priority );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ return PRIO_INVALID;
+ }
+
+ return priority;
+}
+
rtems_task_priority SetPriority( rtems_id id, rtems_task_priority priority )
{
rtems_status_code sc;
rtems_task_priority previous;
+ previous = PRIO_INVALID;
sc = rtems_task_set_priority( id, priority, &previous );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
return previous;
}
@@ -148,12 +254,293 @@ rtems_task_priority SetSelfPriority( rtems_task_priority priority )
return SetPriority( RTEMS_SELF, priority );
}
+rtems_task_priority SetSelfPriorityNoYield( rtems_task_priority priority )
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ /*
+ * If the priority is lowered, then this sequence ensures that we do not
+ * carry out an implicit yield.
+ */
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'T', 'E', 'M', 'P' ),
+ 0,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
+ 1,
+ &id
+ );
+ T_quiet_rsc_success( sc );
+
+ priority = SetSelfPriority( priority );
+ ReleaseMutex( id );
+ DeleteMutex( id );
+
+ return priority;
+}
+
+rtems_id GetScheduler( rtems_id id )
+{
+ rtems_status_code sc;
+ rtems_id scheduler_id;
+
+ scheduler_id = 0xffffffff;
+ sc = rtems_task_get_scheduler( id, &scheduler_id );
+ T_quiet_rsc_success( sc );
+
+ return scheduler_id;
+}
+
+rtems_id GetSelfScheduler( void )
+{
+ return GetScheduler( RTEMS_SELF );
+}
+
+void SetScheduler(
+ rtems_id task_id,
+ rtems_id scheduler_id,
+ rtems_task_priority priority
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_scheduler( task_id, scheduler_id, priority );
+ T_quiet_rsc_success( sc );
+}
+
+void SetSelfScheduler( rtems_id scheduler_id, rtems_task_priority priority )
+{
+ SetScheduler( RTEMS_SELF, scheduler_id, priority );
+}
+
+void GetAffinity( rtems_id id, cpu_set_t *set )
+{
+ rtems_status_code sc;
+
+ CPU_ZERO( set );
+ sc = rtems_task_get_affinity( id, sizeof( *set ), set );
+ T_quiet_rsc_success( sc );
+}
+
+void GetSelfAffinity( cpu_set_t *set )
+{
+ GetAffinity( RTEMS_SELF, set );
+}
+
+void SetAffinity( rtems_id id, const cpu_set_t *set )
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_affinity( id, sizeof( *set ), set );
+ T_quiet_rsc_success( sc );
+}
+
+void SetSelfAffinity( const cpu_set_t *set )
+{
+ SetAffinity( RTEMS_SELF, set );
+}
+
+void SetAffinityOne( rtems_id id, uint32_t cpu_index )
+{
+ cpu_set_t set;
+
+ CPU_ZERO( &set );
+ CPU_SET( (int) cpu_index, &set );
+ SetAffinity( id, &set );
+}
+
+void SetSelfAffinityOne( uint32_t cpu_index )
+{
+ SetAffinityOne( RTEMS_SELF, cpu_index );
+}
+
+void SetAffinityAll( rtems_id id )
+{
+ cpu_set_t set;
+
+ CPU_FILL( &set );
+ SetAffinity( id, &set );
+}
+
+void SetSelfAffinityAll( void )
+{
+ SetAffinityAll( RTEMS_SELF );
+}
+
+void Yield( void )
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
+ T_quiet_rsc_success( sc );
+}
+
+void YieldTask( rtems_id id )
+{
+ Thread_Control *the_thread;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+
+ the_thread = _Thread_Get( id, &lock_context );
+
+ if ( the_thread == NULL ) {
+ return;
+ }
+
+ cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
+ _ISR_lock_ISR_enable( &lock_context);
+ _Thread_Yield( the_thread );
+ _Thread_Dispatch_direct( cpu_self );
+}
+
+void AddProcessor( rtems_id scheduler_id, uint32_t cpu_index )
+{
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_add_processor( scheduler_id, cpu_index );
+ T_quiet_rsc_success( sc );
+}
+
+void RemoveProcessor( rtems_id scheduler_id, uint32_t cpu_index )
+{
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_remove_processor( scheduler_id, cpu_index );
+ T_quiet_rsc_success( sc );
+}
+
+rtems_id CreateMutex( void )
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ id = INVALID_ID;
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'U', 'T', 'X' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
+ 0,
+ &id
+ );
+ T_rsc_success( sc );
+
+ return id;
+}
+
+rtems_id CreateMutexNoProtocol( void )
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ id = INVALID_ID;
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'U', 'T', 'X' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY,
+ 0,
+ &id
+ );
+ T_rsc_success( sc );
+
+ return id;
+}
+
+rtems_id CreateMutexFIFO( void )
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ id = INVALID_ID;
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'U', 'T', 'X' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_FIFO,
+ 0,
+ &id
+ );
+ T_rsc_success( sc );
+
+ return id;
+}
+
+void DeleteMutex( rtems_id id )
+{
+ if ( id != INVALID_ID ) {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_delete( id );
+ T_rsc_success( sc );
+ }
+}
+
+bool IsMutexOwner( rtems_id id )
+{
+ Semaphore_Control *the_semaphore;
+ Thread_queue_Context queue_context;
+
+ the_semaphore = _Semaphore_Get( id, &queue_context );
+ if ( the_semaphore == NULL ) {
+ return false;
+ }
+
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ return the_semaphore->Core_control.Wait_queue.Queue.owner ==
+ _Thread_Get_executing();
+}
+
+void ObtainMutex( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
+ T_rsc_success( sc );
+}
+
+void ObtainMutexTimed( rtems_id id, rtems_interval ticks )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain( id, RTEMS_WAIT, ticks );
+ T_rsc_success( sc );
+}
+
+void ObtainMutexDeadlock( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain( id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
+ T_rsc( sc, RTEMS_INCORRECT_STATE );
+}
+
+void ReleaseMutex( rtems_id id )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( id );
+ T_rsc_success( sc );
+}
+
+Thread_queue_Queue *GetMutexThreadQueue( rtems_id id )
+{
+ Semaphore_Control *the_semaphore;
+ Thread_queue_Context queue_context;
+
+ the_semaphore = _Semaphore_Get( id, &queue_context );
+ if ( the_semaphore == NULL ) {
+ return NULL;
+ }
+
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ return &the_semaphore->Core_control.Wait_queue.Queue;
+}
+
void RestoreRunnerASR( void )
{
rtems_status_code sc;
sc = rtems_signal_catch( NULL, RTEMS_DEFAULT_MODES );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
}
void RestoreRunnerMode( void )
@@ -162,10 +549,480 @@ void RestoreRunnerMode( void )
rtems_mode mode;
sc = rtems_task_mode( RTEMS_DEFAULT_MODES, RTEMS_ALL_MODE_MASKS, &mode );
- T_rsc_success( sc );
+ T_quiet_rsc_success( sc );
}
void RestoreRunnerPriority( void )
{
- SetSelfPriority( PRIO_ULTRA_HIGH );
+ SetSelfPriority( 1 );
+}
+
+Thread_Control *GetThread( rtems_id id )
+{
+ Thread_Control *the_thread;
+ ISR_lock_Context lock_context;
+
+ the_thread = _Thread_Get( id, &lock_context );
+
+ if ( the_thread == NULL ) {
+ return NULL;
+ }
+
+ _ISR_lock_ISR_enable( &lock_context);
+ return the_thread;
+}
+
+Thread_Control *GetExecuting( void )
+{
+ return _Thread_Get_executing();
+}
+
+void KillZombies( void )
+{
+ _RTEMS_Lock_allocator();
+ _Thread_Kill_zombies();
+ _RTEMS_Unlock_allocator();
+}
+
+void WaitForExecutionStop( rtems_id task_id )
+{
+#if defined( RTEMS_SMP )
+ Thread_Control *the_thread;
+
+ the_thread = GetThread( task_id );
+ T_assert_not_null( the_thread );
+
+ while ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
+ /* Wait */
+ }
+#else
+ (void) task_id;
+#endif
+}
+
+void WaitForIntendToBlock( rtems_id task_id )
+{
+#if defined( RTEMS_SMP )
+ Thread_Control *the_thread;
+ Thread_Wait_flags intend_to_block;
+
+ the_thread = GetThread( task_id );
+ T_assert_not_null( the_thread );
+
+ intend_to_block = THREAD_WAIT_CLASS_OBJECT |
+ THREAD_WAIT_STATE_INTEND_TO_BLOCK;
+
+ while ( _Thread_Wait_flags_get_acquire( the_thread ) != intend_to_block ) {
+ /* Wait */
+ }
+#else
+ (void) task_id;
+#endif
+}
+
+void WaitForHeir( uint32_t cpu_index, rtems_id task_id )
+{
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ while ( cpu->heir->Object.id != task_id ) {
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ }
+}
+
+void WaitForNextTask( uint32_t cpu_index, rtems_id task_id )
+{
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ while ( cpu->heir->Object.id == task_id ) {
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ }
+
+ while ( cpu->thread_dispatch_disable_level != 0 ) {
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ }
+}
+
+void GetTaskTimerInfo( rtems_id id, TaskTimerInfo *info )
+{
+ GetTaskTimerInfoByThread( GetThread( id ), info );
+}
+
+void GetTaskTimerInfoByThread(
+ struct _Thread_Control *thread,
+ TaskTimerInfo *info
+)
+{
+ info->expire_ticks = 0;
+ info->expire_timespec.tv_sec = -1;
+ info->expire_timespec.tv_nsec = -1;
+
+ if ( thread != NULL ) {
+ ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
+ Per_CPU_Control *cpu;
+
+ _ISR_lock_ISR_disable_and_acquire( &thread->Timer.Lock, &lock_context );
+ info->expire_ticks = thread->Timer.Watchdog.expire;
+#if defined( RTEMS_SMP )
+ cpu = thread->Timer.Watchdog.cpu;
+#else
+ cpu = _Per_CPU_Get();
+#endif
+ _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context_2 );
+
+ if ( _Watchdog_Is_scheduled( &thread->Timer.Watchdog ) ) {
+ const Watchdog_Header *hdr;
+
+ hdr = thread->Timer.header;
+
+ if ( hdr == &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ] ) {
+ info->state = TASK_TIMER_TICKS;
+ } else {
+ _Watchdog_Ticks_to_timespec(
+ info->expire_ticks,
+ &info->expire_timespec
+ );
+
+ if ( hdr == &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ] ) {
+ info->state = TASK_TIMER_REALTIME;
+ } else {
+ T_quiet_eq_ptr(
+ hdr,
+ &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ]
+ );
+ info->state = TASK_TIMER_MONOTONIC;
+ }
+ }
+ } else {
+ info->state = TASK_TIMER_INACTIVE;
+ }
+
+ _Watchdog_Per_CPU_release_critical( cpu, &lock_context_2 );
+ _ISR_lock_Release_and_ISR_enable( &thread->Timer.Lock, &lock_context );
+ } else {
+ info->state = TASK_TIMER_INVALID;
+ }
}
+
+#if defined( RTEMS_SMP )
+static void DoWatchdogTick( void *arg )
+{
+ (void) arg;
+ _Watchdog_Tick( _Per_CPU_Get() );
+}
+#endif
+
+void ClockTick( void )
+{
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable();
+#if defined( RTEMS_SMP )
+ DoWatchdogTick( NULL );
+ _SMP_Othercast_action( DoWatchdogTick, NULL );
+#else
+ _Watchdog_Tick( cpu_self );
+#endif
+ _Thread_Dispatch_enable( cpu_self );
+}
+
+static void FinalWatchdogTick( Per_CPU_Control *cpu )
+{
+ ISR_lock_Context lock_context;
+ Watchdog_Header *header;
+ Watchdog_Control *first;
+
+ _ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context );
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
+ first = _Watchdog_Header_first( header );
+
+ if ( first != NULL ) {
+ _Watchdog_Tickle(
+ header,
+ first,
+ UINT64_MAX,
+ &cpu->Watchdog.Lock,
+ &lock_context
+ );
+ }
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
+ first = _Watchdog_Header_first( header );
+
+ if ( first != NULL ) {
+ _Watchdog_Tickle(
+ header,
+ first,
+ UINT64_MAX,
+ &cpu->Watchdog.Lock,
+ &lock_context
+ );
+ }
+
+ header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
+ first = _Watchdog_Header_first( header );
+
+ if ( first != NULL ) {
+ _Watchdog_Tickle(
+ header,
+ first,
+ UINT64_MAX,
+ &cpu->Watchdog.Lock,
+ &lock_context
+ );
+ }
+
+ _ISR_lock_Release_and_ISR_enable( &cpu->Watchdog.Lock, &lock_context );
+}
+
+#if defined( RTEMS_SMP )
+static void DoFinalWatchdogTick( void *arg )
+{
+ (void) arg;
+ FinalWatchdogTick( _Per_CPU_Get() );
+}
+#endif
+
+void FinalClockTick( void )
+{
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable();
+#if defined( RTEMS_SMP )
+ DoFinalWatchdogTick( NULL );
+ _SMP_Othercast_action( DoFinalWatchdogTick, NULL );
+#else
+ FinalWatchdogTick( cpu_self );
+#endif
+ _Thread_Dispatch_enable( cpu_self );
+}
+
+static FatalHandler fatal_handler;
+
+static void *fatal_arg;
+
+void FatalInitialExtension(
+ rtems_fatal_source source,
+ bool always_set_to_false,
+ rtems_fatal_code code
+)
+{
+ FatalHandler fatal;
+
+ T_quiet_false( always_set_to_false );
+ fatal = fatal_handler;
+
+ if ( fatal != NULL ) {
+ ( *fatal )( source, code, fatal_arg );
+ }
+}
+
+void SetFatalHandler( FatalHandler fatal, void *arg )
+{
+ fatal_handler = fatal;
+ fatal_arg = arg;
+}
+
+static rtems_id task_switch_id;
+
+static rtems_task_switch_extension task_switch_extension;
+
+static void TaskSwitchExtension( rtems_tcb *executing, rtems_tcb *heir )
+{
+ ( *task_switch_extension )( executing, heir );
+}
+
+void SetTaskSwitchExtension( rtems_task_switch_extension task_switch )
+{
+ rtems_task_switch_extension last;
+ rtems_status_code sc;
+
+ last = task_switch_extension;
+
+ if ( task_switch == NULL ) {
+ if ( last != NULL ) {
+ sc = rtems_extension_delete( task_switch_id );
+ T_quiet_rsc_success( sc );
+
+ task_switch_extension = NULL;
+ }
+ } else {
+ task_switch_extension = task_switch;
+
+ if ( last == NULL ) {
+ rtems_extensions_table table = {
+ .thread_switch = TaskSwitchExtension
+ };
+
+ sc = rtems_extension_create(
+ rtems_build_name( 'T', 'S', 'W', 'I' ),
+ &table,
+ &task_switch_id
+ );
+ T_quiet_rsc_success( sc );
+ }
+ }
+}
+
+void ClearExtensionCalls( ExtensionCalls *calls )
+{
+ memset( calls, 0, sizeof( *calls ) );
+}
+
+void CopyExtensionCalls( const ExtensionCalls *from, ExtensionCalls *to )
+{
+ memcpy( to, from, sizeof( *to ) );
+}
+
+#if defined(RTEMS_SMP)
+static volatile bool delay_thread_dispatch;
+
+static void DelayThreadDispatchHandler( void *arg )
+{
+ (void) arg;
+
+ while ( delay_thread_dispatch ) {
+ /* Wait */
+ }
+}
+
+static const Per_CPU_Job_context delay_thread_dispatch_context = {
+ .handler = DelayThreadDispatchHandler
+};
+
+static Per_CPU_Job delay_thread_dispatch_job = {
+ .context = &delay_thread_dispatch_context
+};
+#endif
+
+void StartDelayThreadDispatch( uint32_t cpu_index )
+{
+#if defined(RTEMS_SMP)
+ if ( rtems_configuration_get_maximum_processors() > cpu_index ) {
+ delay_thread_dispatch = true;
+ _Per_CPU_Submit_job(
+ _Per_CPU_Get_by_index( cpu_index ),
+ &delay_thread_dispatch_job
+ );
+ }
+#endif
+}
+
+void StopDelayThreadDispatch( uint32_t cpu_index )
+{
+#if defined(RTEMS_SMP)
+ if ( rtems_configuration_get_maximum_processors() > cpu_index ) {
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable();
+ delay_thread_dispatch = false;
+ _Per_CPU_Wait_for_job(
+ _Per_CPU_Get_by_index( cpu_index ),
+ &delay_thread_dispatch_job
+ );
+ _Thread_Dispatch_enable( cpu_self );
+ }
+#endif
+}
+
+bool AreInterruptsEnabled( void )
+{
+ return _ISR_Get_level() == 0;
+}
+
+static bool IsWhiteSpace( char c )
+{
+ return c == ' ' || c == '\t';
+}
+
+bool IsWhiteSpaceOnly( const char *s )
+{
+ char c;
+
+ while ( ( c = *s ) != '\0' ) {
+ if ( !IsWhiteSpace( c ) ) {
+ return false;
+ }
+
+ ++s;
+ }
+
+ return true;
+}
+
+static const char *EatWhiteSpace( const char *s )
+{
+ char c;
+
+ while ( ( c = *s ) != '\0' ) {
+ if ( !IsWhiteSpace( c ) ) {
+ break;
+ }
+
+ ++s;
+ }
+
+ return s;
+}
+
+bool IsEqualIgnoreWhiteSpace( const char *a, const char *b )
+{
+ while ( true ) {
+ a = EatWhiteSpace( a );
+ b = EatWhiteSpace( b );
+
+ if ( *a != *b ) {
+ return false;
+ }
+
+ if ( *a == '\0' ) {
+ return true;
+ }
+
+ ++a;
+ ++b;
+ }
+
+ return true;
+}
+
+#if defined(RTEMS_SMP)
+bool TicketLockIsAvailable( const SMP_ticket_lock_Control *lock )
+{
+ unsigned int now_serving;
+ unsigned int next_ticket;
+
+ now_serving = _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
+ next_ticket = _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );
+
+ return now_serving == next_ticket;
+}
+
+void TicketLockWaitForOwned( const SMP_ticket_lock_Control *lock )
+{
+ while ( TicketLockIsAvailable( lock ) ) {
+ /* Wait */
+ }
+}
+
+void TicketLockWaitForOthers(
+ const SMP_ticket_lock_Control *lock,
+ unsigned int others
+)
+{
+ unsigned int expected;
+ unsigned int actual;
+
+ expected = _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
+ expected += others + 1;
+
+ do {
+ actual = _Atomic_Load_uint( &lock->next_ticket, ATOMIC_ORDER_RELAXED );
+ } while ( expected != actual );
+}
+#endif
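An illustrative helper built on ClockTick() (not part of this commit): it simulates the passage of time in test suites which do not have a clock driver interrupt.
  static void AdvanceTicks( rtems_interval ticks )
  {
    while ( ticks > 0 ) {
      ClockTick();
      --ticks;
    }
  }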
diff --git a/testsuites/validation/tx-support.h b/testsuites/validation/tx-support.h
index b0e466fda1..b647551b4e 100644
--- a/testsuites/validation/tx-support.h
+++ b/testsuites/validation/tx-support.h
@@ -40,6 +40,7 @@
#include <rtems.h>
#include <rtems/irq-extension.h>
#include <rtems/score/atomic.h>
+#include <rtems/score/threadq.h>
#ifdef __cplusplus
extern "C" {
@@ -53,6 +54,7 @@ extern "C" {
typedef enum {
PRIO_PSEUDO_ISR,
+ PRIO_VERY_ULTRA_HIGH,
PRIO_ULTRA_HIGH,
PRIO_VERY_HIGH,
PRIO_HIGH,
@@ -63,24 +65,84 @@ typedef enum {
} Priority;
/**
+ * @brief This constant represents the default priority of the runner task.
+ */
+#define PRIO_DEFAULT 1
+
+/**
+ * @brief This constant represents an invalid RTEMS task priority value.
+ *
+ * It should be an invalid priority value which is not equal to
+ * RTEMS_CURRENT_PRIORITY or RTEMS_TIMER_SERVER_DEFAULT_PRIORITY.
+ */
+#define PRIO_INVALID 0xfffffffe
+
+/**
+ * @brief This constant represents a priority which is close to the priority
+ * of the idle thread.
+ *
+ * It may be used for the runner thread together with PRIO_FLEXIBLE for worker
+ * threads.
+ */
+#define PRIO_NEARLY_IDLE 126
+
+/**
+ * @brief This constant represents a priority with a wide range of higher and
+ *   lower priorities around it.
+ *
+ * It may be used for the worker threads together with PRIO_NEARLY_IDLE for the
+ * runner thread.
+ */
+#define PRIO_FLEXIBLE 64
+
+/**
 * @brief This constant represents an invalid RTEMS object identifier.
*/
#define INVALID_ID 0xfffffffd
+/**
+ * @brief This constant represents an object name for tests.
+ */
+#define OBJECT_NAME rtems_build_name( 'T', 'E', 'S', 'T' )
+
#define CreateTask( name, priority ) \
DoCreateTask( \
rtems_build_name( name[ 0 ], name[ 1 ], name[ 2 ], name[ 3 ] ), \
priority \
)
+#define SCHEDULER_A_ID 0xf010001
+
+#define SCHEDULER_B_ID 0xf010002
+
+#define SCHEDULER_C_ID 0xf010003
+
+#define SCHEDULER_D_ID 0xf010004
+
rtems_id DoCreateTask( rtems_name name, rtems_task_priority priority );
void StartTask( rtems_id id, rtems_task_entry entry, void *arg );
void DeleteTask( rtems_id id );
+void SuspendTask( rtems_id id );
+
+void SuspendSelf( void );
+
+void ResumeTask( rtems_id id );
+
+bool IsTaskSuspended( rtems_id id );
+
+rtems_event_set QueryPendingEvents( void );
+
+rtems_event_set PollAnyEvents( void );
+
rtems_event_set ReceiveAnyEvents( void );
+rtems_event_set ReceiveAnyEventsTimed( rtems_interval ticks );
+
+void ReceiveAllEvents( rtems_event_set events );
+
void SendEvents( rtems_id id, rtems_event_set events );
rtems_mode GetMode( void );
@@ -89,18 +151,230 @@ rtems_mode SetMode( rtems_mode set, rtems_mode mask );
rtems_task_priority GetPriority( rtems_id id );
+rtems_task_priority GetPriorityByScheduler(
+ rtems_id task_id,
+ rtems_id scheduler_id
+);
+
rtems_task_priority SetPriority( rtems_id id, rtems_task_priority priority );
rtems_task_priority GetSelfPriority( void );
rtems_task_priority SetSelfPriority( rtems_task_priority priority );
+rtems_task_priority SetSelfPriorityNoYield( rtems_task_priority priority );
+
+rtems_id GetScheduler( rtems_id id );
+
+rtems_id GetSelfScheduler( void );
+
+void SetScheduler(
+ rtems_id task_id,
+ rtems_id scheduler_id,
+ rtems_task_priority priority
+);
+
+void SetSelfScheduler( rtems_id scheduler_id, rtems_task_priority priority );
+
+void GetAffinity( rtems_id id, cpu_set_t *set );
+
+void GetSelfAffinity( cpu_set_t *set );
+
+void SetAffinity( rtems_id id, const cpu_set_t *set );
+
+void SetSelfAffinity( const cpu_set_t *set );
+
+void SetAffinityOne( rtems_id id, uint32_t cpu_index );
+
+void SetSelfAffinityOne( uint32_t cpu_index );
+
+void SetAffinityAll( rtems_id id );
+
+void SetSelfAffinityAll( void );
+
+void Yield( void );
+
+void YieldTask( rtems_id id );
+
+void AddProcessor( rtems_id scheduler_id, uint32_t cpu_index );
+
+void RemoveProcessor( rtems_id scheduler_id, uint32_t cpu_index );
+
+rtems_id CreateMutex( void );
+
+rtems_id CreateMutexNoProtocol( void );
+
+rtems_id CreateMutexFIFO( void );
+
+bool IsMutexOwner( rtems_id id );
+
+void DeleteMutex( rtems_id id );
+
+void ObtainMutex( rtems_id id );
+
+void ObtainMutexTimed( rtems_id id, rtems_interval ticks );
+
+void ObtainMutexDeadlock( rtems_id id );
+
+void ReleaseMutex( rtems_id id );
+
+struct Thread_queue_Queue;
+
+struct Thread_queue_Queue *GetMutexThreadQueue( rtems_id id );
+
void RestoreRunnerASR( void );
void RestoreRunnerMode( void );
void RestoreRunnerPriority( void );
+struct _Thread_Control;
+
+struct _Thread_Control *GetThread( rtems_id id );
+
+struct _Thread_Control *GetExecuting( void );
+
+void KillZombies( void );
+
+void WaitForExecutionStop( rtems_id task_id );
+
+void WaitForIntendToBlock( rtems_id task_id );
+
+void WaitForHeir( uint32_t cpu_index, rtems_id task_id );
+
+void WaitForNextTask( uint32_t cpu_index, rtems_id task_id );
+
+typedef enum {
+ TASK_TIMER_INVALID,
+ TASK_TIMER_INACTIVE,
+ TASK_TIMER_TICKS,
+ TASK_TIMER_REALTIME,
+ TASK_TIMER_MONOTONIC
+} TaskTimerState;
+
+typedef struct {
+ TaskTimerState state;
+ uint64_t expire_ticks;
+ struct timespec expire_timespec;
+} TaskTimerInfo;
+
+void GetTaskTimerInfo( rtems_id id, TaskTimerInfo *info );
+
+void GetTaskTimerInfoByThread(
+ struct _Thread_Control *thread,
+ TaskTimerInfo *info
+);
+
+void ClockTick( void );
+
+/**
+ * @brief Simulates a clock tick with the final expire time point of
+ * UINT64_MAX for all clocks.
+ *
+ * This function does not update the clock ticks counter.
+ */
+void FinalClockTick( void );
+
+/**
+ * @brief Simulates a single clock tick using the software timecounter.
+ *
+ * In contrast to ClockTick(), this function also updates CLOCK_MONOTONIC and
+ * CLOCK_REALTIME to the next software timecounter clock tick time point.
+ *
+ * This function is designed for test suites not having a clock driver.
+ */
+void TimecounterTick( void );
+
+typedef uint32_t ( *GetTimecountHandler )( void );
+
+/**
+ * @brief Sets the get timecount handler.
+ *
+ * Using this function will replace the timecounter of the clock driver.
+ *
+ * @return Returns the previous get timecount handler.
+ */
+GetTimecountHandler SetGetTimecountHandler( GetTimecountHandler handler );
+
+/**
+ * @brief This constant represents the fake frequency of the software
+ * timecounter.
+ */
+#define SOFTWARE_TIMECOUNTER_FREQUENCY 1000000
+
+/**
+ * @brief Gets the software timecount counter value.
+ *
+ * @return Returns the current software timecounter counter value.
+ */
+uint32_t GetTimecountCounter( void );
+
+/**
+ * @brief Sets and gets the software timecount counter value.
+ *
+ * @param counter is the new software timecounter counter value.
+ *
+ * @return Returns the previous software timecounter counter value.
+ */
+uint32_t SetTimecountCounter( uint32_t counter );
+
+/**
+ * @brief Returns the task identifier of the timer server task.
+ *
+ * This function is an attempt to avoid using the RTEMS internal global
+ * _Timer_server throughout the validation test code.
+ *
+ * @return Returns the task identifier of the timer server task if
+ * rtems_timer_initiate_server() has been invoked before. If the timer
+ * server task does not exist, RTEMS_INVALID_ID is returned.
+ */
+rtems_id GetTimerServerTaskId( void );
+
+/**
+ * @brief Undo the effects of rtems_timer_initiate_server()
+ *
+ * If rtems_timer_initiate_server() was never called before,
+ * nothing is done.
+ *
+ * If rtems_timer_initiate_server() was called before, the
+ * created thread and other resources are freed so that
+ * rtems_timer_initiate_server() can be called again.
+ * There should be no pending timers which are not yet executed
+ * by the server task. Naturally, there should be no
+ * timer server timers scheduled for execution.
+ *
+ * @return Returns true, if rtems_timer_initiate_server() has been
+ * invoked before and the timer server task has indeed been deleted,
+ * otherwise false.
+ */
+bool DeleteTimerServer( void );
+
+typedef struct {
+ struct {
+ const void *begin;
+ void *free_begin;
+ const void *end;
+ } areas[ 2 ];
+ size_t count;
+} MemoryContext;
+
+void MemorySave( MemoryContext *ctx );
+
+void MemoryRestore( const MemoryContext *ctx );
+
+/**
+ * @brief Fails a dynamic memory allocation when the counter reaches zero.
+ *
+ * This function initializes an internal counter which is decremented before
+ * each dynamic memory allocation through the rtems_malloc() directive. When
+ * the counter decrements from one to zero, the allocation fails and NULL will
+ * be returned.
+ *
+ * @param counter is the initial counter value.
+ */
+void MemoryAllocationFailWhen( uint32_t counter );
+
typedef struct {
Chain_Node node;
void ( *handler )( void * );
@@ -114,14 +388,208 @@ void CallWithinISRSubmit( CallWithinISRRequest *request );
void CallWithinISRWait( const CallWithinISRRequest *request );
+typedef struct {
+ Thread_queue_Operations tq_ops;
+ const Thread_queue_Operations *wrapped_ops;
+ Thread_queue_Control thread_queue;
+ CallWithinISRRequest isr_request;
+} WrapThreadQueueContext;
+
+void WrapThreadQueueInitialize(
+ WrapThreadQueueContext *ctx,
+ void ( *handler )( void * ),
+ void *arg
+);
+
+void WrapThreadQueueExtract(
+ WrapThreadQueueContext *ctx,
+ struct _Thread_Control *thread
+);
+
+void WrapThreadQueueExtractDirect(
+ WrapThreadQueueContext *ctx,
+ Thread_Control *thread
+);
+
+void WrapThreadQueueDestroy( WrapThreadQueueContext *ctx );
+
+struct Per_CPU_Control;
+
+void SetPreemptionIntervention(
+ struct Per_CPU_Control *cpu,
+ void ( *handler )( void * ),
+ void *arg
+);
+
rtems_vector_number GetValidInterruptVectorNumber(
const rtems_interrupt_attributes *required
);
-rtems_vector_number GetTestableInterruptVector( void );
+rtems_vector_number GetTestableInterruptVector(
+ const rtems_interrupt_attributes *required
+);
bool HasInterruptVectorEntriesInstalled( rtems_vector_number vector );
+/**
+ * @brief Get the clock and context of a timer from RTEMS internal data.
+ *
+ * With exception of TIMER_DORMANT, the return values are bits or-ed together.
+ *
+ * @param id The timer ID.
+ *
+ * @retval TIMER_DORMANT Either the id argument is invalid or the timer has
+ * never been used before.
+ * @return The TIMER_CLASS_BIT_ON_TASK is set if the timer service routine
+ * was or will be executed in task context, otherwise it was or will be
+ * executed in interrupt context.
+ *
+ * The TIMER_CLASS_BIT_TIME_OF_DAY is set if the clock used is or was the
+ * ${/glossary/clock-realtime:/term}, otherwise the
+ * ${/glossary/clock-tick:/term} based clock is or was used.
+ */
+Timer_Classes GetTimerClass( rtems_id id );
+
+/**
+ * @brief This structure provides data used by RTEMS to schedule a timer
+ * service routine.
+ */
+typedef struct {
+ /**
+ * @brief This member contains a reference to the timer service routine.
+ */
+ rtems_timer_service_routine_entry routine;
+ /**
+ * @brief This member contains a reference to the user data to be provided
+ * to the timer service routine.
+ */
+ void *user_data;
+ /**
+ * @brief This member contains the timer interval in ticks or seconds.
+ */
+ Watchdog_Interval interval;
+} Timer_Scheduling_Data;
+
+/**
+ * @brief Get data related to scheduling a timer service routine
+ * from RTEMS internal structures.
+ *
+ * @param id The timer ID.
+ * @param[out] data If the reference is not NULL, the data retrieved from
+ * internal RTEMS structures is stored here.
+ */
+void GetTimerSchedulingData(
+ rtems_id id,
+ Timer_Scheduling_Data *data
+);
+
+/**
+ * @brief The various states of a timer.
+ */
+typedef enum {
+ TIMER_INVALID,
+ TIMER_INACTIVE,
+ TIMER_SCHEDULED,
+ TIMER_PENDING
+} Timer_States;
+
+/**
+ * @brief Get the state of a timer from RTEMS internal data.
+ *
+ * @param id The timer ID.
+ *
+ * @retval TIMER_INVALID The id argument is invalid.
+ * @retval TIMER_INACTIVE The timer is not scheduled (i.e. it is
+ * new, run off, or canceled).
+ * @retval TIMER_SCHEDULED The timer is scheduled.
+ * @retval TIMER_PENDING The timer is pending.
+ */
+Timer_States GetTimerState( rtems_id id );
+
+/**
+ * @brief Mark the realtime clock as never set.
+ *
+ * This function manipulates RTEMS internal data structures to undo the
+ * effect of rtems_clock_set(). If the clock is not set, the function has no
+ * effect.
+ */
+void UnsetClock( void );
+
+void FatalInitialExtension(
+ rtems_fatal_source source,
+ bool always_set_to_false,
+ rtems_fatal_code code
+);
+
+typedef void ( *FatalHandler )(
+ rtems_fatal_source source,
+ rtems_fatal_code code,
+ void *arg
+);
+
+void SetFatalHandler( FatalHandler fatal, void *arg );
+
+void SetTaskSwitchExtension( rtems_task_switch_extension task_switch );
+
+typedef struct {
+ uint32_t fatal;
+ uint32_t thread_begin;
+ uint32_t thread_create;
+ uint32_t thread_delete;
+ uint32_t thread_exitted;
+ uint32_t thread_restart;
+ uint32_t thread_start;
+ uint32_t thread_switch;
+ uint32_t thread_terminate;
+} ExtensionCalls;
+
+void ClearExtensionCalls( ExtensionCalls *calls );
+
+void CopyExtensionCalls( const ExtensionCalls *from, ExtensionCalls *to );
+
+void SetIORelaxHandler( void ( *handler )( void * ), void *arg );
+
+void StartDelayThreadDispatch( uint32_t cpu_index );
+
+void StopDelayThreadDispatch( uint32_t cpu_index );
+
+bool AreInterruptsEnabled( void );
+
+bool IsWhiteSpaceOnly( const char *s );
+
+bool IsEqualIgnoreWhiteSpace( const char *a, const char *b );
+
+#if defined(RTEMS_SMP)
+bool TicketLockIsAvailable( const SMP_ticket_lock_Control *lock );
+
+void TicketLockWaitForOwned( const SMP_ticket_lock_Control *lock );
+
+void TicketLockWaitForOthers(
+ const SMP_ticket_lock_Control *lock,
+ unsigned int others
+);
+
+static inline bool ISRLockIsAvailable( const ISR_lock_Control *lock )
+{
+ return TicketLockIsAvailable( &lock->Lock.Ticket_lock );
+}
+
+static inline void ISRLockWaitForOwned( const ISR_lock_Control *lock )
+{
+ TicketLockWaitForOwned( &lock->Lock.Ticket_lock );
+}
+
+static inline void ISRLockWaitForOthers(
+ const ISR_lock_Control *lock,
+ unsigned int others
+)
+{
+ TicketLockWaitForOthers( &lock->Lock.Ticket_lock, others );
+}
+#endif
+
+void *IdleBody( uintptr_t ignored );
+
/** @} */
#ifdef __cplusplus
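An illustrative use of the new task timer helpers (not part of this commit; worker_id is made up and refers to a task which is expected to sleep via rtems_task_wake_after()):
  TaskTimerInfo info;

  GetTaskTimerInfo( worker_id, &info );
  T_eq_int( info.state, TASK_TIMER_TICKS );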
diff --git a/testsuites/validation/tx-thread-queue.c b/testsuites/validation/tx-thread-queue.c
new file mode 100644
index 0000000000..1b0e8665c7
--- /dev/null
+++ b/testsuites/validation/tx-thread-queue.c
@@ -0,0 +1,854 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of the thread queue test
+ * support.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-thread-queue.h"
+#include "tx-support.h"
+#include "ts-config.h"
+
+#include <rtems/score/threadimpl.h>
+#include <rtems/rtems/semimpl.h>
+
+void TQSend(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+#if defined( RTEMS_SMP )
+ ctx->event_received[ worker ] = false;
+#endif
+
+ SendEvents( ctx->worker_id[ worker ], events );
+}
+
+void TQWaitForEventsReceived( const TQContext *ctx, TQWorkerKind worker )
+{
+#if defined( RTEMS_SMP )
+ while ( !ctx->event_received[ worker ] ) {
+ /* Wait */
+ }
+#endif
+}
+
+void TQWaitForExecutionStop( const TQContext *ctx, TQWorkerKind worker )
+{
+#if defined( RTEMS_SMP )
+ WaitForExecutionStop( ctx->worker_id[ worker ] );
+#endif
+}
+
+void TQSendAndWaitForExecutionStop(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+ TQSend( ctx, worker, events );
+
+#if defined( RTEMS_SMP )
+ TQWaitForEventsReceived( ctx, worker );
+ WaitForExecutionStop( ctx->worker_id[ worker ] );
+#endif
+}
+
+void TQWaitForIntendToBlock( const TQContext *ctx, TQWorkerKind worker )
+{
+ const rtems_tcb *thread;
+ Thread_Wait_flags intend_to_block;
+
+ thread = ctx->worker_tcb[ worker ];
+ intend_to_block = THREAD_WAIT_CLASS_OBJECT |
+ THREAD_WAIT_STATE_INTEND_TO_BLOCK;
+
+ while ( _Thread_Wait_flags_get_acquire( thread ) != intend_to_block ) {
+ /* Wait */
+ }
+}
+
+void TQSendAndWaitForIntendToBlock(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+ TQSend( ctx, worker, events );
+
+#if defined( RTEMS_SMP )
+ TQWaitForEventsReceived( ctx, worker );
+ TQWaitForIntendToBlock( ctx, worker );
+#endif
+}
+
+void TQSendAndWaitForExecutionStopOrIntendToBlock(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+#if defined( RTEMS_SMP )
+ const rtems_tcb *thread;
+ Thread_Wait_flags intend_to_block;
+#endif
+
+ TQSend( ctx, worker, events );
+
+#if defined( RTEMS_SMP )
+ TQWaitForEventsReceived( ctx, worker );
+ thread = ctx->worker_tcb[ worker ];
+ intend_to_block = THREAD_WAIT_CLASS_OBJECT |
+ THREAD_WAIT_STATE_INTEND_TO_BLOCK;
+
+ while (
+ _Thread_Is_executing_on_a_processor( thread ) &&
+ _Thread_Wait_flags_get_acquire( thread ) != intend_to_block
+ ) {
+ /* Wait */
+ }
+#endif
+}
+
+void TQSendAndSynchronizeRunner(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+)
+{
+ T_quiet_eq_u32( QueryPendingEvents() & TQ_EVENT_RUNNER_SYNC, 0 );
+ TQSend( ctx, worker, events | TQ_EVENT_RUNNER_SYNC );
+ TQSynchronizeRunner();
+}
+
+void TQClearDone( TQContext *ctx, TQWorkerKind worker )
+{
+ ctx->done[ worker ] = false;
+}
+
+void TQWaitForDone( const TQContext *ctx, TQWorkerKind worker )
+{
+ while ( !ctx->done[ worker ] ) {
+ /* Wait */
+ }
+}
+
+void TQSynchronizeRunner( void )
+{
+ ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC );
+}
+
+void TQSynchronizeRunner2( void )
+{
+ ReceiveAllEvents( TQ_EVENT_RUNNER_SYNC | TQ_EVENT_RUNNER_SYNC_2 );
+}
+
+void TQResetCounter( TQContext *ctx )
+{
+ ctx->counter = 0;
+ memset( &ctx->worker_counter, 0, sizeof( ctx->worker_counter ) );
+}
+
+uint32_t TQGetCounter( const TQContext *ctx )
+{
+ return ctx->counter;
+}
+
+uint32_t TQGetWorkerCounter( const TQContext *ctx, TQWorkerKind worker )
+{
+ return ctx->worker_counter[ worker ];
+}
+
+void TQMutexObtain( const TQContext *ctx, TQMutex mutex )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(
+ ctx->mutex_id[ mutex ],
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ T_rsc_success( sc );
+}
+
+void TQMutexRelease( const TQContext *ctx, TQMutex mutex )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( ctx->mutex_id[ mutex ] );
+ T_rsc_success( sc );
+}
+
+void TQSetPriority(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ Priority priority
+)
+{
+ SetPriority( ctx->worker_id[ worker ], priority );
+}
+
+Priority TQGetPriority( const TQContext *ctx, TQWorkerKind worker )
+{
+ return GetPriority( ctx->worker_id[ worker ] );
+}
+
+void TQSetScheduler(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_id scheduler_id,
+ Priority priority
+)
+{
+#if defined( RTEMS_SMP )
+ rtems_status_code sc;
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_id[ worker ],
+ scheduler_id,
+ priority
+ );
+ T_rsc_success( sc );
+#else
+ (void) scheduler_id;
+ SetPriority( ctx->worker_id[ worker ], priority );
+#endif
+}
+
+static void Count( TQContext *ctx, TQWorkerKind worker )
+{
+ unsigned int counter;
+
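+  /* Record the 1-based order in which the workers performed this action. */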
+ counter = _Atomic_Fetch_add_uint( &ctx->counter, 1, ATOMIC_ORDER_RELAXED );
+ ctx->worker_counter[ worker ] = counter + 1;
+}
+
+static void Enqueue( TQContext *ctx, TQWorkerKind worker, TQWait wait )
+{
+ ctx->status[ worker ] = TQEnqueue( ctx, wait );
+ Count( ctx, worker );
+}
+
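+/*
+ * This fatal error handler is installed before an enqueue which is expected
+ * to produce a deadlock.  The thread queue deadlock detection raises
+ * INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK, which is checked here before jumping
+ * back to the setjmp() context saved right before the enqueue, so that the
+ * caller can record STATUS_DEADLOCK instead of terminating the system.
+ */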
+static void ThreadQueueDeadlock(
+ rtems_fatal_source source,
+ rtems_fatal_code code,
+ void *arg
+)
+{
+ TQContext *ctx;
+
+ ctx = arg;
+ T_eq_int( source, INTERNAL_ERROR_CORE );
+ T_eq_int( code, INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
+ SetFatalHandler( NULL, NULL );
+ longjmp( ctx->before_enqueue, 1 );
+}
+
+static void Worker( rtems_task_argument arg, TQWorkerKind worker )
+{
+ TQContext *ctx;
+
+ ctx = (TQContext *) arg;
+
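+  /*
+   * Event-driven worker loop: each TQ_EVENT_* flag of the received event set
+   * triggers the corresponding action below; once the whole set was
+   * processed, done[ worker ] is set so that TQWaitForDone() can observe it.
+   */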
+ while ( true ) {
+ rtems_event_set events;
+
+ events = ReceiveAnyEvents();
+ ctx->event_received[ worker ] = true;
+
+ if ( ( events & TQ_EVENT_HELPER_A_SYNC ) != 0 ) {
+ SendEvents( ctx->worker_id[ TQ_HELPER_A ], TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_HELPER_B_SYNC ) != 0 ) {
+ SendEvents( ctx->worker_id[ TQ_HELPER_B ], TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_SCHEDULER_RECORD_START ) != 0 ) {
+ TQSchedulerRecordStart( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_PREPARE ) != 0 ) {
+ TQEnqueuePrepare( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE ) != 0 ) {
+ Enqueue( ctx, worker, ctx->wait );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_TIMED ) != 0 ) {
+ Enqueue( ctx, worker, TQ_WAIT_TIMED );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_FATAL ) != 0 ) {
+ SetFatalHandler( ThreadQueueDeadlock, ctx );
+
+ if ( setjmp( ctx->before_enqueue ) == 0 ) {
+ ctx->status[ worker ] = STATUS_MINUS_ONE;
+ Enqueue( ctx, worker, ctx->wait );
+ } else {
+ ctx->status[ worker ] = STATUS_DEADLOCK;
+ }
+ }
+
+ if ( ( events & TQ_EVENT_TIMEOUT ) != 0 ) {
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable();
+ _Thread_Timeout( &ctx->worker_tcb[ worker ]->Timer.Watchdog );
+ _Thread_Dispatch_direct( cpu_self );
+ }
+
+ if ( ( events & TQ_EVENT_FLUSH ) != 0 ) {
+ TQFlush( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_ENQUEUE_DONE ) != 0 ) {
+ TQEnqueueDone( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_SURRENDER ) != 0 ) {
+ Status_Control status;
+
+ status = TQSurrender( ctx );
+ T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_A_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_A );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_A_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_A );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_B_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_B );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_B_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_B );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_C_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_C );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_C_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_C );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_D_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_D );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_D_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_D );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_NO_PROTOCOL );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_NO_PROTOCOL );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_FIFO_OBTAIN ) != 0 ) {
+ TQMutexObtain( ctx, TQ_MUTEX_FIFO );
+ }
+
+ if ( ( events & TQ_EVENT_MUTEX_FIFO_RELEASE ) != 0 ) {
+ TQMutexRelease( ctx, TQ_MUTEX_FIFO );
+ }
+
+ if ( ( events & TQ_EVENT_PIN ) != 0 ) {
+ _Thread_Pin( _Thread_Get_executing() );
+ }
+
+ if ( ( events & TQ_EVENT_UNPIN ) != 0 ) {
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Thread_Dispatch_disable();
+ _Thread_Unpin( _Thread_Get_executing(), cpu_self );
+ _Thread_Dispatch_direct( cpu_self );
+ }
+
+ if ( ( events & TQ_EVENT_SCHEDULER_RECORD_STOP ) != 0 ) {
+ TQSchedulerRecordStop( ctx );
+ }
+
+ if ( ( events & TQ_EVENT_RUNNER_SYNC ) != 0 ) {
+ SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC );
+ }
+
+ if ( ( events & TQ_EVENT_COUNT ) != 0 ) {
+ Count( ctx, worker );
+ }
+
+ if ( ( events & TQ_EVENT_BUSY_WAIT ) != 0 ) {
+ while ( ctx->busy_wait[ worker ] ) {
+ /* Wait */
+ }
+ }
+
+ if ( ( events & TQ_EVENT_RUNNER_SYNC_2 ) != 0 ) {
+ SendEvents( ctx->runner_id, TQ_EVENT_RUNNER_SYNC_2 );
+ }
+
+ ctx->done[ worker ] = true;
+ }
+}
+
+static void BlockerA( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_A );
+}
+
+static void BlockerB( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_B );
+}
+
+static void BlockerC( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_C );
+}
+
+static void BlockerD( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_D );
+}
+
+static void BlockerE( rtems_task_argument arg )
+{
+ Worker( arg, TQ_BLOCKER_E );
+}
+
+static void WorkerF( rtems_task_argument arg )
+{
+ Worker( arg, TQ_WORKER_F );
+}
+
+static void HelperA( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_A );
+}
+
+static void HelperB( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_B );
+}
+
+static void HelperC( rtems_task_argument arg )
+{
+ Worker( arg, TQ_HELPER_C );
+}
+
+void TQInitialize( TQContext *ctx )
+{
+ rtems_status_code sc;
+ size_t i;
+
+ ctx->runner_id = rtems_task_self();
+ ctx->runner_tcb = GetThread( RTEMS_SELF );
+
+ /*
+ * Use a lower priority than all started worker tasks to make sure they wait
+ * for events.
+ */
+ SetSelfPriority( PRIO_VERY_LOW );
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
+ rtems_attribute attributes;
+
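+    /*
+     * All mutexes are binary semaphores.  TQ_MUTEX_NO_PROTOCOL uses plain
+     * priority ordering, TQ_MUTEX_FIFO uses FIFO ordering, and all other
+     * mutexes use priority inheritance.
+     */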
+ attributes = RTEMS_BINARY_SEMAPHORE;
+
+ if ( i == TQ_MUTEX_NO_PROTOCOL ) {
+ attributes |= RTEMS_PRIORITY;
+ } else if ( i == TQ_MUTEX_FIFO ) {
+ attributes |= RTEMS_FIFO;
+ } else {
+ attributes |= RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY;
+ }
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'M', 'T', 'X', 'A' + i ),
+ 1,
+ attributes,
+ 0,
+ &ctx->mutex_id[ i ]
+ );
+ T_rsc_success( sc );
+ }
+
+ ctx->worker_id[ TQ_BLOCKER_A ] = CreateTask( "BLKA", PRIO_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_A ], BlockerA, ctx );
+ ctx->worker_id[ TQ_BLOCKER_B ] = CreateTask( "BLKB", PRIO_VERY_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_B ], BlockerB, ctx );
+ ctx->worker_id[ TQ_BLOCKER_C ] = CreateTask( "BLKC", PRIO_ULTRA_HIGH );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_C ], BlockerC, ctx );
+ ctx->worker_id[ TQ_BLOCKER_D ] = CreateTask( "BLKD", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_D ], BlockerD, ctx );
+ ctx->worker_id[ TQ_BLOCKER_E ] = CreateTask( "BLKE", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_BLOCKER_E ], BlockerE, ctx );
+ ctx->worker_id[ TQ_WORKER_F ] = CreateTask( "WRKF", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_WORKER_F ], WorkerF, ctx );
+ ctx->worker_id[ TQ_HELPER_A ] = CreateTask( "HLPA", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_HELPER_A ], HelperA, ctx );
+ ctx->worker_id[ TQ_HELPER_B ] = CreateTask( "HLPB", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_HELPER_B ], HelperB, ctx );
+ ctx->worker_id[ TQ_HELPER_C ] = CreateTask( "HLPC", PRIO_LOW );
+ StartTask( ctx->worker_id[ TQ_HELPER_C ], HelperC, ctx );
+
+  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_tcb ); ++i ) {
+ ctx->worker_tcb[ i ] = GetThread( ctx->worker_id[ i ] );
+ }
+
+ SetSelfPriority( PRIO_NORMAL );
+}
+
+void TQDestroy( TQContext *ctx )
+{
+ size_t i;
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->worker_id ); ++i ) {
+ DeleteTask( ctx->worker_id[ i ] );
+ }
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->mutex_id ); ++i ) {
+ if ( ctx->mutex_id[ i ] != 0 ) {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_delete( ctx->mutex_id[ i ] );
+ T_rsc_success( sc );
+ }
+ }
+
+ RestoreRunnerPriority();
+}
+
+void TQReset( TQContext *ctx )
+{
+ rtems_id scheduler_id;
+
+ scheduler_id = SCHEDULER_A_ID;
+ SetScheduler( ctx->runner_id, scheduler_id, PRIO_NORMAL );
+ TQSetScheduler( ctx, TQ_BLOCKER_A, scheduler_id, PRIO_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_B, scheduler_id, PRIO_VERY_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_C, scheduler_id, PRIO_ULTRA_HIGH );
+ TQSetScheduler( ctx, TQ_BLOCKER_D, scheduler_id, PRIO_LOW );
+ TQSetScheduler( ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+ TQSetScheduler( ctx, TQ_HELPER_A, scheduler_id, PRIO_LOW );
+ TQSetScheduler( ctx, TQ_HELPER_B, scheduler_id, PRIO_LOW );
+ TQSetScheduler( ctx, TQ_HELPER_C, scheduler_id, PRIO_LOW );
+}
+
+void TQSortMutexesByID( TQContext *ctx )
+{
+ size_t i;
+ size_t n;
+
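+  /* Only the first three mutexes (TQ_MUTEX_A to TQ_MUTEX_C) are sorted. */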
+ n = 3;
+
+ /* Bubble sort */
+  for ( i = 1; i < n; ++i ) {
+ size_t j;
+
+ for ( j = 0; j < n - i; ++j ) {
+ if ( ctx->mutex_id[ j ] > ctx->mutex_id[ j + 1 ] ) {
+ rtems_id tmp;
+
+ tmp = ctx->mutex_id[ j ];
+ ctx->mutex_id[ j ] = ctx->mutex_id[ j + 1 ];
+ ctx->mutex_id[ j + 1 ] = tmp;
+ }
+ }
+ }
+}
+
+void TQGetProperties( TQContext *ctx, TQWorkerKind enqueued_worker )
+{
+ ( *ctx->get_properties )( ctx, enqueued_worker );
+}
+
+Status_Control TQConvertStatus( TQContext *ctx, Status_Control status )
+{
+ return ( *ctx->convert_status )( status );
+}
+
+void TQEnqueuePrepare( TQContext *ctx )
+{
+ ( *ctx->enqueue_prepare )( ctx );
+}
+
+Status_Control TQEnqueue( TQContext *ctx, TQWait wait )
+{
+ return ( *ctx->enqueue )( ctx, wait );
+}
+
+Status_Control TQEnqueueFatal( TQContext *ctx )
+{
+ Status_Control status;
+
+ SetFatalHandler( ThreadQueueDeadlock, ctx );
+ status = STATUS_MINUS_ONE;
+
+ if ( setjmp( ctx->before_enqueue ) == 0 ) {
+ status = TQEnqueue( ctx, ctx->wait );
+ } else {
+ status = STATUS_DEADLOCK;
+ }
+
+ return status;
+}
+
+void TQEnqueueDone( TQContext *ctx )
+{
+ ( *ctx->enqueue_done )( ctx );
+}
+
+Status_Control TQSurrender( TQContext *ctx )
+{
+ return ( *ctx->surrender )( ctx );
+}
+
+void TQFlush( TQContext *ctx )
+{
+ ( *ctx->flush )( ctx );
+}
+
+rtems_tcb *TQGetOwner( TQContext *ctx )
+{
+ rtems_tcb *( *get_owner )( TQContext * );
+
+ get_owner = ctx->get_owner;
+
+ if ( get_owner == NULL ) {
+ return NULL;
+ }
+
+ return ( *get_owner )( ctx );
+}
+
+void TQSchedulerRecordStart( TQContext *ctx )
+{
+ T_scheduler_log *log;
+
+ log = T_scheduler_record_40( &ctx->scheduler_log );
+ T_null( log );
+}
+
+void TQSchedulerRecordStop( TQContext *ctx )
+{
+ T_scheduler_log *log;
+
+ log = T_scheduler_record( NULL );
+ T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+}
+
+const T_scheduler_event *TQGetNextAny( TQContext *ctx, size_t *index )
+{
+ return T_scheduler_next_any(
+ &ctx->scheduler_log.header,
+ index
+ );
+}
+
+const T_scheduler_event *TQGetNextBlock( TQContext *ctx, size_t *index )
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_BLOCK,
+ index
+ );
+}
+
+const T_scheduler_event *TQGetNextUnblock( TQContext *ctx, size_t *index )
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_UNBLOCK,
+ index
+ );
+}
+
+const T_scheduler_event *TQGetNextUpdatePriority(
+ TQContext *ctx,
+ size_t *index
+)
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_UPDATE_PRIORITY,
+ index
+ );
+}
+
+const T_scheduler_event *TQGetNextAskForHelp(
+ TQContext *ctx,
+ size_t *index
+)
+{
+ return T_scheduler_next(
+ &ctx->scheduler_log.header,
+ T_SCHEDULER_ASK_FOR_HELP,
+ index
+ );
+}
+
+void TQDoNothing( TQContext *ctx )
+{
+ (void) ctx;
+}
+
+Status_Control TQDoNothingSuccessfully( TQContext *ctx )
+{
+ (void) ctx;
+
+ return STATUS_SUCCESSFUL;
+}
+
+Status_Control TQConvertStatusClassic( Status_Control status )
+{
+ return STATUS_BUILD( STATUS_GET_CLASSIC( status ), 0 );
+}
+
+Status_Control TQConvertStatusPOSIX( Status_Control status )
+{
+ return STATUS_BUILD( 0, STATUS_GET_POSIX( status ) );
+}
+
+void TQEnqueuePrepareDefault( TQContext *ctx )
+{
+ Status_Control status;
+
+ status = TQEnqueue( ctx, TQ_NO_WAIT );
+ T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
+}
+
+void TQEnqueueDoneDefault( TQContext *ctx )
+{
+ Status_Control status;
+
+ status = TQSurrender( ctx );
+ T_eq_int( status, TQConvertStatus( ctx, STATUS_SUCCESSFUL ) );
+}
+
+Status_Control TQEnqueueClassicSem( TQContext *ctx, TQWait wait )
+{
+ rtems_status_code sc;
+ rtems_option option;
+  rtems_interval timeout;
+
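+  /*
+   * Map the abstract TQWait request to Classic API semaphore obtain
+   * arguments: TQ_WAIT_FOREVER blocks without a timeout, TQ_WAIT_TIMED blocks
+   * with a very large timeout, and any other value requests RTEMS_NO_WAIT.
+   */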
+ switch ( wait ) {
+ case TQ_WAIT_FOREVER:
+ option = RTEMS_WAIT;
+ timeout = RTEMS_NO_TIMEOUT;
+ break;
+ case TQ_WAIT_TIMED:
+ option = RTEMS_WAIT;
+ timeout = UINT32_MAX;
+ break;
+ default:
+ option = RTEMS_NO_WAIT;
+ timeout = 0;
+ break;
+ }
+
+ sc = rtems_semaphore_obtain( ctx->thread_queue_id, option, timeout );
+
+ return STATUS_BUILD( sc, 0 );
+}
+
+Status_Control TQSurrenderClassicSem( TQContext *ctx )
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release( ctx->thread_queue_id );
+
+ return STATUS_BUILD( sc, 0 );
+}
+
+rtems_tcb *TQGetOwnerClassicSem( TQContext *ctx )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+ rtems_tcb *thread;
+
+ semaphore = _Semaphore_Get( ctx->thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ thread = semaphore->Core_control.Wait_queue.Queue.owner;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+
+ return thread;
+}
+
+uint32_t TQSemGetCount( TQSemContext *ctx )
+{
+ return ( *ctx->get_count )( ctx );
+}
+
+void TQSemSetCount( TQSemContext *ctx, uint32_t count )
+{
+ ( *ctx->set_count )( ctx, count );
+}
+
+uint32_t TQSemGetCountClassic( TQSemContext *ctx )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+ uint32_t count;
+
+ semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ count = semaphore->Core_control.Semaphore.count;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+
+ return count;
+}
+
+void TQSemSetCountClassic( TQSemContext *ctx, uint32_t count )
+{
+ Semaphore_Control *semaphore;
+ Thread_queue_Context queue_context;
+
+ semaphore = _Semaphore_Get( ctx->base.thread_queue_id, &queue_context );
+ T_assert_not_null( semaphore );
+ semaphore->Core_control.Semaphore.count = count;
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+}
diff --git a/testsuites/validation/tx-thread-queue.h b/testsuites/validation/tx-thread-queue.h
new file mode 100644
index 0000000000..d9a1a4db8d
--- /dev/null
+++ b/testsuites/validation/tx-thread-queue.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This header file provides the functions to test the
+ * @ref RTEMSScoreThreadQueue.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TX_THREAD_QUEUE_H
+#define _TX_THREAD_QUEUE_H
+
+#include "tx-support.h"
+
+#include <rtems/test-scheduler.h>
+#include <rtems/score/atomic.h>
+#include <rtems/score/status.h>
+
+#include <setjmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSTestSuites
+ *
+ * @{
+ */
+
+typedef enum {
+ TQ_NODE_ONLY,
+ TQ_NODE_VITAL,
+ TQ_NODE_DISPENSABLE
+} TQNodeKind;
+
+typedef enum {
+ TQ_WAIT_STATE_BLOCKED,
+ TQ_WAIT_STATE_INTEND_TO_BLOCK,
+ TQ_WAIT_STATE_READY_AGAIN
+} TQWaitState;
+
+typedef enum {
+ TQ_BLOCKER_A,
+ TQ_BLOCKER_B,
+ TQ_BLOCKER_C,
+ TQ_BLOCKER_D,
+ TQ_BLOCKER_E,
+ TQ_WORKER_F,
+ TQ_HELPER_A,
+ TQ_HELPER_B,
+ TQ_HELPER_C,
+ TQ_WORKER_COUNT
+} TQWorkerKind;
+
+typedef enum {
+ TQ_MUTEX_A,
+ TQ_MUTEX_B,
+ TQ_MUTEX_C,
+ TQ_MUTEX_D,
+ TQ_MUTEX_NO_PROTOCOL,
+ TQ_MUTEX_FIFO,
+ TQ_MUTEX_COUNT
+} TQMutex;
+
+typedef enum {
+ TQ_FIFO,
+ TQ_PRIORITY
+} TQDiscipline;
+
+typedef enum {
+ TQ_NO_WAIT,
+ TQ_WAIT_FOREVER,
+ TQ_WAIT_TIMED
+} TQWait;
+
+typedef enum {
+ TQ_DEADLOCK_STATUS,
+ TQ_DEADLOCK_FATAL
+} TQDeadlock;
+
+typedef enum {
+ TQ_EVENT_ENQUEUE_PREPARE = RTEMS_EVENT_0,
+ TQ_EVENT_ENQUEUE = RTEMS_EVENT_1,
+ TQ_EVENT_ENQUEUE_DONE = RTEMS_EVENT_2,
+ TQ_EVENT_SURRENDER = RTEMS_EVENT_3,
+ TQ_EVENT_RUNNER_SYNC = RTEMS_EVENT_4,
+ TQ_EVENT_RUNNER_SYNC_2 = RTEMS_EVENT_5,
+ TQ_EVENT_HELPER_A_SYNC = RTEMS_EVENT_6,
+ TQ_EVENT_HELPER_B_SYNC = RTEMS_EVENT_7,
+ TQ_EVENT_MUTEX_A_OBTAIN = RTEMS_EVENT_8,
+ TQ_EVENT_MUTEX_A_RELEASE = RTEMS_EVENT_9,
+ TQ_EVENT_MUTEX_B_OBTAIN = RTEMS_EVENT_10,
+ TQ_EVENT_MUTEX_B_RELEASE = RTEMS_EVENT_11,
+ TQ_EVENT_BUSY_WAIT = RTEMS_EVENT_12,
+ TQ_EVENT_FLUSH = RTEMS_EVENT_13,
+ TQ_EVENT_SCHEDULER_RECORD_START = RTEMS_EVENT_14,
+ TQ_EVENT_SCHEDULER_RECORD_STOP = RTEMS_EVENT_15,
+ TQ_EVENT_TIMEOUT = RTEMS_EVENT_16,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN = RTEMS_EVENT_17,
+ TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE = RTEMS_EVENT_18,
+ TQ_EVENT_ENQUEUE_FATAL = RTEMS_EVENT_19,
+ TQ_EVENT_MUTEX_C_OBTAIN = RTEMS_EVENT_20,
+ TQ_EVENT_MUTEX_C_RELEASE = RTEMS_EVENT_21,
+ TQ_EVENT_MUTEX_FIFO_OBTAIN = RTEMS_EVENT_22,
+ TQ_EVENT_MUTEX_FIFO_RELEASE = RTEMS_EVENT_23,
+ TQ_EVENT_ENQUEUE_TIMED = RTEMS_EVENT_24,
+ TQ_EVENT_MUTEX_D_OBTAIN = RTEMS_EVENT_25,
+ TQ_EVENT_MUTEX_D_RELEASE = RTEMS_EVENT_26,
+ TQ_EVENT_PIN = RTEMS_EVENT_27,
+ TQ_EVENT_UNPIN = RTEMS_EVENT_28,
+ TQ_EVENT_COUNT = RTEMS_EVENT_29
+} TQEvent;
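+
+/*
+ * The TQ_EVENT_* flags are sent to the worker tasks with TQSend() and related
+ * functions.  A worker processes each flag of the received event set and then
+ * marks itself as done.
+ */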
+
+typedef enum {
+ TQ_ENQUEUE_BLOCKS,
+ TQ_ENQUEUE_STICKY
+} TQEnqueueVariant;
+
+typedef struct TQContext {
+ /**
+ * @brief This member defines the thread queue discipline.
+ */
+ TQDiscipline discipline;
+
+ /**
+ * @brief This member defines the enqueue wait behaviour.
+ *
+ * If TQ_NO_WAIT is used, then no thread queue enqueue shall be performed.
+ */
+ TQWait wait;
+
+ /**
+ * @brief This member defines the enqueue variant.
+ */
+ TQEnqueueVariant enqueue_variant;
+
+ /**
+ * @brief This member defines the deadlock enqueue behaviour.
+ */
+ TQDeadlock deadlock;
+
+ /**
+ * @brief This member contains the runner task identifier.
+ */
+ rtems_id runner_id;
+
+ /**
+ * @brief This member contains a reference to the runner task control block.
+ */
+ rtems_tcb *runner_tcb;
+
+ /**
+ * @brief This member contains the worker task identifiers.
+ */
+ rtems_id worker_id[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief This member contains references to the worker task control
+ * blocks.
+ */
+ rtems_tcb *worker_tcb[ TQ_WORKER_COUNT ];
+
+ /**
+   * @brief When a worker receives an event, the corresponding element shall be
+ * set to true.
+ */
+ volatile bool event_received[ TQ_WORKER_COUNT ];
+
+ /**
+   * @brief If the corresponding element is true, then the worker shall busy
+   *   wait on request.
+ */
+ volatile bool busy_wait[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief When a worker is done processing its current event set, the
+ * corresponding element shall be set to true.
+ */
+ volatile bool done[ TQ_WORKER_COUNT ];
+
+ /**
+   * @brief This member provides the counter used to assign the worker
+   *   counters.
+ */
+ Atomic_Uint counter;
+
+ /**
+   * @brief When a worker returns from TQEnqueue(), the counter is incremented
+   *   and stored in the corresponding element of this member.
+ */
+ uint32_t worker_counter[ TQ_WORKER_COUNT ];
+
+ /**
+ * @brief This member contains the last return status of a TQEnqueue() of the
+ * corresponding worker.
+ */
+ Status_Control status[ TQ_WORKER_COUNT ];
+
+ union {
+ /**
+ * @brief This member contains the identifier of an object providing the
+ * thread queue under test.
+ */
+ rtems_id thread_queue_id;
+
+ /**
+     * @brief This member contains the reference to the object containing the
+ * thread queue under test.
+ */
+ void *thread_queue_object;
+ };
+
+ /**
+   * @brief This member contains the identifiers of the test mutexes, see
+   *   TQMutex.
+ */
+ rtems_id mutex_id[ TQ_MUTEX_COUNT ];
+
+ /**
+ * @brief This member provides the scheduler log.
+ */
+ T_scheduler_log_40 scheduler_log;
+
+ /**
+ * @brief This member provides the get properties handler.
+ */
+ void ( *get_properties )( struct TQContext *, TQWorkerKind );
+
+ /**
+ * @brief This member provides the status convert handler.
+ */
+ Status_Control ( *convert_status )( Status_Control );
+
+ /**
+   * @brief This member specifies how many threads shall be enqueued.
+ */
+ uint32_t how_many;
+
+ /**
+   * @brief This member provides a context to jump back to before the
+ * enqueue.
+ */
+ jmp_buf before_enqueue;
+
+ /**
+ * @brief This member provides the thread queue enqueue prepare handler.
+ */
+ void ( *enqueue_prepare )( struct TQContext * );
+
+ /**
+ * @brief This member provides the thread queue enqueue handler.
+ */
+ Status_Control ( *enqueue )( struct TQContext *, TQWait );
+
+ /**
+ * @brief This member provides the thread queue enqueue done handler.
+ */
+ void ( *enqueue_done )( struct TQContext * );
+
+ /**
+ * @brief This member provides the thread queue surrender handler.
+ */
+ Status_Control ( *surrender )( struct TQContext * );
+
+ /**
+ * @brief This member provides the thread queue flush handler.
+ */
+ void ( *flush )( struct TQContext * );
+
+ /**
+ * @brief This member provides the get owner handler.
+ */
+ rtems_tcb *( *get_owner )( struct TQContext * );
+} TQContext;
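+
+/*
+ * Typical usage (sketch): a test case fills in the handlers for the thread
+ * queue under test, for example TQEnqueueClassicSem(), TQSurrenderClassicSem(),
+ * and TQConvertStatusClassic() together with the identifier of a Classic
+ * semaphore in thread_queue_id, then calls TQInitialize(), drives the worker
+ * tasks with TQSend() and the other helpers below, and finally calls
+ * TQDestroy().
+ */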
+
+void TQSend(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQSendAndWaitForExecutionStop(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQSendAndWaitForIntendToBlock(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQSendAndWaitForExecutionStopOrIntendToBlock(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQSendAndSynchronizeRunner(
+ TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_event_set events
+);
+
+void TQWaitForEventsReceived( const TQContext *ctx, TQWorkerKind worker );
+
+void TQWaitForIntendToBlock( const TQContext *ctx, TQWorkerKind worker );
+
+void TQWaitForExecutionStop( const TQContext *ctx, TQWorkerKind worker );
+
+void TQClearDone( TQContext *ctx, TQWorkerKind worker );
+
+void TQWaitForDone( const TQContext *ctx, TQWorkerKind worker );
+
+void TQSynchronizeRunner( void );
+
+void TQSynchronizeRunner2( void );
+
+void TQResetCounter( TQContext *ctx );
+
+uint32_t TQGetCounter( const TQContext *ctx );
+
+uint32_t TQGetWorkerCounter( const TQContext *ctx, TQWorkerKind worker );
+
+void TQMutexObtain( const TQContext *ctx, TQMutex mutex );
+
+void TQMutexRelease( const TQContext *ctx, TQMutex mutex );
+
+void TQSetPriority(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ Priority priority
+);
+
+Priority TQGetPriority( const TQContext *ctx, TQWorkerKind worker );
+
+void TQSetScheduler(
+ const TQContext *ctx,
+ TQWorkerKind worker,
+ rtems_id scheduler_id,
+ Priority priority
+);
+
+void TQInitialize( TQContext *ctx );
+
+void TQDestroy( TQContext *ctx );
+
+void TQReset( TQContext *ctx );
+
+void TQSortMutexesByID( TQContext *ctx );
+
+void TQGetProperties( TQContext *ctx, TQWorkerKind enqueued_worker );
+
+Status_Control TQConvertStatus( TQContext *ctx, Status_Control status );
+
+void TQEnqueuePrepare( TQContext *ctx );
+
+Status_Control TQEnqueue( TQContext *ctx, TQWait wait );
+
+Status_Control TQEnqueueFatal( TQContext *ctx );
+
+void TQEnqueueDone( TQContext *ctx );
+
+Status_Control TQSurrender( TQContext *ctx );
+
+void TQFlush( TQContext *ctx );
+
+rtems_tcb *TQGetOwner( TQContext *ctx );
+
+void TQSchedulerRecordStart( TQContext *ctx );
+
+void TQSchedulerRecordStop( TQContext *ctx );
+
+const T_scheduler_event *TQGetNextAny( TQContext *ctx, size_t *index );
+
+const T_scheduler_event *TQGetNextBlock( TQContext *ctx, size_t *index );
+
+const T_scheduler_event *TQGetNextUnblock( TQContext *ctx, size_t *index );
+
+const T_scheduler_event *TQGetNextUpdatePriority(
+ TQContext *ctx,
+ size_t *index
+);
+
+const T_scheduler_event *TQGetNextAskForHelp( TQContext *ctx, size_t *index );
+
+void TQDoNothing( TQContext *ctx );
+
+Status_Control TQDoNothingSuccessfully( TQContext *ctx );
+
+Status_Control TQConvertStatusClassic( Status_Control status );
+
+Status_Control TQConvertStatusPOSIX( Status_Control status );
+
+void TQEnqueuePrepareDefault( TQContext *ctx );
+
+void TQEnqueueDoneDefault( TQContext *ctx );
+
+Status_Control TQEnqueueClassicSem( TQContext *ctx, TQWait wait );
+
+Status_Control TQSurrenderClassicSem( TQContext *ctx );
+
+rtems_tcb *TQGetOwnerClassicSem( TQContext *ctx );
+
+typedef enum {
+ TQ_SEM_BINARY,
+ TQ_SEM_COUNTING
+} TQSemVariant;
+
+typedef struct TQSemContext {
+ /**
+ * @brief This member contains the base thread queue test context.
+ */
+ TQContext base;
+
+ /**
+ * @brief This member defines the semaphore variant.
+ */
+ TQSemVariant variant;
+
+ /**
+ * @brief This member provides the semaphore get count handler.
+ */
+ uint32_t ( *get_count )( struct TQSemContext * );
+
+ /**
+ * @brief This member provides the semaphore set count handler.
+ */
+ void ( *set_count )( struct TQSemContext *, uint32_t );
+} TQSemContext;
+
+Status_Control TQSemSurrender( TQSemContext *ctx );
+
+uint32_t TQSemGetCount( TQSemContext *ctx );
+
+void TQSemSetCount( TQSemContext *ctx, uint32_t count );
+
+Status_Control TQSemSurrenderClassic( TQSemContext *ctx );
+
+uint32_t TQSemGetCountClassic( TQSemContext *ctx );
+
+void TQSemSetCountClassic( TQSemContext *ctx, uint32_t count );
+
+typedef enum {
+ TQ_MTX_NO_PROTOCOL,
+ TQ_MTX_PRIORITY_INHERIT,
+ TQ_MTX_PRIORITY_CEILING,
+ TQ_MTX_MRSP
+} TQMtxProtocol;
+
+typedef enum {
+ TQ_MTX_RECURSIVE_ALLOWED,
+ TQ_MTX_RECURSIVE_DEADLOCK,
+ TQ_MTX_RECURSIVE_UNAVAILABLE
+} TQMtxRecursive;
+
+typedef enum {
+ TQ_MTX_NO_OWNER_CHECK,
+ TQ_MTX_CHECKS_OWNER
+} TQMtxOwnerCheck;
+
+typedef struct TQMtxContext {
+ /**
+ * @brief This member contains the base thread queue test context.
+ */
+ TQContext base;
+
+ /**
+ * @brief This member defines the locking protocol.
+ */
+ TQMtxProtocol protocol;
+
+ /**
+ * @brief This member defines the recursive seize behaviour.
+ */
+ TQMtxRecursive recursive;
+
+ /**
+ * @brief This member defines the owner check behaviour.
+ */
+ TQMtxOwnerCheck owner_check;
+
+ /**
+ * @brief This member defines the priority ceiling of the mutex.
+ *
+ * Use PRIO_INVALID to indicate that the mutex does not provide a priority
+ * ceiling.
+ */
+ rtems_task_priority priority_ceiling;
+} TQMtxContext;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TX_THREAD_QUEUE_H */
diff --git a/testsuites/validation/tx-timecounter.c b/testsuites/validation/tx-timecounter.c
new file mode 100644
index 0000000000..c6738914a4
--- /dev/null
+++ b/testsuites/validation/tx-timecounter.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the definition of SetGetTimecountHandler(),
+ * GetTimecountCounter(), and SetTimecountCounter().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+#include <rtems/sysinit.h>
+#include <rtems/timecounter.h>
+#include <rtems/score/atomic.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/threaddispatch.h>
+
+typedef struct {
+ struct timecounter base;
+ GetTimecountHandler handler;
+ Atomic_Ulong counter;
+} TimecounterControl;
+
+static TimecounterControl TimecounterInstance;
+
+GetTimecountHandler SetGetTimecountHandler( GetTimecountHandler handler )
+{
+ GetTimecountHandler previous;
+
+ previous = TimecounterInstance.handler;
+ TimecounterInstance.handler = handler;
+ return previous;
+}
+
+uint32_t GetTimecountCounter( void )
+{
+ return (uint32_t) _Atomic_Load_ulong(
+ &TimecounterInstance.counter,
+ ATOMIC_ORDER_RELAXED
+ );
+}
+
+uint32_t SetTimecountCounter( uint32_t counter )
+{
+ return (uint32_t) _Atomic_Exchange_ulong(
+ &TimecounterInstance.counter,
+ counter,
+ ATOMIC_ORDER_RELAXED
+ );
+}
+
+static uint32_t GetTimecountSoftware( void )
+{
+ return (uint32_t) _Atomic_Fetch_add_ulong(
+ &TimecounterInstance.counter,
+ 1,
+ ATOMIC_ORDER_RELAXED
+ );
+}
+
+static uint32_t GetTimecountWrapper( struct timecounter *tc )
+{
+ TimecounterControl *self;
+
+ self = (TimecounterControl *) tc;
+ return ( *self->handler )();
+}
+
+static void InstallTimecounter( void )
+{
+ TimecounterControl *self;
+
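+  /*
+   * Install a software timecounter with a quality above the clock driver
+   * timecounter so that it becomes the active timecounter.  The counter is
+   * normally advanced by GetTimecountSoftware() and can be redirected through
+   * SetGetTimecountHandler().
+   */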
+ self = &TimecounterInstance;
+ self->handler = GetTimecountSoftware;
+ self->base.tc_get_timecount = GetTimecountWrapper;
+ self->base.tc_counter_mask = 0xffffffff;
+ self->base.tc_frequency = SOFTWARE_TIMECOUNTER_FREQUENCY;
+ self->base.tc_quality = RTEMS_TIMECOUNTER_QUALITY_CLOCK_DRIVER + 1;
+ rtems_timecounter_install( &self->base );
+}
+
+RTEMS_SYSINIT_ITEM(
+ InstallTimecounter,
+ RTEMS_SYSINIT_DEVICE_DRIVERS,
+ RTEMS_SYSINIT_ORDER_LAST
+);
+
+static void DoTimecounterTick( void *arg )
+{
+ (void) arg;
+ _Timecounter_Tick();
+}
+
+void TimecounterTick( void )
+{
+ unsigned long counter_ticks_per_clock_tick;
+ Per_CPU_Control *cpu_self;
+ bool success;
+
+ counter_ticks_per_clock_tick =
+ SOFTWARE_TIMECOUNTER_FREQUENCY / rtems_clock_get_ticks_per_second();
+ cpu_self = _Thread_Dispatch_disable();
+
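+  /*
+   * Advance the software counter to the next multiple of the counter ticks
+   * per clock tick.  The compare-and-exchange loop retries if a concurrent
+   * GetTimecountSoftware() call incremented the counter in the meantime.
+   */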
+ do {
+ unsigned long old_counter;
+ unsigned long new_counter;
+
+ old_counter = _Atomic_Load_ulong(
+ &TimecounterInstance.counter,
+ ATOMIC_ORDER_RELAXED
+ );
+ new_counter = old_counter + counter_ticks_per_clock_tick -
+ ( old_counter % counter_ticks_per_clock_tick );
+ success = _Atomic_Compare_exchange_ulong(
+ &TimecounterInstance.counter,
+ &old_counter,
+ new_counter,
+ ATOMIC_ORDER_RELAXED,
+ ATOMIC_ORDER_RELAXED
+ );
+ } while ( !success );
+
+ DoTimecounterTick( NULL );
+#if defined( RTEMS_SMP )
+ _SMP_Othercast_action( DoTimecounterTick, NULL );
+#endif
+ _Thread_Dispatch_enable( cpu_self );
+}
diff --git a/testsuites/validation/tx-timer-server.c b/testsuites/validation/tx-timer-server.c
new file mode 100644
index 0000000000..80ba853fb1
--- /dev/null
+++ b/testsuites/validation/tx-timer-server.c
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the definition of DeleteTimerServer().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+#include <rtems/rtems/timerimpl.h>
+#include <rtems/score/todimpl.h>
+
+rtems_id GetTimerServerTaskId( void )
+{
+ if ( _Timer_server == NULL ) {
+ return RTEMS_INVALID_ID;
+ }
+ return _Timer_server->server_id;
+}
+
+bool DeleteTimerServer( void )
+{
+ Timer_server_Control *server;
+
+ server = _Timer_server;
+
+ if ( server == NULL ) {
+ return false;
+ }
+
+ DeleteTask( server->server_id );
+ _ISR_lock_Destroy( &server->Lock );
+ T_true( _Chain_Is_empty( &server->Pending ) );
+ _Timer_server = NULL;
+
+ return true;
+}
+
+Timer_Classes GetTimerClass( rtems_id id )
+{
+ /* This code is derived from rtems_timer_get_information() */
+ Timer_Classes result = TIMER_DORMANT;
+ Timer_Control *the_timer;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu;
+
+ the_timer = _Timer_Get( id, &lock_context );
+ if ( the_timer != NULL ) {
+ cpu = _Timer_Acquire_critical( the_timer, &lock_context );
+ result = the_timer->the_class;
+ _Timer_Release( cpu, &lock_context );
+ }
+
+ return result;
+}
+
+void GetTimerSchedulingData(
+ rtems_id id,
+ Timer_Scheduling_Data *data
+)
+{
+ /* This code is derived from rtems_timer_get_information() */
+ Timer_Control *the_timer;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu;
+
+ if ( data == NULL ) {
+ return;
+ }
+
+ the_timer = _Timer_Get( id, &lock_context );
+ if ( the_timer != NULL ) {
+ cpu = _Timer_Acquire_critical( the_timer, &lock_context );
+ data->routine = the_timer->routine;
+ data->user_data = the_timer->user_data;
+ data->interval = the_timer->initial;
+ _Timer_Release( cpu, &lock_context );
+ }
+}
+
+Timer_States GetTimerState( rtems_id id )
+{
+ /* This code is derived from rtems_timer_cancel() and _timer_cancel() */
+ Timer_States result = TIMER_INVALID;
+ Timer_Control *the_timer;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu;
+ Timer_Classes the_class;
+ Timer_server_Control *timer_server = _Timer_server;
+ ISR_lock_Context lock_context_server;
+
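+  /*
+   * The timer is TIMER_SCHEDULED if its watchdog is scheduled, TIMER_PENDING
+   * if it is a timer server timer whose watchdog is pending on the server,
+   * and TIMER_INACTIVE otherwise.  TIMER_INVALID indicates that the
+   * identifier does not reference a timer.
+   */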
+ the_timer = _Timer_Get( id, &lock_context );
+ if ( the_timer != NULL ) {
+ result = TIMER_INACTIVE;
+ cpu = _Timer_Acquire_critical( the_timer, &lock_context );
+ the_class = the_timer->the_class;
+
+ if ( _Watchdog_Is_scheduled( &the_timer->Ticker ) ) {
+ result = TIMER_SCHEDULED;
+ } else if ( _Timer_Is_on_task_class( the_class ) ) {
+ _Assert( timer_server != NULL );
+ _Timer_server_Acquire_critical( timer_server, &lock_context_server );
+ if ( _Watchdog_Get_state( &the_timer->Ticker ) == WATCHDOG_PENDING ) {
+ result = TIMER_PENDING;
+ }
+ _Timer_server_Release_critical( timer_server, &lock_context_server );
+ }
+ _Timer_Release( cpu, &lock_context );
+ }
+
+ return result;
+}
+
+void UnsetClock( void )
+{
+ _TOD.is_set = false;
+}
diff --git a/testsuites/validation/tx-wrap-thread-queue.c b/testsuites/validation/tx-wrap-thread-queue.c
new file mode 100644
index 0000000000..93b7b55f37
--- /dev/null
+++ b/testsuites/validation/tx-wrap-thread-queue.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestSuites
+ *
+ * @brief This source file contains the implementation of the thread queue
+ * wrapper.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tx-support.h"
+
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+
+#include <string.h>
+
+void WrapThreadQueueInitialize(
+ WrapThreadQueueContext *ctx,
+ void ( *handler )( void * ),
+ void *arg
+)
+{
+ memset( ctx, 0, sizeof( *ctx ) );
+ ctx->isr_request.handler = handler;
+ ctx->isr_request.arg = arg;
+ _Thread_queue_Initialize( &ctx->thread_queue, "Wrap" );
+}
+
+static void Prepare(
+ WrapThreadQueueContext *ctx,
+ Thread_Control *thread
+)
+{
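+  /*
+   * If the thread already waits on a thread queue, wrap its wait operations
+   * so that the next extract goes through the wrapper.  Otherwise, let the
+   * thread claim the wrapper's own thread queue in the intend-to-block wait
+   * state, which installs the wrapper operations as well.
+   */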
+ if ( thread->Wait.queue != NULL ) {
+ ctx->wrapped_ops = thread->Wait.operations;
+ thread->Wait.operations = &ctx->tq_ops;
+ } else {
+ Thread_queue_Context queue_context;
+
+ ctx->wrapped_ops = NULL;
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Acquire( &ctx->thread_queue, &queue_context );
+ _Thread_Wait_flags_set(
+ thread,
+ THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
+ );
+ _Thread_Wait_claim( thread, &ctx->thread_queue.Queue );
+ _Thread_Wait_claim_finalize( thread, &ctx->tq_ops );
+ _Thread_queue_Release( &ctx->thread_queue, &queue_context );
+ }
+}
+
+static void WrappedExtract(
+ WrapThreadQueueContext *ctx,
+ Thread_queue_Queue *queue,
+ Thread_Control *thread,
+ Thread_queue_Context *queue_context
+)
+{
+ if ( ctx->wrapped_ops ) {
+ thread->Wait.operations = ctx->wrapped_ops;
+ ( *thread->Wait.operations->extract )( queue, thread, queue_context );
+ }
+}
+
+static void Extract(
+ Thread_queue_Queue *queue,
+ Thread_Control *thread,
+ Thread_queue_Context *queue_context
+)
+{
+ WrapThreadQueueContext *ctx;
+
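+  /*
+   * Recover the wrapper context from the wait operations installed by
+   * Prepare(); this assumes that the operations member is placed at the
+   * beginning of WrapThreadQueueContext (see tx-support.h).
+   */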
+ ctx = (WrapThreadQueueContext *) thread->Wait.operations;
+ CallWithinISRSubmit( &ctx->isr_request );
+ WrappedExtract( ctx, queue, thread, queue_context );
+}
+
+void WrapThreadQueueExtract(
+ WrapThreadQueueContext *ctx,
+ Thread_Control *thread
+)
+{
+ ctx->tq_ops.extract = Extract;
+ Prepare( ctx, thread );
+}
+
+static void ExtractDirect(
+ Thread_queue_Queue *queue,
+ Thread_Control *thread,
+ Thread_queue_Context *queue_context
+)
+{
+ WrapThreadQueueContext *ctx;
+
+ ctx = (WrapThreadQueueContext *) thread->Wait.operations;
+ ( *ctx->isr_request.handler )( ctx->isr_request.arg );
+ WrappedExtract( ctx, queue, thread, queue_context );
+}
+
+void WrapThreadQueueExtractDirect(
+ WrapThreadQueueContext *ctx,
+ Thread_Control *thread
+)
+{
+ ctx->tq_ops.extract = ExtractDirect;
+ Prepare( ctx, thread );
+}
+
+void WrapThreadQueueDestroy( WrapThreadQueueContext *ctx )
+{
+ _Thread_queue_Destroy( &ctx->thread_queue );
+}