author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-09 13:08:47 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-09 15:47:13 +0100
commit     771b6959f14054b562dafdf708c83b9e9983371b (patch)
tree       7267eb9d5d3539b1ef0149eae27a28aa8cee02c1
parent     3921a273977a015e7feecc65ad59480dc7c068fa (diff)
testsuites/validation/tc-sched-smp.c (qual-46)
-rw-r--r--  testsuites/validation/tc-sched-smp.c  723
1 file changed, 688 insertions(+), 35 deletions(-)
diff --git a/testsuites/validation/tc-sched-smp.c b/testsuites/validation/tc-sched-smp.c
index 32995422a6..e96814c9d7 100644
--- a/testsuites/validation/tc-sched-smp.c
+++ b/testsuites/validation/tc-sched-smp.c
@@ -54,7 +54,9 @@
#include <rtems.h>
#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
#include <rtems/score/smpbarrier.h>
+#include <rtems/score/thread.h>
#include "tx-support.h"
@@ -69,9 +71,8 @@
*
* This test case performs the following actions:
*
- * - Create two worker threads and a mutex. Use the mutex and the worker to
- * construct the removal of the last processor of a scheduler while a thread
- * is scheduled.
+ * - Use the mutex and the workers to construct the removal of the last
+ * processor of a scheduler while a thread is scheduled.
*
* - Let worker B help worker A.
*
@@ -81,9 +82,116 @@
*
* - Clean up all used resources.
*
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a block operation.
+ *
+ * - Block the runner thread while the owner thread of the highest priority
+ * ready node is already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a block operation.
+ *
+ * - Block the runner thread while the owner thread of the highest priority
+ * ready node is blocked.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation.
+ *
+ * - Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation
+ * while a sticky node is involved.
+ *
+ * - Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation.
+ *
+ * - Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation while a
+ * sticky node is involved.
+ *
+ * - Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set priority operation.
+ *
+ * - Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set priority operation.
+ *
+ * - Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation.
+ *
+ * - Yield while the owner thread of the highest priority ready node is
+ * already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation while a
+ * sticky node is involved.
+ *
+ * - Yield while the owner thread of the highest priority ready node is
+ * already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation.
+ *
+ * - Yield while the owner thread of the highest priority ready node is
+ * blocked.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation while a sticky node
+ * is involved.
+ *
+ * - Yield while the owner thread of the highest priority ready node is
+ * blocked.
+ *
+ * - Clean up all used resources.
+ *
* @{
*/
+typedef enum {
+ WORKER_A,
+ WORKER_B,
+ WORKER_C,
+ WORKER_COUNT
+} WorkerIndex;
+
/**
* @brief Test context for spec:/score/sched/smp/val/smp test case.
*/
@@ -94,19 +202,24 @@ typedef struct {
rtems_id runner_id;
/**
- * @brief This member contains the worker A identifier.
+ * @brief This member contains the worker identifiers.
+ */
+ rtems_id worker_id[ WORKER_COUNT ];
+
+ /**
+ * @brief This member contains the mutex identifier.
*/
- rtems_id worker_a_id;
+ rtems_id mutex_id;
/**
- * @brief This member contains the worker B identifier.
+ * @brief This member contains the sticky mutex identifier.
*/
- rtems_id worker_b_id;
+ rtems_id sticky_id;
/**
- * @brief This member contains the mutex identifier.
+ * @brief This member contains the worker busy status.
*/
- rtems_id mutex_id;
+ volatile bool busy[ WORKER_COUNT ];
/**
* @brief This member provides the context to wrap thread queue operations.
@@ -123,6 +236,16 @@ typedef struct {
* workers.
*/
SMP_barrier_Control barrier;
+
+ /**
+ * @brief This member contains the per-CPU job.
+ */
+ Per_CPU_Job job;
+
+ /**
+ * @brief This member contains the per-CPU job context.
+ */
+ Per_CPU_Job_context job_context;
} ScoreSchedSmpValSmp_Context;
static ScoreSchedSmpValSmp_Context
@@ -134,7 +257,8 @@ typedef enum {
EVENT_OBTAIN = RTEMS_EVENT_0,
EVENT_RELEASE = RTEMS_EVENT_1,
EVENT_SYNC_RUNNER = RTEMS_EVENT_2,
- EVENT_RESTART = RTEMS_EVENT_3
+ EVENT_RESTART = RTEMS_EVENT_3,
+ EVENT_BUSY = RTEMS_EVENT_4
} Event;
static void Barriers( void *arg )
@@ -162,7 +286,213 @@ static void RequestISR( void *arg )
CallWithinISRSubmit( &ctx->request );
}
-static void WorkerTask( rtems_task_argument arg )
+static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
+{
+ SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
+ ReceiveAllEvents( EVENT_SYNC_RUNNER );
+ WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
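+/*
+ * MakeBusy() makes the worker spin in its busy loop.  The corresponding
+ * StopBusy() ends the busy loop and waits until the worker stopped
+ * executing.
+ */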
+static void MakeBusy( Context *ctx, WorkerIndex worker )
+{
+ ctx->busy[ worker ] = true;
+ SendEvents( ctx->worker_id[ worker ], EVENT_BUSY );
+}
+
+static void StopBusy( Context *ctx, WorkerIndex worker )
+{
+ ctx->busy[ worker ] = false;
+ WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
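+/*
+ * The sticky mutex is an MrsP semaphore, see the test setup.  Obtaining it
+ * makes the scheduler node of the calling thread sticky.
+ */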
+static void MakeSticky( const Context *ctx )
+{
+ ObtainMutex( ctx->sticky_id );
+}
+
+static void CleanSticky( const Context *ctx )
+{
+ ReleaseMutex( ctx->sticky_id );
+}
+
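+/*
+ * Suspending and resuming the runner in interrupt context carries out a
+ * block and unblock operation for the runner.
+ */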
+static void Block( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ SuspendTask( ctx->runner_id );
+ ResumeTask( ctx->runner_id );
+}
+
+static void OperationStopBusyC(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when,
+ T_scheduler_operation op
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
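+  /*
+   * Stop the busy worker C right before the operation of interest is carried
+   * out, so that the processor of scheduler B becomes available.
+   */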
+ if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+ T_scheduler_set_event_handler( NULL, NULL );
+ StopBusy( ctx, WORKER_C );
+ }
+}
+
+static void BlockStopBusyC(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationStopBusyC( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinityStopBusyC(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationStopBusyC( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePriorityStopBusyC(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationStopBusyC( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldStopBusyC(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationStopBusyC( arg, event, when, T_SCHEDULER_YIELD );
+}
+
+static void SuspendA( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ SuspendTask( ctx->worker_id[ WORKER_A ] );
+}
+
+static void OperationSuspendA(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when,
+ T_scheduler_operation op
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+ const rtems_tcb *worker_a;
+
+ T_scheduler_set_event_handler( NULL, NULL );
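+    /* Use a per-CPU job to let processor 1 suspend worker A */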
+ ctx->job_context.handler = SuspendA;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job );
+
+ worker_a = GetThread( ctx->worker_id[ WORKER_A ] );
+
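+    /* Busy wait until worker A is blocked with respect to its scheduler */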
+ while ( worker_a->Scheduler.state != THREAD_SCHEDULER_BLOCKED ) {
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ }
+ }
+}
+
+static void BlockSuspendA(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationSuspendA( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinitySuspendA(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationSuspendA( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePrioritySuspendA(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationSuspendA( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldSuspendA(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ OperationSuspendA( arg, event, when, T_SCHEDULER_YIELD );
+}
+
+static void PrepareOwnerScheduled( Context *ctx )
+{
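+  /*
+   * Worker A owns the mutex.  Worker B executes on scheduler B and waits for
+   * the mutex, so that worker A gets a helping scheduler node in scheduler B.
+   * Worker C keeps the processor of scheduler B busy while worker A is busy
+   * on its home scheduler.
+   */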
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+ SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+ MakeBusy( ctx, WORKER_C );
+ MakeBusy( ctx, WORKER_A );
+}
+
+static void CleanupOwnerScheduled( Context *ctx )
+{
+ StopBusy( ctx, WORKER_A );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+ SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+static void PrepareOwnerBlocked( Context *ctx )
+{
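+  /*
+   * Worker A executes on scheduler B and owns the mutex.  Worker B waits for
+   * the mutex, so that worker A gets a helping scheduler node in scheduler A.
+   * Worker C keeps the processor of scheduler B busy.
+   */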
+ SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_B_ID, PRIO_NORMAL );
+ SendAndSync( ctx, WORKER_A, EVENT_OBTAIN );
+ SendEvents( ctx->worker_id[ WORKER_B ], EVENT_OBTAIN );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+ MakeBusy( ctx, WORKER_C );
+ SetPriority( ctx->worker_id[ WORKER_B ], PRIO_LOW );
+ MakeBusy( ctx, WORKER_A );
+ SetPriority( ctx->worker_id[ WORKER_B ], PRIO_NORMAL );
+}
+
+static void CleanupOwnerBlocked( Context *ctx )
+{
+ StopBusy( ctx, WORKER_C );
+ ResumeTask( ctx->worker_id[ WORKER_A ] );
+ StopBusy( ctx, WORKER_A );
+ SendAndSync( ctx, WORKER_A, EVENT_RELEASE );
+ SetPriority( ctx->worker_id[ WORKER_B ], PRIO_HIGH );
+ SendEvents( ctx->worker_id[ WORKER_B ], EVENT_RELEASE );
+ SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_A_ID, PRIO_HIGH );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+static void Worker( rtems_task_argument arg, WorkerIndex worker )
{
Context *ctx;
@@ -194,20 +524,71 @@ static void WorkerTask( rtems_task_argument arg )
WrapThreadQueueExtract(
&ctx->wrap_tq_ctx,
- GetThread( ctx->worker_b_id )
+ GetThread( ctx->worker_id[ WORKER_B ] )
);
- sc = rtems_task_restart( ctx->worker_b_id, (rtems_task_argument) ctx );
+ sc = rtems_task_restart(
+ ctx->worker_id[ WORKER_B ],
+ (rtems_task_argument) ctx
+ );
T_rsc_success( sc );
T_eq_u32( rtems_scheduler_get_processor(), 0 );
}
+
+ if ( ( events & EVENT_BUSY ) != 0 ) {
+ while ( ctx->busy[ worker ] ) {
+ /* Wait */
+ }
+ }
}
}
+static void WorkerA( rtems_task_argument arg )
+{
+ Worker( arg, WORKER_A );
+}
+
+static void WorkerB( rtems_task_argument arg )
+{
+ Worker( arg, WORKER_B );
+}
+
+static void WorkerC( rtems_task_argument arg )
+{
+ Worker( arg, WORKER_C );
+}
+
static void ScoreSchedSmpValSmp_Setup( ScoreSchedSmpValSmp_Context *ctx )
{
+ rtems_status_code sc;
+
+ ctx->runner_id = rtems_task_self();
+ ctx->job_context.arg = ctx;
+ ctx->job.context = &ctx->job_context;
+ ctx->mutex_id = CreateMutex();
+
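+  /* Create the sticky mutex as an MrsP semaphore */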
+ sc = rtems_semaphore_create(
+ rtems_build_name( 'S', 'T', 'K', 'Y' ),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+ PRIO_NORMAL,
+ &ctx->sticky_id
+ );
+ T_rsc_success( sc );
+
SetSelfPriority( PRIO_NORMAL );
+
+ ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
+ StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
+
+ ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
+ StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
+
+ ctx->worker_id[ WORKER_C ] = CreateTask( "WRKC", PRIO_HIGH );
+ StartTask( ctx->worker_id[ WORKER_C ], WorkerC, ctx );
+
WrapThreadQueueInitialize( &ctx->wrap_tq_ctx, RequestISR, ctx );
}
@@ -221,6 +602,11 @@ static void ScoreSchedSmpValSmp_Setup_Wrap( void *arg )
static void ScoreSchedSmpValSmp_Teardown( ScoreSchedSmpValSmp_Context *ctx )
{
+ DeleteTask( ctx->worker_id[ WORKER_A ] );
+ DeleteTask( ctx->worker_id[ WORKER_B ] );
+ DeleteTask( ctx->worker_id[ WORKER_C ] );
+ DeleteMutex( ctx->mutex_id );
+ DeleteMutex( ctx->sticky_id );
RestoreRunnerPriority();
WrapThreadQueueDestroy( &ctx->wrap_tq_ctx );
}
@@ -242,9 +628,8 @@ static T_fixture ScoreSchedSmpValSmp_Fixture = {
};
/**
- * @brief Create two worker threads and a mutex. Use the mutex and the worker
- * to construct the removal of the last processor of a scheduler while a
- * thread is scheduled.
+ * @brief Use the mutex and the workers to construct the removal of the last
+ * processor of a scheduler while a thread is scheduled.
*/
static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
{
@@ -253,35 +638,25 @@ static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
_SMP_barrier_Control_initialize( &ctx->barrier );
_SMP_barrier_State_initialize( &barrier_state );
- ctx->runner_id = rtems_task_self();
- ctx->mutex_id = CreateMutex();
-
- ctx->worker_a_id = CreateTask( "WRKA", PRIO_HIGH );
- StartTask( ctx->worker_a_id, WorkerTask, ctx );
-
- ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
- SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_NORMAL );
- StartTask( ctx->worker_b_id, WorkerTask, ctx );
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
/*
* Let worker B help worker A.
*/
- SendEvents( ctx->worker_a_id, EVENT_OBTAIN );
- SendEvents( ctx->worker_b_id, EVENT_SYNC_RUNNER | EVENT_OBTAIN );
- ReceiveAllEvents( EVENT_SYNC_RUNNER );
- WaitForExecutionStop( ctx->worker_b_id );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+ SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
/*
* Restart the worker B to withdraw the help offer and wait on barriers. Move
* worker B to scheduler A. Remove the processor while worker A is
* scheduled.
*/
- SendEvents( ctx->worker_a_id, EVENT_RESTART );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RESTART );
/* A */
_SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
- SetScheduler( ctx->worker_b_id, SCHEDULER_A_ID, PRIO_HIGH );
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
RemoveProcessor( SCHEDULER_B_ID, 1 );
/* B */
@@ -292,10 +667,276 @@ static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
*/
SetPriority( ctx->runner_id, PRIO_NORMAL );
AddProcessor( SCHEDULER_B_ID, 1 );
- SendEvents( ctx->worker_a_id, EVENT_RELEASE );
- DeleteTask( ctx->worker_a_id );
- DeleteTask( ctx->worker_b_id );
- DeleteMutex( ctx->mutex_id );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Block the runner thread while the owner thread of the highest priority
+ * ready node is already scheduled.
+ */
+ T_scheduler_set_event_handler( BlockStopBusyC, ctx );
+ CallWithinISR( Block, ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Block the runner thread while the owner thread of the highest priority
+ * ready node is blocked.
+ */
+ T_scheduler_set_event_handler( BlockSuspendA, ctx );
+ CallWithinISR( Block, ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ * operation.
+ */
+static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ */
+ T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+ SetSelfAffinityAll();
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ * operation while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ */
+ MakeSticky( ctx );
+ T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+ SetSelfAffinityAll();
+ CleanSticky( ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation.
+ */
+static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ */
+ T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+ SetSelfAffinityAll();
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation while a
+ * sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Set the affinity of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ */
+ MakeSticky( ctx );
+ T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+ SetSelfAffinityAll();
+ CleanSticky( ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set priority
+ * operation.
+ */
+static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ */
+ SetSelfPriority( PRIO_HIGH );
+ T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
+ SetSelfPriority( PRIO_NORMAL );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set priority operation.
+ */
+static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is blocked.
+ */
+ SetSelfPriority( PRIO_HIGH );
+ T_scheduler_set_event_handler( UpdatePrioritySuspendA, ctx );
+ SetSelfPriority( PRIO_NORMAL );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Yield while the owner thread of the highest priority ready node is already
+ * scheduled.
+ */
+ T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+ Yield();
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation
+ * while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Yield while the owner thread of the highest priority ready node is already
+ * scheduled.
+ */
+ MakeSticky( ctx );
+ T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+ Yield();
+ CleanSticky( ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Yield while the owner thread of the highest priority ready node is
+ * blocked.
+ */
+ T_scheduler_set_event_handler( YieldSuspendA, ctx );
+ Yield();
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation while a sticky
+ * node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerBlocked( ctx );
+
+ /*
+ * Yield while the owner thread of the highest priority ready node is
+ * blocked.
+ */
+ MakeSticky( ctx );
+ T_scheduler_set_event_handler( YieldSuspendA, ctx );
+ Yield();
+ CleanSticky( ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerBlocked( ctx );
}
/**
@@ -308,6 +949,18 @@ T_TEST_CASE_FIXTURE( ScoreSchedSmpValSmp, &ScoreSchedSmpValSmp_Fixture )
ctx = T_fixture_context();
ScoreSchedSmpValSmp_Action_0( ctx );
+ ScoreSchedSmpValSmp_Action_1( ctx );
+ ScoreSchedSmpValSmp_Action_2( ctx );
+ ScoreSchedSmpValSmp_Action_3( ctx );
+ ScoreSchedSmpValSmp_Action_4( ctx );
+ ScoreSchedSmpValSmp_Action_5( ctx );
+ ScoreSchedSmpValSmp_Action_6( ctx );
+ ScoreSchedSmpValSmp_Action_7( ctx );
+ ScoreSchedSmpValSmp_Action_8( ctx );
+ ScoreSchedSmpValSmp_Action_9( ctx );
+ ScoreSchedSmpValSmp_Action_10( ctx );
+ ScoreSchedSmpValSmp_Action_11( ctx );
+ ScoreSchedSmpValSmp_Action_12( ctx );
}
/** @} */