summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-11-12 10:16:58 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2021-11-12 10:16:58 +0100
commitcbccf8145b7df42bb32b09cca0fa68140dc6489c (patch)
tree7c3c665cdb94e20bf02b9cb8b5eb261697948c7c
parent46fd192d0b688b0f6f55db08e18a3c6ffff42a32 (diff)
testsuites/validation/tc-sched-smp.c
-rw-r--r--testsuites/validation/tc-sched-smp.c275
1 files changed, 231 insertions, 44 deletions
diff --git a/testsuites/validation/tc-sched-smp.c b/testsuites/validation/tc-sched-smp.c
index 8744d9a562..c617c212dc 100644
--- a/testsuites/validation/tc-sched-smp.c
+++ b/testsuites/validation/tc-sched-smp.c
@@ -70,11 +70,21 @@
*
* This test case performs the following actions:
*
+ * - Construct a system state in which a thread is rescheduled while it is not
+ * scheduled on another scheduler.
+ *
+ * - Reschedule worker A by the home scheduler while worker A is not
+ * scheduled on another scheduler.
+ *
+ * - Clean up all used resources.
+ *
* - Construct a system state in which an ask for help request is cancelled
* while it is processed on another processor.
*
- * - Block the runner thread while the owner thread of the highest priority
- * ready node is already scheduled.
+ * - Unblock worker A. It cannot be scheduled on its home scheduler.
 *     Intercept the ask for help request. Block worker A. This will
+ * cancel the ask for help request. Remove the request while the other
+ * processor tries to cancel the request.
*
* - Clean up all used resources.
*
@@ -137,6 +147,15 @@
* - Clean up all used resources.
*
* - Construct a system state in which a scheduler tries to schedule a node
 *   whose owner thread is already scheduled during a set priority operation
+ * while a sticky node is involved.
+ *
+ * - Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ *
+ * - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
 *   whose owner thread is blocked during a set priority operation.
*
* - Set the priority of the runner thread while the owner thread of the
@@ -218,14 +237,14 @@ typedef struct {
volatile bool busy[ WORKER_COUNT ];
/**
- * @brief This member contains the per-CPU job.
+ * @brief This member contains the per-CPU jobs.
*/
- Per_CPU_Job job;
+ Per_CPU_Job job[ 2 ];
/**
- * @brief This member contains the per-CPU job context.
+ * @brief This member contains the per-CPU job contexts.
*/
- Per_CPU_Job_context job_context;
+ Per_CPU_Job_context job_context[ 2 ];
/**
* @brief This member contains the call within ISR request.
@@ -241,8 +260,10 @@ typedef ScoreSchedSmpValSmp_Context Context;
typedef enum {
EVENT_OBTAIN = RTEMS_EVENT_0,
EVENT_RELEASE = RTEMS_EVENT_1,
- EVENT_SYNC_RUNNER = RTEMS_EVENT_2,
- EVENT_BUSY = RTEMS_EVENT_3
+ EVENT_STICKY_OBTAIN = RTEMS_EVENT_2,
+ EVENT_STICKY_RELEASE = RTEMS_EVENT_3,
+ EVENT_SYNC_RUNNER = RTEMS_EVENT_4,
+ EVENT_BUSY = RTEMS_EVENT_5
} Event;
static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
@@ -359,8 +380,8 @@ static void OperationSuspendA(
const rtems_tcb *worker_a;
T_scheduler_set_event_handler( NULL, NULL );
- ctx->job_context.handler = SuspendA;
- _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job );
+ ctx->job_context[ 0 ].handler = SuspendA;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
worker_a = GetThread( ctx->worker_id[ WORKER_A ] );
@@ -406,33 +427,62 @@ static void YieldSuspendA(
OperationSuspendA( arg, event, when, T_SCHEDULER_YIELD );
}
-static void InterceptAskForHelp( void *arg )
+static void GuideAskForHelp( void *arg )
{
Context *ctx;
- Per_CPU_Control *cpu_self;
+ Per_CPU_Control *cpu;
ISR_lock_Context lock_context;
- Chain_Node *node;
- Thread_Control *thread;
ctx = arg;
- cpu_self = _Per_CPU_Get();
+ cpu = _Per_CPU_Get_by_index( 0 );
_ISR_lock_ISR_disable( &lock_context );
- _Per_CPU_Acquire( cpu_self, &lock_context );
- ctx->job_context.handler = SuspendA;
- _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job );
- ISRLockWaitForOthers( &cpu_self->Lock, 1 );
-
- /* See _Thread_Preemption_intervention() */
- node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
- thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
- T_assert_eq_ptr( thread, GetThread( ctx->worker_id[ WORKER_A ] ) );
- thread->Scheduler.ask_for_help_cpu = NULL;
-
- _Per_CPU_Release( cpu_self, &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
+
+ ISRLockWaitForOthers( &cpu->Lock, 1 );
+
+ ctx->job_context[ 0 ].handler = SuspendA;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+ ISRLockWaitForOthers( &cpu->Lock, 2 );
+
+ _Per_CPU_Release( cpu, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
}
+static void InterceptAskForHelp( void *arg )
+{
+ Context *ctx;
+ Per_CPU_Control *cpu_self;
+
+ ctx = arg;
+ cpu_self = _Per_CPU_Get();
+
+ if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+ ctx->job_context[ 1 ].handler = GuideAskForHelp;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 2 ), &ctx->job[ 1 ] );
+ ISRLockWaitForOwned( &cpu_self->Lock );
+ } else {
+ ISR_lock_Context lock_context;
+ Chain_Node *node;
+ Thread_Control *thread;
+
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu_self, &lock_context );
+ ctx->job_context[ 0 ].handler = SuspendA;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+ ISRLockWaitForOthers( &cpu_self->Lock, 1 );
+
+ /* See _Thread_Preemption_intervention() */
+ node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
+ thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
+ T_assert_eq_ptr( thread, GetThread( ctx->worker_id[ WORKER_A ] ) );
+ thread->Scheduler.ask_for_help_cpu = NULL;
+
+ _Per_CPU_Release( cpu_self, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+ }
+}
+
static void UnblockAskForHelp(
void *arg,
const T_scheduler_event *event,
@@ -454,6 +504,70 @@ static void UnblockAskForHelp(
}
}
+static void RaiseWorkerPriorityWithIdleRunner( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ SuspendTask( ctx->runner_id );
+ T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+ ResumeTask( ctx->runner_id );
+}
+
+static void MakeReady( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ MakeBusy( ctx, WORKER_C );
+}
+
+static void UpdatePriorityMakeReady(
+ void *arg,
+ const T_scheduler_event *event,
+ T_scheduler_when when
+)
+{
+ Context *ctx;
+
+ ctx = arg;
+
+ if (
+ when == T_SCHEDULER_BEFORE &&
+ event->operation == T_SCHEDULER_UPDATE_PRIORITY
+ ) {
+ Thread_Control *thread;
+
+ T_scheduler_set_event_handler( NULL, NULL );
+
+ thread = GetThread( ctx->worker_id[ WORKER_A ] );
+ T_eq_int( thread->Scheduler.state, THREAD_SCHEDULER_SCHEDULED );
+
+ ctx->job_context[ 0 ].handler = MakeReady;
+ _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+
+ while ( thread->Scheduler.state != THREAD_SCHEDULER_READY ) {
+ RTEMS_COMPILER_MEMORY_BARRIER();
+ }
+ }
+}
+
+static void ReadyToScheduled( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ SuspendTask( ctx->runner_id );
+
+ T_scheduler_set_event_handler( UpdatePriorityMakeReady, ctx );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+
+ SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
+ ResumeTask( ctx->runner_id );
+}
+
static void PrepareOwnerScheduled( Context *ctx )
{
SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
@@ -522,6 +636,14 @@ static void Worker( rtems_task_argument arg, WorkerIndex worker )
ReleaseMutex( ctx->mutex_id );
}
+ if ( ( events & EVENT_STICKY_OBTAIN ) != 0 ) {
+ ObtainMutex( ctx->sticky_id );
+ }
+
+ if ( ( events & EVENT_STICKY_RELEASE ) != 0 ) {
+ ReleaseMutex( ctx->sticky_id );
+ }
+
if ( ( events & EVENT_BUSY ) != 0 ) {
while ( ctx->busy[ worker ] ) {
/* Wait */
@@ -548,12 +670,16 @@ static void WorkerC( rtems_task_argument arg )
static void ScoreSchedSmpValSmp_Setup( ScoreSchedSmpValSmp_Context *ctx )
{
rtems_status_code sc;
+ size_t i;
ctx->runner_id = rtems_task_self();
- ctx->job_context.arg = ctx;
- ctx->job.context = &ctx->job_context;
ctx->mutex_id = CreateMutex();
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->job ); ++i ) {
+ ctx->job_context[ i ].arg = ctx;
+ ctx->job[ i ].context = &ctx->job_context[ i ];
+ }
+
sc = rtems_semaphore_create(
rtems_build_name( 'S', 'T', 'K', 'Y' ),
1,
@@ -611,16 +737,52 @@ static T_fixture ScoreSchedSmpValSmp_Fixture = {
};
/**
+ * @brief Construct a system state in which a thread is rescheduled while it
+ * is not scheduled on another scheduler.
+ */
+static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
+{
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+ SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+ SendAndSync( ctx, WORKER_A, EVENT_STICKY_OBTAIN );
+ MakeBusy( ctx, WORKER_A );
+
+ /*
+ * Reschedule worker A by the home scheduler while worker A is not scheduled
+ * on another scheduler.
+ */
+ CallWithinISR( ReadyToScheduled, ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ StopBusy( ctx, WORKER_C );
+ StopBusy( ctx, WORKER_A );
+ SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
+ SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+ SetSelfPriority( PRIO_NORMAL );
+ SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+ SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+ SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+ SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+/**
* @brief Construct a system state in which an ask for help request is
* cancelled while it is processed on another processor.
*/
-static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
/*
- * Block the runner thread while the owner thread of the highest priority
- * ready node is already scheduled.
+ * Unblock worker A. It cannot be scheduled on its home scheduler. Intercept
+ * the ask for help request. Block the worker A. This will cancel the ask
+ * for help request. Remove the request while the other processor tries to
+ * cancel the request.
*/
SuspendTask( ctx->worker_id[ WORKER_A ] );
T_scheduler_set_event_handler( UnblockAskForHelp, ctx );
@@ -638,7 +800,7 @@ static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
* @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is already scheduled during a block operation.
*/
-static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -659,7 +821,7 @@ static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
* @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is blocked during a block operation.
*/
-static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -681,7 +843,7 @@ static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is already scheduled during a set affinity
* operation.
*/
-static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -703,7 +865,7 @@ static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is already scheduled during a set affinity
* operation while a sticky node is involved.
*/
-static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -726,7 +888,7 @@ static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
* @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is blocked during a set affinity operation.
*/
-static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -748,7 +910,7 @@ static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is blocked during a set affinity operation while a
* sticky node is involved.
*/
-static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -772,7 +934,7 @@ static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is already scheduled during a set priority
* operation.
*/
-static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -792,9 +954,32 @@ static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
/**
* @brief Construct a system state in which a scheduler tries to schedule a
+ * node whose owner thread is already scheduled during a set priority
+ * operation while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
+{
+ PrepareOwnerScheduled( ctx );
+
+ /*
+ * Set the priority of the runner thread while the owner thread of the
+ * highest priority ready node is already scheduled.
+ */
+ MakeSticky( ctx );
+ CallWithinISR( RaiseWorkerPriorityWithIdleRunner, ctx );
+ CleanSticky( ctx );
+
+ /*
+ * Clean up all used resources.
+ */
+ CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is blocked during a set priority operation.
*/
-static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -816,7 +1001,7 @@ static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
* @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is already scheduled during a yield operation.
*/
-static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -838,7 +1023,7 @@ static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is already scheduled during a yield operation
* while a sticky node is involved.
*/
-static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerScheduled( ctx );
@@ -861,7 +1046,7 @@ static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
* @brief Construct a system state in which a scheduler tries to schedule a
 * node whose owner thread is blocked during a yield operation.
*/
-static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_13( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -883,7 +1068,7 @@ static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
 * node whose owner thread is blocked during a yield operation while a sticky
* node is involved.
*/
-static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
+static void ScoreSchedSmpValSmp_Action_14( ScoreSchedSmpValSmp_Context *ctx )
{
PrepareOwnerBlocked( ctx );
@@ -924,6 +1109,8 @@ T_TEST_CASE_FIXTURE( ScoreSchedSmpValSmp, &ScoreSchedSmpValSmp_Fixture )
ScoreSchedSmpValSmp_Action_10( ctx );
ScoreSchedSmpValSmp_Action_11( ctx );
ScoreSchedSmpValSmp_Action_12( ctx );
+ ScoreSchedSmpValSmp_Action_13( ctx );
+ ScoreSchedSmpValSmp_Action_14( ctx );
}
/** @} */