| author    | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2021-11-08 09:57:38 +0100 |
|-----------|-------------------------------------------------------|---------------------------|
| committer | Sebastian Huber <sebastian.huber@embedded-brains.de> | 2021-11-11 17:05:09 +0100 |
| commit    | 9fb9eb4488927a49f734b0169ce8733540d34f42 (patch)      |                           |
| tree      | 164651bd0291c933a589d345086fd88afad2a6bf              |                           |
| parent    | dbf6578e8e2d894b35d1925725446ed912bbadb5 (diff)       |                           |
testsuites/validation/tc-sched-smp.c
-rw-r--r-- | spec/build/testsuites/validation/validation-smp-only-0.yml |   1
-rw-r--r-- | testsuites/validation/tc-sched-smp.c                       | 929
2 files changed, 930 insertions, 0 deletions
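The test file in this commit is built on the scheduler instrumentation from `<rtems/test-scheduler.h>`: a registered event handler is invoked around every scheduler operation, which lets the test mutate the system state at the exact moment an operation such as a block, yield, or set-affinity is in flight. As a hedged sketch of that interception pattern, reduced from the file below (the `InterceptYield` and `ArmAndYield` names are illustrative, not part of the commit):

```c
#include <rtems.h>
#include <rtems/test-scheduler.h>

/* Illustrative handler: runs immediately before the scheduler carries out
 * the first yield operation, then disarms itself.  The state change made
 * at this point is what the validation actions in the test vary. */
static void InterceptYield(
  void                    *arg,
  const T_scheduler_event *event,
  T_scheduler_when         when
)
{
  (void) arg;

  if ( when == T_SCHEDULER_BEFORE && event->operation == T_SCHEDULER_YIELD ) {
    T_scheduler_set_event_handler( NULL, NULL );
    /* Mutate the system state here, for example stop a busy worker, so
     * that the in-progress yield observes the state under test. */
  }
}

/* Illustrative trigger: arm the handler, then yield the processor. */
static void ArmAndYield( void )
{
  T_scheduler_set_event_handler( InterceptYield, NULL );
  (void) rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
}
```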
diff --git a/spec/build/testsuites/validation/validation-smp-only-0.yml b/spec/build/testsuites/validation/validation-smp-only-0.yml
index 96199b9d9a..750b3b39ac 100644
--- a/spec/build/testsuites/validation/validation-smp-only-0.yml
+++ b/spec/build/testsuites/validation/validation-smp-only-0.yml
@@ -14,6 +14,7 @@ links: []
 source:
 - testsuites/validation/tc-bsp-interrupt-spurious.c
 - testsuites/validation/tc-scheduler-smp-only.c
+- testsuites/validation/tc-sched-smp.c
 - testsuites/validation/tc-sched-smp-edf-set-affinity.c
 - testsuites/validation/tc-score-smp-per-cpu-jobs.c
 - testsuites/validation/tc-score-smp-thread.c
diff --git a/testsuites/validation/tc-sched-smp.c b/testsuites/validation/tc-sched-smp.c
new file mode 100644
index 0000000000..8744d9a562
--- /dev/null
+++ b/testsuites/validation/tc-sched-smp.c
@@ -0,0 +1,929 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSchedSmpValSmp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSchedSmpValSmp spec:/score/sched/smp/val/smp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief Tests SMP-specific scheduler behaviour.
+ *
+ * This test case performs the following actions:
+ *
+ * - Construct a system state in which an ask for help request is cancelled
+ *   while it is processed on another processor.
+ *
+ *   - Block the runner thread while the owner thread of the highest priority
+ *     ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a block operation.
+ *
+ *   - Block the runner thread while the owner thread of the highest priority
+ *     ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a block operation.
+ *
+ *   - Block the runner thread while the owner thread of the highest priority
+ *     ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation
+ *   while a sticky node is involved.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation while a
+ *   sticky node is involved.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set priority operation.
+ *
+ *   - Set the priority of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set priority operation.
+ *
+ *   - Set the priority of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation while a
+ *   sticky node is involved.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation while a sticky
+ *   node is involved.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * @{
+ */
+
+typedef enum {
+  WORKER_A,
+  WORKER_B,
+  WORKER_C,
+  WORKER_COUNT
+} WorkerIndex;
+
+/**
+ * @brief Test context for spec:/score/sched/smp/val/smp test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the runner identifier.
+   */
+  rtems_id runner_id;
+
+  /**
+   * @brief This member contains the worker identifiers.
+   */
+  rtems_id worker_id[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the mutex identifier.
+   */
+  rtems_id mutex_id;
+
+  /**
+   * @brief This member contains the sticky mutex identifier.
+   */
+  rtems_id sticky_id;
+
+  /**
+   * @brief This member contains the worker busy status.
+   */
+  volatile bool busy[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the per-CPU job.
+   */
+  Per_CPU_Job job;
+
+  /**
+   * @brief This member contains the per-CPU job context.
+   */
+  Per_CPU_Job_context job_context;
+
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+} ScoreSchedSmpValSmp_Context;
+
+static ScoreSchedSmpValSmp_Context
+  ScoreSchedSmpValSmp_Instance;
+
+typedef ScoreSchedSmpValSmp_Context Context;
+
+typedef enum {
+  EVENT_OBTAIN = RTEMS_EVENT_0,
+  EVENT_RELEASE = RTEMS_EVENT_1,
+  EVENT_SYNC_RUNNER = RTEMS_EVENT_2,
+  EVENT_BUSY = RTEMS_EVENT_3
+} Event;
+
+static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
+{
+  SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
+  ReceiveAllEvents( EVENT_SYNC_RUNNER );
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+static void MakeBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy[ worker ] = true;
+  SendEvents( ctx->worker_id[ worker ], EVENT_BUSY );
+}
+
+static void StopBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy[ worker ] = false;
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+static void MakeSticky( const Context *ctx )
+{
+  ObtainMutex( ctx->sticky_id );
+}
+
+static void CleanSticky( const Context *ctx )
+{
+  ReleaseMutex( ctx->sticky_id );
+}
+
+static void Block( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->runner_id );
+  ResumeTask( ctx->runner_id );
+}
+
+static void OperationStopBusyC(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when,
+  T_scheduler_operation op
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+    StopBusy( ctx, WORKER_C );
+  }
+}
+
+static void BlockStopBusyC(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinityStopBusyC(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePriorityStopBusyC(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldStopBusyC(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_YIELD );
+}
+
+static void SuspendA( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+}
+
+static void OperationSuspendA(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when,
+  T_scheduler_operation op
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+    const rtems_tcb *worker_a;
+
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->job_context.handler = SuspendA;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job );
+
+    worker_a = GetThread( ctx->worker_id[ WORKER_A ] );
+
+    while ( worker_a->Scheduler.state != THREAD_SCHEDULER_BLOCKED ) {
+      RTEMS_COMPILER_MEMORY_BARRIER();
+    }
+  }
+}
+
+static void BlockSuspendA(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinitySuspendA(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePrioritySuspendA(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldSuspendA(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_YIELD );
+}
+
+static void InterceptAskForHelp( void *arg )
+{
+  Context *ctx;
+  Per_CPU_Control *cpu_self;
+  ISR_lock_Context lock_context;
+  Chain_Node *node;
+  Thread_Control *thread;
+
+  ctx = arg;
+  cpu_self = _Per_CPU_Get();
+
+  _ISR_lock_ISR_disable( &lock_context );
+  _Per_CPU_Acquire( cpu_self, &lock_context );
+  ctx->job_context.handler = SuspendA;
+  _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job );
+  ISRLockWaitForOthers( &cpu_self->Lock, 1 );
+
+  /* See _Thread_Preemption_intervention() */
+  node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
+  thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
+  T_assert_eq_ptr( thread, GetThread( ctx->worker_id[ WORKER_A ] ) );
+  thread->Scheduler.ask_for_help_cpu = NULL;
+
+  _Per_CPU_Release( cpu_self, &lock_context );
+  _ISR_lock_ISR_enable( &lock_context );
+}
+
+static void UnblockAskForHelp(
+  void *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_UNBLOCK
+  ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->request.handler = InterceptAskForHelp;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+  }
+}
+
+static void PrepareOwnerScheduled( Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  MakeBusy( ctx, WORKER_C );
+  MakeBusy( ctx, WORKER_A );
+}
+
+static void CleanupOwnerScheduled( Context *ctx )
+{
+  StopBusy( ctx, WORKER_A );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+static void PrepareOwnerBlocked( Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendAndSync( ctx, WORKER_A, EVENT_OBTAIN );
+  SendEvents( ctx->worker_id[ WORKER_B ], EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  MakeBusy( ctx, WORKER_C );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_LOW );
+  MakeBusy( ctx, WORKER_A );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_NORMAL );
+}
+
+static void CleanupOwnerBlocked( Context *ctx )
+{
+  StopBusy( ctx, WORKER_C );
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  StopBusy( ctx, WORKER_A );
+  SendAndSync( ctx, WORKER_A, EVENT_RELEASE );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_HIGH );
+  SendEvents( ctx->worker_id[ WORKER_B ], EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+static void Worker( rtems_task_argument arg, WorkerIndex worker )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  while ( true ) {
+    rtems_event_set events;
+
+    events = ReceiveAnyEvents();
+
+    if ( ( events & EVENT_SYNC_RUNNER ) != 0 ) {
+      SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
+    }
+
+    if ( ( events & EVENT_OBTAIN ) != 0 ) {
+      ObtainMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_RELEASE ) != 0 ) {
+      ReleaseMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_BUSY ) != 0 ) {
+      while ( ctx->busy[ worker ] ) {
+        /* Wait */
+      }
+    }
+  }
+}
+
+static void WorkerA( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_A );
+}
+
+static void WorkerB( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_B );
+}
+
+static void WorkerC( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_C );
+}
+
+static void ScoreSchedSmpValSmp_Setup( ScoreSchedSmpValSmp_Context *ctx )
+{
+  rtems_status_code sc;
+
+  ctx->runner_id = rtems_task_self();
+  ctx->job_context.arg = ctx;
+  ctx->job.context = &ctx->job_context;
+  ctx->mutex_id = CreateMutex();
+
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'S', 'T', 'K', 'Y' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_NORMAL,
+    &ctx->sticky_id
+  );
+  T_rsc_success( sc );
+
+  SetSelfPriority( PRIO_NORMAL );
+
+  ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
+
+  ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
+
+  ctx->worker_id[ WORKER_C ] = CreateTask( "WRKC", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_C ], WorkerC, ctx );
+}
+
+static void ScoreSchedSmpValSmp_Setup_Wrap( void *arg )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreSchedSmpValSmp_Setup( ctx );
+}
+
+static void ScoreSchedSmpValSmp_Teardown( ScoreSchedSmpValSmp_Context *ctx )
+{
+  DeleteTask( ctx->worker_id[ WORKER_A ] );
+  DeleteTask( ctx->worker_id[ WORKER_B ] );
+  DeleteTask( ctx->worker_id[ WORKER_C ] );
+  DeleteMutex( ctx->mutex_id );
+  DeleteMutex( ctx->sticky_id );
+  RestoreRunnerPriority();
+}
+
+static void ScoreSchedSmpValSmp_Teardown_Wrap( void *arg )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreSchedSmpValSmp_Teardown( ctx );
+}
+
+static T_fixture ScoreSchedSmpValSmp_Fixture = {
+  .setup = ScoreSchedSmpValSmp_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreSchedSmpValSmp_Teardown_Wrap,
+  .scope = NULL,
+  .initial_context = &ScoreSchedSmpValSmp_Instance
+};
+
+/**
+ * @brief Construct a system state in which an ask for help request is
+ *   cancelled while it is processed on another processor.
+ */
+static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Block the runner thread while the owner thread of the highest priority
+   * ready node is already scheduled.
+   */
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+  T_scheduler_set_event_handler( UnblockAskForHelp, ctx );
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+
+  /*
+   * Clean up all used resources.
+   */
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  StopBusy( ctx, WORKER_C );
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Block the runner thread while the owner thread of the highest priority
+   * ready node is already scheduled.
+   */
+  T_scheduler_set_event_handler( BlockStopBusyC, ctx );
+  CallWithinISR( Block, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Block the runner thread while the owner thread of the highest priority
+   * ready node is blocked.
+   */
+  T_scheduler_set_event_handler( BlockSuspendA, ctx );
+  CallWithinISR( Block, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ *   operation.
+ */
+static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+  SetSelfAffinityAll();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ *   operation while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+  SetSelfAffinityAll();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation.
+ */
+static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+  SetSelfAffinityAll();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation while
+ *   a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+  SetSelfAffinityAll();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set priority
+ *   operation.
+ */
+static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the priority of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  SetSelfPriority( PRIO_HIGH );
+  T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
+  SetSelfPriority( PRIO_NORMAL );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set priority operation.
+ */
+static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the priority of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  SetSelfPriority( PRIO_HIGH );
+  T_scheduler_set_event_handler( UpdatePrioritySuspendA, ctx );
+  SetSelfPriority( PRIO_NORMAL );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * already scheduled.
+   */
+  T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+  Yield();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation
+ *   while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * already scheduled.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+  Yield();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * blocked.
+   */
+  T_scheduler_set_event_handler( YieldSuspendA, ctx );
+  Yield();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation while a
+ *   sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * blocked.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( YieldSuspendA, ctx );
+  Yield();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @fn void T_case_body_ScoreSchedSmpValSmp( void )
+ */
+T_TEST_CASE_FIXTURE( ScoreSchedSmpValSmp, &ScoreSchedSmpValSmp_Fixture )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = T_fixture_context();
+
+  ScoreSchedSmpValSmp_Action_0( ctx );
+  ScoreSchedSmpValSmp_Action_1( ctx );
+  ScoreSchedSmpValSmp_Action_2( ctx );
+  ScoreSchedSmpValSmp_Action_3( ctx );
+  ScoreSchedSmpValSmp_Action_4( ctx );
+  ScoreSchedSmpValSmp_Action_5( ctx );
+  ScoreSchedSmpValSmp_Action_6( ctx );
+  ScoreSchedSmpValSmp_Action_7( ctx );
+  ScoreSchedSmpValSmp_Action_8( ctx );
+  ScoreSchedSmpValSmp_Action_9( ctx );
+  ScoreSchedSmpValSmp_Action_10( ctx );
+  ScoreSchedSmpValSmp_Action_11( ctx );
+  ScoreSchedSmpValSmp_Action_12( ctx );
+}
+
+/** @} */
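As a usage note, the "sticky node" variants above rely on the semaphore the fixture creates with `RTEMS_MULTIPROCESSOR_RESOURCE_SHARING` (MrsP): while the runner holds it, the runner's scheduler node is sticky, which changes how the scheduler reacts to the intercepted yield, block, and set-affinity operations. A minimal sketch of creating such a semaphore outside the fixture, assuming a caller-supplied ceiling priority (the `CreateStickyMutex` name and the `MRSP` object name are illustrative):

```c
#include <rtems.h>

/* Sketch: create a MrsP semaphore; obtaining it makes the owner's
 * scheduler node sticky.  The name and the ceiling priority passed by
 * the caller are illustrative, not part of the commit. */
static rtems_id CreateStickyMutex( rtems_task_priority ceiling )
{
  rtems_status_code sc;
  rtems_id          id;

  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'R', 'S', 'P' ),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
    ceiling,
    &id
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    rtems_fatal_error_occurred( sc );
  }

  return id;
}
```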