summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2021-12-09 16:15:46 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2022-03-24 11:10:49 +0100
commit1c87bb585f781e2ebd42af7f1bf045f97d065c5c (patch)
tree20599263eb3fa39e416219c4ea48c21d94b19739
parentvalidation: Test Rate Monotonic Manager (diff)
downloadrtems-1c87bb585f781e2ebd42af7f1bf045f97d065c5c.tar.bz2
validation: Test Scheduler Manager
The test source code is generated from specification items by the "./spec2modules.py" script contained in the git://git.rtems.org/rtems-central.git Git repository. Please read the "How-To" section in the "Software Requirements Engineering" chapter of the RTEMS Software Engineering manual to get more information about the process. Update #3716.
Diffstat (limited to '')
-rw-r--r--spec/build/testsuites/validation/validation-no-clock-0.yml8
-rw-r--r--spec/build/testsuites/validation/validation-non-smp.yml1
-rw-r--r--spec/build/testsuites/validation/validation-smp-only-0.yml1
-rw-r--r--testsuites/validation/tc-scheduler-add-processor.c790
-rw-r--r--testsuites/validation/tc-scheduler-get-maximum-priority.c463
-rw-r--r--testsuites/validation/tc-scheduler-get-processor-set.c550
-rw-r--r--testsuites/validation/tc-scheduler-ident-by-processor-set.c743
-rw-r--r--testsuites/validation/tc-scheduler-ident-by-processor.c593
-rw-r--r--testsuites/validation/tc-scheduler-ident.c406
-rw-r--r--testsuites/validation/tc-scheduler-non-smp.c109
-rw-r--r--testsuites/validation/tc-scheduler-remove-processor.c1475
-rw-r--r--testsuites/validation/tc-scheduler-smp-only.c244
-rw-r--r--testsuites/validation/tc-scheduler.c157
13 files changed, 5540 insertions, 0 deletions
diff --git a/spec/build/testsuites/validation/validation-no-clock-0.yml b/spec/build/testsuites/validation/validation-no-clock-0.yml
index cfd8afcbe5..b82fa35f44 100644
--- a/spec/build/testsuites/validation/validation-no-clock-0.yml
+++ b/spec/build/testsuites/validation/validation-no-clock-0.yml
@@ -54,6 +54,14 @@ source:
- testsuites/validation/tc-ratemon-ident.c
- testsuites/validation/tc-ratemon-period.c
- testsuites/validation/tc-ratemon-timeout.c
+- testsuites/validation/tc-scheduler-add-processor.c
+- testsuites/validation/tc-scheduler.c
+- testsuites/validation/tc-scheduler-get-maximum-priority.c
+- testsuites/validation/tc-scheduler-get-processor-set.c
+- testsuites/validation/tc-scheduler-ident-by-processor.c
+- testsuites/validation/tc-scheduler-ident-by-processor-set.c
+- testsuites/validation/tc-scheduler-ident.c
+- testsuites/validation/tc-scheduler-remove-processor.c
- testsuites/validation/tc-score-fatal.c
- testsuites/validation/tr-event-constant.c
- testsuites/validation/tr-mtx-seize-try.c
diff --git a/spec/build/testsuites/validation/validation-non-smp.yml b/spec/build/testsuites/validation/validation-non-smp.yml
index 3aeaa29111..2520ecb5c5 100644
--- a/spec/build/testsuites/validation/validation-non-smp.yml
+++ b/spec/build/testsuites/validation/validation-non-smp.yml
@@ -13,6 +13,7 @@ ldflags: []
links: []
source:
- testsuites/validation/tc-intr-non-smp.c
+- testsuites/validation/tc-scheduler-non-smp.c
- testsuites/validation/ts-validation-non-smp.c
stlib: []
target: testsuites/validation/ts-validation-non-smp.exe
diff --git a/spec/build/testsuites/validation/validation-smp-only-0.yml b/spec/build/testsuites/validation/validation-smp-only-0.yml
index 1909f7ef7f..545fa9d77f 100644
--- a/spec/build/testsuites/validation/validation-smp-only-0.yml
+++ b/spec/build/testsuites/validation/validation-smp-only-0.yml
@@ -16,6 +16,7 @@ source:
- testsuites/validation/tc-acfg-scheduler-edf-smp.c
- testsuites/validation/tc-bsp-interrupt-spurious.c
- testsuites/validation/tc-intr-smp-only.c
+- testsuites/validation/tc-scheduler-smp-only.c
- testsuites/validation/tc-score-tq-smp.c
- testsuites/validation/ts-validation-smp-only-0.c
stlib: []
diff --git a/testsuites/validation/tc-scheduler-add-processor.c b/testsuites/validation/tc-scheduler-add-processor.c
new file mode 100644
index 0000000000..dbeaaa7b16
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-add-processor.c
@@ -0,0 +1,790 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqAddProcessor
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqAddProcessor \
+ * spec:/rtems/scheduler/req/add-processor
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+/*
+ * Enumerations of the pre-condition states (HasReady, Id, CPUIndex,
+ * CPUState) and post-condition states (Status, Added) which span the
+ * transition map of this test case. The *_NA member of each
+ * enumeration marks a "not applicable" state.
+ */
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Pre_HasReady_Ready,
+ RtemsSchedulerReqAddProcessor_Pre_HasReady_Empty,
+ RtemsSchedulerReqAddProcessor_Pre_HasReady_NA
+} RtemsSchedulerReqAddProcessor_Pre_HasReady;
+
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Pre_Id_Invalid,
+ RtemsSchedulerReqAddProcessor_Pre_Id_Scheduler,
+ RtemsSchedulerReqAddProcessor_Pre_Id_NA
+} RtemsSchedulerReqAddProcessor_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Valid,
+ RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Invalid,
+ RtemsSchedulerReqAddProcessor_Pre_CPUIndex_NA
+} RtemsSchedulerReqAddProcessor_Pre_CPUIndex;
+
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_Idle,
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_InUse,
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_NotOnline,
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_NotUsable,
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_NA
+} RtemsSchedulerReqAddProcessor_Pre_CPUState;
+
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Post_Status_Ok,
+ RtemsSchedulerReqAddProcessor_Post_Status_InvId,
+ RtemsSchedulerReqAddProcessor_Post_Status_NotConf,
+ RtemsSchedulerReqAddProcessor_Post_Status_IncStat,
+ RtemsSchedulerReqAddProcessor_Post_Status_InUse,
+ RtemsSchedulerReqAddProcessor_Post_Status_NA
+} RtemsSchedulerReqAddProcessor_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqAddProcessor_Post_Added_Yes,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop,
+ RtemsSchedulerReqAddProcessor_Post_Added_NA
+} RtemsSchedulerReqAddProcessor_Post_Added;
+
+/*
+ * One transition map entry: the Skip flag, the per-pre-condition NA
+ * flags, and the expected post-condition states packed into a 16-bit
+ * bit-field.
+ */
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_HasReady_NA : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Pre_CPUIndex_NA : 1;
+ uint16_t Pre_CPUState_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_Added : 2;
+} RtemsSchedulerReqAddProcessor_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/add-processor test case.
+ */
+typedef struct {
+ /**
+ * @brief This member specifies the scheduler used to add the processor.
+ */
+ rtems_id scheduler_id;
+
+ /**
+ * @brief This member contains the identifier of scheduler A.
+ */
+ rtems_id scheduler_a_id;
+
+ /**
+ * @brief This member contains the identifier of scheduler B.
+ */
+ rtems_id scheduler_b_id;
+
+ /**
+ * @brief This member contains the identifier of scheduler C.
+ */
+ rtems_id scheduler_c_id;
+
+ /**
+ * @brief This member references the processor control of the processor to
+ * add.
+ */
+ Per_CPU_Control *cpu;
+
+ /**
+ * @brief This member contains the online status of the processor to add
+ * before the rtems_scheduler_add_processor() call is prepared.
+ */
+ bool online;
+
+ /**
+ * @brief If this member is true, then the processor should be added to the
+ * scheduler B during cleanup.
+ */
+ bool add_cpu_to_scheduler_b;
+
+ /**
+ * @brief This member provides the scheduler operation records.
+ */
+ T_scheduler_log_2 scheduler_log;
+
+ /**
+ * @brief This member contains the return value of the
+ * rtems_scheduler_add_processor() call.
+ */
+ rtems_status_code status;
+
+ /**
+ * @brief This member specifies the ``scheduler_id`` parameter value.
+ */
+ rtems_id id;
+
+ /**
+ * @brief This member specifies the ``cpu_index`` parameter value.
+ */
+ uint32_t cpu_index;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 4 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 4 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSchedulerReqAddProcessor_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSchedulerReqAddProcessor_Context;
+
+/* Singleton test context instance referenced by the fixture. */
+static RtemsSchedulerReqAddProcessor_Context
+ RtemsSchedulerReqAddProcessor_Instance;
+
+/*
+ * Human readable names of the pre-condition states, consumed by
+ * T_get_scope() to build the test variant description string.
+ */
+static const char * const RtemsSchedulerReqAddProcessor_PreDesc_HasReady[] = {
+ "Ready",
+ "Empty",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqAddProcessor_PreDesc_Id[] = {
+ "Invalid",
+ "Scheduler",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqAddProcessor_PreDesc_CPUIndex[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqAddProcessor_PreDesc_CPUState[] = {
+ "Idle",
+ "InUse",
+ "NotOnline",
+ "NotUsable",
+ "NA"
+};
+
+/* NULL-terminated list of the per-pre-condition description tables. */
+static const char * const * const RtemsSchedulerReqAddProcessor_PreDesc[] = {
+ RtemsSchedulerReqAddProcessor_PreDesc_HasReady,
+ RtemsSchedulerReqAddProcessor_PreDesc_Id,
+ RtemsSchedulerReqAddProcessor_PreDesc_CPUIndex,
+ RtemsSchedulerReqAddProcessor_PreDesc_CPUState,
+ NULL
+};
+
+/* Index of the processor which the test adds to and removes from schedulers. */
+#define CPU_TO_ADD 1
+
+/*
+ * Prepare the HasReady pre-condition: select the scheduler whose
+ * identifier is used for the call. Scheduler A is used for the
+ * ``Ready`` state; scheduler C for the ``Empty`` state (presumably
+ * scheduler C has no ready threads in this test suite configuration —
+ * confirm against the ts-config setup). Without SMP only scheduler A
+ * is available.
+ */
+static void RtemsSchedulerReqAddProcessor_Pre_HasReady_Prepare(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Pre_HasReady state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Pre_HasReady_Ready: {
+ /*
+ * While the scheduler has at least one ready thread.
+ */
+ ctx->scheduler_id = ctx->scheduler_a_id;
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_HasReady_Empty: {
+ /*
+ * While the scheduler has no ready threads.
+ */
+ #if defined(RTEMS_SMP)
+ ctx->scheduler_id = ctx->scheduler_c_id;
+ #else
+ ctx->scheduler_id = ctx->scheduler_a_id;
+ #endif
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_HasReady_NA:
+ break;
+ }
+}
+
+/*
+ * Prepare the Id pre-condition: use either an invalid object
+ * identifier or the identifier of the scheduler selected by the
+ * HasReady pre-condition.
+ */
+static void RtemsSchedulerReqAddProcessor_Pre_Id_Prepare(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Pre_Id_Invalid: {
+ /*
+ * While the ``scheduler_id`` parameter is not associated with a
+ * scheduler.
+ */
+ ctx->id = INVALID_ID;
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_Id_Scheduler: {
+ /*
+ * While the ``scheduler_id`` parameter is associated with a scheduler.
+ */
+ ctx->id = ctx->scheduler_id;
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_Id_NA:
+ break;
+ }
+}
+
+/*
+ * Prepare the CPUIndex pre-condition: select a processor index which
+ * is below (``Valid``) or exactly at (``Invalid``) the configured
+ * processor maximum.
+ */
+static void RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Prepare(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Pre_CPUIndex state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Valid: {
+ /*
+ * While the ``cpu_index`` parameter is less than the configured
+ * processor maximum.
+ */
+ #if defined(RTEMS_SMP)
+ ctx->cpu_index = CPU_TO_ADD;
+ #else
+ ctx->cpu_index = 0;
+ #endif
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Invalid: {
+ /*
+ * While the ``cpu_index`` parameter is greater than or equal to the
+ * configured processor maximum.
+ */
+ ctx->cpu_index = rtems_configuration_get_maximum_processors();
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUIndex_NA:
+ break;
+ }
+}
+
+/*
+ * Prepare the CPUState pre-condition. For ``Idle`` and ``NotOnline``
+ * the processor is first removed from scheduler B and flagged for
+ * re-addition during cleanup; for ``NotOnline`` the processor is
+ * additionally marked offline. For ``NotUsable`` the highest
+ * configured processor index is used instead (assumed not owned by
+ * any scheduler in this test configuration — confirm against
+ * ts-config).
+ */
+static void RtemsSchedulerReqAddProcessor_Pre_CPUState_Prepare(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Pre_CPUState state
+)
+{
+ rtems_status_code sc;
+
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Pre_CPUState_Idle: {
+ /*
+ * While the processor associated with the ``cpu_index`` parameter is
+ * configured to be used by a scheduler, while the processor associated
+ * with the ``cpu_index`` parameter is online, while the processor
+ * associated with the ``cpu_index`` parameter is not owned by a
+ * scheduler.
+ */
+ sc = rtems_scheduler_remove_processor(
+ ctx->scheduler_b_id,
+ CPU_TO_ADD
+ );
+ T_rsc_success( sc );
+ ctx->add_cpu_to_scheduler_b = true;
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUState_InUse: {
+ /*
+ * While the processor associated with the ``cpu_index`` parameter is
+ * owned by a scheduler.
+ */
+ /* Nothing to do */
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUState_NotOnline: {
+ /*
+ * While the processor associated with the ``cpu_index`` parameter is not
+ * online.
+ */
+ sc = rtems_scheduler_remove_processor(
+ ctx->scheduler_b_id,
+ CPU_TO_ADD
+ );
+ T_rsc_success( sc );
+ ctx->add_cpu_to_scheduler_b = true;
+ #if defined(RTEMS_SMP)
+ ctx->cpu->online = false;
+ #endif
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUState_NotUsable: {
+ /*
+ * While the processor associated with the ``cpu_index`` parameter is not
+ * configured to be used by a scheduler.
+ */
+ ctx->cpu_index = rtems_configuration_get_maximum_processors() - 1;
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Pre_CPUState_NA:
+ break;
+ }
+}
+
+/*
+ * Check the Status post-condition: the return status recorded by the
+ * action must match the status code expected by the transition map
+ * entry.
+ */
+static void RtemsSchedulerReqAddProcessor_Post_Status_Check(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_add_processor() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Status_InvId: {
+ /*
+ * The return status of rtems_scheduler_add_processor() shall be
+ * RTEMS_INVALID_ID.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Status_NotConf: {
+ /*
+ * The return status of rtems_scheduler_add_processor() shall be
+ * RTEMS_NOT_CONFIGURED.
+ */
+ T_rsc( ctx->status, RTEMS_NOT_CONFIGURED );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Status_IncStat: {
+ /*
+ * The return status of rtems_scheduler_add_processor() shall be
+ * RTEMS_INCORRECT_STATE.
+ */
+ T_rsc( ctx->status, RTEMS_INCORRECT_STATE );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Status_InUse: {
+ /*
+ * The return status of rtems_scheduler_add_processor() shall be
+ * RTEMS_RESOURCE_IN_USE.
+ */
+ T_rsc( ctx->status, RTEMS_RESOURCE_IN_USE );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Status_NA:
+ break;
+ }
+}
+
+/*
+ * Check the Added post-condition. For ``Yes``, the scheduler log must
+ * record a map-priority followed by an add-processor operation and the
+ * calling task must be able to run on the added processor by pinning
+ * its affinity to it. For ``Nop``, the log must be empty and setting
+ * the affinity to the processor must fail with RTEMS_INVALID_NUMBER.
+ */
+static void RtemsSchedulerReqAddProcessor_Post_Added_Check(
+ RtemsSchedulerReqAddProcessor_Context *ctx,
+ RtemsSchedulerReqAddProcessor_Post_Added state
+)
+{
+ rtems_status_code sc;
+ cpu_set_t set;
+ rtems_task_priority priority;
+
+ switch ( state ) {
+ case RtemsSchedulerReqAddProcessor_Post_Added_Yes: {
+ /*
+ * The processor specified by the ``cpu_index`` parameter shall be added
+ * to the scheduler specified by the ``scheduler_id`` by the
+ * rtems_scheduler_add_processor() call.
+ */
+ T_eq_sz( ctx->scheduler_log.header.recorded, 2 );
+ T_eq_int(
+ ctx->scheduler_log.events[ 0 ].operation,
+ T_SCHEDULER_MAP_PRIORITY
+ );
+ T_eq_int(
+ ctx->scheduler_log.events[ 1 ].operation,
+ T_SCHEDULER_ADD_PROCESSOR
+ );
+
+ priority = GetSelfPriority();
+
+ /* Move to scheduler C first if that is where the processor went. */
+ if ( ctx->scheduler_id == ctx->scheduler_c_id ) {
+ SetSelfScheduler( ctx->scheduler_c_id, priority );
+ }
+
+ SetSelfAffinityOne( CPU_TO_ADD );
+ T_eq_u32( rtems_scheduler_get_processor(), CPU_TO_ADD );
+ SetSelfAffinityAll();
+
+ /* Return to scheduler A so later variants start from a known state. */
+ if ( ctx->scheduler_id == ctx->scheduler_c_id ) {
+ SetSelfScheduler( ctx->scheduler_a_id, priority );
+ }
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Added_Nop: {
+ /*
+ * No processor shall be added to a scheduler by the
+ * rtems_scheduler_add_processor() call.
+ */
+ T_eq_sz( ctx->scheduler_log.header.recorded, 0 );
+
+ CPU_ZERO( &set );
+ CPU_SET( CPU_TO_ADD, &set );
+ sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( set ), &set );
+ T_rsc( sc, RTEMS_INVALID_NUMBER );
+ break;
+ }
+
+ case RtemsSchedulerReqAddProcessor_Post_Added_NA:
+ break;
+ }
+}
+
+/*
+ * Fixture setup: look up the identifiers of the test schedulers by
+ * name. Without SMP, schedulers B and C do not exist and their
+ * identifiers are set to INVALID_ID.
+ */
+static void RtemsSchedulerReqAddProcessor_Setup(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_A_NAME,
+ &ctx->scheduler_a_id
+ );
+ T_rsc_success( sc );
+
+ #if defined(RTEMS_SMP)
+ ctx->cpu = _Per_CPU_Get_by_index( CPU_TO_ADD );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &ctx->scheduler_b_id );
+ T_rsc_success( sc );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_C_NAME, &ctx->scheduler_c_id );
+ T_rsc_success( sc );
+ #else
+ ctx->scheduler_b_id = INVALID_ID;
+ ctx->scheduler_c_id = INVALID_ID;
+ #endif
+}
+
+/* Fixture setup wrapper: recover the typed context and run the setup. */
+static void RtemsSchedulerReqAddProcessor_Setup_Wrap( void *arg )
+{
+ RtemsSchedulerReqAddProcessor_Context *context = arg;
+
+ context->Map.in_action_loop = false;
+ RtemsSchedulerReqAddProcessor_Setup( context );
+}
+
+/*
+ * Per-variant preparation: reset the cleanup flag and record the
+ * online status of the processor to add so that cleanup can restore it
+ * (SMP only).
+ */
+static void RtemsSchedulerReqAddProcessor_Prepare(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ ctx->add_cpu_to_scheduler_b = false;
+ ctx->online = _Per_CPU_Is_processor_online( ctx->cpu );
+ #endif
+}
+
+/*
+ * Test action: call rtems_scheduler_add_processor() with the prepared
+ * ``id`` and ``cpu_index`` values while recording up to two scheduler
+ * operations in the context's scheduler log.
+ */
+static void RtemsSchedulerReqAddProcessor_Action(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ T_scheduler_log *log;
+
+ /* A NULL result means no other log was active before this one. */
+ log = T_scheduler_record_2( &ctx->scheduler_log );
+ T_null( log );
+
+ ctx->status = rtems_scheduler_add_processor( ctx->id, ctx->cpu_index );
+
+ /* Stop recording and verify our log was the one in effect. */
+ log = T_scheduler_record( NULL );
+ T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+}
+
+/*
+ * Undo the action: restore the online status of the processor, remove
+ * the processor from the scheduler again if the call succeeded, and
+ * give the processor back to scheduler B if it was taken from it
+ * during pre-condition preparation (SMP only).
+ */
+static void RtemsSchedulerReqAddProcessor_Cleanup(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ ctx->cpu->online = ctx->online;
+
+ if ( ctx->status == RTEMS_SUCCESSFUL ) {
+ sc = rtems_scheduler_remove_processor( ctx->scheduler_id, CPU_TO_ADD );
+ T_rsc_success( sc );
+ }
+
+ if ( ctx->add_cpu_to_scheduler_b ) {
+ sc = rtems_scheduler_add_processor( ctx->scheduler_b_id, CPU_TO_ADD );
+ T_rsc_success( sc );
+ }
+ #endif
+}
+
+/*
+ * Transition map entry table. Entries guarded by RTEMS_SMP carry the
+ * expected post-conditions for SMP configurations; the corresponding
+ * non-SMP alternatives are marked Skip because those variants cannot
+ * occur there.
+ */
+static const RtemsSchedulerReqAddProcessor_Entry
+RtemsSchedulerReqAddProcessor_Entries[] = {
+ { 0, 0, 0, 0, 1, RtemsSchedulerReqAddProcessor_Post_Status_InvId,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+ { 0, 0, 0, 0, 1, RtemsSchedulerReqAddProcessor_Post_Status_NotConf,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_InvId,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+#else
+ { 1, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_NA,
+ RtemsSchedulerReqAddProcessor_Post_Added_NA },
+#endif
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_InvId,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_Ok,
+ RtemsSchedulerReqAddProcessor_Post_Added_Yes },
+#else
+ { 1, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_NA,
+ RtemsSchedulerReqAddProcessor_Post_Added_NA },
+#endif
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_InUse,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_IncStat,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop },
+#else
+ { 1, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_NA,
+ RtemsSchedulerReqAddProcessor_Post_Added_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_NotConf,
+ RtemsSchedulerReqAddProcessor_Post_Added_Nop }
+#else
+ { 1, 0, 0, 0, 0, RtemsSchedulerReqAddProcessor_Post_Status_NA,
+ RtemsSchedulerReqAddProcessor_Post_Added_NA }
+#endif
+};
+
+/*
+ * Maps each of the 2*2*2*4 = 32 pre-condition state combinations (in
+ * loop order HasReady, Id, CPUIndex, CPUState) to an index into the
+ * entry table above.
+ */
+static const uint8_t
+RtemsSchedulerReqAddProcessor_Map[] = {
+ 2, 3, 2, 2, 0, 0, 0, 0, 4, 5, 6, 7, 1, 1, 1, 1, 2, 3, 2, 2, 0, 0, 0, 0, 4, 5,
+ 6, 7, 1, 1, 1, 1
+};
+
+/*
+ * Produce the test variant description for failure messages; empty
+ * outside of the action loop.
+ */
+static size_t RtemsSchedulerReqAddProcessor_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ RtemsSchedulerReqAddProcessor_Context *ctx = arg;
+
+ if ( !ctx->Map.in_action_loop ) {
+ return 0;
+ }
+
+ return T_get_scope(
+ RtemsSchedulerReqAddProcessor_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+}
+
+/* Test fixture binding setup wrapper, scope callback, and context. */
+static T_fixture RtemsSchedulerReqAddProcessor_Fixture = {
+ .setup = RtemsSchedulerReqAddProcessor_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqAddProcessor_Scope,
+ .initial_context = &RtemsSchedulerReqAddProcessor_Instance
+};
+
+/* Fetch the current transition map entry and advance the map index. */
+static inline RtemsSchedulerReqAddProcessor_Entry
+RtemsSchedulerReqAddProcessor_PopEntry(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ return RtemsSchedulerReqAddProcessor_Entries[
+ RtemsSchedulerReqAddProcessor_Map[ ctx->Map.index++ ]
+ ];
+}
+
+/*
+ * Derive the pre-condition states from the loop indices; the CPUState
+ * pre-condition is forced to NA when the current entry marks it as
+ * not applicable.
+ */
+static void RtemsSchedulerReqAddProcessor_SetPreConditionStates(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+
+ if ( ctx->Map.entry.Pre_CPUState_NA ) {
+ ctx->Map.pcs[ 3 ] = RtemsSchedulerReqAddProcessor_Pre_CPUState_NA;
+ } else {
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ }
+}
+
+/*
+ * Run one transition map variant: prepare all four pre-conditions,
+ * perform the action, and check both post-conditions against the
+ * current entry.
+ */
+static void RtemsSchedulerReqAddProcessor_TestVariant(
+ RtemsSchedulerReqAddProcessor_Context *ctx
+)
+{
+ RtemsSchedulerReqAddProcessor_Pre_HasReady_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSchedulerReqAddProcessor_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSchedulerReqAddProcessor_Pre_CPUState_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+ RtemsSchedulerReqAddProcessor_Action( ctx );
+ RtemsSchedulerReqAddProcessor_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ RtemsSchedulerReqAddProcessor_Post_Added_Check(
+ ctx,
+ ctx->Map.entry.Post_Added
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqAddProcessor( void )
+ *
+ * @brief Walks the complete pre-condition state space in the order of
+ * the transition map, skipping entries marked Skip, and runs one
+ * test variant per remaining combination.
+ */
+T_TEST_CASE_FIXTURE(
+ RtemsSchedulerReqAddProcessor,
+ &RtemsSchedulerReqAddProcessor_Fixture
+)
+{
+ RtemsSchedulerReqAddProcessor_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ /* Loop order must match the layout of the transition map. */
+ for (
+ ctx->Map.pci[ 0 ] = RtemsSchedulerReqAddProcessor_Pre_HasReady_Ready;
+ ctx->Map.pci[ 0 ] < RtemsSchedulerReqAddProcessor_Pre_HasReady_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = RtemsSchedulerReqAddProcessor_Pre_Id_Invalid;
+ ctx->Map.pci[ 1 ] < RtemsSchedulerReqAddProcessor_Pre_Id_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = RtemsSchedulerReqAddProcessor_Pre_CPUIndex_Valid;
+ ctx->Map.pci[ 2 ] < RtemsSchedulerReqAddProcessor_Pre_CPUIndex_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = RtemsSchedulerReqAddProcessor_Pre_CPUState_Idle;
+ ctx->Map.pci[ 3 ] < RtemsSchedulerReqAddProcessor_Pre_CPUState_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqAddProcessor_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSchedulerReqAddProcessor_SetPreConditionStates( ctx );
+ RtemsSchedulerReqAddProcessor_Prepare( ctx );
+ RtemsSchedulerReqAddProcessor_TestVariant( ctx );
+ RtemsSchedulerReqAddProcessor_Cleanup( ctx );
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-get-maximum-priority.c b/testsuites/validation/tc-scheduler-get-maximum-priority.c
new file mode 100644
index 0000000000..45ef88c5a6
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-get-maximum-priority.c
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqGetMaximumPriority
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <limits.h>
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqGetMaximumPriority \
+ * spec:/rtems/scheduler/req/get-maximum-priority
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqGetMaximumPriority_Pre_Id_Invalid,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Id_Scheduler,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Id_NA
+} RtemsSchedulerReqGetMaximumPriority_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Valid,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Null,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Prio_NA
+} RtemsSchedulerReqGetMaximumPriority_Pre_Prio;
+
+typedef enum {
+ RtemsSchedulerReqGetMaximumPriority_Post_Status_Ok,
+ RtemsSchedulerReqGetMaximumPriority_Post_Status_InvAddr,
+ RtemsSchedulerReqGetMaximumPriority_Post_Status_InvId,
+ RtemsSchedulerReqGetMaximumPriority_Post_Status_NA
+} RtemsSchedulerReqGetMaximumPriority_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Set,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Nop,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_NA
+} RtemsSchedulerReqGetMaximumPriority_Post_PrioObj;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Pre_Prio_NA : 1;
+ uint8_t Post_Status : 2;
+ uint8_t Post_PrioObj : 2;
+} RtemsSchedulerReqGetMaximumPriority_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/get-maximum-priority test
+ * case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the identifier of a scheduler.
+ */
+ rtems_id scheduler_id;
+
+ /**
+ * @brief This member provides the object referenced by the ``priority``
+ * parameter.
+ */
+ rtems_task_priority priority_obj;
+
+ /**
+ * @brief This member contains the return value of the
+ * rtems_scheduler_get_maximum_priority() call.
+ */
+ rtems_status_code status;
+
+ /**
+   * @brief This member specifies the ``scheduler_id`` parameter value.
+ */
+ rtems_id id;
+
+ /**
+   * @brief This member specifies the ``priority`` parameter value.
+ */
+ rtems_task_priority *priority;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 2 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSchedulerReqGetMaximumPriority_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSchedulerReqGetMaximumPriority_Context;
+
+static RtemsSchedulerReqGetMaximumPriority_Context
+ RtemsSchedulerReqGetMaximumPriority_Instance;
+
+static const char * const RtemsSchedulerReqGetMaximumPriority_PreDesc_Id[] = {
+ "Invalid",
+ "Scheduler",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqGetMaximumPriority_PreDesc_Prio[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqGetMaximumPriority_PreDesc[] = {
+ RtemsSchedulerReqGetMaximumPriority_PreDesc_Id,
+ RtemsSchedulerReqGetMaximumPriority_PreDesc_Prio,
+ NULL
+};
+
+static void RtemsSchedulerReqGetMaximumPriority_Pre_Id_Prepare(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Id_Invalid: {
+ /*
+ * While the ``scheduler_id`` parameter is not associated with a
+ * scheduler.
+ */
+ ctx->id = INVALID_ID;
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Id_Scheduler: {
+ /*
+ * While the ``scheduler_id`` parameter is associated with a scheduler.
+ */
+ ctx->id = ctx->scheduler_id;
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Prepare(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx,
+ RtemsSchedulerReqGetMaximumPriority_Pre_Prio state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Valid: {
+ /*
+ * While the ``priority`` parameter references an object of type
+ * rtems_task_priority.
+ */
+ ctx->priority = &ctx->priority_obj;
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Null: {
+ /*
+ * While the ``priority`` parameter is equal to NULL.
+ */
+ ctx->priority = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Pre_Prio_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Post_Status_Check(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx,
+ RtemsSchedulerReqGetMaximumPriority_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetMaximumPriority_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_get_maximum_priority() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_scheduler_get_maximum_priority() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Post_Status_InvId: {
+ /*
+ * The return status of rtems_scheduler_get_maximum_priority() shall be
+ * RTEMS_INVALID_ID.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Check(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Set: {
+ /*
+ * The value of the object referenced by the ``priority`` parameter shall
+ * be set to the maximum priority value of the scheduler specified by the
+ * ``scheduler_id`` parameter after the return of the
+ * rtems_scheduler_get_maximum_priority() call.
+ */
+ #if defined(RTEMS_SMP)
+ T_eq_u32( ctx->priority_obj, INT_MAX );
+ #else
+ T_eq_u32( ctx->priority_obj, 127 );
+ #endif
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Nop: {
+ /*
+ * Objects referenced by the ``priority`` parameter in past calls to
+ * rtems_scheduler_get_maximum_priority() shall not be accessed by the
+ * rtems_scheduler_get_maximum_priority() call.
+ */
+ T_eq_u32( ctx->priority_obj, PRIO_INVALID );
+ break;
+ }
+
+ case RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Setup(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_A_NAME,
+ &ctx->scheduler_id
+ );
+ T_rsc_success( sc );
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Setup_Wrap( void *arg )
+{
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSchedulerReqGetMaximumPriority_Setup( ctx );
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Prepare(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx
+)
+{
+ ctx->priority_obj = PRIO_INVALID;
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_Action(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx
+)
+{
+ ctx->status = rtems_scheduler_get_maximum_priority( ctx->id, ctx->priority );
+}
+
+static const RtemsSchedulerReqGetMaximumPriority_Entry
+RtemsSchedulerReqGetMaximumPriority_Entries[] = {
+ { 0, 0, 0, RtemsSchedulerReqGetMaximumPriority_Post_Status_InvAddr,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Nop },
+ { 0, 0, 0, RtemsSchedulerReqGetMaximumPriority_Post_Status_InvId,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Nop },
+ { 0, 0, 0, RtemsSchedulerReqGetMaximumPriority_Post_Status_Ok,
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Set }
+};
+
+static const uint8_t
+RtemsSchedulerReqGetMaximumPriority_Map[] = {
+ 1, 0, 2, 0
+};
+
+static size_t RtemsSchedulerReqGetMaximumPriority_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ RtemsSchedulerReqGetMaximumPriority_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSchedulerReqGetMaximumPriority_Fixture = {
+ .setup = RtemsSchedulerReqGetMaximumPriority_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqGetMaximumPriority_Scope,
+ .initial_context = &RtemsSchedulerReqGetMaximumPriority_Instance
+};
+
+static inline RtemsSchedulerReqGetMaximumPriority_Entry
+RtemsSchedulerReqGetMaximumPriority_PopEntry(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSchedulerReqGetMaximumPriority_Entries[
+ RtemsSchedulerReqGetMaximumPriority_Map[ index ]
+ ];
+}
+
+static void RtemsSchedulerReqGetMaximumPriority_TestVariant(
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx
+)
+{
+ RtemsSchedulerReqGetMaximumPriority_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ RtemsSchedulerReqGetMaximumPriority_Action( ctx );
+ RtemsSchedulerReqGetMaximumPriority_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ RtemsSchedulerReqGetMaximumPriority_Post_PrioObj_Check(
+ ctx,
+ ctx->Map.entry.Post_PrioObj
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqGetMaximumPriority( void )
+ */
+T_TEST_CASE_FIXTURE(
+ RtemsSchedulerReqGetMaximumPriority,
+ &RtemsSchedulerReqGetMaximumPriority_Fixture
+)
+{
+ RtemsSchedulerReqGetMaximumPriority_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSchedulerReqGetMaximumPriority_Pre_Id_Invalid;
+ ctx->Map.pcs[ 0 ] < RtemsSchedulerReqGetMaximumPriority_Pre_Id_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSchedulerReqGetMaximumPriority_Pre_Prio_Valid;
+ ctx->Map.pcs[ 1 ] < RtemsSchedulerReqGetMaximumPriority_Pre_Prio_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqGetMaximumPriority_PopEntry( ctx );
+ RtemsSchedulerReqGetMaximumPriority_Prepare( ctx );
+ RtemsSchedulerReqGetMaximumPriority_TestVariant( ctx );
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-get-processor-set.c b/testsuites/validation/tc-scheduler-get-processor-set.c
new file mode 100644
index 0000000000..3b48df44f9
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-get-processor-set.c
@@ -0,0 +1,550 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqGetProcessorSet
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqGetProcessorSet \
+ * spec:/rtems/scheduler/req/get-processor-set
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqGetProcessorSet_Pre_Id_Invalid,
+ RtemsSchedulerReqGetProcessorSet_Pre_Id_Scheduler,
+ RtemsSchedulerReqGetProcessorSet_Pre_Id_NA
+} RtemsSchedulerReqGetProcessorSet_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Valid,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_TooSmall,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Askew,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_NA
+} RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize;
+
+typedef enum {
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Valid,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Null,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_NA
+} RtemsSchedulerReqGetProcessorSet_Pre_CPUSet;
+
+typedef enum {
+ RtemsSchedulerReqGetProcessorSet_Post_Status_Ok,
+ RtemsSchedulerReqGetProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqGetProcessorSet_Post_Status_InvId,
+ RtemsSchedulerReqGetProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqGetProcessorSet_Post_Status_NA
+} RtemsSchedulerReqGetProcessorSet_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Set,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Nop,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_NA
+} RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Pre_CPUSetSize_NA : 1;
+ uint16_t Pre_CPUSet_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_CPUSetVar : 2;
+} RtemsSchedulerReqGetProcessorSet_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/get-processor-set test
+ * case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the identifier of a scheduler.
+ */
+ rtems_id scheduler_id;
+
+ /**
+   * @brief This member provides the object referenced by the ``cpuset``
+   *   parameter.
+ */
+ cpu_set_t cpuset_value;
+
+ /**
+ * @brief This member contains the return value of the
+ * rtems_scheduler_get_processor_set() call.
+ */
+ rtems_status_code status;
+
+ /**
+   * @brief This member specifies the ``scheduler_id`` parameter value.
+ */
+ rtems_id id;
+
+ /**
+   * @brief This member specifies the ``cpusetsize`` parameter value.
+ */
+ size_t cpusetsize;
+
+ /**
+   * @brief This member specifies the ``cpuset`` parameter value.
+ */
+ cpu_set_t *cpuset;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSchedulerReqGetProcessorSet_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSchedulerReqGetProcessorSet_Context;
+
+static RtemsSchedulerReqGetProcessorSet_Context
+ RtemsSchedulerReqGetProcessorSet_Instance;
+
+static const char * const RtemsSchedulerReqGetProcessorSet_PreDesc_Id[] = {
+ "Invalid",
+ "Scheduler",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqGetProcessorSet_PreDesc_CPUSetSize[] = {
+ "Valid",
+ "TooSmall",
+ "Askew",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqGetProcessorSet_PreDesc_CPUSet[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqGetProcessorSet_PreDesc[] = {
+ RtemsSchedulerReqGetProcessorSet_PreDesc_Id,
+ RtemsSchedulerReqGetProcessorSet_PreDesc_CPUSetSize,
+ RtemsSchedulerReqGetProcessorSet_PreDesc_CPUSet,
+ NULL
+};
+
+static void RtemsSchedulerReqGetProcessorSet_Pre_Id_Prepare(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx,
+ RtemsSchedulerReqGetProcessorSet_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetProcessorSet_Pre_Id_Invalid: {
+ /*
+ * While the ``scheduler_id`` parameter is not associated with a
+ * scheduler.
+ */
+ ctx->id = INVALID_ID;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_Id_Scheduler: {
+ /*
+ * While the ``scheduler_id`` parameter is associated with a scheduler.
+ */
+ ctx->id = ctx->scheduler_id;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Prepare(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Valid: {
+ /*
+ * While the ``cpusetsize`` parameter is an integral multiple of the size
+ * of long, while the ``cpusetsize`` parameter specifies a processor set
+ * which is large enough to contain the processor set of the scheduler.
+ */
+ ctx->cpusetsize = sizeof( ctx->cpuset_value );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_TooSmall: {
+ /*
+ * While the ``cpusetsize`` parameter is an integral multiple of the size
+ * of long, while the ``cpusetsize`` parameter specifies a processor set
+ * which is not large enough to contain the processor set of the
+ * scheduler.
+ */
+ ctx->cpusetsize = 0;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Askew: {
+ /*
+ * While the ``cpusetsize`` parameter is not an integral multiple of the
+ * size of long.
+ */
+ ctx->cpusetsize = SIZE_MAX;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Prepare(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx,
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSet state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Valid: {
+ /*
+ * While the ``cpuset`` parameter references an object of type cpu_set_t.
+ */
+ ctx->cpuset = &ctx->cpuset_value;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Null: {
+ /*
+ * While the ``cpuset`` parameter is equal to NULL.
+ */
+ ctx->cpuset = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Post_Status_Check(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx,
+ RtemsSchedulerReqGetProcessorSet_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqGetProcessorSet_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_get_processor_set() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_scheduler_get_processor_set() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_Status_InvId: {
+ /*
+ * The return status of rtems_scheduler_get_processor_set() shall be
+ * RTEMS_INVALID_ID.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_Status_InvSize: {
+ /*
+ * The return status of rtems_scheduler_get_processor_set() shall be
+ * RTEMS_INVALID_SIZE.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_SIZE );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Check(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar state
+)
+{
+ cpu_set_t set;
+
+ switch ( state ) {
+ case RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Set: {
+ /*
+ * The value of the object referenced by the ``cpuset`` parameter shall
+ * be set to the processor set owned by the scheduler specified by the
+ * ``scheduler_id`` parameter at some point during the call after the
+ * return of the rtems_scheduler_get_processor_set() call.
+ */
+ CPU_ZERO( &set );
+ CPU_SET( 0, &set );
+ T_eq_int( CPU_CMP( &ctx->cpuset_value, &set ), 0 );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Nop: {
+ /*
+ * Objects referenced by the ``cpuset`` parameter in past calls to
+ * rtems_scheduler_get_processor_set() shall not be accessed by the
+ * rtems_scheduler_get_processor_set() call.
+ */
+ CPU_FILL( &set );
+ T_eq_int( CPU_CMP( &ctx->cpuset_value, &set ), 0 );
+ break;
+ }
+
+ case RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Setup(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_A_NAME,
+ &ctx->scheduler_id
+ );
+ T_rsc_success( sc );
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Setup_Wrap( void *arg )
+{
+ RtemsSchedulerReqGetProcessorSet_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSchedulerReqGetProcessorSet_Setup( ctx );
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Prepare(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx
+)
+{
+ CPU_FILL( &ctx->cpuset_value );
+}
+
+static void RtemsSchedulerReqGetProcessorSet_Action(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx
+)
+{
+ ctx->status = rtems_scheduler_get_processor_set(
+ ctx->id,
+ ctx->cpusetsize,
+ ctx->cpuset
+ );
+}
+
+static const RtemsSchedulerReqGetProcessorSet_Entry
+RtemsSchedulerReqGetProcessorSet_Entries[] = {
+ { 0, 0, 0, 0, RtemsSchedulerReqGetProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Nop },
+ { 0, 0, 0, 0, RtemsSchedulerReqGetProcessorSet_Post_Status_InvId,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Nop },
+ { 0, 0, 0, 0, RtemsSchedulerReqGetProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Nop },
+ { 0, 0, 0, 0, RtemsSchedulerReqGetProcessorSet_Post_Status_Ok,
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Set }
+};
+
+static const uint8_t
+RtemsSchedulerReqGetProcessorSet_Map[] = {
+ 1, 0, 1, 0, 1, 0, 3, 0, 2, 0, 2, 0
+};
+
+static size_t RtemsSchedulerReqGetProcessorSet_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ RtemsSchedulerReqGetProcessorSet_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ RtemsSchedulerReqGetProcessorSet_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSchedulerReqGetProcessorSet_Fixture = {
+ .setup = RtemsSchedulerReqGetProcessorSet_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqGetProcessorSet_Scope,
+ .initial_context = &RtemsSchedulerReqGetProcessorSet_Instance
+};
+
+static inline RtemsSchedulerReqGetProcessorSet_Entry
+RtemsSchedulerReqGetProcessorSet_PopEntry(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSchedulerReqGetProcessorSet_Entries[
+ RtemsSchedulerReqGetProcessorSet_Map[ index ]
+ ];
+}
+
+static void RtemsSchedulerReqGetProcessorSet_TestVariant(
+ RtemsSchedulerReqGetProcessorSet_Context *ctx
+)
+{
+ RtemsSchedulerReqGetProcessorSet_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ RtemsSchedulerReqGetProcessorSet_Action( ctx );
+ RtemsSchedulerReqGetProcessorSet_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ RtemsSchedulerReqGetProcessorSet_Post_CPUSetVar_Check(
+ ctx,
+ ctx->Map.entry.Post_CPUSetVar
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqGetProcessorSet( void )
+ */
+T_TEST_CASE_FIXTURE(
+ RtemsSchedulerReqGetProcessorSet,
+ &RtemsSchedulerReqGetProcessorSet_Fixture
+)
+{
+ RtemsSchedulerReqGetProcessorSet_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSchedulerReqGetProcessorSet_Pre_Id_Invalid;
+ ctx->Map.pcs[ 0 ] < RtemsSchedulerReqGetProcessorSet_Pre_Id_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_Valid;
+ ctx->Map.pcs[ 1 ] < RtemsSchedulerReqGetProcessorSet_Pre_CPUSetSize_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 2 ] = RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_Valid;
+ ctx->Map.pcs[ 2 ] < RtemsSchedulerReqGetProcessorSet_Pre_CPUSet_NA;
+ ++ctx->Map.pcs[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqGetProcessorSet_PopEntry( ctx );
+ RtemsSchedulerReqGetProcessorSet_Prepare( ctx );
+ RtemsSchedulerReqGetProcessorSet_TestVariant( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-ident-by-processor-set.c b/testsuites/validation/tc-scheduler-ident-by-processor-set.c
new file mode 100644
index 0000000000..57b660c835
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-ident-by-processor-set.c
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqIdentByProcessorSet
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqIdentByProcessorSet \
+ * spec:/rtems/scheduler/req/ident-by-processor-set
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_Yes,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_No,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_NA
+} RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Invalid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Valid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_NA
+} RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Valid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Invalid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_NA
+} RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Valid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Null,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_NA
+} RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Valid,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Null,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_Id_NA
+} RtemsSchedulerReqIdentByProcessorSet_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_Ok,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvName,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_IncStat,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_NA
+} RtemsSchedulerReqIdentByProcessorSet_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Set,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_NA
+} RtemsSchedulerReqIdentByProcessorSet_Post_IdVar;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_CPUOwnedByScheduler_NA : 1;
+ uint16_t Pre_CPUSetObj_NA : 1;
+ uint16_t Pre_CPUSetSize_NA : 1;
+ uint16_t Pre_CPUSet_NA : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_IdVar : 2;
+} RtemsSchedulerReqIdentByProcessorSet_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/ident-by-processor-set
+ * test case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the identifier of a second scheduler.
+ */
+ rtems_id second_scheduler_id;
+
+ /**
+ * @brief This member provides the object referenced by the ``cpuset``
+ * parameter.
+ */
+ cpu_set_t cpuset_value;
+
+ /**
+ * @brief This member provides the object referenced by the ``id`` parameter.
+ */
+ rtems_id id_value;
+
+ /**
+   * @brief If this member is true, then the processor specified by the
+   *   ``cpuset`` parameter shall be owned by a scheduler.
+ */
+ bool cpu_has_scheduler;
+
+ /**
+ * @brief This member contains the return value of the
+ * rtems_scheduler_ident_by_processor_set() call.
+ */
+ rtems_status_code status;
+
+ /**
+   * @brief This member specifies the ``cpusetsize`` parameter value.
+ */
+ size_t cpusetsize;
+
+ /**
+   * @brief This member specifies the ``cpuset`` parameter value.
+ */
+ const cpu_set_t *cpuset;
+
+ /**
+   * @brief This member specifies the ``id`` parameter value.
+ */
+ rtems_id *id;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 5 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 5 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSchedulerReqIdentByProcessorSet_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSchedulerReqIdentByProcessorSet_Context;
+
+static RtemsSchedulerReqIdentByProcessorSet_Context
+ RtemsSchedulerReqIdentByProcessorSet_Instance;
+
+static const char * const RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUOwnedByScheduler[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSetObj[] = {
+ "Invalid",
+ "Valid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSetSize[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSet[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessorSet_PreDesc_Id[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqIdentByProcessorSet_PreDesc[] = {
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUOwnedByScheduler,
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSetObj,
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSetSize,
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc_CPUSet,
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc_Id,
+ NULL
+};
+
+static void
+RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_Yes: {
+ /*
+ * While the highest numbered online processor specified by the processor
+ * set is owned by a scheduler.
+ */
+ ctx->cpu_has_scheduler = true;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_No: {
+ /*
+ * While the highest numbered online processor specified by the processor
+ * set is not owned by a scheduler.
+ */
+ ctx->cpu_has_scheduler = false;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Invalid: {
+ /*
+ * While the processor set contains no online processor.
+ */
+ CPU_ZERO( &ctx->cpuset_value );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Valid: {
+ /*
+ * While the processor set contains at least one online processor.
+ */
+ CPU_ZERO( &ctx->cpuset_value );
+
+ if ( ctx->cpu_has_scheduler ) {
+ CPU_SET( 0, &ctx->cpuset_value );
+ } else {
+ CPU_SET( 1, &ctx->cpuset_value );
+ }
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Valid: {
+ /*
+ * While the ``cpusetsize`` parameter is an integral multiple of the size
+ * of long.
+ */
+ ctx->cpusetsize = sizeof( ctx->cpuset_value );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Invalid: {
+ /*
+ * While the ``cpusetsize`` parameter is not an integral multiple of the
+ * size of long.
+ */
+ ctx->cpusetsize = 1;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Valid: {
+ /*
+ * While the ``cpuset`` parameter references an object of type cpu_set_t.
+ */
+ ctx->cpuset = &ctx->cpuset_value;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Null: {
+ /*
+ * While the ``cpuset`` parameter is equal to NULL.
+ */
+ ctx->cpuset = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter references an object of type rtems_id.
+ */
+ ctx->id_value = INVALID_ID;
+ ctx->id = &ctx->id_value;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Null: {
+ /*
+ * While the ``id`` parameter is equal to NULL.
+ */
+ ctx->id = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Post_Status_Check(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor_set() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor_set() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvSize: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor_set() shall be
+ * RTEMS_INVALID_SIZE.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_SIZE );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvName: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor_set() shall be
+ * RTEMS_INVALID_NAME.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_IncStat: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor_set() shall be
+       * RTEMS_INCORRECT_STATE.
+ */
+ T_rsc( ctx->status, RTEMS_INCORRECT_STATE );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Check(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Set: {
+ /*
+ * The value of the object referenced by the ``id`` parameter shall be
+ * set to the identifier of the scheduler which owned the highest
+       * numbered online processor specified by the ``cpusetsize`` and
+       * ``cpuset`` parameters at some point during the call after the
+       * return of the rtems_scheduler_ident_by_processor_set() call.
+ */
+ T_eq_u32( ctx->id_value, 0x0f010001 );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop: {
+ /*
+ * Objects referenced by the ``id`` parameter in past calls to
+ * rtems_scheduler_ident_by_processor_set() shall not be accessed by the
+ * rtems_scheduler_ident_by_processor_set() call.
+ */
+ T_eq_u32( ctx->id_value, INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Setup(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_B_NAME,
+ &ctx->second_scheduler_id
+ );
+ T_rsc_success( sc );
+ #else
+ ctx->second_scheduler_id = INVALID_ID;
+ #endif
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Setup_Wrap( void *arg )
+{
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSchedulerReqIdentByProcessorSet_Setup( ctx );
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Prepare(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ ctx->id_value = INVALID_ID;
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_Action(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ if ( !ctx->cpu_has_scheduler ) {
+ sc = rtems_scheduler_remove_processor( ctx->second_scheduler_id, 1 );
+ T_rsc_success( sc );
+ }
+ #endif
+
+ ctx->status = rtems_scheduler_ident_by_processor_set(
+ ctx->cpusetsize,
+ ctx->cpuset,
+ ctx->id
+ );
+
+ #if defined(RTEMS_SMP)
+ if ( !ctx->cpu_has_scheduler ) {
+ sc = rtems_scheduler_add_processor( ctx->second_scheduler_id, 1 );
+ T_rsc_success( sc );
+ }
+ #endif
+}
+
+static const RtemsSchedulerReqIdentByProcessorSet_Entry
+RtemsSchedulerReqIdentByProcessorSet_Entries[] = {
+ { 0, 1, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_NA,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_NA },
+#endif
+ { 0, 1, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvName,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+ { 0, 1, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_Ok,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Set },
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_IncStat,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop },
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_NA,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_InvSize,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Nop }
+#else
+ { 1, 0, 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessorSet_Post_Status_NA,
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSchedulerReqIdentByProcessorSet_Map[] = {
+ 3, 0, 0, 0, 4, 0, 0, 0, 5, 1, 1, 1, 6, 1, 1, 1, 3, 0, 0, 0, 4, 0, 0, 0, 7, 2,
+ 2, 2, 8, 2, 2, 2
+};
+
+static size_t RtemsSchedulerReqIdentByProcessorSet_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ RtemsSchedulerReqIdentByProcessorSet_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSchedulerReqIdentByProcessorSet_Fixture = {
+ .setup = RtemsSchedulerReqIdentByProcessorSet_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqIdentByProcessorSet_Scope,
+ .initial_context = &RtemsSchedulerReqIdentByProcessorSet_Instance
+};
+
+static inline RtemsSchedulerReqIdentByProcessorSet_Entry
+RtemsSchedulerReqIdentByProcessorSet_PopEntry(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSchedulerReqIdentByProcessorSet_Entries[
+ RtemsSchedulerReqIdentByProcessorSet_Map[ index ]
+ ];
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_SetPreConditionStates(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ if ( ctx->Map.entry.Pre_CPUOwnedByScheduler_NA ) {
+ ctx->Map.pcs[ 0 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_NA;
+ } else {
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ }
+
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+ ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+ ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+}
+
+static void RtemsSchedulerReqIdentByProcessorSet_TestVariant(
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx
+)
+{
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Prepare(
+ ctx,
+ ctx->Map.pcs[ 2 ]
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Prepare(
+ ctx,
+ ctx->Map.pcs[ 3 ]
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Prepare(
+ ctx,
+ ctx->Map.pcs[ 4 ]
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Action( ctx );
+ RtemsSchedulerReqIdentByProcessorSet_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ RtemsSchedulerReqIdentByProcessorSet_Post_IdVar_Check(
+ ctx,
+ ctx->Map.entry.Post_IdVar
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqIdentByProcessorSet( void )
+ */
+T_TEST_CASE_FIXTURE(
+ RtemsSchedulerReqIdentByProcessorSet,
+ &RtemsSchedulerReqIdentByProcessorSet_Fixture
+)
+{
+ RtemsSchedulerReqIdentByProcessorSet_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pci[ 0 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_Yes;
+ ctx->Map.pci[ 0 ] < RtemsSchedulerReqIdentByProcessorSet_Pre_CPUOwnedByScheduler_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_Invalid;
+ ctx->Map.pci[ 1 ] < RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetObj_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_Valid;
+ ctx->Map.pci[ 2 ] < RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSetSize_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ for (
+ ctx->Map.pci[ 3 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_Valid;
+ ctx->Map.pci[ 3 ] < RtemsSchedulerReqIdentByProcessorSet_Pre_CPUSet_NA;
+ ++ctx->Map.pci[ 3 ]
+ ) {
+ for (
+ ctx->Map.pci[ 4 ] = RtemsSchedulerReqIdentByProcessorSet_Pre_Id_Valid;
+ ctx->Map.pci[ 4 ] < RtemsSchedulerReqIdentByProcessorSet_Pre_Id_NA;
+ ++ctx->Map.pci[ 4 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqIdentByProcessorSet_PopEntry(
+ ctx
+ );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSchedulerReqIdentByProcessorSet_SetPreConditionStates( ctx );
+ RtemsSchedulerReqIdentByProcessorSet_Prepare( ctx );
+ RtemsSchedulerReqIdentByProcessorSet_TestVariant( ctx );
+ }
+ }
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-ident-by-processor.c b/testsuites/validation/tc-scheduler-ident-by-processor.c
new file mode 100644
index 0000000000..43021fdc23
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-ident-by-processor.c
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqIdentByProcessor
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqIdentByProcessor \
+ * spec:/rtems/scheduler/req/ident-by-processor
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_Yes,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_No,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_NA
+} RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Invalid,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Valid,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_NA
+} RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessor_Pre_Id_Valid,
+ RtemsSchedulerReqIdentByProcessor_Pre_Id_Null,
+ RtemsSchedulerReqIdentByProcessor_Pre_Id_NA
+} RtemsSchedulerReqIdentByProcessor_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessor_Post_Status_Ok,
+ RtemsSchedulerReqIdentByProcessor_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessor_Post_Status_InvName,
+ RtemsSchedulerReqIdentByProcessor_Post_Status_IncStat,
+ RtemsSchedulerReqIdentByProcessor_Post_Status_NA
+} RtemsSchedulerReqIdentByProcessor_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Set,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_NA
+} RtemsSchedulerReqIdentByProcessor_Post_IdVar;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_CPUOwnedByScheduler_NA : 1;
+ uint16_t Pre_CPUIndex_NA : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_IdVar : 2;
+} RtemsSchedulerReqIdentByProcessor_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/ident-by-processor test
+ * case.
+ */
+typedef struct {
+ /**
+ * @brief This member contains the identifier of a second scheduler.
+ */
+ rtems_id second_scheduler_id;
+
+ /**
+ * @brief This member provides the object referenced by the ``id`` parameter.
+ */
+ rtems_id id_value;
+
+ /**
+ * @brief If this member is true, then the processor specified by the
+ * ``cpu_index`` parameter shall be owned by a scheduler.
+ */
+ bool cpu_has_scheduler;
+
+ /**
+ * @brief This member contains the return value of the
+ * rtems_scheduler_ident_by_processor() call.
+ */
+ rtems_status_code status;
+
+ /**
+   * @brief This member specifies the ``cpu_index`` parameter value.
+ */
+ uint32_t cpu_index;
+
+ /**
+   * @brief This member specifies the ``id`` parameter value.
+ */
+ rtems_id *id;
+
+ struct {
+ /**
+ * @brief This member defines the pre-condition indices for the next
+ * action.
+ */
+ size_t pci[ 3 ];
+
+ /**
+ * @brief This member defines the pre-condition states for the next action.
+ */
+ size_t pcs[ 3 ];
+
+ /**
+ * @brief If this member is true, then the test action loop is executed.
+ */
+ bool in_action_loop;
+
+ /**
+ * @brief This member contains the next transition map index.
+ */
+ size_t index;
+
+ /**
+ * @brief This member contains the current transition map entry.
+ */
+ RtemsSchedulerReqIdentByProcessor_Entry entry;
+
+ /**
+ * @brief If this member is true, then the current transition variant
+ * should be skipped.
+ */
+ bool skip;
+ } Map;
+} RtemsSchedulerReqIdentByProcessor_Context;
+
+static RtemsSchedulerReqIdentByProcessor_Context
+ RtemsSchedulerReqIdentByProcessor_Instance;
+
+static const char * const RtemsSchedulerReqIdentByProcessor_PreDesc_CPUOwnedByScheduler[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessor_PreDesc_CPUIndex[] = {
+ "Invalid",
+ "Valid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdentByProcessor_PreDesc_Id[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqIdentByProcessor_PreDesc[] = {
+ RtemsSchedulerReqIdentByProcessor_PreDesc_CPUOwnedByScheduler,
+ RtemsSchedulerReqIdentByProcessor_PreDesc_CPUIndex,
+ RtemsSchedulerReqIdentByProcessor_PreDesc_Id,
+ NULL
+};
+
+static void RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_Prepare(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_Yes: {
+ /*
+ * While the processor specified by the ``cpu_index`` parameter is owned
+ * by a scheduler.
+ */
+ ctx->cpu_has_scheduler = true;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_No: {
+ /*
+ * While the processor specified by the ``cpu_index`` parameter is not
+ * owned by a scheduler.
+ */
+ ctx->cpu_has_scheduler = false;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Prepare(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx,
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Invalid: {
+ /*
+       * While the ``cpu_index`` parameter is greater than or equal to the
+ * processor maximum.
+ */
+ ctx->cpu_index = rtems_scheduler_get_processor_maximum();
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Valid: {
+ /*
+       * While the ``cpu_index`` parameter is less than the processor maximum.
+ */
+ if ( ctx->cpu_has_scheduler ) {
+ ctx->cpu_index = 0;
+ } else {
+ ctx->cpu_index = 1;
+ }
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Pre_Id_Prepare(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx,
+ RtemsSchedulerReqIdentByProcessor_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessor_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter references an object of type rtems_id.
+ */
+ ctx->id_value = INVALID_ID;
+ ctx->id = &ctx->id_value;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_Id_Null: {
+ /*
+ * While the ``id`` parameter is equal to NULL.
+ */
+ ctx->id = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Post_Status_Check(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx,
+ RtemsSchedulerReqIdentByProcessor_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessor_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_Status_InvName: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor() shall be
+ * RTEMS_INVALID_NAME.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_Status_IncStat: {
+ /*
+ * The return status of rtems_scheduler_ident_by_processor() shall be
+ * RTEMS_INVALID_NAME.
+ */
+ T_rsc( ctx->status, RTEMS_INCORRECT_STATE );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Post_IdVar_Check(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdentByProcessor_Post_IdVar_Set: {
+ /*
+ * The value of the object referenced by the ``id`` parameter shall be
+ * set to the identifier of the scheduler which owned the processor
+ * specified by the ``cpu_index`` parameter at some point during the call
+ * after the return of the rtems_scheduler_ident_by_processor() call.
+ */
+ T_eq_ptr( ctx->id, &ctx->id_value );
+ T_eq_u32( ctx->id_value, 0x0f010001 );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop: {
+ /*
+ * Objects referenced by the ``id`` parameter in past calls to
+ * rtems_scheduler_ident_by_processor() shall not be accessed by the
+ * rtems_scheduler_ident_by_processor() call.
+ */
+ T_eq_u32( ctx->id_value, INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqIdentByProcessor_Post_IdVar_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Setup(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ sc = rtems_scheduler_ident(
+ TEST_SCHEDULER_B_NAME,
+ &ctx->second_scheduler_id
+ );
+ T_rsc_success( sc );
+ #else
+ ctx->second_scheduler_id = INVALID_ID;
+ #endif
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Setup_Wrap( void *arg )
+{
+ RtemsSchedulerReqIdentByProcessor_Context *ctx;
+
+ ctx = arg;
+ ctx->Map.in_action_loop = false;
+ RtemsSchedulerReqIdentByProcessor_Setup( ctx );
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Prepare(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ ctx->id_value = INVALID_ID;
+}
+
+static void RtemsSchedulerReqIdentByProcessor_Action(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ #if defined(RTEMS_SMP)
+ rtems_status_code sc;
+
+ if ( !ctx->cpu_has_scheduler ) {
+ sc = rtems_scheduler_remove_processor( ctx->second_scheduler_id, 1 );
+ T_rsc_success( sc );
+ }
+ #endif
+
+ ctx->status = rtems_scheduler_ident_by_processor( ctx->cpu_index, ctx->id );
+
+ #if defined(RTEMS_SMP)
+ if ( !ctx->cpu_has_scheduler ) {
+ sc = rtems_scheduler_add_processor( ctx->second_scheduler_id, 1 );
+ T_rsc_success( sc );
+ }
+ #endif
+}
+
+static const RtemsSchedulerReqIdentByProcessor_Entry
+RtemsSchedulerReqIdentByProcessor_Entries[] = {
+ { 0, 1, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_InvName,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop },
+ { 0, 1, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop },
+ { 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_Ok,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Set },
+ { 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop },
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_IncStat,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop },
+#else
+ { 1, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_NA,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_NA },
+#endif
+#if defined(RTEMS_SMP)
+ { 0, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_InvAddr,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Nop }
+#else
+ { 1, 0, 0, 0, RtemsSchedulerReqIdentByProcessor_Post_Status_NA,
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_NA }
+#endif
+};
+
+static const uint8_t
+RtemsSchedulerReqIdentByProcessor_Map[] = {
+ 0, 1, 2, 3, 0, 1, 4, 5
+};
+
+static size_t RtemsSchedulerReqIdentByProcessor_Scope(
+ void *arg,
+ char *buf,
+ size_t n
+)
+{
+ RtemsSchedulerReqIdentByProcessor_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope(
+ RtemsSchedulerReqIdentByProcessor_PreDesc,
+ buf,
+ n,
+ ctx->Map.pcs
+ );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSchedulerReqIdentByProcessor_Fixture = {
+ .setup = RtemsSchedulerReqIdentByProcessor_Setup_Wrap,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqIdentByProcessor_Scope,
+ .initial_context = &RtemsSchedulerReqIdentByProcessor_Instance
+};
+
+static inline RtemsSchedulerReqIdentByProcessor_Entry
+RtemsSchedulerReqIdentByProcessor_PopEntry(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSchedulerReqIdentByProcessor_Entries[
+ RtemsSchedulerReqIdentByProcessor_Map[ index ]
+ ];
+}
+
+static void RtemsSchedulerReqIdentByProcessor_SetPreConditionStates(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ if ( ctx->Map.entry.Pre_CPUOwnedByScheduler_NA ) {
+ ctx->Map.pcs[ 0 ] = RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_NA;
+ } else {
+ ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+ }
+
+ ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+ ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+}
+
+static void RtemsSchedulerReqIdentByProcessor_TestVariant(
+ RtemsSchedulerReqIdentByProcessor_Context *ctx
+)
+{
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_Prepare(
+ ctx,
+ ctx->Map.pcs[ 0 ]
+ );
+ RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Prepare(
+ ctx,
+ ctx->Map.pcs[ 1 ]
+ );
+ RtemsSchedulerReqIdentByProcessor_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+ RtemsSchedulerReqIdentByProcessor_Action( ctx );
+ RtemsSchedulerReqIdentByProcessor_Post_Status_Check(
+ ctx,
+ ctx->Map.entry.Post_Status
+ );
+ RtemsSchedulerReqIdentByProcessor_Post_IdVar_Check(
+ ctx,
+ ctx->Map.entry.Post_IdVar
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqIdentByProcessor( void )
+ */
+T_TEST_CASE_FIXTURE(
+ RtemsSchedulerReqIdentByProcessor,
+ &RtemsSchedulerReqIdentByProcessor_Fixture
+)
+{
+ RtemsSchedulerReqIdentByProcessor_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pci[ 0 ] = RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_Yes;
+ ctx->Map.pci[ 0 ] < RtemsSchedulerReqIdentByProcessor_Pre_CPUOwnedByScheduler_NA;
+ ++ctx->Map.pci[ 0 ]
+ ) {
+ for (
+ ctx->Map.pci[ 1 ] = RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_Invalid;
+ ctx->Map.pci[ 1 ] < RtemsSchedulerReqIdentByProcessor_Pre_CPUIndex_NA;
+ ++ctx->Map.pci[ 1 ]
+ ) {
+ for (
+ ctx->Map.pci[ 2 ] = RtemsSchedulerReqIdentByProcessor_Pre_Id_Valid;
+ ctx->Map.pci[ 2 ] < RtemsSchedulerReqIdentByProcessor_Pre_Id_NA;
+ ++ctx->Map.pci[ 2 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqIdentByProcessor_PopEntry( ctx );
+
+ if ( ctx->Map.entry.Skip ) {
+ continue;
+ }
+
+ RtemsSchedulerReqIdentByProcessor_SetPreConditionStates( ctx );
+ RtemsSchedulerReqIdentByProcessor_Prepare( ctx );
+ RtemsSchedulerReqIdentByProcessor_TestVariant( ctx );
+ }
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-ident.c b/testsuites/validation/tc-scheduler-ident.c
new file mode 100644
index 0000000000..2517e1b50f
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-ident.c
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqIdent
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqIdent \
+ * spec:/rtems/scheduler/req/ident
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqIdent_Pre_Name_Invalid,
+ RtemsSchedulerReqIdent_Pre_Name_Valid,
+ RtemsSchedulerReqIdent_Pre_Name_NA
+} RtemsSchedulerReqIdent_Pre_Name;
+
+typedef enum {
+ RtemsSchedulerReqIdent_Pre_Id_Valid,
+ RtemsSchedulerReqIdent_Pre_Id_Null,
+ RtemsSchedulerReqIdent_Pre_Id_NA
+} RtemsSchedulerReqIdent_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqIdent_Post_Status_Ok,
+ RtemsSchedulerReqIdent_Post_Status_InvAddr,
+ RtemsSchedulerReqIdent_Post_Status_InvName,
+ RtemsSchedulerReqIdent_Post_Status_NA
+} RtemsSchedulerReqIdent_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqIdent_Post_IdVar_Set,
+ RtemsSchedulerReqIdent_Post_IdVar_Nop,
+ RtemsSchedulerReqIdent_Post_IdVar_NA
+} RtemsSchedulerReqIdent_Post_IdVar;
+
+typedef struct {
+ uint8_t Skip : 1;
+ uint8_t Pre_Name_NA : 1;
+ uint8_t Pre_Id_NA : 1;
+ uint8_t Post_Status : 2;
+ uint8_t Post_IdVar : 2;
+} RtemsSchedulerReqIdent_Entry;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/ident test case.
+ */
+typedef struct {
+  /**
+   * @brief This member provides the object referenced by the ``id`` parameter.
+   */
+  rtems_id id_value;
+
+  /**
+   * @brief This member contains the return value of the
+   *   rtems_scheduler_ident() call.
+   */
+  rtems_status_code status;
+
+  /**
+   * @brief This member specifies the ``name`` parameter value.
+   */
+  rtems_name name;
+
+  /**
+   * @brief This member specifies the ``id`` parameter value.
+   */
+  rtems_id *id;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 2 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    RtemsSchedulerReqIdent_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} RtemsSchedulerReqIdent_Context;
+
+static RtemsSchedulerReqIdent_Context
+ RtemsSchedulerReqIdent_Instance;
+
+static const char * const RtemsSchedulerReqIdent_PreDesc_Name[] = {
+ "Invalid",
+ "Valid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqIdent_PreDesc_Id[] = {
+ "Valid",
+ "Null",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqIdent_PreDesc[] = {
+ RtemsSchedulerReqIdent_PreDesc_Name,
+ RtemsSchedulerReqIdent_PreDesc_Id,
+ NULL
+};
+
+static void RtemsSchedulerReqIdent_Pre_Name_Prepare(
+ RtemsSchedulerReqIdent_Context *ctx,
+ RtemsSchedulerReqIdent_Pre_Name state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdent_Pre_Name_Invalid: {
+ /*
+ * While the ``name`` parameter is not associated with a scheduler.
+ */
+ ctx->name = 0;
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Pre_Name_Valid: {
+ /*
+ * While the ``name`` parameter is associated with a scheduler.
+ */
+ ctx->name = TEST_SCHEDULER_A_NAME;
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Pre_Name_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdent_Pre_Id_Prepare(
+ RtemsSchedulerReqIdent_Context *ctx,
+ RtemsSchedulerReqIdent_Pre_Id state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdent_Pre_Id_Valid: {
+ /*
+ * While the ``id`` parameter references an object of type rtems_id.
+ */
+ ctx->id = &ctx->id_value;
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Pre_Id_Null: {
+ /*
+ * While the ``id`` parameter is equal to NULL.
+ */
+ ctx->id = NULL;
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Pre_Id_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdent_Post_Status_Check(
+ RtemsSchedulerReqIdent_Context *ctx,
+ RtemsSchedulerReqIdent_Post_Status state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdent_Post_Status_Ok: {
+ /*
+ * The return status of rtems_scheduler_ident() shall be
+ * RTEMS_SUCCESSFUL.
+ */
+ T_rsc_success( ctx->status );
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Post_Status_InvAddr: {
+ /*
+ * The return status of rtems_scheduler_ident() shall be
+ * RTEMS_INVALID_ADDRESS.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_ADDRESS );
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Post_Status_InvName: {
+ /*
+ * The return status of rtems_scheduler_ident() shall be
+ * RTEMS_INVALID_NAME.
+ */
+ T_rsc( ctx->status, RTEMS_INVALID_NAME );
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Post_Status_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdent_Post_IdVar_Check(
+ RtemsSchedulerReqIdent_Context *ctx,
+ RtemsSchedulerReqIdent_Post_IdVar state
+)
+{
+ switch ( state ) {
+ case RtemsSchedulerReqIdent_Post_IdVar_Set: {
+ /*
+ * The value of the object referenced by the ``id`` parameter shall be
+ * set to the identifier of the scheduler with the lowest scheduler index
+ * and a name equal to the ``name`` parameter after the return of the
+ * rtems_scheduler_ident() call.
+ */
+ T_eq_ptr( ctx->id, &ctx->id_value );
+ T_eq_u32( ctx->id_value, 0x0f010001 );
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Post_IdVar_Nop: {
+ /*
+ * Objects referenced by the ``id`` parameter in past calls to
+ * rtems_scheduler_ident() shall not be accessed by the
+ * rtems_scheduler_ident() call.
+ */
+ T_eq_u32( ctx->id_value, INVALID_ID );
+ break;
+ }
+
+ case RtemsSchedulerReqIdent_Post_IdVar_NA:
+ break;
+ }
+}
+
+static void RtemsSchedulerReqIdent_Prepare(
+ RtemsSchedulerReqIdent_Context *ctx
+)
+{
+ ctx->id_value = INVALID_ID;
+}
+
+static void RtemsSchedulerReqIdent_Action(
+ RtemsSchedulerReqIdent_Context *ctx
+)
+{
+ ctx->status = rtems_scheduler_ident( ctx->name, ctx->id );
+}
+
+static const RtemsSchedulerReqIdent_Entry
+RtemsSchedulerReqIdent_Entries[] = {
+ { 0, 0, 0, RtemsSchedulerReqIdent_Post_Status_InvAddr,
+ RtemsSchedulerReqIdent_Post_IdVar_Nop },
+ { 0, 0, 0, RtemsSchedulerReqIdent_Post_Status_InvName,
+ RtemsSchedulerReqIdent_Post_IdVar_Nop },
+ { 0, 0, 0, RtemsSchedulerReqIdent_Post_Status_Ok,
+ RtemsSchedulerReqIdent_Post_IdVar_Set }
+};
+
+static const uint8_t
+RtemsSchedulerReqIdent_Map[] = {
+ 1, 0, 2, 0
+};
+
+static size_t RtemsSchedulerReqIdent_Scope( void *arg, char *buf, size_t n )
+{
+ RtemsSchedulerReqIdent_Context *ctx;
+
+ ctx = arg;
+
+ if ( ctx->Map.in_action_loop ) {
+ return T_get_scope( RtemsSchedulerReqIdent_PreDesc, buf, n, ctx->Map.pcs );
+ }
+
+ return 0;
+}
+
+static T_fixture RtemsSchedulerReqIdent_Fixture = {
+ .setup = NULL,
+ .stop = NULL,
+ .teardown = NULL,
+ .scope = RtemsSchedulerReqIdent_Scope,
+ .initial_context = &RtemsSchedulerReqIdent_Instance
+};
+
+static inline RtemsSchedulerReqIdent_Entry RtemsSchedulerReqIdent_PopEntry(
+ RtemsSchedulerReqIdent_Context *ctx
+)
+{
+ size_t index;
+
+ index = ctx->Map.index;
+ ctx->Map.index = index + 1;
+ return RtemsSchedulerReqIdent_Entries[
+ RtemsSchedulerReqIdent_Map[ index ]
+ ];
+}
+
+static void RtemsSchedulerReqIdent_TestVariant(
+ RtemsSchedulerReqIdent_Context *ctx
+)
+{
+ RtemsSchedulerReqIdent_Pre_Name_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+ RtemsSchedulerReqIdent_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+ RtemsSchedulerReqIdent_Action( ctx );
+ RtemsSchedulerReqIdent_Post_Status_Check( ctx, ctx->Map.entry.Post_Status );
+ RtemsSchedulerReqIdent_Post_IdVar_Check( ctx, ctx->Map.entry.Post_IdVar );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqIdent( void )
+ */
+T_TEST_CASE_FIXTURE( RtemsSchedulerReqIdent, &RtemsSchedulerReqIdent_Fixture )
+{
+ RtemsSchedulerReqIdent_Context *ctx;
+
+ ctx = T_fixture_context();
+ ctx->Map.in_action_loop = true;
+ ctx->Map.index = 0;
+
+ for (
+ ctx->Map.pcs[ 0 ] = RtemsSchedulerReqIdent_Pre_Name_Invalid;
+ ctx->Map.pcs[ 0 ] < RtemsSchedulerReqIdent_Pre_Name_NA;
+ ++ctx->Map.pcs[ 0 ]
+ ) {
+ for (
+ ctx->Map.pcs[ 1 ] = RtemsSchedulerReqIdent_Pre_Id_Valid;
+ ctx->Map.pcs[ 1 ] < RtemsSchedulerReqIdent_Pre_Id_NA;
+ ++ctx->Map.pcs[ 1 ]
+ ) {
+ ctx->Map.entry = RtemsSchedulerReqIdent_PopEntry( ctx );
+ RtemsSchedulerReqIdent_Prepare( ctx );
+ RtemsSchedulerReqIdent_TestVariant( ctx );
+ }
+ }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-non-smp.c b/testsuites/validation/tc-scheduler-non-smp.c
new file mode 100644
index 0000000000..a3eec8fd2d
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-non-smp.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerValNonSmp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerValNonSmp \
+ * spec:/rtems/scheduler/val/non-smp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNonSmp
+ *
+ * @brief This test case collection provides validation test cases for non-SMP
+ * requirements of the @ref RTEMSAPIClassicScheduler.
+ *
+ * This test case performs the following actions:
+ *
+ * - Check that calling rtems_scheduler_get_processor() is a constant
+ * expression which evaluates to zero.
+ *
+ * - Check that calling rtems_scheduler_get_processor_maximum() is a constant
+ *   expression which evaluates to one.
+ *
+ * @{
+ */
+
+/**
+ * @brief Check that calling rtems_scheduler_get_processor() is a constant
+ * expression which evaluates to zero.
+ */
+static void RtemsSchedulerValNonSmp_Action_0( void )
+{
+ RTEMS_STATIC_ASSERT( rtems_scheduler_get_processor() == 0, GET_PROCESSOR );
+}
+
+/**
+ * @brief Check that calling rtems_scheduler_get_processor_maximum() is a
+ *   constant expression which evaluates to one.
+ */
+static void RtemsSchedulerValNonSmp_Action_1( void )
+{
+ RTEMS_STATIC_ASSERT(
+ rtems_scheduler_get_processor_maximum() == 1,
+ GET_PROCESSOR_MAXIMUM
+ );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerValNonSmp( void )
+ */
+T_TEST_CASE( RtemsSchedulerValNonSmp )
+{
+ RtemsSchedulerValNonSmp_Action_0();
+ RtemsSchedulerValNonSmp_Action_1();
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-remove-processor.c b/testsuites/validation/tc-scheduler-remove-processor.c
new file mode 100644
index 0000000000..6be66fa961
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-remove-processor.c
@@ -0,0 +1,1475 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerReqRemoveProcessor
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/smpbarrier.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerReqRemoveProcessor \
+ * spec:/rtems/scheduler/req/remove-processor
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_Id_Invalid,
+ RtemsSchedulerReqRemoveProcessor_Pre_Id_Scheduler,
+ RtemsSchedulerReqRemoveProcessor_Pre_Id_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_Id;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Valid,
+ RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Invalid,
+ RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_Owned_Yes,
+ RtemsSchedulerReqRemoveProcessor_Pre_Owned_No,
+ RtemsSchedulerReqRemoveProcessor_Pre_Owned_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_Owned;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_Last_Yes,
+ RtemsSchedulerReqRemoveProcessor_Pre_Last_No,
+ RtemsSchedulerReqRemoveProcessor_Pre_Last_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_Last;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_Home_Yes,
+ RtemsSchedulerReqRemoveProcessor_Pre_Home_No,
+ RtemsSchedulerReqRemoveProcessor_Pre_Home_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_Home;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_Yes,
+ RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_No,
+ RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Idle,
+ RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Task,
+ RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_TaskIdle,
+ RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Helping,
+ RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_NA
+} RtemsSchedulerReqRemoveProcessor_Pre_UsedBy;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Post_Status_Ok,
+ RtemsSchedulerReqRemoveProcessor_Post_Status_InvId,
+ RtemsSchedulerReqRemoveProcessor_Post_Status_InvNum,
+ RtemsSchedulerReqRemoveProcessor_Post_Status_InUse,
+ RtemsSchedulerReqRemoveProcessor_Post_Status_NA
+} RtemsSchedulerReqRemoveProcessor_Post_Status;
+
+typedef enum {
+ RtemsSchedulerReqRemoveProcessor_Post_Removed_Yes,
+ RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop,
+ RtemsSchedulerReqRemoveProcessor_Post_Removed_NA
+} RtemsSchedulerReqRemoveProcessor_Post_Removed;
+
+typedef struct {
+ uint16_t Skip : 1;
+ uint16_t Pre_Id_NA : 1;
+ uint16_t Pre_CPUIndex_NA : 1;
+ uint16_t Pre_Owned_NA : 1;
+ uint16_t Pre_Last_NA : 1;
+ uint16_t Pre_Home_NA : 1;
+ uint16_t Pre_RequiredByAffinity_NA : 1;
+ uint16_t Pre_UsedBy_NA : 1;
+ uint16_t Post_Status : 3;
+ uint16_t Post_Removed : 2;
+} RtemsSchedulerReqRemoveProcessor_Entry;
+
+typedef enum {
+ WORKER_A,
+ WORKER_B,
+ WORKER_C,
+ WORKER_COUNT
+} WorkerIndex;
+
+/**
+ * @brief Test context for spec:/rtems/scheduler/req/remove-processor test
+ * case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the runner identifier.
+   */
+  rtems_id runner_id;
+
+  /**
+   * @brief This member contains the worker identifiers.
+   */
+  rtems_id worker_id[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the mutex identifier.
+   */
+  rtems_id mutex_id;
+
+  /**
+   * @brief This member contains the sticky mutex identifier.
+   */
+  rtems_id sticky_id;
+
+  /**
+   * @brief This member contains the worker busy status.
+   */
+  volatile bool busy[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the worker busy counters.
+   */
+  volatile uint32_t busy_counter[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the barrier to synchronize the runner and the
+   *   workers.
+   */
+  SMP_barrier_Control barrier;
+
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+
+  /**
+   * @brief This member provides the context to wrap thread queue operations.
+   */
+  WrapThreadQueueContext wrap_tq_ctx;
+
+  /**
+   * @brief If this member is true, then the processor to remove shall be owned
+   *   by the scheduler.
+   */
+  bool owned;
+
+  /**
+   * @brief If this member is true, then the processor to remove shall be the
+   *   last processor of the scheduler.
+   */
+  bool last;
+
+  /**
+   * @brief If this member is true, then at least one non-idle task shall use
+   *   the scheduler as its home scheduler.
+   */
+  bool home;
+
+  /**
+   * @brief If this member is true, then at least one non-idle task shall
+   *   require the processor to remove due to its affinity set.
+   */
+  bool required_by_affinity;
+
+  /**
+   * @brief If this member is true, then the processor to remove shall be used
+   *   by an idle task.
+   */
+  bool idle;
+
+  /**
+   * @brief If this member is true, then the processor to remove shall be used
+   *   by a task or on behalf of a task which uses the scheduler as its home
+   *   scheduler.
+   */
+  bool task;
+
+  /**
+   * @brief If this member is true, then the processor to remove shall be used
+   *   by a task which uses the scheduler as a helping scheduler.
+   */
+  bool helping;
+
+  /**
+   * @brief This member provides the scheduler operation records.
+   */
+  T_scheduler_log_4 scheduler_log;
+
+  /**
+   * @brief This member contains the return value of the
+   *   rtems_scheduler_remove_processor() call.
+   */
+  rtems_status_code status;
+
+  /**
+   * @brief This member specifies the ``scheduler_id`` parameter value.
+   */
+  rtems_id id;
+
+  /**
+   * @brief This member specifies the ``cpu_index`` parameter value.
+   */
+  uint32_t cpu_index;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition indices for the next
+     *   action.
+     */
+    size_t pci[ 7 ];
+
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 7 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    RtemsSchedulerReqRemoveProcessor_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} RtemsSchedulerReqRemoveProcessor_Context;
+
+static RtemsSchedulerReqRemoveProcessor_Context
+ RtemsSchedulerReqRemoveProcessor_Instance;
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_Id[] = {
+ "Invalid",
+ "Scheduler",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_CPUIndex[] = {
+ "Valid",
+ "Invalid",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_Owned[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_Last[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_Home[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_RequiredByAffinity[] = {
+ "Yes",
+ "No",
+ "NA"
+};
+
+static const char * const RtemsSchedulerReqRemoveProcessor_PreDesc_UsedBy[] = {
+ "Idle",
+ "Task",
+ "TaskIdle",
+ "Helping",
+ "NA"
+};
+
+static const char * const * const RtemsSchedulerReqRemoveProcessor_PreDesc[] = {
+ RtemsSchedulerReqRemoveProcessor_PreDesc_Id,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_CPUIndex,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_Owned,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_Last,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_Home,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_RequiredByAffinity,
+ RtemsSchedulerReqRemoveProcessor_PreDesc_UsedBy,
+ NULL
+};
+
+typedef RtemsSchedulerReqRemoveProcessor_Context Context;
+
+static void DoRemoveProcessor( Context *ctx )
+{
+ T_scheduler_log *log;
+
+ log = T_scheduler_record_4( &ctx->scheduler_log );
+ T_null( log );
+
+ ctx->status = rtems_scheduler_remove_processor( ctx->id, ctx->cpu_index );
+
+ log = T_scheduler_record( NULL );
+ T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+
+ if ( ctx->status == RTEMS_SUCCESSFUL ) {
+ AddProcessor( ctx->id, ctx->cpu_index );
+ }
+}
+
+#if defined(RTEMS_SMP)
+typedef enum {
+ EVENT_SYNC_RUNNER = RTEMS_EVENT_0,
+ EVENT_OBTAIN = RTEMS_EVENT_1,
+ EVENT_RELEASE = RTEMS_EVENT_2,
+ EVENT_STICKY_OBTAIN = RTEMS_EVENT_3,
+ EVENT_STICKY_RELEASE = RTEMS_EVENT_4,
+ EVENT_RESTART = RTEMS_EVENT_5,
+ EVENT_BUSY = RTEMS_EVENT_6,
+ EVENT_SYNC_RUNNER_LATE = RTEMS_EVENT_7
+} Event;
+
+static void Barriers( void *arg )
+{
+ Context *ctx;
+ SMP_barrier_State barrier_state;
+
+ ctx = arg;
+ _SMP_barrier_State_initialize( &barrier_state );
+
+ /* A */
+ _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+
+ /* B */
+ _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+}
+
+static void RequestISR( void *arg )
+{
+ Context *ctx;
+
+ ctx = arg;
+ ctx->request.handler = Barriers;
+ ctx->request.arg = ctx;
+ CallWithinISRSubmit( &ctx->request );
+}
+
+/*
+ * Sends the event plus a synchronization request to the worker, waits for the
+ * synchronization echo, and then waits until the worker stopped executing.
+ */
+static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
+{
+  SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
+  ReceiveAllEvents( EVENT_SYNC_RUNNER );
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+/* Starts the busy loop of the worker (see EVENT_BUSY in Worker()) */
+static void MakeBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy_counter[ worker ] = 0;
+  ctx->busy[ worker ] = true;
+  SendEvents( ctx->worker_id[ worker ], EVENT_BUSY );
+}
+
+/*
+ * Starts the busy loop of the worker and waits for the synchronization echo,
+ * so the worker has received the events when this function returns.
+ */
+static void MakeBusyAndSync( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy_counter[ worker ] = 0;
+  ctx->busy[ worker ] = true;
+  SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | EVENT_BUSY );
+  ReceiveAllEvents( EVENT_SYNC_RUNNER );
+}
+
+/* Ends the busy loop of the worker (see EVENT_BUSY in Worker()) */
+static void StopBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy[ worker ] = false;
+}
+
+/* Ends the busy loop of the worker and waits until it stopped executing */
+static void StopBusyAndWait( Context *ctx, WorkerIndex worker )
+{
+  StopBusy( ctx, worker );
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+/*
+ * Spins until the worker incremented its busy counter, which proves that it
+ * entered the busy loop.  NOTE(review): this relies on the counter being
+ * reloaded on each iteration — confirm it is declared volatile in the
+ * context structure.
+ */
+static void WaitForBusy( Context *ctx, WorkerIndex worker )
+{
+  while ( ctx->busy_counter[ worker ] == 0 ) {
+    /* Wait */
+  }
+}
+
+/*
+ * Covers the case in which the removed processor is used by a task for which
+ * the scheduler is only a helping (not the home) scheduler.  The barrier
+ * rendezvous points A and B pair with Barriers(), which runs in interrupt
+ * context during the restart of worker B (see EVENT_RESTART in Worker()).
+ */
+static void RemoveWithHelpingOnly( Context *ctx )
+{
+  SMP_barrier_State barrier_state;
+
+  /*
+   * Use the mutex and the worker to construct the removal of the last
+   * processor of a scheduler while a thread is scheduled.
+   */
+
+  _SMP_barrier_Control_initialize( &ctx->barrier );
+  _SMP_barrier_State_initialize( &barrier_state );
+
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+
+  /* Let worker B help worker A */
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+
+  /*
+   * Restart the worker B to withdraw the help offer and wait on barriers.
+   * Move worker B to scheduler A.  Remove the processor while worker A is
+   * scheduled.
+   */
+
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RESTART );
+
+  /* A */
+  _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+
+  ctx->id = SCHEDULER_B_ID;
+  ctx->cpu_index = 1;
+  DoRemoveProcessor( ctx );
+
+  /* B */
+  _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+
+  /* Clean up all used resources */
+  SetSelfPriority( PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  /* NOTE(review): presumably gives the workers time to finish — confirm */
+  T_busy(100000);
+}
+
+/*
+ * Common body of the worker tasks.  Receives events in a loop; each event
+ * bit selects one action (see the Event enum above).
+ */
+static void Worker( rtems_task_argument arg, WorkerIndex worker )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  while ( true ) {
+    rtems_event_set events;
+
+    events = ReceiveAnyEvents();
+
+    if ( ( events & EVENT_SYNC_RUNNER ) != 0 ) {
+      /* Echo the synchronization request back to the runner */
+      SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
+    }
+
+    if ( ( events & EVENT_OBTAIN ) != 0 ) {
+      ObtainMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_RELEASE ) != 0 ) {
+      ReleaseMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_STICKY_OBTAIN ) != 0 ) {
+      ObtainMutex( ctx->sticky_id );
+    }
+
+    if ( ( events & EVENT_STICKY_RELEASE ) != 0 ) {
+      ReleaseMutex( ctx->sticky_id );
+    }
+
+    if ( ( events & EVENT_RESTART ) != 0 ) {
+      rtems_status_code sc;
+
+      /* Raising the runner priority moves this worker to processor 1 */
+      T_eq_u32( rtems_scheduler_get_processor(), 0 );
+      SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
+      T_eq_u32( rtems_scheduler_get_processor(), 1 );
+
+      if ( !ctx->last ) {
+        /* Temporarily move processor 2 from scheduler C to scheduler B */
+        SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_LOW );
+        RemoveProcessor( SCHEDULER_C_ID, 2 );
+        AddProcessor( SCHEDULER_B_ID, 2 );
+      }
+
+      /* Arrange for the wrapped thread queue extract during the restart */
+      WrapThreadQueueExtract(
+        &ctx->wrap_tq_ctx,
+        GetThread( ctx->worker_id[ WORKER_B ] )
+      );
+
+      sc = rtems_task_restart(
+        ctx->worker_id[ WORKER_B ],
+        (rtems_task_argument) ctx
+      );
+      T_rsc_success( sc );
+
+      T_eq_u32( rtems_scheduler_get_processor(), 0 );
+
+      if ( !ctx->last ) {
+        /* Undo the temporary processor move from above */
+        RemoveProcessor( SCHEDULER_B_ID, 2 );
+        AddProcessor( SCHEDULER_C_ID, 2 );
+        SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_C_ID, PRIO_NORMAL );
+      }
+    }
+
+    if ( ( events & EVENT_BUSY ) != 0 ) {
+      /* Spin until StopBusy() clears the flag; the counter signals progress */
+      while ( ctx->busy[ worker ] ) {
+        ++ctx->busy_counter[ worker ];
+      }
+    }
+
+    if ( ( events & EVENT_SYNC_RUNNER_LATE ) != 0 ) {
+      SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
+    }
+  }
+}
+
+/* Entry point of worker A */
+static void WorkerA( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_A );
+}
+
+/* Entry point of worker B */
+static void WorkerB( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_B );
+}
+
+/* Entry point of worker C */
+static void WorkerC( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_C );
+}
+#endif
+
+/* Selects the ``scheduler_id`` argument according to the Id pre-condition. */
+static void RtemsSchedulerReqRemoveProcessor_Pre_Id_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_Id state
+)
+{
+  if ( state == RtemsSchedulerReqRemoveProcessor_Pre_Id_Invalid ) {
+    /*
+     * While the ``scheduler_id`` parameter is not associated with a
+     * scheduler.
+     */
+    ctx->id = INVALID_ID;
+  } else if ( state == RtemsSchedulerReqRemoveProcessor_Pre_Id_Scheduler ) {
+    /*
+     * While the ``scheduler_id`` parameter is associated with a scheduler.
+     */
+    ctx->id = SCHEDULER_A_ID;
+  }
+  /* Pre_Id_NA: leave the context unchanged */
+}
+
+/* Selects the ``cpu_index`` argument according to the CPUIndex pre-condition. */
+static void RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex state
+)
+{
+  if ( state == RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Valid ) {
+    /*
+     * While the ``cpu_index`` parameter is less than the configured
+     * processor maximum.
+     */
+    ctx->cpu_index = 0;
+  } else if ( state == RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Invalid ) {
+    /*
+     * While the ``cpu_index`` parameter is greater than or equal to the
+     * configured processor maximum.
+     */
+    ctx->cpu_index = rtems_configuration_get_maximum_processors();
+  }
+  /* Pre_CPUIndex_NA: leave the context unchanged */
+}
+
+/*
+ * Records whether the processor specified by the ``cpu_index`` parameter
+ * shall be owned by the scheduler specified by the ``scheduler_id``
+ * parameter.  The N/A state leaves the context unchanged.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Pre_Owned_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_Owned state
+)
+{
+  if ( state != RtemsSchedulerReqRemoveProcessor_Pre_Owned_NA ) {
+    ctx->owned = ( state == RtemsSchedulerReqRemoveProcessor_Pre_Owned_Yes );
+  }
+}
+
+/*
+ * Records whether the processor specified by the ``cpu_index`` parameter
+ * shall be the last processor owned by the scheduler specified by the
+ * ``scheduler_id`` parameter.  The N/A state leaves the context unchanged.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Pre_Last_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_Last state
+)
+{
+  if ( state != RtemsSchedulerReqRemoveProcessor_Pre_Last_NA ) {
+    ctx->last = ( state == RtemsSchedulerReqRemoveProcessor_Pre_Last_Yes );
+  }
+}
+
+/* Records whether a non-idle task shall use the scheduler as home scheduler. */
+static void RtemsSchedulerReqRemoveProcessor_Pre_Home_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_Home state
+)
+{
+  switch ( state ) {
+    case RtemsSchedulerReqRemoveProcessor_Pre_Home_Yes: {
+      /*
+       * While at least one non-idle task exists which uses the scheduler
+       * specified by the ``scheduler_id`` parameter as its home scheduler.
+       */
+      ctx->home = true;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_Home_No: {
+      /*
+       * While no non-idle task exists which uses the scheduler specified by
+       * the ``scheduler_id`` parameter as its home scheduler.
+       */
+      ctx->home = false;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_Home_NA:
+      break;
+  }
+}
+
+/* Records whether a home task's affinity set shall require the processor. */
+static void RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity state
+)
+{
+  switch ( state ) {
+    case RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_Yes: {
+      /*
+       * While at least one non-idle task which uses the scheduler specified
+       * by the ``scheduler_id`` parameter as its home scheduler exists whose
+       * processor affinity set requires the processor specified by the
+       * ``cpu_index`` parameter.
+       */
+      ctx->required_by_affinity = true;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_No: {
+      /*
+       * While no non-idle task which uses the scheduler specified by the
+       * ``scheduler_id`` parameter as its home scheduler exists whose
+       * processor affinity set requires the processor specified by the
+       * ``cpu_index`` parameter.
+       */
+      ctx->required_by_affinity = false;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_NA:
+      break;
+  }
+}
+
+/* Records which kind of thread shall use the processor to be removed. */
+static void RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Pre_UsedBy state
+)
+{
+  switch ( state ) {
+    case RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Idle: {
+      /*
+       * While the processor specified by the ``cpu_index`` parameter is used
+       * by an idle task.
+       */
+      ctx->idle = true;
+      ctx->task = false;
+      ctx->helping = false;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Task: {
+      /*
+       * While the processor specified by the ``cpu_index`` parameter is used
+       * by a task which uses the scheduler specified by the
+       * ``scheduler_id`` parameter as its home scheduler.
+       */
+      ctx->idle = false;
+      ctx->task = true;
+      ctx->helping = false;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_TaskIdle: {
+      /*
+       * While the processor specified by the ``cpu_index`` parameter is used
+       * by an idle task on behalf of a task which uses the scheduler
+       * specified by the ``scheduler_id`` parameter as its home scheduler.
+       */
+      ctx->idle = true;
+      ctx->task = true;
+      ctx->helping = false;
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Helping: {
+      /*
+       * While the processor specified by the ``cpu_index`` parameter is used
+       * by a task which uses the scheduler specified by the
+       * ``scheduler_id`` parameter as a helping scheduler.
+       */
+      /* The non-last helping variants need at least three processors */
+      if ( !ctx->last && rtems_scheduler_get_processor_maximum() < 3 ) {
+        ctx->Map.skip = true;
+      } else {
+        ctx->idle = false;
+        ctx->task = false;
+        ctx->helping = true;
+      }
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_NA:
+      break;
+  }
+}
+
+/*
+ * Checks that the status returned by rtems_scheduler_remove_processor()
+ * matches the expected Status post-condition state.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Post_Status_Check(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Post_Status state
+)
+{
+  rtems_status_code expected;
+
+  switch ( state ) {
+    case RtemsSchedulerReqRemoveProcessor_Post_Status_Ok:
+      /* The return status shall be RTEMS_SUCCESSFUL. */
+      expected = RTEMS_SUCCESSFUL;
+      break;
+    case RtemsSchedulerReqRemoveProcessor_Post_Status_InvId:
+      /* The return status shall be RTEMS_INVALID_ID. */
+      expected = RTEMS_INVALID_ID;
+      break;
+    case RtemsSchedulerReqRemoveProcessor_Post_Status_InvNum:
+      /* The return status shall be RTEMS_INVALID_NUMBER. */
+      expected = RTEMS_INVALID_NUMBER;
+      break;
+    case RtemsSchedulerReqRemoveProcessor_Post_Status_InUse:
+      /* The return status shall be RTEMS_RESOURCE_IN_USE. */
+      expected = RTEMS_RESOURCE_IN_USE;
+      break;
+    case RtemsSchedulerReqRemoveProcessor_Post_Status_NA:
+    default:
+      return;
+  }
+
+  T_rsc( ctx->status, expected );
+}
+
+/*
+ * Checks the Removed post-condition state by inspecting the scheduler
+ * operations recorded by DoRemoveProcessor().
+ */
+static void RtemsSchedulerReqRemoveProcessor_Post_Removed_Check(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  RtemsSchedulerReqRemoveProcessor_Post_Removed state
+)
+{
+  switch ( state ) {
+    case RtemsSchedulerReqRemoveProcessor_Post_Removed_Yes: {
+      /*
+       * The processor specified by the ``cpu_index`` parameter shall be
+       * removed from the scheduler specified by the ``scheduler_id`` by the
+       * rtems_scheduler_remove_processor() call.
+       */
+      if ( ctx->home && ctx->helping ) {
+        /*
+         * For these test scenarios we use scheduler A in which the runner
+         * remains scheduled.  So, an ask for help request is issued, when the
+         * processor allocated to a task which uses the scheduler as a helping
+         * scheduler is removed.
+         */
+        T_eq_u32( ctx->id, SCHEDULER_A_ID );
+        T_eq_sz( ctx->scheduler_log.header.recorded, 3 );
+        T_eq_int(
+          ctx->scheduler_log.events[ 0 ].operation,
+          T_SCHEDULER_REMOVE_PROCESSOR
+        );
+        T_eq_int(
+          ctx->scheduler_log.events[ 1 ].operation,
+          T_SCHEDULER_ASK_FOR_HELP
+        );
+        T_eq_int(
+          ctx->scheduler_log.events[ 2 ].operation,
+          T_SCHEDULER_ASK_FOR_HELP
+        );
+      } else {
+        /* Only the remove processor operation itself was recorded */
+        T_eq_sz( ctx->scheduler_log.header.recorded, 1 );
+        T_eq_int(
+          ctx->scheduler_log.events[ 0 ].operation,
+          T_SCHEDULER_REMOVE_PROCESSOR
+        );
+      }
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop: {
+      /*
+       * No processor shall be removed from a scheduler by the
+       * rtems_scheduler_remove_processor() call.
+       */
+      T_eq_sz( ctx->scheduler_log.header.recorded, 0 );
+      break;
+    }
+
+    case RtemsSchedulerReqRemoveProcessor_Post_Removed_NA:
+      break;
+  }
+}
+
+/*
+ * Test fixture setup: creates the mutex, a sticky (MrsP) semaphore, the
+ * worker tasks, and pins the runner to processor 0.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Setup(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  #if defined(RTEMS_SMP)
+  rtems_status_code sc;
+  rtems_task_priority priority;
+
+  ctx->runner_id = rtems_task_self();
+  ctx->mutex_id = CreateMutex();
+
+  /* Binary semaphore with the multiprocessor resource sharing protocol */
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'S', 'T', 'K', 'Y' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_NORMAL,
+    &ctx->sticky_id
+  );
+  T_rsc_success( sc );
+
+  /* Set the ceiling priority of the sticky semaphore for scheduler B */
+  sc = rtems_semaphore_set_priority(
+    ctx->sticky_id,
+    SCHEDULER_B_ID,
+    PRIO_NORMAL,
+    &priority
+  );
+  T_rsc_success( sc );
+
+  if ( rtems_scheduler_get_processor_maximum() >= 3 ) {
+    /* With at least three processors, prepare scheduler C and worker C */
+    sc = rtems_semaphore_set_priority(
+      ctx->sticky_id,
+      SCHEDULER_C_ID,
+      PRIO_LOW,
+      &priority
+    );
+    T_rsc_success( sc );
+
+    ctx->worker_id[ WORKER_C ] = CreateTask( "WRKC", PRIO_NORMAL );
+    SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_C_ID, PRIO_NORMAL );
+    StartTask( ctx->worker_id[ WORKER_C ], WorkerC, ctx );
+
+    if ( rtems_scheduler_get_processor_maximum() >= 4 ) {
+      /* Scheduler C only needs processor 2 for this test */
+      RemoveProcessor( SCHEDULER_C_ID, 3 );
+    }
+  }
+
+  /* Pin the runner to processor 0 */
+  SetSelfPriority( PRIO_NORMAL );
+  SetSelfAffinityOne( 0 );
+
+  ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
+
+  ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
+
+  /* Let RequestISR() run when a thread queue extract happens (see Worker) */
+  WrapThreadQueueInitialize( &ctx->wrap_tq_ctx, RequestISR, ctx );
+  #endif
+}
+
+/* Fixture adapter: clears the action-loop flag and runs the setup. */
+static void RtemsSchedulerReqRemoveProcessor_Setup_Wrap( void *arg )
+{
+  RtemsSchedulerReqRemoveProcessor_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  RtemsSchedulerReqRemoveProcessor_Setup( ctx );
+}
+
+/*
+ * Test fixture teardown: deletes the worker tasks and synchronization
+ * objects and restores the initial processor and affinity configuration.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Teardown(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  #if defined(RTEMS_SMP)
+  DeleteTask( ctx->worker_id[ WORKER_A ] );
+  DeleteTask( ctx->worker_id[ WORKER_B ] );
+  DeleteTask( ctx->worker_id[ WORKER_C ] );
+  DeleteMutex( ctx->mutex_id );
+  DeleteMutex( ctx->sticky_id );
+  WrapThreadQueueDestroy( &ctx->wrap_tq_ctx );
+
+  if ( rtems_scheduler_get_processor_maximum() >= 4 ) {
+    /* Undo the processor removal done in Setup() */
+    AddProcessor( SCHEDULER_C_ID, 3 );
+  }
+
+  RestoreRunnerPriority();
+  SetSelfAffinityAll();
+  #endif
+}
+
+/* Fixture adapter: clears the action-loop flag and runs the teardown. */
+static void RtemsSchedulerReqRemoveProcessor_Teardown_Wrap( void *arg )
+{
+  RtemsSchedulerReqRemoveProcessor_Context *ctx = arg;
+
+  ctx->Map.in_action_loop = false;
+  RtemsSchedulerReqRemoveProcessor_Teardown( ctx );
+}
+
+/* Per-variant preparation: reset the status to a value the directive
+ * cannot return so a missing call is detected. */
+static void RtemsSchedulerReqRemoveProcessor_Prepare(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  ctx->status = RTEMS_NOT_IMPLEMENTED;
+}
+
+/*
+ * Constructs the system state selected by the pre-conditions and performs
+ * the rtems_scheduler_remove_processor() call.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Action(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  /*
+   * The error cases and the removal of a processor required by the affinity
+   * of a home task need no special scenario construction.
+   */
+  if (
+    ctx->id == INVALID_ID ||
+    ctx->cpu_index == rtems_configuration_get_maximum_processors() ||
+    ( ctx->owned && ctx->last && ctx->home && ctx->required_by_affinity &&
+      ( ctx->task || ctx->idle ) )
+  ) {
+    DoRemoveProcessor( ctx );
+  } else {
+    #if defined(RTEMS_SMP)
+    if ( ctx->owned && !ctx->home && ctx->helping ) {
+      /* Helping-only scenario needs the barrier/restart construction */
+      RemoveWithHelpingOnly( ctx );
+    } else {
+      if ( ctx->owned ) {
+        rtems_id worker_a;
+        rtems_id worker_b;
+
+        worker_a = ctx->worker_id[ WORKER_A ];
+        worker_b = ctx->worker_id[ WORKER_B ];
+
+        ctx->cpu_index = 1;
+
+        if ( ctx->last ) {
+          /* Processor 1 is the last processor of scheduler B */
+          ctx->id = SCHEDULER_B_ID;
+        } else {
+          /* Lend processor 1 to scheduler A so it is not the last one */
+          RemoveProcessor( SCHEDULER_B_ID, 1 );
+          AddProcessor( SCHEDULER_A_ID, 1 );
+        }
+
+        if ( ctx->home ) {
+          /* Make worker A a home task of the scheduler under test */
+          SetScheduler( worker_a, ctx->id, PRIO_LOW );
+
+          if ( ctx->required_by_affinity ) {
+            SetAffinityOne( worker_a, 1 );
+          } else {
+            SetAffinityAll( worker_a );
+          }
+        }
+
+        if ( ctx->idle ) {
+          if ( ctx->task ) {
+            /* Sticky obtain plus suspend leaves an idle on behalf of A */
+            SendAndSync( ctx, WORKER_A, EVENT_STICKY_OBTAIN );
+            SuspendTask( worker_a );
+          }
+        } else if ( ctx->task ) {
+          /* Keep worker A executing on the processor to be removed */
+          MakeBusy( ctx, WORKER_A );
+        } else if ( ctx->helping ) {
+          T_true( ctx->home );
+
+          if ( ctx->last ) {
+            SendEvents( worker_b, EVENT_OBTAIN );
+            SetPriority( worker_b, PRIO_LOW );
+          } else {
+            SetScheduler( worker_b, SCHEDULER_C_ID, PRIO_LOW );
+            SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+            MakeBusyAndSync( ctx, WORKER_C );
+          }
+
+          SendAndSync( ctx, WORKER_A, EVENT_OBTAIN );
+          MakeBusy( ctx, WORKER_B );
+          WaitForBusy( ctx, WORKER_B );
+        }
+
+        DoRemoveProcessor( ctx );
+
+        /* Tear down the scenario in reverse order */
+        if ( ctx->idle ) {
+          if ( ctx->task ) {
+            ResumeTask( worker_a );
+            SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
+          }
+        } else if ( ctx->task ) {
+          StopBusyAndWait( ctx, WORKER_A );
+        } else if ( ctx->helping ) {
+          StopBusy( ctx, WORKER_B );
+
+          if ( ctx->last ) {
+            SetPriority( worker_b, PRIO_HIGH );
+            SendEvents( worker_b, EVENT_RELEASE );
+          } else {
+            StopBusyAndWait( ctx, WORKER_C );
+            SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+            SetScheduler( worker_b, SCHEDULER_A_ID, PRIO_HIGH );
+          }
+
+          WaitForExecutionStop( worker_b );
+          SendAndSync( ctx, WORKER_A, EVENT_RELEASE );
+        }
+
+        SetAffinityAll( worker_a );
+        SetScheduler( worker_a, SCHEDULER_A_ID, PRIO_HIGH );
+
+        if ( !ctx->last ) {
+          /* Return processor 1 to scheduler B */
+          RemoveProcessor( SCHEDULER_A_ID, 1 );
+          AddProcessor( SCHEDULER_B_ID, 1 );
+        }
+      } else {
+        /* Processor 0 is owned by scheduler A, not by scheduler B */
+        ctx->id = SCHEDULER_B_ID;
+        DoRemoveProcessor( ctx );
+      }
+    }
+    #else
+    T_unreachable();
+    #endif
+  }
+}
+
+/*
+ * Generated transition map entries.  NOTE(review): presumably each entry is
+ * { Skip, Pre_*_NA flags, Post_Status, Post_Removed } — confirm against the
+ * generated Entry type declaration.
+ */
+static const RtemsSchedulerReqRemoveProcessor_Entry
+RtemsSchedulerReqRemoveProcessor_Entries[] = {
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 1, 1, 1, 1, 1, RtemsSchedulerReqRemoveProcessor_Post_Status_InvId,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 1, 1, 1, 1, 1,
+    RtemsSchedulerReqRemoveProcessor_Post_Status_InvNum,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 0, 1, 1, 1, 1,
+    RtemsSchedulerReqRemoveProcessor_Post_Status_InvNum,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, 1, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_InUse,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, 1, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_Ok,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Yes },
+#endif
+  { 0, 0, 0, 1, 1, 1, 1, 1, RtemsSchedulerReqRemoveProcessor_Post_Status_InvId,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_Ok,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Yes },
+#endif
+  { 0, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_InUse,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, 0, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_NA,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, 1, 0, RtemsSchedulerReqRemoveProcessor_Post_Status_InUse,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop },
+#endif
+  { 0, 0, 0, 1, 1, 1, 1, 1,
+    RtemsSchedulerReqRemoveProcessor_Post_Status_InvNum,
+    RtemsSchedulerReqRemoveProcessor_Post_Removed_Nop }
+};
+
+/*
+ * Maps each weighted pre-condition state combination index to an index into
+ * the transition map entries (see PopEntry() and the Weights table).
+ */
+static const uint8_t
+RtemsSchedulerReqRemoveProcessor_Map[] = {
+  6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9,
+  4, 4, 7, 7, 7, 7, 5, 3, 3, 10, 5, 3, 3, 10, 4, 4, 4, 4, 8, 8, 8, 8, 5, 3, 3,
+  5, 5, 3, 3, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 11, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+/*
+ * Reports the current pre-condition scope while the action loop runs;
+ * outside the loop there is nothing to report.
+ */
+static size_t RtemsSchedulerReqRemoveProcessor_Scope(
+  void  *arg,
+  char  *buf,
+  size_t n
+)
+{
+  RtemsSchedulerReqRemoveProcessor_Context *ctx = arg;
+
+  if ( !ctx->Map.in_action_loop ) {
+    return 0;
+  }
+
+  return T_get_scope(
+    RtemsSchedulerReqRemoveProcessor_PreDesc,
+    buf,
+    n,
+    ctx->Map.pcs
+  );
+}
+
+/* Test fixture binding setup, teardown, and scope reporting to the context */
+static T_fixture RtemsSchedulerReqRemoveProcessor_Fixture = {
+  .setup = RtemsSchedulerReqRemoveProcessor_Setup_Wrap,
+  .stop = NULL,
+  .teardown = RtemsSchedulerReqRemoveProcessor_Teardown_Wrap,
+  .scope = RtemsSchedulerReqRemoveProcessor_Scope,
+  .initial_context = &RtemsSchedulerReqRemoveProcessor_Instance
+};
+
+/*
+ * Per-pre-condition weights used by PopEntry() to compute the transition
+ * map index from the pre-condition state indices.
+ */
+static const uint8_t RtemsSchedulerReqRemoveProcessor_Weights[] = {
+  128, 64, 32, 16, 8, 4, 1
+};
+
+/*
+ * Marks every pre-condition after ``index`` as not applicable so that the
+ * state loops in the test case body advance to the next variant.
+ */
+static void RtemsSchedulerReqRemoveProcessor_Skip(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx,
+  size_t index
+)
+{
+  if ( index < 1 ) {
+    ctx->Map.pci[ 1 ] = RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_NA - 1;
+  }
+
+  if ( index < 2 ) {
+    ctx->Map.pci[ 2 ] = RtemsSchedulerReqRemoveProcessor_Pre_Owned_NA - 1;
+  }
+
+  if ( index < 3 ) {
+    ctx->Map.pci[ 3 ] = RtemsSchedulerReqRemoveProcessor_Pre_Last_NA - 1;
+  }
+
+  if ( index < 4 ) {
+    ctx->Map.pci[ 4 ] = RtemsSchedulerReqRemoveProcessor_Pre_Home_NA - 1;
+  }
+
+  if ( index < 5 ) {
+    ctx->Map.pci[ 5 ] =
+      RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_NA - 1;
+  }
+
+  if ( index < 6 ) {
+    ctx->Map.pci[ 6 ] = RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_NA - 1;
+  }
+}
+
+/*
+ * Returns the transition map entry for the current variant and advances the
+ * map index.  After a skip, the index is recomputed from the weighted
+ * pre-condition state indices.
+ */
+static inline RtemsSchedulerReqRemoveProcessor_Entry
+RtemsSchedulerReqRemoveProcessor_PopEntry(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  size_t index;
+
+  if ( ctx->Map.skip ) {
+    size_t i;
+
+    ctx->Map.skip = false;
+    index = 0;
+
+    for ( i = 0; i < 7; ++i ) {
+      index += RtemsSchedulerReqRemoveProcessor_Weights[ i ] * ctx->Map.pci[ i ];
+    }
+  } else {
+    index = ctx->Map.index;
+  }
+
+  ctx->Map.index = index + 1;
+
+  return RtemsSchedulerReqRemoveProcessor_Entries[
+    RtemsSchedulerReqRemoveProcessor_Map[ index ]
+  ];
+}
+
+/*
+ * Copies the pre-condition state indices into Map.pcs, substituting the N/A
+ * state wherever the selected map entry marks a pre-condition as not
+ * applicable.
+ */
+static void RtemsSchedulerReqRemoveProcessor_SetPreConditionStates(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  ctx->Map.pcs[ 0 ] = ctx->Map.pci[ 0 ];
+  ctx->Map.pcs[ 1 ] = ctx->Map.pci[ 1 ];
+
+  if ( ctx->Map.entry.Pre_Owned_NA ) {
+    ctx->Map.pcs[ 2 ] = RtemsSchedulerReqRemoveProcessor_Pre_Owned_NA;
+  } else {
+    ctx->Map.pcs[ 2 ] = ctx->Map.pci[ 2 ];
+  }
+
+  if ( ctx->Map.entry.Pre_Last_NA ) {
+    ctx->Map.pcs[ 3 ] = RtemsSchedulerReqRemoveProcessor_Pre_Last_NA;
+  } else {
+    ctx->Map.pcs[ 3 ] = ctx->Map.pci[ 3 ];
+  }
+
+  if ( ctx->Map.entry.Pre_Home_NA ) {
+    ctx->Map.pcs[ 4 ] = RtemsSchedulerReqRemoveProcessor_Pre_Home_NA;
+  } else {
+    ctx->Map.pcs[ 4 ] = ctx->Map.pci[ 4 ];
+  }
+
+  if ( ctx->Map.entry.Pre_RequiredByAffinity_NA ) {
+    ctx->Map.pcs[ 5 ] = RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_NA;
+  } else {
+    ctx->Map.pcs[ 5 ] = ctx->Map.pci[ 5 ];
+  }
+
+  if ( ctx->Map.entry.Pre_UsedBy_NA ) {
+    ctx->Map.pcs[ 6 ] = RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_NA;
+  } else {
+    ctx->Map.pcs[ 6 ] = ctx->Map.pci[ 6 ];
+  }
+}
+
+/*
+ * Runs one test variant: prepares all pre-conditions, performs the action,
+ * and checks the post-conditions.  A variant may skip itself during
+ * pre-condition preparation.
+ */
+static void RtemsSchedulerReqRemoveProcessor_TestVariant(
+  RtemsSchedulerReqRemoveProcessor_Context *ctx
+)
+{
+  RtemsSchedulerReqRemoveProcessor_Pre_Id_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Prepare(
+    ctx,
+    ctx->Map.pcs[ 1 ]
+  );
+  RtemsSchedulerReqRemoveProcessor_Pre_Owned_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+  RtemsSchedulerReqRemoveProcessor_Pre_Last_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+  RtemsSchedulerReqRemoveProcessor_Pre_Home_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+  RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_Prepare(
+    ctx,
+    ctx->Map.pcs[ 5 ]
+  );
+  RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Prepare(
+    ctx,
+    ctx->Map.pcs[ 6 ]
+  );
+
+  if ( ctx->Map.skip ) {
+    /* The variant was skipped by a pre-condition preparation */
+    RtemsSchedulerReqRemoveProcessor_Skip( ctx, 6 );
+    return;
+  }
+
+  RtemsSchedulerReqRemoveProcessor_Action( ctx );
+  RtemsSchedulerReqRemoveProcessor_Post_Status_Check(
+    ctx,
+    ctx->Map.entry.Post_Status
+  );
+  RtemsSchedulerReqRemoveProcessor_Post_Removed_Check(
+    ctx,
+    ctx->Map.entry.Post_Removed
+  );
+}
+
+/**
+ * @fn void T_case_body_RtemsSchedulerReqRemoveProcessor( void )
+ *
+ * @brief Iterates over the cartesian product of all pre-condition states and
+ *   runs one test variant for each non-skipped combination.
+ */
+T_TEST_CASE_FIXTURE(
+  RtemsSchedulerReqRemoveProcessor,
+  &RtemsSchedulerReqRemoveProcessor_Fixture
+)
+{
+  RtemsSchedulerReqRemoveProcessor_Context *ctx;
+
+  ctx = T_fixture_context();
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+  ctx->Map.skip = false;
+
+  for (
+    ctx->Map.pci[ 0 ] = RtemsSchedulerReqRemoveProcessor_Pre_Id_Invalid;
+    ctx->Map.pci[ 0 ] < RtemsSchedulerReqRemoveProcessor_Pre_Id_NA;
+    ++ctx->Map.pci[ 0 ]
+  ) {
+    for (
+      ctx->Map.pci[ 1 ] = RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_Valid;
+      ctx->Map.pci[ 1 ] < RtemsSchedulerReqRemoveProcessor_Pre_CPUIndex_NA;
+      ++ctx->Map.pci[ 1 ]
+    ) {
+      for (
+        ctx->Map.pci[ 2 ] = RtemsSchedulerReqRemoveProcessor_Pre_Owned_Yes;
+        ctx->Map.pci[ 2 ] < RtemsSchedulerReqRemoveProcessor_Pre_Owned_NA;
+        ++ctx->Map.pci[ 2 ]
+      ) {
+        for (
+          ctx->Map.pci[ 3 ] = RtemsSchedulerReqRemoveProcessor_Pre_Last_Yes;
+          ctx->Map.pci[ 3 ] < RtemsSchedulerReqRemoveProcessor_Pre_Last_NA;
+          ++ctx->Map.pci[ 3 ]
+        ) {
+          for (
+            ctx->Map.pci[ 4 ] = RtemsSchedulerReqRemoveProcessor_Pre_Home_Yes;
+            ctx->Map.pci[ 4 ] < RtemsSchedulerReqRemoveProcessor_Pre_Home_NA;
+            ++ctx->Map.pci[ 4 ]
+          ) {
+            for (
+              ctx->Map.pci[ 5 ] = RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_Yes;
+              ctx->Map.pci[ 5 ] < RtemsSchedulerReqRemoveProcessor_Pre_RequiredByAffinity_NA;
+              ++ctx->Map.pci[ 5 ]
+            ) {
+              for (
+                ctx->Map.pci[ 6 ] = RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_Idle;
+                ctx->Map.pci[ 6 ] < RtemsSchedulerReqRemoveProcessor_Pre_UsedBy_NA;
+                ++ctx->Map.pci[ 6 ]
+              ) {
+                ctx->Map.entry = RtemsSchedulerReqRemoveProcessor_PopEntry(
+                  ctx
+                );
+
+                if ( ctx->Map.entry.Skip ) {
+                  continue;
+                }
+
+                RtemsSchedulerReqRemoveProcessor_SetPreConditionStates( ctx );
+                RtemsSchedulerReqRemoveProcessor_Prepare( ctx );
+                RtemsSchedulerReqRemoveProcessor_TestVariant( ctx );
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler-smp-only.c b/testsuites/validation/tc-scheduler-smp-only.c
new file mode 100644
index 0000000000..2a6f0b53c6
--- /dev/null
+++ b/testsuites/validation/tc-scheduler-smp-only.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerValSmpOnly
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "ts-config.h"
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerValSmpOnly \
+ * spec:/rtems/scheduler/val/smp-only
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief This test case collection provides validation test cases for SMP-only
+ * requirements of the @ref RTEMSAPIClassicScheduler.
+ *
+ * This test case performs the following actions:
+ *
+ * - Call rtems_scheduler_get_processor() on all online processors and check
+ * the returned value.
+ *
+ * - Call rtems_scheduler_get_processor_maximum() and check the returned value.
+ *
+ * - Check that the returned value is greater than or equal to one.
+ *
+ * - Check that the returned value is less than or equal to
+ * rtems_configuration_get_maximum_processors().
+ *
+ * - Call rtems_scheduler_ident() for each configured scheduler.
+ *
+ * - Check that the object index of scheduler A has the expected value.
+ *
+ * - Check that the object index of scheduler B has the expected value.
+ *
+ * - Check that the object index of scheduler C has the expected value.
+ *
+ * - Check that the object index of scheduler D has the expected value.
+ *
+ * - Check that processor 0 has scheduler A assigned.
+ *
+ * - Check that processor 1 has scheduler B assigned.
+ *
+ * - Check that processor 2 has scheduler C assigned if it is present.
+ *
+ * - Check that processor 3 has scheduler C assigned if it is present.
+ *
+ * @{
+ */
+
+/**
+ * @brief Call rtems_scheduler_get_processor() on all online processors and
+ * check the returned value.
+ */
+static void RtemsSchedulerValSmpOnly_Action_0( void )
+{
+ rtems_id scheduler_id;
+ rtems_task_priority priority;
+ uint32_t cpu_index;
+ uint32_t cpu_max;
+
+ scheduler_id = GetSelfScheduler();
+ priority = GetSelfPriority();
+ cpu_max = rtems_scheduler_get_processor_maximum();
+ T_step_ge_u32( 0, cpu_max, 1 );
+
+ for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
+ rtems_status_code sc;
+ rtems_id id;
+
+ sc = rtems_scheduler_ident_by_processor( cpu_index, &id );
+ T_quiet_rsc_success( sc );
+
+ SetSelfScheduler( id, priority );
+ SetSelfAffinityOne( cpu_index );
+
+ T_quiet_eq_u32( rtems_scheduler_get_processor(), cpu_index );
+
+ SetSelfAffinityAll();
+ }
+
+ SetSelfScheduler( scheduler_id, priority );
+}
+
+/**
+ * @brief Call rtems_scheduler_get_processor_maximum() and check the returned
+ * value.
+ */
+static void RtemsSchedulerValSmpOnly_Action_1( void )
+{
+ uint32_t cpu_max;
+
+ cpu_max = rtems_scheduler_get_processor_maximum();
+
+ /*
+ * Check that the returned value is greater than or equal to one.
+ */
+ T_step_ge_u32( 1, cpu_max, 1 );
+
+ /*
+ * Check that the returned value is less than or equal to
+ * rtems_configuration_get_maximum_processors().
+ */
+ T_step_le_u32(
+ 2,
+ cpu_max,
+ rtems_configuration_get_maximum_processors()
+ );
+}
+
+/**
+ * @brief Call rtems_scheduler_ident() for each configured scheduler.
+ */
+static void RtemsSchedulerValSmpOnly_Action_2( void )
+{
+ rtems_status_code sc;
+ rtems_id id[ 4 ];
+ rtems_id id_by_cpu;
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_A_NAME, &id[ 0 ]);
+ T_step_rsc_success( 3, sc );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &id[ 1 ]);
+ T_step_rsc_success( 4, sc );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_C_NAME, &id[ 2 ]);
+ T_step_rsc_success( 5, sc );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_D_NAME, &id[ 3 ]);
+ T_step_rsc_success( 6, sc );
+
+ /*
+ * Check that the object index of scheduler A has the expected value.
+ */
+ T_step_eq_u16( 7, rtems_object_id_get_index( id[ 0 ] ), 1 );
+
+ /*
+ * Check that the object index of scheduler B has the expected value.
+ */
+ T_step_eq_u16( 8, rtems_object_id_get_index( id[ 1 ] ), 2 );
+
+ /*
+ * Check that the object index of scheduler C has the expected value.
+ */
+ T_step_eq_u16( 9, rtems_object_id_get_index( id[ 2 ] ), 3 );
+
+ /*
+ * Check that the object index of scheduler D has the expected value.
+ */
+ T_step_eq_u16( 10, rtems_object_id_get_index( id[ 3 ] ), 4 );
+
+ /*
+ * Check that processor 0 has scheduler A assigned.
+ */
+ sc = rtems_scheduler_ident_by_processor( 0, &id_by_cpu );
+ T_step_rsc_success( 11, sc );
+ T_step_eq_u32( 12, id[ 0 ], id_by_cpu );
+
+ /*
+ * Check that processor 1 has scheduler B assigned.
+ */
+ sc = rtems_scheduler_ident_by_processor( 1, &id_by_cpu );
+ T_step_rsc_success( 13, sc );
+ T_step_eq_u32( 14, id[ 1 ], id_by_cpu );
+
+ /*
+ * Check that processor 2 has scheduler C assigned if it is present.
+ */
+ sc = rtems_scheduler_ident_by_processor( 2, &id_by_cpu );
+ T_step_true( 15, sc == RTEMS_INVALID_NAME || id[ 2 ] == id_by_cpu );
+
+ /*
+ * Check that processor 3 has scheduler C assigned if it is present.
+ */
+ sc = rtems_scheduler_ident_by_processor( 3, &id_by_cpu );
+ T_step_true( 16, sc == RTEMS_INVALID_NAME || id[ 2 ] == id_by_cpu );
+}
+
/**
 * @fn void T_case_body_RtemsSchedulerValSmpOnly( void )
 */
T_TEST_CASE( RtemsSchedulerValSmpOnly )
{
  /* The three actions below perform 17 numbered test steps in total. */
  T_plan( 17 );

  RtemsSchedulerValSmpOnly_Action_0();
  RtemsSchedulerValSmpOnly_Action_1();
  RtemsSchedulerValSmpOnly_Action_2();
}
+
+/** @} */
diff --git a/testsuites/validation/tc-scheduler.c b/testsuites/validation/tc-scheduler.c
new file mode 100644
index 0000000000..e930d3ee51
--- /dev/null
+++ b/testsuites/validation/tc-scheduler.c
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseRtemsSchedulerValScheduler
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated. If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual. The manual is provided as a part of
+ * a release. For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseRtemsSchedulerValScheduler \
+ * spec:/rtems/scheduler/val/scheduler
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @brief This test case collection provides validation test cases for general
+ * requirements of the @ref RTEMSAPIClassicScheduler.
+ *
+ * This test case performs the following actions:
+ *
+ * - Validate rtems_scheduler_get_processor().
+ *
+ *   - Check that the values returned by the macro form and the function form
+ *     of rtems_scheduler_get_processor() are equal.
+ *
+ * - Validate rtems_scheduler_get_processor_maximum().
+ *
+ *   - Check that the values returned by the macro form and the function form
+ *     of rtems_scheduler_get_processor_maximum() are equal.
+ *
+ * @{
+ */
+
/*
 * At this point rtems_scheduler_get_processor_maximum is still defined as a
 * macro (the #undef below has not taken effect yet), so this wrapper
 * exercises the macro form of the directive.
 */
static uint32_t GetProcessorMaximumMacro( void )
{
  return rtems_scheduler_get_processor_maximum();
}
+
+#undef rtems_scheduler_get_processor_maximum
+
/*
 * After the #undef above, rtems_scheduler_get_processor_maximum binds to the
 * real function, so this wrapper exercises the function form of the
 * directive.
 */
static uint32_t GetProcessorMaximum( void )
{
  return rtems_scheduler_get_processor_maximum();
}
+
/*
 * At this point rtems_scheduler_get_processor is still defined as a macro
 * (the #undef below has not taken effect yet), so this wrapper exercises the
 * macro form of the directive.
 */
static uint32_t GetProcessorMacro( void )
{
  return rtems_scheduler_get_processor();
}
+
+#undef rtems_scheduler_get_processor
+
/*
 * After the #undef above, rtems_scheduler_get_processor binds to the real
 * function, so this wrapper exercises the function form of the directive.
 */
static uint32_t GetProcessor( void )
{
  return rtems_scheduler_get_processor();
}
+
+/**
+ * @brief Validate rtems_scheduler_get_processor().
+ */
+static void RtemsSchedulerValScheduler_Action_0( void )
+{
+ uint32_t cpu_index;
+ uint32_t cpu_index_macro;
+
+ cpu_index = GetProcessor();
+ cpu_index_macro = GetProcessorMacro();
+
+ /*
+ * Check that the values returned by rtems_scheduler_get_processor() and
+ * rtems_scheduler_get_processor() are equal.
+ */
+ T_step_eq_u32( 0, cpu_index, cpu_index_macro );
+}
+
+/**
+ * @brief Validate rtems_scheduler_get_processor_maximum().
+ */
+static void RtemsSchedulerValScheduler_Action_1( void )
+{
+ uint32_t cpu_max;
+ uint32_t cpu_max_macro;
+
+ cpu_max = GetProcessorMaximum();
+ cpu_max_macro = GetProcessorMaximumMacro();
+
+ /*
+ * Check that the values returned by rtems_scheduler_get_processor_maximum()
+ * and rtems_scheduler_get_processor_maximum() are equal.
+ */
+ T_step_eq_u32( 1, cpu_max, cpu_max_macro );
+}
+
/**
 * @fn void T_case_body_RtemsSchedulerValScheduler( void )
 */
T_TEST_CASE( RtemsSchedulerValScheduler )
{
  /* The two actions below perform one numbered test step each. */
  T_plan( 2 );

  RtemsSchedulerValScheduler_Action_0();
  RtemsSchedulerValScheduler_Action_1();
}
+
+/** @} */