summaryrefslogtreecommitdiffstats
path: root/testsuites/smptests/smpmrsp01
diff options
context:
space:
mode:
author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-05-21 10:33:43 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-05-28 10:50:41 +0200
commit    8fcafdd553f3a564ecb5ab5093d01b29971418da (patch)
tree      7807b3a4ae28e62f0218f8e20051b1b7e0143206 /testsuites/smptests/smpmrsp01
parent    bsps/sparc: Change tabs to spaces. (diff)
download  rtems-8fcafdd553f3a564ecb5ab5093d01b29971418da.tar.bz2
score: Multiprocessor Resource Sharing Protocol
Add basic support for the Multiprocessor Resource Sharing Protocol (MrsP). The Multiprocessor Resource Sharing Protocol (MrsP) is defined in A. Burns and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on Real-Time Systems (ECRTS 2013), July 2013. It is a generalization of the Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a ceiling priority per scheduler instance. These ceiling priorities can be specified with rtems_semaphore_set_priority(). A task obtaining or owning a MrsP semaphore will execute with the ceiling priority for its scheduler instance as specified by the MrsP semaphore object. Tasks waiting to get ownership of a MrsP semaphore will not relinquish the processor voluntarily. In case the owner of a MrsP semaphore gets preempted it can ask all tasks waiting for this semaphore to help out and temporarily borrow the right to execute on one of their assigned processors. The help out feature is not implemented with this patch.
Diffstat (limited to 'testsuites/smptests/smpmrsp01')
-rw-r--r--  testsuites/smptests/smpmrsp01/Makefile.am    |  19
-rw-r--r--  testsuites/smptests/smpmrsp01/init.c         | 698
-rw-r--r--  testsuites/smptests/smpmrsp01/smpmrsp01.doc  |  15
-rw-r--r--  testsuites/smptests/smpmrsp01/smpmrsp01.scn  | 147
4 files changed, 879 insertions, 0 deletions
diff --git a/testsuites/smptests/smpmrsp01/Makefile.am b/testsuites/smptests/smpmrsp01/Makefile.am
new file mode 100644
index 0000000000..7983434b98
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpmrsp01
+smpmrsp01_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpmrsp01.scn smpmrsp01.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpmrsp01_OBJECTS)
+LINK_LIBS = $(smpmrsp01_LDLIBS)
+
+smpmrsp01$(EXEEXT): $(smpmrsp01_OBJECTS) $(smpmrsp01_DEPENDENCIES)
+ @rm -f smpmrsp01$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
new file mode 100644
index 0000000000..4f6637afac
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rtems.h>
+#include <rtems/libcsupport.h>
+#include <rtems/score/smpbarrier.h>
+
+#define TESTS_USE_PRINTK
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SMPMRSP 1";
+
+#define CPU_COUNT 32
+
+#define MRSP_COUNT 32
+
+typedef struct {
+ uint32_t sleep;
+ uint32_t timeout;
+ uint32_t obtain[MRSP_COUNT];
+} counter;
+
+typedef struct {
+ rtems_id main_task_id;
+ rtems_id counting_sem_id;
+ rtems_id mrsp_ids[MRSP_COUNT];
+ rtems_id scheduler_ids[CPU_COUNT];
+ rtems_id worker_ids[2 * CPU_COUNT];
+ volatile bool stop_worker[CPU_COUNT];
+ counter counters[2 * CPU_COUNT];
+ Thread_Control *worker_task;
+ SMP_barrier_Control barrier;
+} test_context;
+
+static test_context test_instance = {
+ .barrier = SMP_BARRIER_CONTROL_INITIALIZER
+};
+
+static void barrier(test_context *ctx, SMP_barrier_State *bs)
+{
+ _SMP_barrier_Wait(&ctx->barrier, bs, 2);
+}
+
+static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
+{
+ rtems_status_code sc;
+ rtems_task_priority prio;
+
+ sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == expected_prio);
+}
+
+static void change_prio(rtems_id task_id, rtems_task_priority prio)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_priority(task_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void assert_executing_worker(test_context *ctx)
+{
+ rtems_test_assert(
+ _CPU_Context_Get_is_executing(&ctx->worker_task->Registers)
+ );
+}
+
+static void obtain_and_release_worker(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ ctx->worker_task = _Thread_Get_executing();
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Obtain with timeout (A) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Obtain with priority change and timeout (B) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ assert_prio(RTEMS_SELF, 1);
+
+ /* Restore priority (C) */
+ barrier(ctx, &barrier_state);
+
+ /* Obtain without timeout (D) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Worker done (E) */
+ barrier(ctx, &barrier_state);
+
+ rtems_task_suspend(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void test_mrsp_obtain_and_release(void)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ rtems_task_priority prio;
+ rtems_id scheduler_id;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ puts("test MrsP obtain and release");
+
+ /* Check executing task parameters */
+
+ sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ /* Create a MrsP semaphore object and lock it */
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &ctx->mrsp_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 1);
+
+ /*
+ * The ceiling priority values per scheduler are equal to the value specified
+ * for object creation.
+ */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ /* Check the old value and set a new ceiling priority for scheduler B */
+
+ prio = 2;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[1],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ /* Check the ceiling priority values */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[1],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 2);
+
+ /* Check that a thread waiting to get ownership remains executing */
+
+ sc = rtems_task_create(
+ rtems_build_name('W', 'O', 'R', 'K'),
+ 3,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(ctx->worker_ids[0], obtain_and_release_worker, 0);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Obtain with timeout (A) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ assert_executing_worker(ctx);
+
+ /* Obtain with priority change and timeout (B) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ change_prio(ctx->worker_ids[0], 1);
+ assert_executing_worker(ctx);
+
+ /* Restore priority (C) */
+ barrier(ctx, &barrier_state);
+
+ assert_prio(ctx->worker_ids[0], 1);
+ change_prio(ctx->worker_ids[0], 3);
+
+ /* Obtain without timeout (D) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ assert_executing_worker(ctx);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Worker done (E) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_delete(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_flush_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP flush error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_flush(id);
+ rtems_test_assert(sc == RTEMS_NOT_DEFINED);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_initially_locked_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP initially locked error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 0,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+}
+
+static void test_mrsp_nested_obtain_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP nested obtain error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_UNSATISFIED);
+
+ sc = rtems_semaphore_release(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static uint32_t simple_random(uint32_t v)
+{
+ v *= 1664525;
+ v += 1013904223;
+
+ return v;
+}
+
+static rtems_interval timeout(uint32_t v)
+{
+ return (v >> 23) % 4;
+}
+
+static void load_worker(rtems_task_argument index)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ uint32_t v = index;
+
+ while (!ctx->stop_worker[index]) {
+ uint32_t i = (v >> 13) % MRSP_COUNT;
+
+ assert_prio(RTEMS_SELF, 3 + CPU_COUNT + index);
+
+ if ((v >> 7) % 1024 == 0) {
+ /* Give some time to the lower priority tasks */
+
+ ++ctx->counters[index].sleep;
+
+ sc = rtems_task_wake_after(1);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ } else {
+ uint32_t n = (v >> 17) % (i + 1);
+ uint32_t s;
+ uint32_t t;
+
+ /* Nested obtain */
+ for (s = 0; s <= n; ++s) {
+ uint32_t k = i - s;
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[k], RTEMS_WAIT, timeout(v));
+ if (sc == RTEMS_SUCCESSFUL) {
+ ++ctx->counters[index].obtain[n];
+
+ assert_prio(RTEMS_SELF, 3 + k);
+ } else {
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ ++ctx->counters[index].timeout;
+
+ break;
+ }
+
+ v = simple_random(v);
+ }
+
+ /* Release in reverse obtain order */
+ for (t = 0; t < s; ++t) {
+ uint32_t k = i + t - s + 1;
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+ }
+
+ v = simple_random(v);
+ }
+
+ sc = rtems_semaphore_release(ctx->counting_sem_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_task_suspend(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void test_mrsp_load(void)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ uint32_t cpu_count = rtems_get_processor_count();
+ uint32_t index;
+
+ puts("test MrsP load");
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('S', 'Y', 'N', 'C'),
+ 0,
+ RTEMS_COUNTING_SEMAPHORE,
+ 0,
+ &ctx->counting_sem_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < MRSP_COUNT; ++index) {
+ sc = rtems_semaphore_create(
+ 'A' + index,
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 3 + index,
+ &ctx->mrsp_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < cpu_count; ++index) {
+ uint32_t a = 2 * index;
+ uint32_t b = a + 1;
+
+ sc = rtems_task_create(
+ 'A' + a,
+ 3 + MRSP_COUNT + a,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[a]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_ids[a],
+ ctx->scheduler_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(
+ ctx->worker_ids[a],
+ load_worker,
+ (rtems_task_argument) a
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
+ 'A' + b,
+ 3 + MRSP_COUNT + b,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[b]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_ids[b],
+ ctx->scheduler_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(
+ ctx->worker_ids[b],
+ load_worker,
+ (rtems_task_argument) b
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ ctx->stop_worker[index] = true;
+ }
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ sc = rtems_semaphore_obtain(
+ ctx->counting_sem_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ sc = rtems_task_delete(ctx->worker_ids[index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < MRSP_COUNT; ++index) {
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_semaphore_delete(ctx->counting_sem_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ uint32_t nest_level;
+
+ printf(
+ "worker[%" PRIu32 "][%" PRIu32 "]\n"
+ " sleep = %" PRIu32 "\n"
+ " timeout = %" PRIu32 "\n",
+ index / 2,
+ index % 2,
+ ctx->counters[index].sleep,
+ ctx->counters[index].timeout
+ );
+
+ for (nest_level = 0; nest_level < MRSP_COUNT; ++nest_level) {
+ printf(
+ " obtain[%" PRIu32 "] = %" PRIu32 "\n",
+ nest_level,
+ ctx->counters[index].obtain[nest_level]
+ );
+ }
+ }
+}
+
+static void Init(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ rtems_resource_snapshot snapshot;
+ uint32_t cpu_count = rtems_get_processor_count();
+ uint32_t cpu_index;
+
+ TEST_BEGIN();
+
+ rtems_resource_snapshot_take(&snapshot);
+
+ ctx->main_task_id = rtems_task_self();
+
+ for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+ sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ test_mrsp_flush_error();
+ test_mrsp_initially_locked_error();
+ test_mrsp_nested_obtain_error();
+ test_mrsp_obtain_and_release();
+ test_mrsp_load();
+
+ rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
+
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_MICROSECONDS_PER_TICK 1000
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 1)
+#define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
+#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
+#define CONFIGURE_MAXIMUM_TIMERS 1
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
+
+#define CONFIGURE_SCHEDULER_SIMPLE_SMP
+
+#include <rtems/scheduler.h>
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
+
+#define CONFIGURE_SCHEDULER_CONTROLS \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16)
+
+#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
+ RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_INIT_TASK_PRIORITY 2
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.doc b/testsuites/smptests/smpmrsp01/smpmrsp01.doc
new file mode 100644
index 0000000000..85badfd8e5
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.doc
@@ -0,0 +1,15 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpmrsp01
+
+directives:
+
+ - _MRSP_Initialize()
+ - _MRSP_Obtain()
+ - _MRSP_Release()
+ - _MRSP_Get_ceiling_priority()
+ - _MRSP_Set_ceiling_priority()
+
+concepts:
+
+ - Ensure that MrsP semaphores work.
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
new file mode 100644
index 0000000000..5762a02c8c
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -0,0 +1,147 @@
+*** BEGIN OF TEST SMPMRSP 1 ***
+test MrsP flush error
+test MrsP initially locked error
+test MrsP nested obtain error
+test MrsP obtain and release
+test MrsP load
+worker[0][0]
+ sleep = 890
+ timeout = 1455
+ obtain[0] = 141069
+ obtain[1] = 111062
+ obtain[2] = 255631
+ obtain[3] = 186559
+ obtain[4] = 310707
+ obtain[5] = 246838
+ obtain[6] = 331853
+ obtain[7] = 298938
+ obtain[8] = 331989
+ obtain[9] = 343041
+ obtain[10] = 310191
+ obtain[11] = 381001
+ obtain[12] = 269001
+ obtain[13] = 412849
+ obtain[14] = 217768
+ obtain[15] = 444036
+ obtain[16] = 160721
+ obtain[17] = 476211
+ obtain[18] = 151929
+ obtain[19] = 438664
+ obtain[20] = 132708
+ obtain[21] = 388090
+ obtain[22] = 118166
+ obtain[23] = 337468
+ obtain[24] = 96676
+ obtain[25] = 271392
+ obtain[26] = 75445
+ obtain[27] = 203259
+ obtain[28] = 52933
+ obtain[29] = 132769
+ obtain[30] = 27856
+ obtain[31] = 57014
+worker[0][1]
+ sleep = 15
+ timeout = 33
+ obtain[0] = 2241
+ obtain[1] = 1890
+ obtain[2] = 4128
+ obtain[3] = 3128
+ obtain[4] = 5110
+ obtain[5] = 3981
+ obtain[6] = 5348
+ obtain[7] = 4825
+ obtain[8] = 5184
+ obtain[9] = 5720
+ obtain[10] = 4488
+ obtain[11] = 6038
+ obtain[12] = 4095
+ obtain[13] = 6658
+ obtain[14] = 3754
+ obtain[15] = 6768
+ obtain[16] = 2654
+ obtain[17] = 7051
+ obtain[18] = 2679
+ obtain[19] = 6956
+ obtain[20] = 2498
+ obtain[21] = 6173
+ obtain[22] = 2024
+ obtain[23] = 5514
+ obtain[24] = 1650
+ obtain[25] = 4141
+ obtain[26] = 1568
+ obtain[27] = 3285
+ obtain[28] = 812
+ obtain[29] = 2317
+ obtain[30] = 527
+ obtain[31] = 996
+worker[1][0]
+ sleep = 890
+ timeout = 1581
+ obtain[0] = 140732
+ obtain[1] = 111655
+ obtain[2] = 256936
+ obtain[3] = 186534
+ obtain[4] = 311714
+ obtain[5] = 248065
+ obtain[6] = 333155
+ obtain[7] = 300734
+ obtain[8] = 329675
+ obtain[9] = 343832
+ obtain[10] = 309112
+ obtain[11] = 380452
+ obtain[12] = 270156
+ obtain[13] = 416600
+ obtain[14] = 223484
+ obtain[15] = 444991
+ obtain[16] = 163750
+ obtain[17] = 476096
+ obtain[18] = 150317
+ obtain[19] = 432827
+ obtain[20] = 133946
+ obtain[21] = 388441
+ obtain[22] = 119760
+ obtain[23] = 337033
+ obtain[24] = 99153
+ obtain[25] = 271558
+ obtain[26] = 77535
+ obtain[27] = 202607
+ obtain[28] = 53225
+ obtain[29] = 130801
+ obtain[30] = 27321
+ obtain[31] = 56239
+worker[1][1]
+ sleep = 25
+ timeout = 48
+ obtain[0] = 2164
+ obtain[1] = 1722
+ obtain[2] = 4095
+ obtain[3] = 3002
+ obtain[4] = 4950
+ obtain[5] = 4020
+ obtain[6] = 5235
+ obtain[7] = 4716
+ obtain[8] = 5407
+ obtain[9] = 5070
+ obtain[10] = 5082
+ obtain[11] = 6130
+ obtain[12] = 4368
+ obtain[13] = 6108
+ obtain[14] = 3270
+ obtain[15] = 6800
+ obtain[16] = 2652
+ obtain[17] = 7633
+ obtain[18] = 2451
+ obtain[19] = 7480
+ obtain[20] = 2079
+ obtain[21] = 6232
+ obtain[22] = 1590
+ obtain[23] = 5739
+ obtain[24] = 1627
+ obtain[25] = 4030
+ obtain[26] = 1296
+ obtain[27] = 2803
+ obtain[28] = 969
+ obtain[29] = 2253
+ obtain[30] = 217
+ obtain[31] = 930
+*** END OF TEST SMPMRSP 1 ***