path: root/testsuites/smptests/smplock01/init.c
author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-10-31 10:50:54 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-10-31 13:09:42 +0100
commit    7eec247c356826abbf51f53b41e6307d577a446d (patch)
tree      29bc4acea20ff434a9b32be0878c136a2480ff3c /testsuites/smptests/smplock01/init.c
parent    sys/sockio.h: Update to FreeBSD head 2016-05-03 (diff)
smptests/smplock01: Use test support
Convert output to XML.
Diffstat (limited to 'testsuites/smptests/smplock01/init.c')
-rw-r--r--  testsuites/smptests/smplock01/init.c  622
1 file changed, 375 insertions(+), 247 deletions(-)
diff --git a/testsuites/smptests/smplock01/init.c b/testsuites/smptests/smplock01/init.c
index 62397a3b27..046fce6493 100644
--- a/testsuites/smptests/smplock01/init.c
+++ b/testsuites/smptests/smplock01/init.c
@@ -19,10 +19,10 @@
#include <rtems/score/smplock.h>
#include <rtems/score/smplockmcs.h>
#include <rtems/score/smplockseq.h>
-#include <rtems/score/smpbarrier.h>
-#include <rtems/score/atomic.h>
+#include <rtems/test.h>
#include <rtems.h>
+#define TESTS_USE_PRINTK
#include "tmacros.h"
const char rtems_test_name[] = "SMPLOCK 1";
@@ -33,34 +33,21 @@ const char rtems_test_name[] = "SMPLOCK 1";
#define TEST_COUNT 11
-typedef enum {
- INITIAL,
- START_TEST,
- STOP_TEST
-} states;
-
typedef struct {
- Atomic_Uint state;
- SMP_barrier_Control barrier;
- rtems_id timer_id;
- rtems_interval timeout;
+ rtems_test_parallel_context base;
unsigned long counter[TEST_COUNT];
- unsigned long test_counter[TEST_COUNT][CPU_COUNT];
+ unsigned long local_counter[CPU_COUNT][TEST_COUNT][CPU_COUNT];
SMP_lock_Control lock;
#if defined(RTEMS_PROFILING)
SMP_lock_Stats mcs_stats;
#endif
SMP_MCS_lock_Control mcs_lock;
SMP_sequence_lock_Control seq_lock;
- char unused_space_for_cache_line_separation_0[128];
- int a;
- char unused_space_for_cache_line_separation_1[128];
- int b;
-} global_context;
-
-static global_context context = {
- .state = ATOMIC_INITIALIZER_UINT(INITIAL),
- .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
+ int a RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
+ int b RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
+} test_context RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
+
+static test_context test_instance = {
.lock = SMP_LOCK_INITIALIZER("global ticket"),
#if defined(RTEMS_PROFILING)
.mcs_stats = SMP_LOCK_STATS_INITIALIZER("global MCS"),
@@ -69,146 +56,222 @@ static global_context context = {
.seq_lock = SMP_SEQUENCE_LOCK_INITIALIZER
};
-static const char * const test_names[TEST_COUNT] = {
- "global ticket lock with local counter",
- "global MCS lock with local counter",
- "global ticket lock with global counter",
- "global MCS lock with global counter",
- "local ticket lock with local counter",
- "local MCS lock with local counter",
- "local ticket lock with global counter",
- "local MCS lock with global counter",
- "global ticket lock with busy section",
- "global MCS lock with busy section",
- "sequence lock"
-};
-
-static void stop_test_timer(rtems_id timer_id, void *arg)
+static rtems_interval test_duration(void)
{
- global_context *ctx = arg;
-
- _Atomic_Store_uint(&ctx->state, STOP_TEST, ATOMIC_ORDER_RELEASE);
+ return rtems_clock_get_ticks_per_second();
}
-static void wait_for_state(global_context *ctx, int desired_state)
+static rtems_interval test_init(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
{
- while (
- _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_ACQUIRE) != desired_state
- ) {
- /* Wait */
- }
+ return test_duration();
}
-static bool assert_state(global_context *ctx, int desired_state)
+static void test_fini(
+ test_context *ctx,
+ const char *name,
+ size_t test,
+ size_t active_workers
+)
{
- return _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED) == desired_state;
-}
+ unsigned long sum = 0;
+ unsigned long n = active_workers;
+ unsigned long i;
+
+ printf(" <%s activeWorker=\"%lu\">\n", name, n);
+
+ for (i = 0; i < n; ++i) {
+ unsigned long local_counter =
+ ctx->local_counter[active_workers - 1][test][i];
-typedef void (*test_body)(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
-);
+ sum += local_counter;
+
+ printf(
+ " <LocalCounter worker=\"%lu\">%lu</LocalCounter>\n",
+ i,
+ local_counter
+ );
+ }
+
+ printf(
+ " <GlobalCounter>%lu</GlobalCounter>\n"
+ " <SumOfLocalCounter>%lu</SumOfLocalCounter>\n"
+ " </%s>\n",
+ ctx->counter[test],
+ sum,
+ name
+ );
+}
static void test_0_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 0;
unsigned long counter = 0;
SMP_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_lock_Acquire(&ctx->lock, &lock_context);
_SMP_lock_Release(&ctx->lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_0_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalTicketLockWithLocalCounter",
+ 0,
+ active_workers
+ );
}
static void test_1_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 1;
unsigned long counter = 0;
SMP_MCS_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
_SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_1_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalMCSLockWithLocalCounter",
+ 1,
+ active_workers
+ );
}
static void test_2_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 2;
unsigned long counter = 0;
SMP_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_lock_Acquire(&ctx->lock, &lock_context);
++ctx->counter[test];
_SMP_lock_Release(&ctx->lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_2_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalTicketLockWithGlobalCounter",
+ 2,
+ active_workers
+ );
}
static void test_3_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 3;
unsigned long counter = 0;
SMP_MCS_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
++ctx->counter[test];
_SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_3_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalMCSLockWithGlobalCounter",
+ 3,
+ active_workers
+ );
}
static void test_4_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 4;
unsigned long counter = 0;
SMP_lock_Control lock;
SMP_lock_Context lock_context;
_SMP_lock_Initialize(&lock, "local");
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_lock_Acquire(&lock, &lock_context);
_SMP_lock_Release(&lock, &lock_context);
++counter;
@@ -216,17 +279,34 @@ static void test_4_body(
_SMP_lock_Destroy(&lock);
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_4_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "LocalTicketLockWithLocalCounter",
+ 4,
+ active_workers
+ );
}
static void test_5_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 5;
unsigned long counter = 0;
#if defined(RTEMS_PROFILING)
SMP_lock_Stats stats;
@@ -237,7 +317,7 @@ static void test_5_body(
_SMP_lock_Stats_initialize(&stats, "local");
_SMP_MCS_lock_Initialize(&lock);
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_MCS_lock_Acquire(&lock, &lock_context, &stats);
_SMP_MCS_lock_Release(&lock, &lock_context);
++counter;
@@ -246,24 +326,41 @@ static void test_5_body(
_SMP_MCS_lock_Destroy(&lock);
_SMP_lock_Stats_destroy(&stats);
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_5_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "LocalMCSLockWithLocalCounter",
+ 5,
+ active_workers
+ );
}
static void test_6_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 6;
unsigned long counter = 0;
SMP_lock_Control lock;
SMP_lock_Context lock_context;
_SMP_lock_Initialize(&lock, "local");
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_lock_Acquire(&lock, &lock_context);
/* The counter value is not interesting, only the access to it */
@@ -275,17 +372,34 @@ static void test_6_body(
_SMP_lock_Destroy(&lock);
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_6_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "LocalTicketLockWithGlobalCounter",
+ 6,
+ active_workers
+ );
}
static void test_7_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 7;
unsigned long counter = 0;
#if defined(RTEMS_PROFILING)
SMP_lock_Stats stats;
@@ -296,7 +410,7 @@ static void test_7_body(
_SMP_lock_Stats_initialize(&stats, "local");
_SMP_MCS_lock_Initialize(&lock);
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_MCS_lock_Acquire(&lock, &lock_context, &stats);
/* The counter value is not interesting, only the access to it */
@@ -309,7 +423,23 @@ static void test_7_body(
_SMP_MCS_lock_Destroy(&lock);
_SMP_lock_Stats_destroy(&stats);
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_7_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "LocalMCSLockWithGlobalCounter",
+ 7,
+ active_workers
+ );
}
static void busy_section(void)
@@ -322,60 +452,95 @@ static void busy_section(void)
}
static void test_8_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 8;
unsigned long counter = 0;
SMP_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_lock_Acquire(&ctx->lock, &lock_context);
busy_section();
_SMP_lock_Release(&ctx->lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_8_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalTicketLockWithBusySection",
+ 8,
+ active_workers
+ );
}
static void test_9_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 9;
unsigned long counter = 0;
SMP_MCS_lock_Context lock_context;
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
_SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
busy_section();
_SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
++counter;
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
+}
+
+static void test_9_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "GlobalMCSLockWithBusySection",
+ 9,
+ active_workers
+ );
}
static void test_10_body(
- int test,
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
)
{
+ test_context *ctx = (test_context *) base;
+ size_t test = 10;
unsigned long counter = 0;
unsigned long seq;
- if (cpu_self == 0) {
- while (assert_state(ctx, START_TEST)) {
+ if (rtems_test_parallel_is_master_worker(worker_index)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
seq = _SMP_sequence_lock_Write_begin(&ctx->seq_lock);
ctx->a = counter;
@@ -386,7 +551,7 @@ static void test_10_body(
++counter;
}
} else {
- while (assert_state(ctx, START_TEST)) {
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
unsigned long a;
unsigned long b;
@@ -403,129 +568,92 @@ static void test_10_body(
}
}
- ctx->test_counter[test][cpu_self] = counter;
+ ctx->local_counter[active_workers - 1][test][worker_index] = counter;
}
-static const test_body test_bodies[TEST_COUNT] = {
- test_0_body,
- test_1_body,
- test_2_body,
- test_3_body,
- test_4_body,
- test_5_body,
- test_6_body,
- test_7_body,
- test_8_body,
- test_9_body,
- test_10_body
-};
-
-static void run_tests(
- global_context *ctx,
- SMP_barrier_State *bs,
- unsigned int cpu_count,
- unsigned int cpu_self,
- bool master
+static void test_10_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
)
{
- int test;
-
- for (test = 0; test < TEST_COUNT; ++test) {
- _SMP_barrier_Wait(&ctx->barrier, bs, cpu_count);
-
- if (master) {
- rtems_status_code sc = rtems_timer_fire_after(
- ctx->timer_id,
- ctx->timeout,
- stop_test_timer,
- ctx
- );
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- _Atomic_Store_uint(&ctx->state, START_TEST, ATOMIC_ORDER_RELEASE);
- }
-
- wait_for_state(ctx, START_TEST);
-
- (*test_bodies[test])(test, ctx, bs, cpu_count, cpu_self);
- }
-
- _SMP_barrier_Wait(&ctx->barrier, bs, cpu_count);
+ test_context *ctx = (test_context *) base;
+
+ test_fini(
+ ctx,
+ "SequenceLock",
+ 10,
+ active_workers
+ );
}
-static void task(rtems_task_argument arg)
-{
- global_context *ctx = (global_context *) arg;
- uint32_t cpu_count = rtems_get_processor_count();
- uint32_t cpu_self = rtems_get_current_processor();
- rtems_status_code sc;
- SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
-
- run_tests(ctx, &bs, cpu_count, cpu_self, false);
-
- sc = rtems_task_suspend(RTEMS_SELF);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-}
+static const rtems_test_parallel_job test_jobs[TEST_COUNT] = {
+ {
+ .init = test_init,
+ .body = test_0_body,
+ .fini = test_0_fini,
+ .cascade = true
+ }, {
+ .init = test_init,
+ .body = test_1_body,
+ .fini = test_1_fini,
+ .cascade = true
+ }, {
+ .init = test_init,
+ .body = test_2_body,
+ .fini = test_2_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_3_body,
+ .fini = test_3_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_4_body,
+ .fini = test_4_fini,
+ .cascade = true
+ }, {
+ .init = test_init,
+ .body = test_5_body,
+ .fini = test_5_fini,
+ .cascade = true
+ }, {
+ .init = test_init,
+ .body = test_6_body,
+ .fini = test_6_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_7_body,
+ .fini = test_7_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_8_body,
+ .fini = test_8_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_9_body,
+ .fini = test_9_fini,
+ .cascade = false
+ }, {
+ .init = test_init,
+ .body = test_10_body,
+ .fini = test_10_fini,
+ .cascade = false
+ }
+};
static void test(void)
{
- global_context *ctx = &context;
- uint32_t cpu_count = rtems_get_processor_count();
- uint32_t cpu_self = rtems_get_current_processor();
- uint32_t cpu;
- int test;
- rtems_status_code sc;
- SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
-
- for (cpu = 0; cpu < cpu_count; ++cpu) {
- if (cpu != cpu_self) {
- rtems_id task_id;
-
- sc = rtems_task_create(
- rtems_build_name('T', 'A', 'S', 'K'),
- TASK_PRIORITY,
- RTEMS_MINIMUM_STACK_SIZE,
- RTEMS_DEFAULT_MODES,
- RTEMS_DEFAULT_ATTRIBUTES,
- &task_id
- );
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- sc = rtems_task_start(task_id, task, (rtems_task_argument) ctx);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- }
- }
-
- ctx->timeout = 5 * rtems_clock_get_ticks_per_second();
-
- sc = rtems_timer_create(rtems_build_name('T', 'I', 'M', 'R'), &ctx->timer_id);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ test_context *ctx = &test_instance;
+ const char *test = "SMPLock01";
- run_tests(ctx, &bs, cpu_count, cpu_self, true);
-
- for (test = 0; test < TEST_COUNT; ++test) {
- unsigned long sum = 0;
-
- printf("%s\n", test_names[test]);
-
- for (cpu = 0; cpu < cpu_count; ++cpu) {
- unsigned long local_counter = ctx->test_counter[test][cpu];
-
- sum += local_counter;
-
- printf(
- "\tprocessor %" PRIu32 ", local counter %lu\n",
- cpu,
- local_counter
- );
- }
-
- printf(
- "\tglobal counter %lu, sum of local counter %lu\n",
- ctx->counter[test],
- sum
- );
- }
+ printf("<%s>\n", test);
+ rtems_test_parallel(&ctx->base, NULL, &test_jobs[0], TEST_COUNT);
+ printf("</%s>\n", test);
}
static void Init(rtems_task_argument arg)