author      Sebastian Huber <sebastian.huber@embedded-brains.de>  2019-04-05 08:16:05 +0200
committer   Sebastian Huber <sebastian.huber@embedded-brains.de>  2019-04-09 08:06:46 +0200
commit      f9219db2a95c8285d1be22092572b4236bfe6488 (patch)
tree        c168ac7fdf7458cea6d7146584b2adb38d47f6a1 /cpukit/libtest
parent      rtems: Add rtems_scheduler_get_processor() (diff)
download    rtems-f9219db2a95c8285d1be22092572b4236bfe6488.tar.bz2
rtems: Add rtems_scheduler_get_processor_maximum()
Add rtems_scheduler_get_processor_maximum() as a replacement for rtems_get_processor_count(). The rtems_get_processor_count() directive is a bit orphaned; adopt it into the Scheduler Manager. A plain count is also misleading, since the processor set may have gaps and the actual number of online processors may be less than the value returned by rtems_get_processor_count().

Update #3732.
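The processor maximum is an exclusive upper bound on processor indices, so it is the right size for per-processor tables even when some of those indices refer to processors that are not online. A minimal sketch of that idea, mirroring the load_contexts allocation changed below (the worker_slot type and the allocate_worker_slots() helper are hypothetical and not part of this patch):

#include <rtems.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical per-processor bookkeeping entry, for illustration only. */
typedef struct {
  rtems_id task_id;
} worker_slot;

static worker_slot *worker_slots;

static bool allocate_worker_slots(void)
{
  uint32_t cpu_max;

  /*
   * The processor maximum bounds the processor indices; some indices
   * below it may refer to processors which are not online.
   */
  cpu_max = rtems_scheduler_get_processor_maximum();
  worker_slots = calloc(cpu_max, sizeof(*worker_slots));
  return worker_slots != NULL;
}

Allocating cpu_max entries covers processor indices 0 through cpu_max - 1 regardless of gaps in the set of online processors.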
Diffstat (limited to 'cpukit/libtest')
-rw-r--r--  cpukit/libtest/t-test-rtems-measure.c  2
-rw-r--r--  cpukit/libtest/testparallel.c          6
2 files changed, 4 insertions, 4 deletions
diff --git a/cpukit/libtest/t-test-rtems-measure.c b/cpukit/libtest/t-test-rtems-measure.c
index 242855f19f..80b404f1ad 100644
--- a/cpukit/libtest/t-test-rtems-measure.c
+++ b/cpukit/libtest/t-test-rtems-measure.c
@@ -200,7 +200,7 @@ T_measure_runtime_create(const T_measure_runtime_config *config)
 	chunk_size *= 2;
-	load_count = rtems_get_processor_count();
+	load_count = rtems_scheduler_get_processor_maximum();
 	load_size = load_count * sizeof(ctx->load_contexts[0]);
 	ctx = malloc(sizeof(*ctx) + sample_size + load_size + chunk_size +
diff --git a/cpukit/libtest/testparallel.c b/cpukit/libtest/testparallel.c
index ea805a3cf4..4ff26650f5 100644
--- a/cpukit/libtest/testparallel.c
+++ b/cpukit/libtest/testparallel.c
@@ -60,8 +60,8 @@ static void run_tests(
   for (i = 0; i < job_count; ++i) {
     const rtems_test_parallel_job *job = &jobs[i];
-    size_t n = rtems_get_processor_count();
-    size_t j = job->cascade ? 0 : rtems_get_processor_count() - 1;
+    size_t n = rtems_scheduler_get_processor_maximum();
+    size_t j = job->cascade ? 0 : rtems_scheduler_get_processor_maximum() - 1;
     while (j < n) {
       size_t active_worker = j + 1;
@@ -133,7 +133,7 @@ void rtems_test_parallel(
   _Atomic_Init_ulong(&ctx->stop, 0);
   _SMP_barrier_Control_initialize(&ctx->barrier);
-  ctx->worker_count = rtems_get_processor_count();
+  ctx->worker_count = rtems_scheduler_get_processor_maximum();
   ctx->worker_ids[0] = rtems_task_self();
   ctx->jobs = jobs;
   ctx->job_count = job_count;
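For context on the run_tests() hunk above: a cascade job is executed repeatedly with 1, 2, ..., n active workers, while a non-cascade job is executed once with all n workers. A simplified sketch of that loop shape, with run_one_pass() as a hypothetical stand-in for the real job dispatch (it is not part of the RTEMS API):

#include <rtems.h>
#include <stdbool.h>
#include <stddef.h>

static void run_cascade(bool cascade, void (*run_one_pass)(size_t active_worker))
{
  size_t n = rtems_scheduler_get_processor_maximum();
  size_t j = cascade ? 0 : n - 1;

  while (j < n) {
    size_t active_worker = j + 1;

    /* A cascade job runs with 1, 2, ..., n workers; otherwise only with all n. */
    (*run_one_pass)(active_worker);
    ++j;
  }
}

With this patch, n is derived from the processor maximum, matching the worker_count set up in rtems_test_parallel().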