author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-04-09 15:07:54 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-04-15 10:41:44 +0200
commit     c5831a3f9af11228dbdaabaf01f69d37e55684ef (patch)
tree       e9ddedd942f3f31d239820dfc4dbcef4cde0b09a /testsuites/sptests/spscheduler01
parent     rtems: Add task get/set scheduler (diff)
download   rtems-c5831a3f9af11228dbdaabaf01f69d37e55684ef.tar.bz2
score: Add clustered/partitioned scheduling
Clustered/partitioned scheduling helps to control the worst-case latencies in the system. The goal is to reduce the amount of shared state in the system and thus prevent lock contention. Modern multi-processor systems tend to have several layers of data and instruction caches. With clustered/partitioned scheduling it is possible to honour the cache topology of a system and thus avoid expensive cache synchronization traffic. We have clustered scheduling when the set of processors of a system is partitioned into non-empty pairwise-disjoint subsets; these subsets are called clusters. Clusters with a cardinality of one are partitions. Each cluster is owned by exactly one scheduler instance.
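As background for the diff below, here is a minimal sketch (not part of this commit) of how an application could bind a task to the scheduler instance that owns a particular cluster, building on the rtems_task_set_scheduler() directive added by the parent commit. The scheduler name 'W','O','R','K' and the helper function are hypothetical; the corresponding scheduler instance would have to be defined by the application configuration, and rtems_task_set_scheduler() is used in the two-argument form of this era of the API.

#include <rtems.h>

/*
 * Illustrative sketch only: move a task to the scheduler instance that
 * owns another cluster.  The scheduler name is an assumption and must
 * match an instance defined in the application configuration.
 */
static rtems_status_code move_task_to_cluster(rtems_id task_id)
{
  rtems_id scheduler_id;
  rtems_status_code sc;

  /* Look up the scheduler instance owning the target cluster by name */
  sc = rtems_scheduler_ident(
    rtems_build_name('W', 'O', 'R', 'K'),
    &scheduler_id
  );
  if (sc != RTEMS_SUCCESSFUL) {
    return sc;
  }

  /* Bind the task to that scheduler instance and thus to its cluster */
  return rtems_task_set_scheduler(task_id, scheduler_id);
}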
Diffstat (limited to 'testsuites/sptests/spscheduler01')
-rw-r--r--  testsuites/sptests/spscheduler01/init.c | 29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/testsuites/sptests/spscheduler01/init.c b/testsuites/sptests/spscheduler01/init.c
index 6c19abcfd0..30ea4ce8f9 100644
--- a/testsuites/sptests/spscheduler01/init.c
+++ b/testsuites/sptests/spscheduler01/init.c
@@ -35,6 +35,7 @@ static void test_task_get_set_affinity(void)
 {
 #if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
   rtems_id self_id = rtems_task_self();
+  rtems_id task_id;
   rtems_status_code sc;
   cpu_set_t cpusetone;
   cpu_set_t cpuset;
@@ -46,6 +47,16 @@ static void test_task_get_set_affinity(void)
   CPU_ZERO(&cpusetone);
   CPU_SET(0, &cpusetone);
+  sc = rtems_task_create(
+    rtems_build_name('T', 'A', 'S', 'K'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
   sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
   rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);
@@ -70,16 +81,19 @@ static void test_task_get_set_affinity(void)
   rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));
   sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
+  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+
+  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
+  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+
+  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-  sc = rtems_task_get_affinity(self_id, sizeof(cpuset), &cpuset);
+  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
   rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));
-  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
   cpusetbigone = CPU_ALLOC(big);
   rtems_test_assert(cpusetbigone != NULL);
@@ -89,12 +103,15 @@ static void test_task_get_set_affinity(void)
   CPU_ZERO_S(cpusetbigsize, cpusetbigone);
   CPU_SET_S(0, cpusetbigsize, cpusetbigone);
-  sc = rtems_task_get_affinity(RTEMS_SELF, cpusetbigsize, cpusetbig);
+  sc = rtems_task_get_affinity(task_id, cpusetbigsize, cpusetbig);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
   rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));
-  sc = rtems_task_set_affinity(RTEMS_SELF, cpusetbigsize, cpusetbig);
+  sc = rtems_task_set_affinity(task_id, cpusetbigsize, cpusetbig);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(task_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
   CPU_FREE(cpusetbig);
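For reference, a minimal sketch of the dynamically sized cpu_set_t pattern exercised at the end of the test above: allocate a set for a given processor count, pin a task to processor 0, and free the set again. The helper name, the processor choice, and the error handling are illustrative assumptions only.

#include <rtems.h>
#include <sys/cpuset.h>  /* guarded by __RTEMS_HAVE_SYS_CPUSET_H__ in the test */

static rtems_status_code pin_task_to_cpu0(rtems_id task_id, size_t cpu_count)
{
  size_t setsize = CPU_ALLOC_SIZE(cpu_count);
  cpu_set_t *set = CPU_ALLOC(cpu_count);
  rtems_status_code sc;

  if (set == NULL) {
    return RTEMS_NO_MEMORY;
  }

  /* Start from an empty set and add processor 0 only */
  CPU_ZERO_S(setsize, set);
  CPU_SET_S(0, setsize, set);

  sc = rtems_task_set_affinity(task_id, setsize, set);

  CPU_FREE(set);

  return sc;
}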