author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-11-26 11:51:34 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-11-27 10:33:31 +0100
commit     5bd822a77973ab1f9e28e747b760600158966733 (patch)
tree       cbfaa69e04e7fb6b7783d8ba95debd68905ec8e1
parent     smp: Fix scheduler helping protocol assertions (diff)
download   rtems-5bd822a77973ab1f9e28e747b760600158966733.tar.bz2
smp: Fix scheduler helping protocol
Ensure that scheduler nodes in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL helping state are always SCHEDULER_SMP_NODE_READY or SCHEDULER_SMP_NODE_SCHEDULED, so that the MrsP protocol properties hold.
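Stated as a checkable property, the fix guarantees the following. This is a minimal standalone C sketch, not part of the commit: the two enums mirror the names of the RTEMS score types but are local simplifications, and node_state_is_valid() is a hypothetical helper.

#include <assert.h>
#include <stdbool.h>

/* Local simplifications of the like-named RTEMS score enums */
typedef enum {
  SCHEDULER_HELP_YOURSELF,
  SCHEDULER_HELP_ACTIVE_OWNER,
  SCHEDULER_HELP_ACTIVE_RIVAL
} Help_state;

typedef enum {
  SCHEDULER_SMP_NODE_BLOCKED,
  SCHEDULER_SMP_NODE_READY,
  SCHEDULER_SMP_NODE_SCHEDULED
} SMP_node_state;

/*
 * The invariant restored by this commit: a node that takes part in the
 * helping protocol as active owner or active rival must never be blocked.
 */
static bool node_state_is_valid( Help_state help, SMP_node_state state )
{
  if (
    help == SCHEDULER_HELP_ACTIVE_OWNER
      || help == SCHEDULER_HELP_ACTIVE_RIVAL
  ) {
    return state == SCHEDULER_SMP_NODE_READY
      || state == SCHEDULER_SMP_NODE_SCHEDULED;
  }

  return true;
}

int main( void )
{
  assert( node_state_is_valid(
    SCHEDULER_HELP_ACTIVE_OWNER, SCHEDULER_SMP_NODE_SCHEDULED ) );
  assert( !node_state_is_valid(
    SCHEDULER_HELP_ACTIVE_RIVAL, SCHEDULER_SMP_NODE_BLOCKED ) );
  return 0;
}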
-rw-r--r--  cpukit/score/include/rtems/score/schedulerimpl.h  | 111
-rw-r--r--  testsuites/smptests/smpmrsp01/init.c              | 216
-rw-r--r--  testsuites/smptests/smpmrsp01/smpmrsp01.scn       |  22
3 files changed, 283 insertions(+), 66 deletions(-)
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index b262b91f84..31ae6d184d 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -949,9 +949,10 @@ void _Scheduler_Thread_change_resource_root(
/**
* @brief Use an idle thread for this scheduler node.
*
- * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle
- * thread for the scheduler node owned by itself in case it executes currently
- * using another scheduler node or in case it is in a blocking state.
+ * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
+ * helping state may use an idle thread for the scheduler node it owns in
+ * case it currently executes using another scheduler node or is in a
+ * blocking state.
*
* @param[in] context The scheduler instance context.
* @param[in] node The node which wants to use the idle thread.
@@ -965,7 +966,10 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
{
Thread_Control *idle = ( *get_idle_thread )( context );
- _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+ _Assert(
+ node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+ || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
+ );
_Assert( _Scheduler_Node_get_idle( node ) == NULL );
_Assert(
_Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
@@ -1009,6 +1013,8 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
_Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+ } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
+ _Scheduler_Use_idle_thread( context, node, get_idle_thread );
} else {
_Scheduler_Node_set_user( node, owner );
}
@@ -1072,6 +1078,9 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
* @brief Block this scheduler node.
*
* @param[in] context The scheduler instance context.
+ * @param[in] thread The thread which wants to get blocked referencing this
+ * node. This is not necessarily the user of this node in case the node
+ * participates in the scheduler helping protocol.
* @param[in] node The node which wants to get blocked.
* @param[in] is_scheduled This node is scheduled.
* @param[in] get_idle_thread Function to get an idle thread.
@@ -1087,23 +1096,46 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
Scheduler_Get_idle_thread get_idle_thread
)
{
- bool block;
- Thread_Control *old_user = _Scheduler_Node_get_user( node );
- Thread_Control *new_user = NULL;
+ Thread_Control *old_user;
+ Thread_Control *new_user;
- _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
- if ( is_scheduled ) {
- if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+ if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
+
+ return true;
+ }
+
+ new_user = NULL;
+
+ if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+ if ( is_scheduled ) {
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
+ old_user = thread;
new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
- } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
- Thread_Control *owner = _Scheduler_Node_get_owner( node );
+ }
+ } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+ if ( is_scheduled ) {
+ old_user = _Scheduler_Node_get_user( node );
- if ( thread == old_user && owner != old_user ) {
- new_user = owner;
- _Scheduler_Node_set_user( node, new_user );
+ if ( thread == old_user ) {
+ Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+ if (
+ thread != owner
+ && owner->Scheduler.state == THREAD_SCHEDULER_READY
+ ) {
+ new_user = owner;
+ _Scheduler_Node_set_user( node, new_user );
+ } else {
+ new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+ }
}
}
+ } else {
+ /* Not implemented, this is part of the OMIP support path. */
+ _Assert(0);
}
if ( new_user != NULL ) {
@@ -1112,13 +1144,9 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
_Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
_Thread_Set_CPU( new_user, cpu );
_Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
-
- block = false;
- } else {
- block = true;
}
- return block;
+ return false;
}
/**
@@ -1146,26 +1174,38 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
if ( is_scheduled ) {
Thread_Control *old_user = _Scheduler_Node_get_user( node );
Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+ Thread_Control *idle = _Scheduler_Release_idle_thread(
+ context,
+ node,
+ release_idle_thread
+ );
+ Thread_Control *owner = _Scheduler_Node_get_owner( node );
+ Thread_Control *new_user;
if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
- Thread_Control *idle = _Scheduler_Release_idle_thread(
- context,
- node,
- release_idle_thread
- );
-
_Assert( idle != NULL );
- (void) idle;
+ new_user = the_thread;
+ } else if ( idle != NULL ) {
+ _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+ new_user = the_thread;
+ } else if ( the_thread != owner ) {
+ _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+ _Assert( old_user != the_thread );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
+ new_user = the_thread;
+ _Scheduler_Node_set_user( node, new_user );
} else {
_Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
-
- _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
- _Scheduler_Node_set_user( node, the_thread );
+ _Assert( old_user != the_thread );
+ _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
+ new_user = NULL;
}
- _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
- _Thread_Set_CPU( the_thread, cpu );
- _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
+ if ( new_user != NULL ) {
+ _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Set_CPU( new_user, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
+ }
unblock = false;
} else {
@@ -1243,7 +1283,10 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
new_user = needs_help;
} else {
- _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+ _Assert(
+ node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+ || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
+ );
_Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
new_user = offers_help;
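Before the diff moves on to the test code, it may help to read the reworked _Scheduler_Block_node as a plain decision function. The sketch below is a self-contained model under stated assumptions: the Help_state and Block_outcome enums and the block_outcome() helper are hypothetical simplifications, not the score API, and the real routine additionally performs the heir/CPU handover.

#include <assert.h>
#include <stdbool.h>

typedef enum {
  HELP_YOURSELF,
  HELP_ACTIVE_OWNER,
  HELP_ACTIVE_RIVAL
} Help_state;

typedef enum {
  NODE_BLOCKS,      /* no helping: block the node together with the thread */
  NODE_KEEPS_USER,  /* node state unchanged */
  OWNER_TAKES_OVER, /* the ready owner inherits the scheduled node */
  IDLE_TAKES_OVER   /* an idle thread keeps the node scheduled */
} Block_outcome;

/*
 * Model of the decision made when a thread blocks on a node.
 * thread_is_user: the blocking thread is the node's current user.
 * owner_ready: the owner differs from the blocking thread and is in the
 * THREAD_SCHEDULER_READY state.
 */
static Block_outcome block_outcome(
  Help_state help,
  bool       is_scheduled,
  bool       thread_is_user,
  bool       owner_ready
)
{
  if ( help == HELP_YOURSELF ) {
    return NODE_BLOCKS;
  }

  if ( !is_scheduled ) {
    return NODE_KEEPS_USER;
  }

  if ( help == HELP_ACTIVE_OWNER ) {
    /* The owner blocks while using its own node: park an idle thread on it */
    return IDLE_TAKES_OVER;
  }

  /* HELP_ACTIVE_RIVAL */
  if ( !thread_is_user ) {
    return NODE_KEEPS_USER;
  }

  return owner_ready ? OWNER_TAKES_OVER : IDLE_TAKES_OVER;
}

int main( void )
{
  /* A blocking rival user hands the node to a ready, distinct owner */
  assert(
    block_outcome( HELP_ACTIVE_RIVAL, true, true, true ) == OWNER_TAKES_OVER
  );

  /* With the owner blocked as well, an idle thread takes over instead */
  assert(
    block_outcome( HELP_ACTIVE_RIVAL, true, true, false ) == IDLE_TAKES_OVER
  );

  return 0;
}

Note that only the SCHEDULER_HELP_YOURSELF case blocks the node; in both active helping states the node stays ready or scheduled, which is exactly the invariant stated in the commit message.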
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index b6727288cd..acda13fd1c 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -54,7 +54,8 @@ typedef struct {
typedef struct {
rtems_id main_task_id;
rtems_id migration_task_id;
- rtems_id high_task_id;
+ rtems_id low_task_id[2];
+ rtems_id high_task_id[2];
rtems_id timer_id;
rtems_id counting_sem_id;
rtems_id mrsp_ids[MRSP_COUNT];
@@ -68,7 +69,8 @@ typedef struct {
SMP_lock_Control switch_lock;
size_t switch_index;
switch_event switch_events[32];
- volatile bool run;
+ volatile bool high_run[2];
+ volatile bool low_run[2];
} test_context;
static test_context test_instance = {
@@ -717,9 +719,6 @@ static void test_mrsp_multiple_obtain(void)
sc = rtems_semaphore_delete(sem_c_id);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- change_prio(RTEMS_SELF, 2);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
static void run_task(rtems_task_argument arg)
@@ -753,8 +752,9 @@ static void ready_unlock_worker(rtems_task_argument arg)
/* Done (G) */
barrier(ctx, &barrier_state);
- rtems_task_suspend(RTEMS_SELF);
- rtems_test_assert(0);
+ while (true) {
+ /* Do nothing */
+ }
}
static void unblock_ready_timer(rtems_id timer_id, void *arg)
@@ -763,16 +763,16 @@ static void unblock_ready_timer(rtems_id timer_id, void *arg)
rtems_status_code sc;
sc = rtems_task_start(
- ctx->high_task_id,
+ ctx->high_task_id[0],
run_task,
- (rtems_task_argument) &ctx->run
+ (rtems_task_argument) &ctx->high_run[0]
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_suspend(ctx->high_task_id);
+ sc = rtems_task_suspend(ctx->high_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_resume(ctx->high_task_id);
+ sc = rtems_task_resume(ctx->high_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
/*
@@ -783,7 +783,7 @@ static void unblock_ready_timer(rtems_id timer_id, void *arg)
sc = rtems_event_transient_send(ctx->main_task_id);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_suspend(ctx->high_task_id);
+ sc = rtems_task_suspend(ctx->high_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
@@ -802,16 +802,37 @@ static void unblock_ready_owner(test_context *ctx)
sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- rtems_test_assert(!ctx->run);
+ rtems_test_assert(!ctx->high_run[0]);
}
-static void unblock_ready_rival(test_context *ctx)
+static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg)
{
+ test_context *ctx = arg;
rtems_status_code sc;
- SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
- sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+ sc = rtems_task_suspend(ctx->high_task_id[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->high_task_id[1]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg)
+{
+ test_context *ctx = arg;
+ rtems_status_code sc;
+
+ sc = rtems_task_suspend(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->high_task_id[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void various_block_unblock(test_context *ctx)
+{
+ rtems_status_code sc;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
/* Worker obtain (F) */
barrier(ctx, &barrier_state);
@@ -822,41 +843,143 @@ static void unblock_ready_rival(test_context *ctx)
sc = rtems_task_suspend(ctx->worker_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
+ sc = rtems_task_wake_after(2);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_resume(ctx->high_task_id);
+ sc = rtems_task_start(
+ ctx->high_task_id[1],
+ run_task,
+ (rtems_task_argument) &ctx->high_run[1]
+ );
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- while (!ctx->run) {
+ while (!ctx->high_run[1]) {
/* Do nothing */
}
sc = rtems_task_resume(ctx->worker_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_suspend(ctx->high_task_id);
+ /* Try to schedule a blocked active rival */
+
+ sc = rtems_task_suspend(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(rtems_get_current_processor() == 0);
+
+ /* Use node of the active rival */
+
+ sc = rtems_task_suspend(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->high_task_id[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(rtems_get_current_processor() == 1);
+
+ sc = rtems_task_suspend(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /*
+ * Try to schedule an active rival with an already scheduled active owner
+ * user.
+ */
+
+ sc = rtems_timer_fire_after(
+ ctx->timer_id,
+ 2,
+ unblock_owner_before_rival_timer,
+ ctx
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* This will take the processor away from us; the timer will help later */
+ sc = rtems_task_resume(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /*
+ * Try to schedule an active owner with an already scheduled active rival
+ * user.
+ */
+
+ sc = rtems_task_resume(ctx->high_task_id[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_timer_fire_after(
+ ctx->timer_id,
+ 2,
+ unblock_owner_after_rival_timer,
+ ctx
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* This will take the processor away from us; the timer will help later */
+ sc = rtems_task_resume(ctx->high_task_id[1]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(rtems_get_current_processor() == 0);
+
assert_prio(RTEMS_SELF, 4);
- /* Worker done (F) */
+ /* Worker done (G) */
barrier(ctx, &barrier_state);
}
-static void test_mrsp_unblock_ready(test_context *ctx)
+static void start_low_task(test_context *ctx, size_t i)
{
rtems_status_code sc;
- puts("test MrsP unblock ready");
+ sc = rtems_task_create(
+ rtems_build_name('L', 'O', 'W', '0' + i),
+ 5,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->low_task_id[i]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(
+ ctx->low_task_id[i],
+ run_task,
+ (rtems_task_argument) &ctx->low_run[i]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_various_block_and_unblock(test_context *ctx)
+{
+ rtems_status_code sc;
- ctx->run = false;
+ puts("test MrsP various block and unblock");
change_prio(RTEMS_SELF, 4);
+ reset_switch_events(ctx);
+
+ ctx->low_run[0] = false;
+ ctx->low_run[1] = false;
+ ctx->high_run[0] = false;
+ ctx->high_run[1] = false;
+
sc = rtems_semaphore_create(
rtems_build_name(' ', ' ', ' ', 'A'),
1,
@@ -870,16 +993,29 @@ static void test_mrsp_unblock_ready(test_context *ctx)
assert_prio(RTEMS_SELF, 4);
sc = rtems_task_create(
- rtems_build_name('H', 'I', 'G', 'H'),
+ rtems_build_name('H', 'I', 'G', '0'),
2,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_DEFAULT_ATTRIBUTES,
- &ctx->high_task_id
+ &ctx->high_task_id[0]
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_create(
+ rtems_build_name('H', 'I', 'G', '1'),
+ 2,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->high_task_id[1]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
rtems_build_name('W', 'O', 'R', 'K'),
4,
RTEMS_MINIMUM_STACK_SIZE,
@@ -892,28 +1028,46 @@ static void test_mrsp_unblock_ready(test_context *ctx)
sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
sc = rtems_timer_create(
rtems_build_name('T', 'I', 'M', 'R'),
&ctx->timer_id
);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ /* If these tasks run, we have a MrsP protocol violation */
+ start_low_task(ctx, 0);
+ start_low_task(ctx, 1);
+
unblock_ready_owner(ctx);
- unblock_ready_rival(ctx);
+ various_block_unblock(ctx);
+
+ rtems_test_assert(!ctx->low_run[0]);
+ rtems_test_assert(!ctx->low_run[1]);
+
+ print_switch_events(ctx);
sc = rtems_timer_delete(ctx->timer_id);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ sc = rtems_task_delete(ctx->high_task_id[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_delete(ctx->high_task_id[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
sc = rtems_task_delete(ctx->worker_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_task_delete(ctx->high_task_id);
+ sc = rtems_task_delete(ctx->low_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+ sc = rtems_task_delete(ctx->low_task_id[1]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- change_prio(RTEMS_SELF, 2);
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
@@ -1421,7 +1575,7 @@ static void Init(rtems_task_argument arg)
test_mrsp_unlock_order_error();
test_mrsp_deadlock_error(ctx);
test_mrsp_multiple_obtain();
- test_mrsp_unblock_ready(ctx);
+ test_mrsp_various_block_and_unblock(ctx);
test_mrsp_obtain_and_sleep_and_release(ctx);
test_mrsp_obtain_and_release_with_help(ctx);
test_mrsp_obtain_and_release(ctx);
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
index a32d71f402..1fc8b6b577 100644
--- a/testsuites/smptests/smpmrsp01/smpmrsp01.scn
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -5,7 +5,27 @@ test MrsP nested obtain error
test MrsP unlock order error
test MrsP deadlock error
test MrsP multiple obtain
-test MrsP unblock ready
+test MrsP various block and unblock
+[1] IDLE -> WORK (prio 4, node WORK)
+[0] MAIN -> IDLE (prio 3, node MAIN)
+[0] IDLE -> MAIN (prio 3, node MAIN)
+[1] WORK -> IDLE (prio 3, node WORK)
+[1] IDLE -> HIG1 (prio 2, node HIG1)
+[1] HIG1 -> IDLE (prio 3, node WORK)
+[1] IDLE -> HIG1 (prio 2, node HIG1)
+[1] HIG1 -> WORK (prio 3, node WORK)
+[1] WORK -> MAIN (prio 3, node WORK)
+[0] MAIN -> HIG0 (prio 2, node HIG0)
+[1] MAIN -> HIG1 (prio 2, node HIG1)
+[1] HIG1 -> WORK (prio 3, node WORK)
+[0] HIG0 -> MAIN (prio 3, node MAIN)
+[1] WORK -> MAIN (prio 3, node WORK)
+[0] MAIN -> HIG0 (prio 2, node HIG0)
+[1] MAIN -> HIG1 (prio 2, node HIG1)
+[1] HIG1 -> MAIN (prio 3, node WORK)
+[0] HIG0 -> IDLE (prio 4, node MAIN)
+[1] MAIN -> WORK (prio 3, node WORK)
+[0] IDLE -> MAIN (prio 4, node MAIN)
test MrsP obtain and sleep and release
[0] MAIN -> RUN (prio 2, node RUN)
[0] RUN -> MAIN (prio 1, node MAIN)
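Each line in the expected output above is one context switch recorded by the test: the CPU index in brackets, the previous and next heir threads, and the next heir's priority and scheduler node. A sketch of a formatter that produces lines of this shape, assuming a hypothetical switch_event record modeled on the test context (not the test's actual type):

#include <stdio.h>

/* Assumed shape of one recorded switch event; the real test stores
 * something similar in test_context::switch_events. */
typedef struct {
  unsigned    cpu_index;
  const char *executing; /* previous heir, e.g. "MAIN" */
  const char *heir;      /* next heir, e.g. "HIG0" */
  unsigned    heir_priority;
  const char *heir_node; /* name of the heir's scheduler node owner */
} switch_event;

static void print_switch_event( const switch_event *e )
{
  /* Produces lines like: [0] MAIN -> HIG0 (prio 2, node HIG0) */
  printf(
    "[%u] %s -> %s (prio %u, node %s)\n",
    e->cpu_index,
    e->executing,
    e->heir,
    e->heir_priority,
    e->heir_node
  );
}

int main( void )
{
  const switch_event e = { 0, "MAIN", "HIG0", 2, "HIG0" };
  print_switch_event( &e );
  return 0;
}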