author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-06-12 14:37:57 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-06-12 16:13:26 +0200
commit    701dd96f598bd58a41884795ba5bf0b5da337d78 (patch)
tree      2d8374400a2622c19ad5215e0d1e150eebda1396 /cpukit/score/include/rtems/score/schedulersmpimpl.h
parent    bsp/realview-pbx-a9: Fix SMP startup (diff)
download  rtems-701dd96f598bd58a41884795ba5bf0b5da337d78.tar.bz2
score: PR2181: Add _Thread_Yield()
The _Scheduler_Yield() operation was called by the executing thread with thread dispatching disabled and interrupts enabled. The rtems_task_suspend() directive is explicitly allowed in ISRs:

http://rtems.org/onlinedocs/doc-current/share/rtems/html/c_user/Interrupt-Manager-Directives-Allowed-from-an-ISR.html#Interrupt-Manager-Directives-Allowed-from-an-ISR

Unlike the other scheduler operations, the locking was performed inside the operation. This led to the following race condition. Suppose an ISR suspends the executing thread right before the yield scheduler operation. The executing thread is then no longer in the set of ready threads. The typical scheduler operations do not check the thread state and would now extract the thread again and enqueue it. This corrupted the scheduler data structures.

Add _Thread_Yield() and perform the scheduler yield operation with interrupts disabled. This has a negligible effect on the interrupt latency.
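The _Thread_Yield() function itself is not part of the file shown below, so the following is only a minimal sketch of the intended pattern, assuming the classic _ISR_Disable()/_ISR_Enable() level macros, the _States_Is_ready() predicate, and a _Scheduler_Yield() helper taking the executing thread; the exact names and signatures in the tree may differ:

void _Thread_Yield( Thread_Control *executing )
{
  ISR_Level level;

  _ISR_Disable( level );

  /* Only yield if the thread is still ready; an ISR may have
     suspended it in the meantime, which is exactly the race
     this commit closes. */
  if ( _States_Is_ready( executing->current_state ) ) {
    _Scheduler_Yield( executing );
  }

  _ISR_Enable( level );
}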
Diffstat (limited to 'cpukit/score/include/rtems/score/schedulersmpimpl.h')
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h | 21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index b4126b5d6d..9d74434024 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -672,6 +672,27 @@ static inline void _Scheduler_SMP_Change_priority(
}
}
+static inline void _Scheduler_SMP_Yield(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue_fifo,
+ Scheduler_SMP_Enqueue enqueue_scheduled_fifo
+)
+{
+ Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
+
+ if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( thread );
+
+ ( *enqueue_scheduled_fifo )( context, thread );
+ } else {
+ ( *extract_from_ready )( context, thread );
+
+ ( *enqueue_fifo )( context, thread );
+ }
+}
+
static inline void _Scheduler_SMP_Insert_scheduled_lifo(
Scheduler_Context *context,
Thread_Control *thread
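
A concrete SMP scheduler wires its own helpers into _Scheduler_SMP_Yield(). The following usage sketch shows how a yield operation of, for example, the simple SMP scheduler might delegate to it; the _Scheduler_simple_SMP_* helper names and the yield operation signature are assumptions for illustration and are not part of this commit:

static void _Scheduler_simple_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  /* Delegate to the common SMP yield with this scheduler's
     extract and enqueue callbacks. */
  _Scheduler_SMP_Yield(
    context,
    thread,
    _Scheduler_simple_SMP_Extract_from_ready,
    _Scheduler_simple_SMP_Enqueue_fifo,
    _Scheduler_simple_SMP_Enqueue_scheduled_fifo
  );
}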