author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2013-08-12 10:59:40 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2013-08-20 10:14:04 +0200
commit     fc2ad63203475ac93668b522c4364036a97e450d (patch)
tree       e5e4682e5e2af4f3623259dfd021da4ccc665988 /cpukit/score/include/rtems/score/schedulersmpimpl.h
parent     smp: Rename _Scheduler_simple_smp_Start_idle() (diff)
smp: _Scheduler_simple_smp_Allocate_processor()
Rename _Scheduler_simple_smp_Allocate_processor() to _Scheduler_SMP_Allocate_processor().
Diffstat (limited to 'cpukit/score/include/rtems/score/schedulersmpimpl.h')
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h  42
1 file changed, 42 insertions, 0 deletions
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 7e123e2c39..d23b0c0f00 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -41,6 +41,48 @@ static inline Scheduler_SMP_Control *_Scheduler_SMP_Instance( void )
return _Scheduler.information;
}
+static inline void _Scheduler_SMP_Allocate_processor(
+ Thread_Control *scheduled,
+ Thread_Control *victim
+)
+{
+ Per_CPU_Control *cpu_of_scheduled = scheduled->cpu;
+ Per_CPU_Control *cpu_of_victim = victim->cpu;
+ Thread_Control *heir;
+
+ scheduled->is_scheduled = true;
+ victim->is_scheduled = false;
+
+ _Per_CPU_Acquire( cpu_of_scheduled );
+
+ if ( scheduled->is_executing ) {
+ heir = cpu_of_scheduled->heir;
+ cpu_of_scheduled->heir = scheduled;
+ } else {
+ heir = scheduled;
+ }
+
+ _Per_CPU_Release( cpu_of_scheduled );
+
+ if ( heir != victim ) {
+ const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
+
+ heir->cpu = cpu_of_victim;
+
+ /*
+ * FIXME: Here we need atomic store operations with a relaxed memory order.
+ * The _CPU_SMP_Send_interrupt() will ensure that the change can be
+ * observed consistently.
+ */
+ cpu_of_victim->heir = heir;
+ cpu_of_victim->dispatch_necessary = true;
+
+ if ( cpu_of_victim != cpu_of_executing ) {
+ _Per_CPU_Send_interrupt( cpu_of_victim );
+ }
+ }
+}
+
/** @} */
#ifdef __cplusplus
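
For readers skimming the added helper, the following is a minimal, self-contained sketch of the heir handoff it performs: the newly scheduled thread takes over the processor of the victim thread. All names here (Cpu, Thread, allocate_processor) are simplified stand-ins and not the real RTEMS Per_CPU_Control and Thread_Control structures; the per-CPU lock acquisition, the relaxed atomic stores noted in the FIXME, and the inter-processor interrupt are omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for Per_CPU_Control and Thread_Control. */
    typedef struct Cpu Cpu;

    typedef struct Thread {
      const char *name;
      Cpu        *cpu;            /* processor currently associated with this thread */
      bool        is_scheduled;
      bool        is_executing;
    } Thread;

    struct Cpu {
      Thread *heir;               /* thread that will run after the next dispatch */
      bool    dispatch_necessary;
    };

    /* Sketch of the heir handoff done by _Scheduler_SMP_Allocate_processor(). */
    static void allocate_processor( Thread *scheduled, Thread *victim )
    {
      Cpu    *cpu_of_scheduled = scheduled->cpu;
      Cpu    *cpu_of_victim = victim->cpu;
      Thread *heir;

      scheduled->is_scheduled = true;
      victim->is_scheduled = false;

      if ( scheduled->is_executing ) {
        /* The scheduled thread still executes on its old processor: make it
         * heir there and let that processor's previous heir migrate to the
         * victim's processor instead. */
        heir = cpu_of_scheduled->heir;
        cpu_of_scheduled->heir = scheduled;
      } else {
        heir = scheduled;
      }

      if ( heir != victim ) {
        heir->cpu = cpu_of_victim;
        cpu_of_victim->heir = heir;
        cpu_of_victim->dispatch_necessary = true;  /* the real code may also send an IPI */
      }
    }

    int main( void )
    {
      Cpu cpu0 = { 0 }, cpu1 = { 0 };
      Thread idle = { "idle", &cpu0, true, true };
      Thread worker = { "worker", &cpu1, false, false };

      cpu0.heir = &idle;

      /* The worker preempts the idle thread and takes over cpu0. */
      allocate_processor( &worker, &idle );

      printf( "heir of cpu0: %s, dispatch necessary: %d\n",
              cpu0.heir->name, cpu0.dispatch_necessary );
      return 0;
    }

The point the sketch tries to make visible is the is_executing branch: when the scheduled thread is still running on its original processor, the heir roles are swapped so that the previously designated heir of that processor, rather than the scheduled thread itself, migrates to the victim's processor.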