path: root/cpukit/score/src/threaddispatch.c
author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2013-08-05 14:54:11 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2013-08-09 23:02:38 +0200
commit     d19cce29dcaffa7c679407bc211ee09c2d9dc40a (patch)
tree       2e0eb6211f0199680fcf9445b644a25495e53732 /cpukit/score/src/threaddispatch.c
parent     score: Add and use _Per_CPU_Acquire_all(). (diff)
download   rtems-d19cce29dcaffa7c679407bc211ee09c2d9dc40a.tar.bz2
score: Per-CPU thread dispatch disable level
Use a per-CPU thread dispatch disable level. Instead of one global thread dispatch disable level, there is now one instance per processor. This is a major performance improvement for SMP. On non-SMP configurations this may simplify the interrupt entry/exit code.

The giant lock is still present, but it is now decoupled from the thread dispatching in _Thread_Dispatch(), _Thread_Handler(), _Thread_Restart_self() and the interrupt entry/exit. Access to the giant lock is now available via _Giant_Acquire() and _Giant_Release(). The giant lock is still implicitly acquired via _Thread_Dispatch_decrement_disable_level(). The giant lock is only acquired for high-level operations in interrupt handlers (e.g. release of a semaphore, sending of an event).

As a side effect this change fixes the lost thread dispatch necessary indication bug in _Thread_Dispatch().

A per-CPU thread dispatch disable level greatly simplifies the SMP support for the interrupt entry/exit code, since no spin locks have to be acquired in this area. It is only necessary to get the current processor index and use it to calculate the address of the own per-CPU control. This reduces the interrupt latency considerably.

All elements for the interrupt entry/exit code are now part of the Per_CPU_Control structure: thread dispatch disable level, ISR nest level and thread dispatch necessary. Nothing else is required (except CPU port specific stuff like on SPARC).
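To make the scheme concrete, here is a minimal sketch of the idea described above. It is not the RTEMS implementation: the structure layout, the CPU count, and the helpers sketch_cpu_index() and sketch_interrupt_exit() are illustrative assumptions. The point it demonstrates is that each processor owns its disable level, so the interrupt epilogue only needs its own index to locate its control structure and takes no spin lock.

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_CPU_COUNT 4

    typedef struct {
      uint32_t      thread_dispatch_disable_level;
      uint32_t      isr_nest_level;
      volatile bool dispatch_necessary;
    } Sketch_Per_CPU;

    static Sketch_Per_CPU sketch_cpus[ SKETCH_CPU_COUNT ];

    /* Stand-in: a real port would read a CPU ID register here. */
    static uint32_t sketch_cpu_index( void )
    {
      return 0;
    }

    /* Interrupt epilogue: only the own per-CPU control is touched. */
    void sketch_interrupt_exit( void )
    {
      Sketch_Per_CPU *cpu = &sketch_cpus[ sketch_cpu_index() ];

      --cpu->isr_nest_level;

      if (
        cpu->isr_nest_level == 0
          && cpu->thread_dispatch_disable_level == 0
          && cpu->dispatch_necessary
      ) {
        /* A thread dispatch would start here. */
      }
    }

Because every field consulted on interrupt exit is local to the processor, no inter-processor synchronization is needed on this path, which is the interrupt latency improvement the commit message describes.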
Diffstat (limited to 'cpukit/score/src/threaddispatch.c')
-rw-r--r--  cpukit/score/src/threaddispatch.c  107
1 file changed, 50 insertions(+), 57 deletions(-)
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index c659f9fd67..3b5fb429cf 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -20,6 +20,7 @@
#include <rtems/score/threaddispatch.h>
#include <rtems/score/apiext.h>
+#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
@@ -28,60 +29,47 @@
void _Thread_Dispatch( void )
{
+ Per_CPU_Control *per_cpu;
Thread_Control *executing;
Thread_Control *heir;
ISR_Level level;
- #if defined(RTEMS_SMP)
- /*
- * WARNING: The SMP sequence has severe defects regarding the real-time
- * performance.
- *
- * Consider the following scenario. We have three tasks L (lowest
- * priority), M (middle priority), and H (highest priority). Now let a
- * thread dispatch from M to L happen. An interrupt occurs in
- * _Thread_Dispatch() here:
- *
- * void _Thread_Dispatch( void )
- * {
- * [...]
- *
- * post_switch:
- *
- * _ISR_Enable( level );
- *
- * <-- INTERRUPT
- * <-- AFTER INTERRUPT
- *
- * _Thread_Unnest_dispatch();
- *
- * _API_extensions_Run_post_switch();
- * }
- *
- * The interrupt event makes task H ready. The interrupt code will see
- * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
- * _Thread_Dispatch(). Now we return to position "AFTER INTERRUPT". This
- * means task L executes now although task H is ready! Task H will execute
- * once someone calls _Thread_Dispatch().
- */
- _Thread_Disable_dispatch();
- #else
- _Thread_Dispatch_set_disable_level( 1 );
- #endif
+#if defined( RTEMS_SMP )
+ _ISR_Disable( level );
+#endif
+
+ per_cpu = _Per_CPU_Get();
+ _Assert( per_cpu->thread_dispatch_disable_level == 0 );
+ per_cpu->thread_dispatch_disable_level = 1;
+
+#if defined( RTEMS_SMP )
+ _ISR_Enable( level );
+#endif
/*
* Now determine if we need to perform a dispatch on the current CPU.
*/
- executing = _Thread_Executing;
- _ISR_Disable( level );
- while ( _Thread_Dispatch_necessary == true ) {
- heir = _Thread_Heir;
- #if defined(RTEMS_SMP)
- executing->is_executing = false;
- heir->is_executing = true;
- #endif
- _Thread_Dispatch_necessary = false;
- _Thread_Executing = heir;
+ executing = per_cpu->executing;
+ _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
+#if defined( RTEMS_SMP )
+ /*
+ * On SMP the complete context switch must be atomic with respect to one
+ * processor. The scheduler must obtain the per-CPU lock to check if a
+ * thread is executing and to update the heir. This ensures that a thread
+ * cannot execute on more than one processor at a time. See also
+ * _Thread_Handler() since _Context_switch() may branch to this function.
+ */
+ if ( per_cpu->dispatch_necessary ) {
+#else
+ while ( per_cpu->dispatch_necessary ) {
+#endif
+ heir = per_cpu->heir;
+ per_cpu->dispatch_necessary = false;
+ per_cpu->executing = heir;
+#if defined( RTEMS_SMP )
+ executing->is_executing = false;
+ heir->is_executing = true;
+#endif
/*
* When the heir and executing are the same, then we are being
@@ -102,16 +90,18 @@ void _Thread_Dispatch( void )
if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
heir->cpu_time_budget = _Thread_Ticks_per_timeslice;
+#if !defined( RTEMS_SMP )
_ISR_Enable( level );
+#endif
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
_Thread_Update_cpu_time_used(
executing,
- &_Thread_Time_of_last_context_switch
+ &per_cpu->time_of_last_context_switch
);
#else
{
- _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
+ _TOD_Get_uptime( &per_cpu->time_of_last_context_switch );
heir->cpu_time_used++;
}
#endif
@@ -165,21 +155,24 @@ void _Thread_Dispatch( void )
#endif
#endif
- executing = _Thread_Executing;
+ /*
+ * We have to obtain these values again after the context switch since the
+ * heir thread may have migrated from another processor. Values from the
+ * stack or non-volatile registers reflect the old execution environment.
+ */
+ per_cpu = _Per_CPU_Get();
+ executing = per_cpu->executing;
+#if !defined( RTEMS_SMP )
_ISR_Disable( level );
+#endif
}
post_switch:
- #ifndef RTEMS_SMP
- _Thread_Dispatch_set_disable_level( 0 );
- #endif
-
- _ISR_Enable( level );
+ _Assert( per_cpu->thread_dispatch_disable_level == 1 );
+ per_cpu->thread_dispatch_disable_level = 0;
- #ifdef RTEMS_SMP
- _Thread_Unnest_dispatch();
- #endif
+ _Per_CPU_Release_and_ISR_enable( per_cpu, level );
_API_extensions_Run_post_switch( executing );
}
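The comment added before the second _Per_CPU_Get() call above deserves spelling out. A sketch of the hazard it guards against, under assumed helper names (own_per_cpu() and sketch_context_switch() stand in for the real primitives and are not the RTEMS API):

    #include <stdint.h>

    typedef struct {
      uint32_t thread_dispatch_disable_level;
    } Sketch_Control;

    static Sketch_Control sketch_controls[ 4 ];

    /* Stand-in for _Per_CPU_Get(): derives the index anew each call. */
    static Sketch_Control *own_per_cpu( void )
    {
      return &sketch_controls[ 0 ];
    }

    /*
     * Stand-in for _Context_switch(): on SMP the caller may resume on
     * another processor, invalidating per-CPU pointers held in locals.
     */
    static void sketch_context_switch( void )
    {
    }

    void sketch_dispatch_tail( void )
    {
      Sketch_Control *cpu = own_per_cpu(); /* valid on this processor */

      (void) cpu;
      sketch_context_switch();

      /* The stack still holds the pre-switch pointer; refresh it. */
      cpu = own_per_cpu();
      cpu->thread_dispatch_disable_level = 0;
    }

Note also the ordering in the new epilogue: the disable level is cleared and the per-CPU lock released before interrupts are re-enabled, so no window remains like the one the removed SMP warning comment described, where an interrupt between _ISR_Enable() and _Thread_Unnest_dispatch() could leave a ready higher-priority thread undispatched.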