path: root/cpukit/include
author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2019-04-11 13:47:50 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2019-04-12 09:44:44 +0200
commit     e97b7c9a7af2f4e19a8bdeaf13033617f8c4c2b6 (patch)
tree       579598cfce50f24f2d7450e0c54f1eea6de0a023 /cpukit/include
parent     score: Add _ISR_lock_Set_name() (diff)
download   rtems-e97b7c9a7af2f4e19a8bdeaf13033617f8c4c2b6.tar.bz2
score: Use an ISR lock for Per_CPU_Control::Lock
The use of a hand-crafted lock for Per_CPU_Control::Lock was necessary at some point in the SMP support development, but it is no longer justified.
Diffstat (limited to 'cpukit/include')
-rw-r--r--  cpukit/include/rtems/score/percpu.h            | 161
-rw-r--r--  cpukit/include/rtems/score/schedulersmpimpl.h  |  12
-rw-r--r--  cpukit/include/rtems/score/threadimpl.h        |   6
-rw-r--r--  cpukit/include/rtems/score/userextimpl.h       |  23
4 files changed, 98 insertions(+), 104 deletions(-)
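
The caller-visible change in the diff below is that _Per_CPU_Acquire() and _Per_CPU_Release() now take an ISR_lock_Context, like the other ISR lock operations. The following is a minimal sketch of the new calling pattern, modeled on the userextimpl.h and threadimpl.h hunks below and assuming an RTEMS_SMP build; the helper name _Example_add_help_request() and the <rtems/score/chainimpl.h> include for the unprotected chain operation are assumptions, not part of the patch:

#include <rtems/score/percpu.h>
#include <rtems/score/chainimpl.h>

/* Hypothetical helper: appends a node to a processor's
 * Threads_in_need_for_help chain under the new per-CPU ISR lock. */
static void _Example_add_help_request(
  Per_CPU_Control *cpu,
  Chain_Node      *node
)
{
  ISR_lock_Context lock_context;

  /* Disable interrupts, then acquire the per-CPU lock; before this patch
   * this was _Per_CPU_Acquire( cpu ) on a hand-crafted SMP ticket lock. */
  _ISR_lock_ISR_disable( &lock_context );
  _Per_CPU_Acquire( cpu, &lock_context );

  _Chain_Append_unprotected( &cpu->Threads_in_need_for_help, node );

  _Per_CPU_Release( cpu, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
}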
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 8e33aaee4a..27f4e93a46 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -28,7 +28,6 @@
#include <rtems/score/chain.h>
#include <rtems/score/isrlock.h>
#include <rtems/score/smp.h>
- #include <rtems/score/smplock.h>
#include <rtems/score/timestamp.h>
#include <rtems/score/watchdog.h>
#endif
@@ -333,7 +332,7 @@ typedef struct Per_CPU_Control {
*
* It is volatile since interrupts may alter this flag.
*
- * This field is not protected by a lock and must be accessed only by this
+ * This member is not protected by a lock and must be accessed only by this
* processor. Code (e.g. scheduler and post-switch action requests) running
* on another processors must use an inter-processor interrupt to set the
* thread dispatch necessary indicator to true.
@@ -352,7 +351,8 @@ typedef struct Per_CPU_Control {
/**
* @brief This is the thread executing on this processor.
*
- * This field is not protected by a lock. The only writer is this processor.
+ * This member is not protected by a lock. The only writer is this
+ * processor.
*
* On SMP configurations a thread may be registered as executing on more than
* one processor in case a thread migration is in progress. On SMP
@@ -364,10 +364,10 @@ typedef struct Per_CPU_Control {
/**
* @brief This is the heir thread for this processor.
*
- * This field is not protected by a lock. The only writer after multitasking
- * start is the scheduler owning this processor. It is assumed that stores
- * to pointers are atomic on all supported SMP architectures. The CPU port
- * specific code (inter-processor interrupt handling and
+ * This member is not protected by a lock. The only writer after
+ * multitasking start is the scheduler owning this processor. It is assumed
+ * that stores to pointers are atomic on all supported SMP architectures.
+ * The CPU port specific code (inter-processor interrupt handling and
* _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
* last value written.
*
@@ -418,39 +418,31 @@ typedef struct Per_CPU_Control {
#if defined( RTEMS_SMP )
/**
- * @brief This lock protects some parts of the low-level thread dispatching.
+ * @brief This lock protects some members of this structure.
+ */
+ ISR_lock_Control Lock;
+
+ /**
+ * @brief Lock context used to acquire all per-CPU locks.
*
- * We must use a ticket lock here since we cannot transport a local context
- * through the context switch.
+ * This member is protected by the Per_CPU_Control::Lock lock.
*
- * @see _Thread_Dispatch().
+ * @see _Per_CPU_Acquire_all().
*/
- SMP_ticket_lock_Control Lock;
-
- #if defined( RTEMS_PROFILING )
- /**
- * @brief Lock statistics for the per-CPU lock.
- */
- SMP_lock_Stats Lock_stats;
-
- /**
- * @brief Lock statistics context for the per-CPU lock.
- */
- SMP_lock_Stats_context Lock_stats_context;
- #endif
+ ISR_lock_Context Lock_context;
/**
* @brief Chain of threads in need for help.
*
- * This field is protected by the Per_CPU_Control::Lock lock.
+ * This member is protected by the Per_CPU_Control::Lock lock.
*/
Chain_Control Threads_in_need_for_help;
/**
* @brief Bit field for SMP messages.
*
- * This bit field is not protected locks. Atomic operations are used to
- * set and get the message bits.
+ * This member is not protected by locks. Atomic operations are used to
+ * set and get the message bits.
*/
Atomic_Ulong message;
@@ -488,7 +480,7 @@ typedef struct Per_CPU_Control {
/**
* @brief Indicates the current state of the CPU.
*
- * This field is protected by the _Per_CPU_State_lock lock.
+ * This member is protected by the _Per_CPU_State_lock lock.
*
* @see _Per_CPU_State_change().
*/
@@ -539,62 +531,11 @@ typedef struct {
*/
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
-#if defined( RTEMS_SMP )
-#define _Per_CPU_Acquire( cpu ) \
- _SMP_ticket_lock_Acquire( \
- &( cpu )->Lock, \
- &( cpu )->Lock_stats, \
- &( cpu )->Lock_stats_context \
- )
-#else
-#define _Per_CPU_Acquire( cpu ) \
- do { \
- (void) ( cpu ); \
- } while ( 0 )
-#endif
-
-#if defined( RTEMS_SMP )
-#define _Per_CPU_Release( cpu ) \
- _SMP_ticket_lock_Release( \
- &( cpu )->Lock, \
- &( cpu )->Lock_stats_context \
- )
-#else
-#define _Per_CPU_Release( cpu ) \
- do { \
- (void) ( cpu ); \
- } while ( 0 )
-#endif
+#define _Per_CPU_Acquire( cpu, lock_context ) \
+ _ISR_lock_Acquire( &( cpu )->Lock, lock_context )
-#if defined( RTEMS_SMP )
-#define _Per_CPU_Acquire_all( isr_cookie ) \
- do { \
- uint32_t ncpus = _SMP_Get_processor_maximum(); \
- uint32_t cpu; \
- _ISR_Local_disable( isr_cookie ); \
- for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
- _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \
- } \
- } while ( 0 )
-#else
-#define _Per_CPU_Acquire_all( isr_cookie ) \
- _ISR_Local_disable( isr_cookie )
-#endif
-
-#if defined( RTEMS_SMP )
-#define _Per_CPU_Release_all( isr_cookie ) \
- do { \
- uint32_t ncpus = _SMP_Get_processor_maximum(); \
- uint32_t cpu; \
- for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \
- _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \
- } \
- _ISR_Local_enable( isr_cookie ); \
- } while ( 0 )
-#else
-#define _Per_CPU_Release_all( isr_cookie ) \
- _ISR_Local_enable( isr_cookie )
-#endif
+#define _Per_CPU_Release( cpu, lock_context ) \
+ _ISR_lock_Release( &( cpu )->Lock, lock_context )
/*
* If we get the current processor index in a context which allows thread
@@ -671,6 +612,60 @@ static inline bool _Per_CPU_Is_boot_processor(
#endif
}
+RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+ Per_CPU_Control *previous_cpu;
+
+ cpu_max = _SMP_Get_processor_maximum();
+ previous_cpu = _Per_CPU_Get_by_index( 0 );
+
+ _ISR_lock_ISR_disable( lock_context );
+ _Per_CPU_Acquire( previous_cpu, lock_context );
+
+ for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
+ previous_cpu = cpu;
+ }
+#else
+ _ISR_lock_ISR_disable( lock_context );
+#endif
+}
+
+RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+ Per_CPU_Control *cpu;
+
+ cpu_max = _SMP_Get_processor_maximum();
+ cpu = _Per_CPU_Get_by_index( cpu_max - 1 );
+
+ for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
+ Per_CPU_Control *previous_cpu;
+
+ previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
+ _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
+ cpu = previous_cpu;
+ }
+
+ _Per_CPU_Release( cpu, lock_context );
+ _ISR_lock_ISR_enable( lock_context );
+#else
+ _ISR_lock_ISR_enable( lock_context );
+#endif
+}
+
#if defined( RTEMS_SMP )
/**
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index 0f4805d7f5..f7b5fcc1e2 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -566,13 +566,13 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
)
{
Thread_Control *victim_thread;
- ISR_lock_Context lock_context;
+ ISR_lock_Context scheduler_lock_context;
Per_CPU_Control *victim_cpu;
victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
- _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
+ _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
victim_cpu = _Thread_Get_CPU( victim_thread );
@@ -580,16 +580,18 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
_Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
if ( victim_thread->Scheduler.helping_nodes > 0 ) {
- _Per_CPU_Acquire( victim_cpu );
+ ISR_lock_Context per_cpu_lock_context;
+
+ _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
_Chain_Append_unprotected(
&victim_cpu->Threads_in_need_for_help,
&victim_thread->Scheduler.Help_node
);
- _Per_CPU_Release( victim_cpu );
+ _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
}
}
- _Thread_Scheduler_release_critical( victim_thread, &lock_context );
+ _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
_Scheduler_SMP_Allocate_processor(
context,
diff --git a/cpukit/include/rtems/score/threadimpl.h b/cpukit/include/rtems/score/threadimpl.h
index 2232d57dd0..75725aeb48 100644
--- a/cpukit/include/rtems/score/threadimpl.h
+++ b/cpukit/include/rtems/score/threadimpl.h
@@ -990,14 +990,16 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
Per_CPU_Control *cpu
)
{
- _Per_CPU_Acquire( cpu );
+ ISR_lock_Context lock_context;
+
+ _Per_CPU_Acquire( cpu, &lock_context );
if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
_Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
_Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
}
- _Per_CPU_Release( cpu );
+ _Per_CPU_Release( cpu, &lock_context );
}
#endif
diff --git a/cpukit/include/rtems/score/userextimpl.h b/cpukit/include/rtems/score/userextimpl.h
index b781773863..851e28c179 100644
--- a/cpukit/include/rtems/score/userextimpl.h
+++ b/cpukit/include/rtems/score/userextimpl.h
@@ -261,34 +261,29 @@ static inline void _User_extensions_Thread_switch(
node = _Chain_Immutable_first( chain );
if ( node != tail ) {
- Per_CPU_Control *cpu_self;
#if defined(RTEMS_SMP)
- ISR_Level level;
-#endif
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
cpu_self = _Per_CPU_Get();
-#if defined(RTEMS_SMP)
- _ISR_Local_disable( level );
-#endif
- _Per_CPU_Acquire( cpu_self );
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu_self, &lock_context );
-#if defined(RTEMS_SMP)
node = _Chain_Immutable_first( chain );
#endif
while ( node != tail ) {
- const User_extensions_Switch_control *extension =
- (const User_extensions_Switch_control *) node;
-
- (*extension->thread_switch)( executing, heir );
+ const User_extensions_Switch_control *extension;
+ extension = (const User_extensions_Switch_control *) node;
node = _Chain_Immutable_next( node );
+ (*extension->thread_switch)( executing, heir );
}
- _Per_CPU_Release( cpu_self );
#if defined(RTEMS_SMP)
- _ISR_Local_enable( level );
+ _Per_CPU_Release( cpu_self, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
#endif
}
}
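
For reference on the new _Per_CPU_Acquire_all() and _Per_CPU_Release_all() inline routines added to percpu.h above: they replace the old all-processor macros, acquire the per-CPU locks in ascending processor index order, and release them in reverse order. Below is a minimal usage sketch, assuming an RTEMS_SMP build; the function name _Example_scan_all_cpus() is hypothetical and not part of the patch:

#include <stdint.h>
#include <rtems/score/percpu.h>
#include <rtems/score/smp.h>

/* Hypothetical helper: visits every per-CPU structure while holding all
 * per-CPU locks with interrupts disabled. */
static void _Example_scan_all_cpus( void )
{
  ISR_lock_Context lock_context;
  uint32_t         cpu_index;
  uint32_t         cpu_max;

  /* Disables interrupts and acquires every per-CPU lock in index order */
  _Per_CPU_Acquire_all( &lock_context );

  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    Per_CPU_Control *cpu;

    cpu = _Per_CPU_Get_by_index( cpu_index );

    /* Members protected by Per_CPU_Control::Lock, for example
     * Threads_in_need_for_help, may be inspected here. */
    (void) cpu;
  }

  /* Releases the locks in reverse order and restores the interrupt state */
  _Per_CPU_Release_all( &lock_context );
}

Note the chaining visible in the percpu.h hunk: each held lock needs its own ISR_lock_Context, so _Per_CPU_Acquire_all() stores the context for processor N in the Lock_context member of processor N-1, which is already locked at that point.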