Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/include/rtems/score/isrlock.h           110
-rw-r--r--  cpukit/score/include/rtems/score/percpu.h              78
-rw-r--r--  cpukit/score/include/rtems/score/smplock.h            137
-rw-r--r--  cpukit/score/include/rtems/score/threaddispatch.h       7
-rw-r--r--  cpukit/score/include/rtems/score/todimpl.h               8
-rw-r--r--  cpukit/score/src/coretodget.c                            6
-rw-r--r--  cpukit/score/src/coretodsecondssinceepoch.c              6
-rw-r--r--  cpukit/score/src/coretodset.c                            6
-rw-r--r--  cpukit/score/src/coretodtickle.c                         6
-rw-r--r--  cpukit/score/src/percpu.c                               22
-rw-r--r--  cpukit/score/src/smp.c                                   7
-rw-r--r--  cpukit/score/src/threaddispatchdisablelevel.c           30
-rw-r--r--  cpukit/score/src/threadhandler.c                         6
13 files changed, 280 insertions, 149 deletions
diff --git a/cpukit/score/include/rtems/score/isrlock.h b/cpukit/score/include/rtems/score/isrlock.h
index 56ff19b803..e118475968 100644
--- a/cpukit/score/include/rtems/score/isrlock.h
+++ b/cpukit/score/include/rtems/score/isrlock.h
@@ -51,12 +51,23 @@ extern "C" {
* @brief ISR lock control.
*/
typedef struct {
- #if defined( RTEMS_SMP )
- SMP_lock_Control lock;
- #endif
+#if defined( RTEMS_SMP )
+ SMP_lock_Control lock;
+#endif
} ISR_lock_Control;
/**
+ * @brief Local ISR lock context for acquire and release pairs.
+ */
+typedef struct {
+#if defined( RTEMS_SMP )
+ SMP_lock_Context lock_context;
+#else
+ ISR_Level isr_level;
+#endif
+} ISR_lock_Context;
+
+/**
* @brief Initializer for static initialization of ISR locks.
*/
#if defined( RTEMS_SMP )
@@ -72,17 +83,16 @@ typedef struct {
*
* Concurrent initialization leads to unpredictable results.
*
- * @param[in,out] _lock The ISR lock control.
+ * @param[in,out] lock The ISR lock control.
*/
+static inline void _ISR_lock_Initialize( ISR_lock_Control *lock )
+{
#if defined( RTEMS_SMP )
- #define _ISR_lock_Initialize( _lock ) \
- _SMP_lock_Initialize( &( _lock )->lock )
+ _SMP_lock_Initialize( &lock->lock );
#else
- #define _ISR_lock_Initialize( _lock ) \
- do { \
- (void) _lock; \
- } while (0)
+ (void) lock;
#endif
+}
/**
* @brief Acquires an ISR lock.
@@ -92,21 +102,24 @@ typedef struct {
*
* This function can be used in thread and interrupt context.
*
- * @param[in,out] _lock The ISR lock control.
- * @param[out] _isr_cookie The interrupt status to restore will be returned.
+ * @param[in,out] lock The ISR lock control.
+ * @param[in,out] context The local ISR lock context for an acquire and release
+ * pair.
*
* @see _ISR_lock_Release_and_ISR_enable().
*/
+static inline void _ISR_lock_ISR_disable_and_acquire(
+ ISR_lock_Control *lock,
+ ISR_lock_Context *context
+)
+{
#if defined( RTEMS_SMP )
- #define _ISR_lock_ISR_disable_and_acquire( _lock, _isr_cookie ) \
- _SMP_lock_ISR_disable_and_acquire( &( _lock )->lock, _isr_cookie )
+ _SMP_lock_ISR_disable_and_acquire( &lock->lock, &context->lock_context );
#else
- #define _ISR_lock_ISR_disable_and_acquire( _lock, _isr_cookie ) \
- do { \
- (void) _lock; \
- _ISR_Disable( _isr_cookie ); \
- } while (0)
+ (void) lock;
+ _ISR_Disable( context->isr_level );
#endif
+}
/**
* @brief Releases an ISR lock.
@@ -116,21 +129,24 @@ typedef struct {
*
* This function can be used in thread and interrupt context.
*
- * @param[in,out] _lock The ISR lock control.
- * @param[in] _isr_cookie The interrupt status to restore.
+ * @param[in,out] lock The ISR lock control.
+ * @param[in,out] context The local ISR lock context for an acquire and release
+ * pair.
*
* @see _ISR_lock_ISR_disable_and_acquire().
*/
+static inline void _ISR_lock_Release_and_ISR_enable(
+ ISR_lock_Control *lock,
+ ISR_lock_Context *context
+)
+{
#if defined( RTEMS_SMP )
- #define _ISR_lock_Release_and_ISR_enable( _lock, _isr_cookie ) \
- _SMP_lock_Release_and_ISR_enable( &( _lock )->lock, _isr_cookie )
+ _SMP_lock_Release_and_ISR_enable( &lock->lock, &context->lock_context );
#else
- #define _ISR_lock_Release_and_ISR_enable( _lock, _isr_cookie ) \
- do { \
- (void) _lock; \
- _ISR_Enable( _isr_cookie ); \
- } while (0)
+ (void) lock;
+ _ISR_Enable( context->isr_level );
#endif
+}
/**
* @brief Acquires an ISR lock inside an ISR disabled section.
@@ -142,19 +158,24 @@ typedef struct {
* interrupts and these interrupts enter the critical section protected by this
* lock, then the result is unpredictable.
*
- * @param[in,out] _lock The ISR lock control.
+ * @param[in,out] lock The ISR lock control.
+ * @param[in,out] context The local ISR lock context for an acquire and release
+ * pair.
*
* @see _ISR_lock_Release().
*/
+static inline void _ISR_lock_Acquire(
+ ISR_lock_Control *lock,
+ ISR_lock_Context *context
+)
+{
#if defined( RTEMS_SMP )
- #define _ISR_lock_Acquire( _lock ) \
- _SMP_lock_Acquire( &( _lock )->lock )
+ _SMP_lock_Acquire( &lock->lock, &context->lock_context );
#else
- #define _ISR_lock_Acquire( _lock ) \
- do { \
- (void) _lock; \
- } while (0)
+ (void) lock;
+ (void) context;
#endif
+}
/**
* @brief Releases an ISR lock inside an ISR disabled section.
@@ -162,19 +183,24 @@ typedef struct {
* The interrupt status will remain unchanged. On SMP configurations this
* function releases an SMP lock.
*
- * @param[in,out] _lock The ISR lock control.
+ * @param[in,out] lock The ISR lock control.
+ * @param[in,out] context The local ISR lock context for an acquire and release
+ * pair.
*
* @see _ISR_lock_Acquire().
*/
+static inline void _ISR_lock_Release(
+ ISR_lock_Control *lock,
+ ISR_lock_Context *context
+)
+{
#if defined( RTEMS_SMP )
- #define _ISR_lock_Release( _lock ) \
- _SMP_lock_Release( &( _lock )->lock )
+ _SMP_lock_Release( &lock->lock, &context->lock_context );
#else
- #define _ISR_lock_Release( _lock ) \
- do { \
- (void) _lock; \
- } while (0)
+ (void) lock;
+ (void) context;
#endif
+}
/** @} */
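
The reworked ISR lock API is used as an acquire/release pair with a caller-provided context. A minimal sketch, assuming a hypothetical lock example_lock and hypothetical functions example_init() and example_critical_section(); only the _ISR_lock_* calls come from the header above:

#include <rtems/score/isrlock.h>

/* Hypothetical lock protecting some shared state. */
static ISR_lock_Control example_lock;

static void example_init( void )
{
  _ISR_lock_Initialize( &example_lock );
}

static void example_critical_section( void )
{
  /* One local context per acquire/release pair, usually on the stack. */
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &example_lock, &lock_context );
  /* ... access the state protected by example_lock ... */
  _ISR_lock_Release_and_ISR_enable( &example_lock, &lock_context );
}

On uniprocessor configurations the context simply carries the ISR_Level, so the pair degenerates to _ISR_Disable()/_ISR_Enable().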
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index 4b7fd6241f..067cb84a29 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -23,9 +23,10 @@
#include <rtems/asm.h>
#else
#include <rtems/score/assert.h>
- #include <rtems/score/isrlock.h>
- #include <rtems/score/timestamp.h>
+ #include <rtems/score/isrlevel.h>
#include <rtems/score/smp.h>
+ #include <rtems/score/smplock.h>
+ #include <rtems/score/timestamp.h>
#endif
#ifdef __cplusplus
@@ -203,14 +204,23 @@ typedef struct {
/** This is the time of the last context switch on this CPU. */
Timestamp_Control time_of_last_context_switch;
- /**
- * @brief This lock protects the dispatch_necessary, executing, heir and
- * message fields.
- */
- ISR_lock_Control lock;
-
#if defined( RTEMS_SMP )
/**
+ * @brief This lock protects the dispatch_necessary, executing, heir and
+ * message fields.
+ *
+ * We must use a ticket lock here since we cannot transport a local context
+ * through the context switch.
+ */
+ SMP_ticket_lock_Control Lock;
+
+ /**
+ * @brief Context for the Giant lock acquire and release pair of this
+ * processor.
+ */
+ SMP_lock_Context Giant_lock_context;
+
+ /**
* This is the request for the interrupt.
*
* @note This may become a chain protected by atomic instructions.
@@ -247,17 +257,53 @@ typedef struct {
*/
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
-#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
- _ISR_lock_ISR_disable_and_acquire( &( per_cpu )->lock, isr_cookie )
-
-#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
- _ISR_lock_Release_and_ISR_enable( &( per_cpu )->lock, isr_cookie )
-
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Acquire( per_cpu ) \
+ _SMP_ticket_lock_Acquire( &( per_cpu )->Lock )
+#else
#define _Per_CPU_Acquire( per_cpu ) \
- _ISR_lock_Acquire( &( per_cpu )->lock )
+ do { \
+ (void) ( per_cpu ); \
+ } while ( 0 )
+#endif
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Release( per_cpu ) \
+ _SMP_ticket_lock_Release( &( per_cpu )->Lock )
+#else
#define _Per_CPU_Release( per_cpu ) \
- _ISR_lock_Release( &( per_cpu )->lock )
+ do { \
+ (void) ( per_cpu ); \
+ } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
+ do { \
+ _ISR_Disable_without_giant( isr_cookie ); \
+ _Per_CPU_Acquire( per_cpu ); \
+ } while ( 0 )
+#else
+#define _Per_CPU_ISR_disable_and_acquire( per_cpu, isr_cookie ) \
+ do { \
+ _ISR_Disable( isr_cookie ); \
+ (void) ( per_cpu ); \
+ } while ( 0 )
+#endif
+
+#if defined( RTEMS_SMP )
+#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
+ do { \
+ _Per_CPU_Release( per_cpu ); \
+ _ISR_Enable_without_giant( isr_cookie ); \
+ } while ( 0 )
+#else
+#define _Per_CPU_Release_and_ISR_enable( per_cpu, isr_cookie ) \
+ do { \
+ (void) ( per_cpu ); \
+ _ISR_Enable( isr_cookie ); \
+ } while ( 0 )
+#endif
#if defined( RTEMS_SMP )
#define _Per_CPU_Acquire_all( isr_cookie ) \
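
For the per-CPU lock a local context cannot be kept, since it would have to be transported through a context switch, so the reworked macros combine a plain ISR_Level cookie with the per-CPU ticket lock on SMP. A minimal sketch, assuming a hypothetical helper example_set_dispatch_necessary(); the _Per_CPU_* macros and the dispatch_necessary field are the ones from the header:

#include <rtems/score/percpu.h>
#include <stdbool.h>

/* Illustrative only: update a field protected by the per-CPU lock. */
static void example_set_dispatch_necessary(
  Per_CPU_Control *per_cpu,
  bool             value
)
{
  ISR_Level level;

  _Per_CPU_ISR_disable_and_acquire( per_cpu, level );
  per_cpu->dispatch_necessary = value;
  _Per_CPU_Release_and_ISR_enable( per_cpu, level );
}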
diff --git a/cpukit/score/include/rtems/score/smplock.h b/cpukit/score/include/rtems/score/smplock.h
index 25efbfa8f3..101aa0a6a1 100644
--- a/cpukit/score/include/rtems/score/smplock.h
+++ b/cpukit/score/include/rtems/score/smplock.h
@@ -42,54 +42,50 @@ extern "C" {
* The SMP lock is implemented as a ticket lock. This provides fairness in
* case of concurrent lock attempts.
*
- * This SMP lock API has a flaw. It does not provide the ability to use a
- * local context for acquire and release pairs. Such a context is necessary to
- * implement for example the Mellor-Crummey and Scott (MCS) locks. The SMP
- * lock is currently used in _Thread_Disable_dispatch() and
- * _Thread_Enable_dispatch() and makes them to a giant lock acquire and
- * release. Since these functions do not pass state information via a local
- * context there is currently no use case for such a feature.
+ * This SMP lock API uses a local context for acquire and release pairs. Such
+ * a context may be used to implement, for example, the Mellor-Crummey and
+ * Scott (MCS) locks in the future.
*
* @{
*/
/**
- * @brief SMP lock control.
+ * @brief SMP ticket lock control.
*/
typedef struct {
Atomic_Uint next_ticket;
Atomic_Uint now_serving;
-} SMP_lock_Control;
+} SMP_ticket_lock_Control;
/**
- * @brief SMP lock control initializer for static initialization.
+ * @brief SMP ticket lock control initializer for static initialization.
*/
-#define SMP_LOCK_INITIALIZER \
+#define SMP_TICKET_LOCK_INITIALIZER \
{ ATOMIC_INITIALIZER_UINT( 0U ), ATOMIC_INITIALIZER_UINT( 0U ) }
/**
- * @brief Initializes a SMP lock control.
+ * @brief Initializes an SMP ticket lock control.
*
* Concurrent initialization leads to unpredictable results.
*
- * @param[out] lock The SMP lock control.
+ * @param[out] lock The SMP ticket lock control.
*/
-static inline void _SMP_lock_Initialize( SMP_lock_Control *lock )
+static inline void _SMP_ticket_lock_Initialize( SMP_ticket_lock_Control *lock )
{
_Atomic_Init_uint( &lock->next_ticket, 0U );
_Atomic_Init_uint( &lock->now_serving, 0U );
}
/**
- * @brief Acquires a SMP lock.
+ * @brief Acquires an SMP ticket lock.
*
* This function will not disable interrupts. The caller must ensure that the
 * current thread of execution is not interrupted indefinitely once it has obtained
- * the SMP lock.
+ * the SMP ticket lock.
*
- * @param[in,out] lock The SMP lock control.
+ * @param[in,out] lock The SMP ticket lock control.
*/
-static inline void _SMP_lock_Acquire( SMP_lock_Control *lock )
+static inline void _SMP_ticket_lock_Acquire( SMP_ticket_lock_Control *lock )
{
unsigned int my_ticket =
_Atomic_Fetch_add_uint( &lock->next_ticket, 1U, ATOMIC_ORDER_RELAXED );
@@ -102,11 +98,11 @@ static inline void _SMP_lock_Acquire( SMP_lock_Control *lock )
}
/**
- * @brief Releases a SMP lock.
+ * @brief Releases an SMP ticket lock.
*
- * @param[in,out] lock The SMP lock control.
+ * @param[in,out] lock The SMP ticket lock control.
*/
-static inline void _SMP_lock_Release( SMP_lock_Control *lock )
+static inline void _SMP_ticket_lock_Release( SMP_ticket_lock_Control *lock )
{
unsigned int current_ticket =
_Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED );
@@ -116,28 +112,103 @@ static inline void _SMP_lock_Release( SMP_lock_Control *lock )
}
/**
+ * @brief SMP lock control.
+ */
+typedef struct {
+ SMP_ticket_lock_Control ticket_lock;
+} SMP_lock_Control;
+
+/**
+ * @brief Local SMP lock context for acquire and release pairs.
+ */
+typedef struct {
+ ISR_Level isr_level;
+} SMP_lock_Context;
+
+/**
+ * @brief SMP lock control initializer for static initialization.
+ */
+#define SMP_LOCK_INITIALIZER { SMP_TICKET_LOCK_INITIALIZER }
+
+/**
+ * @brief Initializes an SMP lock control.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param[out] lock The SMP lock control.
+ */
+static inline void _SMP_lock_Initialize( SMP_lock_Control *lock )
+{
+ _SMP_ticket_lock_Initialize( &lock->ticket_lock );
+}
+
+/**
+ * @brief Acquires an SMP lock.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely once it has obtained
+ * the SMP lock.
+ *
+ * @param[in,out] lock The SMP lock control.
+ * @param[in,out] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+static inline void _SMP_lock_Acquire(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ (void) context;
+ _SMP_ticket_lock_Acquire( &lock->ticket_lock );
+}
+
+/**
+ * @brief Releases an SMP lock.
+ *
+ * @param[in,out] lock The SMP lock control.
+ * @param[in,out] context The local SMP lock context for an acquire and release
+ * pair.
+ */
+static inline void _SMP_lock_Release(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ (void) context;
+ _SMP_ticket_lock_Release( &lock->ticket_lock );
+}
+
+/**
* @brief Disables interrupts and acquires the SMP lock.
*
* @param[in,out] lock The SMP lock control.
- * @param[out] isr_cookie The ISR cookie.
+ * @param[in,out] context The local SMP lock context for an acquire and release
+ * pair.
*/
-#define _SMP_lock_ISR_disable_and_acquire( lock, isr_cookie ) \
- do { \
- _ISR_Disable_without_giant( isr_cookie ); \
- _SMP_lock_Acquire( lock ); \
- } while (0)
+static inline void _SMP_lock_ISR_disable_and_acquire(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ _ISR_Disable_without_giant( context->isr_level );
+ _SMP_lock_Acquire( lock, context );
+}
/**
* @brief Releases the SMP lock and enables interrupts.
*
* @param[in,out] lock The SMP lock control.
- * @param[in] isr_cookie The ISR cookie.
+ * @param[in,out] context The local SMP lock context for an acquire and release
+ * pair.
*/
-#define _SMP_lock_Release_and_ISR_enable( lock, isr_cookie ) \
- do { \
- _SMP_lock_Release( lock ); \
- _ISR_Enable_without_giant( isr_cookie ); \
- } while (0)
+static inline void _SMP_lock_Release_and_ISR_enable(
+ SMP_lock_Control *lock,
+ SMP_lock_Context *context
+)
+{
+ _SMP_lock_Release( lock, context );
+ _ISR_Enable_without_giant( context->isr_level );
+}
/**@}*/
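
Callers of the general-purpose SMP lock now pass a local SMP_lock_Context as well. A minimal sketch, assuming a hypothetical lock example_lock and counter example_counter; SMP_LOCK_INITIALIZER and the _SMP_lock_* functions are the ones defined above:

#include <rtems/score/smplock.h>

/* Hypothetical lock guarding a shared counter. */
static SMP_lock_Control example_lock = SMP_LOCK_INITIALIZER;
static unsigned int example_counter;

static void example_increment( void )
{
  /* The context is local to this acquire/release pair. */
  SMP_lock_Context lock_context;

  _SMP_lock_ISR_disable_and_acquire( &example_lock, &lock_context );
  ++example_counter;
  _SMP_lock_Release_and_ISR_enable( &example_lock, &lock_context );
}

The plain SMP_ticket_lock_Control remains available for places like the per-CPU lock where no local context can be transported.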
diff --git a/cpukit/score/include/rtems/score/threaddispatch.h b/cpukit/score/include/rtems/score/threaddispatch.h
index 54786eb62b..5b25212de9 100644
--- a/cpukit/score/include/rtems/score/threaddispatch.h
+++ b/cpukit/score/include/rtems/score/threaddispatch.h
@@ -113,12 +113,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void )
*
* The thread dispatch disable level is not altered by this function.
*
- * The only use case for this operation is in
- * _SMP_Request_shutdown().
+ * The only use case for this operation is in _SMP_Request_shutdown().
*
- * @param[in] self_cpu The current processor index.
+ * @param[in] self_cpu The current processor.
*/
- void _Giant_Drop( uint32_t self_cpu );
+ void _Giant_Drop( Per_CPU_Control *self_cpu );
/**
* @brief Increments the thread dispatch level.
diff --git a/cpukit/score/include/rtems/score/todimpl.h b/cpukit/score/include/rtems/score/todimpl.h
index 097965cbc7..a710b0f41f 100644
--- a/cpukit/score/include/rtems/score/todimpl.h
+++ b/cpukit/score/include/rtems/score/todimpl.h
@@ -176,11 +176,11 @@ typedef struct {
SCORE_EXTERN TOD_Control _TOD;
-#define _TOD_Acquire( _tod, _isr_cookie ) \
- _ISR_lock_ISR_disable_and_acquire( &( _tod )->lock, _isr_cookie )
+#define _TOD_Acquire( _tod, lock_context ) \
+ _ISR_lock_ISR_disable_and_acquire( &( _tod )->lock, lock_context )
-#define _TOD_Release( _tod, _isr_cookie ) \
- _ISR_lock_Release_and_ISR_enable( &( _tod )->lock, _isr_cookie )
+#define _TOD_Release( _tod, lock_context ) \
+ _ISR_lock_Release_and_ISR_enable( &( _tod )->lock, lock_context )
/**
* @brief Initializes the time of day handler.
diff --git a/cpukit/score/src/coretodget.c b/cpukit/score/src/coretodget.c
index 50ca8a0029..6ddf86f18b 100644
--- a/cpukit/score/src/coretodget.c
+++ b/cpukit/score/src/coretodget.c
@@ -27,15 +27,15 @@ Timestamp_Control *_TOD_Get_with_nanoseconds(
)
{
TOD_Control *tod = &_TOD;
- ISR_Level level;
+ ISR_lock_Context lock_context;
Timestamp_Control offset;
Timestamp_Control now;
uint32_t nanoseconds;
- _TOD_Acquire( tod, level );
+ _TOD_Acquire( tod, &lock_context );
nanoseconds = ( *tod->nanoseconds_since_last_tick )();
now = *clock;
- _TOD_Release( tod, level );
+ _TOD_Release( tod, &lock_context );
_Timestamp_Set( &offset, 0, nanoseconds );
_Timestamp_Add_to( &now, &offset );
diff --git a/cpukit/score/src/coretodsecondssinceepoch.c b/cpukit/score/src/coretodsecondssinceepoch.c
index 91445c0c80..620ad3dd4b 100644
--- a/cpukit/score/src/coretodsecondssinceepoch.c
+++ b/cpukit/score/src/coretodsecondssinceepoch.c
@@ -21,12 +21,12 @@
uint32_t _TOD_Seconds_since_epoch( void )
{
TOD_Control *tod = &_TOD;
- ISR_Level level;
+ ISR_lock_Context lock_context;
Timestamp_Control now;
- _TOD_Acquire( tod, level );
+ _TOD_Acquire( tod, &lock_context );
now = tod->now;
- _TOD_Release( tod, level );
+ _TOD_Release( tod, &lock_context );
return _Timestamp_Get_seconds( &now );
}
diff --git a/cpukit/score/src/coretodset.c b/cpukit/score/src/coretodset.c
index c265606137..4262cf32b6 100644
--- a/cpukit/score/src/coretodset.c
+++ b/cpukit/score/src/coretodset.c
@@ -30,7 +30,7 @@ void _TOD_Set_with_timestamp(
uint32_t nanoseconds = _Timestamp_Get_nanoseconds( tod_as_timestamp );
Watchdog_Interval seconds_next = _Timestamp_Get_seconds( tod_as_timestamp );
Watchdog_Interval seconds_now;
- ISR_Level level;
+ ISR_lock_Context lock_context;
_Thread_Disable_dispatch();
@@ -41,9 +41,9 @@ void _TOD_Set_with_timestamp(
else
_Watchdog_Adjust_seconds( WATCHDOG_FORWARD, seconds_next - seconds_now );
- _TOD_Acquire( tod, level );
+ _TOD_Acquire( tod, &lock_context );
tod->now = *tod_as_timestamp;
- _TOD_Release( tod, level );
+ _TOD_Release( tod, &lock_context );
tod->seconds_trigger = nanoseconds;
tod->is_set = true;
diff --git a/cpukit/score/src/coretodtickle.c b/cpukit/score/src/coretodtickle.c
index c9f9597b1d..055b40c617 100644
--- a/cpukit/score/src/coretodtickle.c
+++ b/cpukit/score/src/coretodtickle.c
@@ -25,7 +25,7 @@
void _TOD_Tickle_ticks( void )
{
TOD_Control *tod = &_TOD;
- ISR_Level level;
+ ISR_lock_Context lock_context;
Timestamp_Control tick;
uint32_t nanoseconds_per_tick;
@@ -37,7 +37,7 @@ void _TOD_Tickle_ticks( void )
/* Update the counter of ticks since boot */
_Watchdog_Ticks_since_boot += 1;
- _TOD_Acquire( tod, level );
+ _TOD_Acquire( tod, &lock_context );
/* Update the uptime */
_Timestamp_Add_to( &tod->uptime, &tick );
@@ -45,7 +45,7 @@ void _TOD_Tickle_ticks( void )
/* Update the current TOD */
_Timestamp_Add_to( &tod->now, &tick );
- _TOD_Release( tod, level );
+ _TOD_Release( tod, &lock_context );
_TOD.seconds_trigger += nanoseconds_per_tick;
if ( _TOD.seconds_trigger >= 1000000000UL ) {
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 3a7a84518e..50e523941c 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -28,20 +28,6 @@
static SMP_lock_Control _Per_CPU_State_lock = SMP_LOCK_INITIALIZER;
-static ISR_Level _Per_CPU_State_acquire( void )
-{
- ISR_Level level;
-
- _SMP_lock_ISR_disable_and_acquire( &_Per_CPU_State_lock, level );
-
- return level;
-}
-
-static void _Per_CPU_State_release( ISR_Level level )
-{
- _SMP_lock_Release_and_ISR_enable( &_Per_CPU_State_lock, level );
-}
-
static void _Per_CPU_State_busy_wait(
const Per_CPU_Control *per_cpu,
Per_CPU_State new_state
@@ -126,12 +112,14 @@ void _Per_CPU_State_change(
Per_CPU_State new_state
)
{
- ISR_Level level;
+ SMP_lock_Control *lock = &_Per_CPU_State_lock;
+ SMP_lock_Context lock_context;
Per_CPU_State next_state;
_Per_CPU_State_busy_wait( per_cpu, new_state );
- level = _Per_CPU_State_acquire();
+ _SMP_lock_ISR_disable_and_acquire( lock, &lock_context );
+
next_state = _Per_CPU_State_get_next( per_cpu->state, new_state );
per_cpu->state = next_state;
@@ -159,7 +147,7 @@ void _Per_CPU_State_change(
_CPU_SMP_Processor_event_broadcast();
- _Per_CPU_State_release( level );
+ _SMP_lock_Release_and_ISR_enable( lock, &lock_context );
if (
next_state == PER_CPU_STATE_SHUTDOWN
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index e56073d281..40d2ac3088 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -32,7 +32,7 @@ void _SMP_Handler_initialize( void )
for ( cpu = 0 ; cpu < max_cpus; ++cpu ) {
Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
- _ISR_lock_Initialize( &per_cpu->lock );
+ _SMP_ticket_lock_Initialize( &per_cpu->Lock );
}
/*
@@ -69,8 +69,7 @@ void _SMP_Start_multitasking_on_secondary_processor( void )
void _SMP_Request_shutdown( void )
{
- uint32_t self = _SMP_Get_current_processor();
- Per_CPU_Control *self_cpu = _Per_CPU_Get_by_index( self );
+ Per_CPU_Control *self_cpu = _Per_CPU_Get();
_Per_CPU_State_change( self_cpu, PER_CPU_STATE_SHUTDOWN );
@@ -80,7 +79,7 @@ void _SMP_Request_shutdown( void )
 * If the executing thread still holds SMP locks, then other processors
 * already waiting for these SMP locks will spin forever.
*/
- _Giant_Drop( self );
+ _Giant_Drop( self_cpu );
}
void _SMP_Send_message( uint32_t cpu, uint32_t message )
diff --git a/cpukit/score/src/threaddispatchdisablelevel.c b/cpukit/score/src/threaddispatchdisablelevel.c
index b6eb49f48a..dc03a702a4 100644
--- a/cpukit/score/src/threaddispatchdisablelevel.c
+++ b/cpukit/score/src/threaddispatchdisablelevel.c
@@ -33,12 +33,13 @@ static Giant_Control _Giant = {
.nest_level = 0
};
-static void _Giant_Do_acquire( uint32_t self_cpu_index )
+static void _Giant_Do_acquire( Per_CPU_Control *self_cpu )
{
Giant_Control *giant = &_Giant;
+ uint32_t self_cpu_index = _Per_CPU_Get_index( self_cpu );
if ( giant->owner_cpu != self_cpu_index ) {
- _SMP_lock_Acquire( &giant->lock );
+ _SMP_lock_Acquire( &giant->lock, &self_cpu->Giant_lock_context );
giant->owner_cpu = self_cpu_index;
giant->nest_level = 1;
} else {
@@ -46,48 +47,47 @@ static void _Giant_Do_acquire( uint32_t self_cpu_index )
}
}
-static void _Giant_Do_release( void )
+static void _Giant_Do_release( Per_CPU_Control *self_cpu )
{
Giant_Control *giant = &_Giant;
--giant->nest_level;
if ( giant->nest_level == 0 ) {
giant->owner_cpu = NO_OWNER_CPU;
- _SMP_lock_Release( &giant->lock );
+ _SMP_lock_Release( &giant->lock, &self_cpu->Giant_lock_context );
}
}
-void _Giant_Drop( uint32_t self_cpu )
+void _Giant_Drop( Per_CPU_Control *self_cpu )
{
Giant_Control *giant = &_Giant;
+ uint32_t self_cpu_index = _Per_CPU_Get_index( self_cpu );
_Assert( _ISR_Get_level() != 0 );
- if ( giant->owner_cpu == self_cpu ) {
+ if ( giant->owner_cpu == self_cpu_index ) {
giant->nest_level = 0;
giant->owner_cpu = NO_OWNER_CPU;
- _SMP_lock_Release( &giant->lock );
+ _SMP_lock_Release( &giant->lock, &self_cpu->Giant_lock_context );
}
}
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
ISR_Level isr_level;
- uint32_t self_cpu_index;
uint32_t disable_level;
Per_CPU_Control *self_cpu;
_ISR_Disable_without_giant( isr_level );
/*
- * We must obtain the processor ID after interrupts are disabled to prevent
+ * We must obtain the processor after interrupts are disabled to prevent
* thread migration.
*/
- self_cpu_index = _SMP_Get_current_processor();
+ self_cpu = _Per_CPU_Get();
- _Giant_Do_acquire( self_cpu_index );
+ _Giant_Do_acquire( self_cpu );
- self_cpu = _Per_CPU_Get_by_index( self_cpu_index );
disable_level = self_cpu->thread_dispatch_disable_level;
++disable_level;
self_cpu->thread_dispatch_disable_level = disable_level;
@@ -110,7 +110,7 @@ uint32_t _Thread_Dispatch_decrement_disable_level( void )
--disable_level;
self_cpu->thread_dispatch_disable_level = disable_level;
- _Giant_Do_release();
+ _Giant_Do_release( self_cpu );
_Assert( disable_level != 0 || _Giant.owner_cpu == NO_OWNER_CPU );
_ISR_Enable_without_giant( isr_level );
@@ -124,7 +124,7 @@ void _Giant_Acquire( void )
_ISR_Disable_without_giant( isr_level );
_Assert( _Thread_Dispatch_disable_level != 0 );
- _Giant_Do_acquire( _SMP_Get_current_processor() );
+ _Giant_Do_acquire( _Per_CPU_Get() );
_ISR_Enable_without_giant( isr_level );
}
@@ -134,7 +134,7 @@ void _Giant_Release( void )
_ISR_Disable_without_giant( isr_level );
_Assert( _Thread_Dispatch_disable_level != 0 );
- _Giant_Do_release();
+ _Giant_Do_release( _Per_CPU_Get() );
_ISR_Enable_without_giant( isr_level );
}
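
On SMP the Giant lock now stores its acquire/release context in the per-CPU Giant_lock_context field and is recursive per processor. A hedged sketch of the intended nesting, assuming a hypothetical function example_giant_nesting(); only the _Thread_* and _Giant_* calls are from the score API:

#include <rtems/score/threaddispatch.h>

/* Illustrative only: nested Giant lock acquisition on one processor. */
static void example_giant_nesting( void )
{
  _Thread_Disable_dispatch();   /* acquires the Giant lock, nest level 1 */
  _Giant_Acquire();             /* same owner processor: nest level 2    */

  /* ... operate on structures protected by the Giant lock ... */

  _Giant_Release();             /* back to nest level 1                  */
  _Thread_Enable_dispatch();    /* releases the Giant lock               */
}

This matches _Giant_Do_acquire() above: a second acquire by the owning processor only increments nest_level, and the SMP lock itself is released when the nest level drops back to zero.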
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 4ecc789aff..161bb43a6b 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -58,8 +58,10 @@
#if defined(RTEMS_SMP)
static SMP_lock_Control constructor_lock = SMP_LOCK_INITIALIZER;
+ SMP_lock_Context lock_context;
+
if ( !doneConstructors ) {
- _SMP_lock_Acquire( &constructor_lock );
+ _SMP_lock_Acquire( &constructor_lock, &lock_context );
#endif
#if defined(RTEMS_MULTIPROCESSING)
@@ -74,7 +76,7 @@
#endif
#if defined(RTEMS_SMP)
- _SMP_lock_Release( &constructor_lock );
+ _SMP_lock_Release( &constructor_lock, &lock_context );
}
#endif