commit b6b25a9e68f2c34fe9cb5b69fc802b4b9f25d53b
Author:    Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-06-29 15:32:51 +0200
Committer: Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-06-30 07:57:33 +0200
Parent:    score: _CPU_SMP_Send_interrupt() documentation
score: Fix thread lock on SMP configurations
Diffstat (limited to 'cpukit/score/include/rtems/score'):
 cpukit/score/include/rtems/score/thread.h     | 16
 cpukit/score/include/rtems/score/threadimpl.h | 34
 2 files changed, 40 insertions(+), 10 deletions(-)
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 4d498e5f3f..46c222ff52 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -666,7 +666,19 @@ typedef struct {
* of the actual RTEMS build configuration, e.g. profiling enabled or
* disabled.
*/
- SMP_ticket_lock_Control *current;
+ union {
+ /**
+ * @brief The current thread lock as an atomic unsigned integer pointer value.
+ */
+ Atomic_Uintptr atomic;
+
+ /**
+ * @brief The current thread lock as a normal pointer.
+ *
+ * Only provided for debugging purposes.
+ */
+ SMP_ticket_lock_Control *normal;
+ } current;
/**
* @brief The default thread lock in case the thread is not blocked on a
@@ -680,7 +692,7 @@ typedef struct {
*
* These statistics are used by the executing thread in case it acquires a
* thread lock. Thus the statistics are an aggregation of acquire and
- * release operations of diffent locks.
+ * release operations of different locks.
*/
SMP_lock_Stats Stats;
#endif
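For readers unfamiliar with the pattern introduced above, here is a minimal standalone sketch in plain C11, using hypothetical stand-in types rather than the RTEMS Atomic_Uintptr and SMP_ticket_lock_Control: the lock pointer lives in an atomic integer of pointer width so it can be published with explicit memory ordering, while the plain-pointer member of the same union merely gives debuggers and assertions a readable view of the same storage.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-in for SMP_ticket_lock_Control. */
typedef struct {
  atomic_uint next_ticket;
  atomic_uint now_serving;
} ticket_lock;

typedef struct {
  union {
    /* Written and read with explicit memory ordering. */
    atomic_uintptr_t atomic;
    /* Same storage viewed as a plain pointer, for debugging only. */
    ticket_lock *normal;
  } current;

  /* The default lock, used while the thread is not blocked on a resource. */
  ticket_lock Default;
} thread_lock_control;

/* Start out with the default lock installed as the current lock. */
static inline void thread_lock_control_init( thread_lock_control *control )
{
  atomic_store_explicit(
    &control->current.atomic,
    (uintptr_t) &control->Default,
    memory_order_relaxed
  );
}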
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index c878266c58..a4e746957a 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -1118,33 +1118,47 @@ RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
)
{
#if defined(RTEMS_SMP)
- SMP_ticket_lock_Control *lock;
+ SMP_ticket_lock_Control *lock_0;
while ( true ) {
+ SMP_ticket_lock_Control *lock_1;
+
_ISR_lock_ISR_disable( lock_context );
/*
+ * We must use a load acquire here paired with the store release in
+ * _Thread_Lock_set_unprotected() to observe corresponding thread wait
+ * queue and thread wait operations.
+ *
* We assume that a normal load of pointer is identical to a relaxed atomic
* load. Here, we may read an out-of-date lock. However, only the owner
* of this out-of-date lock is allowed to set a new one. Thus, we read at
* least this new lock ...
*/
- lock = the_thread->Lock.current;
+ lock_0 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
+ &the_thread->Lock.current.atomic,
+ ATOMIC_ORDER_ACQUIRE
+ );
_SMP_ticket_lock_Acquire(
- lock,
+ lock_0,
&_Thread_Executing->Lock.Stats,
&lock_context->Lock_context.Stats_context
);
+ lock_1 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
+ &the_thread->Lock.current.atomic,
+ ATOMIC_ORDER_RELAXED
+ );
+
/*
* ... here, and so on.
*/
- if ( lock == the_thread->Lock.current ) {
- return lock;
+ if ( lock_0 == lock_1 ) {
+ return lock_0;
}
- _Thread_Lock_release( lock, lock_context );
+ _Thread_Lock_release( lock_0, lock_context );
}
#else
_ISR_Local_disable( lock_context->isr_level );
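The retry loop above is the heart of the fix: the acquire load pairs with the release store added to _Thread_Lock_set_unprotected() below, and the second, relaxed load after the lock has been obtained detects a concurrent change of the lock pointer, in which case the stale lock is dropped and the loop retries. A minimal standalone sketch of the same double-check pattern, assuming C11 atomics and a plain pthread mutex in place of the SMP ticket lock (all names here are illustrative, not RTEMS APIs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  /* Points at whichever mutex currently protects the thread's wait state. */
  atomic_uintptr_t current_lock;
} thread_like;

/*
 * Acquire the lock that is current for the thread.  The acquire load pairs
 * with the release store in set_current_lock(); the relaxed re-load after
 * locking catches the case where the owner of the old lock installed a new
 * one in the meantime, so the stale lock is released and the loop retries.
 */
static pthread_mutex_t *acquire_current_lock( thread_like *thread )
{
  while ( true ) {
    pthread_mutex_t *lock_0 = (pthread_mutex_t *) atomic_load_explicit(
      &thread->current_lock,
      memory_order_acquire
    );

    pthread_mutex_lock( lock_0 );

    pthread_mutex_t *lock_1 = (pthread_mutex_t *) atomic_load_explicit(
      &thread->current_lock,
      memory_order_relaxed
    );

    if ( lock_0 == lock_1 ) {
      return lock_0;
    }

    /* The lock pointer changed while we were waiting: retry. */
    pthread_mutex_unlock( lock_0 );
  }
}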
@@ -1163,7 +1177,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
SMP_ticket_lock_Control *new_lock
)
{
- the_thread->Lock.current = new_lock;
+ _Atomic_Store_uintptr(
+ &the_thread->Lock.current.atomic,
+ (uintptr_t) new_lock,
+ ATOMIC_ORDER_RELEASE
+ );
}
#endif
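The release store is the other half of the pairing; continuing the standalone sketch from the previous hunk: only the owner of the currently installed lock may publish a new one, and the release ordering makes the wait-queue updates performed before the switch visible to the acquire load in acquire_current_lock().

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_uintptr_t current_lock;   /* see the acquire_current_lock() sketch */
} thread_like;

/*
 * Publish a new current lock for the thread.  The caller must hold the lock
 * that is installed right now; the release store pairs with the acquire load
 * in acquire_current_lock(), so everything written before this call is
 * visible once the new lock pointer has been observed there.
 */
static void set_current_lock( thread_like *thread, pthread_mutex_t *new_lock )
{
  atomic_store_explicit(
    &thread->current_lock,
    (uintptr_t) new_lock,
    memory_order_release
  );
}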
@@ -1185,7 +1203,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
ISR_lock_Context lock_context;
_Thread_Lock_acquire_default_critical( the_thread, &lock_context );
- _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
+ _Assert( the_thread->Lock.current.normal == &the_thread->Lock.Default );
_Thread_Lock_set_unprotected( the_thread, new_lock );
_Thread_Lock_release_default_critical( the_thread, &lock_context );
}
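Putting the pieces together, a hypothetical caller in the same standalone style would install a wait-queue lock only while holding the default lock, mirroring the assertion in _Thread_Lock_set() above that the default lock is still the current one at that point:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_uintptr_t current_lock;
  pthread_mutex_t  default_lock;   /* plays the role of Lock.Default */
} thread_like;

/* Install a wait-queue lock as the thread's current lock. */
static void set_queue_lock( thread_like *thread, pthread_mutex_t *queue_lock )
{
  pthread_mutex_lock( &thread->default_lock );

  /* Only the default lock may be current when a new lock is installed. */
  assert(
    atomic_load_explicit( &thread->current_lock, memory_order_relaxed )
      == (uintptr_t) &thread->default_lock
  );

  /* Same release store as in set_current_lock() in the previous sketch. */
  atomic_store_explicit(
    &thread->current_lock,
    (uintptr_t) queue_lock,
    memory_order_release
  );

  pthread_mutex_unlock( &thread->default_lock );
}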