summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2015-11-10 17:23:12 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2015-11-17 07:47:48 +0100
commit72857659c1f8fc71da4a30833caab583a2cfac6d (patch)
tree43998ecf8c46c6f4fed92acc227d58bb8c8a42dd
parentFix interrupt epilogue for ARMv7-AR and PowerPC (diff)
downloadrtems-72857659c1f8fc71da4a30833caab583a2cfac6d.tar.bz2
score: Fix race condition on SMP
We must ensure that the Thread_Control::Wait information update is visible to the target thread before we update its wait flags; otherwise we may return out-of-date events or a wrong status. Close #2471.
-rw-r--r--cpukit/rtems/src/eventseize.c6
-rw-r--r--cpukit/rtems/src/eventsurrender.c55
-rw-r--r--cpukit/score/src/threadtimeout.c47
3 files changed, 67 insertions, 41 deletions
diff --git a/cpukit/rtems/src/eventseize.c b/cpukit/rtems/src/eventseize.c
index a9290b38e8..9c41b777f9 100644
--- a/cpukit/rtems/src/eventseize.c
+++ b/cpukit/rtems/src/eventseize.c
@@ -100,6 +100,12 @@ void _Event_Seize(
_Thread_Set_state( executing, block_state );
+ /*
+ * See _Event_Surrender() and _Thread_Timeout(), corresponding atomic
+ * variable is Thread_Control::Wait::flags.
+ */
+ _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );
+
success = _Thread_Wait_flags_try_change(
executing,
intend_to_block,
diff --git a/cpukit/rtems/src/eventsurrender.c b/cpukit/rtems/src/eventsurrender.c
index 156586023d..6a14467b7d 100644
--- a/cpukit/rtems/src/eventsurrender.c
+++ b/cpukit/rtems/src/eventsurrender.c
@@ -34,6 +34,20 @@ static void _Event_Satisfy(
*(rtems_event_set *) the_thread->Wait.return_argument = seized_events;
}
+static bool _Event_Is_blocking_on_event(
+ const Thread_Control *the_thread,
+ Thread_Wait_flags wait_class
+)
+{
+ Thread_Wait_flags wait_flags;
+ Thread_Wait_flags wait_mask;
+
+ wait_flags = _Thread_Wait_flags_get( the_thread );
+ wait_mask = THREAD_WAIT_CLASS_MASK | THREAD_WAIT_STATE_READY_AGAIN;
+
+ return ( wait_flags & wait_mask ) == wait_class;
+}
+
static bool _Event_Is_satisfied(
const Thread_Control *the_thread,
rtems_event_set pending_events,
@@ -59,46 +73,43 @@ void _Event_Surrender(
ISR_lock_Context *lock_context
)
{
- rtems_event_set pending_events;
- rtems_event_set seized_events;
- Thread_Wait_flags wait_flags;
- bool unblock;
+ rtems_event_set pending_events;
+ rtems_event_set seized_events;
+ bool unblock;
_Thread_Lock_acquire_default_critical( the_thread, lock_context );
_Event_sets_Post( event_in, &event->pending_events );
pending_events = event->pending_events;
- wait_flags = _Thread_Wait_flags_get( the_thread );
-
if (
- ( wait_flags & THREAD_WAIT_CLASS_MASK ) == wait_class
+ _Event_Is_blocking_on_event( the_thread, wait_class )
&& _Event_Is_satisfied( the_thread, pending_events, &seized_events )
) {
- Thread_Wait_flags intend_to_block;
- Thread_Wait_flags blocked;
- bool success;
+ Thread_Wait_flags ready_again;
+ bool success;
+
+ _Event_Satisfy( the_thread, event, pending_events, seized_events );
- intend_to_block = wait_class | THREAD_WAIT_STATE_INTEND_TO_BLOCK;
- blocked = wait_class | THREAD_WAIT_STATE_BLOCKED;
+ /* See _Event_Seize() */
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+ ready_again = wait_class | THREAD_WAIT_STATE_READY_AGAIN;
success = _Thread_Wait_flags_try_change_critical(
the_thread,
- intend_to_block,
- wait_class | THREAD_WAIT_STATE_READY_AGAIN
+ wait_class | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
+ ready_again
);
+
if ( success ) {
- _Event_Satisfy( the_thread, event, pending_events, seized_events );
unblock = false;
- } else if ( _Thread_Wait_flags_get( the_thread ) == blocked ) {
- _Event_Satisfy( the_thread, event, pending_events, seized_events );
- _Thread_Wait_flags_set(
- the_thread,
- wait_class | THREAD_WAIT_STATE_READY_AGAIN
+ } else {
+      _Assert(
+        _Thread_Wait_flags_get( the_thread )
+          == ( wait_class | THREAD_WAIT_STATE_BLOCKED )
      );
+ _Thread_Wait_flags_set( the_thread, ready_again );
unblock = true;
- } else {
- unblock = false;
}
} else {
unblock = false;
diff --git a/cpukit/score/src/threadtimeout.c b/cpukit/score/src/threadtimeout.c
index f69bc35ea2..b3d01168df 100644
--- a/cpukit/score/src/threadtimeout.c
+++ b/cpukit/score/src/threadtimeout.c
@@ -39,35 +39,44 @@ void _Thread_Timeout( Objects_Id id, void *arg )
ISR_lock_Control *thread_lock;
ISR_lock_Context lock_context;
Thread_Wait_flags wait_flags;
- Thread_Wait_flags wait_class;
- Thread_Wait_flags intend_to_block;
- Thread_Wait_flags blocked;
- bool success;
bool unblock;
the_thread = arg;
thread_lock = _Thread_Lock_acquire( the_thread, &lock_context );
wait_flags = _Thread_Wait_flags_get( the_thread );
- wait_class = wait_flags & THREAD_WAIT_CLASS_MASK;
- intend_to_block = wait_class | THREAD_WAIT_STATE_INTEND_TO_BLOCK;
- blocked = wait_class | THREAD_WAIT_STATE_BLOCKED;
- success = _Thread_Wait_flags_try_change_critical(
- the_thread,
- intend_to_block,
- wait_class | THREAD_WAIT_STATE_READY_AGAIN
- );
- if ( success ) {
+ if ( ( wait_flags & THREAD_WAIT_STATE_READY_AGAIN ) == 0 ) {
+ Thread_Wait_flags wait_class;
+ Thread_Wait_flags ready_again;
+ bool success;
+
_Thread_Do_timeout( the_thread );
- unblock = false;
- } else if ( _Thread_Wait_flags_get( the_thread ) == blocked ) {
- _Thread_Wait_flags_set(
+
+ /*
+ * This fence is only necessary for the events, see _Event_Seize(). The
+ * thread queues use the thread lock for synchronization.
+ */
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
+ wait_class = wait_flags & THREAD_WAIT_CLASS_MASK;
+ ready_again = wait_class | THREAD_WAIT_STATE_READY_AGAIN;
+ success = _Thread_Wait_flags_try_change_critical(
the_thread,
- wait_class | THREAD_WAIT_STATE_READY_AGAIN
+ wait_class | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
+ ready_again
);
- _Thread_Do_timeout( the_thread );
- unblock = true;
+
+ if ( success ) {
+ unblock = false;
+ } else {
+      _Assert(
+        _Thread_Wait_flags_get( the_thread )
+          == ( wait_class | THREAD_WAIT_STATE_BLOCKED )
+      );
+ _Thread_Wait_flags_set( the_thread, ready_again );
+ unblock = true;
+ }
} else {
unblock = false;
}