 cpukit/score/include/rtems/score/cpustdatomic.h     | 16 +++-------------
 cpukit/score/include/rtems/score/schedulersmpimpl.h | 10 ++++++----
 cpukit/score/src/threaddispatch.c                   | 13 ++++++++++++-
 3 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/cpukit/score/include/rtems/score/cpustdatomic.h b/cpukit/score/include/rtems/score/cpustdatomic.h
index 222ad78852..a663a06293 100644
--- a/cpukit/score/include/rtems/score/cpustdatomic.h
+++ b/cpukit/score/include/rtems/score/cpustdatomic.h
@@ -54,23 +54,13 @@ typedef atomic_uintptr_t Atomic_Pointer;
typedef atomic_flag Atomic_Flag;
/**
- * @brief the enumeration Atomic_Memory_barrier specifies the detailed regular
- * memory synchronization operations used in the atomic operation API
- * definitions.
+ * @brief Memory order according to ISO/IEC 9899:2011.
*/
typedef enum {
- /** no operation orders memory. */
ATOMIC_ORDER_RELAXED = memory_order_relaxed,
- /** a load operation performs an acquire operation on the affected memory
- * location. This flag guarantees that the effects of load operation are
- * completed before the effects of any later data accesses.
- */
ATOMIC_ORDER_ACQUIRE = memory_order_acquire,
- /** a store operation performs a release operation on the affected memory
- * location. This flag guarantee that all effects of all previous data
- * accesses are completed before the store operation takes place.
- */
- ATOMIC_ORDER_RELEASE = memory_order_release
+ ATOMIC_ORDER_RELEASE = memory_order_release,
+ ATOMIC_ORDER_SEQ_CST = memory_order_seq_cst
} Atomic_Order;
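
The enumeration now maps one-to-one onto the C11 memory orders, including the newly added sequentially consistent order. Below is a minimal sketch in plain ISO/IEC 9899:2011 <stdatomic.h> of what these orders mean in practice (standalone illustration, not RTEMS code; the ready/payload names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical flag/data pair to illustrate the orders. */
static atomic_bool ready = ATOMIC_VAR_INIT( false );
static atomic_uint payload = ATOMIC_VAR_INIT( 0 );

void produce( void )
{
  /* ATOMIC_ORDER_RELAXED: atomicity only, no ordering guarantees. */
  atomic_store_explicit( &payload, 42, memory_order_relaxed );

  /* ATOMIC_ORDER_SEQ_CST: release semantics plus membership in a
   * single total order over all sequentially consistent operations. */
  atomic_store_explicit( &ready, true, memory_order_seq_cst );
}

bool consume( unsigned *out )
{
  /* ATOMIC_ORDER_SEQ_CST also implies acquire semantics, so the
   * payload store above is visible once ready reads as true. */
  if ( atomic_load_explicit( &ready, memory_order_seq_cst ) ) {
    *out = atomic_load_explicit( &payload, memory_order_relaxed );
    return true;
  }

  return false;
}
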
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 61fbff534f..20c5d17ee7 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -92,12 +92,14 @@ static inline void _Scheduler_SMP_Allocate_processor(
_Thread_Set_CPU( heir, cpu_of_victim );
+ cpu_of_victim->heir = heir;
+
/*
- * FIXME: Here we need atomic store operations with a relaxed memory order.
- * The _CPU_SMP_Send_interrupt() will ensure that the change can be
- * observed consistently.
+ * It is critical that we first update the heir and then set the dispatch
+ * necessary indicator so that _Thread_Dispatch() cannot miss an update.
*/
- cpu_of_victim->heir = heir;
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
cpu_of_victim->dispatch_necessary = true;
if ( cpu_of_victim != cpu_of_executing ) {
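
The writer now publishes the heir with an explicit release fence between the two stores. A minimal standalone C11 sketch of this store-store pattern (heir_slot and dispatch_flag are hypothetical stand-ins for the per-CPU fields, not RTEMS identifiers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for cpu_of_victim->heir and
 * cpu_of_victim->dispatch_necessary. */
static atomic_uintptr_t heir_slot;
static atomic_bool dispatch_flag;

void signal_new_heir( uintptr_t heir )
{
  atomic_store_explicit( &heir_slot, heir, memory_order_relaxed );

  /* Counterpart of _Atomic_Fence( ATOMIC_ORDER_RELEASE ): the heir
   * store above cannot be reordered after the flag store below. */
  atomic_thread_fence( memory_order_release );

  atomic_store_explicit( &dispatch_flag, true, memory_order_relaxed );
}
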
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index 40a2efbb82..9e3f86e23b 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -97,9 +97,20 @@ void _Thread_Dispatch( void )
#else
while ( per_cpu->dispatch_necessary ) {
#endif
- heir = per_cpu->heir;
per_cpu->dispatch_necessary = false;
+
+#if defined( RTEMS_SMP )
+ /*
+ * It is critical that we first update the dispatch necessary indicator
+ * and then read the heir so that we don't miss an update by
+ * _Scheduler_SMP_Allocate_processor().
+ */
+ _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+#endif
+
+ heir = per_cpu->heir;
per_cpu->executing = heir;
+
#if defined( RTEMS_SMP )
executing->is_executing = false;
heir->is_executing = true;
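
On the reader side a release or acquire fence would not suffice: the pattern here is a store (dispatch_necessary = false) followed by a load (heir), and only a full barrier orders a store against a later load. A minimal standalone C11 sketch of the reader half, pairing with the writer sketch above (same hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Same hypothetical fields as in the writer sketch above. */
extern atomic_uintptr_t heir_slot;
extern atomic_bool dispatch_flag;

uintptr_t take_heir( void )
{
  uintptr_t heir = 0;

  while ( atomic_load_explicit( &dispatch_flag, memory_order_relaxed ) ) {
    atomic_store_explicit( &dispatch_flag, false, memory_order_relaxed );

    /* Counterpart of _Atomic_Fence( ATOMIC_ORDER_SEQ_CST ): a release
     * or acquire fence would not order the flag store above against
     * the heir load below; a full store-load barrier does. If the
     * writer publishes a new heir after the flag store, the flag is
     * observed again on the next loop iteration. */
    atomic_thread_fence( memory_order_seq_cst );

    heir = atomic_load_explicit( &heir_slot, memory_order_relaxed );
  }

  return heir;
}
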