Diffstat (limited to 'cpukit/score/include/rtems')
 -rw-r--r--  cpukit/score/include/rtems/score/cpustdatomic.h     | 16
 -rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h | 10
 2 files changed, 9 insertions(+), 17 deletions(-)
diff --git a/cpukit/score/include/rtems/score/cpustdatomic.h b/cpukit/score/include/rtems/score/cpustdatomic.h
index 222ad78852..a663a06293 100644
--- a/cpukit/score/include/rtems/score/cpustdatomic.h
+++ b/cpukit/score/include/rtems/score/cpustdatomic.h
@@ -54,23 +54,13 @@ typedef atomic_uintptr_t Atomic_Pointer;
typedef atomic_flag Atomic_Flag;
/**
- * @brief the enumeration Atomic_Memory_barrier specifies the detailed regular
- * memory synchronization operations used in the atomic operation API
- * definitions.
+ * @brief Memory order according to ISO/IEC 9899:2011.
*/
typedef enum {
- /** no operation orders memory. */
ATOMIC_ORDER_RELAXED = memory_order_relaxed,
- /** a load operation performs an acquire operation on the affected memory
- * location. This flag guarantees that the effects of load operation are
- * completed before the effects of any later data accesses.
- */
ATOMIC_ORDER_ACQUIRE = memory_order_acquire,
- /** a store operation performs a release operation on the affected memory
- * location. This flag guarantee that all effects of all previous data
- * accesses are completed before the store operation takes place.
- */
- ATOMIC_ORDER_RELEASE = memory_order_release
+ ATOMIC_ORDER_RELEASE = memory_order_release,
+ ATOMIC_ORDER_SEQ_CST = memory_order_seq_cst
} Atomic_Order;
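The hunk above trims the verbose per-value comments, points at the ISO/IEC 9899:2011 (C11) memory orders instead, and adds the missing ATOMIC_ORDER_SEQ_CST member. Because each enumerator is defined directly from a <stdatomic.h> memory_order constant, a wrapper can forward the value straight to the standard fence and load/store operations. The following standalone sketch illustrates that mapping under a C11 toolchain; the Demo_Order, Demo_Fence, and main() harness names are hypothetical and only mirror the pattern, they are not the RTEMS API.

/* Sketch: an order enum aliasing the C11 memory orders, plus a fence wrapper. */
#include <stdatomic.h>
#include <stdio.h>

typedef enum {
  DEMO_ORDER_RELAXED = memory_order_relaxed,
  DEMO_ORDER_ACQUIRE = memory_order_acquire,
  DEMO_ORDER_RELEASE = memory_order_release,
  DEMO_ORDER_SEQ_CST = memory_order_seq_cst
} Demo_Order;

static inline void Demo_Fence( Demo_Order order )
{
  /* The enumerators are the C11 memory orders, so the cast is value-preserving. */
  atomic_thread_fence( (memory_order) order );
}

int main( void )
{
  atomic_uint counter = 0;

  atomic_store_explicit( &counter, 1U, (memory_order) DEMO_ORDER_RELAXED );
  Demo_Fence( DEMO_ORDER_SEQ_CST );
  printf( "counter = %u\n",
          atomic_load_explicit( &counter, (memory_order) DEMO_ORDER_ACQUIRE ) );

  return 0;
}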
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 61fbff534f..20c5d17ee7 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -92,12 +92,14 @@ static inline void _Scheduler_SMP_Allocate_processor(
_Thread_Set_CPU( heir, cpu_of_victim );
+ cpu_of_victim->heir = heir;
+
/*
- * FIXME: Here we need atomic store operations with a relaxed memory order.
- * The _CPU_SMP_Send_interrupt() will ensure that the change can be
- * observed consistently.
+ * It is critical that we first update the heir and then the dispatch
+ * necessary indicator so that _Thread_Dispatch() cannot miss an update.
*/
- cpu_of_victim->heir = heir;
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
cpu_of_victim->dispatch_necessary = true;
if ( cpu_of_victim != cpu_of_executing ) {
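This hunk replaces the old FIXME with a release fence: the allocator publishes the new heir, issues _Atomic_Fence( ATOMIC_ORDER_RELEASE ), and only then sets dispatch_necessary, so a processor that observes the flag cannot see a stale heir. The sketch below shows the implied release/acquire pairing in plain C11; Demo_Per_CPU, demo_allocate_processor, and demo_thread_dispatch are hypothetical stand-ins, and the reading side in the real _Thread_Dispatch() may be structured differently.

/* Sketch: publish heir, release fence, then set the flag; pair with an acquire. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  const char *heir;                /* stands in for the heir thread pointer */
  atomic_bool dispatch_necessary;  /* flag polled on the victim processor   */
} Demo_Per_CPU;

/* Runs on the processor that allocates the victim CPU to a new heir. */
static void demo_allocate_processor( Demo_Per_CPU *cpu, const char *heir )
{
  cpu->heir = heir;

  /* Order the heir update before the flag update, as in the patch above. */
  atomic_thread_fence( memory_order_release );

  atomic_store_explicit( &cpu->dispatch_necessary, true, memory_order_relaxed );
}

/* Runs on the victim processor, e.g. from its dispatch path. */
static void demo_thread_dispatch( Demo_Per_CPU *cpu )
{
  if ( atomic_load_explicit( &cpu->dispatch_necessary, memory_order_relaxed ) ) {
    /* Pairs with the release fence, so the heir update is visible here. */
    atomic_thread_fence( memory_order_acquire );
    printf( "dispatching heir: %s\n", cpu->heir );
  }
}

int main( void )
{
  /* Sequential calls only exercise the API; in the scheduler the two
     functions execute on different processors. */
  Demo_Per_CPU cpu = { .heir = "idle", .dispatch_necessary = false };

  demo_allocate_processor( &cpu, "worker" );
  demo_thread_dispatch( &cpu );

  return 0;
}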