author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2015-11-11 11:49:45 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2015-11-12 08:21:45 +0100
commit     5c7bfcf82e1cb46d0a2da486c646eae4146e93c8 (patch)
tree       98ec5b445e5402da21af59e2b979fc288d829677
parent     Introduce general purpose system server event (diff)
Fix interrupt epilogue for ARMv7-AR and PowerPC
-rw-r--r--   c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S   51
-rw-r--r--   cpukit/score/cpu/arm/arm_exc_interrupt.S   27
2 files changed, 65 insertions(+), 13 deletions(-)
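
The comments added in both files refer to compiler-generated compare-and-swap sequences. As a rough illustration (the function name and types are invented for this note, not taken from the RTEMS sources), such an operation could be written with C11 atomics as below; GCC lowers the strong exchange to the lwarx/stwcx. loop on PowerPC and the ldrex/strex loop on ARMv7-AR that are quoted in the comments of the diff:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical example only: a strong compare-and-swap as application
       or kernel code might write it.  The compiler turns this into the
       load-reserve/store-conditional loops quoted in the comments below. */
    static bool try_update(atomic_uint *value, unsigned expected, unsigned desired)
    {
      return atomic_compare_exchange_strong(value, &expected, desired);
    }
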
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index 59e621f64f..0e71dad93c 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -29,29 +29,29 @@
#define SCRATCH_5_REGISTER r12
#define FRAME_REGISTER r14
-#define VECTOR_OFFSET(reg) GPR4_OFFSET(reg)
-#define SELF_CPU_OFFSET(reg) GPR5_OFFSET(reg)
-#define ISR_NEST_OFFSET(reg) GPR6_OFFSET(reg)
-#define DISPATCH_LEVEL_OFFSET(reg) GPR7_OFFSET(reg)
-#define HANDLER_OFFSET(reg) GPR8_OFFSET(reg)
-#define SCRATCH_0_OFFSET(reg) GPR0_OFFSET(reg)
-#define SCRATCH_1_OFFSET(reg) GPR3_OFFSET(reg)
-#define SCRATCH_2_OFFSET(reg) GPR9_OFFSET(reg)
-#define SCRATCH_3_OFFSET(reg) GPR10_OFFSET(reg)
-#define SCRATCH_4_OFFSET(reg) GPR11_OFFSET(reg)
-#define SCRATCH_5_OFFSET(reg) GPR12_OFFSET(reg)
+#define VECTOR_OFFSET GPR4_OFFSET
+#define SELF_CPU_OFFSET GPR5_OFFSET
+#define ISR_NEST_OFFSET GPR6_OFFSET
+#define DISPATCH_LEVEL_OFFSET GPR7_OFFSET
+#define HANDLER_OFFSET GPR8_OFFSET
+#define SCRATCH_0_OFFSET GPR0_OFFSET
+#define SCRATCH_1_OFFSET GPR3_OFFSET
+#define SCRATCH_2_OFFSET GPR9_OFFSET
+#define SCRATCH_3_OFFSET GPR10_OFFSET
+#define SCRATCH_4_OFFSET GPR11_OFFSET
+#define SCRATCH_5_OFFSET GPR12_OFFSET
/*
* The register 2 slot is free, since this is the read-only small data anchor.
*/
-#define FRAME_OFFSET(reg) GPR2_OFFSET(reg)
+#define FRAME_OFFSET GPR2_OFFSET
#ifdef RTEMS_PROFILING
/*
* The PPC_EXC_MINIMAL_FRAME_SIZE is enough to store this additional register.
*/
#define ENTRY_INSTANT_REGISTER r15
-#define ENTRY_INSTANT_OFFSET(reg) GPR13_OFFSET(reg)
+#define ENTRY_INSTANT_OFFSET GPR13_OFFSET
.macro GET_TIME_BASE REG
#ifdef ppc8540
@@ -399,6 +399,31 @@ thread_dispatching_done:
evldd HANDLER_REGISTER, PPC_EXC_ACC_OFFSET(r1)
#endif
+ /*
+ * We must clear reservations here, since otherwise compare-and-swap
+ * atomic operations with interrupts enabled may yield wrong results.
+ * A compare-and-swap atomic operation is generated by the compiler
+ * like this:
+ *
+ * .L1:
+ * lwarx r9, r0, r3
+ * cmpw r9, r4
+ * bne- .L2
+ * stwcx. r5, r0, r3
+ * bne- .L1
+ * .L2:
+ *
+ * Consider the following scenario. A thread is interrupted right
+ * before its stwcx. The interrupt updates the value using a
+ * compare-and-swap sequence of its own. Everything is fine up to this
+ * point. The interrupt then performs a second compare-and-swap
+ * sequence which fails and branches to .L2, so the lwarx of that
+ * sequence leaves the processor with a reservation. The interrupt
+ * returns without a further stwcx. The thread's pending stwcx. then
+ * succeeds using the unrelated reservation of the interrupt and
+ * updates the value even though it has changed in the meantime.
+ */
+ li SCRATCH_0_REGISTER, FRAME_OFFSET
+ stwcx. SCRATCH_0_REGISTER, r1, SCRATCH_0_REGISTER
+
/* Load SRR0, SRR1, CR, CTR, XER, and LR */
lwz SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
lwz SCRATCH_1_REGISTER, SRR1_FRAME_OFFSET(r1)
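
PowerPC has no instruction comparable to ARM's clrex, so the epilogue code added above cancels a possible reservation with a dummy stwcx. to a slot of the exception frame. A minimal C-level sketch of the same idea, assuming GCC inline assembly on a PowerPC target (the helper name and the use of a local scratch word instead of the exception frame slot are choices made for this note):

    static inline void clear_reservation_sketch(void)
    {
      unsigned long scratch;

      /* Dummy store-conditional: whether it succeeds or fails does not
         matter, it consumes any reservation held by this processor. */
      __asm__ volatile (
        "stwcx. %0, 0, %1"
        :
        : "r" (0UL), "r" (&scratch)
        : "cr0", "memory"
      );
    }
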
diff --git a/cpukit/score/cpu/arm/arm_exc_interrupt.S b/cpukit/score/cpu/arm/arm_exc_interrupt.S
index 7930c32044..fcb1510b95 100644
--- a/cpukit/score/cpu/arm/arm_exc_interrupt.S
+++ b/cpukit/score/cpu/arm/arm_exc_interrupt.S
@@ -209,6 +209,33 @@ thread_dispatch_done:
/* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
ldmia sp!, {EXCHANGE_LR, EXCHANGE_SPSR}
+#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
+ /*
+ * We must clear reservations here, since otherwise compare-and-swap
+ * atomic operations with interrupts enabled may yield wrong results.
+ * A compare-and-swap atomic operation is generated by the compiler
+ * like this:
+ *
+ * .L1:
+ * ldrex r1, [r0]
+ * cmp r1, r3
+ * bne .L2
+ * strex r3, r2, [r0]
+ * cmp r3, #0
+ * bne .L1
+ * .L2:
+ *
+ * Consider the following scenario. A thread is interrupted right
+ * before its strex. The interrupt updates the value using a
+ * compare-and-swap sequence of its own. Everything is fine up to this
+ * point. The interrupt then performs a second compare-and-swap
+ * sequence which fails and branches to .L2, so the ldrex of that
+ * sequence leaves the processor with a reservation. The interrupt
+ * returns without a further strex. The thread's pending strex then
+ * succeeds using the unrelated reservation of the interrupt and
+ * updates the value even though it has changed in the meantime.
+ */
+ clrex
+#endif
+
/* Return from interrupt */
subs pc, lr, #4
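
For context, the kind of code that a stale reservation could silently break is an ordinary lock-free update performed both from thread context and from an interrupt handler. A hypothetical sketch (names invented for this note), assuming C11 atomics:

    #include <stdatomic.h>

    static atomic_uint shared_counter;

    /* Callable from thread context and from an interrupt handler.  If the
       thread is interrupted between its load-reserve and store-conditional
       and the handler leaves a dangling reservation behind, the thread's
       store-conditional could succeed even though the value has changed,
       losing the handler's update -- exactly the case the clrex/stwcx.
       added by this commit guards against. */
    static void add_to_counter(unsigned delta)
    {
      unsigned old_value = atomic_load(&shared_counter);

      while (!atomic_compare_exchange_weak(&shared_counter, &old_value,
                                           old_value + delta)) {
        /* old_value was refreshed by the failed exchange; retry. */
      }
    }
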