summaryrefslogtreecommitdiffstats
path: root/c/src/lib/libcpu/powerpc/new-exceptions
diff options
context:
space:
mode:
author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2016-11-14 15:23:40 +0100
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2016-11-18 07:30:35 +0100
commit	7ce60b378dcf732e1467dcb7664a94824ac608c7 (patch)
tree	47e29717be924e2aaa700378f37b10a51bc72487 /c/src/lib/libcpu/powerpc/new-exceptions
parent	arm: Use Per_CPU_Control::isr_dispatch_disable (diff)
download	rtems-7ce60b378dcf732e1467dcb7664a94824ac608c7.tar.bz2
powerpc: Use Per_CPU_Control::isr_dispatch_disable
Update #2751.
Diffstat (limited to 'c/src/lib/libcpu/powerpc/new-exceptions')
-rw-r--r--	c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S	| 78
-rw-r--r--	c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S	| 8
2 files changed, 66 insertions, 20 deletions
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index c6cbdcec8d..4c9d8aa3f0 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2015 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2011, 2016 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -283,10 +283,12 @@ ppc_exc_wrap_async_normal:
bl bsp_interrupt_dispatch
#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
- /* Load ISR nest level and thread dispatch disable level */
+ /* Load some per-CPU variables */
GET_SELF_CPU_CONTROL SELF_CPU_REGISTER
+ lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SELF_CPU_REGISTER)
+ lwz SCRATCH_1_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SELF_CPU_REGISTER)
+ lwz SCRATCH_2_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SELF_CPU_REGISTER)
lwz ISR_NEST_REGISTER, PER_CPU_ISR_NEST_LEVEL(SELF_CPU_REGISTER)
- lwz DISPATCH_LEVEL_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SELF_CPU_REGISTER)
/*
* Switch back to original stack (FRAME_REGISTER == r1 if we are still
@@ -295,36 +297,74 @@ ppc_exc_wrap_async_normal:
mr r1, FRAME_REGISTER
lwz FRAME_REGISTER, FRAME_OFFSET(r1)
- /* Decrement ISR nest level and thread dispatch disable level */
+ /* Decrement levels and determine thread dispatch state */
+ xori SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, 1
+ or SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, SCRATCH_1_REGISTER
+ subi DISPATCH_LEVEL_REGISTER, SCRATCH_2_REGISTER, 1
+ or. SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, DISPATCH_LEVEL_REGISTER
#ifdef RTEMS_PROFILING
+ cmpwi cr2, SCRATCH_0_REGISTER, 0
subic. ISR_NEST_REGISTER, ISR_NEST_REGISTER, 1
- subi DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_REGISTER, 1
- cmpwi cr2, DISPATCH_LEVEL_REGISTER, 0
-#else /* RTEMS_PROFILING */
+#else
subi ISR_NEST_REGISTER, ISR_NEST_REGISTER, 1
- subic. DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_REGISTER, 1
-#endif /* RTEMS_PROFILING */
- stw ISR_NEST_REGISTER, PER_CPU_ISR_NEST_LEVEL(SELF_CPU_REGISTER)
+#endif
+
+ /* Store thread dispatch disable and ISR nest levels */
stw DISPATCH_LEVEL_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SELF_CPU_REGISTER)
+ stw ISR_NEST_REGISTER, PER_CPU_ISR_NEST_LEVEL(SELF_CPU_REGISTER)
#ifdef RTEMS_PROFILING
/* Store profiling data if necessary */
- bne profiling_done
+ bne .Lprofiling_done
mr r3, SELF_CPU_REGISTER
mr r4, ENTRY_INSTANT_REGISTER
GET_TIME_BASE r5
bl _Profiling_Outer_most_interrupt_entry_and_exit
-profiling_done:
+ GET_SELF_CPU_CONTROL SELF_CPU_REGISTER
+.Lprofiling_done:
#endif /* RTEMS_PROFILING */
- /* Call thread dispatcher if necessary */
+ /*
+ * Check thread dispatch necessary, ISR dispatch disable and thread
+ * dispatch disable level.
+ */
#ifdef RTEMS_PROFILING
- bne cr2, thread_dispatching_done
-#else /* RTEMS_PROFILING */
- bne thread_dispatching_done
-#endif /* RTEMS_PROFILING */
- bl _Thread_Dispatch
-thread_dispatching_done:
+ bne cr2, .Lthread_dispatch_done
+#else
+ bne .Lthread_dispatch_done
+#endif
+
+ /* Thread dispatch */
+.Ldo_thread_dispatch:
+
+ /* Set ISR dispatch disable and thread dispatch disable level to one */
+ li SCRATCH_0_REGISTER, 1
+ stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SELF_CPU_REGISTER)
+ stw SCRATCH_0_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SELF_CPU_REGISTER)
+
+ /* Call _Thread_Do_dispatch(), this function will enable interrupts */
+ mr r3, SELF_CPU_REGISTER
+ mfmsr r4
+ ori r4, r4, MSR_EE
+ bl _Thread_Do_dispatch
+
+ /* Disable interrupts */
+ wrteei 0
+
+#ifdef RTEMS_SMP
+ GET_SELF_CPU_CONTROL SELF_CPU_REGISTER
+#endif
+
+ /* Check if we have to do the thread dispatch again */
+ lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SELF_CPU_REGISTER)
+ cmpwi SCRATCH_0_REGISTER, 0
+ bne .Ldo_thread_dispatch
+
+ /* We are done with thread dispatching */
+ li SCRATCH_0_REGISTER, 0
+ stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SELF_CPU_REGISTER)
+
+.Lthread_dispatch_done:
#ifdef PPC_MULTILIB_ALTIVEC
/* Restore volatile AltiVec context */
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
index e5b4fcdbcd..a1ec64b8e4 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
@@ -265,9 +265,11 @@ PROC (_CPU_Context_switch):
/* Save context to r3 */
+ GET_SELF_CPU_CONTROL r12
mfmsr r6
mflr r7
mfcr r8
+ lwz r11, PER_CPU_ISR_DISPATCH_DISABLE(r12)
/*
* We have to clear the reservation of the executing thread. See also
@@ -335,6 +337,7 @@ PROC (_CPU_Context_switch):
PPC_GPR_STORE r31, PPC_CONTEXT_OFFSET_GPR31(r3)
stw r2, PPC_CONTEXT_OFFSET_GPR2(r3)
+ stw r11, PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE(r3)
#ifdef PPC_MULTILIB_ALTIVEC
li r9, PPC_CONTEXT_OFFSET_V20
@@ -409,7 +412,6 @@ PROC (_CPU_Context_switch):
*/
msync
- GET_SELF_CPU_CONTROL r12
addi r1, r12, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE
li r6, 0
stw r6, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
@@ -471,6 +473,7 @@ restore_context:
PPC_GPR_LOAD r31, PPC_CONTEXT_OFFSET_GPR31(r5)
lwz r2, PPC_CONTEXT_OFFSET_GPR2(r5)
+ lwz r11, PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE(r5)
#ifdef PPC_MULTILIB_ALTIVEC
li r9, PPC_CONTEXT_OFFSET_V20
@@ -525,6 +528,7 @@ restore_context:
mtcr r8
mtlr r7
mtmsr r6
+ stw r11, PER_CPU_ISR_DISPATCH_DISABLE(r12)
#ifdef BSP_USE_SYNC_IN_CONTEXT_SWITCH
isync
@@ -537,6 +541,8 @@ PROC (_CPU_Context_restore):
/* Align to a cache line */
clrrwi r5, r3, PPC_DEFAULT_CACHE_LINE_POWER
+ GET_SELF_CPU_CONTROL r12
+
#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
li r3, 0
#endif