author    Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>  2008-07-11 10:02:12 +0000
committer Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>  2008-07-11 10:02:12 +0000
commit    25a92bc1ed79b0eb5967fa454220ea90810c7ebb (patch)
tree      1f1986de2e9d38bf3514284fc37c2f3d84c3e4d2 /c/src/lib/libcpu
parent    adapted powerpc BSPs to new exception code (diff)
download  rtems-25a92bc1ed79b0eb5967fa454220ea90810c7ebb.tar.bz2
adapted powerpc exception code
Diffstat (limited to 'c/src/lib/libcpu')
-rw-r--r--  c/src/lib/libcpu/powerpc/ChangeLog                                      |  54
-rw-r--r--  c/src/lib/libcpu/powerpc/Makefile.am                                    |   2
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/README               |  21
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc.S            | 361
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h | 914
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_bspsupp.h    |  53
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_hdl.c        |  84
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h            |  16
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c       |  87
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/cpu.c                           |  10
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/e500_raw_exc_init.c             |  39
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.c                 |  77
-rw-r--r--  c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.h                 |  26
-rw-r--r--  c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c                      |  24
-rw-r--r--  c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h                      |  17
15 files changed, 983 insertions, 802 deletions
diff --git a/c/src/lib/libcpu/powerpc/ChangeLog b/c/src/lib/libcpu/powerpc/ChangeLog
index eeace5a461..25f2fa5cf8 100644
--- a/c/src/lib/libcpu/powerpc/ChangeLog
+++ b/c/src/lib/libcpu/powerpc/ChangeLog
@@ -1,3 +1,57 @@
+2008-07-11 Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+ * Makefile.am: Install powerpc-utility.h.
+
+ * shared/include/cpuIdent.h, shared/include/cpuIdent.c: Added e200 and
+ e300 features.
+
+ * new-exceptions/cpu.c: Removed PR288 bugfix check.
+
+ * new-exceptions/e500_raw_exc_init.c: Added initialization for e200.
+ Set IVPR register for e200 and e500 to ppc_exc_vector_base.
+
+ * new-exceptions/raw_exception.c, new-exceptions/raw_exception.h: Added
+ vector categories for e200 and e300 cores. Added global variable
+ ppc_exc_vector_base for CPUs with IVPR register.
+
+ * new-exceptions/bspsupport/ppc_exc.S,
+ new-exceptions/bspsupport/ppc_exc_asm_macros.h,
+ new-exceptions/bspsupport/ppc_exc_bspsupp.h,
+ new-exceptions/bspsupport/ppc_exc_hdl.c,
+ new-exceptions/bspsupport/vectors.h,
+	  new-exceptions/bspsupport/vectors_init.c: Consistent code layout in
+	  most assembler code sections and use of defines for registers. Use
+	  of standard header files to avoid multiple definitions.
+
+	  Optimized exception code: removed many branches and exploited branch
+	  prediction for asynchronous exceptions, moved common wrap code into
+	  the WRAP macro to eliminate a branch, and statically initialized the
+	  handler table with a default handler to eliminate the test whether a
+	  handler is present. Register CR6 is no longer cleared because the
+	  exception handler functions are not variadic.
+
+	  New method to switch to the interrupt stack: the code now tests
+	  whether the exception stack pointer is already inside the interrupt
+	  stack area, so it is no longer necessary to disable interrupts.
+	  SPRG1 and SPRG2 store the initial interrupt stack pointer and the
+	  start of the interrupt stack memory area.
+
+ Removed variable ppc_exc_msr_irq_mask and use general interrupt disable
+ mask from SPRG0 instead.
+
+	  New initialization routine ppc_exc_initialize() for bsp_start(). It
+	  takes the interrupt disable mask, interrupt stack start and size as
+	  parameters (a usage sketch follows this ChangeLog excerpt).
+
+ Added packed prologues for CPUs with IVPR and IVOR registers to save
+ memory space.
+
+ Reverted ppc_exc_crit_always_enabled change from yesterday.
+
+	  WARNING: Tests with critical interrupt exceptions crash the system
+	  at least on the MPC8313ERDB and MPC8349EAMDS. There may be a serious
+	  bug somewhere in the new code.
+
2008-07-10 Till Straumann <strauman@slac.stanford.edu>
* mpc6xx/mmu/pte121.c: use general "memory" clobber
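
For orientation, here is a minimal sketch of how a BSP could call the new
initialization routine from bsp_start(). The linker symbols for the interrupt
stack and the MSR_EE-only disable mask are illustrative assumptions, not part
of this commit:

#include <stdint.h>
#include <rtems/powerpc/registers.h> /* for MSR_EE */
#include <bsp/vectors.h>             /* assumed install location of vectors.h */

/* Assumed linker script symbols delimiting the interrupt stack area */
extern char IntrStackStart [];
extern char IntrStackSize [];

void bsp_start(void)
{
  /* ... CPU identification, cache and MMU setup ... */

  /*
   * Interrupt disable mask, interrupt stack start and size.  Using only
   * MSR_EE is an assumption here; a BookE BSP would include MSR_CE as well.
   */
  ppc_exc_initialize(
    MSR_EE,
    (uint32_t) IntrStackStart,
    (uint32_t) IntrStackSize
  );
}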
diff --git a/c/src/lib/libcpu/powerpc/Makefile.am b/c/src/lib/libcpu/powerpc/Makefile.am
index 452f5c79bb..8a071a2dd9 100644
--- a/c/src/lib/libcpu/powerpc/Makefile.am
+++ b/c/src/lib/libcpu/powerpc/Makefile.am
@@ -13,7 +13,7 @@ include_rtems_powerpc_HEADERS = rtems/powerpc/cache.h \
include_rtems_scoredir = $(includedir)/rtems/score
include_libcpudir = $(includedir)/libcpu
-include_libcpu_HEADERS =
+include_libcpu_HEADERS = shared/include/powerpc-utility.h
EXTRA_DIST =
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/README b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/README
index b6522312d4..0f575ff1b1 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/README
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/README
@@ -389,7 +389,7 @@ RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
dispatch requirement).
And one more note: We never want to disable
- machine-check exceptions to avoid a checkstop.
+ machine-check exceptions to avoid a checkstop.
This means that we cannot use enabling/disabling
this type of exception for protection of critical
OS data structures.
@@ -404,3 +404,22 @@ RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
Note that synchronous machine-checks can legally
use OS primitives and currently there are no
asynchronous machine-checks defined.
+
+ Epilogue:
+
+	You have to disable all asynchronous exceptions which may cause a
+	context switch before restoring the SRRs and executing the RFI.
+	Reason:
+
+	Suppose we are in the epilogue code of an EE between the move to the
+	SRRs and the RFI.  Here EE is disabled but CE is enabled.  Now a CE
+	happens.  Its handler decides that a thread dispatch is necessary.
+	The CE code checks whether this is possible:
+
+	o The thread dispatch disable level is 0, because the EE has already
+	  decremented it.
+	o The EE lock variable is cleared.
+	o The interrupted EE code is past its first instruction.
+
+	Hence a thread dispatch is allowed.  The CE issues a context switch
+	to a task with EE enabled (for example a task waiting for a
+	semaphore).  Now an EE happens and the current content of the SRRs
+	is lost (see the sketch after this README excerpt).
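
The dispatch-safety test behind this race condenses into a few lines of C.
This is an illustrative sketch of the conditions listed above; the helper for
the opcode check is a hypothetical stand-in for TEST_1ST_OPCODE:

#include <stdint.h>

/* Stand-ins (assumptions) for the real score and BSP variables. */
extern volatile uint32_t _Thread_Dispatch_disable_level;
extern uint32_t ppc_exc_lock_std;
extern int pc_is_at_lock_write(void); /* hypothetical: the TEST_1ST_OPCODE check */

static int dispatch_allowed(void)
{
  /* All three conditions hold in the EE epilogue window between the move
   * to the SRRs and the RFI -- which is exactly the race described above. */
  return _Thread_Dispatch_disable_level == 0  /* EE already decremented it */
      && ppc_exc_lock_std == 0                /* EE already cleared its lock */
      && !pc_is_at_lock_write();              /* EE is past its first opcode */
}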
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc.S
index c3cede95da..90b295d20f 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc.S
@@ -3,6 +3,8 @@
*
* Modified and partially rewritten by Till Straumann, 2007
*
+ * Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
+ *
* Low-level assembly code for PPC exceptions.
*
* This file was written with the goal to eliminate
@@ -21,10 +23,10 @@
* Expand prologue snippets for classic, ppc405-critical, bookE-critical
* and E500 machine-check, synchronous and asynchronous exceptions
*/
- PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
- PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
- PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
- PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
@@ -34,20 +36,20 @@
.global ppc_exc_min_prolog_size
ppc_exc_min_prolog_size = 4 * 4
- /* Special prologue for 603e-style CPUs.
- *
- * 603e shadows GPR0..GPR3 for certain exceptions. We must switch
- * that off before we can use the stack pointer. Note that this is
- * ONLY safe if the shadowing is actually active -- otherwise, r1
- * is destroyed. We deliberately use r1 so problems become obvious
- * if this is misused!
- */
+/* Special prologue for 603e-style CPUs.
+ *
+ * 603e shadows GPR0..GPR3 for certain exceptions. We must switch
+ * that off before we can use the stack pointer. Note that this is
+ * ONLY safe if the shadowing is actually active -- otherwise, r1
+ * is destroyed. We deliberately use r1 so problems become obvious
+ * if this is misused!
+ */
.global ppc_exc_tgpr_clr_prolog
ppc_exc_tgpr_clr_prolog:
- mfmsr r1
- rlwinm r1,r1,0,15,13
- mtmsr r1
- isync
+ mfmsr r1
+ rlwinm r1,r1,0,15,13
+ mtmsr r1
+ isync
/* FALL THRU TO 'auto' PROLOG */
/* Determine vector dynamically/automatically
@@ -58,9 +60,21 @@ ppc_exc_tgpr_clr_prolog:
.global ppc_exc_min_prolog_auto
ppc_exc_min_prolog_auto:
stwu r1, -EXCEPTION_FRAME_END(r1)
- stw r3, GPR3_OFFSET(r1)
- mflr r3
- bla wrap_auto
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ mflr VECTOR_REGISTER
+ bla wrap_auto
+
+/**
+ * @brief Automatic prologue for vector offsets on 16-byte boundaries.
+ *
+ * @see ppc_exc_min_prolog_auto().
+ */
+ .global ppc_exc_min_prolog_auto_packed
+ppc_exc_min_prolog_auto_packed:
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ mflr VECTOR_REGISTER
+ bla wrap_auto_packed
.global ppc_exc_tgpr_clr_prolog_size
ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
@@ -78,10 +92,10 @@ ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
*/
.global ppc_exc_min_prolog_auto_async
ppc_exc_min_prolog_auto_async:
- stw r1, ppc_exc_lock_std@sdarel(r13)
- stw r3, ppc_exc_gpr3_std@sdarel(r13)
- mflr r3
- bla wrap_auto_async
+ stw r1, ppc_exc_lock_std@sdarel(r13)
+ stw VECTOR_REGISTER, ppc_exc_vector_register_std@sdarel(r13)
+ mflr VECTOR_REGISTER
+ bla wrap_auto_async
/******************************************************/
/* WRAPPERS */
@@ -101,294 +115,63 @@ __ppc_exc_wrappers_start = .
/* Expand wrappers for different exception flavors */
/* Standard/classic powerpc */
- WRAP _FLVR=std _PRI=std _SRR0=srr0 _SRR1=srr1 _RFI=rfi
+ WRAP _FLVR=std _PRI=std _SRR0=srr0 _SRR1=srr1 _RFI=rfi
/* ppc405 has a critical exception using srr2/srr3 */
- WRAP _FLVR=p405_crit _PRI=crit _SRR0=srr2 _SRR1=srr3 _RFI=rfci
+ WRAP _FLVR=p405_crit _PRI=crit _SRR0=srr2 _SRR1=srr3 _RFI=rfci
/* bookE has critical exception using csrr0 cssr1 */
- WRAP _FLVR=bookE_crit _PRI=crit _SRR0=csrr0 _SRR1=csrr1 _RFI=rfci
+ WRAP _FLVR=bookE_crit _PRI=crit _SRR0=csrr0 _SRR1=csrr1 _RFI=rfci
/* e500 has machine-check exception using mcsrr0 mcssr1 */
- WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
-
+ WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
- /* LR holds vector, r3 holds orig. LR */
+ /* LR holds vector, VECTOR_REGISTER holds orig. LR */
wrap_auto:
- stw r14, GPR14_OFFSET(r1)
- /* find address where we jumped from */
- mflr r14
- /* restore LR */
- mtlr r3
- /* compute vector into R3 */
- rlwinm r3, r14, 24, 26, 31
- /* we're now in almost the same state as if called by
- * min_prolog_std but we must skip saving r14
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+
+ /* Find address where we jumped from */
+ mflr FRAME_REGISTER
+
+ /* Restore LR */
+ mtlr VECTOR_REGISTER
+
+ /* Compute vector into R3 */
+ rlwinm VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
+
+ /*
+ * We're now in almost the same state as if called by
+ * min_prolog_std but we must skip saving FRAME_REGISTER
* since that's done already
*/
- b wrap_no_save_r14_std
+ b wrap_no_save_frame_register_std
+
+ /* See: wrap_auto */
+wrap_auto_packed:
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+ mflr FRAME_REGISTER
+ mtlr VECTOR_REGISTER
+ rlwinm VECTOR_REGISTER, FRAME_REGISTER, 28, 26, 31
+ b wrap_no_save_frame_register_std
wrap_auto_async:
stwu r1, -EXCEPTION_FRAME_END(r1)
- stw r14, GPR14_OFFSET(r1)
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
/* find address where we jumped from */
- mflr r14
+ mflr FRAME_REGISTER
/* restore LR */
- mtlr r3
+ mtlr VECTOR_REGISTER
/* set upper bits to indicate that non-volatile
* registers should not be saved/restored.
*/
- li r3, 0xffff8000
+ li VECTOR_REGISTER, 0xffff8000
/* compute vector into R3 */
- rlwimi r3, r14, 24, 26, 31
+ rlwimi VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
/* we're now in almost the same state as if called by
- * min_prolog_std but we must skip saving r14
+ * min_prolog_std but we must skip saving FRAME_REGISTER
* since that's done already
*/
- b wrap_no_save_r14_std
-
-/*
- * Common code for all flavors of exception and whether
- * they are synchronous or asynchronous.
- *
- * Call with
- * r3 : vector
- * r4 : srr0
- * r5 : srr1
- * r14: exception frame
- * cr4: OR of lower-priority locks
- * cr2: exception type (asyn/isr [<0] or synchronous [>=0])
- * lr : is updated by 'bl'
- * all others: original state
- *
- * If this is an asynchronous exception ( cr2 < 0 ):
- * - save volatile registers only,
- * - disable thread dispatching,
- * - switch to interrupt stack (if necessary),
- * - call the C-dispatcher,
- * - switch back the stack,
- * - decrement the dispatch-disable level
- * - check if it is safe to dispatch (disable-level must be 0
- * AND no lower-priority asynchronous exception must be under
- * way (as indicated by the lock variables).
- * - If it would be OK to dispatch, call the C-wrapup code.
- * - restore volatile registers
- *
- * Otherwise, i.e., if we are dealing with a synchronous exception
- * then:
- * - save all registers
- * - call the C-dispatcher
- * - restore registers
- */
-
-wrap_common:
- stw r4, SRR0_FRAME_OFFSET(r14)
- stw r5, SRR1_FRAME_OFFSET(r14)
-
- /* prepare for calling C code; */
-
- /* use non-volatile r15 for remembering lr */
- stw r15, GPR15_OFFSET(r14)
-
- /* save vector; negative if only scratch regs. are valid */
- stw r3, EXCEPTION_NUMBER_OFFSET(r14)
-
- /* save scratch registers */
-
- /* r2 should be unused or fixed anyways (eabi sdata2) */
- stw r0, GPR0_OFFSET(r14)
- stw r2, GPR2_OFFSET(r14)
- stw r6, GPR6_OFFSET(r14)
- stw r7, GPR7_OFFSET(r14)
- stw r8, GPR8_OFFSET(r14)
- stw r9, GPR9_OFFSET(r14)
- stw r10, GPR10_OFFSET(r14)
- stw r11, GPR11_OFFSET(r14)
- stw r12, GPR12_OFFSET(r14)
- /* r13 must be fixed anyways (sysv sdata) */
-
- /* save LR */
- mflr r15
-
- mfctr r4
- mfxer r5
- stw r4, EXC_CTR_OFFSET(r14)
- stw r5, EXC_XER_OFFSET(r14)
-
- /*
- * Switch MMU / RI on if necessary;
- * remember decision in cr3
- */
- lwz r4, ppc_exc_msr_bits@sdarel(r13)
- cmpwi cr3, r4, 0
- beq cr3, 1f
- mfmsr r5
- or r5, r5, r4
- mtmsr r5
- sync
- isync
-1:
-
- /* If this is a asynchronous exception we skip ahead */
- blt cr2, skip_save_nonvolatile_regs
-
- /* YES; they want everything ('normal exception') */
-
- /* save original stack pointer */
- lwz r5, EXC_MIN_GPR1(r14)
- stw r5, GPR1_OFFSET(r14)
-
- stw r13, GPR13_OFFSET(r14)
-
- /* store r16..r31 into the exception frame */
- stmw r16, GPR16_OFFSET(r14)
-
-skip_save_nonvolatile_regs:
- /* store address of exception frame in r4; vector is in r3 */
- addi r4, r14, FRAME_LINK_SPACE
-
- /* load hi-halfword of C wrapper address */
- lis r5, ppc_exc_C_wrapper@h
- /* clear CR[6] to make sure no vararg callee assumes that
- * there are any valid FP regs
- */
- crxor 6,6,6
- /* merge lo-halfword of C wrapper address */
- ori r5, r5, ppc_exc_C_wrapper@l
- /* Far branch to ppc_C_wrapper */
- mtlr r5
- blrl
-
- /* do not clobber r3 since we pass the return value
- * of ppc_exc_C_wrapper on to ppc_exc_wrapup
- */
-
- /* skip decrementing the thread-dispatch disable level
- * and calling ppc_exc_wrapup if this is a synchronous
- * exception.
- */
- bge cr2, restore_nonvolatile_regs
-
- /* decrement ISR nest level;
- * disable all interrupts.
- * (Disabling IRQs here is not necessary if we
- * use the stack-switching strategy which tests
- * if we are alreay on the ISR-stack as opposed
- * to test the nesting level; see ppc_exc_asm_macros.h)
- */
- lwz r4, ppc_exc_msr_irq_mask@sdarel(r13)
- mfmsr r5
- andc r4, r5, r4
- mtmsr r4
- lwz r4, _ISR_Nest_level@sdarel(r13)
- addi r4, r4, -1
- stw r4, _ISR_Nest_level@sdarel(r13)
-
- /*
- * switch back to original stack (r14 == r1 if we are
- * still on the IRQ stack).
- */
- mr r1, r14
-
- /* restore interrupt mask */
- mtmsr r5
-
- /* decrement thread_dispatch level and check
- * if we have to run the dispatcher.
- */
- lwz r5, _Thread_Dispatch_disable_level@sdarel(r13)
- addic. r5, r5, -1
- stw r5, _Thread_Dispatch_disable_level@sdarel(r13)
-
- /* test _Thread_Dispatch_disable nesting level AND
- * lower priority locks (in cr4); ONLY if
- * _Thread_Dispatch_disable_level == 0 AND no lock is set
- * then call ppc_exc_wrapup which may do a context switch.
- */
- crand EQ(cr0), EQ(cr0), EQ(cr4)
- bne 2f
- crxor 6,6,6
- /* Far branch to ppc_exc_wrapup */
- lis r5, ppc_exc_wrapup@h
- addi r4, r14, FRAME_LINK_SPACE
- ori r5, r5, ppc_exc_wrapup@l
- mtlr r5
- blrl
-2:
- lwz r14, GPR14_OFFSET(r1)
-
- /* we can skip restoring r16..r31 */
- b skip_restore_nonvolatile_regs
-
-restore_nonvolatile_regs:
- /* synchronous exc: restore everything from the exception frame */
- lwz r14, GPR14_OFFSET(r1)
-
- /* restore stack pointer */
- lwz r5, GPR1_OFFSET(r1)
- stw r5, EXC_MIN_GPR1(r1)
-
- /* restore non-volatile regs */
- lwz r13, GPR13_OFFSET(r1)
- lmw r16, GPR16_OFFSET(r1)
-
-skip_restore_nonvolatile_regs:
- lwz r3, EXC_XER_OFFSET(r1)
- lwz r4, EXC_CTR_OFFSET(r1)
- mtxer r3
- mtctr r4
-
- /* restore lr, r15 */
- mtlr r15
- lwz r15, GPR15_OFFSET(r1)
-
- /* restore scratch regs */
- lwz r12, GPR12_OFFSET(r1)
- lwz r11, GPR11_OFFSET(r1)
- lwz r10, GPR10_OFFSET(r1)
- lwz r9, GPR9_OFFSET(r1)
- lwz r8, GPR8_OFFSET(r1)
- lwz r7, GPR7_OFFSET(r1)
- lwz r6, GPR6_OFFSET(r1)
- /* r4, r5 are eventually restored by caller */
- lwz r3, GPR3_OFFSET(r1)
- lwz r2, GPR2_OFFSET(r1)
- /* r1, is eventually restored by caller */
- lwz r0, GPR0_OFFSET(r1)
-
- beq cr3, 2f
- /* restore MSR settings */
- lwz r5, ppc_exc_msr_bits@sdarel(r13)
- mfmsr r4
- andc r4, r4, r5
- mtmsr r4
- sync
- isync
-2:
-
- lwz r4, EXC_CR_OFFSET(r1)
- mtcr r4
-
- /* Must disable interrupts prior to restoring SSRs.
- * Here's a scenario discovered by Sebastian Huber:
- * 1) CE happens between writing to SRR and RFI
- * 2) CE handler does something which requires a task switch
- * 3) CE wrapper returns and determines that task switch
- * is OK since EE lock is not held, dispatch-disable level
- * is zero etc.
- * 4) switch to other task enables EE
- * 5) eventually, switch back to task interrupted by 1)
- * 6) RFI happens but SRR contents have been clobbered.
- */
- lwz r4, ppc_exc_msr_irq_mask@sdarel(r13)
- mfmsr r5
- andc r4, r5, r4
- mtmsr r4
+ b wrap_no_save_frame_register_std
- /* restore SRR and stack */
- lwz r4, SRR0_FRAME_OFFSET(r1)
- lwz r5, SRR1_FRAME_OFFSET(r1)
- blr
-
.global __ppc_exc_wrappers_end
__ppc_exc_wrappers_end = .
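
The rlwinm instructions in wrap_auto and wrap_auto_packed recover the vector
number from the prologue return address: bla sets LR to the address right
behind itself, so LR encodes the vector entry location. In C terms this is a
shift and mask; a sketch, with the shift amounts following from the 256-byte
classic and 16-byte packed vector spacing:

#include <stdint.h>

/* 'lr' is the address right behind the 'bla' of the minimal prologue. */

static inline uint32_t vector_classic(uint32_t lr)
{
  /* rlwinm v, lr, 24, 26, 31: classic prologues on 256-byte boundaries */
  return (lr >> 8) & 0x3f;
}

static inline uint32_t vector_packed(uint32_t lr)
{
  /* rlwinm v, lr, 28, 26, 31: packed prologues (IVPR/IVOR CPUs) on
   * 16-byte boundaries, which saves memory space */
  return (lr >> 4) & 0x3f;
}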
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h
index 4f66d91e54..106e73bd6d 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h
@@ -3,6 +3,8 @@
*
* Modified and partially rewritten by Till Straumann, 2007-2008
*
+ * Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
+ *
* Low-level assembly code for PPC exceptions (macros).
*
* This file was written with the goal to eliminate
@@ -10,116 +12,33 @@
* reintroduce such statements.
*/
-#include <rtems/score/cpu.h>
-#include <bsp/vectors.h>
+#include <libcpu/powerpc-utility.h>
#include <libcpu/raw_exception.h>
-#define EXC_MIN_GPR1 0
-#define FRAME_LINK_SPACE 8
-
-
-#define r0 0
-#define r1 1
-#define r2 2
-#define r3 3
-#define r4 4
-#define r5 5
-#define r6 6
-#define r7 7
-#define r8 8
-#define r9 9
-#define r10 10
-#define r11 11
-#define r12 12
-#define r13 13
-#define r14 14
-#define r15 15
-#define r16 16
-#define r17 17
-#define r18 18
-#define r19 19
-#define r20 20
-#define r21 21
-#define r22 22
-#define r23 23
-#define r24 24
-#define r25 25
-#define r26 26
-#define r27 27
-#define r28 28
-#define r29 29
-#define r30 30
-#define r31 31
-
-#define cr0 0
-#define cr1 1
-#define cr4 4
-
-#define LT(cr) ((cr)*4+0)
-#define GT(cr) ((cr)*4+1)
-#define EQ(cr) ((cr)*4+2)
-
-#define NOFRAME 0xffff8000
-
-/* Opcode of 'stw r1, off(r13)' */
-#define STW_R1_R13(off) ((((36<<10)|(r1<<5)|(r13))<<16) | ((off)&0xffff))
+#include "vectors.h"
-/*
- **********************************************************************
- * MACRO: SWITCH_STACK
- **********************************************************************
- *
- * Increment _ISR_Nest_level and switch r1 to interrupt
- * stack if not already there.
- *
- * USES: RA, RB, cr0
- * ON EXIT: RA, RB available, r1 points into interrupt
- * stack.
- *
- * NOTES:
- * - NEVER store stuff in a frame before
- * reserving it (stwu r1) - otherwise
- * higher-priority exception may overwrite.
- * - algorithm should allow nesting of higher
- * priority exceptions (HPE) (by disabling
- * them while the stack is switched).
- */
-#if 1
- .macro SWITCH_STACK RA RB FLVR
- mfspr \RB, SPRG1
- cmplw cr0, r1, \RB
- bgt do_r1_reload_\FLVR
- lwz \RA, ppc_exc_intr_stack_size@sdarel(r13)
- subf \RB, \RA, \RB
- cmplw cr0, r1, \RB
- bge no_r1_reload_\FLVR
-do_r1_reload_\FLVR:
- mfspr r1, SPRG1
-no_r1_reload_\FLVR:
- lwz \RA, _ISR_Nest_level@sdarel(r13)
- addi \RA, \RA, 1
- stw \RA, _ISR_Nest_level@sdarel(r13)
- .endm
-#else
- .macro SWITCH_STACK RA RB FLVR
- /* disable interrupts */
- lwz \RA, ppc_exc_msr_irq_mask@sdarel(r13)
- mfmsr \RB
- andc \RA, \RB, \RA
- mtmsr \RA
- /* increment nest level */
- lwz \RA, _ISR_Nest_level@sdarel(r13)
- cmplwi cr0, \RA, 0
- bne no_r1_reload_\FLVR
- /* reload r1 */
- mfspr r1, SPRG1
-no_r1_reload_\FLVR:
- addi \RA, \RA, 1
- stw \RA, _ISR_Nest_level@sdarel(r13)
- /* restore IRQ mask */
- mtmsr \RB
- .endm
-#endif
+#define LT(cr) ((cr)*4+0)
+#define GT(cr) ((cr)*4+1)
+#define EQ(cr) ((cr)*4+2)
+
+/* Opcode of 'stw r1, off(r13)' */
+#define STW_R1_R13(off) ((((36<<10)|(r1<<5)|(r13))<<16) | ((off)&0xffff))
+
+#define FRAME_REGISTER r14
+#define VECTOR_REGISTER r4
+#define SCRATCH_REGISTER_0 r5
+#define SCRATCH_REGISTER_1 r6
+#define SCRATCH_REGISTER_2 r7
+
+#define FRAME_OFFSET( r) GPR14_OFFSET( r)
+#define VECTOR_OFFSET( r) GPR4_OFFSET( r)
+#define SCRATCH_REGISTER_0_OFFSET( r) GPR5_OFFSET( r)
+#define SCRATCH_REGISTER_1_OFFSET( r) GPR6_OFFSET( r)
+#define SCRATCH_REGISTER_2_OFFSET( r) GPR7_OFFSET( r)
+
+#define CR_TYPE 2
+#define CR_MSR 3
+#define CR_LOCK 4
/*
* Minimal prologue snippets:
@@ -146,25 +65,27 @@ no_r1_reload_\FLVR:
*/
/*
- **********************************************************************
+ *****************************************************************************
* MACRO: PPC_EXC_MIN_PROLOG_ASYNC
- **********************************************************************
- * USES: r3
- * ON EXIT: vector in r3
+ *****************************************************************************
+ * USES: VECTOR_REGISTER
+ * ON EXIT: Vector in VECTOR_REGISTER
*
- * NOTES: r3 saved in special variable 'ppc_exc_gpr3_\_PRI'
+ * NOTES: VECTOR_REGISTER saved in special variable
+ * 'ppc_exc_vector_register_\_PRI'.
*
*/
.macro PPC_EXC_MIN_PROLOG_ASYNC _NAME _VEC _PRI _FLVR
+
.global ppc_exc_min_prolog_async_\_NAME
ppc_exc_min_prolog_async_\_NAME:
- /* Atomically write lock variable in 1st instruction with non-zero value
- * (r1 is always nonzero; r13 could also be used)
+ /* Atomically write lock variable in 1st instruction with non-zero
+ * value (r1 is always nonzero; r13 could also be used)
*
* NOTE: raising an exception and executing this first instruction
- * of the exception handler is apparently NOT atomic, i.e.,
- * a low-priority IRQ could set the PC to this location and
- * a critical IRQ could intervene just at this point.
+ * of the exception handler is apparently NOT atomic, i.e., a
+ * low-priority IRQ could set the PC to this location and a
+ * critical IRQ could intervene just at this point.
*
* We check against this pathological case by checking the
* opcode/instruction at the interrupted PC for matching
@@ -178,322 +99,649 @@ ppc_exc_min_prolog_async_\_NAME:
* 2) This instruction (including the address offset)
* is not used anywhere else (probably a safe assumption).
*/
- stw r1, ppc_exc_lock_\_PRI@sdarel(r13)
- /* We have no stack frame yet; store r3 in special area;
+ stw r1, ppc_exc_lock_\_PRI@sdarel(r13)
+ /* We have no stack frame yet; store VECTOR_REGISTER in special area;
* a higher-priority (critical) interrupt uses a different area
* (hence the different prologue snippets) (\PRI)
*/
- stw r3, ppc_exc_gpr3_\_PRI@sdarel(r13)
- /* Load vector.
+ stw VECTOR_REGISTER, ppc_exc_vector_register_\_PRI@sdarel(r13)
+ /* Load vector.
*/
- li r3, ( \_VEC | 0xffff8000 )
- /* Branch (must be within 32MB)
+ li VECTOR_REGISTER, ( \_VEC | 0xffff8000 )
+ /* Branch (must be within 32MB)
*/
- ba wrap_\_FLVR
+ ba wrap_\_FLVR
+
.endm
/*
- **********************************************************************
+ *****************************************************************************
* MACRO: PPC_EXC_MIN_PROLOG_SYNC
- **********************************************************************
- * USES: r3
- * ON EXIT: vector in r3
+ *****************************************************************************
+ * USES: VECTOR_REGISTER
+ * ON EXIT: vector in VECTOR_REGISTER
*
- * NOTES: exception stack frame pushed; r3 saved in frame
+ * NOTES: exception stack frame pushed; VECTOR_REGISTER saved in frame
*
*/
- .macro PPC_EXC_MIN_PROLOG_SYNC _NAME _VEC _PRI _FLVR
- .global ppc_exc_min_prolog_sync_\_NAME
+ .macro PPC_EXC_MIN_PROLOG_SYNC _NAME _VEC _PRI _FLVR
+
+ .global ppc_exc_min_prolog_sync_\_NAME
ppc_exc_min_prolog_sync_\_NAME:
stwu r1, -EXCEPTION_FRAME_END(r1)
- stw r3, GPR3_OFFSET(r1)
- li r3, \_VEC
- ba wrap_nopush_\_FLVR
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ li VECTOR_REGISTER, \_VEC
+ ba wrap_nopush_\_FLVR
+
.endm
/*
- **********************************************************************
+ *****************************************************************************
* MACRO: TEST_1ST_OPCODE_crit
- **********************************************************************
+ *****************************************************************************
*
- * USES: REG, cr4
- * ON EXIT: REG available (contains *pc - STW_R1_R13(0)), return value in cr4
+ * USES: REG, CR_LOCK
+ * ON EXIT: REG available (contains *pc - STW_R1_R13(0)),
+ * return value in CR_LOCK.
*
- * test opcode interrupted by critical (asynchronous) exception;
- * set cr4 if
+ * test opcode interrupted by critical (asynchronous) exception; set CR_LOCK if
*
- * *SRR0 == 'stw r1, ppc_exc_std_lock@sdarel(r13)'
+ * *SRR0 == 'stw r1, ppc_exc_lock_std@sdarel(r13)'
*
*/
- .macro TEST_1ST_OPCODE_crit _REG _SRR0
- mf\_SRR0 \_REG
- lwz \_REG, 0(\_REG)
- /* opcode now in REG */
+ .macro TEST_1ST_OPCODE_crit _REG
+
+ lwz \_REG, SRR0_FRAME_OFFSET(FRAME_REGISTER)
+ lwz \_REG, 0(\_REG)
+ /* opcode now in REG */
- /* subtract upper 16bits of 'stw r1, 0(r13)' instruction */
+ /* subtract upper 16bits of 'stw r1, 0(r13)' instruction */
subis \_REG, \_REG, STW_R1_R13(0)@h
/*
* if what's left compares against the 'ppc_exc_lock_std@sdarel'
* address offset then we have a match...
*/
- cmpli cr4, \_REG, ppc_exc_lock_std@sdarel
- .endm
+ cmpli CR_LOCK, \_REG, ppc_exc_lock_std@sdarel
-/*
- **********************************************************************
- * MACRO: TEST_1ST_OPCODE_mchk
- **********************************************************************
- * USES: REG, cr0, cr4
- * ON EXIT: REG, cr0 available, return value in cr4
- *
- * test opcode interrupted by (asynchronous) machine-check exception;
- * set cr4 if
- *
- * *SRR0 == 'stw r1, ppc_exc_std_lock@sdarel(r13)'
- *
- * OR
- *
- * *SRR0 == 'stw r1, ppc_exc_crit_lock@sdarel(r13)'
- *
- */
- .macro TEST_1ST_OPCODE_mchk _REG _SRR0
- TEST_1ST_OPCODE_crit _REG=\_REG _SRR0=\_SRR0
- cmpli cr0, \_REG, ppc_exc_lock_crit@sdarel
- /* cr4 set if 1st opcode matches writing either lock */
- cror EQ(cr4), EQ(cr4), EQ(cr0)
.endm
/*
- **********************************************************************
+ *****************************************************************************
* MACRO: TEST_LOCK_std
- **********************************************************************
+ *****************************************************************************
*
- * USES: cr4
- * ON EXIT: cr4 is set (indicates no lower-priority locks are engaged)
+ * USES: CR_LOCK
+ * ON EXIT: CR_LOCK is set (indicates no lower-priority locks are engaged)
*
*/
- .macro TEST_LOCK_std _SRR0 _FLVR
- /* 'std' is lowest level, i.e., can not be locked -> EQ(cr4) = 1 */
- creqv EQ(cr4), EQ(cr4), EQ(cr4)
+ .macro TEST_LOCK_std _FLVR
+ /* 'std' is lowest level, i.e., can not be locked -> EQ(CR_LOCK) = 1 */
+ creqv EQ(CR_LOCK), EQ(CR_LOCK), EQ(CR_LOCK)
.endm
/*
- **********************************************************************
+ ******************************************************************************
* MACRO: TEST_LOCK_crit
- **********************************************************************
+ ******************************************************************************
*
- * USES: cr4, cr0, r4, r5
- * ON EXIT: cr0, r4, r5 available, returns result in cr4
+ * USES: CR_LOCK, cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+ * ON EXIT: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1 available,
+ * returns result in CR_LOCK.
*
* critical-exception wrapper has to check 'std' lock:
*
- * Return cr4 = ( ppc_std_lock == 0
- * && * _SRR0 != <write std lock instruction>
- * && ppc_exc_crit_always_enabled == 0 )
+ * Return CR_LOCK = ( ppc_lock_std == 0
+ * && * SRR0 != <write std lock instruction> )
*
*/
- .macro TEST_LOCK_crit _SRR0 _FLVR
- /* Are critical exceptions always enabled ? */
- lwz r4, ppc_exc_crit_always_enabled@sdarel(r13)
- cmpwi cr4, r4, 0
- bne cr4, TEST_LOCK_crit_done_\_FLVR
-
- /* STD interrupt could have been interrupted before
- * executing the 1st instruction which sets the lock;
- * check this case by looking at the opcode present
- * at the interrupted PC location.
+ .macro TEST_LOCK_crit _FLVR
+
+ /* STD interrupt could have been interrupted before executing the 1st
+ * instruction which sets the lock; check this case by looking at the
+ * opcode present at the interrupted PC location.
*/
- TEST_1ST_OPCODE_crit _REG=r4 _SRR0=\_SRR0
+ TEST_1ST_OPCODE_crit _REG=SCRATCH_REGISTER_0
/*
- * At this point cr4 is set if
+ * At this point CR_LOCK is set if
*
* *(PC) == 'stw r1, ppc_exc_lock_std@sdarel(r13)'
*
*/
/* check lock */
- lwz r5, ppc_exc_lock_std@sdarel(r13)
- cmpli cr0, r5, 0
+ lwz SCRATCH_REGISTER_1, ppc_exc_lock_std@sdarel(r13)
+ cmpli cr0, SCRATCH_REGISTER_1, 0
/*
*
- * cr4 = ( *pc != <write std lock instruction>
+ * CR_LOCK = ( *pc != <write std lock instruction>
* && ppc_exc_lock_std == 0 )
*/
- crandc EQ(cr4), EQ(cr0), EQ(cr4)
-TEST_LOCK_crit_done_\_FLVR:
+ crandc EQ(CR_LOCK), EQ(cr0), EQ(CR_LOCK)
+
.endm
-#if 0
/*
- **********************************************************************
+ ******************************************************************************
* MACRO: TEST_LOCK_mchk
- **********************************************************************
- *
- * USES: cr4, cr0, r4, r5
- * ON EXIT: cr0, r4, r5 available, returns result in cr4
- *
- * machine-check wrapper has to check 'std' and 'crit' locks, i.e.,
- *
- * Return cr4 = ( * _SRR0 != <write std lock instruction>
- * && * _SRR0 != <write crit lock instruction> )
- * && ppc_std_lock == 0
- * && ppc_crit_lock == 0 )
+ ******************************************************************************
+ *
+ * USES: CR_LOCK
+ * ON EXIT: CR_LOCK is cleared.
+ *
+ * We never want to disable machine-check exceptions to avoid a checkstop. This
+ * means that we cannot use enabling/disabling this type of exception for
+ * protection of critical OS data structures. Therefore, calling OS primitives
+ * from a machine-check handler is ILLEGAL. Since machine-checks can happen
+ * anytime it is not legal to perform a context switch (since the exception
+ * could hit an IRQ protected section of code). We simply let this test return
+ * 0 so that ppc_exc_wrapup is never called after handling a machine-check.
*/
- .macro TEST_LOCK_mchk _SRR0 _FLVR
- TEST_1ST_OPCODE_mchk _REG=r4 _SRR0=\_SRR0
- /* cr4 set if 1st opcode matches writing either lock */
-
- /* proceed checking the locks */
- lwz r5, ppc_exc_lock_std@sdarel(r13)
- lwz r4, ppc_exc_lock_crit@sdarel(r13)
- /* set cr0 if neither lock is set */
- or. r4, r4, r5
- /* set cr4 if
- * cr0 is set (neither lock set)
- * AND cr4 is clear (interrupted opcode doesn't match writing any lock)
- */
- crandc EQ(cr4), EQ(cr0), EQ(cr4)
+ .macro TEST_LOCK_mchk _SRR0 _FLVR
+
+ crxor EQ(CR_LOCK), EQ(CR_LOCK), EQ(CR_LOCK)
+
.endm
-#else
+
/*
- **********************************************************************
- * MACRO: TEST_LOCK_mchk
- **********************************************************************
+ ******************************************************************************
+ * MACRO: RECOVER_CHECK_\PRI
+ ******************************************************************************
*
- * USES: cr4
- * ON EXIT: cr4 is cleared.
+ * USES: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+ * ON EXIT: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1 available
*
- * We never want to disable machine-check exceptions to avoid
- * a checkstop. This means that we cannot use enabling/disabling
- * this type of exception for protection of critical OS data structures.
- * Therefore, calling OS primitives from a machine-check handler
- * is ILLEGAL. Since machine-checks can happen anytime it is not
- * legal to perform a context switch (since the exception could
- * hit a IRQ protected section of code).
- * We simply let this test return 0 so that ppc_exc_wrapup is
- * never called after handling a machine-check.
+ * Checks if the exception is recoverable for exceptions which need such a
+ * test.
*/
- .macro TEST_LOCK_mchk _SRR0 _FLVR
- crxor EQ(cr4), EQ(cr4), EQ(cr4)
+
+/* Standard */
+ .macro RECOVER_CHECK_std _FLVR
+
+ /* Check if exception is recoverable */
+ lwz SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
+ lwz SCRATCH_REGISTER_1, ppc_exc_msr_bits@sdarel(r13)
+ xor SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ andi. SCRATCH_REGISTER_0, SCRATCH_REGISTER_1, MSR_RI
+
+recover_check_twiddle_std_\_FLVR:
+
+ /* Not recoverable? */
+ bne recover_check_twiddle_std_\_FLVR
+
.endm
-#endif
+/* Critical */
+ .macro RECOVER_CHECK_crit _FLVR
+
+ /* Nothing to do */
+
+ .endm
+
+/* Machine check */
+ .macro RECOVER_CHECK_mchk _FLVR
+
+ /* Check if exception is recoverable */
+ lwz SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
+ lwz SCRATCH_REGISTER_1, ppc_exc_msr_bits@sdarel(r13)
+ xor SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ andi. SCRATCH_REGISTER_0, SCRATCH_REGISTER_1, MSR_RI
+
+recover_check_twiddle_mchk_\_FLVR:
+
+ /* Not recoverable? */
+ bne recover_check_twiddle_mchk_\_FLVR
+
+ .endm
/*
- **********************************************************************
+ ******************************************************************************
* MACRO: WRAP
- **********************************************************************
- *
- * Minimal prologue snippets jump into WRAP
- * which prepares calling code common to all
- * flavors of exceptions.
- * We must have this macro instantiated for
- * each possible flavor of exception so that
- * we use the proper lock variable, SRR register pair and
- * RFI instruction.
- *
+ ******************************************************************************
+ *
+ * Minimal prologue snippets jump into WRAP which calls the high level
+ * exception handler. We must have this macro instantiated for each possible
+ * flavor of exception so that we use the proper lock variable, SRR register
+ * pair and RFI instruction.
+ *
+ * We have two types of exceptions: synchronous and asynchronous (= interrupt
+ * like). The type is encoded in the vector register (= VECTOR_REGISTER). For
+ * interrupt like exceptions the MSB in the vector register is set. The
+ * exception type is kept in the comparison register CR_TYPE. Normal
+ * exceptions (MSB is clear) use the task stack and a context switch may happen
+ * at any time. The interrupt like exceptions disable thread dispatching and
+ * switch to the interrupt stack (base address is in SPRG1).
+ *
+ * +
+ * |
+ * | Minimal prologue
+ * |
+ * +
+ * |
+ * | o Setup frame pointer
+ * | o Save basic registers
+ * | o Determine exception type:
+ * | synchronous or asynchronous
+ * |
+ * +-----+
+ * Synchronous exceptions: | | Asynchronous exceptions:
+ * | |
+ * Save non-volatile registers | | o Increment thread dispatch
+ * | | disable level
+ * | | o Increment ISR nest level
+ * | | o Clear lock
+ * | | o Switch stack if necessary
+ * | |
+ * +---->+
+ * |
+ * | o Save volatile registers
+ * | o Change MSR if necessary
+ * | o Call high level handler
+ * | o Call global handler if necessary
+ * | o Check if exception is recoverable
+ * |
+ * +-----+
+ * Synchronous exceptions: | | Asynchronous exceptions:
+ * | |
+ * Restore non-volatile registers | | o Decrement ISR nest level
+ * | | o Switch stack
+ * | | o Decrement thread dispatch
+ * | | disable level
+ * | | o Test lock
+ * | | o May do a context switch
+ * | |
+ * +---->+
+ * |
+ * | o Restore MSR if necessary
+ * | o Restore volatile registers
+ * | o Restore frame pointer
+ * | o Return
+ * |
+ * +
*/
.macro WRAP _FLVR _PRI _SRR0 _SRR1 _RFI
+
wrap_\_FLVR:
- stwu r1, -EXCEPTION_FRAME_END(r1)
+
+ /* Push exception frame */
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+
wrap_nopush_\_FLVR:
- stw r14, GPR14_OFFSET(r1)
-wrap_no_save_r14_\_FLVR:
-
- /* Save r4 r5 and CR; we want CR soon */
- mfcr r14
- stw r4, GPR4_OFFSET(r1)
- stw r5, GPR5_OFFSET(r1)
- stw r14, EXC_CR_OFFSET(r1)
-
- /* Check if this is an 'interrupt-type' exception
- * (MSB vector is set).
- * 'interrupt-type' exceptions disable thread dispatching
- * and switch to a private stack.
- * The type of exception is kept in (non-volatile) cr2
- * < 0 -> interrupt-type
- * > 0 -> 'normal' exception; always on task stack,
- * may switch context at any time.
+
+ /* Save frame register */
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+
+wrap_no_save_frame_register_\_FLVR:
+
+ /*
+ * We save at first only some scratch registers
+ * and the CR. We use a non-volatile register
+ * for the exception frame pointer (= FRAME_REGISTER).
+ */
+
+ /* Move frame address in non-volatile FRAME_REGISTER */
+ mr FRAME_REGISTER, r1
+
+ /* Save scratch registers */
+ stw SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(FRAME_REGISTER)
+ stw SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(FRAME_REGISTER)
+ stw SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(FRAME_REGISTER)
+
+ /* Save CR */
+ mfcr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CR_OFFSET(FRAME_REGISTER)
+
+ /* Check exception type and remember it in non-volatile CR_TYPE */
+ cmpwi CR_TYPE, VECTOR_REGISTER, 0
+
+ /*
+ * Depending on the exception type we do now save the non-volatile
+ * registers or disable thread dispatching and switch to the ISR stack.
+ */
+
+ /* Branch for synchronous exceptions */
+ bge CR_TYPE, wrap_save_non_volatile_regs_\_FLVR
+
+ /*
+	 * Increment the thread dispatch disable level so that a higher
+	 * priority exception occurring here cannot run the scheduler. It
+	 * is safe to increment this without disabling higher priority
+	 * exceptions since those will see that we wrote the lock anyway.
*/
- cmpwi cr2, r3, 0
- /*
- * Save frame address in r14
+ /* Increment ISR nest level and thread dispatch disable level */
+ lwz SCRATCH_REGISTER_0, _ISR_Nest_level@sdarel(r13)
+ lwz SCRATCH_REGISTER_1, _Thread_Dispatch_disable_level@sdarel(r13)
+ addi SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, 1
+ addi SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, 1
+ stw SCRATCH_REGISTER_0, _ISR_Nest_level@sdarel(r13)
+ stw SCRATCH_REGISTER_1, _Thread_Dispatch_disable_level@sdarel(r13)
+
+ /*
+ * No higher-priority exception occurring after this point
+ * can cause a context switch.
*/
- mr r14, r1
- bge cr2, no_thread_dispatch_disable_\_FLVR
+ /* Clear lock */
+ li SCRATCH_REGISTER_0, 0
+ stw SCRATCH_REGISTER_0, ppc_exc_lock_\_PRI@sdarel(r13)
+
+ /* Switch stack if necessary */
+ mfspr SCRATCH_REGISTER_0, SPRG1
+ cmpw SCRATCH_REGISTER_0, r1
+ blt wrap_stack_switch_\_FLVR
+ mfspr SCRATCH_REGISTER_1, SPRG2
+ cmpw SCRATCH_REGISTER_1, r1
+ blt wrap_stack_switch_done_\_FLVR
+
+wrap_stack_switch_\_FLVR:
+
+ mr r1, SCRATCH_REGISTER_0
- /* first thing we need to
- * increment the thread-dispatch disable level
- * in case a higher priority exception occurs
- * we don't want it to run the scheduler.
- * (It is safe to increment this w/o disabling
- * higher priority interrupts since those will
- * see that we wrote the lock anyways).
+wrap_stack_switch_done_\_FLVR:
+
+ /*
+ * Load the pristine VECTOR_REGISTER from a special location for
+ * asynchronous exceptions. The synchronous exceptions save the
+ * VECTOR_REGISTER in their minimal prologue.
*/
- lwz r5, _Thread_Dispatch_disable_level@sdarel(r13)
- addi r5, r5, 1
- stw r5, _Thread_Dispatch_disable_level@sdarel(r13)
+ lwz SCRATCH_REGISTER_2, ppc_exc_vector_register_\_PRI@sdarel(r13)
+
+ /* Save pristine vector register */
+ stw SCRATCH_REGISTER_2, VECTOR_OFFSET(FRAME_REGISTER)
- /* clear lock; no higher-priority interrupt occurring after
- * this point can cause a context switch.
+wrap_disable_thread_dispatching_done_\_FLVR:
+
+ /*
+ * We now have SCRATCH_REGISTER_0, SCRATCH_REGISTER_1,
+ * SCRATCH_REGISTER_2 and CR available. VECTOR_REGISTER still holds
+ * the vector (and exception type). FRAME_REGISTER is a pointer to the
+ * exception frame (always on the stack of the interrupted context).
+ * r1 is the stack pointer, either on the task stack or on the ISR
+ * stack. CR_TYPE holds the exception type.
*/
- li r5, 0
- stw r5, ppc_exc_lock_\_PRI@sdarel(r13)
- /* test lower-priority locks; result in (non-volatile) cr4 */
- TEST_LOCK_\_PRI _SRR0=\_SRR0 _FLVR=\_FLVR
+ /* Save SRR0 */
+ mfspr SCRATCH_REGISTER_0, \_SRR0
+ stw SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(FRAME_REGISTER)
- /* Peform stack switch if necessary */
- SWITCH_STACK RA=r4 RB=r5 FLVR=\_FLVR
+ /* Save SRR1 */
+ mfspr SCRATCH_REGISTER_0, \_SRR1
+ stw SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
- /* save r3, in exception frame */
- lwz r5, ppc_exc_gpr3_\_PRI@sdarel(r13)
- stw r5, GPR3_OFFSET(r14)
+ /* Save CTR */
+ mfctr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CTR_OFFSET(FRAME_REGISTER)
-no_thread_dispatch_disable_\_FLVR:
+ /* Save XER */
+ mfxer SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_XER_OFFSET(FRAME_REGISTER)
- /* save lr into exception frame */
- mflr r4
- stw r4, EXC_LR_OFFSET(r14)
+ /* Save LR */
+ mflr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_LR_OFFSET(FRAME_REGISTER)
- /* we now have r4,r5,lr,cr available;
- * r3 still holds the vector,
- * r14 a pointer to the exception frame (always on
- * task stack)
- * r1 is the stack pointer, either on the task stack
- * or on the IRQ stack
+ /* Save volatile registers */
+ stw r0, GPR0_OFFSET(FRAME_REGISTER)
+ stw r3, GPR3_OFFSET(FRAME_REGISTER)
+ stw r8, GPR8_OFFSET(FRAME_REGISTER)
+ stw r9, GPR9_OFFSET(FRAME_REGISTER)
+ stw r10, GPR10_OFFSET(FRAME_REGISTER)
+ stw r11, GPR11_OFFSET(FRAME_REGISTER)
+ stw r12, GPR12_OFFSET(FRAME_REGISTER)
+
+ /* Save read-only small data area anchor (EABI) */
+ stw r2, GPR2_OFFSET(FRAME_REGISTER)
+
+ /* Save vector number and exception type */
+ stw VECTOR_REGISTER, EXCEPTION_NUMBER_OFFSET(FRAME_REGISTER)
+
+ /* Load MSR bit mask */
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+
+ /*
+ * Change the MSR if necessary (MMU, RI),
+ * remember decision in non-volatile CR_MSR
+ */
+ cmpwi CR_MSR, SCRATCH_REGISTER_0, 0
+ bne CR_MSR, wrap_change_msr_\_FLVR
+
+wrap_change_msr_done_\_FLVR:
+
+ /*
+ * Call high level exception handler
+ */
+
+ /*
+ * Get the handler table index from the vector number. We have to
+ * discard the exception type. Take only the least significant five
+ * bits (= LAST_VALID_EXC + 1) from the vector register. Multiply by
+ * four (= size of function pointer).
*/
+ rlwinm SCRATCH_REGISTER_1, VECTOR_REGISTER, 2, 25, 29
+
+ /* Load handler table address */
+ LA SCRATCH_REGISTER_0, ppc_exc_handler_table
- /* retrieve SRR0/SRR1 */
- mf\_SRR0 r4
- mf\_SRR1 r5
+ /* Load handler address */
+ lwzx SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
/*
- * branch to common routine;
+ * First parameter = exception frame pointer + FRAME_LINK_SPACE
*
- * r1, r3, r4, r5, cr, lr and r14 are saved on the
- * stack at this point.
+ * We add FRAME_LINK_SPACE to the frame pointer because the high level
+ * handler expects a BSP_Exception_frame structure.
*/
- bl wrap_common
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
- /*
- * restore SRRs, r4, r5, r1 (stack pointer) and lr;
- * wrap_common restores r3, r14 and cr for us.
+ /*
+ * Second parameter = vector number (r4 is the VECTOR_REGISTER)
*
- * NOTE: we restore r1 from the frame rather than
- * just popping (adding to current r1) since the
- * exception handler might have done strange things
- * (e.g., a debugger moving and relocating the stack).
+ * Discard the exception type and store the vector number
+ * in the vector register. Take only the least significant
+ * five bits (= LAST_VALID_EXC + 1).
+ */
+ rlwinm VECTOR_REGISTER, VECTOR_REGISTER, 0, 27, 31
+
+ /* Call handler */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+ /* Check return value and call global handler if necessary */
+ cmpwi r3, 0
+ bne wrap_call_global_handler_\_FLVR
+
+wrap_handler_done_\_FLVR:
+
+ /* Check if exception is recoverable */
+ RECOVER_CHECK_\_PRI _FLVR=\_FLVR
+
+ /*
+ * Depending on the exception type we do now restore the non-volatile
+ * registers or enable thread dispatching and switch back from the ISR
+ * stack.
+ */
+
+ /* Branch for synchronous exceptions */
+ bge CR_TYPE, wrap_restore_non_volatile_regs_\_FLVR
+
+ /*
+ * Switch back to original stack (FRAME_REGISTER == r1 if we are still
+ * on the IRQ stack).
+ */
+ mr r1, FRAME_REGISTER
+
+ /*
+ * Check thread dispatch disable level AND lower priority locks (in
+ * CR_LOCK): ONLY if the thread dispatch disable level == 0 AND no lock
+ * is set then call ppc_exc_wrapup() which may do a context switch. We
+ * can skip TEST_LOCK, because it has no side effects.
+ */
+
+ /* Decrement ISR nest level and thread dispatch disable level */
+ lwz SCRATCH_REGISTER_0, _ISR_Nest_level@sdarel(r13)
+ lwz SCRATCH_REGISTER_1, _Thread_Dispatch_disable_level@sdarel(r13)
+ subi SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, 1
+ subic. SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, 1
+ stw SCRATCH_REGISTER_0, _ISR_Nest_level@sdarel(r13)
+ stw SCRATCH_REGISTER_1, _Thread_Dispatch_disable_level@sdarel(r13)
+
+ /* Branch to skip thread dispatching */
+ bne wrap_thread_dispatching_done_\_FLVR
+
+ /* Test lower-priority locks (result in non-volatile CR_LOCK) */
+ TEST_LOCK_\_PRI _FLVR=\_FLVR
+
+ /* Branch to skip thread dispatching */
+ bne CR_LOCK, wrap_thread_dispatching_done_\_FLVR
+
+ /* Load address of ppc_exc_wrapup() */
+ LA SCRATCH_REGISTER_0, ppc_exc_wrapup
+
+ /* First parameter = exception frame pointer + FRAME_LINK_SPACE */
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
+
+ /* Call ppc_exc_wrapup() */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+wrap_thread_dispatching_done_\_FLVR:
+
+ /* Restore MSR? */
+ bne CR_MSR, wrap_restore_msr_\_FLVR
+
+wrap_restore_msr_done_\_FLVR:
+
+ /*
+ * At this point r1 is a valid exception frame pointer and
+ * FRAME_REGISTER is no longer needed.
+ */
+
+ /* Restore frame register */
+ lwz FRAME_REGISTER, FRAME_OFFSET(r1)
+
+ /* Restore XER and CTR */
+ lwz SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
+ mtxer SCRATCH_REGISTER_0
+ mtctr SCRATCH_REGISTER_1
+
+ /* Restore CR and LR */
+ lwz SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)
+ mtcr SCRATCH_REGISTER_0
+ mtlr SCRATCH_REGISTER_1
+
+ /* Restore volatile registers */
+ lwz r0, GPR0_OFFSET(r1)
+ lwz r3, GPR3_OFFSET(r1)
+ lwz r8, GPR8_OFFSET(r1)
+ lwz r9, GPR9_OFFSET(r1)
+ lwz r10, GPR10_OFFSET(r1)
+ lwz r11, GPR11_OFFSET(r1)
+ lwz r12, GPR12_OFFSET(r1)
+
+ /* Restore read-only small data area anchor (EABI) */
+ lwz r2, GPR2_OFFSET(r1)
+
+ /* Restore vector register */
+ lwz VECTOR_REGISTER, VECTOR_OFFSET(r1)
+
+ /*
+ * Disable all asynchronous exceptions which can do a thread dispatch.
+ * See README.
*/
- mt\_SRR0 r4
- mt\_SRR1 r5
- /* restore lr */
- lwz r5, EXC_LR_OFFSET(r1)
- lwz r4, GPR4_OFFSET(r1)
- mtlr r5
- lwz r5, GPR5_OFFSET(r1)
- lwz r1, EXC_MIN_GPR1(r1)
+ INTERRUPT_DISABLE SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+
+ /* Restore scratch registers and SRRs */
+ lwz SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
+ mtspr \_SRR0, SCRATCH_REGISTER_0
+ lwz SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
+ mtspr \_SRR1, SCRATCH_REGISTER_1
+ lwz SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
+
+ /*
+ * We restore r1 from the frame rather than just popping (adding to
+ * current r1) since the exception handler might have done strange
+ * things (e.g. a debugger moving and relocating the stack).
+ */
+ lwz r1, 0(r1)
+
+ /* Return */
\_RFI
+
+wrap_change_msr_\_FLVR:
+
+ mfmsr SCRATCH_REGISTER_1
+ or SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ msync
+ isync
+ b wrap_change_msr_done_\_FLVR
+
+wrap_restore_msr_\_FLVR:
+
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+ mfmsr SCRATCH_REGISTER_1
+ andc SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ msync
+ isync
+ b wrap_restore_msr_done_\_FLVR
+
+wrap_save_non_volatile_regs_\_FLVR:
+
+ /* Load pristine stack pointer */
+ lwz SCRATCH_REGISTER_1, 0(FRAME_REGISTER)
+
+ /* Save small data area anchor (SYSV) */
+ stw r13, GPR13_OFFSET(FRAME_REGISTER)
+
+ /* Save pristine stack pointer */
+ stw SCRATCH_REGISTER_1, GPR1_OFFSET(FRAME_REGISTER)
+
+ /* r14 is the FRAME_REGISTER and will be saved elsewhere */
+
+ /* Save non-volatile registers r15 .. r31 */
+ stmw r15, GPR15_OFFSET(FRAME_REGISTER)
+
+ b wrap_disable_thread_dispatching_done_\_FLVR
+
+wrap_restore_non_volatile_regs_\_FLVR:
+
+ /* Load stack pointer */
+ lwz SCRATCH_REGISTER_0, GPR1_OFFSET(r1)
+
+ /* Restore small data area anchor (SYSV) */
+ lwz r13, GPR13_OFFSET(r1)
+
+ /* r14 is the FRAME_REGISTER and will be restored elsewhere */
+
+ /* Restore non-volatile registers r15 .. r31 */
+ lmw r15, GPR15_OFFSET(r1)
+
+ /* Restore stack pointer */
+ stw SCRATCH_REGISTER_0, 0(r1)
+
+ b wrap_thread_dispatching_done_\_FLVR
+
+wrap_call_global_handler_\_FLVR:
+
+ /* First parameter = exception frame pointer + FRAME_LINK_SPACE */
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
+
+ /* Load global handler address */
+ LW SCRATCH_REGISTER_0, globalExceptHdl
+
+ /* Check address */
+ cmpwi SCRATCH_REGISTER_0, 0
+ beq wrap_handler_done_\_FLVR
+
+ /* Call global handler */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+ b wrap_handler_done_\_FLVR
+
.endm
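
The flow diagram in the WRAP macro condenses into C-like code for the
asynchronous path. This is a sketch only, under stated assumptions: the
register and lock names are stand-ins, and register allocation, MSR handling,
the FRAME_LINK_SPACE adjustment and the synchronous path are omitted:

#include <stddef.h>
#include <stdint.h>

typedef struct BSP_Exception_frame BSP_Exception_frame;
typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);

/* Stand-ins (assumptions) for machine registers and score variables. */
extern uintptr_t r1, sprg1_stack_top, sprg2_stack_start;
extern volatile uint32_t _ISR_Nest_level, _Thread_Dispatch_disable_level;
extern uint32_t ppc_exc_lock_pri;
extern ppc_exc_handler_t ppc_exc_handler_table[];
extern void (*globalExceptHdl)(BSP_Exception_frame *);
extern void ppc_exc_wrapup(BSP_Exception_frame *);
extern int test_lock_pri(void); /* stand-in for TEST_LOCK_\PRI */

void wrap_async_sketch(uint32_t vector, BSP_Exception_frame *frame)
{
  _ISR_Nest_level++;
  _Thread_Dispatch_disable_level++;
  ppc_exc_lock_pri = 0;        /* clear the prologue lock */

  /* Switch stacks only if r1 is not already inside the interrupt stack
   * area (sprg2_stack_start, sprg1_stack_top]; this test is what makes
   * disabling interrupts around the switch unnecessary. */
  if (r1 > sprg1_stack_top || r1 <= sprg2_stack_start)
    r1 = sprg1_stack_top;

  /* Index with the five least significant vector bits and call the
   * handler (the real code adds FRAME_LINK_SPACE before the call). */
  int rc = ppc_exc_handler_table[vector & 0x1f](frame, vector & 0x1f);
  if (rc != 0 && globalExceptHdl != NULL)
    globalExceptHdl(frame);    /* unhandled: fall back to global handler */

  r1 = (uintptr_t) frame;      /* back to the interrupted stack */
  _ISR_Nest_level--;
  if (--_Thread_Dispatch_disable_level == 0 && test_lock_pri())
    ppc_exc_wrapup(frame);     /* may perform a context switch */
}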
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_bspsupp.h b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_bspsupp.h
index 5c816ff86c..6121895291 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_bspsupp.h
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_bspsupp.h
@@ -13,6 +13,10 @@
#ifndef PPC_EXC_SHARED_H
#define PPC_EXC_SHARED_H
+#include <stdint.h>
+
+#include "vectors.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -33,7 +37,6 @@ extern "C" {
*
* Other return values are reserved.
*/
-
typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
/*
@@ -46,52 +49,6 @@ typedef int (*ppc_exc_handler_t)(BSP_Exception_frame *f, unsigned int vector);
*/
extern uint32_t ppc_exc_msr_bits;
-/*
- * Set of MSR bits required to disable all
- * asynchronous exceptions (depends on CPU type;
- * must be set during initialization).
- * Interrupt are disabled by writing the
- * one's complement of this mask to msr:
- * msr &= ~ppc_exc_msr_irq_mask;
- */
-extern uint32_t ppc_exc_msr_irq_mask;
-
-/*
- * Cache size of the interrupt stack in a SDA variable
- */
-extern uint32_t ppc_exc_intr_stack_size;
-
-/*
- * This variable defines the semantics of asynchronous
- * critical exceptions ("critical interrupts")
- * on BookE-style CPUs.
- *
- * There are the following ways of using these interrupts
- *
- * 1) permanently disabled; no support
- * 2) permanently enabled; handlers for critical interrupts
- * MUST NOT use any RTEMS primitives at all. They cannot
- * directly e.g., release a semaphore.
- * 3) enabled/disabled by the OS during critical sections.
- * In this scenario critical interrupts are not much
- * different from 'normal' interrupts but handlers may
- * safely use RTEMS primitives (i.e., the subset which
- * is OK to use from ISR context).
- *
- * The BSP (or application) may initialize this
- * variable PRIOR to calling 'initialize_exceptions'
- * to any of the following values:
- *
- * NOTE: so far, OS_SUPPORT is not supported by the cpukit
- * yet since the IRQ/enabling-disabling primitives
- * do not mask MSR_CE yet.
- */
-#define PPC_EXC_CRIT_NO_OS_SUPPORT 1
-#define PPC_EXC_CRIT_OS_SUPPORT 0
-#define PPC_EXC_CRIT_DISABLED (-1)
-
-extern int32_t ppc_exc_crit_always_enabled;
-
/* (See README under CAVEATS). During initialization
* a check is performed to assert that write-back
* caching is enabled for memory accesses. If a BSP
@@ -169,6 +126,8 @@ extern void ppc_exc_tgpr_clr_prolog();
*/
extern void ppc_exc_min_prolog_auto();
+extern void ppc_exc_min_prolog_auto_packed();
+
/* CPU support may store the address of a function here
* that can be used by the default exception handler to
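
A sketch of a handler matching ppc_exc_handler_t and its return convention
(0 = handled, nonzero = the wrapper invokes the global handler); the fix-up
helper and the alignment scenario are purely illustrative assumptions:

#include <bsp/vectors.h> /* assumed install location */

extern int can_fix_up(BSP_Exception_frame *f); /* hypothetical helper */

static int my_alignment_handler(BSP_Exception_frame *f, unsigned int vector)
{
  if (can_fix_up(f)) {
    f->EXC_SRR0 += 4; /* skip the faulting instruction */
    return 0;         /* handled */
  }
  return 1;           /* wrapper will call the global handler */
}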
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_hdl.c b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_hdl.c
index ec0533e822..f3d9350638 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_hdl.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_hdl.c
@@ -39,11 +39,9 @@ uint32_t ppc_exc_lock_std = 0;
uint32_t ppc_exc_lock_crit = 0;
uint32_t ppc_exc_lock_mchk = 0;
-uint32_t ppc_exc_gpr3_std = 0;
-uint32_t ppc_exc_gpr3_crit = 0;
-uint32_t ppc_exc_gpr3_mchk = 0;
-
-uint32_t ppc_exc_msr_irq_mask = MSR_EE;
+uint32_t ppc_exc_vector_register_std = 0;
+uint32_t ppc_exc_vector_register_crit = 0;
+uint32_t ppc_exc_vector_register_mchk = 0;
/* MSR bits to enable once critical status info is saved and the stack
* is switched; must be set depending on CPU type
@@ -53,74 +51,44 @@ uint32_t ppc_exc_msr_irq_mask = MSR_EE;
*/
uint32_t ppc_exc_msr_bits = MSR_IR | MSR_DR | MSR_RI;
-uint32_t ppc_exc_intr_stack_size = 0;
-
-int32_t ppc_exc_crit_always_enabled = PPC_EXC_CRIT_NO_OS_SUPPORT;
-
+int ppc_exc_handler_default( BSP_Exception_frame *f, unsigned int vector)
+{
+ return 1;
+}
/* Table of C-handlers */
-static ppc_exc_handler_t ppc_exc_handlers[LAST_VALID_EXC + 1] = {0, };
+ppc_exc_handler_t ppc_exc_handler_table [LAST_VALID_EXC + 1] = {
+ [0 ... LAST_VALID_EXC] = ppc_exc_handler_default
+};
-ppc_exc_handler_t
-ppc_exc_get_handler(unsigned vector)
+ppc_exc_handler_t ppc_exc_get_handler( unsigned vector)
{
- if ( vector > LAST_VALID_EXC )
+ ppc_exc_handler_t handler = NULL;
+ if (vector > LAST_VALID_EXC) {
return 0;
- return ppc_exc_handlers[vector];
+ }
+ if (ppc_exc_handler_table [vector] != ppc_exc_handler_default) {
+ handler = ppc_exc_handler_table [vector];
+ }
+ return handler;
}
-int
-ppc_exc_set_handler(unsigned vector, ppc_exc_handler_t hdl)
+int ppc_exc_set_handler( unsigned vector, ppc_exc_handler_t handler)
{
- if ( vector > LAST_VALID_EXC )
+ if (vector > LAST_VALID_EXC) {
return -1;
- ppc_exc_handlers[vector] = hdl;
- return 0;
-}
-
-/* This routine executes on the interrupt stack (if vect < 0) */
-int
-ppc_exc_C_wrapper(int vect, BSP_Exception_frame *f)
-{
-unsigned int i = vect & 0x3f;
-int rval = 1;
-
- if ( i <= LAST_VALID_EXC && ppc_exc_handlers[i] ) {
- rval = ppc_exc_handlers[i](f, i);
- }
-
- if ( rval ) {
- /* not handled, so far ... */
- if ( globalExceptHdl ) {
- /*
- * global handler must be prepared to
- * deal with asynchronous exceptions!
- */
- globalExceptHdl(f);
- }
- rval = 0;
}
-
- if ( (ppc_exc_msr_bits ^ f->EXC_SRR1) & MSR_RI ) {
- printk("unrecoverable exception (RI was clear), spinning to death.\n");
- while (1)
- ;
+ if (handler == NULL) {
+ ppc_exc_handler_table [vector] = ppc_exc_handler_default;
+ } else {
+ ppc_exc_handler_table [vector] = handler;
}
-
- return rval;
+ return 0;
}
void
-ppc_exc_wrapup(int ll_rval, BSP_Exception_frame *f)
+ppc_exc_wrapup( BSP_Exception_frame *f)
{
- /* Check if we need to run the global handler now */
- if ( ll_rval ) {
- /* We get here if ppc_exc_C_wrapper() returned nonzero.
- * This could be useful if we need to do something
- * with thread-dispatching enabled (at this point it is)
- * after handling an asynchronous exception.
- */
- }
/* dispatch_disable level is decremented from assembly code. */
if ( _Context_Switch_necessary ) {
/* FIXME: I believe it should be OK to re-enable
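For illustration, a BSP could use this API as sketched below; my_dec_handler
and my_bsp_install are invented names, and the handled/not-handled return
convention follows the wrapper logic removed above:

static int my_dec_handler(BSP_Exception_frame *f, unsigned int vector)
{
  /* service the decrementer here; returning 0 reports it as handled */
  return 0;
}

static void my_bsp_install(void)
{
  /* install the handler ... */
  ppc_exc_set_handler(ASM_BOOKE_DEC_VECTOR, my_dec_handler);
  /* ... and passing NULL later restores the default handler */
  ppc_exc_set_handler(ASM_BOOKE_DEC_VECTOR, NULL);
}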
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
index 19039bca38..eb60413232 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
@@ -14,6 +14,7 @@
*/
#ifndef LIBCPU_POWERPC_BSPSUPP_VECTORS_H
#define LIBCPU_POWERPC_BSPSUPP_VECTORS_H
+
#include <libcpu/raw_exception.h>
/*
@@ -61,15 +62,21 @@
#define EXC_CTR_OFFSET 152
#define EXC_XER_OFFSET 156
#define EXC_LR_OFFSET 160
+
+/* Exception stack frame -> BSP_Exception_frame */
+#define FRAME_LINK_SPACE 8
+
/*
* maintain the EABI-requested 8-byte alignment.
* As the SVR4 ABI requires 16, make it 16 (as some
* exceptions may need more registers to be processed ...)
*/
-#define EXCEPTION_FRAME_END 176
+#define EXCEPTION_FRAME_END 176
#ifndef ASM
+#include <stdint.h>
+
/* codemove is like memmove, but it also gets the cache line size
* as 4th parameter to synchronize them. If this last parameter is
* zero, it performs more or less like memmove. No copy is performed if
@@ -80,7 +87,12 @@
extern void * codemove(void *, const void *, unsigned int, unsigned long);
extern void exception_nop_enable(const rtems_raw_except_connect_data* ptr);
extern int exception_always_enabled(const rtems_raw_except_connect_data* ptr);
-extern void initialize_exceptions();
+
+void ppc_exc_initialize(
+ uint32_t interrupt_disable_mask,
+ uint32_t interrupt_stack_start,
+ uint32_t interrupt_stack_size
+);
typedef struct _BSP_Exception_frame {
unsigned EXC_SRR0;
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c
index dd80dd094d..83479775fa 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c
@@ -47,6 +47,8 @@ typedef struct LRFrameRec_ {
#define STACK_CLAMP 50 /* in case we have a corrupted bottom */
+SPR_RW(SPRG1)
+SPR_RW(SPRG2)
SPR_RO(LR)
SPR_RO(DAR)
#define DEAR_BOOKE 61
@@ -250,11 +252,11 @@ ppc_exc_min_prolog_template_t tmpl;
return (rtems_raw_except_func)prologues[n_prolog++];
}
-void ppc_exc_init(
+void ppc_exc_table_init(
rtems_raw_except_connect_data *exception_table,
int nEntries)
{
-int i,v;
+unsigned i,v;
ppc_raw_exception_category cat;
uintptr_t vaddr;
@@ -270,11 +272,17 @@ uintptr_t vaddr;
exception_config.rawExceptHdlTbl = exception_table;
exception_config.defaultRawEntry.exceptIndex = 0;
exception_config.defaultRawEntry.hdl.vector = 0;
- /* Note that the 'auto' handler cannot be used for everything; in particular,
- * it assumes classic exceptions with a vector offset aligned on a 256-byte
- * boundary.
- */
- exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto;
+
+ if (ppc_cpu_has_ivpr_and_ivor()) {
+ /* Use packed version with 16-byte boundaries for CPUs with IVPR and IVOR registers */
+ exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto_packed;
+ } else {
+ /* Note that the 'auto' handler cannot be used for everything; in particular,
+ * it assumes classic exceptions with a vector offset aligned on a 256-byte
+ * boundary.
+ */
+ exception_config.defaultRawEntry.hdl.raw_hdl = ppc_exc_min_prolog_auto;
+ }
/*
* Note that the cast of an array address to an unsigned
@@ -306,7 +314,7 @@ uintptr_t vaddr;
* default prolog can handle classic, synchronous exceptions
* with a vector offset aligned on a 256-byte boundary.
*/
- if ( PPC_EXC_CLASSIC == cat && 0 == ( vaddr & 0xff ) ) {
+ if (cat == PPC_EXC_CLASSIC
+ && ((vaddr & 0xff) == 0
+ || (ppc_cpu_has_ivpr_and_ivor() && (vaddr & 0xf) == 0))) {
exception_table[i].hdl.raw_hdl_size = exception_config.defaultRawEntry.hdl.raw_hdl_size;
exception_table[i].hdl.raw_hdl = exception_config.defaultRawEntry.hdl.raw_hdl;
} else {
@@ -329,44 +337,45 @@ uintptr_t vaddr;
#endif
}
-void initialize_exceptions()
+
+void ppc_exc_initialize(
+ uint32_t interrupt_disable_mask,
+ uint32_t interrupt_stack_start,
+ uint32_t interrupt_stack_size
+)
{
-int i;
-int n = sizeof(exception_table)/sizeof(exception_table[0]);
+ int i;
+ int n = sizeof(exception_table)/sizeof(exception_table[0]);
- /* Use current MMU / RI settings when running C exception handlers */
- ppc_exc_msr_bits = _read_MSR() & ( MSR_DR | MSR_IR | MSR_RI );
+ uint32_t interrupt_stack_end = 0;
+ uint32_t interrupt_stack_pointer = 0;
+ uint32_t *p = NULL;
- /* Cache size of the interrupt stack in a SDA variable */
- ppc_exc_intr_stack_size = rtems_configuration_get_interrupt_stack_size();
+ /* Ensure proper interrupt stack alignment */
+ interrupt_stack_start &= ~(CPU_STACK_ALIGNMENT - 1);
+ interrupt_stack_size &= ~(CPU_STACK_ALIGNMENT - 1);
- /* Copy into a SDA variable that is easy to access from
- * assembly code
- */
- if ( ppc_cpu_is_bookE() ) {
- ppc_exc_msr_irq_mask = MSR_EE | MSR_CE | MSR_DE ;
- switch (ppc_exc_crit_always_enabled) {
- case PPC_EXC_CRIT_NO_OS_SUPPORT:
- _write_MSR(_read_MSR() | (MSR_CE | MSR_DE));
- break;
-
- case PPC_EXC_CRIT_OS_SUPPORT:
- printk("ppc_exc: PPC_EXC_CRIT_OS_SUPPORT not yet implemented\n");
- /* fall thru */
-
- case PPC_EXC_CRIT_DISABLED:
- default:
- ppc_exc_crit_always_enabled = PPC_EXC_CRIT_DISABLED;
- _write_MSR(_read_MSR() & ~(MSR_CE | MSR_DE));
- break;
- }
- } else {
- ppc_exc_msr_irq_mask = MSR_EE ;
- }
+ /* Interrupt stack end and pointer */
+ interrupt_stack_end = interrupt_stack_start + interrupt_stack_size;
+ interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
+
+ /* Tag interrupt stack bottom */
+ p = (uint32_t *) interrupt_stack_pointer;
+ *p = 0;
+
+ /* Move interrupt stack values to special purpose registers */
+ _write_SPRG1( interrupt_stack_pointer);
+ _write_SPRG2( interrupt_stack_start);
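+
+ /* The exception prologue compares the current stack pointer against
+ * these two values and switches to the interrupt stack only if it is
+ * not already inside the area, so interrupts need not be disabled.
+ */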
+
+ /* Interrupt disable mask */
+ ppc_interrupt_set_disable_mask( interrupt_disable_mask);
+
+ /* Use current MMU / RI settings when running C exception handlers */
+ ppc_exc_msr_bits = _read_MSR() & ( MSR_DR | MSR_IR | MSR_RI );
for ( i=0; i<n; i++ )
exception_table[i].hdl.vector = i;
- ppc_exc_init(exception_table, n);
+ ppc_exc_table_init(exception_table, n);
/* If we are on a classic PPC with MSR_DR enabled then
* assert that the mapping for at least this task's
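A minimal start-up sketch for the new initialization entry point, with all
values invented for the example (8 KiB stack, external interrupts in the
disable mask):

static char my_interrupt_stack [8 * 1024]
  __attribute__ ((aligned (CPU_STACK_ALIGNMENT)));

static void my_bsp_exceptions_init(void)
{
  /* Example only: mask and stack placement are BSP decisions */
  ppc_exc_initialize(
    MSR_EE,
    (uint32_t) my_interrupt_stack,
    sizeof( my_interrupt_stack)
  );
}

The function itself rounds start and size down to CPU_STACK_ALIGNMENT and
publishes the resulting stack bounds through SPRG1 and SPRG2 for the
assembly prologue.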
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
index 8344aa5c06..405aa812d3 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
@@ -47,15 +47,7 @@ void _CPU_Initialize(
void (*thread_dispatch) /* ignored on this CPU */
)
{
- {
- unsigned hasFixed = 0;
- /* assert that our BSP has fixed PR288 */
- __asm__ __volatile__ ("mfspr %0, %2":
- "=r"(hasFixed):"0"(hasFixed),"i"(SPRG0));
- if ( PPC_BSP_HAS_FIXED_PR288 != hasFixed ) {
- BSP_panic("This BSP needs to fix PR#288");
- }
- }
+ /* Do nothing */
}
/*PAGE
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/e500_raw_exc_init.c b/c/src/lib/libcpu/powerpc/new-exceptions/e500_raw_exc_init.c
index 891dde59ae..4cafa2e5ed 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/e500_raw_exc_init.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/e500_raw_exc_init.c
@@ -1,16 +1,20 @@
#include <libcpu/cpuIdent.h>
#include <libcpu/raw_exception.h>
+#define MTIVPR(prefix) asm volatile("mtivpr %0"::"r"(prefix));
#define MTIVOR(x, vec) asm volatile("mtivor"#x" %0"::"r"(vec));
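+/* Example expansion: MTIVOR(1, v) becomes asm volatile("mtivor1 %0"::"r"(v)); */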
/* Use during early init for initializing the e500 IVOR/IVPR registers */
void
e500_setup_raw_exceptions()
{
-unsigned c;
+ unsigned c;
if ( ! (c = ppc_cpu_is_bookE()) || PPC_BOOKE_405 == c )
return;
- asm volatile("mtivpr %0"::"r"(0));
+
+ /* Set interrupt vector prefix register */
+ MTIVPR( ppc_exc_vector_base);
+
/* setup vectors to be compatible with classic PPC */
MTIVOR(0, ppc_get_vector_addr(ASM_BOOKE_CRIT_VECTOR)); /* Critical input not (yet) supported; use reset vector */
MTIVOR(1, ppc_get_vector_addr(ASM_MACH_VECTOR));
@@ -33,3 +37,34 @@ unsigned c;
MTIVOR(34, ppc_get_vector_addr(0x15));
MTIVOR(35, ppc_get_vector_addr(ASM_60X_PERFMON_VECTOR));
}
+
+void e200_setup_raw_exceptions()
+{
+ if (current_ppc_cpu != PPC_e200z6) {
+ return;
+ }
+
+ /* Interrupt vector prefix register */
+ MTIVPR( ppc_exc_vector_base);
+
+ /* Interrupt vector offset registers */
+ MTIVOR( 0, 0); /* Critical input */
+ MTIVOR( 1, ppc_get_vector_addr( ASM_MACH_VECTOR));
+ MTIVOR( 2, ppc_get_vector_addr( ASM_PROT_VECTOR));
+ MTIVOR( 3, ppc_get_vector_addr( ASM_ISI_VECTOR));
+ MTIVOR( 4, ppc_get_vector_addr( ASM_EXT_VECTOR));
+ MTIVOR( 5, ppc_get_vector_addr( ASM_ALIGN_VECTOR));
+ MTIVOR( 6, ppc_get_vector_addr( ASM_PROG_VECTOR));
+ MTIVOR( 7, ppc_get_vector_addr( ASM_FLOAT_VECTOR));
+ MTIVOR( 8, ppc_get_vector_addr( ASM_SYS_VECTOR));
+ MTIVOR( 9, 0); /* APU unavailable */
+ MTIVOR( 10, ppc_get_vector_addr( ASM_BOOKE_DEC_VECTOR));
+ MTIVOR( 11, ppc_get_vector_addr( ASM_BOOKE_FIT_VECTOR));
+ MTIVOR( 12, ppc_get_vector_addr( ASM_BOOKE_WDOG_VECTOR));
+ MTIVOR( 13, ppc_get_vector_addr( ASM_BOOKE_ITLBMISS_VECTOR));
+ MTIVOR( 14, ppc_get_vector_addr( ASM_BOOKE_DTLBMISS_VECTOR));
+ MTIVOR( 15, ppc_get_vector_addr( ASM_TRACE_VECTOR));
+ MTIVOR( 32, ppc_get_vector_addr( ASM_E200_SPE_UNAVAILABLE_VECTOR));
+ MTIVOR( 33, ppc_get_vector_addr( ASM_E200_SPE_DATA_VECTOR));
+ MTIVOR( 34, ppc_get_vector_addr( ASM_E200_SPE_ROUND_VECTOR));
+}
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.c b/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.c
index a7055d6cca..e330d80921 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.c
@@ -56,6 +56,8 @@ boolean bsp_exceptions_in_RAM = TRUE;
*/
uint32_t bsp_raw_vector_is_405_critical = 0;
+uint32_t ppc_exc_vector_base = 0;
+
void* ppc_get_vector_addr(rtems_vector vector)
{
unsigned vaddr;
@@ -94,13 +96,18 @@ void* ppc_get_vector_addr(rtems_vector vector)
default:
break;
}
- if ( bsp_exceptions_in_RAM )
- return ((void*) vaddr);
+ if (bsp_exceptions_in_RAM) {
+ if (ppc_cpu_has_ivpr_and_ivor()) {
+ return ((void*) ((vaddr >> 4) + ppc_exc_vector_base));
+ } else {
+ return ((void*) (vaddr + ppc_exc_vector_base));
+ }
+ }
return ((void*) (vaddr + 0xfff00000));
}
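Worked example for the IVPR/IVOR branch above: the classic system-call
vector at offset 0x0c00 packs to 0x0c00 >> 4 == 0xc0, so with an
illustrative ppc_exc_vector_base of 0x00004000 the prologue would start at
0x000040c0. The resulting 16-byte spacing matches the packed prologues
installed for these CPUs.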
-static cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_PROT_VECTOR ] = PPC_EXC_CLASSIC,
@@ -128,7 +135,7 @@ static cat_ini_t mpc_860_vector_categories[LAST_VALID_EXC + 1] = {
};
-static cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
@@ -153,7 +160,7 @@ static cat_ini_t mpc_5xx_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_5XX_NMEBREAK_VECTOR ] = PPC_EXC_CLASSIC,
};
-static cat_ini_t ppc_405_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t ppc_405_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_EXT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
[ ASM_BOOKE_DEC_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
@@ -197,14 +204,14 @@ static ppc_raw_exception_category altivec_vector_is_valid(rtems_vector vector)
return PPC_EXC_INVALID;
}
-static cat_ini_t mpc_750_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_750_vector_categories[LAST_VALID_EXC + 1] = {
PPC_BASIC_VECS,
[ ASM_60X_SYSMGMT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
[ ASM_60X_ADDR_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_60X_ITM_VECTOR ] = PPC_EXC_CLASSIC,
};
-static cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_RESET_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_MACH_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_PROT_VECTOR ] = PPC_EXC_CLASSIC,
@@ -225,7 +232,7 @@ static cat_ini_t psim_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
};
-static cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
PPC_BASIC_VECS,
[ ASM_60X_PERFMON_VECTOR ] = PPC_EXC_INVALID,
[ ASM_60X_SYSMGMT_VECTOR ] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
@@ -236,7 +243,7 @@ static cat_ini_t mpc_603_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
};
-static cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
PPC_BASIC_VECS,
[ ASM_60X_PERFMON_VECTOR ] = PPC_EXC_CLASSIC,
[ ASM_60X_IMISS_VECTOR ] = PPC_EXC_INVALID,
@@ -247,7 +254,41 @@ static cat_ini_t mpc_604_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_60X_ITM_VECTOR ] = PPC_EXC_INVALID,
};
-static cat_ini_t e500_vector_categories[LAST_VALID_EXC + 1] = {
+static const cat_ini_t e200_vector_categories [LAST_VALID_EXC + 1] = {
+ [ASM_MACH_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+
+ /* FIXME: Depending on HDI0[DAPUEN] this is a critical or debug exception */
+ [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_BOOKE_CRITICAL,
+
+ [ASM_E200_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E200_SPE_DATA_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E200_SPE_ROUND_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const cat_ini_t e300_vector_categories [LAST_VALID_EXC + 1] = {
+ PPC_BASIC_VECS,
+ [ASM_E300_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_E300_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_IMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+};
+
+static const cat_ini_t e500_vector_categories[LAST_VALID_EXC + 1] = {
[ ASM_MACH_VECTOR ] = PPC_EXC_E500_MACHCHK,
[ ASM_BOOKE_CRIT_VECTOR ] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
@@ -307,17 +348,22 @@ ppc_raw_exception_category rval = PPC_EXC_INVALID;
case PPC_8260:
/* case PPC_8240: -- same value as 8260 */
case PPC_8245:
+ rval = mpc_603_vector_categories[vector];
+ break;
case PPC_e300c1:
case PPC_e300c2:
case PPC_e300c3:
- rval = mpc_603_vector_categories[vector];
+ rval = e300_vector_categories[vector];
break;
case PPC_PSIM:
rval = psim_vector_categories[vector];
break;
- case PPC_8540:
+ case PPC_8540:
rval = e500_vector_categories[vector];
- break;
+ break;
+ case PPC_e200z6:
+ rval = e200_vector_categories[vector];
+ break;
case PPC_5XX:
rval = mpc_5xx_vector_categories[vector];
break;
@@ -454,7 +500,10 @@ int ppc_init_exceptions (rtems_raw_except_global_settings* config)
rtems_interrupt_disable(k);
- if ( (c = ppc_cpu_is_bookE()) && PPC_BOOKE_405 != c ) {
+ /* FIXME special case selection method */
+ if (current_ppc_cpu == PPC_e200z6) {
+ e200_setup_raw_exceptions();
+ } else if ( (c = ppc_cpu_is_bookE()) && PPC_BOOKE_405 != c ) {
e500_setup_raw_exceptions();
}
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.h b/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.h
index 8015db0136..9723a37f92 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.h
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/raw_exception.h
@@ -43,7 +43,6 @@
#define ASM_PROG_VECTOR 0x07
#define ASM_FLOAT_VECTOR 0x08
#define ASM_DEC_VECTOR 0x09
-#define ASM_60X_VEC_VECTOR 0x0A
#define ASM_SYS_VECTOR 0x0C
#define ASM_TRACE_VECTOR 0x0D
@@ -80,6 +79,7 @@
#define ASM_5XX_NMEBREAK_VECTOR 0x1F
+#define ASM_60X_VEC_VECTOR 0x0A
#define ASM_60X_PERFMON_VECTOR 0x0F
#define ASM_60X_IMISS_VECTOR 0x10
#define ASM_60X_DLMISS_VECTOR 0x11
@@ -89,6 +89,24 @@
#define ASM_60X_VEC_ASSIST_VECTOR 0x16
#define ASM_60X_ITM_VECTOR 0x17
+/* e200 */
+#define ASM_E200_SPE_UNAVAILABLE_VECTOR 0x15
+#define ASM_E200_SPE_DATA_VECTOR 0x16
+#define ASM_E200_SPE_ROUND_VECTOR 0x17
+
+/* e300 */
+#define ASM_E300_CRIT_VECTOR 0x0A
+#define ASM_E300_PERFMON_VECTOR 0x0F
+#define ASM_E300_IMISS_VECTOR ASM_60X_IMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_DLMISS_VECTOR ASM_60X_DLMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_DSMISS_VECTOR ASM_60X_DSMISS_VECTOR /* Special case: Shadowed GPRs */
+#define ASM_E300_ADDR_VECTOR 0x13
+#define ASM_E300_SYSMGMT_VECTOR 0x14
+
+/*
+ * If you change this number, make sure to adjust the wrapper code in
+ * ppc_exc.S and to check that ppc_exc_handler_table is still fully
+ * initialized.
+ */
#define LAST_VALID_EXC 0x1F
/* DO NOT USE -- this symbol is DEPRECATED
@@ -227,6 +245,12 @@ void e500_setup_raw_exceptions();
*/
extern boolean bsp_exceptions_in_RAM;
+/**
+ * @brief Vector base address for CPUs (for example e200 and e500) with IVPR
+ * and IVOR registers.
+ */
+extern uint32_t ppc_exc_vector_base;
+
# endif /* ASM */
#endif
diff --git a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
index 3715c6c29b..cd5d97ec6f 100644
--- a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
+++ b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
@@ -47,8 +47,9 @@ char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu)
case PPC_860: return "MPC860";
case PPC_8260: return "MPC8260";
case PPC_8245: return "MPC8245";
- case PPC_8540: return "MPC8540";
- case PPC_PSIM: return "PSIM";
+ case PPC_8540: return "MPC8540";
+ case PPC_PSIM: return "PSIM";
+ case PPC_e200z6: return "e200z6";
default:
printk("Unknown CPU value of 0x%x. Please add it to "
"<libcpu/powerpc/shared/include/cpuIdent.c>\n", cpu );
@@ -84,8 +85,12 @@ ppc_cpu_id_t get_ppc_cpu_type()
case PPC_860:
case PPC_8260:
case PPC_8245:
- case PPC_PSIM:
- case PPC_8540:
+ case PPC_PSIM:
+ case PPC_8540:
+ case PPC_e200z6:
+ case PPC_e300c1:
+ case PPC_e300c2:
+ case PPC_e300c3:
break;
default:
printk("Unknown PVR value of 0x%x. Please add it to "
@@ -126,6 +131,7 @@ ppc_cpu_id_t get_ppc_cpu_type()
current_ppc_features.is_bookE = PPC_BOOKE_405;
break;
case PPC_8540:
+ case PPC_e200z6:
current_ppc_features.is_bookE = PPC_BOOKE_E500;
default:
break;
@@ -150,7 +156,15 @@ ppc_cpu_id_t get_ppc_cpu_type()
default:
break;
}
-
+
+ switch (current_ppc_cpu) {
+ case PPC_e200z6:
+ current_ppc_features.has_ivpr_and_ivor = 1;
+ break;
+ default:
+ break;
+ }
+
return current_ppc_cpu;
}
diff --git a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
index 9166e91302..2ee0ca0709 100644
--- a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
+++ b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
@@ -16,6 +16,8 @@
#ifndef _LIBCPU_CPUIDENT_H
#define _LIBCPU_CPUIDENT_H
+#include <stdbool.h>
+
#ifndef ASM
typedef enum
{
@@ -44,6 +46,7 @@ typedef enum
PPC_e300c1 = 0x8083, /* e300c1 core, in MPC83xx*/
PPC_e300c2 = 0x8084, /* e300c2 core */
PPC_e300c3 = 0x8085, /* e300c3 core */
+ PPC_e200z6 = 0x8115,
PPC_PSIM = 0xfffe, /* GDB PowerPC simulator -- fake version */
PPC_UNKNOWN = 0xffff
} ppc_cpu_id_t;
@@ -67,6 +70,7 @@ typedef struct {
unsigned has_8_bats : 1;
unsigned has_epic : 1;
unsigned has_shadowed_gprs : 1;
+ unsigned has_ivpr_and_ivor : 1;
} ppc_feature_t;
extern ppc_feature_t current_ppc_features;
@@ -81,7 +85,7 @@ extern ppc_cpu_revision_t current_ppc_revision;
/* PUBLIC ACCESS ROUTINES */
#define _PPC_FEAT_DECL(x) \
-static inline ppc_cpu_##x() { \
+static inline unsigned ppc_cpu_##x() { \
if ( PPC_UNKNOWN == current_ppc_cpu ) \
get_ppc_cpu_type(); \
return current_ppc_features.x; \
@@ -95,6 +99,17 @@ _PPC_FEAT_DECL(is_60x)
_PPC_FEAT_DECL(has_8_bats)
_PPC_FEAT_DECL(has_epic)
_PPC_FEAT_DECL(has_shadowed_gprs)
+_PPC_FEAT_DECL(has_ivpr_and_ivor)
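For reference, with the return type fixed above this new feature line
expands to:

static inline unsigned ppc_cpu_has_ivpr_and_ivor() {
  if ( PPC_UNKNOWN == current_ppc_cpu )
    get_ppc_cpu_type();
  return current_ppc_features.has_ivpr_and_ivor;
}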
+
+static inline bool ppc_cpu_is_e300()
+{
+ if (current_ppc_cpu == PPC_UNKNOWN) {
+ get_ppc_cpu_type();
+ }
+ return current_ppc_cpu == PPC_e300c1
+ || current_ppc_cpu == PPC_e300c2
+ || current_ppc_cpu == PPC_e300c3;
+}
#undef _PPC_FEAT_DECL
#endif /* ASM */