author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-03-13 16:24:16 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-03-19 07:00:20 +0100
commit     bd1508019cfb219b41da7cb6f1a9895a69c64534 (patch)
tree       6c3f15e745ef11dc3d7c3b706fde54c95dac055d /bsps
parent     bsps/powerpc: Remove unused files (diff)
download   rtems-bd1508019cfb219b41da7cb6f1a9895a69c64534.tar.bz2
bsps/powerpc: Move exceptions support to bsps
This patch is a part of the BSP source reorganization. Update #3285.
Diffstat (limited to 'bsps')
-rw-r--r--  bsps/powerpc/shared/exceptions.am                        |   14
-rw-r--r--  bsps/powerpc/shared/exceptions/README                    |  431
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc-code-copy.c           |   39
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc.S                 |  185
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_address.c         |  100
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_alignment.c       |   43
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_asm_macros.h      | 1114
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_async_normal.S    |  471
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_categories.c      |  325
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_fatal.S           |  229
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_global_handler.c  |   28
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_hdl.c             |  116
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_initialize.c      |  187
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_naked.S           |  207
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_print.c           |  236
-rw-r--r--  bsps/powerpc/shared/exceptions/ppc_exc_prologue.c        |  181
-rw-r--r--  bsps/powerpc/ss555/start/vectors.S                       |  201
-rw-r--r--  bsps/powerpc/ss555/start/vectors_init.c                  |  137
18 files changed, 4244 insertions, 0 deletions
diff --git a/bsps/powerpc/shared/exceptions.am b/bsps/powerpc/shared/exceptions.am
new file mode 100644
index 0000000000..e85b4bb326
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions.am
@@ -0,0 +1,14 @@
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc-code-copy.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_address.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_alignment.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_asm_macros.h
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_async_normal.S
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_categories.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_fatal.S
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_global_handler.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_hdl.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_naked.S
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_print.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc_prologue.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/exceptions/ppc_exc.S
diff --git a/bsps/powerpc/shared/exceptions/README b/bsps/powerpc/shared/exceptions/README
new file mode 100644
index 0000000000..eb5f9c7cb7
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/README
@@ -0,0 +1,431 @@
+
+BSP support middleware for 'new-exception' style PPC.
+
+T. Straumann, 12/2007
+
+EXPLANATION OF SOME TERMS
+=========================
+
+In this README we refer to exceptions and sometimes
+to 'interrupts'. Interrupts simply are asynchronous
+exceptions such as 'external' exceptions or 'decrementer'
+/'timer' exceptions.
+
+Traditionally (in the libbsp/powerpc/shared implementation),
+synchronous exceptions are handled entirely in the context
+of the interrupted task, i.e., the exception handlers use
+the task's stack and leave thread-dispatching enabled, so
+that scheduling is allowed to happen 'in the middle'
+of an exception handler.
+
+Asynchronous exceptions/interrupts, OTOH, use a dedicated
+interrupt stack and defer scheduling until after the last
+nested ISR has finished.
+
+RATIONALE
+=========
+The 'new-exception' processing API works at a rather
+low level. It provides functions for
+installing low-level code (which must be written in
+assembly code) directly into the PPC vector area.
+It is entirely left to the BSP to implement the low-level
+exception handlers, an API for C-level exception handlers,
+and the RTEMS interrupt API defined in
+cpukit/include/rtems/irq.h.
+
+The result has been a Darwinian evolution of variants
+of this code which is very hard to maintain. Mostly,
+the four files
+
+libbsp/powerpc/shared/vectors/vectors.S
+ (low-level handlers for 'normal' or 'synchronous'
+ exceptions. This code saves all registers on
+ the interrupted task's stack and calls a
+ 'global' C (high-level) exception handler.)
+
+libbsp/powerpc/shared/vectors/vectors_init.c
+ (default implementation of the 'global' C
+ exception handler and initialization of the
+ vector table with trampoline code that ends up
+ calling the 'global' handler.)
+
+libbsp/powerpc/shared/irq/irq_asm.S
+ (low-level handlers for 'IRQ'-type or 'asynchronous'
+ exceptions. This code is very similar to vectors.S
+ but does slightly more: after saving (only
+ the minimal set of) registers on the interrupted
+ task's stack it disables thread-dispatching, switches
+ to a dedicated ISR stack (if not already there which is
+ possible for nested interrupts) and then executes the high
+ level (C) interrupt dispatcher 'C_dispatch_irq_handler()'.
+ After 'C_dispatch_irq_handler()' returns the stack
+ is switched back (if not a nested IRQ), thread-dispatching
+ is re-enabled, signals are delivered and a context
+ switch is initiated if necessary.)
+
+libbsp/powerpc/shared/irq/irq.c
+ implementation of the RTEMS ('new') IRQ API defined
+ in cpukit/include/rtems/irq.h.
+
+have been copied and modified by a myriad of BSPs leading
+to many slightly different variants.
+
+THE BSP-SUPPORT MIDDLEWARE
+==========================
+
+The code in this directory is an attempt to provide the
+functionality implemented by the aforementioned files
+in a more generic way so that it can be shared by more
+BSPs rather than being copied and modified.
+
+Another important goal was eliminating all conditional
+compilation which tested for specific CPU models by means
+of C-preprocessor symbols (#ifdef ppcXYZ).
+Instead, appropriate run-time checks for features defined
+in cpuIdent.h are used.
+
+The assembly code has been (almost completely) rewritten.
+It tries to address a few problems while deliberately
+living with the existing APIs and semantics
+(how these could be improved is beyond the scope of this
+work, but that they could is beyond doubt...):
+
+ - some PPCs don't fit into the classic scheme where
+ the exception vector addresses all were multiples of
+ 0x100 (some vectors are spaced as closely as 0x10).
+ The API should not expose vector offsets but only
+ vector numbers which can be considered an abstract
+ entity. The mapping from vector numbers to actual
+ address offsets is performed inside 'raw_exception.c'
+ - having to provide assembly prologue code in order to
+ hook an exception is cumbersome. The middleware
+ tries to free users and BSP writers from this issue
+ by dealing with assembly prologues entirely inside
+ the middleware. The user can hook ordinary C routines.
+ - the advent of BookE CPUs brought interrupts with
+ multiple priorities: non-critical and critical
+ interrupts. Unfortunately, these are not entirely
+ trivial to deal with (unless critical interrupts
+ are permanently disabled [which is still the case:
+ ATM rtems_interrupt_enable()/rtems_interrupt_disable()
+ only deal with EE]). See separate section titled
+ 'race condition...' below for a detailed explanation.
+
+STRUCTURE
+=========
+
+The middleware uses exception 'categories' or
+'flavors' as defined in raw_exception.h.
+
+The middleware consists of the following parts:
+
+ 1 small 'prologue' snippets that encode the
+ vector information and jump to appropriate
+ 'flavored-wrapper' code for further handling.
+ Some PPC exceptions are spaced only
+ 16-bytes apart, so the generic
+ prologue snippets are only 16-bytes long.
+ Prologues for synchronuos and asynchronous
+ exceptions differ.
+
+ 2 flavored-wrappers which set up a stack frame
+ and do things that are specific to the
+ different 'flavors' of exceptions, which
+ currently are
+ - classic PPC exception
+ - ppc405 critical exception
+ - bookE critical exception
+ - e500 machine check exception
+
+ Assembler macros are provided and they can be
+ expanded to generate prologue templates and
+ flavored-wrappers for different flavors
+ of exceptions. Currently, there are two prologues
+ for each of the aforementioned flavors: one for synchronous
+ exceptions, the other for interrupts.
+
+ 3 generic assembly-level code that does the bulk
+ of saving register context and calling C-code.
+
+ 4 C-code (ppc_exc_hdl.c) for dispatching BSP/user
+ handlers.
+
+ 5 Initialization code (vectors_init.c). All valid
+ exceptions for the detected CPU are determined
+ and a fitting prologue snippet for the exception
+ category (classic, critical, synchronous or IRQ, ...)
+ is generated from a template and the vector number
+ and then installed in the vector area.
+
+ The user/BSP only has to deal with installing
+ high-level handlers but by default, the standard
+ 'C_dispatch_irq_handler' routine is hooked to
+ the external and 'decrementer' exceptions.
+
+ 6 RTEMS IRQ API is implemented by 'irq.c'. It
+ relies on a few routines to be provided by
+ the BSP.
+
+USAGE
+=====
+ BSP writers must provide the following routines
+ (declared in irq_supp.h):
+ Interrupt controller (PIC) support:
+ BSP_setup_the_pic() - initialize PIC hardware
+ BSP_enable_irq_at_pic() - enable/disable given irq at PIC; IGNORE if
+ BSP_disable_irq_at_pic() irq number out of range!
+ C_dispatch_irq_handler() - handle irqs and dispatch user handlers
+ this routine SHOULD use the inline
+ fragment
+
+ bsp_irq_dispatch_list()
+
+ provided by irq_supp.h
+ for calling user handlers.
+
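+ As an illustration, such a dispatcher could look roughly like the
+ sketch below. The PIC query routine and the handler table name are
+ invented for the example, and the authoritative prototypes of
+ C_dispatch_irq_handler() and bsp_irq_dispatch_list() are the ones
+ declared in irq_supp.h:
+
+   void C_dispatch_irq_handler(BSP_Exception_frame *frame, unsigned excNum)
+   {
+     int irq;
+
+     /* ask the PIC for the pending request (BSP-specific, hypothetical) */
+     while ((irq = my_bsp_pic_get_pending_irq()) >= 0) {
+       /* let the irq_supp.h inline fragment call the user handlers */
+       bsp_irq_dispatch_list(my_bsp_irq_handlers, irq,
+                             my_bsp_spurious_handler);
+     }
+   }
+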
+ BSP initialization; call
+
+ rtems_status_code sc = ppc_exc_initialize(
+ PPC_INTERRUPT_DISABLE_MASK_DEFAULT,
+ interrupt_stack_begin,
+ interrupt_stack_size
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSP_panic("cannot initialize exceptions");
+ }
+ BSP_rtems_irq_mngt_set();
+
+ Note that BSP_rtems_irq_mngt_set() hooks the C_dispatch_irq_handler()
+ to the external and decrementer (PIT exception for bookE; a decrementer
+ emulation is activated) exceptions for backwards compatibility reasons.
+ C_dispatch_irq_handler() must therefore be able to support these two
+ exceptions.
+ However, the BSP implementor is free to disconnect
+ C_dispatch_irq_handler() from either of these exceptions, to connect
+ other handlers (e.g., for SYSMGMT exceptions), or to hook
+ C_dispatch_irq_handler() to yet more exceptions, etc., *after*
+ BSP_rtems_irq_mngt_set() has executed.
+
+ Hooking exceptions:
+
+ The API defined in vectors.h declares routines for connecting
+ a C-handler to any exception. Note that the execution environment
+ of the C-handler depends on the exception being synchronous or
+ asynchronous:
+
+ - synchronous exceptions use the task stack and do not
+ disable thread dispatching.
+ - asynchronous exceptions use a dedicated stack and
+ defer thread dispatching until handling has (almost) finished.
+
+ By inspecting the vector number stored in the exception frame
+ the nature of the exception can be determined: asynchronous
+ exceptions have the most significant bit(s) set.
+
+ Any exception for which no dedicated handler is registered
+ ends up being handled by the routine addressed by the
+ (traditional) 'globalExcHdl' function pointer.
+
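+ For example, the dcbz emulation provided by ppc_exc_alignment.c
+ could be connected to the alignment exception roughly as follows
+ (assuming ppc_exc_set_handler() is the installation routine
+ declared in vectors.h; check that header for the exact name and
+ prototype):
+
+   #include <bsp/vectors.h>
+
+   /* emulate dcbz on CPUs where it raises an alignment exception */
+   ppc_exc_set_handler(ASM_ALIGN_VECTOR, ppc_exc_alignment_handler);
+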
+ Makefile.am:
+ - make sure the Makefile.am does NOT use any of the files
+ vectors.S, vectors.h, vectors_init.c, irq_asm.S, irq.c
+ from 'libbsp/powerpc/shared' NOR must the BSP implement
+ any functionality that is provided by those files (and
+ now the middleware).
+
+ - (probably) remove 'vectors.rel' and anything related
+
+ - add
+
+ ../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/vectors.h
+ ../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/irq_supp.h
+
+ to 'include_bsp_HEADERS'
+
+ - add
+
+ ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
+ ../../../libcpu/@RTEMS_CPU@/@exceptions@/irq_bspsupport.rel
+
+ to 'libbsp_a_LIBADD'
+
+ (irq.c is in a separate '.rel' so that you can get support
+ for exceptions only).
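+
+ Putting the pieces together, the resulting Makefile.am additions
+ would look roughly like this (sketch; the relative paths depend on
+ the BSP's location in the tree):
+
+   include_bsp_HEADERS += ../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/vectors.h
+   include_bsp_HEADERS += ../../../libcpu/@RTEMS_CPU@/@exceptions@/bspsupport/irq_supp.h
+   libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/@exceptions@/exc_bspsupport.rel
+   libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/@exceptions@/irq_bspsupport.rel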
+
+CAVEATS
+=======
+
+On classic PPCs, early (and late) parts of the low-level
+exception handling code run with the MMU disabled, which means
+that the default caching attributes (write-back) are in effect
+(thanks to Thomas Doerfler for bringing this up).
+The code currently assumes that the MMU translations
+for the task and interrupt stacks as well as some
+variables in the data-area MATCH THE DEFAULT CACHING
+ATTRIBUTES (this assumption also holds for the old code
+in libbsp/powerpc/shared/vectors ../irq).
+
+During initialization of exception handling, a crude test
+is performed to check if memory seems to have the write-back
+attribute. The 'dcbz' instruction should - on most PPCs - cause
+an alignment exception if the tested cache-line does not
+have this attribute.
+
+BSPs which entirely disable caching (e.g., by physically
+disabling the cache(s)) should set the variable
+ ppc_exc_cache_wb_check = 0
+prior to calling initialize_exceptions().
+Note that this check does not catch all possible
+misconfigurations (e.g., on the 860, the default attribute
+is AFAIK [libcpu/powerpc/mpc8xx/mmu/mmu_init.c] set to
+'caching-disabled' which is potentially harmful but
+this situation is not detected).
+
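+ For example (sketch), such a BSP would simply do
+
+   ppc_exc_cache_wb_check = 0;
+
+ before its exception initialization call.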
+
+RACE CONDITION WHEN DEALING WITH CRITICAL INTERRUPTS
+====================================================
+
+ The problematic race condition is as follows:
+
+ Usually, ISRs are allowed to use certain OS
+ primitives such as releasing a semaphore.
+ In order to prevent a context switch from happening
+ immediately (this would result in the ISR being
+ suspended), thread-dispatching must be disabled
+ around execution of the ISR. However, on the
+ PPC architecture it is neither possible to
+ atomically disable ALL interrupts nor is it
+ possible to atomically increment a variable
+ (the thread-dispatch-disable level).
+ Hence, the following sequence of events could
+ occur:
+ 1) low-priority interrupt (LPI) is taken
+ 2) before the LPI can increase the
+ thread-dispatch-disable level or disable
+ high-priority interrupts, a high-priority
+ interrupt (HPI) happens
+ 3) HPI increases dispatch-disable level
+ 4) HPI executes high-priority ISR which e.g.,
+ posts a semaphore
+ 5) HPI decreases dispatch-disable level and
+ realizes that a context switch is necessary
+ 6) context switch is performed since LPI had
+ not gotten to the point where it could
+ increase the dispatch-disable level.
+ At this point, the LPI has been effectively
+ suspended which means that the low-priority
+ ISR will not be executed until the task
+ interrupted in 1) is scheduled again!
+
+ The solution to this problem is letting the
+ first machine instruction of the low-priority
+ exception handler write a non-zero value to
+ a variable in memory:
+
+ ee_vector_offset:
+
+ stw r1, ee_lock@sdarel(r13)
+ .. save some registers etc..
+ .. increase thread-dispatch-disable-level
+ .. clear 'ee_lock' variable
+
+ After the HPI decrements the dispatch-disable level
+ it checks 'ee_lock' and refrains from performing
+ a context switch if 'ee_lock' is nonzero. Since
+ the LPI will complete execution subsequently it
+ will eventually do the context switch.
+
+ For the single-instruction write operation we must
+ a) write a register that is guaranteed to be
+ non-zero (e.g., R1 (stack pointer) or R13
+ (SVR4 short-data area)).
+ b) use an addressing mode that doesn't require
+ loading any registers. The short-data area
+ pointer R13 is appropriate.
+
+ CAVEAT: unfortunately, this method by itself
+ is *NOT* enough because raising a low-priority
+ exception and executing the first instruction
+ of the handler is *NOT* atomic. Hence, the following
+ could occur:
+
+ 1) LPI is taken
+ 2) PC is saved in SRR0, PC is loaded with
+ address of 'locking instruction'
+ stw r1, ee_lock@sdarel(r13)
+ 3) ==> critical interrupt happens
+ 4) PC (containing address of locking instruction)
+ is saved in CSRR0
+ 5) HPI is dispatched
+
+ For the HPI to correctly handle this situation
+ it does the following:
+
+
+ a) increase thread-dispatch disable level
+ b) do interrupt work
+ c) decrease thread-dispatch disable level
+ d) if ( dispatch-disable level == 0 )
+ d1) check ee_lock
+ d2) check instruction at *CSRR0
+ d3) do a context switch if necessary ONLY IF
+ ee_lock is NOT set AND *CSRR0 is NOT the
+ 'locking instruction'
+
+ This works because the address of 'ee_lock'
+ is embedded in the locking instruction
+ 'stw r1, ee_lock@sdarel(r13)' and because the
+ registers r1/r13 have a special purpose
+ (stack-pointer, SDA-pointer). Hence it is safe
+ to assume that the particular instruction
+ 'stw r1, ee_lock@sdarel(r13)' never occurs
+ anywhere else.
+
+ Another note: this algorithm also makes sure
+ that ONLY nested ASYNCHRONOUS interrupts which
+ enable/disable thread-dispatching and check if
+ thread-dispatching is required before returning
+ control engage in this locking protocol. It is
+ important that when a critical, asynchronous
+ interrupt interrupts a 'synchronous' exception
+ (which does not disable thread-dispatching)
+ the thread-dispatching operation upon return of
+ the HPI is NOT deferred (because the synchronous
+ handler would not, eventually, check for a
+ dispatch requirement).
+
+ And one more note: We never want to disable
+ machine-check exceptions to avoid a checkstop.
+ This means that we cannot use enabling/disabling
+ this type of exception for protection of critical
+ OS data structures.
+ Therefore, calling OS primitives from an asynchronous
+ machine-check handler is ILLEGAL and not supported.
+ Since machine-checks can happen anytime it is not
+ legal to test if a deferred context switch should
+ be performed when the asynchronous machine-check
+ handler returns (since _Context_Switch_is_necessary
+ could have been set by an IRQ-protected section of
+ code that was hit by the machine-check).
+ Note that synchronous machine-checks can legally
+ use OS primitives and currently there are no
+ asynchronous machine-checks defined.
+
+ Epilogue:
+
+ You have to disable all asynchronous exceptions which may cause a context
+ switch before restoring the SRRs and issuing the RFI. Reason:
+
+ Suppose we are in the epilogue code of an EE between the move to SRRs and
+ the RFI. Here EE is disabled but CE is enabled. Now a CE happens. The
+ handler decides that a thread dispatch is necessary. The CE checks if
+ this is possible:
+
+ o The thread dispatch disable level is 0, because the EE has already
+ decremented it.
+ o The EE lock variable is cleared.
+ o The EE is not executing its first instruction.
+
+ Hence a thread dispatch is allowed. The CE issues a context switch to a
+ task with EE enabled (for example a task waiting for a semaphore). Now an
+ EE happens and the current content of the SRRs is lost.
diff --git a/bsps/powerpc/shared/exceptions/ppc-code-copy.c b/bsps/powerpc/shared/exceptions/ppc-code-copy.c
new file mode 100644
index 0000000000..07f5234076
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc-code-copy.c
@@ -0,0 +1,39 @@
+/**
+ * @file
+ *
+ * @ingroup powerpc_shared
+ *
+ * @brief Code copy implementation.
+ */
+
+/*
+ * Copyright (c) 2009
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * D-82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <string.h>
+
+#include <rtems.h>
+
+#include <libcpu/powerpc-utility.h>
+
+void ppc_code_copy(void *dest, const void *src, size_t n)
+{
+ if (memcmp(dest, src, n) != 0) {
+ memcpy(dest, src, n);
+
+ rtems_cache_flush_multiple_data_lines(dest, n);
+ ppc_synchronize_data();
+
+ rtems_cache_invalidate_multiple_instruction_lines(dest, n);
+ ppc_synchronize_instructions();
+ }
+}
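+
+/*
+ * Illustrative use (the names below are placeholders): copy a newly
+ * generated prologue snippet into the vector area and make the change
+ * visible to the instruction fetch path.
+ *
+ *   ppc_code_copy(vector_address, prologue_template, prologue_size);
+ */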
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc.S b/bsps/powerpc/shared/exceptions/ppc_exc.S
new file mode 100644
index 0000000000..c606c0da30
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc.S
@@ -0,0 +1,185 @@
+/*
+ * (c) 1999, Eric Valette valette@crf.canon.fr
+ *
+ * Modified and partially rewritten by Till Straumann, 2007
+ *
+ * Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008.
+ *
+ * Low-level assembly code for PPC exceptions.
+ *
+ * This file was written with the goal to eliminate
+ * ALL #ifdef <cpu_flavor> conditionals -- please do not
+ * reintroduce such statements.
+ */
+
+/* Load macro definitions */
+#include <rtems/asm.h>
+#include <rtems/system.h>
+#include <rtems/score/percpu.h>
+
+/*
+ * This code uses the small-data area which is not available in the 64-bit
+ * PowerPC ELFv2 ABI.
+ */
+#ifndef __powerpc64__
+
+#include "ppc_exc_asm_macros.h"
+
+/******************************************************/
+/* PROLOGUES */
+/******************************************************/
+
+ /*
+ * Expand prologue snippets for classic, ppc405-critical, bookE-critical
+ * and E500 machine-check, synchronous and asynchronous exceptions
+ */
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
+ PPC_EXC_MIN_PROLOG_SYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
+
+ PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_std _VEC=0 _PRI=std _FLVR=std
+ PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_p405_crit _VEC=0 _PRI=crit _FLVR=p405_crit
+ PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_bookE_crit _VEC=0 _PRI=crit _FLVR=bookE_crit
+ PPC_EXC_MIN_PROLOG_ASYNC _NAME=tmpl_e500_mchk _VEC=0 _PRI=mchk _FLVR=e500_mchk
+
+ .global ppc_exc_min_prolog_size
+ppc_exc_min_prolog_size = 4 * 4
+
+/* Special prologue for 603e-style CPUs.
+ *
+ * 603e shadows GPR0..GPR3 for certain exceptions. We must switch
+ * that off before we can use the stack pointer. Note that this is
+ * ONLY safe if the shadowing is actually active -- otherwise, r1
+ * is destroyed. We deliberately use r1 so problems become obvious
+ * if this is misused!
+ */
+ .global ppc_exc_tgpr_clr_prolog
+ppc_exc_tgpr_clr_prolog:
+ mfmsr r1
+ rlwinm r1,r1,0,15,13
+ mtmsr r1
+ isync
+ /* FALL THRU TO 'auto' PROLOG */
+
+/* Determine vector dynamically/automatically
+ *
+ * BUT: - only standard exceptions (no critical ones)
+ * - vector offset must be on 256 Byte boundary.
+ */
+ .global ppc_exc_min_prolog_auto
+ppc_exc_min_prolog_auto:
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ mflr VECTOR_REGISTER
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ *
+ * We add one to request the link in the generated branch instruction.
+ */
+ .int ppc_exc_wrap_auto + 1
+
+ .global ppc_exc_tgpr_clr_prolog_size
+ppc_exc_tgpr_clr_prolog_size = . - ppc_exc_tgpr_clr_prolog
+
+/*
+ * Automatic vector, asynchronous exception; however,
+ * automatic vector calculation is less efficient than
+ * using an explicit vector in a minimal prolog snippet.
+ * The latter method is preferable since there usually
+ * are few asynchronous exceptions.
+ *
+ * For generic exceptions (which are the bulk) using
+ * the 'auto' prologue is OK since performance is not
+ * really an issue.
+ */
+ .global ppc_exc_min_prolog_auto_async
+ppc_exc_min_prolog_auto_async:
+ stw r1, ppc_exc_lock_std@sdarel(r13)
+ stw VECTOR_REGISTER, ppc_exc_vector_register_std@sdarel(r13)
+ mflr VECTOR_REGISTER
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ *
+ * We add one to request the link in the generated branch instruction.
+ */
+ .int ppc_exc_wrap_auto_async + 1
+
+/******************************************************/
+/* WRAPPERS */
+/******************************************************/
+
+ /* Tag start and end of the wrappers.
+ * If exceptions are installed farther removed
+ * from the text area than 32M then the wrappers
+ * must be moved to an area that is reachable
+ * from where the prologues reside. Branches into
+ * C-code are far.
+ */
+
+ .global __ppc_exc_wrappers_start
+__ppc_exc_wrappers_start = .
+
+ /* Expand wrappers for different exception flavors */
+
+ /* Standard/classic powerpc */
+ WRAP _FLVR=std _PRI=std _SRR0=srr0 _SRR1=srr1 _RFI=rfi
+
+ /* ppc405 has a critical exception using srr2/srr3 */
+ WRAP _FLVR=p405_crit _PRI=crit _SRR0=srr2 _SRR1=srr3 _RFI=rfci
+
+ /* bookE has critical exception using csrr0 cssr1 */
+ WRAP _FLVR=bookE_crit _PRI=crit _SRR0=csrr0 _SRR1=csrr1 _RFI=rfci
+
+ /* e500 has machine-check exception using mcsrr0 mcssr1 */
+ WRAP _FLVR=e500_mchk _PRI=mchk _SRR0=mcsrr0 _SRR1=mcsrr1 _RFI=rfmci
+
+ /* LR holds vector, VECTOR_REGISTER holds orig. LR */
+ .global ppc_exc_wrap_auto
+ppc_exc_wrap_auto:
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+
+ /* Find address where we jumped from */
+ mflr FRAME_REGISTER
+
+ /* Restore LR */
+ mtlr VECTOR_REGISTER
+
+ /* Compute vector into R3 */
+ rlwinm VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
+
+ /*
+ * We're now in almost the same state as if called by
+ * min_prolog_std but we must skip saving FRAME_REGISTER
+ * since that's done already
+ */
+ b wrap_no_save_frame_register_std
+
+ .global ppc_exc_wrap_auto_async
+ppc_exc_wrap_auto_async:
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+ /* find address where we jumped from */
+ mflr FRAME_REGISTER
+ /* restore LR */
+ mtlr VECTOR_REGISTER
+ /* set upper bits to indicate that non-volatile
+ * registers should not be saved/restored.
+ */
+ li VECTOR_REGISTER, 0xffff8000
+ /* compute vector into R3 */
+ rlwimi VECTOR_REGISTER, FRAME_REGISTER, 24, 26, 31
+ /* we're now in almost the same state as if called by
+ * min_prolog_std but we must skip saving FRAME_REGISTER
+ * since that's done already
+ */
+ b wrap_no_save_frame_register_std
+
+ .global __ppc_exc_wrappers_end
+__ppc_exc_wrappers_end = .
+
+#endif /* !__powerpc64__ */
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_address.c b/bsps/powerpc/shared/exceptions/ppc_exc_address.c
new file mode 100644
index 0000000000..fc944177a8
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_address.c
@@ -0,0 +1,100 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Copyright (C) 2009 embedded brains GmbH.
+ *
+ * Enhanced by Jay Kulpinski <jskulpin@eng01.gdds.com>
+ * to support 603, 603e, 604, 604e exceptions
+ *
+ * Moved to "libcpu/powerpc/new-exceptions" and consolidated
+ * by Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
+ * to be common for all PPCs with new exceptions.
+ *
+ * Derived from file "libcpu/powerpc/new-exceptions/raw_exception.c".
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+
+#include <bsp/vectors.h>
+
+/*
+ * XXX: These values are chosen to directly generate the vector offsets for an
+ * e200z1 which has hard wired IVORs (IVOR0=0x00, IVOR1=0x10, IVOR2=0x20, ...).
+ */
+static const uint8_t ivor_values [] = {
+ [ASM_BOOKE_CRIT_VECTOR] = 0,
+ [ASM_MACH_VECTOR] = 1,
+ [ASM_PROT_VECTOR] = 2,
+ [ASM_ISI_VECTOR] = 3,
+ [ASM_EXT_VECTOR] = 4,
+ [ASM_ALIGN_VECTOR] = 5,
+ [ASM_PROG_VECTOR] = 6,
+ [ASM_FLOAT_VECTOR] = 7,
+ [ASM_SYS_VECTOR] = 8,
+ [ASM_BOOKE_APU_VECTOR] = 9,
+ [ASM_BOOKE_DEC_VECTOR] = 10,
+ [ASM_BOOKE_FIT_VECTOR] = 11,
+ [ASM_BOOKE_WDOG_VECTOR] = 12,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = 13,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = 14,
+ [ASM_BOOKE_DEBUG_VECTOR] = 15,
+ [ASM_E500_SPE_UNAVAILABLE_VECTOR] = 16,
+ [ASM_E500_EMB_FP_DATA_VECTOR] = 17,
+ [ASM_E500_EMB_FP_ROUND_VECTOR] = 18,
+ [ASM_E500_PERFMON_VECTOR] = 19
+};
+
+void *ppc_exc_vector_address(unsigned vector, void *vector_base)
+{
+ uintptr_t vector_offset = vector << 8;
+
+ if (ppc_cpu_has_altivec()) {
+ if (vector == ASM_60X_VEC_VECTOR) {
+ vector_offset = ASM_60X_VEC_VECTOR_OFFSET;
+ }
+ }
+
+ if (ppc_cpu_is(PPC_405)) {
+ switch (vector) {
+ case ASM_BOOKE_FIT_VECTOR:
+ vector_offset = ASM_PPC405_FIT_VECTOR_OFFSET;
+ break;
+ case ASM_BOOKE_WDOG_VECTOR:
+ vector_offset = ASM_PPC405_WDOG_VECTOR_OFFSET;
+ break;
+ case ASM_TRACE_VECTOR:
+ vector_offset = ASM_PPC405_TRACE_VECTOR_OFFSET;
+ break;
+ case ASM_PPC405_APU_UNAVAIL_VECTOR:
+ vector_offset = ASM_60X_VEC_VECTOR_OFFSET;
+ default:
+ break;
+ }
+ }
+
+ if (
+ ppc_cpu_is_bookE() == PPC_BOOKE_STD
+ || ppc_cpu_is_bookE() == PPC_BOOKE_E500
+ ) {
+ if (vector < sizeof(ivor_values) / sizeof(ivor_values [0])) {
+ vector_offset = ((uintptr_t) ivor_values [vector]) << 4;
+ } else {
+ vector_offset = 0;
+ }
+ }
+
+ return (void *) ((uintptr_t) vector_base + vector_offset);
+}
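+
+/*
+ * Illustrative example, assuming ASM_EXT_VECTOR is the classic external
+ * interrupt vector number 5: on a classic PPC
+ *
+ *   ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base)
+ *
+ * evaluates to vector_base + 0x500 (5 << 8), while on a standard Book E
+ * or e500 CPU it yields vector_base + 0x40, i.e. the hard-wired IVOR4
+ * offset from the table above.
+ */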
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_alignment.c b/bsps/powerpc/shared/exceptions/ppc_exc_alignment.c
new file mode 100644
index 0000000000..732ff96b18
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_alignment.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <bsp/vectors.h>
+
+int ppc_exc_alignment_handler(BSP_Exception_frame *frame, unsigned excNum)
+{
+ unsigned opcode = *(unsigned *) frame->EXC_SRR0;
+
+ /* Do we have a dcbz instruction? */
+ if ((opcode & 0xffe007ff) == 0x7c0007ec) {
+ unsigned clsz = rtems_cache_get_data_line_size();
+ unsigned a = (opcode >> 16) & 0x1f;
+ unsigned b = (opcode >> 11) & 0x1f;
+ PPC_GPR_TYPE *regs = &frame->GPR0;
+ unsigned *current = (unsigned *)
+ (((a == 0 ? 0 : (unsigned) regs[a]) + (unsigned) regs[b]) & ~(clsz - 1));
+ unsigned *end = current + clsz / sizeof(*current);
+
+ while (current != end) {
+ *current = 0;
+ ++current;
+ }
+
+ frame->EXC_SRR0 += 4;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_asm_macros.h b/bsps/powerpc/shared/exceptions/ppc_exc_asm_macros.h
new file mode 100644
index 0000000000..c89046619b
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_asm_macros.h
@@ -0,0 +1,1114 @@
+/*
+ * (c) 1999, Eric Valette valette@crf.canon.fr
+ *
+ * Modified and partially rewritten by Till Straumann, 2007-2008
+ *
+ * Modified by Sebastian Huber <sebastian.huber@embedded-brains.de>, 2008-2012.
+ *
+ * Low-level assembly code for PPC exceptions (macros).
+ *
+ * This file was written with the goal to eliminate
+ * ALL #ifdef <cpu_flavor> conditionals -- please do not
+ * reintroduce such statements.
+ */
+
+#include <bspopts.h>
+#include <bsp/vectors.h>
+#include <libcpu/powerpc-utility.h>
+
+#define LT(cr) ((cr)*4+0)
+#define GT(cr) ((cr)*4+1)
+#define EQ(cr) ((cr)*4+2)
+
+/* Opcode of 'stw r1, off(r13)' */
+#define STW_R1_R13(off) ((((36<<10)|(r1<<5)|(r13))<<16) | ((off)&0xffff))
+
+#define FRAME_REGISTER r14
+#define VECTOR_REGISTER r4
+#define SCRATCH_REGISTER_0 r5
+#define SCRATCH_REGISTER_1 r6
+#define SCRATCH_REGISTER_2 r7
+
+#define FRAME_OFFSET( r) GPR14_OFFSET( r)
+#define VECTOR_OFFSET( r) GPR4_OFFSET( r)
+#define SCRATCH_REGISTER_0_OFFSET( r) GPR5_OFFSET( r)
+#define SCRATCH_REGISTER_1_OFFSET( r) GPR6_OFFSET( r)
+#define SCRATCH_REGISTER_2_OFFSET( r) GPR7_OFFSET( r)
+
+#define CR_TYPE 2
+#define CR_MSR 3
+#define CR_LOCK 4
+
+ /*
+ * Minimal prologue snippets:
+ *
+ * Rationale: on some PPCs the vector offsets are spaced
+ * as closely as 16 bytes.
+ *
+ * If we deal with asynchronous exceptions ('interrupts')
+ * then we can use 4 instructions to
+ * 1. atomically write lock to indicate ISR is in progress
+ * (we cannot atomically increase the Thread_Dispatch_disable_level,
+ * see README)
+ * 2. save a register in special area
+ * 3. load register with vector info
+ * 4. branch
+ *
+ * If we deal with a synchronous exception (no stack switch
+ * nor dispatch-disabling necessary) then it's easier:
+ * 1. push stack frame
+ * 2. save register on stack
+ * 3. load register with vector info
+ * 4. branch
+ *
+ */
+
+/*
+ *****************************************************************************
+ * MACRO: PPC_EXC_MIN_PROLOG_ASYNC
+ *****************************************************************************
+ * USES: VECTOR_REGISTER
+ * ON EXIT: Vector in VECTOR_REGISTER
+ *
+ * NOTES: VECTOR_REGISTER saved in special variable
+ * 'ppc_exc_vector_register_\_PRI'.
+ *
+ */
+ .macro PPC_EXC_MIN_PROLOG_ASYNC _NAME _VEC _PRI _FLVR
+
+ .global ppc_exc_min_prolog_async_\_NAME
+ppc_exc_min_prolog_async_\_NAME:
+ /* Atomically write lock variable in 1st instruction with non-zero
+ * value (r1 is always nonzero; r13 could also be used)
+ *
+ * NOTE: raising an exception and executing this first instruction
+ * of the exception handler is apparently NOT atomic, i.e., a
+ * low-priority IRQ could set the PC to this location and a
+ * critical IRQ could intervene just at this point.
+ *
+ * We check against this pathological case by checking the
+ * opcode/instruction at the interrupted PC for matching
+ *
+ * stw r1, ppc_exc_lock_XXX@sdarel(r13)
+ *
+ * ASSUMPTION:
+ * 1) ALL 'asynchronous' exceptions (which disable thread-
+ * dispatching) execute THIS 'magical' instruction
+ * FIRST.
+ * 2) This instruction (including the address offset)
+ * is not used anywhere else (probably a safe assumption).
+ */
+ stw r1, ppc_exc_lock_\_PRI@sdarel(r13)
+ /* We have no stack frame yet; store VECTOR_REGISTER in special area;
+ * a higher-priority (critical) interrupt uses a different area
+ * (hence the different prologue snippets) (\PRI)
+ */
+ stw VECTOR_REGISTER, ppc_exc_vector_register_\_PRI@sdarel(r13)
+ /* Load vector.
+ */
+ li VECTOR_REGISTER, ( \_VEC | 0xffff8000 )
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ */
+ .int ppc_exc_wrap_\_FLVR
+
+ .endm
+
+/*
+ *****************************************************************************
+ * MACRO: PPC_EXC_MIN_PROLOG_SYNC
+ *****************************************************************************
+ * USES: VECTOR_REGISTER
+ * ON EXIT: vector in VECTOR_REGISTER
+ *
+ * NOTES: exception stack frame pushed; VECTOR_REGISTER saved in frame
+ *
+ */
+ .macro PPC_EXC_MIN_PROLOG_SYNC _NAME _VEC _PRI _FLVR
+
+ .global ppc_exc_min_prolog_sync_\_NAME
+ppc_exc_min_prolog_sync_\_NAME:
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ li VECTOR_REGISTER, \_VEC
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ */
+ .int ppc_exc_wrap_nopush_\_FLVR
+
+ .endm
+
+/*
+ *****************************************************************************
+ * MACRO: TEST_1ST_OPCODE_crit
+ *****************************************************************************
+ *
+ * USES: REG, cr0
+ * ON EXIT: REG available (contains *pc - STW_R1_R13(0)),
+ * return value in cr0.
+ *
+ * test opcode interrupted by critical (asynchronous) exception; set CR_LOCK if
+ *
+ * *SRR0 == 'stw r1, ppc_exc_lock_std@sdarel(r13)'
+ *
+ */
+ .macro TEST_1ST_OPCODE_crit _REG
+
+ lwz \_REG, SRR0_FRAME_OFFSET(FRAME_REGISTER)
+ lwz \_REG, 0(\_REG)
+ /* opcode now in REG */
+
+ /* subtract upper 16bits of 'stw r1, 0(r13)' instruction */
+ subis \_REG, \_REG, STW_R1_R13(0)@h
+ /*
+ * if what's left compares against the 'ppc_exc_lock_std@sdarel'
+ * address offset then we have a match...
+ */
+ cmplwi cr0, \_REG, ppc_exc_lock_std@sdarel
+
+ .endm
+
+/*
+ *****************************************************************************
+ * MACRO: TEST_LOCK_std
+ *****************************************************************************
+ *
+ * USES: CR_LOCK
+ * ON EXIT: CR_LOCK is set (indicates no lower-priority locks are engaged)
+ *
+ */
+ .macro TEST_LOCK_std _FLVR
+ /* 'std' is lowest level, i.e., can not be locked -> EQ(CR_LOCK) = 1 */
+ creqv EQ(CR_LOCK), EQ(CR_LOCK), EQ(CR_LOCK)
+ .endm
+
+/*
+ ******************************************************************************
+ * MACRO: TEST_LOCK_crit
+ ******************************************************************************
+ *
+ * USES: CR_LOCK, cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+ * ON EXIT: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1 available,
+ * returns result in CR_LOCK.
+ *
+ * critical-exception wrapper has to check 'std' lock:
+ *
+ * Return CR_LOCK = ( (interrupt_mask & MSR_CE) != 0
+ *                 && ppc_lock_std == 0
+ *                 && *SRR0 != <write std lock instruction> )
+ *
+ */
+ .macro TEST_LOCK_crit _FLVR
+ /* If MSR_CE is not in the IRQ mask then we must never allow
+ * thread-dispatching!
+ */
+ GET_INTERRUPT_MASK mask=SCRATCH_REGISTER_1
+ /* EQ(cr0) = ((interrupt_mask & MSR_CE) == 0) */
+ andis. SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, MSR_CE@h
+ beq TEST_LOCK_crit_done_\_FLVR
+
+ /* STD interrupt could have been interrupted before executing the 1st
+ * instruction which sets the lock; check this case by looking at the
+ * opcode present at the interrupted PC location.
+ */
+ TEST_1ST_OPCODE_crit _REG=SCRATCH_REGISTER_0
+ /*
+ * At this point cr0 is set if
+ *
+ * *(PC) == 'stw r1, ppc_exc_lock_std@sdarel(r13)'
+ *
+ */
+
+ /* check lock */
+ lwz SCRATCH_REGISTER_1, ppc_exc_lock_std@sdarel(r13)
+ cmplwi CR_LOCK, SCRATCH_REGISTER_1, 0
+
+ /* set EQ(CR_LOCK) to result */
+TEST_LOCK_crit_done_\_FLVR:
+ /* If we end up here because the interrupt mask did not contain
+ * MSR_CE then cr0 is set and therefore the value of CR_LOCK
+ * does not matter since x && !1 == 0:
+ *
+ * if ( (interrupt_mask & MSR_CE) == 0 ) {
+ * EQ(CR_LOCK) = EQ(CR_LOCK) && ! ((interrupt_mask & MSR_CE) == 0)
+ * } else {
+ * EQ(CR_LOCK) = (ppc_exc_lock_std == 0) && ! (*pc == <write std lock instruction>)
+ * }
+ */
+ crandc EQ(CR_LOCK), EQ(CR_LOCK), EQ(cr0)
+
+ .endm
+
+/*
+ ******************************************************************************
+ * MACRO: TEST_LOCK_mchk
+ ******************************************************************************
+ *
+ * USES: CR_LOCK
+ * ON EXIT: CR_LOCK is cleared.
+ *
+ * We never want to disable machine-check exceptions to avoid a checkstop. This
+ * means that we cannot use enabling/disabling this type of exception for
+ * protection of critical OS data structures. Therefore, calling OS primitives
+ * from a machine-check handler is ILLEGAL. Since machine-checks can happen
+ * anytime it is not legal to perform a context switch (since the exception
+ * could hit an IRQ protected section of code). We simply let this test return
+ * 0 so that ppc_exc_wrapup is never called after handling a machine-check.
+ */
+ .macro TEST_LOCK_mchk _SRR0 _FLVR
+
+ crxor EQ(CR_LOCK), EQ(CR_LOCK), EQ(CR_LOCK)
+
+ .endm
+
+/*
+ ******************************************************************************
+ * MACRO: RECOVER_CHECK_\PRI
+ ******************************************************************************
+ *
+ * USES: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+ * ON EXIT: cr0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1 available
+ *
+ * Checks if the exception is recoverable for exceptions which need such a
+ * test.
+ */
+
+/* Standard*/
+ .macro RECOVER_CHECK_std _FLVR
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Check if exception is recoverable */
+ lwz SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
+ lwz SCRATCH_REGISTER_1, ppc_exc_msr_bits@sdarel(r13)
+ xor SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ andi. SCRATCH_REGISTER_0, SCRATCH_REGISTER_1, MSR_RI
+
+recover_check_twiddle_std_\_FLVR:
+
+ /* Not recoverable? */
+ bne recover_check_twiddle_std_\_FLVR
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ .endm
+
+/* Critical */
+ .macro RECOVER_CHECK_crit _FLVR
+
+ /* Nothing to do */
+
+ .endm
+
+/* Machine check */
+ .macro RECOVER_CHECK_mchk _FLVR
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Check if exception is recoverable */
+ lwz SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
+ lwz SCRATCH_REGISTER_1, ppc_exc_msr_bits@sdarel(r13)
+ xor SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ andi. SCRATCH_REGISTER_0, SCRATCH_REGISTER_1, MSR_RI
+
+recover_check_twiddle_mchk_\_FLVR:
+
+ /* Not recoverable? */
+ bne recover_check_twiddle_mchk_\_FLVR
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ .endm
+
+/*
+ ******************************************************************************
+ * MACRO: WRAP
+ ******************************************************************************
+ *
+ * Minimal prologue snippets jump into WRAP which calls the high level
+ * exception handler. We must have this macro instantiated for each possible
+ * flavor of exception so that we use the proper lock variable, SRR register
+ * pair and RFI instruction.
+ *
+ * We have two types of exceptions: synchronous and asynchronous (= interrupt
+ * like). The type is encoded in the vector register (= VECTOR_REGISTER). For
+ * interrupt like exceptions the MSB in the vector register is set. The
+ * exception type is kept in the comparison register CR_TYPE. Normal
+ * exceptions (MSB is clear) use the task stack and a context switch may happen
+ * at any time. The interrupt like exceptions disable thread dispatching and
+ * switch to the interrupt stack (base address is in SPRG1).
+ *
+ * +
+ * |
+ * | Minimal prologue
+ * |
+ * +
+ * |
+ * | o Setup frame pointer
+ * | o Save basic registers
+ * | o Determine exception type:
+ * | synchronous or asynchronous
+ * |
+ * +-----+
+ * Synchronous exceptions: | | Asynchronous exceptions:
+ * | |
+ * Save non-volatile registers | | o Increment thread dispatch
+ * | | disable level
+ * | | o Increment ISR nest level
+ * | | o Clear lock
+ * | | o Switch stack if necessary
+ * | |
+ * +---->+
+ * |
+ * | o Save volatile registers
+ * | o Change MSR if necessary
+ * | o Call high level handler
+ * | o Call global handler if necessary
+ * | o Check if exception is recoverable
+ * |
+ * +-----+
+ * Synchronous exceptions: | | Asynchronous exceptions:
+ * | |
+ * Restore non-volatile registers | | o Decrement ISR nest level
+ * | | o Switch stack
+ * | | o Decrement thread dispatch
+ * | | disable level
+ * | | o Test lock
+ * | | o May do a context switch
+ * | |
+ * +---->+
+ * |
+ * | o Restore MSR if necessary
+ * | o Restore volatile registers
+ * | o Restore frame pointer
+ * | o Return
+ * |
+ * +
+ */
+ .macro WRAP _FLVR _PRI _SRR0 _SRR1 _RFI
+
+ .global ppc_exc_wrap_\_FLVR
+ppc_exc_wrap_\_FLVR:
+
+ /* Push exception frame */
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+
+ .global ppc_exc_wrap_nopush_\_FLVR
+ppc_exc_wrap_nopush_\_FLVR:
+
+ /* Save frame register */
+ stw FRAME_REGISTER, FRAME_OFFSET(r1)
+
+wrap_no_save_frame_register_\_FLVR:
+
+ /*
+ * We save at first only some scratch registers
+ * and the CR. We use a non-volatile register
+ * for the exception frame pointer (= FRAME_REGISTER).
+ */
+
+ /* Move frame address in non-volatile FRAME_REGISTER */
+ mr FRAME_REGISTER, r1
+
+ /* Save scratch registers */
+ stw SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(FRAME_REGISTER)
+ stw SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(FRAME_REGISTER)
+ stw SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(FRAME_REGISTER)
+
+ /* Save CR */
+ mfcr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CR_OFFSET(FRAME_REGISTER)
+
+ /* Check exception type and remember it in non-volatile CR_TYPE */
+ cmpwi CR_TYPE, VECTOR_REGISTER, 0
+
+#if defined(PPC_MULTILIB_FPU) || defined(PPC_MULTILIB_ALTIVEC)
+ /* Enable FPU and/or AltiVec */
+ mfmsr SCRATCH_REGISTER_0
+#ifdef PPC_MULTILIB_FPU
+ ori SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, MSR_FP
+#endif
+#ifdef PPC_MULTILIB_ALTIVEC
+ oris SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, MSR_VE >> 16
+#endif
+ mtmsr SCRATCH_REGISTER_0
+ isync
+#endif
+
+ /*
+ * Depending on the exception type we do now save the non-volatile
+ * registers or disable thread dispatching and switch to the ISR stack.
+ */
+
+ /* Branch for synchronous exceptions */
+ bge CR_TYPE, wrap_save_non_volatile_regs_\_FLVR
+
+ /*
+ * Increment the thread dispatch disable level so that, in case a
+ * higher priority exception occurs, we don't let it run the scheduler.
+ * It is safe to increment this without disabling higher priority
+ * exceptions since those will see that we wrote the lock anyway.
+ */
+
+ /* Increment ISR nest level and thread dispatch disable level */
+ GET_SELF_CPU_CONTROL SCRATCH_REGISTER_2
+ lwz SCRATCH_REGISTER_0, PER_CPU_ISR_NEST_LEVEL@l(SCRATCH_REGISTER_2)
+ lwz SCRATCH_REGISTER_1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_REGISTER_2)
+ addi SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, 1
+ addi SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, 1
+ stw SCRATCH_REGISTER_0, PER_CPU_ISR_NEST_LEVEL@l(SCRATCH_REGISTER_2)
+ stw SCRATCH_REGISTER_1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_REGISTER_2)
+
+ /*
+ * No higher-priority exception occurring after this point
+ * can cause a context switch.
+ */
+
+ /* Clear lock */
+ li SCRATCH_REGISTER_0, 0
+ stw SCRATCH_REGISTER_0, ppc_exc_lock_\_PRI@sdarel(r13)
+
+ /* Switch stack if necessary */
+ mfspr SCRATCH_REGISTER_0, SPRG1
+ cmpw SCRATCH_REGISTER_0, r1
+ blt wrap_stack_switch_\_FLVR
+ mfspr SCRATCH_REGISTER_1, SPRG2
+ cmpw SCRATCH_REGISTER_1, r1
+ blt wrap_stack_switch_done_\_FLVR
+
+wrap_stack_switch_\_FLVR:
+
+ mr r1, SCRATCH_REGISTER_0
+
+wrap_stack_switch_done_\_FLVR:
+
+ /*
+ * Load the pristine VECTOR_REGISTER from a special location for
+ * asynchronous exceptions. The synchronous exceptions save the
+ * VECTOR_REGISTER in their minimal prologue.
+ */
+ lwz SCRATCH_REGISTER_2, ppc_exc_vector_register_\_PRI@sdarel(r13)
+
+ /* Save pristine vector register */
+ stw SCRATCH_REGISTER_2, VECTOR_OFFSET(FRAME_REGISTER)
+
+wrap_disable_thread_dispatching_done_\_FLVR:
+
+ /*
+ * We now have SCRATCH_REGISTER_0, SCRATCH_REGISTER_1,
+ * SCRATCH_REGISTER_2 and CR available. VECTOR_REGISTER still holds
+ * the vector (and exception type). FRAME_REGISTER is a pointer to the
+ * exception frame (always on the stack of the interrupted context).
+ * r1 is the stack pointer, either on the task stack or on the ISR
+ * stack. CR_TYPE holds the exception type.
+ */
+
+ /* Save SRR0 */
+ mfspr SCRATCH_REGISTER_0, \_SRR0
+ stw SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(FRAME_REGISTER)
+
+ /* Save SRR1 */
+ mfspr SCRATCH_REGISTER_0, \_SRR1
+ stw SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(FRAME_REGISTER)
+
+ /* Save CTR */
+ mfctr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CTR_OFFSET(FRAME_REGISTER)
+
+ /* Save XER */
+ mfxer SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_XER_OFFSET(FRAME_REGISTER)
+
+ /* Save LR */
+ mflr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_LR_OFFSET(FRAME_REGISTER)
+
+ /* Save volatile registers */
+ stw r0, GPR0_OFFSET(FRAME_REGISTER)
+ stw r3, GPR3_OFFSET(FRAME_REGISTER)
+ stw r8, GPR8_OFFSET(FRAME_REGISTER)
+ stw r9, GPR9_OFFSET(FRAME_REGISTER)
+ stw r10, GPR10_OFFSET(FRAME_REGISTER)
+ stw r11, GPR11_OFFSET(FRAME_REGISTER)
+ stw r12, GPR12_OFFSET(FRAME_REGISTER)
+
+ /* Save read-only small data area anchor (EABI) */
+ stw r2, GPR2_OFFSET(FRAME_REGISTER)
+
+ /* Save vector number and exception type */
+ stw VECTOR_REGISTER, EXCEPTION_NUMBER_OFFSET(FRAME_REGISTER)
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Load MSR bit mask */
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+
+ /*
+ * Change the MSR if necessary (MMU, RI),
+ * remember decision in non-volatile CR_MSR
+ */
+ cmpwi CR_MSR, SCRATCH_REGISTER_0, 0
+ bne CR_MSR, wrap_change_msr_\_FLVR
+
+wrap_change_msr_done_\_FLVR:
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ LA SCRATCH_REGISTER_0, _CPU_save_altivec_volatile
+ mtctr SCRATCH_REGISTER_0
+ addi r3, FRAME_REGISTER, EXC_VEC_OFFSET
+ bctrl
+ /*
+ * Establish defaults for vrsave and vscr
+ */
+ li SCRATCH_REGISTER_0, 0
+ mtvrsave SCRATCH_REGISTER_0
+ /*
+ * Use java/c9x mode; clear saturation bit
+ */
+ vxor 0, 0, 0
+ mtvscr 0
+ /*
+ * Reload VECTOR_REGISTER
+ */
+ lwz VECTOR_REGISTER, EXCEPTION_NUMBER_OFFSET(FRAME_REGISTER)
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(0)
+ stvx v0, FRAME_REGISTER, SCRATCH_REGISTER_0
+ mfvscr v0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(1)
+ stvx v1, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(2)
+ stvx v2, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(3)
+ stvx v3, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(4)
+ stvx v4, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(5)
+ stvx v5, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(6)
+ stvx v6, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(7)
+ stvx v7, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(8)
+ stvx v8, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(9)
+ stvx v9, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(10)
+ stvx v10, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(11)
+ stvx v11, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(12)
+ stvx v12, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(13)
+ stvx v13, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(14)
+ stvx v14, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(15)
+ stvx v15, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(16)
+ stvx v16, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(17)
+ stvx v17, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(18)
+ stvx v18, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(19)
+ stvx v19, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VSCR_OFFSET
+ stvewx v0, r1, SCRATCH_REGISTER_0
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ stfd f0, PPC_EXC_FR_OFFSET(0)(FRAME_REGISTER)
+ mffs f0
+ stfd f1, PPC_EXC_FR_OFFSET(1)(FRAME_REGISTER)
+ stfd f2, PPC_EXC_FR_OFFSET(2)(FRAME_REGISTER)
+ stfd f3, PPC_EXC_FR_OFFSET(3)(FRAME_REGISTER)
+ stfd f4, PPC_EXC_FR_OFFSET(4)(FRAME_REGISTER)
+ stfd f5, PPC_EXC_FR_OFFSET(5)(FRAME_REGISTER)
+ stfd f6, PPC_EXC_FR_OFFSET(6)(FRAME_REGISTER)
+ stfd f7, PPC_EXC_FR_OFFSET(7)(FRAME_REGISTER)
+ stfd f8, PPC_EXC_FR_OFFSET(8)(FRAME_REGISTER)
+ stfd f9, PPC_EXC_FR_OFFSET(9)(FRAME_REGISTER)
+ stfd f10, PPC_EXC_FR_OFFSET(10)(FRAME_REGISTER)
+ stfd f11, PPC_EXC_FR_OFFSET(11)(FRAME_REGISTER)
+ stfd f12, PPC_EXC_FR_OFFSET(12)(FRAME_REGISTER)
+ stfd f13, PPC_EXC_FR_OFFSET(13)(FRAME_REGISTER)
+ stfd f0, PPC_EXC_FPSCR_OFFSET(FRAME_REGISTER)
+#endif
+
+ /*
+ * Call high level exception handler
+ */
+
+ /*
+ * Get the handler table index from the vector number. We have to
+ * discard the exception type. Take only the least significant five
+ * bits (= LAST_VALID_EXC + 1) from the vector register. Multiply by
+ * four (= size of function pointer).
+ */
+ rlwinm SCRATCH_REGISTER_1, VECTOR_REGISTER, 2, 25, 29
+
+ /* Load handler table address */
+ LA SCRATCH_REGISTER_0, ppc_exc_handler_table
+
+ /* Load handler address */
+ lwzx SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+
+ /*
+ * First parameter = exception frame pointer + FRAME_LINK_SPACE
+ *
+ * We add FRAME_LINK_SPACE to the frame pointer because the high level
+ * handler expects a BSP_Exception_frame structure.
+ */
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
+
+ /*
+ * Second parameter = vector number (r4 is the VECTOR_REGISTER)
+ *
+ * Discard the exception type and store the vector number
+ * in the vector register. Take only the least significant
+ * five bits (= LAST_VALID_EXC + 1).
+ */
+ rlwinm VECTOR_REGISTER, VECTOR_REGISTER, 0, 27, 31
+
+ /* Call handler */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+ /* Check return value and call global handler if necessary */
+ cmpwi r3, 0
+ bne wrap_call_global_handler_\_FLVR
+
+wrap_handler_done_\_FLVR:
+
+ /* Check if exception is recoverable */
+ RECOVER_CHECK_\_PRI _FLVR=\_FLVR
+
+ /*
+ * Depending on the exception type we do now restore the non-volatile
+ * registers or enable thread dispatching and switch back from the ISR
+ * stack.
+ */
+
+ /* Branch for synchronous exceptions */
+ bge CR_TYPE, wrap_restore_non_volatile_regs_\_FLVR
+
+ /*
+ * Switch back to original stack (FRAME_REGISTER == r1 if we are still
+ * on the IRQ stack).
+ */
+ mr r1, FRAME_REGISTER
+
+ /*
+ * Check thread dispatch disable level AND lower priority locks (in
+ * CR_LOCK): ONLY if the thread dispatch disable level == 0 AND no lock
+ * is set then call ppc_exc_wrapup() which may do a context switch. We
+ * can skip TEST_LOCK, because it has no side effects.
+ */
+
+ /* Decrement ISR nest level and thread dispatch disable level */
+ GET_SELF_CPU_CONTROL SCRATCH_REGISTER_2
+ lwz SCRATCH_REGISTER_0, PER_CPU_ISR_NEST_LEVEL@l(SCRATCH_REGISTER_2)
+ lwz SCRATCH_REGISTER_1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_REGISTER_2)
+ subi SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, 1
+ subic. SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, 1
+ stw SCRATCH_REGISTER_0, PER_CPU_ISR_NEST_LEVEL@l(SCRATCH_REGISTER_2)
+ stw SCRATCH_REGISTER_1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_REGISTER_2)
+
+ /* Branch to skip thread dispatching */
+ bne wrap_thread_dispatching_done_\_FLVR
+
+ /* Test lower-priority locks (result in non-volatile CR_LOCK) */
+ TEST_LOCK_\_PRI _FLVR=\_FLVR
+
+ /* Branch to skip thread dispatching */
+ bne CR_LOCK, wrap_thread_dispatching_done_\_FLVR
+
+ /* Load address of ppc_exc_wrapup() */
+ LA SCRATCH_REGISTER_0, ppc_exc_wrapup
+
+ /* First parameter = exception frame pointer + FRAME_LINK_SPACE */
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
+
+ /* Call ppc_exc_wrapup() */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+wrap_thread_dispatching_done_\_FLVR:
+
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ LA SCRATCH_REGISTER_0, _CPU_load_altivec_volatile
+ mtctr SCRATCH_REGISTER_0
+ addi r3, FRAME_REGISTER, EXC_VEC_OFFSET
+ bctrl
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li SCRATCH_REGISTER_0, PPC_EXC_MIN_VSCR_OFFSET
+ lvewx v0, r1, SCRATCH_REGISTER_0
+ mtvscr v0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(0)
+ lvx v0, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(1)
+ lvx v1, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(2)
+ lvx v2, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(3)
+ lvx v3, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(4)
+ lvx v4, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(5)
+ lvx v5, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(6)
+ lvx v6, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(7)
+ lvx v7, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(8)
+ lvx v8, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(9)
+ lvx v9, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(10)
+ lvx v10, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(11)
+ lvx v11, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(12)
+ lvx v12, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(13)
+ lvx v13, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(14)
+ lvx v14, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(15)
+ lvx v15, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(16)
+ lvx v16, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(17)
+ lvx v17, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(18)
+ lvx v18, FRAME_REGISTER, SCRATCH_REGISTER_0
+ li SCRATCH_REGISTER_0, PPC_EXC_VR_OFFSET(19)
+ lvx v19, FRAME_REGISTER, SCRATCH_REGISTER_0
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ lfd f0, PPC_EXC_FPSCR_OFFSET(FRAME_REGISTER)
+ mtfsf 0xff, f0
+ lfd f0, PPC_EXC_FR_OFFSET(0)(FRAME_REGISTER)
+ lfd f1, PPC_EXC_FR_OFFSET(1)(FRAME_REGISTER)
+ lfd f2, PPC_EXC_FR_OFFSET(2)(FRAME_REGISTER)
+ lfd f3, PPC_EXC_FR_OFFSET(3)(FRAME_REGISTER)
+ lfd f4, PPC_EXC_FR_OFFSET(4)(FRAME_REGISTER)
+ lfd f5, PPC_EXC_FR_OFFSET(5)(FRAME_REGISTER)
+ lfd f6, PPC_EXC_FR_OFFSET(6)(FRAME_REGISTER)
+ lfd f7, PPC_EXC_FR_OFFSET(7)(FRAME_REGISTER)
+ lfd f8, PPC_EXC_FR_OFFSET(8)(FRAME_REGISTER)
+ lfd f9, PPC_EXC_FR_OFFSET(9)(FRAME_REGISTER)
+ lfd f10, PPC_EXC_FR_OFFSET(10)(FRAME_REGISTER)
+ lfd f11, PPC_EXC_FR_OFFSET(11)(FRAME_REGISTER)
+ lfd f12, PPC_EXC_FR_OFFSET(12)(FRAME_REGISTER)
+ lfd f13, PPC_EXC_FR_OFFSET(13)(FRAME_REGISTER)
+#endif
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Restore MSR? */
+ bne CR_MSR, wrap_restore_msr_\_FLVR
+
+wrap_restore_msr_done_\_FLVR:
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ /*
+ * At this point r1 is a valid exception frame pointer and
+ * FRAME_REGISTER is no longer needed.
+ */
+
+ /* Restore frame register */
+ lwz FRAME_REGISTER, FRAME_OFFSET(r1)
+
+ /* Restore XER and CTR */
+ lwz SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
+ mtxer SCRATCH_REGISTER_0
+ mtctr SCRATCH_REGISTER_1
+
+ /* Restore CR and LR */
+ lwz SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)
+ mtcr SCRATCH_REGISTER_0
+ mtlr SCRATCH_REGISTER_1
+
+ /* Restore volatile registers */
+ lwz r0, GPR0_OFFSET(r1)
+ lwz r3, GPR3_OFFSET(r1)
+ lwz r8, GPR8_OFFSET(r1)
+ lwz r9, GPR9_OFFSET(r1)
+ lwz r10, GPR10_OFFSET(r1)
+ lwz r11, GPR11_OFFSET(r1)
+ lwz r12, GPR12_OFFSET(r1)
+
+ /* Restore read-only small data area anchor (EABI) */
+ lwz r2, GPR2_OFFSET(r1)
+
+ /* Restore vector register */
+ lwz VECTOR_REGISTER, VECTOR_OFFSET(r1)
+
+ /*
+ * Disable all asynchronous exceptions which can do a thread dispatch.
+ * See README.
+ */
+ INTERRUPT_DISABLE SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+
+ /* Restore scratch registers and SRRs */
+ lwz SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
+ mtspr \_SRR0, SCRATCH_REGISTER_0
+ lwz SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
+ mtspr \_SRR1, SCRATCH_REGISTER_1
+ lwz SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
+
+ /*
+ * We restore r1 from the frame rather than just popping (adding to
+ * current r1) since the exception handler might have done strange
+ * things (e.g. a debugger moving and relocating the stack).
+ */
+ lwz r1, 0(r1)
+
+ /* Return */
+ \_RFI
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+wrap_change_msr_\_FLVR:
+
+ mfmsr SCRATCH_REGISTER_1
+ or SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ msync
+ isync
+ b wrap_change_msr_done_\_FLVR
+
+wrap_restore_msr_\_FLVR:
+
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+ mfmsr SCRATCH_REGISTER_1
+ andc SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ msync
+ isync
+ b wrap_restore_msr_done_\_FLVR
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+wrap_save_non_volatile_regs_\_FLVR:
+
+ /* Load pristine stack pointer */
+ lwz SCRATCH_REGISTER_1, 0(FRAME_REGISTER)
+
+ /* Save small data area anchor (SYSV) */
+ stw r13, GPR13_OFFSET(FRAME_REGISTER)
+
+ /* Save pristine stack pointer */
+ stw SCRATCH_REGISTER_1, GPR1_OFFSET(FRAME_REGISTER)
+
+ /* r14 is the FRAME_REGISTER and will be saved elsewhere */
+
+ /* Save non-volatile registers r15 .. r31 */
+#ifndef __SPE__
+ stmw r15, GPR15_OFFSET(FRAME_REGISTER)
+#else
+ stw r15, GPR15_OFFSET(FRAME_REGISTER)
+ stw r16, GPR16_OFFSET(FRAME_REGISTER)
+ stw r17, GPR17_OFFSET(FRAME_REGISTER)
+ stw r18, GPR18_OFFSET(FRAME_REGISTER)
+ stw r19, GPR19_OFFSET(FRAME_REGISTER)
+ stw r20, GPR20_OFFSET(FRAME_REGISTER)
+ stw r21, GPR21_OFFSET(FRAME_REGISTER)
+ stw r22, GPR22_OFFSET(FRAME_REGISTER)
+ stw r23, GPR23_OFFSET(FRAME_REGISTER)
+ stw r24, GPR24_OFFSET(FRAME_REGISTER)
+ stw r25, GPR25_OFFSET(FRAME_REGISTER)
+ stw r26, GPR26_OFFSET(FRAME_REGISTER)
+ stw r27, GPR27_OFFSET(FRAME_REGISTER)
+ stw r28, GPR28_OFFSET(FRAME_REGISTER)
+ stw r29, GPR29_OFFSET(FRAME_REGISTER)
+ stw r30, GPR30_OFFSET(FRAME_REGISTER)
+ stw r31, GPR31_OFFSET(FRAME_REGISTER)
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(20)
+ stvx v20, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(21)
+ stvx v21, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(22)
+ stvx v22, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(23)
+ stvx v23, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(24)
+ stvx v24, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(25)
+ stvx v25, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(26)
+ stvx v26, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(27)
+ stvx v27, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(28)
+ stvx v28, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(29)
+ stvx v29, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(30)
+ stvx v30, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(31)
+ stvx v31, FRAME_REGISTER, SCRATCH_REGISTER_1
+ mfvrsave SCRATCH_REGISTER_1
+ stw SCRATCH_REGISTER_1, PPC_EXC_VRSAVE_OFFSET(FRAME_REGISTER)
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ stfd f14, PPC_EXC_FR_OFFSET(14)(FRAME_REGISTER)
+ stfd f15, PPC_EXC_FR_OFFSET(15)(FRAME_REGISTER)
+ stfd f16, PPC_EXC_FR_OFFSET(16)(FRAME_REGISTER)
+ stfd f17, PPC_EXC_FR_OFFSET(17)(FRAME_REGISTER)
+ stfd f18, PPC_EXC_FR_OFFSET(18)(FRAME_REGISTER)
+ stfd f19, PPC_EXC_FR_OFFSET(19)(FRAME_REGISTER)
+ stfd f20, PPC_EXC_FR_OFFSET(20)(FRAME_REGISTER)
+ stfd f21, PPC_EXC_FR_OFFSET(21)(FRAME_REGISTER)
+ stfd f22, PPC_EXC_FR_OFFSET(22)(FRAME_REGISTER)
+ stfd f23, PPC_EXC_FR_OFFSET(23)(FRAME_REGISTER)
+ stfd f24, PPC_EXC_FR_OFFSET(24)(FRAME_REGISTER)
+ stfd f25, PPC_EXC_FR_OFFSET(25)(FRAME_REGISTER)
+ stfd f26, PPC_EXC_FR_OFFSET(26)(FRAME_REGISTER)
+ stfd f27, PPC_EXC_FR_OFFSET(27)(FRAME_REGISTER)
+ stfd f28, PPC_EXC_FR_OFFSET(28)(FRAME_REGISTER)
+ stfd f29, PPC_EXC_FR_OFFSET(29)(FRAME_REGISTER)
+ stfd f30, PPC_EXC_FR_OFFSET(30)(FRAME_REGISTER)
+ stfd f31, PPC_EXC_FR_OFFSET(31)(FRAME_REGISTER)
+#endif
+
+ b wrap_disable_thread_dispatching_done_\_FLVR
+
+wrap_restore_non_volatile_regs_\_FLVR:
+
+ /* Load stack pointer */
+ lwz SCRATCH_REGISTER_0, GPR1_OFFSET(r1)
+
+ /* Restore small data area anchor (SYSV) */
+ lwz r13, GPR13_OFFSET(r1)
+
+ /* r14 is the FRAME_REGISTER and will be restored elsewhere */
+
+ /* Restore non-volatile registers r15 .. r31 */
+#ifndef __SPE__
+ lmw r15, GPR15_OFFSET(r1)
+#else
+ lwz r15, GPR15_OFFSET(FRAME_REGISTER)
+ lwz r16, GPR16_OFFSET(FRAME_REGISTER)
+ lwz r17, GPR17_OFFSET(FRAME_REGISTER)
+ lwz r18, GPR18_OFFSET(FRAME_REGISTER)
+ lwz r19, GPR19_OFFSET(FRAME_REGISTER)
+ lwz r20, GPR20_OFFSET(FRAME_REGISTER)
+ lwz r21, GPR21_OFFSET(FRAME_REGISTER)
+ lwz r22, GPR22_OFFSET(FRAME_REGISTER)
+ lwz r23, GPR23_OFFSET(FRAME_REGISTER)
+ lwz r24, GPR24_OFFSET(FRAME_REGISTER)
+ lwz r25, GPR25_OFFSET(FRAME_REGISTER)
+ lwz r26, GPR26_OFFSET(FRAME_REGISTER)
+ lwz r27, GPR27_OFFSET(FRAME_REGISTER)
+ lwz r28, GPR28_OFFSET(FRAME_REGISTER)
+ lwz r29, GPR29_OFFSET(FRAME_REGISTER)
+ lwz r30, GPR30_OFFSET(FRAME_REGISTER)
+ lwz r31, GPR31_OFFSET(FRAME_REGISTER)
+#endif
+
+ /*
+ * Restore stack pointer: write it to the back chain word of the exception
+ * frame so that the final "lwz r1, 0(r1)" of the epilogue picks it up.
+ */
+ stw SCRATCH_REGISTER_0, 0(r1)
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(20)
+ lvx v20, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(21)
+ lvx v21, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(22)
+ lvx v22, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(23)
+ lvx v23, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(24)
+ lvx v24, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(25)
+ lvx v25, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(26)
+ lvx v26, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(27)
+ lvx v27, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(28)
+ lvx v28, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(29)
+ lvx v29, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(30)
+ lvx v30, FRAME_REGISTER, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(31)
+ lvx v31, FRAME_REGISTER, SCRATCH_REGISTER_1
+ lwz SCRATCH_REGISTER_1, PPC_EXC_VRSAVE_OFFSET(FRAME_REGISTER)
+ mtvrsave SCRATCH_REGISTER_1
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ lfd f14, PPC_EXC_FR_OFFSET(14)(FRAME_REGISTER)
+ lfd f15, PPC_EXC_FR_OFFSET(15)(FRAME_REGISTER)
+ lfd f16, PPC_EXC_FR_OFFSET(16)(FRAME_REGISTER)
+ lfd f17, PPC_EXC_FR_OFFSET(17)(FRAME_REGISTER)
+ lfd f18, PPC_EXC_FR_OFFSET(18)(FRAME_REGISTER)
+ lfd f19, PPC_EXC_FR_OFFSET(19)(FRAME_REGISTER)
+ lfd f20, PPC_EXC_FR_OFFSET(20)(FRAME_REGISTER)
+ lfd f21, PPC_EXC_FR_OFFSET(21)(FRAME_REGISTER)
+ lfd f22, PPC_EXC_FR_OFFSET(22)(FRAME_REGISTER)
+ lfd f23, PPC_EXC_FR_OFFSET(23)(FRAME_REGISTER)
+ lfd f24, PPC_EXC_FR_OFFSET(24)(FRAME_REGISTER)
+ lfd f25, PPC_EXC_FR_OFFSET(25)(FRAME_REGISTER)
+ lfd f26, PPC_EXC_FR_OFFSET(26)(FRAME_REGISTER)
+ lfd f27, PPC_EXC_FR_OFFSET(27)(FRAME_REGISTER)
+ lfd f28, PPC_EXC_FR_OFFSET(28)(FRAME_REGISTER)
+ lfd f29, PPC_EXC_FR_OFFSET(29)(FRAME_REGISTER)
+ lfd f30, PPC_EXC_FR_OFFSET(30)(FRAME_REGISTER)
+ lfd f31, PPC_EXC_FR_OFFSET(31)(FRAME_REGISTER)
+#endif
+
+ b wrap_thread_dispatching_done_\_FLVR
+
+wrap_call_global_handler_\_FLVR:
+
+ /* First parameter = exception frame pointer + FRAME_LINK_SPACE */
+ addi r3, FRAME_REGISTER, FRAME_LINK_SPACE
+
+#ifndef PPC_EXC_CONFIG_USE_FIXED_HANDLER
+
+ /* Load global handler address */
+ LW SCRATCH_REGISTER_0, globalExceptHdl
+
+ /* Check address */
+ cmpwi SCRATCH_REGISTER_0, 0
+ beq wrap_handler_done_\_FLVR
+
+ /* Call global handler */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+#else /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+
+ /* Call fixed global handler */
+ bl C_exception_handler
+
+#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+
+ b wrap_handler_done_\_FLVR
+
+ .endm
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_async_normal.S b/bsps/powerpc/shared/exceptions/ppc_exc_async_normal.S
new file mode 100644
index 0000000000..4b318e5e16
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_async_normal.S
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2011, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bspopts.h>
+#include <rtems/score/percpu.h>
+#include <bsp/vectors.h>
+
+#ifdef PPC_EXC_CONFIG_USE_FIXED_HANDLER
+
+#define SCRATCH_0_REGISTER r0
+#define SCRATCH_1_REGISTER r3
+#define SCRATCH_2_REGISTER r4
+#define SCRATCH_3_REGISTER r5
+#define SCRATCH_4_REGISTER r6
+#define SCRATCH_5_REGISTER r7
+#define SCRATCH_6_REGISTER r8
+#define SCRATCH_7_REGISTER r9
+#define SCRATCH_8_REGISTER r10
+#define SCRATCH_9_REGISTER r11
+#define SCRATCH_10_REGISTER r12
+#define FRAME_REGISTER r14
+
+#define SCRATCH_0_OFFSET GPR0_OFFSET
+#define SCRATCH_1_OFFSET GPR3_OFFSET
+#define SCRATCH_2_OFFSET GPR4_OFFSET
+#define SCRATCH_3_OFFSET GPR5_OFFSET
+#define SCRATCH_4_OFFSET GPR6_OFFSET
+#define SCRATCH_5_OFFSET GPR7_OFFSET
+#define SCRATCH_6_OFFSET GPR8_OFFSET
+#define SCRATCH_7_OFFSET GPR9_OFFSET
+#define SCRATCH_8_OFFSET GPR10_OFFSET
+#define SCRATCH_9_OFFSET GPR11_OFFSET
+#define SCRATCH_10_OFFSET GPR12_OFFSET
+#define FRAME_OFFSET PPC_EXC_INTERRUPT_FRAME_OFFSET
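+
+/*
+ * Note that the scratch registers cover the volatile GPRs r0 and r3..r12,
+ * while FRAME_REGISTER uses the non-volatile r14, so its contents survive
+ * the function calls made below.
+ */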
+
+#ifdef RTEMS_PROFILING
+.macro GET_TIME_BASE REG
+#if defined(__PPC_CPU_E6500__)
+ mfspr \REG, FSL_EIS_ATBL
+#elif defined(ppc8540)
+ mfspr \REG, TBRL
+#else /* ppc8540 */
+ mftb \REG
+#endif /* ppc8540 */
+.endm
+#endif /* RTEMS_PROFILING */
+
+ .global ppc_exc_min_prolog_async_tmpl_normal
+ .global ppc_exc_interrupt
+
+ppc_exc_min_prolog_async_tmpl_normal:
+
+ stwu r1, -PPC_EXC_INTERRUPT_FRAME_SIZE(r1)
+ PPC_REG_STORE SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+ li SCRATCH_1_REGISTER, 0xffff8000
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ */
+ .int ppc_exc_interrupt
+
+ppc_exc_interrupt:
+
+ /* Save non-volatile FRAME_REGISTER */
+ PPC_REG_STORE FRAME_REGISTER, FRAME_OFFSET(r1)
+
+#ifdef RTEMS_PROFILING
+ /* Get entry instant */
+ GET_TIME_BASE FRAME_REGISTER
+ stw FRAME_REGISTER, PPC_EXC_INTERRUPT_ENTRY_INSTANT_OFFSET(r1)
+#endif /* RTEMS_PROFILING */
+
+#ifdef __SPE__
+ /* Enable SPE */
+ mfmsr FRAME_REGISTER
+ oris FRAME_REGISTER, FRAME_REGISTER, MSR_SPE >> 16
+ mtmsr FRAME_REGISTER
+ isync
+
+ /*
+ * Save high order part of SCRATCH_1_REGISTER here. The low order part
+ * was saved in the minimal prologue.
+ */
+ evmergehi SCRATCH_1_REGISTER, SCRATCH_1_REGISTER, FRAME_REGISTER
+ PPC_REG_STORE FRAME_REGISTER, GPR3_OFFSET(r1)
+#endif
+
+#if defined(PPC_MULTILIB_FPU) || defined(PPC_MULTILIB_ALTIVEC)
+ /* Enable FPU and/or AltiVec */
+ mfmsr FRAME_REGISTER
+#ifdef PPC_MULTILIB_FPU
+ ori FRAME_REGISTER, FRAME_REGISTER, MSR_FP
+#endif
+#ifdef PPC_MULTILIB_ALTIVEC
+ oris FRAME_REGISTER, FRAME_REGISTER, MSR_VE >> 16
+#endif
+ mtmsr FRAME_REGISTER
+ isync
+#endif
+
+ /* Move frame pointer to non-volatile FRAME_REGISTER */
+ mr FRAME_REGISTER, r1
+
+ /*
+ * Save volatile registers. SCRATCH_1_REGISTER has already been saved in
+ * the minimal prologue.
+ */
+ PPC_GPR_STORE SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+#ifdef __powerpc64__
+ PPC_GPR_STORE r2, GPR2_OFFSET(r1)
+ LA32 r2, .TOC.
+#endif
+ PPC_GPR_STORE SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+ GET_SELF_CPU_CONTROL SCRATCH_2_REGISTER
+ PPC_GPR_STORE SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_6_REGISTER, SCRATCH_6_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_7_REGISTER, SCRATCH_7_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_8_REGISTER, SCRATCH_8_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_9_REGISTER, SCRATCH_9_OFFSET(r1)
+ PPC_GPR_STORE SCRATCH_10_REGISTER, SCRATCH_10_OFFSET(r1)
+
+ /* Load ISR nest level and thread dispatch disable level */
+ lwz SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_2_REGISTER)
+ lwz SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_2_REGISTER)
+
+ /* Save SRR0, SRR1, CR, XER, CTR, and LR */
+ mfsrr0 SCRATCH_0_REGISTER
+ mfsrr1 SCRATCH_5_REGISTER
+ mfcr SCRATCH_6_REGISTER
+ mfxer SCRATCH_7_REGISTER
+ mfctr SCRATCH_8_REGISTER
+ mflr SCRATCH_9_REGISTER
+ PPC_REG_STORE SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
+ PPC_REG_STORE SCRATCH_5_REGISTER, SRR1_FRAME_OFFSET(r1)
+ stw SCRATCH_6_REGISTER, EXC_CR_OFFSET(r1)
+ stw SCRATCH_7_REGISTER, EXC_XER_OFFSET(r1)
+ PPC_REG_STORE SCRATCH_8_REGISTER, EXC_CTR_OFFSET(r1)
+ PPC_REG_STORE SCRATCH_9_REGISTER, EXC_LR_OFFSET(r1)
+
+#ifdef __SPE__
+ /* Save SPEFSCR and ACC */
+ mfspr SCRATCH_0_REGISTER, FSL_EIS_SPEFSCR
+ evxor SCRATCH_5_REGISTER, SCRATCH_5_REGISTER, SCRATCH_5_REGISTER
+ evmwumiaa SCRATCH_5_REGISTER, SCRATCH_5_REGISTER, SCRATCH_5_REGISTER
+ stw SCRATCH_0_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
+ evstdd SCRATCH_5_REGISTER, PPC_EXC_ACC_OFFSET(r1)
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ /* Save volatile AltiVec context */
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(0)
+ stvx v0, r1, SCRATCH_0_REGISTER
+ mfvscr v0
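+ /* v0 was saved above and now holds the VSCR; it is stored below via stvewx */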
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(1)
+ stvx v1, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(2)
+ stvx v2, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(3)
+ stvx v3, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(4)
+ stvx v4, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(5)
+ stvx v5, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(6)
+ stvx v6, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(7)
+ stvx v7, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(8)
+ stvx v8, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(9)
+ stvx v9, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(10)
+ stvx v10, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(11)
+ stvx v11, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(12)
+ stvx v12, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(13)
+ stvx v13, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(14)
+ stvx v14, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(15)
+ stvx v15, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(16)
+ stvx v16, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(17)
+ stvx v17, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(18)
+ stvx v18, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(19)
+ stvx v19, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VSCR_OFFSET
+ stvewx v0, r1, SCRATCH_0_REGISTER
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ /* Save volatile FPU context */
+ stfd f0, PPC_EXC_MIN_FR_OFFSET(0)(r1)
+ mffs f0
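+ /* f0 was saved above and now holds the FPSCR; it is stored to the frame below */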
+ stfd f1, PPC_EXC_MIN_FR_OFFSET(1)(r1)
+ stfd f2, PPC_EXC_MIN_FR_OFFSET(2)(r1)
+ stfd f3, PPC_EXC_MIN_FR_OFFSET(3)(r1)
+ stfd f4, PPC_EXC_MIN_FR_OFFSET(4)(r1)
+ stfd f5, PPC_EXC_MIN_FR_OFFSET(5)(r1)
+ stfd f6, PPC_EXC_MIN_FR_OFFSET(6)(r1)
+ stfd f7, PPC_EXC_MIN_FR_OFFSET(7)(r1)
+ stfd f8, PPC_EXC_MIN_FR_OFFSET(8)(r1)
+ stfd f9, PPC_EXC_MIN_FR_OFFSET(9)(r1)
+ stfd f10, PPC_EXC_MIN_FR_OFFSET(10)(r1)
+ stfd f11, PPC_EXC_MIN_FR_OFFSET(11)(r1)
+ stfd f12, PPC_EXC_MIN_FR_OFFSET(12)(r1)
+ stfd f13, PPC_EXC_MIN_FR_OFFSET(13)(r1)
+ stfd f0, PPC_EXC_MIN_FPSCR_OFFSET(r1)
+#endif
+
+ /* Increment ISR nest level and thread dispatch disable level */
+ cmpwi SCRATCH_3_REGISTER, 0
+#ifdef RTEMS_PROFILING
+ cmpwi cr2, SCRATCH_3_REGISTER, 0
+#endif
+ addi SCRATCH_3_REGISTER, SCRATCH_3_REGISTER, 1
+ addi SCRATCH_4_REGISTER, SCRATCH_4_REGISTER, 1
+ stw SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_2_REGISTER)
+ stw SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_2_REGISTER)
+
+ /* Switch stack if necessary */
+ mfspr SCRATCH_0_REGISTER, SPRG1
+ iselgt r1, r1, SCRATCH_0_REGISTER
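+ /*
+ * The iselgt above keeps the current r1 if the previous ISR nest level
+ * was non-zero (cr0 GT from the cmpwi above), otherwise r1 is loaded
+ * with the value fetched from SPRG1.
+ */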
+
+ /* Call fixed high level handler */
+ bl bsp_interrupt_dispatch
+ PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
+
+#ifdef RTEMS_PROFILING
+ /* Update profiling data if necessary */
+ bne cr2, .Lprofiling_done
+ GET_SELF_CPU_CONTROL r3
+ lwz r4, PPC_EXC_INTERRUPT_ENTRY_INSTANT_OFFSET(FRAME_REGISTER)
+ GET_TIME_BASE r5
+ bl _Profiling_Outer_most_interrupt_entry_and_exit
+ PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
+.Lprofiling_done:
+#endif /* RTEMS_PROFILING */
+
+ /* Load some per-CPU variables */
+ GET_SELF_CPU_CONTROL SCRATCH_1_REGISTER
+ lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SCRATCH_1_REGISTER)
+ lwz SCRATCH_5_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
+ lwz SCRATCH_6_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
+ lwz SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_1_REGISTER)
+
+ /*
+ * Switch back to original stack (FRAME_REGISTER == r1 if we are still
+ * on the IRQ stack) and restore FRAME_REGISTER.
+ */
+ mr r1, FRAME_REGISTER
+ PPC_REG_LOAD FRAME_REGISTER, FRAME_OFFSET(r1)
+
+ /* Decrement levels and determine thread dispatch state */
+ xori SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, 1
+ or SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, SCRATCH_5_REGISTER
+ subi SCRATCH_4_REGISTER, SCRATCH_6_REGISTER, 1
+ or. SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, SCRATCH_4_REGISTER
+ subi SCRATCH_3_REGISTER, SCRATCH_3_REGISTER, 1
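+ /*
+ * SCRATCH_0_REGISTER is now zero (cr0 EQ set by the or. above) only if
+ * a thread dispatch is necessary, the ISR dispatch disable flag is
+ * clear and the new thread dispatch disable level is zero.
+ */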
+
+ /* Store thread dispatch disable and ISR nest levels */
+ stw SCRATCH_4_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
+ stw SCRATCH_3_REGISTER, PER_CPU_ISR_NEST_LEVEL(SCRATCH_1_REGISTER)
+
+ /*
+ * Check thread dispatch necessary, ISR dispatch disable and thread
+ * dispatch disable level.
+ */
+ bne .Lthread_dispatch_done
+
+ /* Thread dispatch */
+.Ldo_thread_dispatch:
+
+ /* Set ISR dispatch disable and thread dispatch disable level to one */
+ li SCRATCH_0_REGISTER, 1
+ stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
+ stw SCRATCH_0_REGISTER, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(SCRATCH_1_REGISTER)
+
+ /*
+ * Call _Thread_Do_dispatch(); this function will enable interrupts.
+ * The first argument (r3 == SCRATCH_1_REGISTER) is the per-CPU control
+ * and the second argument (r4) is the MSR with MSR_EE set.
+ */
+ mfmsr r4
+ ori r4, r4, MSR_EE
+ bl _Thread_Do_dispatch
+ PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
+
+ /* Disable interrupts */
+ wrteei 0
+
+ /* SCRATCH_1_REGISTER is volatile, we must set it again */
+ GET_SELF_CPU_CONTROL SCRATCH_1_REGISTER
+
+ /* Check if we have to do the thread dispatch again */
+ lbz SCRATCH_0_REGISTER, PER_CPU_DISPATCH_NEEDED(SCRATCH_1_REGISTER)
+ cmpwi SCRATCH_0_REGISTER, 0
+ bne .Ldo_thread_dispatch
+
+ /* We are done with thread dispatching */
+ li SCRATCH_0_REGISTER, 0
+ stw SCRATCH_0_REGISTER, PER_CPU_ISR_DISPATCH_DISABLE(SCRATCH_1_REGISTER)
+
+.Lthread_dispatch_done:
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ /* Restore volatile AltiVec context */
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VSCR_OFFSET
+ lvewx v0, r1, SCRATCH_0_REGISTER
+ mtvscr v0
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(0)
+ lvx v0, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(1)
+ lvx v1, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(2)
+ lvx v2, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(3)
+ lvx v3, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(4)
+ lvx v4, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(5)
+ lvx v5, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(6)
+ lvx v6, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(7)
+ lvx v7, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(8)
+ lvx v8, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(9)
+ lvx v9, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(10)
+ lvx v10, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(11)
+ lvx v11, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(12)
+ lvx v12, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(13)
+ lvx v13, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(14)
+ lvx v14, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(15)
+ lvx v15, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(16)
+ lvx v16, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(17)
+ lvx v17, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(18)
+ lvx v18, r1, SCRATCH_0_REGISTER
+ li SCRATCH_0_REGISTER, PPC_EXC_MIN_VR_OFFSET(19)
+ lvx v19, r1, SCRATCH_0_REGISTER
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ /* Restore volatile FPU context */
+ lfd f0, PPC_EXC_MIN_FPSCR_OFFSET(r1)
+ mtfsf 0xff, f0
+ lfd f0, PPC_EXC_MIN_FR_OFFSET(0)(r1)
+ lfd f1, PPC_EXC_MIN_FR_OFFSET(1)(r1)
+ lfd f2, PPC_EXC_MIN_FR_OFFSET(2)(r1)
+ lfd f3, PPC_EXC_MIN_FR_OFFSET(3)(r1)
+ lfd f4, PPC_EXC_MIN_FR_OFFSET(4)(r1)
+ lfd f5, PPC_EXC_MIN_FR_OFFSET(5)(r1)
+ lfd f6, PPC_EXC_MIN_FR_OFFSET(6)(r1)
+ lfd f7, PPC_EXC_MIN_FR_OFFSET(7)(r1)
+ lfd f8, PPC_EXC_MIN_FR_OFFSET(8)(r1)
+ lfd f9, PPC_EXC_MIN_FR_OFFSET(9)(r1)
+ lfd f10, PPC_EXC_MIN_FR_OFFSET(10)(r1)
+ lfd f11, PPC_EXC_MIN_FR_OFFSET(11)(r1)
+ lfd f12, PPC_EXC_MIN_FR_OFFSET(12)(r1)
+ lfd f13, PPC_EXC_MIN_FR_OFFSET(13)(r1)
+#endif
+
+#ifdef __SPE__
+ /* Load SPEFSCR and ACC */
+ lwz SCRATCH_3_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
+ evldd SCRATCH_4_REGISTER, PPC_EXC_ACC_OFFSET(r1)
+#endif
+
+ /*
+ * We must clear reservations here, since otherwise compare-and-swap
+ * atomic operations with interrupts enabled may yield wrong results.
+ * A compare-and-swap atomic operation is generated by the compiler
+ * like this:
+ *
+ * .L1:
+ * lwarx r9, r0, r3
+ * cmpw r9, r4
+ * bne- .L2
+ * stwcx. r5, r0, r3
+ * bne- .L1
+ * .L2:
+ *
+ * Consider the following scenario. A thread is interrupted right
+ * before its stwcx. The interrupt updates the value using a
+ * compare-and-swap sequence. Everything is fine up to this point.
+ * The interrupt now performs a second compare-and-swap sequence which
+ * fails with a branch to .L2. The current processor now holds a
+ * reservation. The interrupt returns without a further stwcx. The
+ * thread then updates the value using the unrelated reservation of
+ * the interrupt.
+ */
+ li SCRATCH_0_REGISTER, FRAME_OFFSET
+ stwcx. SCRATCH_0_REGISTER, r1, SCRATCH_0_REGISTER
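+ /*
+ * The stwcx. above targets the FRAME_OFFSET slot of our own frame, which
+ * is dead at this point since FRAME_REGISTER was already reloaded, so it
+ * is harmless whether the conditional store succeeds or fails; either way
+ * the reservation is cleared.
+ */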
+
+ /* Load SRR0, SRR1, CR, XER, CTR, and LR */
+ PPC_REG_LOAD SCRATCH_5_REGISTER, SRR0_FRAME_OFFSET(r1)
+ PPC_REG_LOAD SCRATCH_6_REGISTER, SRR1_FRAME_OFFSET(r1)
+ lwz SCRATCH_7_REGISTER, EXC_CR_OFFSET(r1)
+ lwz SCRATCH_8_REGISTER, EXC_XER_OFFSET(r1)
+ PPC_REG_LOAD SCRATCH_9_REGISTER, EXC_CTR_OFFSET(r1)
+ PPC_REG_LOAD SCRATCH_10_REGISTER, EXC_LR_OFFSET(r1)
+
+ /* Restore volatile registers */
+ PPC_GPR_LOAD SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+#ifdef __powerpc64__
+ PPC_GPR_LOAD r2, GPR2_OFFSET(r1)
+#endif
+ PPC_GPR_LOAD SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+ PPC_GPR_LOAD SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+
+#ifdef __SPE__
+ /* Restore SPEFSCR and ACC */
+ mtspr FSL_EIS_SPEFSCR, SCRATCH_3_REGISTER
+ evmra SCRATCH_4_REGISTER, SCRATCH_4_REGISTER
+#endif
+
+ /* Restore volatile registers */
+ PPC_GPR_LOAD SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+ PPC_GPR_LOAD SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+
+ /* Restore SRR0, SRR1, CR, CTR, XER, and LR plus volatile registers */
+ mtsrr0 SCRATCH_5_REGISTER
+ PPC_GPR_LOAD SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+ mtsrr1 SCRATCH_6_REGISTER
+ PPC_GPR_LOAD SCRATCH_6_REGISTER, SCRATCH_6_OFFSET(r1)
+ mtcr SCRATCH_7_REGISTER
+ PPC_GPR_LOAD SCRATCH_7_REGISTER, SCRATCH_7_OFFSET(r1)
+ mtxer SCRATCH_8_REGISTER
+ PPC_GPR_LOAD SCRATCH_8_REGISTER, SCRATCH_8_OFFSET(r1)
+ mtctr SCRATCH_9_REGISTER
+ PPC_GPR_LOAD SCRATCH_9_REGISTER, SCRATCH_9_OFFSET(r1)
+ mtlr SCRATCH_10_REGISTER
+ PPC_GPR_LOAD SCRATCH_10_REGISTER, SCRATCH_10_OFFSET(r1)
+
+ /* Pop stack */
+ addi r1, r1, PPC_EXC_INTERRUPT_FRAME_SIZE
+
+ /* Return */
+ rfi
+
+/* Symbol provided for debugging and tracing */
+ppc_exc_interrupt_end:
+
+#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_categories.c b/bsps/powerpc/shared/exceptions/ppc_exc_categories.c
new file mode 100644
index 0000000000..46508abcdf
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_categories.c
@@ -0,0 +1,325 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Copyright (C) 2009-2011 embedded brains GmbH.
+ *
+ * Enhanced by Jay Kulpinski <jskulpin@eng01.gdds.com>
+ * to support 603, 603e, 604, 604e exceptions
+ *
+ * Moved to "libcpu/powerpc/new-exceptions" and consolidated
+ * by Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
+ * to be common for all PPCs with new exceptions.
+ *
+ * Derived from file "libcpu/powerpc/new-exceptions/raw_exception.c".
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp/vectors.h>
+
+#define PPC_BASIC_VECS_WO_SYS \
+ [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC, \
+ [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
+ [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC
+
+#define PPC_BASIC_VECS \
+ PPC_BASIC_VECS_WO_SYS, \
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC
+
+static const ppc_exc_categories ppc_405_category_table = {
+ [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_MACH_VECTOR] = PPC_EXC_405_CRITICAL,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+
+ [ASM_PPC405_APU_UNAVAIL_VECTOR] = PPC_EXC_CLASSIC,
+
+ [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_TRACE_VECTOR] = PPC_EXC_405_CRITICAL,
+};
+
+static const ppc_exc_categories ppc_booke_category_table = {
+ [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+};
+
+static const ppc_exc_categories mpc_5xx_category_table = {
+ [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
+
+ [ASM_5XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_IPROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_DPROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_MEBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_5XX_NMEBREAK_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_603_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_604_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_604_altivec_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_750_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_750_altivec_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories mpc_860_category_table = {
+ PPC_BASIC_VECS,
+
+ [ASM_8XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_ITLBERROR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_DTLBERROR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_PERIFBREAK_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_8XX_DEVPORT_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+static const ppc_exc_categories e300_category_table = {
+ [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_NAKED,
+ [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
+
+ [ASM_E300_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_E300_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_IMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E300_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+};
+
+static const ppc_exc_categories e200_category_table = {
+ [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_MACH_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+
+ /* FIXME: Depending on HID0 [DAPUEN] this is a critical or debug exception */
+ [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+
+ [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC
+};
+
+static const ppc_exc_categories e500_category_table = {
+ [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
+ [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
+ [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
+ [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_E500_PERFMON_VECTOR] = PPC_EXC_CLASSIC
+};
+
+static const ppc_exc_categories psim_category_table = {
+ PPC_BASIC_VECS_WO_SYS,
+
+ [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
+ [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
+ [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
+};
+
+const ppc_exc_categories *ppc_exc_categories_for_cpu(ppc_cpu_id_t cpu)
+{
+ if (ppc_cpu_has_altivec()) {
+ switch (cpu) {
+ case PPC_7400:
+ return &mpc_750_altivec_category_table;
+ case PPC_7455:
+ case PPC_7457:
+ return &mpc_604_altivec_category_table;
+ default:
+ break;
+ }
+ }
+
+ switch (cpu) {
+ case PPC_7400:
+ case PPC_750:
+ case PPC_750_IBM:
+ return &mpc_750_category_table;
+ case PPC_7455:
+ case PPC_7457:
+ case PPC_604:
+ case PPC_604e:
+ case PPC_604r:
+ return &mpc_604_category_table;
+ case PPC_603:
+ case PPC_603e:
+ case PPC_603le:
+ case PPC_603ev:
+ /* case PPC_8240: same value as 8260 */
+ case PPC_8260:
+ case PPC_8245:
+ return &mpc_603_category_table;
+ case PPC_e300c1:
+ case PPC_e300c2:
+ case PPC_e300c3:
+ return &e300_category_table;
+ case PPC_PSIM:
+ return &psim_category_table;
+ case PPC_8540:
+ case PPC_e500v2:
+ case PPC_e6500:
+ return &e500_category_table;
+ case PPC_e200z0:
+ case PPC_e200z1:
+ case PPC_e200z4:
+ case PPC_e200z6:
+ case PPC_e200z7:
+ return &e200_category_table;
+ case PPC_5XX:
+ return &mpc_5xx_category_table;
+ case PPC_860:
+ return &mpc_860_category_table;
+ case PPC_405:
+ case PPC_405GP:
+ case PPC_405EX:
+ return &ppc_405_category_table;
+ case PPC_440:
+ return &ppc_booke_category_table;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+ppc_exc_category ppc_exc_category_for_vector(const ppc_exc_categories *categories, unsigned vector)
+{
+ if (vector <= LAST_VALID_EXC) {
+ return (*categories) [vector];
+ } else {
+ return PPC_EXC_INVALID;
+ }
+}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_fatal.S b/bsps/powerpc/shared/exceptions/ppc_exc_fatal.S
new file mode 100644
index 0000000000..1cb97e350a
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_fatal.S
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2011, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bspopts.h>
+#include <rtems/score/percpu.h>
+#include <bsp/vectors.h>
+
+#define SCRATCH_REGISTER_0 r3
+#define SCRATCH_REGISTER_1 r4
+
+ .global ppc_exc_fatal_normal
+ .global ppc_exc_fatal_critical
+ .global ppc_exc_fatal_machine_check
+ .global ppc_exc_fatal_debug
+
+ppc_exc_fatal_critical:
+
+ PPC_REG_STORE SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
+ mfcsrr0 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
+ mfcsrr1 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ b .Lppc_exc_fatal
+
+ppc_exc_fatal_machine_check:
+
+ PPC_REG_STORE SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
+ mfmcsrr0 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
+ mfmcsrr1 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ b .Lppc_exc_fatal
+
+ppc_exc_fatal_debug:
+
+ PPC_REG_STORE SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
+ mfspr SCRATCH_REGISTER_1, BOOKE_DSRR0
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
+ mfspr SCRATCH_REGISTER_1, BOOKE_DSRR1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ b .Lppc_exc_fatal
+
+ppc_exc_fatal_normal:
+
+ PPC_REG_STORE SCRATCH_REGISTER_1, GPR4_OFFSET(r1)
+ mfsrr0 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR0_FRAME_OFFSET(r1)
+ mfsrr1 SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+
+.Lppc_exc_fatal:
+
+ stw r3, EXCEPTION_NUMBER_OFFSET(r1)
+ mfcr SCRATCH_REGISTER_1
+ stw SCRATCH_REGISTER_1, EXC_CR_OFFSET(r1)
+ mfxer SCRATCH_REGISTER_1
+ stw SCRATCH_REGISTER_1, EXC_XER_OFFSET(r1)
+ mfctr SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
+ mflr SCRATCH_REGISTER_1
+ PPC_REG_STORE SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)
+ PPC_REG_STORE r0, GPR0_OFFSET(r1)
+ PPC_REG_STORE r1, GPR1_OFFSET(r1)
+ PPC_REG_STORE r2, GPR2_OFFSET(r1)
+ PPC_REG_STORE r5, GPR5_OFFSET(r1)
+ PPC_REG_STORE r6, GPR6_OFFSET(r1)
+ PPC_REG_STORE r7, GPR7_OFFSET(r1)
+ PPC_REG_STORE r8, GPR8_OFFSET(r1)
+ PPC_REG_STORE r9, GPR9_OFFSET(r1)
+ PPC_REG_STORE r10, GPR10_OFFSET(r1)
+ PPC_REG_STORE r11, GPR11_OFFSET(r1)
+ PPC_REG_STORE r12, GPR12_OFFSET(r1)
+ PPC_REG_STORE r13, GPR13_OFFSET(r1)
+ PPC_REG_STORE r14, GPR14_OFFSET(r1)
+ PPC_REG_STORE r15, GPR15_OFFSET(r1)
+ PPC_REG_STORE r16, GPR16_OFFSET(r1)
+ PPC_REG_STORE r17, GPR17_OFFSET(r1)
+ PPC_REG_STORE r18, GPR18_OFFSET(r1)
+ PPC_REG_STORE r19, GPR19_OFFSET(r1)
+ PPC_REG_STORE r20, GPR20_OFFSET(r1)
+ PPC_REG_STORE r21, GPR21_OFFSET(r1)
+ PPC_REG_STORE r22, GPR22_OFFSET(r1)
+ PPC_REG_STORE r23, GPR23_OFFSET(r1)
+ PPC_REG_STORE r24, GPR24_OFFSET(r1)
+ PPC_REG_STORE r25, GPR25_OFFSET(r1)
+ PPC_REG_STORE r26, GPR26_OFFSET(r1)
+ PPC_REG_STORE r27, GPR27_OFFSET(r1)
+ PPC_REG_STORE r28, GPR28_OFFSET(r1)
+ PPC_REG_STORE r29, GPR29_OFFSET(r1)
+ PPC_REG_STORE r30, GPR30_OFFSET(r1)
+ PPC_REG_STORE r31, GPR31_OFFSET(r1)
+
+ /* Enable FPU and/or AltiVec */
+#if defined(PPC_MULTILIB_FPU) || defined(PPC_MULTILIB_ALTIVEC)
+ mfmsr SCRATCH_REGISTER_1
+#ifdef PPC_MULTILIB_FPU
+ ori SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, MSR_FP
+#endif
+#ifdef PPC_MULTILIB_ALTIVEC
+ oris SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, MSR_VE >> 16
+#endif
+ mtmsr SCRATCH_REGISTER_1
+ isync
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(0)
+ stvx v0, r1, SCRATCH_REGISTER_1
+ mfvscr v0
+ li SCRATCH_REGISTER_1, PPC_EXC_VSCR_OFFSET
+ stvewx v0, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(1)
+ stvx v1, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(2)
+ stvx v2, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(3)
+ stvx v3, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(4)
+ stvx v4, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(5)
+ stvx v5, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(6)
+ stvx v6, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(7)
+ stvx v7, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(8)
+ stvx v8, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(9)
+ stvx v9, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(10)
+ stvx v10, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(11)
+ stvx v11, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(12)
+ stvx v12, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(13)
+ stvx v13, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(14)
+ stvx v14, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(15)
+ stvx v15, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(16)
+ stvx v16, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(17)
+ stvx v17, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(18)
+ stvx v18, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(19)
+ stvx v19, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(20)
+ stvx v20, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(21)
+ stvx v21, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(22)
+ stvx v22, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(23)
+ stvx v23, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(24)
+ stvx v24, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(25)
+ stvx v25, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(26)
+ stvx v26, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(27)
+ stvx v27, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(28)
+ stvx v28, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(29)
+ stvx v29, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(30)
+ stvx v30, r1, SCRATCH_REGISTER_1
+ li SCRATCH_REGISTER_1, PPC_EXC_VR_OFFSET(31)
+ stvx v31, r1, SCRATCH_REGISTER_1
+ mfvrsave SCRATCH_REGISTER_1
+ stw SCRATCH_REGISTER_1, PPC_EXC_VRSAVE_OFFSET(r1)
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ stfd f0, PPC_EXC_FR_OFFSET(0)(r1)
+ mffs f0
+ stfd f0, PPC_EXC_FPSCR_OFFSET(r1)
+ stfd f1, PPC_EXC_FR_OFFSET(1)(r1)
+ stfd f2, PPC_EXC_FR_OFFSET(2)(r1)
+ stfd f3, PPC_EXC_FR_OFFSET(3)(r1)
+ stfd f4, PPC_EXC_FR_OFFSET(4)(r1)
+ stfd f5, PPC_EXC_FR_OFFSET(5)(r1)
+ stfd f6, PPC_EXC_FR_OFFSET(6)(r1)
+ stfd f7, PPC_EXC_FR_OFFSET(7)(r1)
+ stfd f8, PPC_EXC_FR_OFFSET(8)(r1)
+ stfd f9, PPC_EXC_FR_OFFSET(9)(r1)
+ stfd f10, PPC_EXC_FR_OFFSET(10)(r1)
+ stfd f11, PPC_EXC_FR_OFFSET(11)(r1)
+ stfd f12, PPC_EXC_FR_OFFSET(12)(r1)
+ stfd f13, PPC_EXC_FR_OFFSET(13)(r1)
+ stfd f14, PPC_EXC_FR_OFFSET(14)(r1)
+ stfd f15, PPC_EXC_FR_OFFSET(15)(r1)
+ stfd f16, PPC_EXC_FR_OFFSET(16)(r1)
+ stfd f17, PPC_EXC_FR_OFFSET(17)(r1)
+ stfd f18, PPC_EXC_FR_OFFSET(18)(r1)
+ stfd f19, PPC_EXC_FR_OFFSET(19)(r1)
+ stfd f20, PPC_EXC_FR_OFFSET(20)(r1)
+ stfd f21, PPC_EXC_FR_OFFSET(21)(r1)
+ stfd f22, PPC_EXC_FR_OFFSET(22)(r1)
+ stfd f23, PPC_EXC_FR_OFFSET(23)(r1)
+ stfd f24, PPC_EXC_FR_OFFSET(24)(r1)
+ stfd f25, PPC_EXC_FR_OFFSET(25)(r1)
+ stfd f26, PPC_EXC_FR_OFFSET(26)(r1)
+ stfd f27, PPC_EXC_FR_OFFSET(27)(r1)
+ stfd f28, PPC_EXC_FR_OFFSET(28)(r1)
+ stfd f29, PPC_EXC_FR_OFFSET(29)(r1)
+ stfd f30, PPC_EXC_FR_OFFSET(30)(r1)
+ stfd f31, PPC_EXC_FR_OFFSET(31)(r1)
+#endif
+
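+ /*
+ * Terminate: r3 is the fatal source (9 == RTEMS_FATAL_SOURCE_EXCEPTION) and
+ * r4 points to the BSP_Exception_frame within this exception frame.
+ */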
+ li r3, 9
+ addi r4, r1, FRAME_LINK_SPACE
+ b _Terminate
+ PPC64_NOP_FOR_LINKER_TOC_POINTER_RESTORE
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_global_handler.c b/bsps/powerpc/shared/exceptions/ppc_exc_global_handler.c
new file mode 100644
index 0000000000..9597f8719a
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_global_handler.c
@@ -0,0 +1,28 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp/vectors.h>
+
+void C_exception_handler(BSP_Exception_frame *excPtr)
+{
+ rtems_fatal(
+ RTEMS_FATAL_SOURCE_EXCEPTION,
+ (rtems_fatal_code) excPtr
+ );
+}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_hdl.c b/bsps/powerpc/shared/exceptions/ppc_exc_hdl.c
new file mode 100644
index 0000000000..ce1f326438
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_hdl.c
@@ -0,0 +1,116 @@
+/* PowerPC exception handling middleware; consult README for more
+ * information.
+ *
+ * Author: Till Straumann <strauman@slac.stanford.edu>, 2007
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp/vectors.h>
+
+#include <rtems/score/threaddispatch.h>
+
+/* Provide temp. storage space for a few registers.
+ * This is used by the assembly code prior to setting up
+ * the stack.
+ * One set is needed for each exception type with its
+ * own SRR0/SRR1 pair since such exceptions may nest.
+ *
+ * NOTE: The assembly code needs these variables to
+ * be in the .sdata section and accesses them
+ * via R13.
+ */
+uint32_t ppc_exc_lock_std = 0;
+uint32_t ppc_exc_lock_crit = 0;
+uint32_t ppc_exc_lock_mchk = 0;
+
+uint32_t ppc_exc_vector_register_std = 0;
+uint32_t ppc_exc_vector_register_crit = 0;
+uint32_t ppc_exc_vector_register_mchk = 0;
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+/* MSR bits to enable once critical status info is saved and the stack
+ * is switched; must be set depending on CPU type
+ *
+ * Default is set here for classic PPC CPUs with an MMU
+ * but is overridden from vectors_init.c
+ */
+uint32_t ppc_exc_msr_bits = MSR_IR | MSR_DR | MSR_RI;
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+int ppc_exc_handler_default(BSP_Exception_frame *f, unsigned int vector)
+{
+ return -1;
+}
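+
+/*
+ * Note: a non-zero return value tells the exception wrapper that the vector
+ * was not handled; the wrapper then falls back to the global handler.
+ */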
+
+#ifndef PPC_EXC_CONFIG_USE_FIXED_HANDLER
+
+exception_handler_t globalExceptHdl = C_exception_handler;
+
+/* Table of C-handlers */
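+/* The "[0 ... LAST_VALID_EXC]" range designator below is a GCC extension
+ * which initializes every table entry to the default handler. */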
+ppc_exc_handler_t ppc_exc_handler_table [LAST_VALID_EXC + 1] = {
+ [0 ... LAST_VALID_EXC] = ppc_exc_handler_default
+};
+
+#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+
+ppc_exc_handler_t ppc_exc_get_handler(unsigned vector)
+{
+ if (
+ vector <= LAST_VALID_EXC
+ && ppc_exc_handler_table [vector] != ppc_exc_handler_default
+ ) {
+ return ppc_exc_handler_table [vector];
+ } else {
+ return NULL;
+ }
+}
+
+rtems_status_code ppc_exc_set_handler(unsigned vector, ppc_exc_handler_t handler)
+{
+ if (vector <= LAST_VALID_EXC) {
+ if (handler == NULL) {
+ handler = ppc_exc_handler_default;
+ }
+
+ if (ppc_exc_handler_table [vector] != handler) {
+#ifndef PPC_EXC_CONFIG_USE_FIXED_HANDLER
+ ppc_exc_handler_table [vector] = handler;
+#else /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+ return RTEMS_RESOURCE_IN_USE;
+#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+ }
+
+ return RTEMS_SUCCESSFUL;
+ } else {
+ return RTEMS_INVALID_ID;
+ }
+}
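+
+/*
+ * Usage sketch (hypothetical BSP code): install a handler for the alignment
+ * exception.  Returning 0 is expected to report the exception as handled
+ * (the default handler returns -1):
+ *
+ *   static int my_align_handler(BSP_Exception_frame *f, unsigned vector)
+ *   {
+ *     // fix up or emulate the offending access, then
+ *     return 0;
+ *   }
+ *
+ *   rtems_status_code sc =
+ *     ppc_exc_set_handler(ASM_ALIGN_VECTOR, my_align_handler);
+ */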
+
+void ppc_exc_wrapup(BSP_Exception_frame *frame)
+{
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Per_CPU_Get();
+
+ if (cpu_self->isr_dispatch_disable) {
+ return;
+ }
+
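+ /*
+ * A thread dispatch enables interrupts, so they are disabled again and
+ * the per-CPU control is re-fetched before the loop condition is
+ * checked again.
+ */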
+ while (cpu_self->dispatch_necessary) {
+ rtems_interrupt_level level;
+
+ cpu_self->isr_dispatch_disable = 1;
+ cpu_self->thread_dispatch_disable_level = 1;
+ _Thread_Do_dispatch(cpu_self, frame->EXC_SRR1);
+ rtems_interrupt_local_disable(level);
+ (void) level;
+ cpu_self = _Per_CPU_Get();
+ }
+
+ cpu_self->isr_dispatch_disable = 0;
+}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c b/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
new file mode 100644
index 0000000000..4891ddcaa0
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
@@ -0,0 +1,187 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
+ *
+ * Copyright (C) 2009-2012 embedded brains GmbH.
+ *
+ * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
+ * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+
+#include <bsp/vectors.h>
+#include <bsp/fatal.h>
+
+uint32_t ppc_exc_cache_wb_check = 1;
+
+#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
+#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))
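+/* The #x stringization pastes the IVOR number into the mnemonic, so for
+ * example MTIVOR(8, vec) emits a "mtivor8" instruction. */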
+
+static void ppc_exc_initialize_booke(void *vector_base)
+{
+ /* Interrupt vector prefix register */
+ MTIVPR((uint32_t) vector_base);
+
+ if (
+ ppc_cpu_is_specific_e200(PPC_e200z0)
+ || ppc_cpu_is_specific_e200(PPC_e200z1)
+ ) {
+ /*
+ * These cores have hard-wired IVOR registers. An access will cause a
+ * program exception.
+ */
+ return;
+ }
+
+ /* Interrupt vector offset registers */
+ MTIVOR(0, ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
+ MTIVOR(1, ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
+ MTIVOR(2, ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
+ MTIVOR(3, ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
+ MTIVOR(4, ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
+ MTIVOR(5, ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
+ MTIVOR(6, ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
+ MTIVOR(7, ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
+ MTIVOR(8, ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
+ MTIVOR(9, ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
+ MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
+ MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
+ MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
+ MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
+ MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
+ MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
+ if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
+ MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
+ MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
+ MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
+ }
+ if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
+ MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
+ }
+}
+
+static void ppc_exc_fatal_error(void)
+{
+ bsp_fatal(PPC_FATAL_EXCEPTION_INITIALIZATION);
+}
+
+void ppc_exc_initialize_with_vector_base(
+ uintptr_t interrupt_stack_begin,
+ uintptr_t interrupt_stack_size,
+ void *vector_base
+)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ const ppc_exc_categories *const categories = ppc_exc_current_categories();
+ unsigned vector = 0;
+ uint32_t sda_base = 0;
+ uint32_t r13 = 0;
+
+ if (categories == NULL) {
+ ppc_exc_fatal_error();
+ }
+
+ /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
+ * early init code put it there.
+ */
+ __asm__ volatile (
+ "lis %0, _SDA_BASE_@h\n"
+ "ori %0, %0, _SDA_BASE_@l\n"
+ "mr %1, 13\n"
+ : "=r" (sda_base), "=r"(r13)
+ );
+
+ if (sda_base != r13) {
+ ppc_exc_fatal_error();
+ }
+
+ ppc_exc_initialize_interrupt_stack(interrupt_stack_begin, interrupt_stack_size);
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Use current MMU / RI settings when running C exception handlers */
+ ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);
+
+#ifdef __ALTIVEC__
+ /* Need vector unit enabled to save/restore altivec context */
+ ppc_exc_msr_bits |= MSR_VE;
+#endif
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
+ ppc_exc_initialize_booke(vector_base);
+ }
+
+ for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
+ ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);
+
+ if (category != PPC_EXC_INVALID) {
+ void *const vector_address = ppc_exc_vector_address(vector, vector_base);
+ uint32_t prologue [16];
+ size_t prologue_size = sizeof(prologue);
+
+ sc = ppc_exc_make_prologue(
+ vector,
+ vector_base,
+ category,
+ prologue,
+ &prologue_size
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ ppc_exc_fatal_error();
+ }
+
+ ppc_code_copy(vector_address, prologue, prologue_size);
+ }
+ }
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+ /* If we are on a classic PPC with MSR_DR enabled then
+ * assert that the mapping for at least this task's
+ * stack is write-back-caching enabled (see README/CAVEATS)
+ * Do this only if the cache is physically enabled.
+ * Since it is not easy to figure that out in a
+ * generic way we need help from the BSP: BSPs
+ * which run entirely w/o the cache may set
+ * ppc_exc_cache_wb_check to zero prior to calling
+ * this routine.
+ *
+ * We run this check only after exception handling is
+ * initialized so that we have some chance to get
+ * information printed if it fails.
+ *
+ * Note that it is unsafe to ignore this issue; if
+ * the check fails, do NOT disable it unless caches
+ * are always physically disabled.
+ */
+ if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
+ /* The size of 63 assumes cache lines are at most 32 bytes */
+ uint8_t dummy[63];
+ uintptr_t p = (uintptr_t) dummy;
+ /* If the dcbz instruction raises an alignment exception
+ * then the stack is mapped as write-thru or caching-disabled.
+ * The low-level code is not capable of dealing with this
+ * ATM.
+ */
+ p = (p + 31U) & ~31U;
+ __asm__ volatile ("dcbz 0, %0"::"b" (p));
+ /* If we make it thru here then things seem to be OK */
+ }
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_naked.S b/bsps/powerpc/shared/exceptions/ppc_exc_naked.S
new file mode 100644
index 0000000000..1c27575fea
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_naked.S
@@ -0,0 +1,207 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (c) 2009
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * D-82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include "ppc_exc_asm_macros.h"
+
+ .global ppc_exc_min_prolog_tmpl_naked
+
+ppc_exc_min_prolog_tmpl_naked:
+
+ stwu r1, -EXCEPTION_FRAME_END(r1)
+ stw VECTOR_REGISTER, VECTOR_OFFSET(r1)
+ li VECTOR_REGISTER, 0
+
+ /*
+ * We store the absolute branch target address here. It will be used
+ * to generate the branch operation in ppc_exc_make_prologue().
+ */
+ .int ppc_exc_wrap_naked
+
+ .global ppc_exc_wrap_naked
+ppc_exc_wrap_naked:
+
+ /* Save scratch registers */
+ stw SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
+ stw SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
+ stw SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
+
+ /* Save volatile registers */
+ stw r0, GPR0_OFFSET(r1)
+ stw r3, GPR3_OFFSET(r1)
+ stw r8, GPR8_OFFSET(r1)
+ stw r9, GPR9_OFFSET(r1)
+ stw r10, GPR10_OFFSET(r1)
+ stw r11, GPR11_OFFSET(r1)
+ stw r12, GPR12_OFFSET(r1)
+
+ /* Save CR */
+ mfcr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
+
+ /* Save SRR0 */
+ mfspr SCRATCH_REGISTER_0, srr0
+ stw SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
+
+ /* Save SRR1 */
+ mfspr SCRATCH_REGISTER_0, srr1
+ stw SCRATCH_REGISTER_0, SRR1_FRAME_OFFSET(r1)
+
+ /* Save CTR */
+ mfctr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_CTR_OFFSET(r1)
+
+ /* Save XER */
+ mfxer SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
+
+ /* Save LR */
+ mflr SCRATCH_REGISTER_0
+ stw SCRATCH_REGISTER_0, EXC_LR_OFFSET(r1)
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Load MSR bit mask */
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+
+ /*
+ * Change the MSR if necessary (MMU, RI), remember decision in
+ * non-volatile CR_MSR.
+ */
+ cmpwi CR_MSR, SCRATCH_REGISTER_0, 0
+ bne CR_MSR, wrap_change_msr_naked
+
+wrap_change_msr_done_naked:
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ /*
+ * Call high level exception handler
+ */
+
+ /*
+ * Get the handler table index from the vector number. We have to
+ * discard the exception type. Take only the least significant five
+ * bits (= LAST_VALID_EXC + 1) from the vector register. Multiply by
+ * four (= size of function pointer).
+ */
+ rlwinm SCRATCH_REGISTER_1, VECTOR_REGISTER, 2, 25, 29
+
+ /* Load handler table address */
+ LA SCRATCH_REGISTER_0, ppc_exc_handler_table
+
+ /* Load handler address */
+ lwzx SCRATCH_REGISTER_0, SCRATCH_REGISTER_0, SCRATCH_REGISTER_1
+
+ /*
+ * First parameter = exception frame pointer + FRAME_LINK_SPACE
+ *
+ * We add FRAME_LINK_SPACE to the frame pointer because the high level
+ * handler expects a BSP_Exception_frame structure.
+ */
+ addi r3, r1, FRAME_LINK_SPACE
+
+ /*
+ * Second parameter = vector number (r4 is the VECTOR_REGISTER)
+ *
+ * Discard the exception type and store the vector number
+ * in the vector register. Take only the least significant
+ * five bits (= LAST_VALID_EXC + 1).
+ */
+ rlwinm VECTOR_REGISTER, VECTOR_REGISTER, 0, 27, 31
+
+ /* Call handler */
+ mtctr SCRATCH_REGISTER_0
+ bctrl
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+ /* Restore MSR? */
+ bne CR_MSR, wrap_restore_msr_naked
+
+wrap_restore_msr_done_naked:
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
+
+ /* Restore XER and CTR */
+ lwz SCRATCH_REGISTER_0, EXC_XER_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_CTR_OFFSET(r1)
+ mtxer SCRATCH_REGISTER_0
+ mtctr SCRATCH_REGISTER_1
+
+ /* Restore CR and LR */
+ lwz SCRATCH_REGISTER_0, EXC_CR_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, EXC_LR_OFFSET(r1)
+ mtcr SCRATCH_REGISTER_0
+ mtlr SCRATCH_REGISTER_1
+
+ /* Restore volatile registers */
+ lwz r0, GPR0_OFFSET(r1)
+ lwz r3, GPR3_OFFSET(r1)
+ lwz r8, GPR8_OFFSET(r1)
+ lwz r9, GPR9_OFFSET(r1)
+ lwz r10, GPR10_OFFSET(r1)
+ lwz r11, GPR11_OFFSET(r1)
+ lwz r12, GPR12_OFFSET(r1)
+
+ /* Restore vector register */
+ lwz VECTOR_REGISTER, VECTOR_OFFSET(r1)
+
+ /* Restore scratch registers and SRRs */
+ lwz SCRATCH_REGISTER_0, SRR0_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_1, SRR1_FRAME_OFFSET(r1)
+ lwz SCRATCH_REGISTER_2, SCRATCH_REGISTER_2_OFFSET(r1)
+ mtspr srr0, SCRATCH_REGISTER_0
+ lwz SCRATCH_REGISTER_0, SCRATCH_REGISTER_0_OFFSET(r1)
+ mtspr srr1, SCRATCH_REGISTER_1
+ lwz SCRATCH_REGISTER_1, SCRATCH_REGISTER_1_OFFSET(r1)
+
+ /*
+ * We restore r1 from the frame rather than just popping (adding to
+ * current r1) since the exception handler might have done strange
+ * things (e.g. a debugger moving and relocating the stack).
+ */
+ lwz r1, 0(r1)
+
+ /* Return */
+ rfi
+
+#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
+
+wrap_change_msr_naked:
+
+ mfmsr SCRATCH_REGISTER_1
+ or SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ sync
+ isync
+ b wrap_change_msr_done_naked
+
+wrap_restore_msr_naked:
+
+ lwz SCRATCH_REGISTER_0, ppc_exc_msr_bits@sdarel(r13)
+ mfmsr SCRATCH_REGISTER_1
+ andc SCRATCH_REGISTER_1, SCRATCH_REGISTER_1, SCRATCH_REGISTER_0
+ mtmsr SCRATCH_REGISTER_1
+ sync
+ isync
+ b wrap_restore_msr_done_naked
+
+#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
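
For reference, the rlwinm pair in the wrapper above (table offset = vector
times four, vector = low five bits) amounts to the following C. This is a
self-contained sketch to make the indexing explicit; the names ending in
_sketch and the handler prototype are assumptions, not definitions from this
patch:

    #include <stdint.h>

    typedef struct BSP_Exception_frame_sketch BSP_Exception_frame_sketch;
    typedef int (*handler_sketch_t)(BSP_Exception_frame_sketch *, unsigned);

    /* Stands in for ppc_exc_handler_table: 32 entries, one per low-five-bit
     * vector value, each a 4-byte function pointer.
     */
    extern handler_sketch_t handler_table_sketch[32];

    static void dispatch_sketch(BSP_Exception_frame_sketch *frame, uint32_t vr)
    {
      unsigned vector = vr & 0x1f;                 /* low five bits only */
      (*handler_table_sketch[vector])(frame, vector);
    }
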
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_print.c b/bsps/powerpc/shared/exceptions/ppc_exc_print.c
new file mode 100644
index 0000000000..5a3aad9f73
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_print.c
@@ -0,0 +1,236 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp/vectors.h>
+
+#include <rtems/score/threaddispatch.h>
+
+#include <inttypes.h>
+
+#ifndef __SPE__
+ #define GET_GPR(gpr) (gpr)
+#else
+ #define GET_GPR(gpr) ((uintptr_t) ((gpr) >> 32))
+#endif
+
+/* T. Straumann: provide a stack trace
+ * <strauman@slac.stanford.edu>, 6/26/2001
+ */
+typedef struct LRFrameRec_ {
+ struct LRFrameRec_ *frameLink;
+ unsigned long *lr;
+} LRFrameRec, *LRFrame;
+
+#define STACK_CLAMP 50 /* in case we have a corrupted bottom */
+
+static uint32_t ppc_exc_get_DAR_dflt(void)
+{
+ if (ppc_cpu_is_60x())
+ return PPC_SPECIAL_PURPOSE_REGISTER(PPC_DAR);
+ else
+ switch (ppc_cpu_is_bookE()) {
+ default:
+ break;
+ case PPC_BOOKE_STD:
+ case PPC_BOOKE_E500:
+ return PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_DEAR);
+ case PPC_BOOKE_405:
+ return PPC_SPECIAL_PURPOSE_REGISTER(PPC405_DEAR);
+ }
+ return 0xdeadbeef;
+}
+
+uint32_t (*ppc_exc_get_DAR)(void) = ppc_exc_get_DAR_dflt;
+
+void BSP_printStackTrace(const BSP_Exception_frame *excPtr)
+{
+ LRFrame f;
+ int i;
+ LRFrame sp;
+ void *lr;
+
+ printk("Stack Trace: \n ");
+ if (excPtr) {
+ printk("IP: 0x%08" PRIxPTR ", ", excPtr->EXC_SRR0);
+ sp = (LRFrame) GET_GPR(excPtr->GPR1);
+ lr = (void *) excPtr->EXC_LR;
+ } else {
+ /* there's no macro for this */
+ __asm__ __volatile__("mr %0, 1":"=r"(sp));
+ lr = (LRFrame) ppc_link_register();
+ }
+ printk("LR: 0x%08" PRIxPTR "\n", (uintptr_t) lr);
+ for (f = (LRFrame) sp, i = 0; f->frameLink && i < STACK_CLAMP; f = f->frameLink) {
+ printk("--^ 0x%08" PRIxPTR "", (uintptr_t) (f->frameLink->lr));
+ if (!(++i % 5))
+ printk("\n");
+ }
+ if (i >= STACK_CLAMP) {
+ printk("Too many stack frames (stack possibly corrupted), giving up...\n");
+ } else {
+ if (i % 5)
+ printk("\n");
+ }
+}
+
+void _CPU_Exception_frame_print(const CPU_Exception_frame *excPtr)
+{
+ const Thread_Control *executing = _Thread_Executing;
+ bool synch = (int) excPtr->_EXC_number >= 0;
+ unsigned n = excPtr->_EXC_number & 0x7fff;
+
+ printk("exception vector %d (0x%x)\n", n, n);
+ printk(" next PC or address of fault = 0x%08" PRIxPTR "\n", excPtr->EXC_SRR0);
+ printk(" saved MSR = 0x%08" PRIxPTR "\n", excPtr->EXC_SRR1);
+
+ /* Try to find out more about the context where this happened */
+ printk(
+ " context = %s, ISR nest level = %" PRIu32 "\n",
+ _ISR_Nest_level == 0 ? "task" : "interrupt",
+ _ISR_Nest_level
+ );
+ printk(
+ " thread dispatch disable level = %" PRIu32 "\n",
+ _Thread_Dispatch_disable_level
+ );
+
+ /* Dump registers */
+
+ printk(" R0 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR0));
+ if (synch) {
+ printk(" R1 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR1));
+ printk(" R2 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR2));
+ } else {
+ printk(" ");
+ printk(" ");
+ }
+ printk(" R3 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR3));
+ printk(" R4 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR4));
+ printk(" R5 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR5));
+ printk(" R6 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR6));
+ printk(" R7 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR7));
+ printk(" R8 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR8));
+ printk(" R9 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR9));
+ printk(" R10 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR10));
+ printk(" R11 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR11));
+ printk(" R12 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR12));
+ if (synch) {
+ printk(" R13 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR13));
+ printk(" R14 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR14));
+ printk(" R15 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR15));
+ printk(" R16 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR16));
+ printk(" R17 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR17));
+ printk(" R18 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR18));
+ printk(" R19 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR19));
+ printk(" R20 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR20));
+ printk(" R21 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR21));
+ printk(" R22 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR22));
+ printk(" R23 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR23));
+ printk(" R24 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR24));
+ printk(" R25 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR25));
+ printk(" R26 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR26));
+ printk(" R27 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR27));
+ printk(" R28 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR28));
+ printk(" R29 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR29));
+ printk(" R30 = 0x%08" PRIxPTR "", GET_GPR(excPtr->GPR30));
+ printk(" R31 = 0x%08" PRIxPTR "\n", GET_GPR(excPtr->GPR31));
+ } else {
+ printk("\n");
+ }
+ printk(" CR = 0x%08" PRIx32 "\n", excPtr->EXC_CR);
+ printk(" CTR = 0x%08" PRIxPTR "\n", excPtr->EXC_CTR);
+ printk(" XER = 0x%08" PRIx32 "\n", excPtr->EXC_XER);
+ printk(" LR = 0x%08" PRIxPTR "\n", excPtr->EXC_LR);
+
+ /* Would be great to print DAR but unfortunately,
+ * that is not portable across different CPUs.
+ * AFAIK on classic PPC DAR is SPR 19, on the
+ * 405 we have DEAR = SPR 0x3d5 and bookE says
+ * DEAR = SPR 61 :-(
+ */
+ if (ppc_exc_get_DAR != NULL) {
+ char* reg = ppc_cpu_is_60x() ? " DAR" : "DEAR";
+ printk(" %s = 0x%08" PRIx32 "\n", reg, ppc_exc_get_DAR());
+ }
+ if (ppc_cpu_is_bookE()) {
+ unsigned esr, mcsr;
+ if (ppc_cpu_is_bookE() == PPC_BOOKE_405) {
+ esr = PPC_SPECIAL_PURPOSE_REGISTER(PPC405_ESR);
+ mcsr = PPC_SPECIAL_PURPOSE_REGISTER(PPC405_MCSR);
+ } else {
+ esr = PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_ESR);
+ mcsr = PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_MCSR);
+ }
+ printk(" ESR = 0x%08x\n", esr);
+ printk(" MCSR = 0x%08x\n", mcsr);
+ }
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ {
+ unsigned char *v = (unsigned char *) &excPtr->V0;
+ int i;
+ int j;
+
+ printk(" VSCR = 0x%08" PRIx32 "\n", excPtr->VSCR);
+ printk("VRSAVE = 0x%08" PRIx32 "\n", excPtr->VRSAVE);
+
+ for (i = 0; i < 32; ++i) {
+ printk(" V%02i = 0x", i);
+
+ for (j = 0; j < 16; ++j) {
+ printk("%02x", v[j]);
+ }
+
+ printk("\n");
+
+ v += 16;
+ }
+ }
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ {
+ uint64_t *f = (uint64_t *) &excPtr->F0;
+ int i;
+
+ printk("FPSCR = 0x%08" PRIu64 "\n", excPtr->FPSCR);
+
+ for (i = 0; i < 32; ++i) {
+ printk(" F%02i = 0x%016" PRIu64 "\n", i, f[i]);
+ }
+ }
+#endif
+
+ if (executing != NULL) {
+ const char *name = (const char *) &executing->Object.name;
+
+ printk(
+ " executing thread ID = 0x%08" PRIx32 ", name = %c%c%c%c\n",
+ executing->Object.id,
+ name [0],
+ name [1],
+ name [2],
+ name [3]
+ );
+ } else {
+ printk(" executing thread pointer is NULL");
+ }
+
+ BSP_printStackTrace(excPtr);
+}
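
Two usage notes on the printing support above, with a hedged sketch; the
function my_fault_address() and its install hook are made up, and a real BSP
would read whatever register actually holds the fault address on its CPU:

    #include <bsp/vectors.h>

    /* Since ppc_exc_get_DAR is a plain function pointer, a BSP whose
     * fault-address register is not covered by the default can repoint it,
     * e.g. from its start code.
     */
    static uint32_t my_fault_address(void)
    {
      return PPC_SPECIAL_PURPOSE_REGISTER(19);  /* SPR 19 = classic DAR, as an example */
    }

    static void install_fault_address_hook(void)
    {
      ppc_exc_get_DAR = my_fault_address;
    }

BSP_printStackTrace() also accepts a NULL frame pointer, in which case it walks
the caller's own stack starting from the current r1 and LR, which is handy for
debug printouts outside of exception context.
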
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_prologue.c b/bsps/powerpc/shared/exceptions/ppc_exc_prologue.c
new file mode 100644
index 0000000000..09307cd944
--- /dev/null
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_prologue.c
@@ -0,0 +1,181 @@
+/**
+ * @file
+ *
+ * @ingroup ppc_exc
+ *
+ * @brief PowerPC Exceptions implementation.
+ */
+
+/*
+ * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
+ *
+ * Copyright (C) 2009-2012 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <string.h>
+
+#include <bsp/vectors.h>
+
+/* Offset into minimal prolog where vector number is hardcoded */
+#define PPC_EXC_PROLOG_VEC_OFFSET 2
+
+/* Symbols are defined by the linker */
+extern const char ppc_exc_min_prolog_size [];
+extern const char ppc_exc_tgpr_clr_prolog_size [];
+
+/* Special prologue for handling register shadowing on 603-style CPUs */
+extern const uint32_t ppc_exc_tgpr_clr_prolog [];
+
+/*
+ * Classic prologue which determines the vector dynamically from the offset
+ * address. This must only be used for classic, synchronous exceptions with a
+ * vector offset aligned on a 256-byte boundary.
+ */
+extern const uint32_t ppc_exc_min_prolog_auto [];
+
+/* Minimal prologue templates */
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_std [];
+extern const uint32_t ppc_exc_min_prolog_sync_tmpl_std [];
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_p405_crit [];
+extern const uint32_t ppc_exc_min_prolog_sync_tmpl_p405_crit [];
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_bookE_crit [];
+extern const uint32_t ppc_exc_min_prolog_sync_tmpl_bookE_crit [];
+extern const uint32_t ppc_exc_min_prolog_sync_tmpl_e500_mchk [];
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_e500_mchk [];
+extern const uint32_t ppc_exc_min_prolog_tmpl_naked [];
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_normal [];
+
+static const uint32_t *const ppc_exc_prologue_templates [] = {
+ [PPC_EXC_CLASSIC] = ppc_exc_min_prolog_sync_tmpl_std,
+ [PPC_EXC_CLASSIC_ASYNC] = ppc_exc_min_prolog_async_tmpl_std,
+ [PPC_EXC_405_CRITICAL] = ppc_exc_min_prolog_sync_tmpl_p405_crit,
+ [PPC_EXC_405_CRITICAL_ASYNC] = ppc_exc_min_prolog_async_tmpl_p405_crit,
+ [PPC_EXC_BOOKE_CRITICAL] = ppc_exc_min_prolog_sync_tmpl_bookE_crit,
+ [PPC_EXC_BOOKE_CRITICAL_ASYNC] = ppc_exc_min_prolog_async_tmpl_bookE_crit,
+ [PPC_EXC_E500_MACHCHK] = ppc_exc_min_prolog_sync_tmpl_e500_mchk,
+ [PPC_EXC_E500_MACHCHK_ASYNC] = ppc_exc_min_prolog_async_tmpl_e500_mchk,
+ [PPC_EXC_NAKED] = ppc_exc_min_prolog_tmpl_naked
+};
+
+static bool ppc_exc_create_branch_op(
+ unsigned vector,
+ void *vector_base,
+ uint32_t *prologue,
+ size_t prologue_size
+)
+{
+ static const uintptr_t BRANCH_OP_CODE = 18 << 26;
+/* static const uintptr_t BRANCH_OP_LINK = 0x1; */
+ static const uintptr_t BRANCH_OP_ABS = 0x2;
+ static const uintptr_t BRANCH_OP_MSK = 0x3ffffff;
+ size_t branch_op_index = prologue_size / 4 - 1;
+ uintptr_t vector_address =
+ (uintptr_t) ppc_exc_vector_address(vector, vector_base);
+ uintptr_t branch_op_address = vector_address + 4 * branch_op_index;
+
+ /* This value may have BRANCH_OP_LINK set */
+ uintptr_t target_address = prologue [branch_op_index];
+
+ uintptr_t branch_target_address = target_address - branch_op_address;
+
+ /*
+ * We prefer to use a relative branch. This has the benefit that custom
+ * minimal prologues in a read-only area are relocatable.
+ */
+ if ((branch_target_address & ~BRANCH_OP_MSK) != 0) {
+ /* Target too far for relative branch (PC ± 32M) */
+ if (target_address >= 0xfe000001 || target_address < 0x01fffffd) {
+ /* Can use an absolute branch */
+ branch_target_address = (target_address | BRANCH_OP_ABS) & BRANCH_OP_MSK;
+ } else {
+ return false;
+ }
+ }
+
+ prologue [branch_op_index] = BRANCH_OP_CODE | branch_target_address;
+
+ return true;
+}
+
+rtems_status_code ppc_exc_make_prologue(
+ unsigned vector,
+ void *vector_base,
+ ppc_exc_category category,
+ uint32_t *prologue,
+ size_t *prologue_size
+)
+{
+ const uint32_t *prologue_template = NULL;
+ size_t prologue_template_size = 0;
+ bool fixup_vector = false;
+
+ if (!ppc_exc_is_valid_category(category)) {
+ return RTEMS_INVALID_NUMBER;
+ }
+
+ if (
+ ppc_cpu_has_shadowed_gprs()
+ && (vector == ASM_60X_IMISS_VECTOR
+ || vector == ASM_60X_DLMISS_VECTOR
+ || vector == ASM_60X_DSMISS_VECTOR)
+ ) {
+ prologue_template = ppc_exc_tgpr_clr_prolog;
+ prologue_template_size = (size_t) ppc_exc_tgpr_clr_prolog_size;
+ } else if (
+ category == PPC_EXC_CLASSIC
+ && ppc_cpu_is_bookE() != PPC_BOOKE_STD
+ && ppc_cpu_is_bookE() != PPC_BOOKE_E500
+ ) {
+ prologue_template = ppc_exc_min_prolog_auto;
+ prologue_template_size = (size_t) ppc_exc_min_prolog_size;
+#ifdef PPC_EXC_CONFIG_USE_FIXED_HANDLER
+ } else if (
+ category == PPC_EXC_CLASSIC_ASYNC
+ && ppc_cpu_is_bookE() == PPC_BOOKE_E500
+ && (ppc_interrupt_get_disable_mask() & MSR_CE) == 0
+ ) {
+ prologue_template = ppc_exc_min_prolog_async_tmpl_normal;
+ prologue_template_size = 16;
+ fixup_vector = true;
+#endif /* PPC_EXC_CONFIG_USE_FIXED_HANDLER */
+ } else {
+ prologue_template = ppc_exc_prologue_templates [category];
+ prologue_template_size = (size_t) ppc_exc_min_prolog_size;
+ fixup_vector = true;
+ }
+
+ if (prologue_template_size <= *prologue_size) {
+ *prologue_size = prologue_template_size;
+
+ memcpy(prologue, prologue_template, prologue_template_size);
+
+ if (
+ !ppc_exc_create_branch_op(
+ vector,
+ vector_base,
+ prologue,
+ prologue_template_size
+ )
+ ) {
+ return RTEMS_INVALID_ADDRESS;
+ }
+
+ if (fixup_vector) {
+ if (vector <= 0x7fffU) {
+ prologue [PPC_EXC_PROLOG_VEC_OFFSET] =
+ (prologue [PPC_EXC_PROLOG_VEC_OFFSET] & 0xffff8000U)
+ | (vector & 0x7fffU);
+ } else {
+ return RTEMS_INVALID_ID;
+ }
+ }
+ } else {
+ return RTEMS_INVALID_SIZE;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
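
Two worked examples may help when reading ppc_exc_create_branch_op() and the
fixup_vector step above; all addresses and the vector number are made up:

    /* Relative branch: prologue installed at vector address 0x0500, branch
     * word at index 3 (byte offset 12), wrapper code at 0x4000.
     *
     *   branch_op_address     = 0x0500 + 12       = 0x050c
     *   branch_target_address = 0x4000 - 0x050c   = 0x3af4  (fits in 26 bits)
     *   branch word           = (18 << 26) | 0x3af4 = 0x48003af4   "b 0x4000"
     *
     * Only when the displacement does not fit, and the target lies in the
     * first or last 32 MiB of the address space, is BRANCH_OP_ABS set to form
     * an absolute branch instead.
     */

    /* Vector fixup: word PPC_EXC_PROLOG_VEC_OFFSET (2) of a template is the
     * "li VECTOR_REGISTER, 0" instruction (VECTOR_REGISTER is r4, so the word
     * is 0x38800000).  For vector 5, say, the patched word becomes
     *
     *   (0x38800000 & 0xffff8000) | 5 = 0x38800005           "li r4, 5"
     *
     * which is how each installed copy of a prologue learns its own vector.
     */
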
diff --git a/bsps/powerpc/ss555/start/vectors.S b/bsps/powerpc/ss555/start/vectors.S
new file mode 100644
index 0000000000..35d2f23a30
--- /dev/null
+++ b/bsps/powerpc/ss555/start/vectors.S
@@ -0,0 +1,201 @@
+/*
+ * vectors.S
+ *
+ * This file contains the assembly code for the PowerPC exception veneers
+ * for RTEMS.
+ *
+ *
+ * MPC5xx port sponsored by Defence Research and Development Canada - Suffield
+ * Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
+ *
+ * Derived from libbsp/powerpc/mbx8xx/vectors/vectors.S,
+ *
+ * (c) 1999, Eric Valette valette@crf.canon.fr
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/cpu.h>
+#include <libcpu/vectors.h>
+
+#define SYNC \
+ sync; \
+ isync
+
+
+/*
+ * Hardware exception vector table.
+ *
+ * The MPC555 can be configured to use a compressed vector table with 8
+ * bytes per entry, rather than the usual 0x100 bytes of other PowerPC
+ * devices. The following macro uses this feature to save the better part
+ * of 8 kbytes of flash ROM.
+ *
+ * Each vector table entry has room for only a simple branch instruction
+ * which branches to a prologue specific to that exception. This
+ * exception-specific prologue begins the context save, loads the exception
+ * number into a register, and jumps to a common exception prologue, below.
+ */
+
+ .macro vectors num=0, total=NUM_EXCEPTIONS /* create vector table */
+
+/* vector table entry */
+ .section .vectors, "ax"
+
+ ba specific_prologue\@ /* run specific prologue */
+ .long 0 /* each entry is 8 bytes */
+
+/* exception-specific prologue */
+ .text
+
+specific_prologue\@:
+ stwu r1, -EXCEPTION_FRAME_END(r1) /* open stack frame */
+ stw r4, GPR4_OFFSET(r1) /* preserve register */
+ li r4, \num /* get exception number */
+ b common_prologue /* run common prologue */
+
+/* invoke macro recursively to create remainder of table */
+ .if \total - (\num + 1)
+ vectors "(\num + 1)", \total
+ .endif
+
+ .endm
+
+
+/* invoke macro to create entire vector table */
+ vectors
+
+
+/*
+ * Common exception prologue.
+ *
+ * Because the MPC555 vector table is in flash ROM, it's not possible to
+ * change the exception handlers by overwriting them at run-time, so this
+ * common exception prologue uses a table of exception handler pointers to
+ * provide equivalent flexibility.
+ *
+ * When the actual exception handler is run, R1 points to the base of a new
+ * exception stack frame, in which R3, R4 and LR have been saved. R4 holds
+ * the exception number.
+ */
+ .text
+
+common_prologue:
+ stw r3, GPR3_OFFSET(r1) /* preserve registers */
+ mflr r3
+ stw r3, EXC_LR_OFFSET(r1)
+
+ slwi r3, r4, 2 /* make table offset */
+ addis r3, r3, exception_handler_table@ha /* point to entry */
+ addi r3, r3, exception_handler_table@l
+ lwz r3, 0(r3) /* get entry */
+ mtlr r3 /* run it */
+ blr
+
+
+/*
+ * Default exception handler.
+ *
+ * The function initialize_exceptions() initializes all of the entries in
+ * the exception handler table with pointers to this routine, which saves
+ * the remainder of the interrupted code's state, then calls
+ * C_default_exception_handler() to dump registers.
+ *
+ * On entry, R1 points to a new exception stack frame in which R3, R4, and
+ * LR have been saved. R4 holds the exception number.
+ */
+ .text
+
+PUBLIC_VAR(default_exception_handler)
+SYM (default_exception_handler):
+ /*
+ * Save the interrupted code's program counter and MSR. Beyond this
+ * point, all exceptions are recoverable. Use an RCPU-specific SPR
+ * to set the RI bit in the MSR to indicate the recoverable state.
+ */
+ mfsrr0 r3
+ stw r3, SRR0_FRAME_OFFSET(r1)
+ mfsrr1 r3
+ stw r3, SRR1_FRAME_OFFSET(r1)
+
+ mtspr eid, r3 /* set MSR[RI], clear MSR[EE] */
+ SYNC
+
+ /*
+ * Save the remainder of the general-purpose registers.
+ *
+ * Compute the value of R1 at exception entry before storing it in
+ * the frame.
+ *
+ * Note that R2 should never change (it's the EABI pointer to
+ * .sdata2), but we save it just in case.
+ *
+ * Recall that R3 and R4 were saved by the specific- and
+ * common-exception handlers before entry to this routine.
+ */
+ stw r0, GPR0_OFFSET(r1)
+ addi r0, r1, EXCEPTION_FRAME_END
+ stw r0, GPR1_OFFSET(r1)
+ stw r2, GPR2_OFFSET(r1)
+ stmw r5, GPR5_OFFSET(r1) /* save R5 to R31 */
+
+ /*
+ * Save the remainder of the UISA special-purpose registers. Recall
+ * that LR was saved before entry.
+ */
+ mfcr r0
+ stw r0, EXC_CR_OFFSET(r1)
+ mfctr r0
+ stw r0, EXC_CTR_OFFSET(r1)
+ mfxer r0
+ stw r0, EXC_XER_OFFSET(r1)
+
+ /*
+ * Call C-language portion of the default exception handler, passing
+ * in the address of the frame.
+ *
+ * To simplify things a bit, we assume that the target routine is
+ * within +/- 32 Mbyte from here, which is a reasonable assumption
+ * on the MPC555.
+ */
+ stw r4, EXCEPTION_NUMBER_OFFSET(r1) /* save exception number */
+ addi r3, r1, 0x8 /* get frame address */
+ bl C_default_exception_handler /* call handler */
+
+ /*
+ * Restore UISA special-purpose registers.
+ */
+ lwz r0, EXC_XER_OFFSET(r1)
+ mtxer r0
+ lwz r0, EXC_CTR_OFFSET(r1)
+ mtctr r0
+ lwz r0, EXC_CR_OFFSET(r1)
+ mtcr r0
+ lwz r0, EXC_LR_OFFSET(r1)
+ mtlr r0
+
+ /*
+ * Restore most general-purpose registers.
+ */
+ lmw r2, GPR2_OFFSET(r1)
+
+ /*
+ * Restore the interrupted code's program counter and MSR, but first
+ * use an RCPU-specific special-purpose register to clear the RI
+ * bit, indicating that exceptions are temporarily non-recoverable.
+ */
+ mtspr nri, r0 /* clear MSR[RI] */
+ SYNC
+
+ lwz r0, SRR1_FRAME_OFFSET(r1)
+ mtsrr1 r0
+ lwz r0, SRR0_FRAME_OFFSET(r1)
+ mtsrr0 r0
+
+ /*
+ * Restore the final GPR, close the stack frame, and return to the
+ * interrupted code.
+ */
+ lwz r0, GPR0_OFFSET(r1)
+ addi r1, r1, EXCEPTION_FRAME_END
+ SYNC
+ rfi
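
In other words, the common prologue is a plain table dispatch. A conceptual
mapping of its instructions (sketch only; the table entries are assembly
routines such as default_exception_handler that expect a partially saved frame,
not ordinary C functions):

    /*   offset  = exception_number << 2               (slwi r3, r4, 2)
     *   entry   = &exception_handler_table + offset   (addis/addi)
     *   handler = *entry                              (lwz r3, 0(r3))
     *   jump to handler                               (mtlr r3; blr)
     */

The table itself is defined in vectors_init.c below and is filled with
default_exception_handler by initialize_exceptions().
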
diff --git a/bsps/powerpc/ss555/start/vectors_init.c b/bsps/powerpc/ss555/start/vectors_init.c
new file mode 100644
index 0000000000..342ca5509a
--- /dev/null
+++ b/bsps/powerpc/ss555/start/vectors_init.c
@@ -0,0 +1,137 @@
+/*
+ * vectors_init.c  Exception handling initialisation (and generic handler).
+ *
+ * This file describes the data structures and the functions implemented
+ * by RTEMS to handle exceptions.
+ */
+
+/*
+ * MPC5xx port sponsored by Defence Research and Development Canada - Suffield
+ * Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
+ *
+ * Derived from libbsp/powerpc/mbx8xx/vectors/vectors_init.c:
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+#include <inttypes.h>
+#include <rtems/bspIo.h>
+#include <libcpu/vectors.h>
+#include <libcpu/raw_exception.h>
+#include <bsp/irq.h>
+
+extern rtems_exception_handler_t default_exception_handler;
+
+static rtems_raw_except_global_settings exception_config;
+static rtems_raw_except_connect_data exception_table[NUM_EXCEPTIONS];
+rtems_exception_handler_t* exception_handler_table[NUM_EXCEPTIONS];
+
+void C_default_exception_handler(CPU_Exception_frame* excPtr)
+{
+ int recoverable = 0;
+
+ printk("exception handler called for exception %" PRIu32 "\n",
+ excPtr->_EXC_number);
+ printk("\t Next PC or Address of fault = %" PRIxPTR "\n", excPtr->EXC_SRR0);
+ printk("\t Saved MSR = %" PRIxPTR "\n", excPtr->EXC_SRR1);
+ printk("\t R0 = %" PRIxPTR "\n", excPtr->GPR0);
+ printk("\t R1 = %" PRIxPTR "\n", excPtr->GPR1);
+ printk("\t R2 = %" PRIxPTR "\n", excPtr->GPR2);
+ printk("\t R3 = %" PRIxPTR "\n", excPtr->GPR3);
+ printk("\t R4 = %" PRIxPTR "\n", excPtr->GPR4);
+ printk("\t R5 = %" PRIxPTR "\n", excPtr->GPR5);
+ printk("\t R6 = %" PRIxPTR "\n", excPtr->GPR6);
+ printk("\t R7 = %" PRIxPTR "\n", excPtr->GPR7);
+ printk("\t R8 = %" PRIxPTR "\n", excPtr->GPR8);
+ printk("\t R9 = %" PRIxPTR "\n", excPtr->GPR9);
+ printk("\t R10 = %" PRIxPTR "\n", excPtr->GPR10);
+ printk("\t R11 = %" PRIxPTR "\n", excPtr->GPR11);
+ printk("\t R12 = %" PRIxPTR "\n", excPtr->GPR12);
+ printk("\t R13 = %" PRIxPTR "\n", excPtr->GPR13);
+ printk("\t R14 = %" PRIxPTR "\n", excPtr->GPR14);
+ printk("\t R15 = %" PRIxPTR "\n", excPtr->GPR15);
+ printk("\t R16 = %" PRIxPTR "\n", excPtr->GPR16);
+ printk("\t R17 = %" PRIxPTR "\n", excPtr->GPR17);
+ printk("\t R18 = %" PRIxPTR "\n", excPtr->GPR18);
+ printk("\t R19 = %" PRIxPTR "\n", excPtr->GPR19);
+ printk("\t R20 = %" PRIxPTR "\n", excPtr->GPR20);
+ printk("\t R21 = %" PRIxPTR "\n", excPtr->GPR21);
+ printk("\t R22 = %" PRIxPTR "\n", excPtr->GPR22);
+ printk("\t R23 = %" PRIxPTR "\n", excPtr->GPR23);
+ printk("\t R24 = %" PRIxPTR "\n", excPtr->GPR24);
+ printk("\t R25 = %" PRIxPTR "\n", excPtr->GPR25);
+ printk("\t R26 = %" PRIxPTR "\n", excPtr->GPR26);
+ printk("\t R27 = %" PRIxPTR "\n", excPtr->GPR27);
+ printk("\t R28 = %" PRIxPTR "\n", excPtr->GPR28);
+ printk("\t R29 = %" PRIxPTR "\n", excPtr->GPR29);
+ printk("\t R30 = %" PRIxPTR "\n", excPtr->GPR30);
+ printk("\t R31 = %" PRIxPTR "\n", excPtr->GPR31);
+ printk("\t CR = %" PRIx32 "\n", excPtr->EXC_CR);
+ printk("\t CTR = %" PRIxPTR "\n", excPtr->EXC_CTR);
+ printk("\t XER = %" PRIx32 "\n", excPtr->EXC_XER);
+ printk("\t LR = %" PRIxPTR "\n", excPtr->EXC_LR);
+ if (excPtr->_EXC_number == ASM_DEC_VECTOR)
+ recoverable = 1;
+ if (excPtr->_EXC_number == ASM_SYS_VECTOR)
+#ifdef TEST_RAW_EXCEPTION_CODE
+ recoverable = 1;
+#else
+ recoverable = 0;
+#endif
+ if (!recoverable) {
+ printk("unrecoverable exception!!! Push reset button\n");
+ while(1);
+ }
+}
+
+static void nop_except_enable(const rtems_raw_except_connect_data* ptr)
+{
+}
+
+static int except_always_enabled(const rtems_raw_except_connect_data* ptr)
+{
+ return 1;
+}
+
+void initialize_exceptions(void)
+{
+ int i;
+
+ /*
+ * Initialize all entries of the exception table with a description of the
+ * default exception handler.
+ */
+ exception_config.exceptSize = NUM_EXCEPTIONS;
+ exception_config.rawExceptHdlTbl = &exception_table[0];
+ exception_config.defaultRawEntry.exceptIndex = 0;
+ exception_config.defaultRawEntry.hdl.vector = 0;
+ exception_config.defaultRawEntry.hdl.raw_hdl = default_exception_handler;
+
+ for (i = 0; i < exception_config.exceptSize; i++) {
+ printk("installing exception number %d\n", i);
+ exception_table[i].exceptIndex = i;
+ exception_table[i].hdl = exception_config.defaultRawEntry.hdl;
+ exception_table[i].hdl.vector = i;
+ exception_table[i].on = nop_except_enable;
+ exception_table[i].off = nop_except_enable;
+ exception_table[i].isOn = except_always_enabled;
+ }
+
+ /*
+ * Now pass the initialized exception table to the exceptions module which
+ * will install the handler pointers in the exception handler table.
+ */
+ if (!mpc5xx_init_exceptions(&exception_config)) {
+ /*
+ * At this stage we may not call CPU_Panic because it uses exceptions!!!
+ */
+ printk("Exception handling initialization failed\n");
+ printk("System locked\n"); while(1);
+ }
+ else {
+ printk("Exception handling initialization done\n");
+ }
+}
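
A sketch of how this fits into BSP start-up; the surrounding start code is
hypothetical, and the only firm point is that initialize_exceptions() runs
early, before real handlers are installed and before the first exception can
usefully be taken:

    void bsp_start_sketch(void)
    {
      initialize_exceptions();  /* every vector -> default_exception_handler */
      /* ... install the real handlers and the IRQ support afterwards ... */
    }
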