author    Joel Sherrill <joel.sherrill@OARcorp.com> 1999-12-02 14:31:19 +0000
committer Joel Sherrill <joel.sherrill@OARcorp.com> 1999-12-02 14:31:19 +0000
commit    acc25eec35e186abc118b9ca4f097e22fc6b4846 (patch)
tree      7fa75871c51372e70cbd9cb50b0a2fab55cfa750 /c/src/lib/libbsp/powerpc/support
parent    Merge of the mcp750 and mvme2307 BSPs by Eric Valette <valette@crf.canon.fr>. (diff)
download  rtems-acc25eec35e186abc118b9ca4f097e22fc6b4846.tar.bz2
Merge of the mcp750 and mvme2307 BSPs by Eric Valette <valette@crf.canon.fr>.
As part of this effort, the mpc750 libcpu code is now shared with the ppc6xx.
Diffstat (limited to 'c/src/lib/libbsp/powerpc/support')
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl  |    9
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c      |  116
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h      |  979
-rw-r--r--  c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S  |  396
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/README     |   80
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO       |    8
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl  |    4
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c      |  853
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h      | 1200
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S  |  809
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S |  268
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c |   61
-rw-r--r--  c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S    |  132
13 files changed, 4915 insertions, 0 deletions
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl b/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..68f8116fe9
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/c_isr.inl
@@ -0,0 +1,9 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ register unsigned int isr_nesting_level;
+ /*
+ * Move from special purpose register SPRG0 (SPR 272): mfspr r3, SPRG0
+ */
+ asm volatile ("mfspr %0, 272" : "=r" (isr_nesting_level));
+ return isr_nesting_level;
+}
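+
+/*
+ * Usage sketch (illustration only, not part of the original file; the
+ * routine name below is hypothetical): code that must not run in
+ * interrupt context can use this predicate to defer its work.
+ *
+ *   void flush_buffers( void )
+ *   {
+ *     if ( _ISR_Is_in_progress() )
+ *       return;    ... called from an ISR, so defer the flush ...
+ *     ...
+ *   }
+ */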
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c
new file mode 100644
index 0000000000..e1c6eac4fd
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.c
@@ -0,0 +1,116 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ _CPU_MSR_GET( msr_value );
+
+ if (!(new_level & CPU_MODES_INTERRUPT_MASK)) {
+ msr_value |= MSR_EE;
+ }
+ else {
+ msr_value &= ~MSR_EE;
+ }
+
+ the_context->msr = msr_value;
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ the_context->pc = (unsigned32)entry_point;
+}
+
+
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+}
+
+
+
+
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h
new file mode 100644
index 0000000000..145e2924eb
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu.h
@@ -0,0 +1,979 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#include <libcpu/cpu.h>
+
+#ifndef ASM
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined,
+ * something calls _Thread_Enable_dispatch which in turn calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU, although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then no interrupt stack memory is allocated.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+/*
+ * Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 0
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not expect to utilize the FP unit DOES,
+ * then one cannot easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky.
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
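+/*
+ * Illustrative sketch only (not in the original header): with GNU C the
+ * macro is placed after the declarator.  Per the NOTE above, the
+ * Priority Bit Map table is the current user; a declaration would look
+ * something like:
+ *
+ *   Priority_Bit_map_control _Priority_Bit_map[16] CPU_STRUCTURE_ALIGNMENT;
+ */
+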
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinctions between the caller/callee saves registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it will simply consist of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+ unsigned32 calleeLr; /* link register used by callees: SVR4/EABI */
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+} rtems_cpu_table;
+
+/*
+ * Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC MPC750 specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
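+
+/*
+ * Usage sketch (illustration only; install_ram_vectors is a
+ * hypothetical BSP helper): BSP code can consult these accessors
+ * instead of reading _CPU_Table directly.
+ *
+ *   if ( rtems_cpu_configuration_get_exceptions_in_ram() )
+ *     install_ram_vectors();
+ */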
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+
+#endif /* ndef ASM */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
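+
+/*
+ * Illustrative sketch only (not in the original header): picking the
+ * interrupt level out of an RTEMS mode word with the mask above.  On
+ * this port a level of 0 leaves external exceptions (MSR_EE) enabled:
+ *
+ *   unsigned32 level = mode & CPU_MODES_INTERRUPT_MASK;
+ */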
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+#ifndef ASM
+
+SCORE_EXTERN struct {
+ unsigned32 *Disable_level;
+ void *Stack;
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+#endif /* ndef ASM */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ * Should be large enough to run all RTEMS tests. This ensures
+ * that a "reasonable" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/*
+ * Needed for Interrupt stack
+ */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
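+/*
+ * Usage sketch (illustration only; the interrupt disable/enable macros
+ * themselves are expected to come from <libcpu/cpu.h>, included above):
+ *
+ *   unsigned32 _isr_cookie;
+ *
+ *   _CPU_ISR_Disable( _isr_cookie );
+ *       ... critical section ...
+ *   _CPU_ISR_Enable( _isr_cookie );
+ */
+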
+#define loc_string(a,b) a " (" #b ")\n"
+
+#ifndef ASM
+
+static inline unsigned32 _CPU_ISR_Get_level( void )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (msr & MSR_EE) return 0;
+ else return 1;
+}
+
+static inline void _CPU_ISR_Set_level( unsigned32 level )
+{
+ register unsigned int msr;
+ _CPU_MSR_GET(msr);
+ if (!(level & CPU_MODES_INTERRUPT_MASK)) {
+ msr |= MSR_EE;
+ }
+ else {
+ msr &= ~MSR_EE;
+ }
+ _CPU_MSR_SET(msr);
+}
+
+#define _CPU_ISR_install_vector(irq, new, old) {BSP_panic("_CPU_ISR_install_vector called\n");}
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _BSP_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variations in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
+ * _CPU_Priority_bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ *      - _number = 0
+ *        if _value > 0x00ff
+ *          _value >>= 8
+ *          _number = 8;
+ *
+ *        if _value > 0x000f
+ *          _value >>= 4
+ *          _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
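+
+/*
+ * Worked example (illustration only): cntlzw counts leading zeros, so
+ * for _value = 0x40000000 the macro stores 1 in _output, and for
+ * _value = 0x00008000 it stores 16.  Bit number 0 therefore names the
+ * MSB, which matches the _CPU_Priority_Mask() definition below.
+ */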
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to ensure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so
+ * the code will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
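+
+/*
+ * Portable fallback sketch (illustration only; this port uses the
+ * rlwimi sequence above instead):
+ *
+ *   swapped = ((value >> 24) & 0x000000ff) |
+ *             ((value >>  8) & 0x0000ff00) |
+ *             ((value <<  8) & 0x00ff0000) |
+ *             ((value << 24) & 0xff000000);
+ */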
+
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+    asm volatile( "mtdec %0" : : "r" ((_clicks)) ); \
+ } while (0)
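+
+/*
+ * Usage sketch (illustration only): with clicks_per_usec from the CPU
+ * table, a 10 millisecond tick could be programmed as
+ *
+ *   PPC_Set_decrementer( _CPU_Table.clicks_per_usec * 10000 );
+ */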
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
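+
+/*
+ * Usage sketch (illustration only): the mftbu/mftb/mftbu loop above
+ * rereads the upper half to guard against a carry between the two
+ * halves.  Measuring an interval in timebase ticks:
+ *
+ *   unsigned64 start = PPC_Get_timebase_register();
+ *   ... work being timed ...
+ *   unsigned64 ticks = PPC_Get_timebase_register() - start;
+ */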
+
+#endif /* ndef ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..213e094fa6
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/new_exception_processing/cpu_asm.S
@@ -0,0 +1,396 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+
+ .set IP_LINK, 0
+ .set IP_0, (IP_LINK + 8)
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to save the FP context
+ * to is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/README b/c/src/lib/libbsp/powerpc/support/old_exception_processing/README
new file mode 100644
index 0000000000..c72bebfe0c
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/README
@@ -0,0 +1,80 @@
+#
+# $Id$
+#
+
+There are various issues regarding this port:
+
+
+
+1) Legal
+
+This port is written by Andrew Bray <andy@i-cubed.co.uk>, and
+is copyright 1995 i-cubed ltd.
+
+This port was later updated by Joel Sherrill <joel@OARcorp.com>
+to test the support for the PPC603, PPC603e, and MPC604. This
+was tested on the PowerPC simulator PSIM and a VMEbus single board
+computer.
+
+2) CPU support.
+
+This release fully supports the PPC403GA, PPC403GB, PPC603, PPC603e,
+MPC604, MPC750, and numerous MPC8xx processors. A good faith attempt
+has been made to include support other models based upon available
+documentation including the MPC5xx. There are two interrupt structures
+supported by the PowerPC port. The newer structure is supported by
+all the MPC750 and MPC604 BSPs. This structure is required to use
+the RDBG remote debugging support.
+
+This port was originally written and tested on the PPC403GA (using
+software floating point). Current ports are tested primarily on
+60x CPUs using the PowerPC simulator PSIM.
+
+Andrew Bray received assistance during the initial porting effort
+from IBM and Blue Micro and we would like to gratefully acknowledge
+that help.
+
+The support for the PPC602 processor is incomplete as only sketchy
+data is currently available. Perhaps this model has been dropped.
+
+3) Application Binary Interface
+
+In the context of RTEMS, the ABI is of interest for the following
+aspects:
+
+a) Register usage. Which registers are used to provide static variable
+ linkage, stack pointer etc.
+
+b) Function calling convention. How parameters are passed, how function
+ variables should be invoked, how values are returned, etc.
+
+c) Stack frame layout.
+
+I am aware of a number of ABIs for the PowerPC:
+
+a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
+ This is the only ABI supported by versions of GCC before 2.7.0.
+
+b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
+ to the PowerPC.
+
+c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
+ operating system interface defined. It is promoted by SunSoft, Motorola,
+ and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
+
+d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
+ but is currently halfway in between.
+
+This port was built and tested using the PowerOpen ABI, with the following
+caveat: we used an ELF assembler and linker. So some attention may be
+required on the assembler files to get them through a traditional (XCOFF)
+PowerOpen assembler.
+
+This port contains support for the other ABIs, but this may prove to be
+incomplete as it is untested.
+
+The RTEMS PowerPC port supports EABI as the primary ABI. The powerpc-rtems
+GNU toolset configuration is EABI.
+
+Andrew Bray, 4 December 1995
+Joel Sherrill, 16 July 1997
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO b/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO
new file mode 100644
index 0000000000..64c96cb14c
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/TODO
@@ -0,0 +1,8 @@
+#
+# $Id$
+#
+
+Todo list:
+
+Maybe decode external interrupts like the HPPA does.
+ See c/src/lib/libcpu/powerpc/ppc403/ictrl/* for implementation on ppc403
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl b/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl
new file mode 100644
index 0000000000..706d4f7e4f
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/c_isr.inl
@@ -0,0 +1,4 @@
+RTEMS_INLINE_ROUTINE boolean _ISR_Is_in_progress( void )
+{
+ return (_ISR_Nest_level != 0);
+}
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c
new file mode 100644
index 0000000000..7d6824cb26
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.c
@@ -0,0 +1,853 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+
+/*
+ * These are for testing purposes.
+ */
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+static void ppc_spurious(int, CPU_Interrupt_frame *);
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ proc_ptr handler = (proc_ptr)ppc_spurious;
+ int i;
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ register unsigned32 r2 = 0;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ register unsigned32 r13 = 0;
+
+ asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
+ _CPU_IRQ_info.Default_r13 = r13;
+#endif
+
+ asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
+ _CPU_IRQ_info.Default_r2 = r2;
+#endif
+
+ _CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
+ _CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
+ _CPU_IRQ_info.Vector_table = _ISR_Vector_table;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ _CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
+#endif
+ _CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
+ _CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
+
+#if (PPC_USE_SPRG)
+ i = (int)&_CPU_IRQ_info;
+ asm volatile("mtspr 0x113, %0" : "=r" (i) : "0" (i)); /* SPRG 3 */
+#endif
+
+ /*
+ * Store Msr Value in the IRQ info structure.
+ */
+ _CPU_MSR_Value(_CPU_IRQ_info.msr_initial);
+
+#if (PPC_USE_SPRG)
+ i = _CPU_IRQ_info.msr_initial;
+ asm volatile("mtspr 0x112, %0" : "=r" (i) : "0" (i)); /* SPRG 2 */
+#endif
+
+ if ( cpu_table->spurious_handler )
+ handler = (proc_ptr)cpu_table->spurious_handler;
+
+ for (i = 0; i < PPC_INTERRUPT_MAX; i++)
+ _ISR_Vector_table[i] = handler;
+
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Calculate_level
+ *
+ * The PowerPC puts its interrupt enable status in the MSR register
+ * which also contains things like endianness control. To be more
+ * awkward, the layout varies from processor to processor. This
+ * is why it was necessary to adopt a scheme which allowed the user
+ * to specify specifically which interrupt sources were enabled.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 new_msr = 0;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_CE) )
+ new_msr |= PPC_MSR_CE;
+#endif
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_ME) )
+ new_msr |= PPC_MSR_ME;
+
+ if ( !(new_level & PPC_INTERRUPT_LEVEL_EE) )
+ new_msr |= PPC_MSR_EE;
+
+ return new_msr;
+}
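+
+/*
+ * Worked example (illustration only): _CPU_ISR_Calculate_level( 0 )
+ * returns an MSR image with PPC_MSR_EE and PPC_MSR_ME (and PPC_MSR_CE
+ * when PPC_HAS_RFCI is set) enabled, i.e. level 0 enables all maskable
+ * interrupt sources.
+ */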
+
+/*PAGE
+ *
+ * _CPU_ISR_Set_level
+ *
+ * This routine sets the requested level in the MSR.
+ */
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+)
+{
+ register unsigned32 tmp = 0;
+ register unsigned32 new_msr;
+
+ new_msr = _CPU_ISR_Calculate_level( new_level );
+
+ asm volatile (
+ "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" :
+ "=&r" ((tmp)) :
+ "r" ((PPC_MSR_DISABLE_MASK)), "r" ((new_msr)), "0" ((tmp))
+ );
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Get_level
+ *
+ * This routine gets the current interrupt level from the MSR and
+ * converts it to an RTEMS interrupt level.
+ */
+
+unsigned32 _CPU_ISR_Get_level( void )
+{
+ unsigned32 level = 0;
+ unsigned32 msr;
+
+ asm volatile("mfmsr %0" : "=r" ((msr)));
+
+ msr &= PPC_MSR_DISABLE_MASK;
+
+ /*
+ * Set the critical interrupt enable bit
+ */
+
+#if (PPC_HAS_RFCI)
+ if ( !(msr & PPC_MSR_CE) )
+ level |= PPC_INTERRUPT_LEVEL_CE;
+#endif
+
+ if ( !(msr & PPC_MSR_ME) )
+ level |= PPC_INTERRUPT_LEVEL_ME;
+
+ if ( !(msr & PPC_MSR_EE) )
+ level |= PPC_INTERRUPT_LEVEL_EE;
+
+ return level;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+#define CPU_MINIMUM_STACK_FRAME_SIZE 56
+#else /* PPC_ABI_SVR4 or PPC_ABI_EABI */
+#define CPU_MINIMUM_STACK_FRAME_SIZE 8
+#endif
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+)
+{
+ unsigned32 msr_value;
+ unsigned32 sp;
+
+ sp = (unsigned32)stack_base + size - CPU_MINIMUM_STACK_FRAME_SIZE;
+ *((unsigned32 *)sp) = 0;
+ the_context->gpr1 = sp;
+
+ the_context->msr = _CPU_ISR_Calculate_level( new_level );
+
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+
+ /*if ( is_fp ) */
+ the_context->msr |= PPC_MSR_FP;
+
+ /*
+ * Calculate the task's MSR value:
+ *
+ * + Set the exception prefix bit to point to the exception table
+ * + Force the RI bit
+ * + Use the DR and IR bits
+ */
+ _CPU_MSR_Value( msr_value );
+ the_context->msr |= (msr_value & PPC_MSR_EP);
+ the_context->msr |= PPC_MSR_RI;
+ the_context->msr |= msr_value & (PPC_MSR_DR|PPC_MSR_IR);
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ { unsigned32 *desc = (unsigned32 *)entry_point;
+
+ the_context->pc = desc[0];
+ the_context->gpr2 = desc[1];
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_SVR4)
+ { unsigned r13 = 0;
+ asm volatile ("mr %0, 13" : "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr13 = r13;
+ }
+#endif
+
+#if (PPC_ABI == PPC_ABI_EABI)
+ { unsigned32 r2 = 0;
+ unsigned r13 = 0;
+ asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
+
+ the_context->pc = (unsigned32)entry_point;
+ the_context->gpr2 = r2;
+ the_context->gpr13 = r13;
+ }
+#endif
+}
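+
+/*
+ * A worked example with hypothetical values: for a 16K stack based at
+ * 0x00100000 under the EABI, the initial frame is carved
+ * CPU_MINIMUM_STACK_FRAME_SIZE (8) bytes below the top and its
+ * back-chain word is zeroed so stack walkers terminate:
+ *
+ *   sp = 0x00100000 + 0x4000 - 8 = 0x00103ff8
+ *   *(unsigned32 *)sp = 0
+ */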
+
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ proc_ptr ignored;
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointers to ISR entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * Install the wrapper so this ISR can be invoked properly.
+ */
+ if (_CPU_Table.exceptions_in_RAM)
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
+
+ /*
+ * We put the actual user ISR address in '_ISR_Vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler ? (ISR_Handler_entry)new_handler :
+ _CPU_Table.spurious_handler ?
+ (ISR_Handler_entry)_CPU_Table.spurious_handler :
+ (ISR_Handler_entry)ppc_spurious;
+}
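+
+/*
+ * A minimal usage sketch (hypothetical handler name, disabled from the
+ * build): the wrapper (_ISR_Handler) goes into the trap slot while the
+ * user routine lands in _ISR_Vector_table.
+ */
+#if 0
+static void my_tick_isr( int vector, CPU_Interrupt_frame *frame );  /* hypothetical */
+
+static void example_install_vector( void )
+{
+  proc_ptr old;
+
+  _CPU_ISR_install_vector(
+    PPC_IRQ_DECREMENTER, (proc_ptr) my_tick_isr, &old );
+}
+#endif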
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
+#else
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
+#endif
+}
+
+/* Handle a spurious interrupt */
+static void ppc_spurious(int v, CPU_Interrupt_frame *i)
+{
+#if 0
+ printf("Spurious interrupt on vector %d from %08.8x\n",
+ v, i->pc);
+#endif
+#ifdef ppc403
+ if (v == PPC_IRQ_EXTERNAL)
+ {
+ register int r = 0;
+
+ asm volatile("mtdcr 0x42, %0" :
+ "=&r" ((r)) : "0" ((r))); /* EXIER */
+ }
+ else if (v == PPC_IRQ_PIT)
+ {
+ register int r = 0x08000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+ else if (v == PPC_IRQ_FIT)
+ {
+ register int r = 0x04000000;
+
+ asm volatile("mtspr 0x3d8, %0" :
+ "=&r" ((r)) : "0" ((r))); /* TSR */
+ }
+#endif
+}
+
+void _CPU_Fatal_error(unsigned32 _error)
+{
+ asm volatile ("mr 3, %0" : : "r" ((_error)));
+ asm volatile ("tweq 5,5");
+ asm volatile ("li 0,0; mtmsr 0");
+ while (1) ;
+}
+
+#define PPC_SYNCHRONOUS_TRAP_BIT_MASK 0x100
+#define PPC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
+#define PPC_SYNCHRONOUS_TRAP( _trap )  ((_trap)+PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+#define PPC_REAL_TRAP_NUMBER( _trap )  ((_trap)%PPC_SYNCHRONOUS_TRAP_BIT_MASK)
+
+
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+#error " Vector install not tested."
+#if (PPC_HAS_FPU)
+#error " Vector install not tested."
+ 0x9421feb0, /* stwu r1, -(20*4 + 18*8 + IP_END)(r1) */
+#else
+#error " Vector install not tested."
+ 0x9421ff40, /* stwu r1, -(20*4 + IP_END)(r1) */
+#endif
+#else
+ 0x9421ff90, /* stwu r1, -(IP_END)(r1) */
+#endif
+
+ 0x90010008, /* stw %r0, IP_0(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* ba PROC (_ISR_Handler) */
+};
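+
+/*
+ * The installer below patches this template in place: the vector
+ * number is ORed into the "li %r0, PPC_IRQ" slot, the handler address
+ * (bits 6..29) is ORed into the "ba" slot, and the cache line is
+ * flushed so the modified instructions are fetched.
+ */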
+
+#if defined(mpc860) || defined(mpc821)
+const CPU_Trap_table_entry _CPU_Trap_slot_template_m860 = {
+ 0x7c0803ac, /* mtlr %r0 */
+ 0x81210028, /* lwz %r9, IP_9(%r1) */
+ 0x38000000, /* li %r0, PPC_IRQ */
+ 0x48000002 /* b PROC (_ISR_Handler) */
+};
+#endif /* mpc860 */
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+);
+
+
+/*PAGE
+ *
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs the specified handler as a "raw" non-executive
+ * supported trap handler (a.k.a. interrupt service routine).
+ *
+ * Input Parameters:
+ * vector - trap table entry number plus synchronous
+ * vs. asynchronous information
+ * new_handler - address of the handler to be installed
+ * old_handler - pointer to an address of the handler previously installed
+ *
+ * Output Parameters:
+ *   *old_handler - address of the handler previously installed
+ *
+ * NOTE:
+ *
+ * This routine is based on the SPARC routine _CPU_ISR_install_raw_handler.
+ * If you install a software trap handler as an executive interrupt handler
+ * (which is desirable since RTEMS takes care of window and register issues),
+ * then the executive needs to know that the return address is to the trap
+ * rather than to the instruction following the trap.
+ *
+ */
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ unsigned32 real_vector;
+ CPU_Trap_table_entry *slot;
+ unsigned32 u32_handler=0;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = vector;
+
+ /*
+ * Get the current base address of the trap table and calculate a pointer
+ * to the slot we are interested in.
+ */
+ slot = (CPU_Trap_table_entry *)ppc_exception_vector_addr( real_vector );
+
+ /*
+ * Get the address of the old_handler from the trap table.
+ *
+ * NOTE: The old_handler returned will be bogus if it does not follow
+ * the RTEMS model.
+ */
+
+#define HIGH_BITS_MASK 0xFFFFFC00
+#define HIGH_BITS_SHIFT 10
+#define LOW_BITS_MASK 0x000003FF
+
+ if (slot->stwu_r1 == _CPU_Trap_slot_template.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+
+ /* IMD FIX: sign extend address fragment... */
+ if (u32_handler & 0x02000000) {
+ u32_handler |= 0xfc000000;
+ }
+
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+/* There are two kinds of handlers for the MPC860. One is the 'standard'
+ * one like above. The other is for the cascaded interrupts from the SIU
+ * and CPM. Therefore we must check for the alternate one if the standard
+ * one is not present
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (slot->stwu_r1 == _CPU_Trap_slot_template_m860.stwu_r1) {
+ /*
+ * Set u32_handler = to target address
+ */
+ u32_handler = slot->b_Handler & 0x03fffffc;
+ *old_handler = (proc_ptr) u32_handler;
+ } else
+#endif /* mpc860 */
+
+ *old_handler = 0;
+
+ /*
+ * Copy the template to the slot and then fix it.
+ */
+#if defined(mpc860) || defined(mpc821)
+ if (vector >= PPC_IRQ_IRQ0)
+ *slot = _CPU_Trap_slot_template_m860;
+ else
+#endif /* mpc860 */
+ *slot = _CPU_Trap_slot_template;
+
+ u32_handler = (unsigned32) new_handler;
+
+ /*
+ * IMD FIX: insert address fragment only (bits 6..29)
+ * therefore check for proper address range
+ * and remove unwanted bits
+ */
+ if ((u32_handler & 0xfc000000) == 0xfc000000) {
+ u32_handler &= ~0xfc000000;
+ }
+ else if ((u32_handler & 0xfc000000) != 0x00000000) {
+ _Internal_error_Occurred(INTERNAL_ERROR_CORE,
+ TRUE,
+ u32_handler);
+ }
+
+ slot->b_Handler |= u32_handler;
+
+ slot->li_r0_IRQ |= vector;
+
+ _CPU_Data_Cache_Block_Flush( slot );
+}
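+
+/*
+ * A minimal usage sketch (hypothetical handler name, disabled from the
+ * build): installing a raw handler patches the four-instruction trap
+ * slot and returns whatever handler the slot previously branched to.
+ */
+#if 0
+static void my_decrementer_handler( void );   /* hypothetical */
+
+static void example_install_raw( void )
+{
+  proc_ptr previous;
+
+  _CPU_ISR_install_raw_handler(
+    PPC_IRQ_DECREMENTER, (proc_ptr) my_decrementer_handler, &previous );
+}
+#endif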
+
+unsigned32 ppc_exception_vector_addr(
+ unsigned32 vector
+)
+{
+#if (!PPC_HAS_EVPR)
+ unsigned32 Msr;
+#endif
+ unsigned32 Top = 0;
+ unsigned32 Offset = 0x000;
+
+#if (PPC_HAS_EXCEPTION_PREFIX)
+ _CPU_MSR_Value ( Msr );
+ if ( ( Msr & PPC_MSR_EP) != 0 ) /* Vectors at FFFx_xxxx */
+ Top = 0xfff00000;
+#elif (PPC_HAS_EVPR)
+ asm volatile( "mfspr %0,0x3d6" : "=r" (Top)); /* EVPR */
+ Top = Top & 0xffff0000;
+#endif
+
+ switch ( vector ) {
+ case PPC_IRQ_SYSTEM_RESET: /* on 40x aka PPC_IRQ_CRIT */
+ Offset = 0x00100;
+ break;
+ case PPC_IRQ_MCHECK:
+ Offset = 0x00200;
+ break;
+ case PPC_IRQ_PROTECT:
+ Offset = 0x00300;
+ break;
+ case PPC_IRQ_ISI:
+ Offset = 0x00400;
+ break;
+ case PPC_IRQ_EXTERNAL:
+ Offset = 0x00500;
+ break;
+ case PPC_IRQ_ALIGNMENT:
+ Offset = 0x00600;
+ break;
+ case PPC_IRQ_PROGRAM:
+ Offset = 0x00700;
+ break;
+ case PPC_IRQ_NOFP:
+ Offset = 0x00800;
+ break;
+ case PPC_IRQ_DECREMENTER:
+ Offset = 0x00900;
+ break;
+ case PPC_IRQ_RESERVED_A:
+ Offset = 0x00a00;
+ break;
+ case PPC_IRQ_RESERVED_B:
+ Offset = 0x00b00;
+ break;
+ case PPC_IRQ_SCALL:
+ Offset = 0x00c00;
+ break;
+ case PPC_IRQ_TRACE:
+ Offset = 0x00d00;
+ break;
+ case PPC_IRQ_FP_ASST:
+ Offset = 0x00e00;
+ break;
+
+#if defined(ppc403)
+
+/* PPC_IRQ_CRIT is the same vector as PPC_IRQ_RESET
+ case PPC_IRQ_CRIT:
+ Offset = 0x00100;
+ break;
+*/
+ case PPC_IRQ_PIT:
+ Offset = 0x01000;
+ break;
+ case PPC_IRQ_FIT:
+ Offset = 0x01010;
+ break;
+ case PPC_IRQ_WATCHDOG:
+ Offset = 0x01020;
+ break;
+ case PPC_IRQ_DEBUG:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc601)
+ case PPC_IRQ_TRACE:
+ Offset = 0x02000;
+ break;
+
+#elif defined(ppc603)
+ case PPC_IRQ_TRANS_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_DATA_LOAD:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_STORE:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(ppc603e)
+ case PPC_TLB_INST_MISS:
+ Offset = 0x1000;
+ break;
+ case PPC_TLB_LOAD_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_TLB_STORE_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_ADDRBRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc604)
+ case PPC_IRQ_ADDR_BRK:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_SYS_MGT:
+ Offset = 0x1400;
+ break;
+
+#elif defined(mpc860) || defined(mpc821)
+ case PPC_IRQ_EMULATE:
+ Offset = 0x1000;
+ break;
+ case PPC_IRQ_INST_MISS:
+ Offset = 0x1100;
+ break;
+ case PPC_IRQ_DATA_MISS:
+ Offset = 0x1200;
+ break;
+ case PPC_IRQ_INST_ERR:
+ Offset = 0x1300;
+ break;
+ case PPC_IRQ_DATA_ERR:
+ Offset = 0x1400;
+ break;
+ case PPC_IRQ_DATA_BPNT:
+ Offset = 0x1c00;
+ break;
+ case PPC_IRQ_INST_BPNT:
+ Offset = 0x1d00;
+ break;
+ case PPC_IRQ_IO_BPNT:
+ Offset = 0x1e00;
+ break;
+ case PPC_IRQ_DEV_PORT:
+ Offset = 0x1f00;
+ break;
+ case PPC_IRQ_IRQ0:
+ Offset = 0x2000;
+ break;
+ case PPC_IRQ_LVL0:
+ Offset = 0x2040;
+ break;
+ case PPC_IRQ_IRQ1:
+ Offset = 0x2080;
+ break;
+ case PPC_IRQ_LVL1:
+ Offset = 0x20c0;
+ break;
+ case PPC_IRQ_IRQ2:
+ Offset = 0x2100;
+ break;
+ case PPC_IRQ_LVL2:
+ Offset = 0x2140;
+ break;
+ case PPC_IRQ_IRQ3:
+ Offset = 0x2180;
+ break;
+ case PPC_IRQ_LVL3:
+ Offset = 0x21c0;
+ break;
+ case PPC_IRQ_IRQ4:
+ Offset = 0x2200;
+ break;
+ case PPC_IRQ_LVL4:
+ Offset = 0x2240;
+ break;
+ case PPC_IRQ_IRQ5:
+ Offset = 0x2280;
+ break;
+ case PPC_IRQ_LVL5:
+ Offset = 0x22c0;
+ break;
+ case PPC_IRQ_IRQ6:
+ Offset = 0x2300;
+ break;
+ case PPC_IRQ_LVL6:
+ Offset = 0x2340;
+ break;
+ case PPC_IRQ_IRQ7:
+ Offset = 0x2380;
+ break;
+ case PPC_IRQ_LVL7:
+ Offset = 0x23c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_0:
+ Offset = 0x2400;
+ break;
+ case PPC_IRQ_CPM_PC4:
+ Offset = 0x2410;
+ break;
+ case PPC_IRQ_CPM_PC5:
+ Offset = 0x2420;
+ break;
+ case PPC_IRQ_CPM_SMC2:
+ Offset = 0x2430;
+ break;
+ case PPC_IRQ_CPM_SMC1:
+ Offset = 0x2440;
+ break;
+ case PPC_IRQ_CPM_SPI:
+ Offset = 0x2450;
+ break;
+ case PPC_IRQ_CPM_PC6:
+ Offset = 0x2460;
+ break;
+ case PPC_IRQ_CPM_TIMER4:
+ Offset = 0x2470;
+ break;
+ case PPC_IRQ_CPM_RESERVED_8:
+ Offset = 0x2480;
+ break;
+ case PPC_IRQ_CPM_PC7:
+ Offset = 0x2490;
+ break;
+ case PPC_IRQ_CPM_PC8:
+ Offset = 0x24a0;
+ break;
+ case PPC_IRQ_CPM_PC9:
+ Offset = 0x24b0;
+ break;
+ case PPC_IRQ_CPM_TIMER3:
+ Offset = 0x24c0;
+ break;
+ case PPC_IRQ_CPM_RESERVED_D:
+ Offset = 0x24d0;
+ break;
+ case PPC_IRQ_CPM_PC10:
+ Offset = 0x24e0;
+ break;
+ case PPC_IRQ_CPM_PC11:
+ Offset = 0x24f0;
+ break;
+ case PPC_IRQ_CPM_I2C:
+ Offset = 0x2500;
+ break;
+ case PPC_IRQ_CPM_RISC_TIMER:
+ Offset = 0x2510;
+ break;
+ case PPC_IRQ_CPM_TIMER2:
+ Offset = 0x2520;
+ break;
+ case PPC_IRQ_CPM_RESERVED_13:
+ Offset = 0x2530;
+ break;
+ case PPC_IRQ_CPM_IDMA2:
+ Offset = 0x2540;
+ break;
+ case PPC_IRQ_CPM_IDMA1:
+ Offset = 0x2550;
+ break;
+ case PPC_IRQ_CPM_SDMA_ERROR:
+ Offset = 0x2560;
+ break;
+ case PPC_IRQ_CPM_PC12:
+ Offset = 0x2570;
+ break;
+ case PPC_IRQ_CPM_PC13:
+ Offset = 0x2580;
+ break;
+ case PPC_IRQ_CPM_TIMER1:
+ Offset = 0x2590;
+ break;
+ case PPC_IRQ_CPM_PC14:
+ Offset = 0x25a0;
+ break;
+ case PPC_IRQ_CPM_SCC4:
+ Offset = 0x25b0;
+ break;
+ case PPC_IRQ_CPM_SCC3:
+ Offset = 0x25c0;
+ break;
+ case PPC_IRQ_CPM_SCC2:
+ Offset = 0x25d0;
+ break;
+ case PPC_IRQ_CPM_SCC1:
+ Offset = 0x25e0;
+ break;
+ case PPC_IRQ_CPM_PC15:
+ Offset = 0x25f0;
+ break;
+#endif
+
+ }
+ Top += Offset;
+ return Top;
+}
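+
+/*
+ * An illustrative sketch (disabled from the build): on classic parts
+ * the external interrupt vector resolves to the architected offset,
+ * relocated high when MSR[EP] is set.
+ */
+#if 0
+static unsigned32 example_vector_addr( void )
+{
+  /* 0x00000500 with MSR[EP] clear; 0xfff00500 with MSR[EP] set */
+  return ppc_exception_vector_addr( PPC_IRQ_EXTERNAL );
+}
+#endif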
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h
new file mode 100644
index 0000000000..2a502d0745
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu.h
@@ -0,0 +1,1200 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/ppc.h> /* pick up machine definitions */
+#ifndef ASM
+struct CPU_Interrupt_frame;
+typedef void ( *ppc_isr_entry )( int, struct CPU_Interrupt_frame * );
+
+#include <rtems/score/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined,
+ * something calls _Thread_Enable_dispatch which in turn calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU, although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU, although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+/*
+ * ACB: This is a lie, but it gets us a handle on a call to set up
+ * a variable derived from the top of the interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then nothing is done.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 1
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not expect to use the FP unit DOES,
+ * then one cannot easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT \
+ __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ * The interrupt level is bit mapped for the PowerPC family. The
+ * bits are set to 0 to indicate that a particular exception source is
+ * enabled and to 1 if it is disabled. This keeps with the RTEMS convention
+ * that interrupt level 0 means all sources are enabled.
+ *
+ * The bits are assigned to correspond to enable bits in the MSR.
+ */
+
+#define PPC_INTERRUPT_LEVEL_ME 0x01
+#define PPC_INTERRUPT_LEVEL_EE 0x02
+#define PPC_INTERRUPT_LEVEL_CE 0x04
+
+/* XXX should these be maskable? */
+#if 0
+#define PPC_INTERRUPT_LEVEL_DE 0x08
+#define PPC_INTERRUPT_LEVEL_BE 0x10
+#define PPC_INTERRUPT_LEVEL_SE 0x20
+#endif
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000007
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinctions between the caller/callee saves registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it simply consists of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ unsigned32 dummy[13]; /* Used by callees: PowerOpen ABI */
+#else
+ unsigned32 dummy[1]; /* Used by callees: SVR4/EABI */
+#endif
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 idle_task_stack_size;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_mpci_receive_server_stack;
+ void * (*stack_allocate_hook)( unsigned32 );
+ void (*stack_free_hook)( void* );
+ /* end of fields required on all CPUs */
+
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ void (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
+ boolean exceptions_in_RAM; /* TRUE if in RAM */
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+ unsigned32 serial_per_sec; /* Serial clocks per second */
+ boolean serial_external_clock;
+ boolean serial_xon_xoff;
+ boolean serial_cts_rts;
+ unsigned32 serial_rate;
+ unsigned32 timer_average_overhead; /* Average overhead of timer in ticks */
+ unsigned32 timer_least_valid; /* Least valid number from timer */
+ boolean timer_internal_clock; /* TRUE, when timer runs with CPU clk */
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+ unsigned32 clock_speed; /* Speed of CPU in Hz */
+#endif
+} rtems_cpu_table;
+
+/*
+ * Macros to access required entries in the CPU Table are in
+ * the file rtems/system.h.
+ */
+
+/*
+ * Macros to access PowerPC specific additions to the CPU Table
+ */
+
+#define rtems_cpu_configuration_get_clicks_per_usec() \
+ (_CPU_Table.clicks_per_usec)
+
+#define rtems_cpu_configuration_get_spurious_handler() \
+ (_CPU_Table.spurious_handler)
+
+#define rtems_cpu_configuration_get_exceptions_in_ram() \
+ (_CPU_Table.exceptions_in_RAM)
+
+#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
+
+#define rtems_cpu_configuration_get_serial_per_sec() \
+ (_CPU_Table.serial_per_sec)
+
+#define rtems_cpu_configuration_get_serial_external_clock() \
+ (_CPU_Table.serial_external_clock)
+
+#define rtems_cpu_configuration_get_serial_xon_xoff() \
+ (_CPU_Table.serial_xon_xoff)
+
+#define rtems_cpu_configuration_get_serial_cts_rts() \
+ (_CPU_Table.serial_cts_rts)
+
+#define rtems_cpu_configuration_get_serial_rate() \
+ (_CPU_Table.serial_rate)
+
+#define rtems_cpu_configuration_get_timer_average_overhead() \
+ (_CPU_Table.timer_average_overhead)
+
+#define rtems_cpu_configuration_get_timer_least_valid() \
+ (_CPU_Table.timer_least_valid)
+
+#define rtems_cpu_configuration_get_timer_internal_clock() \
+ (_CPU_Table.timer_internal_clock)
+
+#endif
+
+#if (defined(mpc860) || defined(mpc821))
+#define rtems_cpu_configuration_get_clock_speed() \
+ (_CPU_Table.clock_speed)
+#endif
+
+
+/*
+ * The following type defines an entry in the PPC's trap table.
+ *
+ * NOTE: The instructions chosen are RTEMS dependent although one is
+ * obligated to use two of the four instructions to perform a
+ * long jump. The other instructions load one register with the
+ * trap type (a.k.a. vector) and another with the psr.
+ */
+
+typedef struct {
+ unsigned32 stwu_r1; /* stwu %r1, -(??+IP_END)(%r1) */
+ unsigned32 stw_r0; /* stw %r0, IP_0(%r1) */
+ unsigned32 li_r0_IRQ; /* li %r0, _IRQ */
+ unsigned32 b_Handler; /* b PROC (_ISR_Handler) */
+} CPU_Trap_table_entry;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+
+SCORE_EXTERN struct {
+ unsigned32 *Nest_level;
+ unsigned32 *Disable_level;
+ void *Vector_table;
+ void *Stack;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ unsigned32 Dispatch_r2;
+#else
+ unsigned32 Default_r2;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ unsigned32 Default_r13;
+#endif
+#endif
+ volatile boolean *Switch_necessary;
+ boolean *Signal;
+
+ unsigned32 msr_initial;
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
+
+/*
+ * Should be large enough to run all RTEMS tests. This ensures
+ * that a reasonably small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _isr_cookie.
+ */
+
+#define loc_string(a,b) a " (" #b ")\n"
+
+#define _CPU_MSR_Value( _msr_value ) \
+ do { \
+ _msr_value = 0; \
+ asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
+ } while (0)
+
+#define _CPU_MSR_SET( _msr_value ) \
+{ asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
+
+#if 0
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile ( \
+ "mfmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie)) \
+ ); \
+ asm volatile ( \
+ "andc %1,%0,%1" : \
+ "=r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ asm volatile ( \
+ "mtmsr %1" : \
+ "=r" ((_disable_mask)) : \
+ "0" ((_disable_mask)) \
+ ); \
+ }
+#endif
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ _isr_cookie = 0; \
+ asm volatile ( \
+ "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
+
+
+#define _CPU_Data_Cache_Block_Flush( _address ) \
+ do { register void *__address = (_address); \
+ register unsigned32 _zero = 0; \
+ asm volatile ( "dcbf %0,%1" : \
+ "=r" (_zero), "=r" (__address) : \
+ "0" (_zero), "1" (__address) \
+ ); \
+ } while (0)
+
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _isr_cookie is not modified.
+ */
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ asm volatile ( "mtmsr %0" : \
+ "=r" ((_isr_cookie)) : \
+ "0" ((_isr_cookie))); \
+ }
+
+/*
+ * This temporarily restores the interrupt to _isr_cookie before immediately
+ * disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _isr_cookie is not
+ * modified.
+ *
+ * NOTE: The version being used is not very optimized but it does
+ * not trip a problem in gcc where the disable mask does not
+ * get loaded. Check this for future (post 10/97) gcc versions.
+ */
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
+ asm volatile ( \
+ "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
+ "0" ((_isr_cookie)), "1" ((_disable_mask)) \
+ ); \
+ }
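+
+/*
+ * A minimal critical-section sketch (disabled from the build) showing
+ * how the three macros pair up; the cookie holds the pre-disable MSR
+ * and is not modified by Enable or Flash:
+ */
+#if 0
+static void example_critical_section( void )
+{
+  unsigned32 level;
+
+  _CPU_ISR_Disable( level );   /* enables cleared, old MSR in level */
+  /* ... first part of the critical section ... */
+  _CPU_ISR_Flash( level );     /* briefly re-enable, then disable */
+  /* ... second part of the critical section ... */
+  _CPU_ISR_Enable( level );    /* pre-disable MSR restored */
+}
+#endif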
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+unsigned32 _CPU_ISR_Calculate_level(
+ unsigned32 new_level
+);
+
+void _CPU_ISR_Set_level(
+ unsigned32 new_level
+);
+
+unsigned32 _CPU_ISR_Get_level( void );
+
+void _CPU_ISR_install_raw_handler(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/* end of ISR handler macros */
+
+/*
+ * Simple spin delay in microsecond units for device drivers.
+ * This is very dependent on the clock speed of the target.
+ */
+
+#define CPU_Get_timebase_low( _value ) \
+ asm volatile( "mftb %0" : "=r" (_value) )
+
+#define delay( _microseconds ) \
+ do { \
+ unsigned32 start, ticks, now; \
+ CPU_Get_timebase_low( start ) ; \
+ ticks = (_microseconds) * _CPU_Table.clicks_per_usec; \
+ do \
+ CPU_Get_timebase_low( now ) ; \
+ while (now - start < ticks); \
+ } while (0)
+
+#define delay_in_bus_cycles( _cycles ) \
+ do { \
+ unsigned32 start, now; \
+ CPU_Get_timebase_low( start ); \
+ do \
+ CPU_Get_timebase_low( now ); \
+ while (now - start < (_cycles)); \
+ } while (0)
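+
+/*
+ * A usage sketch (disabled from the build).  Both delays busy-wait on
+ * the time base; the unsigned (now - start) subtraction handles
+ * wrap-around of the 32-bit counter:
+ */
+#if 0
+static void example_delays( void )
+{
+  delay( 100 );                /* spin for roughly 100 microseconds */
+  delay_in_bus_cycles( 500 );  /* spin for 500 time base ticks */
+}
+#endif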
+
+
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ unsigned32 *stack_base,
+ unsigned32 size,
+ unsigned32 new_level,
+ void *entry_point,
+ boolean is_fp
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _CPU_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variables in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
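+
+/*
+ * An illustrative sketch (disabled from the build).  cntlzw counts
+ * leading zeros, so bits are numbered from the MSB down, matching the
+ * 0x80000000 >> n mask built by _CPU_Priority_Mask() below:
+ */
+#if 0
+static void example_find_first_bit( void )
+{
+  unsigned32 value = 0x00008000;
+  unsigned32 bit;
+
+  _CPU_Bitfield_Find_first_bit( value, bit );
+  /* bit == 16 -- sixteen leading zeros precede the first set bit */
+}
+#endif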
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)(void)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to ensure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=&r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
+
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+ asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
+ } while (0)
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline unsigned64 PPC_Get_timebase_register( void )
+{
+ unsigned32 tbr_low;
+ unsigned32 tbr_high;
+ unsigned32 tbr_high_old;
+ unsigned64 tbr;
+
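+  /*
+   * Read high, low, then high again; if the high half changed, the
+   * low half wrapped between the reads and the sample must be retaken.
+   */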
+ do {
+ asm volatile( "mftbu %0" : "=r" (tbr_high_old));
+ asm volatile( "mftb %0" : "=r" (tbr_low));
+ asm volatile( "mftbu %0" : "=r" (tbr_high));
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S
new file mode 100644
index 0000000000..a377fa5d2a
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/cpu_asm.S
@@ -0,0 +1,809 @@
+
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+#if (PPC_HAS_DOUBLE == 1)
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 8)
+ .set FP_2, (FP_1 + 8)
+ .set FP_3, (FP_2 + 8)
+ .set FP_4, (FP_3 + 8)
+ .set FP_5, (FP_4 + 8)
+ .set FP_6, (FP_5 + 8)
+ .set FP_7, (FP_6 + 8)
+ .set FP_8, (FP_7 + 8)
+ .set FP_9, (FP_8 + 8)
+ .set FP_10, (FP_9 + 8)
+ .set FP_11, (FP_10 + 8)
+ .set FP_12, (FP_11 + 8)
+ .set FP_13, (FP_12 + 8)
+ .set FP_14, (FP_13 + 8)
+ .set FP_15, (FP_14 + 8)
+ .set FP_16, (FP_15 + 8)
+ .set FP_17, (FP_16 + 8)
+ .set FP_18, (FP_17 + 8)
+ .set FP_19, (FP_18 + 8)
+ .set FP_20, (FP_19 + 8)
+ .set FP_21, (FP_20 + 8)
+ .set FP_22, (FP_21 + 8)
+ .set FP_23, (FP_22 + 8)
+ .set FP_24, (FP_23 + 8)
+ .set FP_25, (FP_24 + 8)
+ .set FP_26, (FP_25 + 8)
+ .set FP_27, (FP_26 + 8)
+ .set FP_28, (FP_27 + 8)
+ .set FP_29, (FP_28 + 8)
+ .set FP_30, (FP_29 + 8)
+ .set FP_31, (FP_30 + 8)
+ .set FP_FPSCR, (FP_31 + 8)
+#else
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+#endif
+
+ .set IP_LINK, 0
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ .set IP_0, (IP_LINK + 56)
+#else
+ .set IP_0, (IP_LINK + 8)
+#endif
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ /* _CPU_IRQ_info offsets */
+
+ /* These must be in this order */
+ .set Nest_level, 0
+ .set Disable_level, 4
+ .set Vector_table, 8
+ .set Stack, 12
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ .set Dispatch_r2, 16
+ .set Switch_necessary, 20
+#else
+ .set Default_r2, 16
+#if (PPC_ABI != PPC_ABI_GCC27)
+ .set Default_r13, 20
+ .set Switch_necessary, 24
+#else
+ .set Switch_necessary, 20
+#endif
+#endif
+ .set Signal, Switch_necessary + 4
+ .set msr_initial, Signal + 4
+
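+/*
+ * A rough C view of the structure these offsets index (field names
+ * are illustrative; the authoritative layout is in cpu.h/cpu.c and
+ * must match the order required above):
+ *
+ *   typedef struct {
+ *     unsigned32 *Nest_level;
+ *     unsigned32 *Disable_level;
+ *     void       *Vector_table;
+ *     void       *Stack;
+ *     (ABI-dependent r2/r13 slots)
+ *     boolean    *Switch_necessary;
+ *     boolean    *Signal;
+ *     unsigned32  msr_initial;
+ *   } CPU_IRQ_info;
+ */
+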
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to save the FP context
+ * to is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
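+/*
+ * Rough C-level view (prototype as declared for RTEMS CPU ports):
+ *
+ *   void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );
+ *
+ * The double indirection is why the code below first executes
+ * "lwz r3, 0(r3)" to dereference the handle before storing.
+ */
+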
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ stfd f0, FP_0(r3)
+ stfd f1, FP_1(r3)
+ stfd f2, FP_2(r3)
+ stfd f3, FP_3(r3)
+ stfd f4, FP_4(r3)
+ stfd f5, FP_5(r3)
+ stfd f6, FP_6(r3)
+ stfd f7, FP_7(r3)
+ stfd f8, FP_8(r3)
+ stfd f9, FP_9(r3)
+ stfd f10, FP_10(r3)
+ stfd f11, FP_11(r3)
+ stfd f12, FP_12(r3)
+ stfd f13, FP_13(r3)
+ stfd f14, FP_14(r3)
+ stfd f15, FP_15(r3)
+ stfd f16, FP_16(r3)
+ stfd f17, FP_17(r3)
+ stfd f18, FP_18(r3)
+ stfd f19, FP_19(r3)
+ stfd f20, FP_20(r3)
+ stfd f21, FP_21(r3)
+ stfd f22, FP_22(r3)
+ stfd f23, FP_23(r3)
+ stfd f24, FP_24(r3)
+ stfd f25, FP_25(r3)
+ stfd f26, FP_26(r3)
+ stfd f27, FP_27(r3)
+ stfd f28, FP_28(r3)
+ stfd f29, FP_29(r3)
+ stfd f30, FP_30(r3)
+ stfd f31, FP_31(r3)
+ mffs f2
+ stfd f2, FP_FPSCR(r3)
+#else
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ lfd f2, FP_FPSCR(r3)
+ mtfsf 255, f2
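+	/* f2 served as scratch for the FPSCR image; it is reloaded below */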
+ lfd f0, FP_0(r3)
+ lfd f1, FP_1(r3)
+ lfd f2, FP_2(r3)
+ lfd f3, FP_3(r3)
+ lfd f4, FP_4(r3)
+ lfd f5, FP_5(r3)
+ lfd f6, FP_6(r3)
+ lfd f7, FP_7(r3)
+ lfd f8, FP_8(r3)
+ lfd f9, FP_9(r3)
+ lfd f10, FP_10(r3)
+ lfd f11, FP_11(r3)
+ lfd f12, FP_12(r3)
+ lfd f13, FP_13(r3)
+ lfd f14, FP_14(r3)
+ lfd f15, FP_15(r3)
+ lfd f16, FP_16(r3)
+ lfd f17, FP_17(r3)
+ lfd f18, FP_18(r3)
+ lfd f19, FP_19(r3)
+ lfd f20, FP_20(r3)
+ lfd f21, FP_21(r3)
+ lfd f22, FP_22(r3)
+ lfd f23, FP_23(r3)
+ lfd f24, FP_24(r3)
+ lfd f25, FP_25(r3)
+ lfd f26, FP_26(r3)
+ lfd f27, FP_27(r3)
+ lfd f28, FP_28(r3)
+ lfd f29, FP_29(r3)
+ lfd f30, FP_30(r3)
+ lfd f31, FP_31(r3)
+#else
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
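+/*
+ * Rough C-level view (sketch):
+ *
+ *   void _CPU_Context_switch( Context_Control *run,
+ *                             Context_Control *heir );
+ *
+ * r3 addresses the outgoing context to save into; r4 addresses the
+ * incoming context to load from.
+ */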
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
+ stw r1, GP_1(r3)
+ lwz r1, GP_1(r4)
+ stw r2, GP_2(r3)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ stmw r13, GP_13(r3)
+ lmw r13, GP_13(r4)
+#else
+ stw r13, GP_13(r3)
+ lwz r13, GP_13(r4)
+ stw r14, GP_14(r3)
+ lwz r14, GP_14(r4)
+ stw r15, GP_15(r3)
+ lwz r15, GP_15(r4)
+ stw r16, GP_16(r3)
+ lwz r16, GP_16(r4)
+ stw r17, GP_17(r3)
+ lwz r17, GP_17(r4)
+ stw r18, GP_18(r3)
+ lwz r18, GP_18(r4)
+ stw r19, GP_19(r3)
+ lwz r19, GP_19(r4)
+ stw r20, GP_20(r3)
+ lwz r20, GP_20(r4)
+ stw r21, GP_21(r3)
+ lwz r21, GP_21(r4)
+ stw r22, GP_22(r3)
+ lwz r22, GP_22(r4)
+ stw r23, GP_23(r3)
+ lwz r23, GP_23(r4)
+ stw r24, GP_24(r3)
+ lwz r24, GP_24(r4)
+ stw r25, GP_25(r3)
+ lwz r25, GP_25(r4)
+ stw r26, GP_26(r3)
+ lwz r26, GP_26(r4)
+ stw r27, GP_27(r3)
+ lwz r27, GP_27(r4)
+ stw r28, GP_28(r3)
+ lwz r28, GP_28(r4)
+ stw r29, GP_29(r3)
+ lwz r29, GP_29(r4)
+ stw r30, GP_30(r3)
+ lwz r30, GP_30(r4)
+ stw r31, GP_31(r3)
+ lwz r31, GP_31(r4)
+#endif
+ mfcr r5
+ stw r5, GP_CR(r3)
+ lwz r5, GP_CR(r4)
+ mflr r6
+ mtcrf 255, r5
+ stw r6, GP_PC(r3)
+ lwz r6, GP_PC(r4)
+ mfmsr r7
+ mtlr r6
+ stw r7, GP_MSR(r3)
+ lwz r7, GP_MSR(r4)
+ mtmsr r7
+#endif
+#if (PPC_CACHE_ALIGNMENT == 16)
+ /* This assumes that all the registers are in the given order */
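+	/*
+ * r5 holds the cache line size and r3 is biased by -4 so that
+ * the store-with-update forms step through the context one line
+ * at a time. Each dcbz establishes (zero-fills) the next line
+ * before it is overwritten, avoiding a fetch from memory, and
+ * the dcbt hints prefetch the heir's context before it is read.
+ */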
+ li r5, 16
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_14+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+
+ addi r3, r3, GP_18-GP_14
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_22-GP_18
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ addi r3, r3, GP_26-GP_22
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_26(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stwu r14, GP_14+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r15, GP_15-GP_14(r3)
+ stw r16, GP_16-GP_14(r3)
+ stw r17, GP_17-GP_14(r3)
+ stwu r18, GP_18-GP_14(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stwu r22, GP_22-GP_18(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r23, GP_23-GP_22(r3)
+ stw r24, GP_24-GP_22(r3)
+ stw r25, GP_25-GP_22(r3)
+ stwu r26, GP_26-GP_22(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r27, GP_27-GP_26(r3)
+ stw r28, GP_28-GP_26(r3)
+ stw r29, GP_29-GP_26(r3)
+ stw r30, GP_30-GP_26(r3)
+ stw r31, GP_31-GP_26(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_26(r3)
+ mflr r7
+ stw r7, GP_PC-GP_26(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_26(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_19-GP_15
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_23-GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ addi r4, r4, GP_27-GP_23
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_27(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwzu r15, GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r16, GP_16-GP_15(r4)
+ lwz r17, GP_17-GP_15(r4)
+ lwz r18, GP_18-GP_15(r4)
+ lwzu r19, GP_19-GP_15(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
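+	/* Same dcbz/dcbt technique as the 16-byte case, one step per 32-byte line */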
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
+/* Individual interrupt prologues look like this:
+ * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ * #if (PPC_HAS_FPU)
+ * stwu r1, -(20*4 + 18*8 + IP_END)(r1)
+ * #else
+ * stwu r1, -(20*4 + IP_END)(r1)
+ * #endif
+ * #else
+ * stwu r1, -(IP_END)(r1)
+ * #endif
+ * stw r0, IP_0(r1)
+ *
+ * li r0, vectornum
+ * b PROC (_ISR_Handler{,C})
+ */
+
+/* void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0; r0 itself has already been stacked.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_Handler)
+PROC (_ISR_Handler):
+#define LABEL(x) x
+/* XXX ??
+#define MTSAVE(x) mtspr sprg0, x
+#define MFSAVE(x) mfspr x, sprg0
+*/
+#define MTPC(x) mtspr srr0, x
+#define MFPC(x) mfspr x, srr0
+#define MTMSR(x) mtspr srr1, x
+#define MFMSR(x) mfspr x, srr1
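+
+/*
+ * irq_stub.S is shared between the normal and critical entry points;
+ * these macros select the SPR pair used to save and restore the
+ * interrupted PC and MSR (srr0/srr1 here, srr2/srr3 for the
+ * critical-interrupt variant below).
+ */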
+
+ #include "irq_stub.S"
+ rfi
+
+#if (PPC_HAS_RFCI == 1)
+/* void _ISR_HandlerC()
+ *
+ * This routine provides the RTEMS interrupt management
+ * for critical interrupts.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_HandlerC)
+PROC (_ISR_HandlerC):
+#undef LABEL
+#undef MTSAVE
+#undef MFSAVE
+#undef MTPC
+#undef MFPC
+#undef MTMSR
+#undef MFMSR
+#define LABEL(x) x##_C
+/* XXX??
+#define MTSAVE(x) mtspr sprg1, x
+#define MFSAVE(x) mfspr x, sprg1
+*/
+#define MTPC(x) mtspr srr2, x
+#define MFPC(x) mfspr x, srr2
+#define MTMSR(x) mtspr srr3, x
+#define MFMSR(x) mfspr x, srr3
+ #include "irq_stub.S"
+ rfci
+#endif
+
+/* PowerOpen descriptors for indirect function calls.
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (_CPU_Context_save_fp)
+ DESCRIPTOR (_CPU_Context_restore_fp)
+ DESCRIPTOR (_CPU_Context_switch)
+ DESCRIPTOR (_CPU_Context_restore)
+ DESCRIPTOR (_ISR_Handler)
+#if (PPC_HAS_RFCI == 1)
+ DESCRIPTOR (_ISR_HandlerC)
+#endif
+#endif
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S
new file mode 100644
index 0000000000..76c8927305
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/irq_stub.S
@@ -0,0 +1,268 @@
+/*
+ * This file contains the interrupt handler assembly code for the PowerPC
+ * implementation of RTEMS. It is #included from cpu_asm.S.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * $Id$
+ */
+
+/* void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0; r0 itself has already been stacked.
+ *
+ */
+	PUBLIC_VAR (_CPU_IRQ_info)
+
+ /* Finish off the interrupt frame */
+ stw r2, IP_2(r1)
+ stw r3, IP_3(r1)
+ stw r4, IP_4(r1)
+ stw r5, IP_5(r1)
+ stw r6, IP_6(r1)
+ stw r7, IP_7(r1)
+ stw r8, IP_8(r1)
+ stw r9, IP_9(r1)
+ stw r10, IP_10(r1)
+ stw r11, IP_11(r1)
+ stw r12, IP_12(r1)
+ stw r13, IP_13(r1)
+ stmw r28, IP_28(r1)
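+	/*
+ * Only the volatile registers plus r2 and r13 are saved here;
+ * r28-r31 are saved as well because this stub uses them as
+ * scratch below. The remaining non-volatiles are preserved by
+ * any C handler under the normal calling convention.
+ */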
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+ MFPC (r9)
+ MFMSR (r10)
+ /* Establish addressing */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+#endif
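+	/*
+ * When PPC_USE_SPRG is set, sprg3 is assumed to hold the address
+ * of _CPU_IRQ_info (and sprg2 an interrupt-disabled MSR image),
+ * avoiding the lis/addi reconstruction used otherwise.
+ */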
+ dcbt r0, r11
+ stw r5, IP_CR(r1)
+ stw r6, IP_CTR(r1)
+ stw r7, IP_XER(r1)
+ stw r8, IP_LR(r1)
+ stw r9, IP_PC(r1)
+ stw r10, IP_MSR(r1)
+
+ lwz r30, Vector_table(r11)
+ slwi r4,r0,2
+ lwz r28, Nest_level(r11)
+ add r4, r4, r30
+
+ lwz r30, 0(r28)
+ mr r3, r0
+ lwz r31, Stack(r11)
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ */
+ /* Switch stacks, here we must prevent ALL interrupts */
+#if (PPC_USE_SPRG)
+ mfmsr r5
+ mfspr r6, sprg2
+#else
+ lwz r6,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r6,r6,r5
+ mfmsr r5
+#endif
+ mtmsr r6
+ cmpwi r30, 0
+ lwz r29, Disable_level(r11)
+ subf r31,r1,r31
+ bne LABEL (nested)
+ stwux r1,r1,r31
+LABEL (nested):
+ /*
+ * _ISR_Nest_level++;
+ */
+ lwz r31, 0(r29)
+ addi r30,r30,1
+ stw r30,0(r28)
+	/* From here on out interrupts could safely be re-enabled,
+ * but RTEMS convention keeps them disabled.
+ */
+ lwz r4,0(r4)
+ /*
+ * _Thread_Dispatch_disable_level++;
+ */
+ addi r31,r31,1
+ stw r31, 0(r29)
+/* SCE 980217
+ *
+ * We need address translation ON when we call our ISR routine
+
+ mtmsr r5
+
+ */
+
+ /*
+ * (*_ISR_Vector_table[ vector ])( vector );
+ */
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r6,0(r4)
+ lwz r2,4(r4)
+ mtlr r6
+ lwz r11,8(r4)
+#endif
+#if (PPC_ABI == PPC_ABI_GCC27)
+ lwz r2, Default_r2(r11)
+ mtlr r4
+ #lwz r2, 0(r2)
+#endif
+#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
+ mtlr r4
+ lwz r2, Default_r2(r11)
+ lwz r13, Default_r13(r11)
+ #lwz r2, 0(r2)
+ #lwz r13, 0(r13)
+#endif
+ mr r4,r1
+ blrl
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+
+ /* We must re-disable the interrupts */
+#if (PPC_USE_SPRG)
+ mfspr r11, sprg3
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r30,~PPC_MSR_DISABLE_MASK@ha
+ ori r30,r30,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r30
+#endif
+ mtmsr r0
+ lwz r30, 0(r28)
+ lwz r31, 0(r29)
+
+ /*
+ * if (--Thread_Dispatch_disable,--_ISR_Nest_level)
+ * goto easy_exit;
+ */
+ addi r30, r30, -1
+ cmpwi r30, 0
+ addi r31, r31, -1
+ stw r30, 0(r28)
+ stw r31, 0(r29)
+ bne LABEL (easy_exit)
+ cmpwi r31, 0
+
+ lwz r30, Switch_necessary(r11)
+
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ */
+ lwz r1,0(r1)
+ bne LABEL (easy_exit)
+ lwz r30, 0(r30)
+ lwz r31, Signal(r11)
+
+ /*
+ * if ( _Context_Switch_necessary )
+ * goto switch
+ */
+ cmpwi r30, 0
+ lwz r28, 0(r31)
+ li r6,0
+ bne LABEL (switch)
+ /*
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto easy_exit
+ * _ISR_Signals_to_thread_executing = 0;
+ */
+ cmpwi r28, 0
+ beq LABEL (easy_exit)
+
+ /*
+ * switch:
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ */
+LABEL (switch):
+ stw r6, 0(r31)
+ /* Re-enable interrupts */
+ lwz r0, IP_MSR(r1)
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r2, Dispatch_r2(r11)
+#else
+ /* R2 and R13 still hold their values from the last call */
+#endif
+ mtmsr r0
+ bl SYM (_Thread_Dispatch)
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+ /*
+ * prepare to get out of interrupt
+ */
+ /* Re-disable IRQs */
+#if (PPC_USE_SPRG)
+ mfspr r0, sprg2
+#else
+ lis r11,_CPU_IRQ_info@ha
+ addi r11,r11,_CPU_IRQ_info@l
+ lwz r0,msr_initial(r11)
+ lis r5,~PPC_MSR_DISABLE_MASK@ha
+ ori r5,r5,~PPC_MSR_DISABLE_MASK@l
+ and r0,r0,r5
+#endif
+ mtmsr r0
+
+ /*
+ * easy_exit:
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
+LABEL (easy_exit):
+ lwz r5, IP_CR(r1)
+ lwz r6, IP_CTR(r1)
+ lwz r7, IP_XER(r1)
+ lwz r8, IP_LR(r1)
+ lwz r9, IP_PC(r1)
+ lwz r10, IP_MSR(r1)
+ mtcrf 255,r5
+ mtctr r6
+ mtxer r7
+ mtlr r8
+ MTPC (r9)
+ MTMSR (r10)
+ lwz r0, IP_0(r1)
+ lwz r2, IP_2(r1)
+ lwz r3, IP_3(r1)
+ lwz r4, IP_4(r1)
+ lwz r5, IP_5(r1)
+ lwz r6, IP_6(r1)
+ lwz r7, IP_7(r1)
+ lwz r8, IP_8(r1)
+ lwz r9, IP_9(r1)
+ lwz r10, IP_10(r1)
+ lwz r11, IP_11(r1)
+ lwz r12, IP_12(r1)
+ lwz r13, IP_13(r1)
+ lmw r28, IP_28(r1)
+ lwz r1, 0(r1)
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c b/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c
new file mode 100644
index 0000000000..ecfb4b96ca
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/ppccache.c
@@ -0,0 +1,61 @@
+/*
+ * PowerPC Cache enable routines
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
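+/*
+ * HID0 is SPR 0x3f0 (1008) on 6xx-class processors; the masks used
+ * below correspond to HID0[ICE] (0x00008000) and HID0[DCE]
+ * (0x00004000).
+ */
+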
+
+void powerpc_instruction_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * Enable the instruction cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00008000; /* Set ICE bit */
+
+ PPC_Set_HID0( value );
+}
+
+void powerpc_data_cache_enable ()
+{
+ unsigned32 value;
+
+ /*
+ * enable data cache
+ */
+
+ PPC_Get_HID0( value );
+
+ value |= 0x00004000; /* set DCE bit */
+
+ PPC_Set_HID0( value );
+}
+
diff --git a/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S b/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S
new file mode 100644
index 0000000000..b653152411
--- /dev/null
+++ b/c/src/lib/libbsp/powerpc/support/old_exception_processing/rtems.S
@@ -0,0 +1,132 @@
+/* rtems.S
+ *
+ * This file contains the single entry point code for
+ * the PowerPC implementation of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/rtems.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+ BEGIN_CODE
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in r11.
+ * This routine is used when RTEMS is linked by itself and placed
+ * in ROM. This routine is the first address in the ROM space for
+ * RTEMS. The user "calls" this address with the directive arguments
+ * in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ */
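+
+/*
+ * In rough C terms (illustrative only), with the directive number
+ * arriving in r11:
+ *
+ *   return (*_Entry_points[ directive ])( arguments... );
+ */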
+
+ ALIGN (4, 2)
+ PUBLIC_PROC (RTEMS)
+PROC (RTEMS):
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ mflr r0
+ stw r0, 8(r1)
+ stwu r1, -64(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r12, r12, r11
+
+ stw r2, 56(r1)
+ lwz r0, 0(r12)
+ mtlr r0
+ lwz r2, 4(r12)
+ lwz r11, 8(r12)
+ blrl
+ lwz r2, 56(r1)
+ addi r1, r1, 64
+ lwz r0, 8(r1)
+ mtlr r0
+#else
+ mflr r0
+ stw r0, 4(r1)
+ stwu r1, -16(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r11, r12, r11
+
+ stw r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ stw r13, 12(r1)
+#endif
+ mtlr r11
+ lwz r11, irqinfo-abase(r12)
+ lwz r2, 0(r11)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 4(r11)
+#endif
+ blrl
+ lwz r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 12(r1)
+#endif
+ addi r1, r1, 16
+ lwz r0, 4(r1)
+ mtlr r0
+#endif
+ blr
+
+
+ /* Addressability stuff */
+tabaddr:
+abase:
+ EXTERN_VAR (_Entry_points)
+Entry_points:
+ EXT_SYM_REF (_Entry_points)
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ EXTERN_VAR (_CPU_IRQ_info)
+irqinfo:
+ EXT_SYM_REF (_CPU_IRQ_info)
+#endif
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (RTEMS)
+#endif
+
+