summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--c/ACKNOWLEDGEMENTS7
-rw-r--r--c/src/exec/score/cpu/powerpc/README77
-rw-r--r--c/src/exec/score/cpu/powerpc/TODO3
-rw-r--r--c/src/exec/score/cpu/powerpc/cpu.c233
-rw-r--r--c/src/exec/score/cpu/powerpc/cpu.h1013
-rw-r--r--c/src/exec/score/cpu/powerpc/cpu_asm.s740
-rw-r--r--c/src/exec/score/cpu/powerpc/irq_stub.s215
-rw-r--r--c/src/exec/score/cpu/powerpc/ppc.h317
-rw-r--r--c/src/exec/score/cpu/powerpc/ppctypes.h74
-rw-r--r--c/src/exec/score/cpu/powerpc/rtems.s131
-rw-r--r--c/src/exec/score/inline/rtems/score/wkspace.inl2
-rw-r--r--c/src/exec/score/inline/wkspace.inl2
-rw-r--r--c/src/exec/score/macros/chain.inl2
-rw-r--r--c/src/exec/score/macros/heap.inl2
-rw-r--r--c/src/exec/score/macros/object.inl2
-rw-r--r--c/src/exec/score/macros/priority.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/chain.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/heap.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/object.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/priority.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/sysstate.inl2
-rw-r--r--c/src/exec/score/macros/rtems/score/wkspace.inl2
-rw-r--r--c/src/exec/score/macros/sysstate.inl2
-rw-r--r--c/src/exec/score/macros/wkspace.inl2
-rw-r--r--cpukit/score/inline/rtems/score/wkspace.inl2
-rw-r--r--cpukit/score/macros/rtems/score/chain.inl2
-rw-r--r--cpukit/score/macros/rtems/score/heap.inl2
-rw-r--r--cpukit/score/macros/rtems/score/object.inl2
-rw-r--r--cpukit/score/macros/rtems/score/priority.inl2
-rw-r--r--cpukit/score/macros/rtems/score/sysstate.inl2
-rw-r--r--cpukit/score/macros/rtems/score/wkspace.inl2
31 files changed, 2831 insertions, 21 deletions
diff --git a/c/ACKNOWLEDGEMENTS b/c/ACKNOWLEDGEMENTS
index fe9ca3a8a1..db40905347 100644
--- a/c/ACKNOWLEDGEMENTS
+++ b/c/ACKNOWLEDGEMENTS
@@ -47,6 +47,13 @@ The following persons/organizations have made contributions:
+ Bryce Cogswell (cogswell@cs.uoregon.edu) submitted the support for MS-DOS
as a development environment as well as djgpp/go32 as a target environment.
++ Andy Bray (andy@i-cubed.demon.co.uk) of I-CUBED Ltd. in Cambridge U.K.
+ for porting RTEMS to the PowerPC. This effort included support for the
+ IBM 403 as well as the Motorola 601, 603, and 604 variants. A special
+ thanks to Dom Latter (dom@i-cubed.demon.co.uk) for being an RTEMS
+ evangelist and promoting the use of RTEMS both at I-CUBED Ltd. as well
+ as within the Internet community as a whole.
+
Finally, the RTEMS project would like to thank those who have contributed
to the other free software efforts which RTEMS utilizes. The primary RTEMS
development environment is from the Free Software Foundation (the GNU
diff --git a/c/src/exec/score/cpu/powerpc/README b/c/src/exec/score/cpu/powerpc/README
new file mode 100644
index 0000000000..92af381241
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/README
@@ -0,0 +1,77 @@
+There are various issues regarding this port:
+
+
+
+1) Legal
+
+This port is written by Andrew Bray <andy@i-cubed.demon.co.uk>, and
+is copyright 1995 i-cubed ltd.
+
+Due to the current lack of a formal release note, this is a limited
+release to OAR, and other specific parties designated by OAR who
+are involved in porting RTEMS to the PowerPC architecture.
+
+This set of release files SHOULD (IMHO) include a cpu specific
+alignment exception handler. Ours is derived from IBM sample
+code. I am seeking a release from IBM for this file. In the
+mean time this file is excluded (but still included in the Makefile
+as a place-holder).
+
+
+
+2) CPU support.
+
+This release fully supports the IBM PPC403GA and PPC403GB processors.
+
+It has only been tested on the PPC403GA (using software floating
+point).
+
+With the gratefully acknowledged assistance of IBM and Blue Micro,
+this release contains code to support the following processors
+ PPC601, PPC603, PPC603e, PPC604, and PPC602.
+
+The support for these processors is incomplete, especially that for
+the PPC602 for which only sketchy data is currently available.
+
+
+
+3) Application Binary Interface
+
+In the context of RTEMS, the ABI is of interest for the following
+aspects:
+
+a) Register usage. Which registers are used to provide static variable
+ linkage, stack pointer etc.
+
+b) Function calling convention. How parameters are passed, how function
+ variables should be invoked, how values are returned, etc.
+
+c) Stack frame layout.
+
+I am aware of a number of ABIs for the PowerPC:
+
+a) The PowerOpen ABI. This is the original Power ABI used on the RS/6000.
+ This is the only ABI supported by versions of GCC before 2.7.0.
+
+b) The SVR4 ABI. This is the ABI defined by SunSoft for the Solaris port
+ to the PowerPC.
+
+c) The Embedded ABI. This is an embedded ABI for PowerPC use, which has no
+ operating system interface defined. It is promoted by SunSoft, Motorola,
+ and Cygnus Support. Cygnus are porting the GNU toolchain to this ABI.
+
+d) GCC 2.7.0. This compiler is partway along the road to supporting the EABI,
+ but is currently halfway in between.
+
+This port was built and tested using the PowerOpen ABI, with the following
+caveat: we used an ELF assembler and linker. So some attention may be required
+on the assembler files to get them through a traditional (XCOFF) PowerOpen
+assembler.
+
+This port contains support for the other ABIs, but this may prove to be incomplete
+as it is untested.
+
+In the long term, the RTEMS PowerPC port should move to the EABI as its primary
+or only port. This should wait on a true EABI version of GCC.
+
+Andrew Bray 18/8/1995
diff --git a/c/src/exec/score/cpu/powerpc/TODO b/c/src/exec/score/cpu/powerpc/TODO
new file mode 100644
index 0000000000..b72be4d767
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/TODO
@@ -0,0 +1,3 @@
+Todo list:
+
+Maybe decode external interrupts like the HPPA does.
diff --git a/c/src/exec/score/cpu/powerpc/cpu.c b/c/src/exec/score/cpu/powerpc/cpu.c
new file mode 100644
index 0000000000..ab358a2490
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/cpu.c
@@ -0,0 +1,233 @@
+/*
+ * PowerPC CPU Dependent Source
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/context.h>
+#include <rtems/thread.h>
+#include <rtems/wkspace.h>
+
+/*
+ * These are for testing purposes.
+ */
+#define Testing
+
+#ifdef Testing
+static unsigned32 msr;
+#ifdef ppc403
+static unsigned32 evpr;
+static unsigned32 exier;
+#endif
+#endif
+
+/*
+ * ppc_interrupt_level_to_msr
+ *
+ * This routine converts a two bit interrupt level to an MSR bit map.
+ */
+
+const unsigned32 _CPU_msrs[4] =
+ { PPC_MSR_0, PPC_MSR_1, PPC_MSR_2, PPC_MSR_3 };
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ */
+
+static void ppc_spurious(int, CPU_Interrupt_frame *);
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ proc_ptr handler = (proc_ptr)ppc_spurious;
+ int i;
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ register unsigned32 r2;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ register unsigned32 r13;
+
+ asm ("mr %0,13" : "=r" ((r13)) : "0" ((r13)));
+ _CPU_IRQ_info.Default_r13 = r13;
+#endif
+
+ asm ("mr %0,2" : "=r" ((r2)) : "0" ((r2)));
+ _CPU_IRQ_info.Default_r2 = r2;
+#endif
+
+ _CPU_IRQ_info.Nest_level = &_ISR_Nest_level;
+ _CPU_IRQ_info.Disable_level = &_Thread_Dispatch_disable_level;
+ _CPU_IRQ_info.Vector_table = _ISR_Vector_table;
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ _CPU_IRQ_info.Dispatch_r2 = ((unsigned32 *)_Thread_Dispatch)[1];
+#endif
+ _CPU_IRQ_info.Switch_necessary = &_Context_Switch_necessary;
+ _CPU_IRQ_info.Signal = &_ISR_Signals_to_thread_executing;
+
+ i = (int)&_CPU_IRQ_info;
+ asm volatile("mtsprg3 %0" : "=r" (i) : "0" (i));
+
+ i = PPC_MSR_INITIAL & ~PPC_MSR_DISABLE_MASK;
+ asm volatile("mtsprg2 %0" : "=r" (i) : "0" (i));
+
+#ifdef Testing
+ {
+ unsigned32 tmp;
+
+ asm volatile ("mfmsr %0" : "=r" (tmp));
+ msr = tmp;
+#ifdef ppc403
+ asm volatile ("mfevpr %0" : "=r" (tmp));
+ evpr = tmp;
+ asm volatile ("mfexier %0" : "=r" (tmp));
+ exier = tmp;
+ asm volatile ("mtevpr %0" :: "r" (0));
+#endif
+ }
+#endif
+
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ if ( cpu_table->spurious_handler )
+ handler = (proc_ptr)cpu_table->spurious_handler;
+
+ for (i = 0; i < PPC_INTERRUPT_MAX; i++)
+ _ISR_Vector_table[i] = handler;
+
+ _CPU_Table = *cpu_table;
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointer to isr entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler ? (rtems_isr_entry)new_handler :
+ _CPU_Table.spurious_handler ? (rtems_isr_entry)_CPU_Table.spurious_handler :
+ (rtems_isr_entry)ppc_spurious;
+}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 56;
+#else
+ _CPU_IRQ_info.Stack = _CPU_Interrupt_stack_high - 8;
+#endif
+}
+
+/* Handle a spurious interrupt */
+static void ppc_spurious(int v, CPU_Interrupt_frame *i)
+{
+#if 0
+ printf("Spurious interrupt on vector %d from %08.8x\n",
+ v, i->pc);
+#endif
+#ifdef ppc403
+ if (v == PPC_IRQ_EXTERNAL)
+ {
+ register int r = 0;
+
+ asm volatile("mtexier %0" : "=r" ((r)) : "0" ((r)));
+ }
+ else if (v == PPC_IRQ_PIT)
+ {
+ register int r = 0x08000000;
+
+ asm volatile("mttsr %0" : "=r" ((r)) : "0" ((r)));
+ }
+ else if (v == PPC_IRQ_FIT)
+ {
+ register int r = 0x04000000;
+
+ asm volatile("mttsr %0" : "=r" ((r)) : "0" ((r)));
+ }
+#endif
+}
+
+void _CPU_Fatal_error(unsigned32 _error)
+{
+#ifdef Testing
+ unsigned32 tmp;
+
+ tmp = msr;
+ asm volatile ("mtmsr %0" :: "r" (tmp));
+#ifdef ppc403
+ tmp = evpr;
+ asm volatile ("mtevpr %0" :: "r" (tmp));
+ tmp = exier;
+ asm volatile ("mtexier %0" :: "r" (tmp));
+#endif
+#endif
+ asm volatile ("mr 3, %0" : : "r" ((_error)));
+ asm volatile ("tweq 5,5");
+ asm volatile ("li 0,0; mtmsr 0");
+ while (1) ;
+}
diff --git a/c/src/exec/score/cpu/powerpc/cpu.h b/c/src/exec/score/cpu/powerpc/cpu.h
new file mode 100644
index 0000000000..82cad518e8
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/cpu.h
@@ -0,0 +1,1013 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the PowerPC
+ * processor.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.h:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/ppc.h> /* pick up machine definitions */
+#ifndef ASM
+struct CPU_Interrupt_frame;
+
+#include <rtems/ppctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ *         something calls _Thread_Enable_dispatch which in turn calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ *         one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? In unrolled each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+/*
+ * ACB: This is a lie, but it gets us a handle on a call to set up
+ * a variable derived from the top of the interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ *  If FALSE, then the memory is not allocated during initialization.
+ *
+ *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "PPC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( PPC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ *  a function which you would not think utilizes the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ *  disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+/*
+ * ACB Note: This could make debugging tricky..
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Internal_threads_Idle_thread_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ *  If TRUE, then the stack grows upward.
+ *  If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ *  is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ *  the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ */
+/*
+ * ACB Note: Levels are:
+ * 0: All maskable interrupts enabled
+ * 1: Other critical exceptions enabled
+ * 2: Machine check enabled
+ * 3: All maskable IRQs disabled
+ */
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000003
+
+/*
+ * Processor defined structures
+ *
+ * Examples structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ *  distinction between the caller/callee saves registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ *  structure will not be used or it simply consists of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ unsigned32 gpr1; /* Stack pointer for all */
+ unsigned32 gpr2; /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
+ unsigned32 gpr13; /* First non volatile PowerOpen, section ptr SVR4/EABI */
+ unsigned32 gpr14; /* Non volatile for all */
+ unsigned32 gpr15; /* Non volatile for all */
+ unsigned32 gpr16; /* Non volatile for all */
+ unsigned32 gpr17; /* Non volatile for all */
+ unsigned32 gpr18; /* Non volatile for all */
+ unsigned32 gpr19; /* Non volatile for all */
+ unsigned32 gpr20; /* Non volatile for all */
+ unsigned32 gpr21; /* Non volatile for all */
+ unsigned32 gpr22; /* Non volatile for all */
+ unsigned32 gpr23; /* Non volatile for all */
+ unsigned32 gpr24; /* Non volatile for all */
+ unsigned32 gpr25; /* Non volatile for all */
+ unsigned32 gpr26; /* Non volatile for all */
+ unsigned32 gpr27; /* Non volatile for all */
+ unsigned32 gpr28; /* Non volatile for all */
+ unsigned32 gpr29; /* Non volatile for all */
+ unsigned32 gpr30; /* Non volatile for all */
+ unsigned32 gpr31; /* Non volatile for all */
+ unsigned32 cr; /* PART of the CR is non volatile for all */
+ unsigned32 pc; /* Program counter/Link register */
+ unsigned32 msr; /* Initial interrupt level */
+} Context_Control;
+
+typedef struct {
+ /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
+ * procedure calls. However, this would mean that the interrupt
+ * frame had to hold f0-f13, and the fpscr. And as the majority
+ * of tasks will not have an FP context, we will save the whole
+ * context here.
+ */
+#if (PPC_HAS_DOUBLE == 1)
+ double f[32];
+ double fpscr;
+#else
+ float f[32];
+ float fpscr;
+#endif
+} Context_Control_fp;
+
+typedef struct CPU_Interrupt_frame {
+ unsigned32 stacklink; /* Ensure this is a real frame (also reg1 save) */
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ unsigned32 dummy[13]; /* Used by callees: PowerOpen ABI */
+#else
+ unsigned32 dummy[1]; /* Used by callees: SVR4/EABI */
+#endif
+ /* This is what is left out of the primary contexts */
+ unsigned32 gpr0;
+ unsigned32 gpr2; /* play safe */
+ unsigned32 gpr3;
+ unsigned32 gpr4;
+ unsigned32 gpr5;
+ unsigned32 gpr6;
+ unsigned32 gpr7;
+ unsigned32 gpr8;
+ unsigned32 gpr9;
+ unsigned32 gpr10;
+ unsigned32 gpr11;
+ unsigned32 gpr12;
+ unsigned32 gpr13; /* Play safe */
+ unsigned32 gpr28; /* For internal use by the IRQ handler */
+ unsigned32 gpr29; /* For internal use by the IRQ handler */
+ unsigned32 gpr30; /* For internal use by the IRQ handler */
+ unsigned32 gpr31; /* For internal use by the IRQ handler */
+ unsigned32 cr; /* Bits of this are volatile, so no-one may save */
+ unsigned32 ctr;
+ unsigned32 xer;
+ unsigned32 lr;
+ unsigned32 pc;
+ unsigned32 msr;
+ unsigned32 pad[3];
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the PowerPC processor specific parameters.
+ *
+ * NOTE: The interrupt_stack_size field is required if
+ * CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
+ *
+ * The pretasking_hook, predriver_hook, and postdriver_hook,
+ * and the do_zero_of_workspace fields are required on ALL CPUs.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+ unsigned32 clicks_per_usec; /* Timer clicks per microsecond */
+ unsigned32 serial_per_sec; /* Serial clocks per second */
+ boolean serial_external_clock;
+ boolean serial_xon_xoff;
+ boolean serial_cts_rts;
+ unsigned32 serial_rate;
+ unsigned32 timer_average_overhead; /* Average overhead of timer in ticks */
+ unsigned32 timer_least_valid; /* Least valid number from timer */
+ void (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
+} rtems_cpu_table;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+/* EXTERN Context_Control_fp _CPU_Null_fp_context; */
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this give the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+/* EXTERN void (*_CPU_Thread_dispatch_pointer)(); */
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+EXTERN struct {
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ unsigned32 Dispatch_r2;
+#else
+ unsigned32 Default_r2;
+#if (PPC_ABI != PPC_ABI_GCC27)
+ unsigned32 Default_r13;
+#endif
+#endif
+ unsigned32 *Nest_level;
+ unsigned32 *Disable_level;
+ void *Vector_table;
+ void *Stack;
+ boolean *Switch_necessary;
+ boolean *Signal;
+} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ *  If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (128)
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * system initialization thread. Remember that in a multiprocessor
+ *  system the system initialization thread becomes the MP server thread.
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX)
+
+/*
+ *  Should be large enough to run all RTEMS tests.  This ensures
+ * that a "reasonable" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*3)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT (PPC_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT (PPC_CACHE_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT (PPC_CACHE_ALIGNMENT)
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT (PPC_STACK_ALIGNMENT)
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _level.
+ */
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { \
+ asm volatile ( \
+ "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=r" ((_isr_cookie)) : "r" ((PPC_MSR_DISABLE_MASK)) \
+ ); \
+ }
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _level is not modified.
+ */
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ asm volatile ( "mtmsr %0" : \
+ "=r" ((_isr_cookie)) : "0" ((_isr_cookie))); \
+ }
+
+/*
+ * This temporarily restores the interrupt to _level before immediately
+ * disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _level is not
+ * modified.
+ */
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { \
+ asm volatile ( \
+ "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
+ "=r" ((_isr_cookie)) : \
+ "r" ((PPC_MSR_DISABLE_MASK)), "0" ((_isr_cookie)) \
+ ); \
+ }
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+#define _CPU_ISR_Set_level( new_level ) \
+ { \
+ register unsigned32 tmp; \
+ asm volatile ( \
+ "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" : \
+ "=r" ((tmp)) : \
+ "r" ((PPC_MSR_DISABLE_MASK)), "r" ((_CPU_msrs[new_level])), "0" ((tmp)) \
+ ); \
+ }
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ */
+
+#if PPC_ABI == PPC_ABI_POWEROPEN
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ { \
+ unsigned32 sp, *desc; \
+ \
+ sp = ((unsigned32)_stack_base) + (_size) - 56; \
+ *((unsigned32 *)sp) = 0; \
+ \
+ desc = (unsigned32 *)_entry_point; \
+ \
+ (_the_context)->msr = PPC_MSR_INITIAL | \
+ _CPU_msrs[ _isr ]; \
+ (_the_context)->pc = desc[0]; \
+ (_the_context)->gpr1 = sp; \
+ (_the_context)->gpr2 = desc[1]; \
+ }
+#endif
+#if PPC_ABI == PPC_ABI_SVR4
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ { \
+ unsigned32 sp, r13; \
+ \
+ sp = ((unsigned32)_stack_base) + (_size) - 8; \
+ *((unsigned32 *)sp) = 0; \
+ \
+ asm volatile ("mr %0, 13" : "=r" ((r13))); \
+ \
+ (_the_context->msr) = PPC_MSR_INITIAL | \
+ _CPU_msrs[ _isr ]; \
+ (_the_context->pc) = _entry_point; \
+ (_the_context->gpr1) = sp; \
+ (_the_context->gpr13) = r13; \
+ }
+#endif
+#if PPC_ABI == PPC_ABI_EABI
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ { \
+ unsigned32 sp, r2, r13; \
+ \
+ sp = ((unsigned32)_stack_base) + (_size) - 8; \
+ *((unsigned32 *)sp) = 0; \
+ \
+ asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13))); \
+ \
+ (_the_context)->msr = PPC_MSR_INITIAL | \
+ _CPU_msrs[ _isr ]; \
+ (_the_context->pc) = _entry_point; \
+ (_the_context->gpr1) = sp; \
+ (_the_context->gpr2) = r2; \
+ (_the_context->gpr13) = r13; \
+ }
+#endif
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which a "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _CPU_Fatal_error(_error)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variables in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
+ "1" ((_value))); \
+ }
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x80000000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* variables */
+
+extern const unsigned32 _CPU_msrs[4];
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to insure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so
+ * the code will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 swapped;
+
+ asm volatile("rlwimi %0,%1,8,24,31;"
+ "rlwimi %0,%1,24,16,23;"
+ "rlwimi %0,%1,8,8,15;"
+ "rlwimi %0,%1,24,0,7;" :
+ "=r" ((swapped)) : "r" ((value)));
+
+ return( swapped );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/exec/score/cpu/powerpc/cpu_asm.s b/c/src/exec/score/cpu/powerpc/cpu_asm.s
new file mode 100644
index 0000000000..eb6bf2b1e5
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/cpu_asm.s
@@ -0,0 +1,740 @@
+/* cpu_asm.s 1.0 - 95/08/08
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * cpu_asm.c,v 1.3 1995/06/07 02:40:23 joel Exp
+ */
+
+#include "asm.h"
+
+/*
+ * Offsets for various Contexts
+ */
+ .set GP_1, 0
+ .set GP_2, (GP_1 + 4)
+ .set GP_13, (GP_2 + 4)
+ .set GP_14, (GP_13 + 4)
+
+ .set GP_15, (GP_14 + 4)
+ .set GP_16, (GP_15 + 4)
+ .set GP_17, (GP_16 + 4)
+ .set GP_18, (GP_17 + 4)
+
+ .set GP_19, (GP_18 + 4)
+ .set GP_20, (GP_19 + 4)
+ .set GP_21, (GP_20 + 4)
+ .set GP_22, (GP_21 + 4)
+
+ .set GP_23, (GP_22 + 4)
+ .set GP_24, (GP_23 + 4)
+ .set GP_25, (GP_24 + 4)
+ .set GP_26, (GP_25 + 4)
+
+ .set GP_27, (GP_26 + 4)
+ .set GP_28, (GP_27 + 4)
+ .set GP_29, (GP_28 + 4)
+ .set GP_30, (GP_29 + 4)
+
+ .set GP_31, (GP_30 + 4)
+ .set GP_CR, (GP_31 + 4)
+ .set GP_PC, (GP_CR + 4)
+ .set GP_MSR, (GP_PC + 4)
+
+#if (PPC_HAS_DOUBLE == 1)
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 8)
+ .set FP_2, (FP_1 + 8)
+ .set FP_3, (FP_2 + 8)
+ .set FP_4, (FP_3 + 8)
+ .set FP_5, (FP_4 + 8)
+ .set FP_6, (FP_5 + 8)
+ .set FP_7, (FP_6 + 8)
+ .set FP_8, (FP_7 + 8)
+ .set FP_9, (FP_8 + 8)
+ .set FP_10, (FP_9 + 8)
+ .set FP_11, (FP_10 + 8)
+ .set FP_12, (FP_11 + 8)
+ .set FP_13, (FP_12 + 8)
+ .set FP_14, (FP_13 + 8)
+ .set FP_15, (FP_14 + 8)
+ .set FP_16, (FP_15 + 8)
+ .set FP_17, (FP_16 + 8)
+ .set FP_18, (FP_17 + 8)
+ .set FP_19, (FP_18 + 8)
+ .set FP_20, (FP_19 + 8)
+ .set FP_21, (FP_20 + 8)
+ .set FP_22, (FP_21 + 8)
+ .set FP_23, (FP_22 + 8)
+ .set FP_24, (FP_23 + 8)
+ .set FP_25, (FP_24 + 8)
+ .set FP_26, (FP_25 + 8)
+ .set FP_27, (FP_26 + 8)
+ .set FP_28, (FP_27 + 8)
+ .set FP_29, (FP_28 + 8)
+ .set FP_30, (FP_29 + 8)
+ .set FP_31, (FP_30 + 8)
+ .set FP_FPSCR, (FP_31 + 8)
+#else
+ .set FP_0, 0
+ .set FP_1, (FP_0 + 4)
+ .set FP_2, (FP_1 + 4)
+ .set FP_3, (FP_2 + 4)
+ .set FP_4, (FP_3 + 4)
+ .set FP_5, (FP_4 + 4)
+ .set FP_6, (FP_5 + 4)
+ .set FP_7, (FP_6 + 4)
+ .set FP_8, (FP_7 + 4)
+ .set FP_9, (FP_8 + 4)
+ .set FP_10, (FP_9 + 4)
+ .set FP_11, (FP_10 + 4)
+ .set FP_12, (FP_11 + 4)
+ .set FP_13, (FP_12 + 4)
+ .set FP_14, (FP_13 + 4)
+ .set FP_15, (FP_14 + 4)
+ .set FP_16, (FP_15 + 4)
+ .set FP_17, (FP_16 + 4)
+ .set FP_18, (FP_17 + 4)
+ .set FP_19, (FP_18 + 4)
+ .set FP_20, (FP_19 + 4)
+ .set FP_21, (FP_20 + 4)
+ .set FP_22, (FP_21 + 4)
+ .set FP_23, (FP_22 + 4)
+ .set FP_24, (FP_23 + 4)
+ .set FP_25, (FP_24 + 4)
+ .set FP_26, (FP_25 + 4)
+ .set FP_27, (FP_26 + 4)
+ .set FP_28, (FP_27 + 4)
+ .set FP_29, (FP_28 + 4)
+ .set FP_30, (FP_29 + 4)
+ .set FP_31, (FP_30 + 4)
+ .set FP_FPSCR, (FP_31 + 4)
+#endif
+
+ .set IP_LINK, 0
+#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ .set IP_0, (IP_LINK + 56)
+#else
+ .set IP_0, (IP_LINK + 8)
+#endif
+ .set IP_2, (IP_0 + 4)
+
+ .set IP_3, (IP_2 + 4)
+ .set IP_4, (IP_3 + 4)
+ .set IP_5, (IP_4 + 4)
+ .set IP_6, (IP_5 + 4)
+
+ .set IP_7, (IP_6 + 4)
+ .set IP_8, (IP_7 + 4)
+ .set IP_9, (IP_8 + 4)
+ .set IP_10, (IP_9 + 4)
+
+ .set IP_11, (IP_10 + 4)
+ .set IP_12, (IP_11 + 4)
+ .set IP_13, (IP_12 + 4)
+ .set IP_28, (IP_13 + 4)
+
+ .set IP_29, (IP_28 + 4)
+ .set IP_30, (IP_29 + 4)
+ .set IP_31, (IP_30 + 4)
+ .set IP_CR, (IP_31 + 4)
+
+ .set IP_CTR, (IP_CR + 4)
+ .set IP_XER, (IP_CTR + 4)
+ .set IP_LR, (IP_XER + 4)
+ .set IP_PC, (IP_LR + 4)
+
+ .set IP_MSR, (IP_PC + 4)
+ .set IP_END, (IP_MSR + 16)
+
+ /* _CPU_IRQ_info offsets */
+ /* These must be in this order */
+ .set Nest_level, 0
+ .set Disable_level, 4
+ .set Vector_table, 8
+ .set Stack, 12
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ .set Dispatch_r2, 16
+ .set Switch_necessary, 20
+#else
+ .set Default_r2, 16
+#if (PPC_ABI != PPC_ABI_GCC27)
+ .set Default_r13, 20
+ .set Switch_necessary, 24
+#else
+ .set Switch_necessary, 20
+#endif
+#endif
+ .set Signal, Switch_necessary + 4
+
+ BEGIN_CODE
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ stfd f0, FP_0(r3)
+ stfd f1, FP_1(r3)
+ stfd f2, FP_2(r3)
+ stfd f3, FP_3(r3)
+ stfd f4, FP_4(r3)
+ stfd f5, FP_5(r3)
+ stfd f6, FP_6(r3)
+ stfd f7, FP_7(r3)
+ stfd f8, FP_8(r3)
+ stfd f9, FP_9(r3)
+ stfd f10, FP_10(r3)
+ stfd f11, FP_11(r3)
+ stfd f12, FP_12(r3)
+ stfd f13, FP_13(r3)
+ stfd f14, FP_14(r3)
+ stfd f15, FP_15(r3)
+ stfd f16, FP_16(r3)
+ stfd f17, FP_17(r3)
+ stfd f18, FP_18(r3)
+ stfd f19, FP_19(r3)
+ stfd f20, FP_20(r3)
+ stfd f21, FP_21(r3)
+ stfd f22, FP_22(r3)
+ stfd f23, FP_23(r3)
+ stfd f24, FP_24(r3)
+ stfd f25, FP_25(r3)
+ stfd f26, FP_26(r3)
+ stfd f27, FP_27(r3)
+ stfd f28, FP_28(r3)
+ stfd f29, FP_29(r3)
+ stfd f30, FP_30(r3)
+ stfd f31, FP_31(r3)
+ mffs f2
+ stfd f2, FP_FPSCR(r3)
+#else
+ stfs f0, FP_0(r3)
+ stfs f1, FP_1(r3)
+ stfs f2, FP_2(r3)
+ stfs f3, FP_3(r3)
+ stfs f4, FP_4(r3)
+ stfs f5, FP_5(r3)
+ stfs f6, FP_6(r3)
+ stfs f7, FP_7(r3)
+ stfs f8, FP_8(r3)
+ stfs f9, FP_9(r3)
+ stfs f10, FP_10(r3)
+ stfs f11, FP_11(r3)
+ stfs f12, FP_12(r3)
+ stfs f13, FP_13(r3)
+ stfs f14, FP_14(r3)
+ stfs f15, FP_15(r3)
+ stfs f16, FP_16(r3)
+ stfs f17, FP_17(r3)
+ stfs f18, FP_18(r3)
+ stfs f19, FP_19(r3)
+ stfs f20, FP_20(r3)
+ stfs f21, FP_21(r3)
+ stfs f22, FP_22(r3)
+ stfs f23, FP_23(r3)
+ stfs f24, FP_24(r3)
+ stfs f25, FP_25(r3)
+ stfs f26, FP_26(r3)
+ stfs f27, FP_27(r3)
+ stfs f28, FP_28(r3)
+ stfs f29, FP_29(r3)
+ stfs f30, FP_30(r3)
+ stfs f31, FP_31(r3)
+ mffs f2
+ stfs f2, FP_FPSCR(r3)
+#endif
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+#if (PPC_HAS_FPU == 1)
+ lwz r3, 0(r3)
+#if (PPC_HAS_DOUBLE == 1)
+ lfd f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfd f0, FP_0(r3)
+ lfd f1, FP_1(r3)
+ lfd f2, FP_2(r3)
+ lfd f3, FP_3(r3)
+ lfd f4, FP_4(r3)
+ lfd f5, FP_5(r3)
+ lfd f6, FP_6(r3)
+ lfd f7, FP_7(r3)
+ lfd f8, FP_8(r3)
+ lfd f9, FP_9(r3)
+ lfd f10, FP_10(r3)
+ lfd f11, FP_11(r3)
+ lfd f12, FP_12(r3)
+ lfd f13, FP_13(r3)
+ lfd f14, FP_14(r3)
+ lfd f15, FP_15(r3)
+ lfd f16, FP_16(r3)
+ lfd f17, FP_17(r3)
+ lfd f18, FP_18(r3)
+ lfd f19, FP_19(r3)
+ lfd f20, FP_20(r3)
+ lfd f21, FP_21(r3)
+ lfd f22, FP_22(r3)
+ lfd f23, FP_23(r3)
+ lfd f24, FP_24(r3)
+ lfd f25, FP_25(r3)
+ lfd f26, FP_26(r3)
+ lfd f27, FP_27(r3)
+ lfd f28, FP_28(r3)
+ lfd f29, FP_29(r3)
+ lfd f30, FP_30(r3)
+ lfd f31, FP_31(r3)
+#else
+ lfs f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ lfs f0, FP_0(r3)
+ lfs f1, FP_1(r3)
+ lfs f2, FP_2(r3)
+ lfs f3, FP_3(r3)
+ lfs f4, FP_4(r3)
+ lfs f5, FP_5(r3)
+ lfs f6, FP_6(r3)
+ lfs f7, FP_7(r3)
+ lfs f8, FP_8(r3)
+ lfs f9, FP_9(r3)
+ lfs f10, FP_10(r3)
+ lfs f11, FP_11(r3)
+ lfs f12, FP_12(r3)
+ lfs f13, FP_13(r3)
+ lfs f14, FP_14(r3)
+ lfs f15, FP_15(r3)
+ lfs f16, FP_16(r3)
+ lfs f17, FP_17(r3)
+ lfs f18, FP_18(r3)
+ lfs f19, FP_19(r3)
+ lfs f20, FP_20(r3)
+ lfs f21, FP_21(r3)
+ lfs f22, FP_22(r3)
+ lfs f23, FP_23(r3)
+ lfs f24, FP_24(r3)
+ lfs f25, FP_25(r3)
+ lfs f26, FP_26(r3)
+ lfs f27, FP_27(r3)
+ lfs f28, FP_28(r3)
+ lfs f29, FP_29(r3)
+ lfs f30, FP_30(r3)
+ lfs f31, FP_31(r3)
+#endif
+#endif
+ blr
+
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+ sync
+ isync
+#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */
+ stw r1, GP_1(r3)
+ lwz r1, GP_1(r4)
+ stw r2, GP_2(r3)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ stmw r13, GP_13(r3)
+ lmw r13, GP_13(r4)
+#else
+ stw r13, GP_13(r3)
+ lwz r13, GP_13(r4)
+ stw r14, GP_14(r3)
+ lwz r14, GP_14(r4)
+ stw r15, GP_15(r3)
+ lwz r15, GP_15(r4)
+ stw r16, GP_16(r3)
+ lwz r16, GP_16(r4)
+ stw r17, GP_17(r3)
+ lwz r17, GP_17(r4)
+ stw r18, GP_18(r3)
+ lwz r18, GP_18(r4)
+ stw r19, GP_19(r3)
+ lwz r19, GP_19(r4)
+ stw r20, GP_20(r3)
+ lwz r20, GP_20(r4)
+ stw r21, GP_21(r3)
+ lwz r21, GP_21(r4)
+ stw r22, GP_22(r3)
+ lwz r22, GP_22(r4)
+ stw r23, GP_23(r3)
+ lwz r23, GP_23(r4)
+ stw r24, GP_24(r3)
+ lwz r24, GP_24(r4)
+ stw r25, GP_25(r3)
+ lwz r25, GP_25(r4)
+ stw r26, GP_26(r3)
+ lwz r26, GP_26(r4)
+ stw r27, GP_27(r3)
+ lwz r27, GP_27(r4)
+ stw r28, GP_28(r3)
+ lwz r28, GP_28(r4)
+ stw r29, GP_29(r3)
+ lwz r29, GP_29(r4)
+ stw r30, GP_30(r3)
+ lwz r30, GP_30(r4)
+ stw r31, GP_31(r3)
+ lwz r31, GP_31(r4)
+#endif
+ mfcr r5
+ stw r5, GP_CR(r3)
+ lwz r5, GP_CR(r4)
+ mflr r6
+ mtcrf 255, r5
+ stw r6, GP_PC(r3)
+ lwz r6, GP_PC(r4)
+ mfmsr r7
+ mtlr r6
+ stw r7, GP_MSR(r3)
+ lwz r7, GP_MSR(r4)
+ mtmsr r7
+#endif
+#if (PPC_CACHE_ALIGNMENT == 16)
+ /* This assumes that all the registers are in the given order */
+ li r5, 16
+ addi r3,r3,-4
+ dcbz r5, r3
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_14+4
+ dcbz r5, r3
+ addi r3, r3, GP_18-GP_14
+ dcbz r5, r3
+ addi r3, r3, GP_22-GP_18
+ dcbz r5, r3
+ addi r3, r3, GP_26-GP_22
+ dcbz r5, r3
+ stmw r13, GP_13-GP_26(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stwu r14, GP_14+4(r3)
+ dcbz r5, r3
+ stw r15, GP_15-GP_14(r3)
+ stw r16, GP_16-GP_14(r3)
+ stw r17, GP_17-GP_14(r3)
+ stwu r18, GP_18-GP_14(r3)
+ dcbz r5, r3
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stwu r22, GP_22-GP_18(r3)
+ dcbz r5, r3
+ stw r23, GP_23-GP_22(r3)
+ stw r24, GP_24-GP_22(r3)
+ stw r25, GP_25-GP_22(r3)
+ stwu r26, GP_26-GP_22(r3)
+ dcbz r5, r3
+ stw r27, GP_27-GP_26(r3)
+ stw r28, GP_28-GP_26(r3)
+ stw r29, GP_29-GP_26(r3)
+ stw r30, GP_30-GP_26(r3)
+ stw r31, GP_31-GP_26(r3)
+#endif
+ dcbt r0, r4
+ mfcr r6
+ stw r6, GP_CR-GP_26(r3)
+ mflr r7
+ stw r7, GP_PC-GP_26(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_26(r3)
+
+ dcbt r5, r4
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_15
+ dcbt r5, r4
+ addi r4, r4, GP_19-GP_15
+ dcbt r5, r4
+ addi r4, r4, GP_23-GP_19
+ dcbt r5, r4
+ addi r4, r4, GP_27-GP_23
+ dcbt r5, r4
+ lmw r13, GP_13-GP_27(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwzu r15, GP_15(r4)
+ dcbt r5, r4
+ lwz r16, GP_16-GP_15(r4)
+ lwz r17, GP_17-GP_15(r4)
+ lwz r18, GP_18-GP_15(r4)
+ lwzu r19, GP_19-GP_15(r4)
+ dcbt r5, r4
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+ dcbt r5, r4
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+ dcbt r5, r4
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+ dcbz r5, r3
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+ dcbz r5, r3
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+ dcbz r5, r3
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+ dcbt r0, r4
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+ dcbt r5, r4
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+ dcbt r5, r4
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+ dcbt r5, r4
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ lwz r5, GP_CR(r3)
+ lwz r6, GP_PC(r3)
+ lwz r7, GP_MSR(r3)
+ mtcrf 255, r5
+ mtlr r6
+ mtmsr r7
+ lwz r1, GP_1(r3)
+ lwz r2, GP_2(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ lmw r13, GP_13(r3)
+#else
+ lwz r13, GP_13(r3)
+ lwz r14, GP_14(r3)
+ lwz r15, GP_15(r3)
+ lwz r16, GP_16(r3)
+ lwz r17, GP_17(r3)
+ lwz r18, GP_18(r3)
+ lwz r19, GP_19(r3)
+ lwz r20, GP_20(r3)
+ lwz r21, GP_21(r3)
+ lwz r22, GP_22(r3)
+ lwz r23, GP_23(r3)
+ lwz r24, GP_24(r3)
+ lwz r25, GP_25(r3)
+ lwz r26, GP_26(r3)
+ lwz r27, GP_27(r3)
+ lwz r28, GP_28(r3)
+ lwz r29, GP_29(r3)
+ lwz r30, GP_30(r3)
+ lwz r31, GP_31(r3)
+#endif
+
+ blr
+
+/* Individual interrupt prologues look like this:
+ * mtsprg{0,1} r0
+ * mfsprg2 r0
+ * mtmsr r0
+ * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
+ * #if (PPC_HAS_FPU)
+ * stwu r1, -(20*4 + 18*8 + IP_END)(r1)
+ * #else
+ * stwu r1, -(20*4 + IP_END)(r1)
+ * #endif
+ * #else
+ * stwu r1, -(IP_END)(r1)
+ * #endif
+ * mfsprg{0,1} r0
+ * stw r0, IP_0(r1)
+ *
+ * li r0, vectornum
+ * b PROC (_ISR_Handler{,C})
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_Handler)
+PROC (_ISR_Handler):
+#define LABEL(x) x
+#define MTSAVE(x) mtsprg0 x
+#define MFSAVE(x) mfsprg0 x
+ #include "irq_stub.s"
+ rfi
+
+#if (PPC_HAS_RFCI == 1)
+/* void __ISR_HandlerC()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * For critical interrupts
+ *
+ */
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_ISR_HandlerC)
+PROC (_ISR_HandlerC):
+#undef LABEL
+#undef MTSAVE
+#undef MFSAVE
+#define LABEL(x) x##_C
+#define MTSAVE(x) mtsprg1 x
+#define MFSAVE(x) mfsprg1 x
+ #include "irq_stub.s"
+ rfci
+#endif
+
+/* PowerOpen descriptors for indirect function calls.
+ */
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (_CPU_Context_save_fp)
+ DESCRIPTOR (_CPU_Context_restore_fp)
+ DESCRIPTOR (_CPU_Context_switch)
+ DESCRIPTOR (_CPU_Context_restore)
+ DESCRIPTOR (_ISR_Handler)
+#if (PPC_HAS_RFCI == 1)
+ DESCRIPTOR (_ISR_HandlerC)
+#endif
+#endif
diff --git a/c/src/exec/score/cpu/powerpc/irq_stub.s b/c/src/exec/score/cpu/powerpc/irq_stub.s
new file mode 100644
index 0000000000..92e6b43dd2
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/irq_stub.s
@@ -0,0 +1,215 @@
+/* irq_stub.s 1.0 - 95/08/08
+ *
+ * This file contains the interrupt handler assembly code for the PowerPC
+ * implementation of RTEMS. It is #included from cpu_asm.s.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ */
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ * The vector number is in r0. R0 has already been stacked.
+ *
+ */
+ /* Finish off the interrupt frame */
+ stw r2, IP_2(r1)
+ stw r3, IP_3(r1)
+ stw r4, IP_4(r1)
+ stw r5, IP_5(r1)
+ stw r6, IP_6(r1)
+ stw r7, IP_7(r1)
+ stw r8, IP_8(r1)
+ stw r9, IP_9(r1)
+ stw r10, IP_10(r1)
+ stw r11, IP_11(r1)
+ stw r12, IP_12(r1)
+ stw r13, IP_13(r1)
+ stmw r28, IP_28(r1)
+ mfcr r5
+ mfctr r6
+ mfxer r7
+ mflr r8
+ mfsrr0 r9
+ mfsrr1 r10
+ /* Establish addressing */
+ mfsprg3 r11
+ dcbt r0, r11
+ stw r5, IP_CR(r1)
+ stw r6, IP_CTR(r1)
+ stw r7, IP_XER(r1)
+ stw r8, IP_LR(r1)
+ stw r9, IP_PC(r1)
+ stw r10, IP_MSR(r1)
+
+ lwz r30, Vector_table(r11)
+ slwi r4,r0,2
+ lwz r28, Nest_level(r11)
+ add r4, r4, r30
+
+ lwz r30, 0(r28)
+ mr r3, r0
+ lwz r31, Stack(r11)
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ */
+ /* Switch stacks, here we must prevent ALL interrupts */
+ cmpwi r30, 0
+ lwz r29, Disable_level(r11)
+ subf r31,r1,r31
+ bne LABEL (nested)
+ stwux r1,r1,r31
+LABEL (nested):
+ /*
+ * _ISR_Nest_level++;
+ */
+ lwz r31, 0(r29)
+ addi r30,r30,1
+ stw r30,0(r28)
+ /* From here on out, interrupts can be re-enabled. RTEMS
+ * convention says not.
+ */
+ lwz r4,0(r4)
+ /*
+ * _Thread_Dispatch_disable_level++;
+ */
+ addi r31,r31,1
+ stw r31, 0(r29)
+ /*
+ * (*_ISR_Vector_table[ vector ])( vector );
+ */
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r6,0(r4)
+ lwz r2,4(r4)
+ mtlr r6
+ lwz r11,8(r4)
+#endif
+#if (PPC_ABI == PPC_ABI_GCC27)
+ lwz r2, Default_r2(r11)
+ mtlr r4
+ lwz r2, 0(r2)
+#endif
+#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI)
+ mtlr r4
+ lwz r2, Default_r2(r11)
+ lwz r13, Default_r13(r11)
+ lwz r2, 0(r2)
+ lwz r13, 0(r13)
+#endif
+ mr r4,r1
+ blrl
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+
+ /* We must re-disable the interrupts */
+ mfsprg3 r11
+ mfsprg2 r0
+ mtmsr r0
+
+ /*
+ * if (--Thread_Dispatch_disable,--_ISR_Nest_level)
+ * goto easy_exit;
+ */
+ addi r30, r30, -1
+ cmpwi r30, 0
+ addi r31, r31, -1
+ stw r30, 0(r28)
+ stw r31, 0(r29)
+ bne LABEL (easy_exit)
+
+ lwz r30, Switch_necessary(r11)
+
+ /*
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ */
+ lwz r1,0(r1)
+ lwz r30, 0(r30)
+ lwz r31, Signal(r11)
+
+ /*
+ * if ( _Context_Switch_necessary )
+ * goto switch
+ */
+ cmpwi r30, 0
+ lwz r28, 0(r31)
+ bne LABEL (switch)
+ /*
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto easy_exit
+ * _ISR_Signals_to_thread_executing = 0;
+ */
+ cmpwi r28, 0
+ li r6,0
+ beq LABEL (easy_exit)
+ stw r6, 0(r31)
+
+ /*
+ * switch:
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ */
+LABEL (switch):
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ lwz r2, Dispatch_r2(r11)
+#else
+ /* R2 and R13 still hold their values from the last call */
+#endif
+ bl PROC (_Thread_Dispatch)
+ /* NOP marker for debuggers */
+ or r6,r6,r6
+ /*
+ * prepare to get out of interrupt
+ */
+ /* Re-disable IRQs */
+ mfsprg2 r0
+ mtmsr r0
+ /*
+ * easy_exit:
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
+LABEL (easy_exit):
+ lwz r5, IP_CR(r1)
+ lwz r6, IP_CTR(r1)
+ lwz r7, IP_XER(r1)
+ lwz r8, IP_LR(r1)
+ lwz r9, IP_PC(r1)
+ lwz r10, IP_MSR(r1)
+ mtcrf 255,r5
+ mtctr r6
+ mtxer r7
+ mtlr r8
+ mtsrr0 r9
+ mtsrr1 r10
+ lwz r0, IP_0(r1)
+ lwz r2, IP_2(r1)
+ lwz r3, IP_3(r1)
+ lwz r4, IP_4(r1)
+ lwz r5, IP_5(r1)
+ lwz r6, IP_6(r1)
+ lwz r7, IP_7(r1)
+ lwz r8, IP_8(r1)
+ lwz r9, IP_9(r1)
+ lwz r10, IP_10(r1)
+ lwz r11, IP_11(r1)
+ lwz r12, IP_12(r1)
+ lwz r13, IP_13(r1)
+ lmw r28, IP_28(r1)
+ lwz r1, 0(r1)
diff --git a/c/src/exec/score/cpu/powerpc/ppc.h b/c/src/exec/score/cpu/powerpc/ppc.h
new file mode 100644
index 0000000000..76f3129c2e
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/ppc.h
@@ -0,0 +1,317 @@
+/* ppc.h
+ *
+ * This file contains definitions for the IBM/Motorola PowerPC
+ * family members.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/no_cpu.h:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ *
+ * Note:
+ * This file is included by both C and assembler code ( -DASM )
+ *
+ */
+
+#ifndef _INCLUDE_PPC_h
+#define _INCLUDE_PPC_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#ifdef ppc
+#undef ppc
+#endif
+#define ppc
+
+#ifdef REPLACE_THIS_WITH_THE_CPU_MODEL
+#undef REPLACE_THIS_WITH_THE_CPU_MODEL
+#endif
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+
+#ifdef REPLACE_THIS_WITH_THE_BSP
+#undef REPLACE_THIS_WITH_THE_BSP
+#endif
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the "no cpu"
+ * family when executing in protected mode. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ */
+
+#if defined(ppc403)
+
+#define RTEMS_MODEL_NAME "PowerPC 403"
+
+#define PPC_ALIGNMENT 4
+#define PPC_CACHE_ALIGNMENT 16
+#define PPC_CACHE_ALIGN_POWER 4
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 0
+#define PPC_HAS_DOUBLE 0
+#define PPC_HAS_RFCI 1
+#define PPC_MSR_DISABLE_MASK 0x00029200
+#define PPC_MSR_INITIAL 0x00000000
+#define PPC_INIT_FPSCR 0x00000000
+#define PPC_USE_MULTIPLE 1
+#define PPC_I_CACHE 2048
+#define PPC_D_CACHE 1024
+
+#define PPC_MSR_0 0x00029200
+#define PPC_MSR_1 0x00021200
+#define PPC_MSR_2 0x00001000
+#define PPC_MSR_3 0x00000000
+
+#elif defined(ppc601)
+
+#define RTEMS_MODEL_NAME "PowerPC 601"
+
+#define PPC_ALIGNMENT 8
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_CACHE_ALIGN_POWER 5
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 1
+#define PPC_HAS_DOUBLE 1
+#define PPC_HAS_RFCI 0
+#define PPC_MSR_DISABLE_MASK 0x00009900
+#define PPC_MSR_INITIAL 0x00002000
+#define PPC_INIT_FPSCR 0x000000f8
+#define PPC_USE_MULTIPLE 1
+#define PPC_I_CACHE 0
+#define PPC_D_CACHE 32768
+
+#define PPC_MSR_0 0x00009900
+#define PPC_MSR_1 0x00001000
+#define PPC_MSR_2 0x00001000
+#define PPC_MSR_3 0x00000000
+
+#elif defined(ppc602)
+
+#define RTEMS_MODEL_NAME "PowerPC 602"
+
+#define PPC_ALIGNMENT 4
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_CACHE_ALIGN_POWER 5
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 1
+#define PPC_HAS_DOUBLE 0
+#define PPC_HAS_RFCI 0
+#define PPC_MSR_DISABLE_MASK
+#define PPC_MSR_INITIAL
+#define PPC_INIT_FPSCR
+#define PPC_USE_MULTIPLE 0
+#define PPC_I_CACHE 4096
+#define PPC_D_CACHE 4096
+
+#elif defined(ppc603)
+
+#define RTEMS_MODEL_NAME "PowerPC 603"
+
+#define PPC_ALIGNMENT 8
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_CACHE_ALIGN_POWER 5
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 1
+#define PPC_HAS_DOUBLE 1
+#define PPC_HAS_RFCI 0
+#define PPC_MSR_DISABLE_MASK 0x00009900
+#define PPC_MSR_INITIAL 0x00002000
+#define PPC_INIT_FPSCR 0x000000f8
+#define PPC_USE_MULTIPLE 0
+#define PPC_I_CACHE 8192
+#define PPC_D_CACHE 8192
+
+#define PPC_MSR_0 0x00009900
+#define PPC_MSR_1 0x00001000
+#define PPC_MSR_2 0x00001000
+#define PPC_MSR_3 0x00000000
+
+#elif defined(ppc603e)
+
+#define RTEMS_MODEL_NAME "PowerPC 603e"
+
+#define PPC_ALIGNMENT 8
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_CACHE_ALIGN_POWER 5
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 1
+#define PPC_HAS_DOUBLE 1
+#define PPC_HAS_RFCI 0
+#define PPC_MSR_DISABLE_MASK 0x00009900
+#define PPC_MSR_INITIAL 0x00002000
+#define PPC_INIT_FPSCR 0x000000f8
+#define PPC_USE_MULTIPLE 0
+#define PPC_I_CACHE 16384
+#define PPC_D_CACHE 16384
+
+#define PPC_MSR_0 0x00009900
+#define PPC_MSR_1 0x00001000
+#define PPC_MSR_2 0x00001000
+#define PPC_MSR_3 0x00000000
+
+#elif defined(ppc604)
+
+#define RTEMS_MODEL_NAME "PowerPC 604"
+
+#define PPC_ALIGNMENT 8
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_CACHE_ALIGN_POWER 5
+#define PPC_INTERRUPT_MAX 16
+#define PPC_HAS_FPU 1
+#define PPC_HAS_DOUBLE 1
+#define PPC_HAS_RFCI 0
+#define PPC_MSR_DISABLE_MASK 0x00009900
+#define PPC_MSR_INITIAL 0x00002000
+#define PPC_INIT_FPSCR 0x000000f8
+#define PPC_USE_MULTIPLE 0
+#define PPC_I_CACHE 16384
+#define PPC_D_CACHE 16384
+
+#define PPC_MSR_0 0x00009900
+#define PPC_MSR_1 0x00001000
+#define PPC_MSR_2 0x00001000
+#define PPC_MSR_3 0x00000000
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Application binary interfaces.
+ * PPC_ABI MUST be defined as one of these.
+ * Only PPC_ABI_POWEROPEN is currently fully supported.
+ * Only EABI will be supported in the end when
+ * the tools are there.
+ * Only big endian is currently supported.
+ */
+/*
+ * PowerOpen ABI. This is Andy's hack of the
+ * PowerOpen ABI to ELF. ELF rather than a
+ * XCOFF assembler is used. This may work
+ * if PPC_ASM == PPC_ASM_XCOFF is defined.
+ */
+#define PPC_ABI_POWEROPEN 0
+/*
+ * GCC 2.7.0 munched version of EABI, with
+ * PowerOpen calling convention and stack frames,
+ * but EABI style indirect function calls.
+ */
+#define PPC_ABI_GCC27 1
+/*
+ * SVR4 ABI
+ */
+#define PPC_ABI_SVR4 2
+/*
+ * Embedded ABI
+ */
+#define PPC_ABI_EABI 3
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+#define PPC_STACK_ALIGNMENT 8
+#elif (PPC_ABI == PPC_ABI_GCC27)
+#define PPC_STACK_ALIGNMENT 8
+#elif (PPC_ABI == PPC_ABI_SVR4)
+#define PPC_STACK_ALIGNMENT 16
+#elif (PPC_ABI == PPC_ABI_EABI)
+#define PPC_STACK_ALIGNMENT 8
+#else
+#error "PPC_ABI is not properly defined"
+#endif
+#ifndef PPC_ABI
+#error "PPC_ABI is not properly defined"
+#endif
+
+/*
+ * Assemblers.
+ * PPC_ASM MUST be defined as one of these.
+ * Only PPC_ABI_ELF is currently fully supported.
+ */
+/*
+ * ELF assembler. Currently used for all ABIs.
+ */
+#define PPC_ASM_ELF 0
+/*
+ * XCOFF assembler, may be needed for PowerOpen ABI.
+ */
+#define PPC_ASM_XCOFF 1
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "PowerPC"
+
+/*
+ * Interrupt vectors.
+ */
+/* Machine check */
+#define PPC_IRQ_MCHECK 0
+/* Protection violation */
+#define PPC_IRQ_PROTECT 1
+/* External interrupt */
+#define PPC_IRQ_EXTERNAL 2
+/* Program exception */
+#define PPC_IRQ_PROGRAM 3
+/* System call */
+#define PPC_IRQ_SCALL 4
+/* Floating point unavailable */
+#define PPC_IRQ_NOFP 5
+/* Program interval timer */
+#define PPC_IRQ_PIT 6
+/* Fixed interval timer */
+#define PPC_IRQ_FIT 7
+/* Critical interrupt pin */
+#define PPC_IRQ_CRIT 8
+/* Watchdog timer */
+#define PPC_IRQ_WATCHDOG 9
+/* Debug exceptions */
+#define PPC_IRQ_DEBUG 10
+
+/*
+ * The following exceptions are not maskable, and are not
+ * necessarily predictable, so cannot be offered to RTEMS:
+ * Alignment exception - handled by the CPU module
+ * Data exceptions.
+ * Instruction exceptions.
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _INCLUDE_PPC_h */
+/* end of include file */
diff --git a/c/src/exec/score/cpu/powerpc/ppctypes.h b/c/src/exec/score/cpu/powerpc/ppctypes.h
new file mode 100644
index 0000000000..64219dafe5
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/ppctypes.h
@@ -0,0 +1,74 @@
+/* ppctypes.h
+ *
+ * This include file contains type definitions pertaining to the PowerPC
+ * processor family.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/no_cputypes.h:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * ppctypes.h,v 1.3 1995/05/31 14:57:15 joel Exp
+ */
+
+#ifndef __PPC_TYPES_h
+#define __PPC_TYPES_h
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* unsigned 8-bit integer */
+typedef unsigned short unsigned16; /* unsigned 16-bit integer */
+typedef unsigned int unsigned32; /* unsigned 32-bit integer */
+typedef unsigned long long unsigned64; /* unsigned 64-bit integer */
+
+typedef unsigned32 Priority_Bit_map_control;
+
+typedef signed char signed8; /* 8-bit signed integer */
+typedef signed short signed16; /* 16-bit signed integer */
+typedef signed int signed32; /* 32-bit signed integer */
+typedef signed long long signed64; /* 64 bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+typedef void ppc_isr;
+typedef void ( *ppc_isr_entry )( int, struct CPU_Interrupt_frame * );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/powerpc/rtems.s b/c/src/exec/score/cpu/powerpc/rtems.s
new file mode 100644
index 0000000000..87d0f093a7
--- /dev/null
+++ b/c/src/exec/score/cpu/powerpc/rtems.s
@@ -0,0 +1,131 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the PowerPC implementation of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.demon.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/rtems.c:
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ */
+
+#include "asm.h"
+
+ BEGIN_CODE
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in r11.
+ * This routine is used when RTEMS is linked by itself and placed
+ * in ROM. This routine is the first address in the ROM space for
+ * RTEMS. The user "calls" this address with the directive arguments
+ * in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ */
+
+ ALIGN (4, 2)
+ PUBLIC_PROC (RTEMS)
+PROC (RTEMS):
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ mflr r0
+ stw r0, 8(r1)
+ stwu r1, -64(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r12, r12, r11
+
+ stw r2, 56(r1)
+ lwz r0, 0(r12)
+ mtlr r0
+ lwz r2, 4(r12)
+ lwz r11, 8(r12)
+ blrl
+ lwz r2, 56(r1)
+ addi r1, r1, 64
+ lwz r0, 8(r1)
+ mtlr r0
+#else
+ mflr r0
+ stw r0, 4(r1)
+ stwu r1, -16(r1)
+
+ /* Establish addressing */
+ bl base
+base:
+ mflr r12
+ addi r12, r12, tabaddr - base
+
+ lwz r12, Entry_points-abase(r12)
+ slwi r11, r11, 2
+ lwzx r11, r12, r11
+
+ stw r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ stw r13, 12(r1)
+#endif
+ mtlr r11
+ lwz r11, irqinfo-abase(r12)
+ lwz r2, 0(r11)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 4(r11)
+#endif
+ blrl
+ lwz r2, 8(r1)
+#if (PPC_ABI != PPC_ABI_GCC27)
+ lwz r13, 12(r1)
+#endif
+ addi r1, r1, 16
+ lwz r0, 4(r1)
+ mtlr r0
+#endif
+ blr
+
+
+ /* Addressability stuff */
+tabaddr:
+abase:
+ EXTERN_VAR (_Entry_points)
+Entry_points:
+ EXT_SYM_REF (_Entry_points)
+#if (PPC_ABI != PPC_ABI_POWEROPEN)
+ EXTERN_VAR (_CPU_IRQ_info)
+irqinfo:
+ EXT_SYM_REF (_CPU_IRQ_info)
+#endif
+
+#if (PPC_ABI == PPC_ABI_POWEROPEN)
+ DESCRIPTOR (RTEMS)
+#endif
+
+
diff --git a/c/src/exec/score/inline/rtems/score/wkspace.inl b/c/src/exec/score/inline/rtems/score/wkspace.inl
index 1d88843db8..39d5ad8eb8 100644
--- a/c/src/exec/score/inline/rtems/score/wkspace.inl
+++ b/c/src/exec/score/inline/rtems/score/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:48:49 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl
diff --git a/c/src/exec/score/inline/wkspace.inl b/c/src/exec/score/inline/wkspace.inl
index 1d88843db8..39d5ad8eb8 100644
--- a/c/src/exec/score/inline/wkspace.inl
+++ b/c/src/exec/score/inline/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:48:49 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl
diff --git a/c/src/exec/score/macros/chain.inl b/c/src/exec/score/macros/chain.inl
index 3e17c46bfa..0f09499f1d 100644
--- a/c/src/exec/score/macros/chain.inl
+++ b/c/src/exec/score/macros/chain.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * chain.inl,v 1.2 1995/05/31 16:48:59 joel Exp
+ * $Id$
*/
#ifndef __MACROS_CHAIN_h
diff --git a/c/src/exec/score/macros/heap.inl b/c/src/exec/score/macros/heap.inl
index 1c2b20cbec..4966aab385 100644
--- a/c/src/exec/score/macros/heap.inl
+++ b/c/src/exec/score/macros/heap.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * heap.inl,v 1.2 1995/05/31 16:49:10 joel Exp
+ * $Id$
*/
#ifndef __HEAP_inl
diff --git a/c/src/exec/score/macros/object.inl b/c/src/exec/score/macros/object.inl
index b95b5d678b..6c7caa383a 100644
--- a/c/src/exec/score/macros/object.inl
+++ b/c/src/exec/score/macros/object.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * object.inl,v 1.2 1995/05/31 16:49:20 joel Exp
+ * $Id$
*/
#ifndef __OBJECTS_inl
diff --git a/c/src/exec/score/macros/priority.inl b/c/src/exec/score/macros/priority.inl
index 380df633d7..0efec0050e 100644
--- a/c/src/exec/score/macros/priority.inl
+++ b/c/src/exec/score/macros/priority.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * priority.inl,v 1.2 1995/05/31 16:49:27 joel Exp
+ * $Id$
*/
#ifndef __PRIORITY_inl
diff --git a/c/src/exec/score/macros/rtems/score/chain.inl b/c/src/exec/score/macros/rtems/score/chain.inl
index 3e17c46bfa..0f09499f1d 100644
--- a/c/src/exec/score/macros/rtems/score/chain.inl
+++ b/c/src/exec/score/macros/rtems/score/chain.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * chain.inl,v 1.2 1995/05/31 16:48:59 joel Exp
+ * $Id$
*/
#ifndef __MACROS_CHAIN_h
diff --git a/c/src/exec/score/macros/rtems/score/heap.inl b/c/src/exec/score/macros/rtems/score/heap.inl
index 1c2b20cbec..4966aab385 100644
--- a/c/src/exec/score/macros/rtems/score/heap.inl
+++ b/c/src/exec/score/macros/rtems/score/heap.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * heap.inl,v 1.2 1995/05/31 16:49:10 joel Exp
+ * $Id$
*/
#ifndef __HEAP_inl
diff --git a/c/src/exec/score/macros/rtems/score/object.inl b/c/src/exec/score/macros/rtems/score/object.inl
index b95b5d678b..6c7caa383a 100644
--- a/c/src/exec/score/macros/rtems/score/object.inl
+++ b/c/src/exec/score/macros/rtems/score/object.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * object.inl,v 1.2 1995/05/31 16:49:20 joel Exp
+ * $Id$
*/
#ifndef __OBJECTS_inl
diff --git a/c/src/exec/score/macros/rtems/score/priority.inl b/c/src/exec/score/macros/rtems/score/priority.inl
index 380df633d7..0efec0050e 100644
--- a/c/src/exec/score/macros/rtems/score/priority.inl
+++ b/c/src/exec/score/macros/rtems/score/priority.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * priority.inl,v 1.2 1995/05/31 16:49:27 joel Exp
+ * $Id$
*/
#ifndef __PRIORITY_inl
diff --git a/c/src/exec/score/macros/rtems/score/sysstate.inl b/c/src/exec/score/macros/rtems/score/sysstate.inl
index 89c5098341..ed13494d30 100644
--- a/c/src/exec/score/macros/rtems/score/sysstate.inl
+++ b/c/src/exec/score/macros/rtems/score/sysstate.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * sysstate.inl,v 1.2 1995/05/31 16:49:39 joel Exp
+ * $Id$
*/
#ifndef __SYSTEM_STATE_inl
diff --git a/c/src/exec/score/macros/rtems/score/wkspace.inl b/c/src/exec/score/macros/rtems/score/wkspace.inl
index d50bc110fb..1139e188db 100644
--- a/c/src/exec/score/macros/rtems/score/wkspace.inl
+++ b/c/src/exec/score/macros/rtems/score/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:49:53 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl
diff --git a/c/src/exec/score/macros/sysstate.inl b/c/src/exec/score/macros/sysstate.inl
index 89c5098341..ed13494d30 100644
--- a/c/src/exec/score/macros/sysstate.inl
+++ b/c/src/exec/score/macros/sysstate.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * sysstate.inl,v 1.2 1995/05/31 16:49:39 joel Exp
+ * $Id$
*/
#ifndef __SYSTEM_STATE_inl
diff --git a/c/src/exec/score/macros/wkspace.inl b/c/src/exec/score/macros/wkspace.inl
index d50bc110fb..1139e188db 100644
--- a/c/src/exec/score/macros/wkspace.inl
+++ b/c/src/exec/score/macros/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:49:53 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl
diff --git a/cpukit/score/inline/rtems/score/wkspace.inl b/cpukit/score/inline/rtems/score/wkspace.inl
index 1d88843db8..39d5ad8eb8 100644
--- a/cpukit/score/inline/rtems/score/wkspace.inl
+++ b/cpukit/score/inline/rtems/score/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:48:49 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl
diff --git a/cpukit/score/macros/rtems/score/chain.inl b/cpukit/score/macros/rtems/score/chain.inl
index 3e17c46bfa..0f09499f1d 100644
--- a/cpukit/score/macros/rtems/score/chain.inl
+++ b/cpukit/score/macros/rtems/score/chain.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * chain.inl,v 1.2 1995/05/31 16:48:59 joel Exp
+ * $Id$
*/
#ifndef __MACROS_CHAIN_h
diff --git a/cpukit/score/macros/rtems/score/heap.inl b/cpukit/score/macros/rtems/score/heap.inl
index 1c2b20cbec..4966aab385 100644
--- a/cpukit/score/macros/rtems/score/heap.inl
+++ b/cpukit/score/macros/rtems/score/heap.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * heap.inl,v 1.2 1995/05/31 16:49:10 joel Exp
+ * $Id$
*/
#ifndef __HEAP_inl
diff --git a/cpukit/score/macros/rtems/score/object.inl b/cpukit/score/macros/rtems/score/object.inl
index b95b5d678b..6c7caa383a 100644
--- a/cpukit/score/macros/rtems/score/object.inl
+++ b/cpukit/score/macros/rtems/score/object.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * object.inl,v 1.2 1995/05/31 16:49:20 joel Exp
+ * $Id$
*/
#ifndef __OBJECTS_inl
diff --git a/cpukit/score/macros/rtems/score/priority.inl b/cpukit/score/macros/rtems/score/priority.inl
index 380df633d7..0efec0050e 100644
--- a/cpukit/score/macros/rtems/score/priority.inl
+++ b/cpukit/score/macros/rtems/score/priority.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * priority.inl,v 1.2 1995/05/31 16:49:27 joel Exp
+ * $Id$
*/
#ifndef __PRIORITY_inl
diff --git a/cpukit/score/macros/rtems/score/sysstate.inl b/cpukit/score/macros/rtems/score/sysstate.inl
index 89c5098341..ed13494d30 100644
--- a/cpukit/score/macros/rtems/score/sysstate.inl
+++ b/cpukit/score/macros/rtems/score/sysstate.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * sysstate.inl,v 1.2 1995/05/31 16:49:39 joel Exp
+ * $Id$
*/
#ifndef __SYSTEM_STATE_inl
diff --git a/cpukit/score/macros/rtems/score/wkspace.inl b/cpukit/score/macros/rtems/score/wkspace.inl
index d50bc110fb..1139e188db 100644
--- a/cpukit/score/macros/rtems/score/wkspace.inl
+++ b/cpukit/score/macros/rtems/score/wkspace.inl
@@ -11,7 +11,7 @@
* to the copyright license under the clause at DFARS 252.227-7013. This
* notice must appear in all copies of this file and its derivatives.
*
- * wkspace.inl,v 1.2 1995/05/31 16:49:53 joel Exp
+ * $Id$
*/
#ifndef __WORKSPACE_inl