summaryrefslogtreecommitdiffstats
path: root/c/src/exec/score/cpu
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>1995-05-11 17:39:37 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>1995-05-11 17:39:37 +0000
commitac7d5ef06a6d6e8d84abbd1f0b82162725f98326 (patch)
tree9304cf759a73f2a1c6fd3191948f00e870af3787 /c/src/exec/score/cpu
downloadrtems-ac7d5ef06a6d6e8d84abbd1f0b82162725f98326.tar.bz2
Initial revision
Diffstat (limited to 'c/src/exec/score/cpu')
-rw-r--r--c/src/exec/score/cpu/hppa1.1/cpu.c313
-rw-r--r--c/src/exec/score/cpu/hppa1.1/cpu.h581
-rw-r--r--c/src/exec/score/cpu/hppa1.1/cpu_asm.h73
-rw-r--r--c/src/exec/score/cpu/hppa1.1/cpu_asm.s797
-rw-r--r--c/src/exec/score/cpu/hppa1.1/hppa.h704
-rw-r--r--c/src/exec/score/cpu/hppa1.1/rtems.s53
-rw-r--r--c/src/exec/score/cpu/i386/asm.h131
-rw-r--r--c/src/exec/score/cpu/i386/cpu.c121
-rw-r--r--c/src/exec/score/cpu/i386/cpu.h367
-rw-r--r--c/src/exec/score/cpu/i386/cpu_asm.s654
-rw-r--r--c/src/exec/score/cpu/i386/i386.h493
-rw-r--r--c/src/exec/score/cpu/i386/rtems.s31
-rw-r--r--c/src/exec/score/cpu/i960/asm.h107
-rw-r--r--c/src/exec/score/cpu/i960/cpu.c124
-rw-r--r--c/src/exec/score/cpu/i960/cpu.h424
-rw-r--r--c/src/exec/score/cpu/i960/cpu_asm.s199
-rw-r--r--c/src/exec/score/cpu/i960/i960.h289
-rw-r--r--c/src/exec/score/cpu/i960/rtems.s25
-rw-r--r--c/src/exec/score/cpu/m68k/asm.h127
-rw-r--r--c/src/exec/score/cpu/m68k/cpu.c97
-rw-r--r--c/src/exec/score/cpu/m68k/cpu.h412
-rw-r--r--c/src/exec/score/cpu/m68k/cpu_asm.s202
-rw-r--r--c/src/exec/score/cpu/m68k/m68k.h282
-rw-r--r--c/src/exec/score/cpu/m68k/rtems.s46
-rw-r--r--c/src/exec/score/cpu/no_cpu/asm.h98
-rw-r--r--c/src/exec/score/cpu/no_cpu/cpu.c132
-rw-r--r--c/src/exec/score/cpu/no_cpu/cpu.h818
-rw-r--r--c/src/exec/score/cpu/no_cpu/cpu_asm.c152
-rw-r--r--c/src/exec/score/cpu/no_cpu/cpu_asm.h70
-rw-r--r--c/src/exec/score/cpu/no_cpu/no_cpu.h86
-rw-r--r--c/src/exec/score/cpu/no_cpu/rtems.c45
-rw-r--r--c/src/exec/score/cpu/unix/cpu.c529
-rw-r--r--c/src/exec/score/cpu/unix/cpu.h929
-rw-r--r--c/src/exec/score/cpu/unix/unix.h90
34 files changed, 9601 insertions, 0 deletions
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu.c b/c/src/exec/score/cpu/hppa1.1/cpu.c
new file mode 100644
index 0000000000..b69a172b4e
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu.c
@@ -0,0 +1,313 @@
+/*
+ * HP PA-RISC Dependent Source
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/intr.h>
+#include <rtems/wkspace.h>
+
+rtems_status_code hppa_external_interrupt_initialize(void);
+void hppa_external_interrupt_enable(unsigned32);
+void hppa_external_interrupt_disable(unsigned32);
+void hppa_external_interrupt(unsigned32, CPU_Interrupt_frame *);
+
+/*
+ * Our interrupt handlers take a 2nd argument:
+ * a pointer to a CPU_Interrupt_frame
+ * So we use our own prototype instead of rtems_isr_entry
+ */
+
+typedef rtems_isr ( *hppa_rtems_isr_entry )(
+ rtems_vector_number,
+ CPU_Interrupt_frame *
+ );
+
+
+/*
+ * who are we? cpu number
+ * Not used by executive proper, just kept (or not) as a convenience
+ * for libcpu and libbsp stuff that wants it.
+ *
+ * Defaults to 0. If the BSP doesn't like it, it can change it.
+ */
+
+int cpu_number; /* from 0; cpu number in a multi cpu system */
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ *    thread_dispatch - address of dispatching routine
+ *
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ register unsigned8 *fp_context;
+ unsigned32 iva;
+ unsigned32 iva_table;
+ int i;
+
+ extern void IVA_Table(void);
+
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ /*
+ * XXX; need to setup fpsr smarter perhaps
+ */
+
+ fp_context = (unsigned8*) &_CPU_Null_fp_context;
+ for (i=0 ; i<sizeof(Context_Control_fp); i++)
+ *fp_context++ = 0;
+
+ /*
+ * Set _CPU_Default_gr27 here so it will hopefully be the correct
+ * global data pointer for the entire system.
+ */
+
+ asm volatile( "stw %%r27,%0" : "=m" (_CPU_Default_gr27): );
+
+ /*
+ * Stabilize the interrupt stuff
+ */
+
+ (void) hppa_external_interrupt_initialize();
+
+ /*
+ * Set the IVA to point to physical address of the IVA_Table
+ */
+
+ iva_table = (unsigned32) IVA_Table;
+ HPPA_ASM_LPA(0, iva_table, iva);
+ set_iva(iva);
+
+ _CPU_Table = *cpu_table;
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+/*
+ * HPPA has 8w for each vector instead of an address to jump to.
+ * We put the actual ISR address in '_ISR_vector_table'. This will
+ * be pulled by the code in the vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ *old_handler = _ISR_Vector_table[vector];
+
+ _ISR_Vector_table[vector] = new_handler;
+
+ if (vector >= HPPA_INTERRUPT_EXTERNAL_BASE)
+ {
+ unsigned32 external_vector;
+
+ external_vector = vector - HPPA_INTERRUPT_EXTERNAL_BASE;
+ if (new_handler)
+ hppa_external_interrupt_enable(external_vector);
+ else
+ /* XXX this can never happen due to _ISR_Is_valid_user_handler */
+ hppa_external_interrupt_disable(external_vector);
+ }
+}
+
+
+/*
+ * Support for external and spurious interrupts on HPPA
+ *
+ * TODO:
+ * delete interrupt.c etc.
+ * Count interrupts
+ * make sure interrupts disabled properly
+ * should handler check again for more interrupts before exit?
+ * How to enable interrupts from an interrupt handler?
+ * Make sure there is an entry for everything in ISR_Vector_Table
+ */
+
+#define DISMISS(mask) set_eirr(mask)
+#define DISABLE(mask) set_eiem(get_eiem() & ~(mask))
+#define ENABLE(mask) set_eiem(get_eiem() | (mask))
+#define VECTOR_TO_MASK(v) (1 << (31 - (v)))
+
+/*
+ * Init the external interrupt scheme
+ * called by bsp_start()
+ */
+
+rtems_status_code
+hppa_external_interrupt_initialize(void)
+{
+ rtems_isr_entry ignore;
+
+ /* mark them all unused */
+
+ DISABLE(~0);
+ DISMISS(~0);
+
+ /* install the external interrupt handler */
+ rtems_interrupt_catch((rtems_isr_entry) hppa_external_interrupt,
+ HPPA_INTERRUPT_EXTERNAL_INTERRUPT, &ignore) ;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+/*
+ * Enable a specific external interrupt
+ */
+
+void
+hppa_external_interrupt_enable(unsigned32 v)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+ ENABLE(VECTOR_TO_MASK(v));
+ _CPU_ISR_Enable(isrlevel);
+}
+
+/*
+ * Does not clear or otherwise affect any pending requests
+ */
+
+void
+hppa_external_interrupt_disable(unsigned32 v)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+ DISABLE(VECTOR_TO_MASK(v));
+ _CPU_ISR_Enable(isrlevel);
+}
+
+void
+hppa_external_interrupt_spurious_handler(unsigned32 vector,
+ CPU_Interrupt_frame *iframe)
+{
+/* XXX should not be printing :)
+ printf("spurious external interrupt: %d at pc 0x%x; disabling\n",
+ vector, iframe->Interrupt.pcoqfront);
+*/
+ DISMISS(VECTOR_TO_MASK(vector));
+ DISABLE(VECTOR_TO_MASK(vector));
+}
+
+void
+hppa_external_interrupt_report_spurious(unsigned32 spurious,
+ CPU_Interrupt_frame *iframe)
+{
+ int v;
+ for (v=0; v < HPPA_EXTERNAL_INTERRUPTS; v++)
+ if (VECTOR_TO_MASK(v) & spurious)
+ hppa_external_interrupt_spurious_handler(v, iframe);
+ DISMISS(spurious);
+}
+
+
+/*
+ * External interrupt handler.
+ * This is installed as cpu interrupt handler for
+ * HPPA_INTERRUPT_EXTERNAL_INTERRUPT. It vectors out to
+ * specific external interrupt handlers.
+ */
+
+void
+hppa_external_interrupt(unsigned32 vector,
+ CPU_Interrupt_frame *iframe)
+{
+ unsigned32 mask;
+ unsigned32 *vp, *max_vp;
+ unsigned32 external_vector;
+ unsigned32 global_vector;
+ hppa_rtems_isr_entry handler;
+
+ max_vp = &_CPU_Table.external_interrupt[_CPU_Table.external_interrupts];
+ while ( (mask = (get_eirr() & get_eiem())) )
+ {
+ for (vp = _CPU_Table.external_interrupt; (vp < max_vp) && mask; vp++)
+ {
+ unsigned32 m;
+
+ external_vector = *vp;
+ global_vector = external_vector + HPPA_INTERRUPT_EXTERNAL_BASE;
+ m = VECTOR_TO_MASK(external_vector);
+ handler = (hppa_rtems_isr_entry) _ISR_Vector_table[global_vector];
+ if ((m & mask) && handler)
+ {
+ DISMISS(m);
+ mask &= ~m;
+ (*handler)(global_vector, iframe);
+ }
+ }
+
+ if (mask != 0) {
+ if ( _CPU_Table.spurious_handler )
+ (*((hppa_rtems_isr_entry) _CPU_Table.spurious_handler))(
+ mask,
+ iframe
+ );
+ else
+ hppa_external_interrupt_report_spurious(mask, iframe);
+ }
+ }
+}
+
+/*
+ * Halt the system.
+ * Called by the _CPU_Fatal_halt macro
+ *
+ * XXX
+ * Later on, this will allow us to return to the prom.
+ * For now, we just ignore 'type_of_halt'
+ */
+
+void
+hppa_cpu_halt(unsigned32 type_of_halt,
+ unsigned32 the_error)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+
+ asm volatile( "copy %0,%%r1" : : "r" (the_error) );
+ HPPA_ASM_BREAK(1, 0);
+}
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu.h b/c/src/exec/score/cpu/hppa1.1/cpu.h
new file mode 100644
index 0000000000..3e8f31fcf0
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu.h
@@ -0,0 +1,581 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the HP
+ * PA-RISC processor (Level 1.1).
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ *
+ * Note:
+ * This file is included by both C and assembler code ( -DASM )
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hppa.h> /* pick up machine definitions */
+
+/* conditional compilation parameters */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * RTEMS manages an interrupt stack in software for the HPPA.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * HPPA has hardware FP, it is assumed to exist by GCC so all tasks
+ * may implicitly use it (especially for integer multiplies). Because
+ * the FP context is technically part of the basic integer context
+ * on this CPU, we cannot use the deferred FP context switch algorithm.
+ */
+
+#define CPU_HARDWARE_FP TRUE
+#define CPU_ALL_TASKS_ARE_FP TRUE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH FALSE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP TRUE
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((__aligned__ (32)))
+
+/* constants */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
+
+/*
+ * PSW constants
+ */
+
+#define CPU_PSW_BASE (HPPA_PSW_C | HPPA_PSW_Q | HPPA_PSW_P | HPPA_PSW_D)
+#define CPU_PSW_INTERRUPTS_ON (CPU_PSW_BASE | HPPA_PSW_I)
+#define CPU_PSW_INTERRUPTS_OFF (CPU_PSW_BASE)
+
+#define CPU_PSW_DEFAULT CPU_PSW_BASE
+
+
+#ifndef ASM
+
+/*
+ * Contexts
+ *
+ * This means we have the following context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ *
+ * The PA-RISC is very fast so the expense of saving an extra register
+ * or two is not of great concern at the present. So we are not making
+ * a distinction between what is saved during a task switch and what is
+ * saved at each interrupt. Plus saving the entire context should make
+ * it easier to make gdb aware of RTEMS tasks.
+ */
+
+typedef struct {
+ unsigned32 flags; /* whatever */
+ unsigned32 gr1; /* scratch -- caller saves */
+ unsigned32 gr2; /* RP -- return pointer */
+ unsigned32 gr3; /* scratch -- callee saves */
+ unsigned32 gr4; /* scratch -- callee saves */
+ unsigned32 gr5; /* scratch -- callee saves */
+ unsigned32 gr6; /* scratch -- callee saves */
+ unsigned32 gr7; /* scratch -- callee saves */
+ unsigned32 gr8; /* scratch -- callee saves */
+ unsigned32 gr9; /* scratch -- callee saves */
+ unsigned32 gr10; /* scratch -- callee saves */
+ unsigned32 gr11; /* scratch -- callee saves */
+ unsigned32 gr12; /* scratch -- callee saves */
+ unsigned32 gr13; /* scratch -- callee saves */
+ unsigned32 gr14; /* scratch -- callee saves */
+ unsigned32 gr15; /* scratch -- callee saves */
+ unsigned32 gr16; /* scratch -- callee saves */
+ unsigned32 gr17; /* scratch -- callee saves */
+ unsigned32 gr18; /* scratch -- callee saves */
+ unsigned32 gr19; /* scratch -- caller saves */
+ unsigned32 gr20; /* scratch -- caller saves */
+ unsigned32 gr21; /* scratch -- caller saves */
+ unsigned32 gr22; /* scratch -- caller saves */
+ unsigned32 gr23; /* argument 3 */
+ unsigned32 gr24; /* argument 2 */
+ unsigned32 gr25; /* argument 1 */
+ unsigned32 gr26; /* argument 0 */
+ unsigned32 gr27; /* DP -- global data pointer */
+ unsigned32 gr28; /* return values -- caller saves */
+ unsigned32 gr29; /* return values -- caller saves */
+ unsigned32 sp; /* gr30 */
+ unsigned32 gr31;
+
+ /* Various control registers */
+
+ unsigned32 sar; /* cr11 */
+ unsigned32 ipsw; /* cr22; full 32 bits of psw */
+ unsigned32 iir; /* cr19; interrupt instruction register */
+ unsigned32 ior; /* cr21; interrupt offset register */
+ unsigned32 isr; /* cr20; interrupt space register (not used) */
+    unsigned32 pcoqfront;    /* cr18; front queue offset */
+    unsigned32 pcoqback;     /* cr18; back queue offset */
+    unsigned32 pcsqfront;    /* cr17; front queue space (not used) */
+    unsigned32 pcsqback;     /* cr17; back queue space (not used) */
+ unsigned32 itimer; /* cr16; itimer value */
+
+} Context_Control;
+
+
+/* Must be double word aligned.
+ * This will be ok since our allocator returns 8 byte aligned chunks
+ */
+
+typedef struct {
+ double fr0; /* status */
+ double fr1; /* exception information */
+ double fr2; /* exception information */
+ double fr3; /* exception information */
+ double fr4; /* argument */
+ double fr5; /* argument */
+ double fr6; /* argument */
+ double fr7; /* argument */
+ double fr8; /* scratch -- caller saves */
+ double fr9; /* scratch -- caller saves */
+ double fr10; /* scratch -- caller saves */
+ double fr11; /* scratch -- caller saves */
+ double fr12; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr13; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr14; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr15; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr16; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr17; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr18; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr19; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr20; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr21; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr22; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr23; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr24; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr25; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr26; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr27; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr28; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr29; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr30; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr31; /* caller saves -- (PA-RISC 1.1 CPUs) */
+} Context_Control_fp;
+
+/*
+ * The following structure defines the set of information saved
+ * on the current stack by RTEMS upon receipt of each interrupt.
+ */
+
+typedef struct {
+ Context_Control Integer;
+ Context_Control_fp Floating_Point;
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the HPPA specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+
+ /* HPPA simulator is slow enough; don't waste time
+ * zeroing memory that is already zero
+ */
+ boolean do_zero_of_workspace;
+
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+
+ /*
+ * Control of external interrupts.
+ * We keep a table of external vector numbers (0 - 31)
+ * The table is sorted by priority, that is: the first entry
+     * in the table indicates the vector that is highest priority.
+ * The handler function is stored in _ISR_Vector_Table[] and
+ * is set by rtems_interrupt_catch()
+ */
+
+ unsigned32 external_interrupts; /* # of external interrupts we use */
+ unsigned32 external_interrupt[HPPA_EXTERNAL_INTERRUPTS];
+
+ void (*spurious_handler)( unsigned32 mask, CPU_Interrupt_frame *);
+
+ unsigned32 itimer_clicks_per_microsecond; /* for use by Clock driver */
+} rtems_cpu_table;
+
+/* variables */
+
+EXTERN Context_Control_fp _CPU_Null_fp_context;
+EXTERN unsigned32 _CPU_Default_gr27;
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+#endif /* ! ASM */
+
+/*
+ * context size area for floating point
+ */
+
+#ifndef ASM
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+#endif
+
+/*
+ * size of a frame on the stack
+ */
+
+#define CPU_FRAME_SIZE (16 * 4)
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (CPU_FRAME_SIZE * 2)
+
+/*
+ * extra stack required by system initialization thread
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
+
+/*
+ * HPPA has 32 interrupts, then 32 external interrupts
+ * Rtems (_ISR_Vector_Table) is aware of the first 64
+ * A BSP may reserve more.
+ *
+ * External interrupts all come through the same vector (4)
+ * The external handler is the only person aware of the other
+ * interrupts (genie, rhino, etc)
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (HPPA_INTERRUPT_MAX)
+
+/*
+ * Don't be chintzy here; we don't want to debug these problems
+ * Some of the tests eat almost 4k.
+ * Plus, the HPPA always allocates chunks of 64 bytes for stack
+ * growth.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (8 * 1024)
+
+/*
+ * HPPA double's must be on 8 byte boundary
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * just follow the basic HPPA alignment for the heap and partition
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * HPPA stack is best when 64 byte aligned.
+ */
+
+#define CPU_STACK_ALIGNMENT 64
+
+#ifndef ASM
+
+/* macros */
+
+/*
+ * ISR handler macros
+ *
+ * These macros perform the following functions:
+ * + disable all maskable CPU interrupts
+ * + restore previous interrupt level (enable)
+ * + temporarily restore interrupts (flash)
+ * + set a particular level
+ */
+
+/* Disable interrupts; returning previous level in _level */
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ do { \
+ HPPA_ASM_RSM(HPPA_PSW_I, _isr_cookie); \
+ } while(0)
+
+/* Enable interrupts to previous level from _CPU_ISR_Disable
+ * does not change 'level' */
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ HPPA_ASM_MTSM( _isr_cookie ); \
+ }
+
+/* restore, then disable interrupts; does not change level */
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { \
+ register int _ignore; \
+ _CPU_ISR_Enable( _isr_cookie ); \
+ _CPU_ISR_Disable( _ignore ); \
+ }
+
+/*
+ * Interrupt task levels
+ *
+ * Future scheme proposal
+ * level will be an index into a array.
+ * Each entry of array will be the interrupt bits
+ * enabled for that level. There will be 32 bits of external
+ * interrupts (to be placed in EIEM) and some (optional) bsp
+ * specific bits
+ *
+ * For pixel flow this *may* mean something like:
+ * level 0: all interrupts enabled (external + rhino)
+ * level 1: rhino disabled
+ * level 2: all io interrupts disabled (timer still enabled)
+ * level 7: *ALL* disabled (timer disabled)
+ */
+
+/* set interrupts on or off; does not return new level */
+#define _CPU_ISR_Set_level( new_level ) \
+ { \
+ volatile int ignore; \
+ if ( new_level ) HPPA_ASM_RSM(HPPA_PSW_I, ignore); \
+ else HPPA_ASM_SSM(HPPA_PSW_I, ignore); \
+ }
+
+/* end of ISR handler macros */
+
+/*
+ * Context handler macros
+ *
+ * These macros perform the following functions:
+ * + initialize a context area
+ * + restart the current thread
+ * + calculate the initial pointer into a FP context area
+ * + initialize an FP context area
+ *
+ * HPPA port adds two macros which hide the "indirectness" of the
+ * pointer passed the save/restore FP context assembly routines.
+ */
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _new_level, _entry_point ) \
+ do { \
+ unsigned32 _stack; \
+ \
+ (_the_context)->flags = 0xfeedf00d; \
+ (_the_context)->pcoqfront = (unsigned32)(_entry_point); \
+ (_the_context)->pcoqback = (unsigned32)(_entry_point) + 4; \
+ (_the_context)->pcsqfront = 0; \
+ (_the_context)->pcsqback = 0; \
+ if ( (_new_level) ) \
+ (_the_context)->ipsw = CPU_PSW_INTERRUPTS_OFF; \
+ else \
+ (_the_context)->ipsw = CPU_PSW_INTERRUPTS_ON; \
+ \
+ _stack = ((unsigned32)(_stack_base) + (CPU_STACK_ALIGNMENT - 1)); \
+ _stack &= ~(CPU_STACK_ALIGNMENT - 1); \
+ if ((_stack - (unsigned32) (_stack_base)) < CPU_FRAME_SIZE) \
+ _stack += CPU_FRAME_SIZE; \
+ \
+ (_the_context)->sp = (_stack); \
+ (_the_context)->gr27 = _CPU_Default_gr27; \
+ } while (0)
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ do { \
+ _CPU_Context_restore( (_the_context) ); \
+ } while (0)
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ do { \
+ *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context;\
+ } while(0)
+
+#define _CPU_Context_save_fp( _fp_context ) \
+ _CPU_Save_float_context( *(Context_Control_fp **)(_fp_context) )
+
+#define _CPU_Context_restore_fp( _fp_context ) \
+ _CPU_Restore_float_context( *(Context_Control_fp **)(_fp_context) )
+
+/* end of Context handler macros */
+
+/*
+ * Fatal Error manager macros
+ *
+ * These macros perform the following functions:
+ * + disable interrupts and halt the CPU
+ */
+
+void hppa_cpu_halt(unsigned32 type_of_halt, unsigned32 the_error);
+#define _CPU_Fatal_halt( _error ) \
+ hppa_cpu_halt(0, _error)
+
+/* end of Fatal Error manager macros */
+
+/*
+ * Bitfield handler macros
+ *
+ * These macros perform the following functions:
+ * + scan for the highest numbered (MSB) set in a 16 bit bitfield
+ *
+ * NOTE:
+ *
+ * The HPPA does not have a scan instruction. This functionality
+ * is implemented in software.
+ */
+
+int hppa_rtems_ffs(unsigned int value);
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ _output = hppa_rtems_ffs(_value)
+
+/* end of Bitfield handler macros */
+
+/*
+ * Priority handler macros
+ *
+ * These macros perform the following functions:
+ * + return a mask with the bit for this major/minor portion of
+ * of thread priority set.
+ * + translate the bit number returned by "Bitfield_find_first_bit"
+ * into an index into the thread ready chain bit maps
+ *
+ * Note: 255 is the lowest priority
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 1 << (_bit_number) )
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner and avoid stack conflicts.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Save_float_context
+ *
+ * This routine saves the floating point context passed to it.
+ *
+ * NOTE: _CPU_Context_save_fp is implemented as a macro on the HPPA
+ * which dereferences the pointer before calling this.
+ */
+
+void _CPU_Save_float_context(
+ Context_Control_fp *fp_context
+);
+
+/*
+ * _CPU_Restore_float_context
+ *
+ * This routine restores the floating point context passed to it.
+ *
+ * NOTE: _CPU_Context_save_fp is implemented as a macro on the HPPA
+ * which dereferences the pointer before calling this.
+ */
+
+void _CPU_Restore_float_context(
+ Context_Control_fp *fp_context
+);
+
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static so it can be referenced indirectly.
+ */
+
+static inline unsigned int
+CPU_swap_u32(unsigned32 value)
+{
+ unsigned32 swapped;
+
+ HPPA_ASM_SWAPBYTES(value, swapped);
+
+ return( swapped );
+}
+
+/*
+ * Unused; I think it should go away
+ */
+
+#if 0
+#define enable_tracing()
+#endif
+
+#endif /* ! ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! __CPU_h */
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu_asm.h b/c/src/exec/score/cpu/hppa1.1/cpu_asm.h
new file mode 100644
index 0000000000..8e480c2a5c
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu_asm.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1990,1991 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the Center
+ * for Software Science at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Utah $Hdr: asm.h 1.6 91/12/03$
+ *
+ * RTEMS: $Id$
+ */
+
+/*
+ * Hardware Space Registers
+ */
+sr0 .reg %sr0
+sr1 .reg %sr1
+sr2 .reg %sr2
+sr3 .reg %sr3
+sr4 .reg %sr4
+sr5 .reg %sr5
+sr6 .reg %sr6
+sr7 .reg %sr7
+
+/*
+ * Control register aliases
+ */
+
+rctr .reg %cr0
+pidr1 .reg %cr8
+pidr2 .reg %cr9
+ccr .reg %cr10
+sar .reg %cr11
+pidr3 .reg %cr12
+pidr4 .reg %cr13
+iva .reg %cr14
+eiem .reg %cr15
+itmr .reg %cr16
+pcsq .reg %cr17
+pcoq .reg %cr18
+iir .reg %cr19
+isr .reg %cr20
+ior .reg %cr21
+ipsw .reg %cr22
+eirr .reg %cr23
+
+/*
+ * Calling Convention
+ */
+rp .reg %r2
+arg3 .reg %r23
+arg2 .reg %r24
+arg1 .reg %r25
+arg0 .reg %r26
+dp .reg %r27
+ret0 .reg %r28
+ret1 .reg %r29
+sl .reg %r29
+sp .reg %r30
+
+
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu_asm.s b/c/src/exec/score/cpu/hppa1.1/cpu_asm.s
new file mode 100644
index 0000000000..43a5bb2499
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu_asm.s
@@ -0,0 +1,797 @@
+# @(#)cpu_asm.S 1.5 - 95/04/24
+#
+#
+# TODO:
+# Context_switch needs to only save callee save registers
+# I think this means can skip: r1, r2, r19-29, r31
+# Ref: p 3-2 of Procedure Calling Conventions Manual
+# This should be #ifndef DEBUG so that debugger has
+# accurate visibility into all registers
+#
+# This file contains the assembly code for the HPPA implementation
+# of RTEMS.
+#
+# COPYRIGHT (c) 1994,95 by Division Incorporated
+#
+# To anyone who acknowledges that this file is provided "AS IS"
+# without any express or implied warranty:
+# permission to use, copy, modify, and distribute this file
+# for any purpose is hereby granted without fee, provided that
+# the above copyright notice and this notice appears in all
+# copies, and that the name of Division Incorporated not be
+# used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# Division Incorporated makes no representations about the
+# suitability of this software for any purpose.
+#
+# $Id$
+#
+
+#include <rtems/hppa.h>
+#include <rtems/cpu_asm.h>
+#include <rtems/cpu.h>
+
+#include <offsets.h>
+
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+#
+# Special register usage for context switch and interrupts
+# Stay away from %cr28 which is used for TLB misses on 72000
+#
+
+isr_arg0 .reg %cr24
+isr_r9 .reg %cr25
+
+#
+# Interrupt stack frame looks like this
+#
+# offset item
+# -----------------------------------------------------------------
+# INTEGER_CONTEXT_OFFSET Context_Control
+# FP_CONTEXT_OFFSET Context_Control_fp
+#
+# It is padded out to a multiple of 64
+#
+
+
+# PAGE^L
+# void __Generic_ISR_Handler()
+#
+# This routine provides the RTEMS interrupt management.
+#
+# NOTE:
+# Upon entry, the stack will contain a stack frame back to the
+# interrupted task. If dispatching is enabled, this is the
+# outer most interrupt, (and a context switch is necessary or
+# the current task has signals), then set up the stack to
+# transfer control to the interrupt dispatcher.
+#
+#
+# We jump here from the interrupt vector.
+# The hardware has done some stuff for us:
+# PSW saved in IPSW
+# PSW set to 0
+# PSW[E] set to default (0)
+# PSW[M] set to 1 iff this is HPMC
+#
+# IIA queue is frozen (since PSW[Q] is now 0)
+# privilege level promoted to 0
+# IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap
+# registers GR 1,8,9,16,17,24,25 copied to shadow regs
+# SHR 0 1 2 3 4 5 6
+#
+# Our vector stub did the following
+# placed vector number is in r1
+#
+# stub
+# r1 <- vector number
+# save ipsw under rock
+# ipsw = ipsw & ~1 -- disable ints
+# save qregs under rock
+# qra = _Generic_ISR_handler
+# rfi
+#
+################################################
+
+# Distinct Interrupt Entry Points
+#
+# The following macro and the 32 instantiations of the macro
+# are necessary to determine which interrupt vector occurred.
+# The following macro allows a unique entry point to be defined
+# for each vector.
+#
+# r9 was loaded with the vector before branching here
+# scratch registers available: gr1, gr8, gr9, gr16, gr17, gr24
+#
+# NOTE:
+# .align 32 does not seem to work in the continuation below
+# so just have to count 8 instructions
+#
+# NOTE:
+# this whole scheme needs to be rethought for TLB traps which
+# have requirements about what tlb faults they can incur.
+# ref: TLB Operation Requirements in 1.1 arch book
+
+# THANDLER(vector): one interrupt vector entry point.
+# Each instance must be exactly 8 instructions (32 bytes) since the
+# hardware enters the IVA table at fixed 32-byte slots (see the NOTE
+# above about .align 32 not working in the continuation).
+# Saves the interrupted %r9 in shadow control register isr_r9, loads
+# the vector number, and branches to the common _Generic_ISR_Handler;
+# the 'ldi' executes in the branch delay slot.
+#define THANDLER(vector) \
+    mtctl   %r9, isr_r9 ! \
+    b _Generic_ISR_Handler! \
+    ldi vector, %r9! \
+    nop ! \
+    nop ! \
+    nop ! \
+    nop ! \
+    nop
+
+# IVA_Table: the Interrupt Vector Address table pointed at by %cr14 (iva).
+# The architecture requires 4Kb alignment.  There are 32 slots, one per
+# on-chip trap/interrupt, each 32 bytes (8 instructions) as laid down by
+# THANDLER.  Slot order is the architected trap number order; the vector
+# constants come from hppa.h (HPPA_INTERRUPT_*).
+ .align 4096
+ .EXPORT IVA_Table,ENTRY,PRIV_LEV=0
+IVA_Table:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+ THANDLER(0) /* unused */
+
+ THANDLER(HPPA_INTERRUPT_HIGH_PRIORITY_MACHINE_CHECK)
+
+ THANDLER(HPPA_INTERRUPT_POWER_FAIL)
+
+ THANDLER(HPPA_INTERRUPT_RECOVERY_COUNTER)
+
+ THANDLER(HPPA_INTERRUPT_EXTERNAL_INTERRUPT)
+
+ THANDLER(HPPA_INTERRUPT_LOW_PRIORITY_MACHINE_CHECK)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_MEMORY_PROTECTION)
+
+ THANDLER(HPPA_INTERRUPT_ILLEGAL_INSTRUCTION)
+
+ THANDLER(HPPA_INTERRUPT_BREAK_INSTRUCTION)
+
+ THANDLER(HPPA_INTERRUPT_PRIVILEGED_OPERATION)
+
+ THANDLER(HPPA_INTERRUPT_PRIVILEGED_REGISTER)
+
+ THANDLER(HPPA_INTERRUPT_OVERFLOW)
+
+ THANDLER(HPPA_INTERRUPT_CONDITIONAL)
+
+ THANDLER(HPPA_INTERRUPT_ASSIST_EXCEPTION)
+
+ THANDLER(HPPA_INTERRUPT_DATA_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_NON_ACCESS_INSTRUCTION_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_NON_ACCESS_DATA_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_PROTECTION)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_BREAK)
+
+ THANDLER(HPPA_INTERRUPT_TLB_DIRTY_BIT)
+
+ THANDLER(HPPA_INTERRUPT_PAGE_REFERENCE)
+
+ THANDLER(HPPA_INTERRUPT_ASSIST_EMULATION)
+
+ THANDLER(HPPA_INTERRUPT_HIGHER_PRIVILEGE_TRANSFER)
+
+ THANDLER(HPPA_INTERRUPT_LOWER_PRIVILEGE_TRANSFER)
+
+ THANDLER(HPPA_INTERRUPT_TAKEN_BRANCH)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_ACCESS_RIGHTS)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_PROTECTION_ID)
+
+ THANDLER(HPPA_INTERRUPT_UNALIGNED_DATA_REFERENCE)
+
+ THANDLER(HPPA_INTERRUPT_PERFORMANCE_MONITOR)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_DEBUG)
+
+ THANDLER(HPPA_INTERRUPT_DATA_DEBUG)
+
+ .EXIT
+ .PROCEND
+
+ .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0
+_Generic_ISR_Handler:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+# Common interrupt entry, reached from a THANDLER vector stub.
+# On arrival: %r9 holds the vector number, the interrupted %r9 is in
+# control register isr_r9, interrupts are off, and we are running with
+# translation disabled (physical mode).
+#
+# Turn on the D bit in psw so we can start saving stuff on stack
+# (interrupt context pieces that need to be saved before the RFI)
+
+ ssm HPPA_PSW_D, %r0
+ mtctl arg0, isr_arg0
+
+# save interrupt state
+ mfctl ipsw, arg0
+ stw arg0, IPSW_OFFSET(sp)
+
+ mfctl iir, arg0
+ stw arg0, IIR_OFFSET(sp)
+
+ mfctl ior, arg0
+ stw arg0, IOR_OFFSET(sp)
+
+ mfctl pcoq, arg0
+ stw arg0, PCOQFRONT_OFFSET(sp)
+
+# NOTE(review): the mfctl above reads the front of the PC offset queue;
+# after the mtctl of %r0 the second mfctl yields the back entry --
+# confirm against PA-RISC 1.1 PC queue semantics.
+ mtctl %r0, pcoq
+ mfctl pcoq, arg0
+ stw arg0, PCOQBACK_OFFSET(sp)
+
+ mfctl %sar, arg0
+ stw arg0, SAR_OFFSET(sp)
+
+# Prepare to re-enter virtual mode
+# We need Q in case the interrupt handler enables interrupts
+#
+
+ ldil L%CPU_PSW_DEFAULT, arg0
+ ldo R%CPU_PSW_DEFAULT(arg0), arg0
+ mtctl arg0, ipsw
+
+# Now jump to "rest_of_isr_handler" with the rfi
+# We are assuming the space queues are all correct already
+# (load both queue slots: front = rest_of_isr_handler, back = +4)
+
+ ldil L%rest_of_isr_handler, arg0
+ ldo R%rest_of_isr_handler(arg0), arg0
+ mtctl arg0, pcoq
+ ldo 4(arg0), arg0
+ mtctl arg0, pcoq
+
+ rfi
+ nop
+
+# At this point we are back in virtual mode and all our
+# normal addressing is once again ok.
+
+rest_of_isr_handler:
+
+#
+# Build an interrupt frame to hold the contexts we will need.
+# We have already saved the interrupt items on the stack
+
+# At this point the following registers are damaged wrt the interrupt
+# reg current value saved value
+# ------------------------------------------------
+# arg0 scratch isr_arg0 (ctl)
+# r9 vector number isr_r9 (ctl)
+#
+# Point to beginning of integer context and
+# save the integer context
+ stw %r1,R1_OFFSET(sp)
+ stw %r2,R2_OFFSET(sp)
+ stw %r3,R3_OFFSET(sp)
+ stw %r4,R4_OFFSET(sp)
+ stw %r5,R5_OFFSET(sp)
+ stw %r6,R6_OFFSET(sp)
+ stw %r7,R7_OFFSET(sp)
+ stw %r8,R8_OFFSET(sp)
+ stw %r9,R9_OFFSET(sp)
+ stw %r10,R10_OFFSET(sp)
+ stw %r11,R11_OFFSET(sp)
+ stw %r12,R12_OFFSET(sp)
+ stw %r13,R13_OFFSET(sp)
+ stw %r14,R14_OFFSET(sp)
+ stw %r15,R15_OFFSET(sp)
+ stw %r16,R16_OFFSET(sp)
+ stw %r17,R17_OFFSET(sp)
+ stw %r18,R18_OFFSET(sp)
+ stw %r19,R19_OFFSET(sp)
+ stw %r20,R20_OFFSET(sp)
+ stw %r21,R21_OFFSET(sp)
+ stw %r22,R22_OFFSET(sp)
+ stw %r23,R23_OFFSET(sp)
+ stw %r24,R24_OFFSET(sp)
+ stw %r25,R25_OFFSET(sp)
+ stw %r26,R26_OFFSET(sp)
+ stw %r27,R27_OFFSET(sp)
+ stw %r28,R28_OFFSET(sp)
+ stw %r29,R29_OFFSET(sp)
+ stw %r30,R30_OFFSET(sp)
+ stw %r31,R31_OFFSET(sp)
+
+# Now most registers are available since they have been saved
+#
+# The following items are currently wrong in the integer context
+# reg current value saved value
+# ------------------------------------------------
+# arg0 scratch isr_arg0 (ctl)
+# r9 vector number isr_r9 (ctl)
+#
+# Fix them
+
+ mfctl isr_arg0,%r3
+ stw %r3,ARG0_OFFSET(sp)
+
+ mfctl isr_r9,%r3
+ stw %r3,R9_OFFSET(sp)
+
+#
+# At this point we are done with isr_arg0, and isr_r9 control registers
+#
+
+
+# Point to beginning of float context and
+# save the floating point context -- doing whatever patches are necessary
+# (the ldo in the delay slot forms the argument before the call lands)
+ .call ARGW0=GR
+ bl _CPU_Save_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(sp),arg0
+
+# save the ptr to interrupt frame as an argument for the interrupt handler
+ copy sp, arg1
+
+# Advance the frame to point beyond all interrupt contexts (integer & float)
+# this also includes the pad to align to 64byte stack boundary
+ ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp
+
+# r3 -- &_ISR_Nest_level
+# r5 -- value _ISR_Nest_level
+# r4 -- &_Thread_Dispatch_disable_level
+# r6 -- value _Thread_Dispatch_disable_level
+# r9 -- vector number
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldo R%_ISR_Nest_level(%r3),%r3
+ ldw 0(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldo R%_Thread_Dispatch_disable_level(%r4),%r4
+ ldw 0(%r4),%r6
+
+# increment interrupt nest level counter. If outermost interrupt
+# switch the stack and squirrel away the previous sp.
+ addi 1,%r5,%r5
+ stw %r5, 0(%r3)
+
+# compute and save new stack (with frame)
+# just in case we are nested -- simpler this way
+# (if nested, i.e. new nest level != 1, skip the stack switch; the ldo
+# in the delay slot always executes and computes the nested-case frame)
+ comibf,= 1,%r5,stack_done
+ ldo 128(sp),%r7
+
+#
+# Switch to interrupt stack allocated by the interrupt manager (intr.c)
+#
+ .import _CPU_Interrupt_stack_low,data
+ ldil L%_CPU_Interrupt_stack_low,%r7
+ ldw R%_CPU_Interrupt_stack_low(%r7),%r7
+ ldo 128(%r7),%r7
+
+stack_done:
+# save our current stack pointer where the "old sp" is supposed to be
+ stw sp, -4(%r7)
+# and switch stacks (or advance old stack in nested case)
+ copy %r7, sp
+
+# increment the dispatch disable level counter.
+ addi 1,%r6,%r6
+ stw %r6, 0(%r4)
+
+# load address of user handler
+# (word-indexed load: %r8 = _ISR_Vector_table[vector])
+ .import _ISR_Vector_table,data
+ ldil L%_ISR_Vector_table,%r8
+ ldo R%_ISR_Vector_table(%r8),%r8
+ ldwx,s %r9(%r8),%r8
+
+# invoke user interrupt handler
+# Interrupts are currently disabled, as per RTEMS convention
+# The handler has the option of re-enabling interrupts
+# NOTE: can not use 'bl' since it uses "pc-relative" addressing
+# and we are using a hard coded address from a table
+# So... we fudge r2 ourselves (ala dynacall)
+# The blr leaves rp pointing just past the bv (post_user_interrupt_handler)
+# and the bv,n in its delay slot enters the handler.
+#
+ copy %r9, %r26
+ .call ARGW0=GR, ARGW1=GR
+ blr %r0, rp
+ bv,n 0(%r8)
+
+post_user_interrupt_handler:
+
+# Back from user handler(s)
+# Disable external interrupts (since the interrupt handler could
+# have turned them on) and return to the interrupted task stack (assuming
+# (_ISR_Nest_level == 0)
+
+ rsm HPPA_PSW_I, %r0
+ ldw -4(sp), sp
+
+# r3 -- &_ISR_Nest_level
+# r5 -- value _ISR_Nest_level
+# r4 -- &_Thread_Dispatch_disable_level
+# r6 -- value _Thread_Dispatch_disable_level
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldo R%_ISR_Nest_level(%r3),%r3
+ ldw 0(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldo R%_Thread_Dispatch_disable_level(%r4),%r4
+ ldw 0(%r4), %r6
+
+# decrement isr nest level
+ addi -1, %r5, %r5
+ stw %r5, 0(%r3)
+
+# decrement dispatch disable level counter and, if not 0, go on
+# (the stw in the delay slot always executes)
+ addi -1,%r6,%r6
+ comibf,= 0,%r6,isr_restore
+ stw %r6, 0(%r4)
+
+# check whether or not a context switch is necessary
+ .import _Context_Switch_necessary,data
+ ldil L%_Context_Switch_necessary,%r8
+ ldw R%_Context_Switch_necessary(%r8),%r8
+ comibf,=,n 0,%r8,ISR_dispatch
+
+# check whether or not a context switch is necessary because an ISR
+# sent signals to the interrupted task
+ .import _ISR_Signals_to_thread_executing,data
+ ldil L%_ISR_Signals_to_thread_executing,%r8
+ ldw R%_ISR_Signals_to_thread_executing(%r8),%r8
+ comibt,=,n 0,%r8,isr_restore
+
+# OK, something happened while in ISR and we need to switch to a task
+# other than the one which was interrupted or the
+# ISR_Signals_to_thread_executing case
+# We also turn on interrupts, since the interrupted task had them
+# on (obviously :-) and Thread_Dispatch is happy to leave ints on.
+#
+
+ISR_dispatch:
+ ssm HPPA_PSW_I, %r0
+
+# allocate a 128 byte call frame in the delay slot, call the
+# dispatcher, then pop the frame back off on return
+ .import _Thread_Dispatch,code
+ .call
+ bl _Thread_Dispatch,%r2
+ ldo 128(sp),sp
+
+ ldo -128(sp),sp
+
+# disable interrupts again before restoring the interrupted context
+ rsm HPPA_PSW_I, %r0
+
+isr_restore:
+
+# Get a pointer to beginning of our stack frame
+ ldo -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1
+
+# restore float
+# (the ldo in the delay slot forms the argument before the call lands)
+ .call ARGW0=GR
+ bl _CPU_Restore_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(%arg1), arg0
+
+ copy %arg1, %arg0
+
+# ********** FALL THRU **********
+
+# Jump here from bottom of Context_Switch
+# Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self
+# restore interrupt state
+#
+# On entry: arg0 points at the integer context (Context_Control) to resume.
+
+ .EXPORT _CPU_Context_restore
+_CPU_Context_restore:
+
+# Turn off Q & I so we can write pcoq
+ rsm HPPA_PSW_Q + HPPA_PSW_I, %r0
+
+ ldw IPSW_OFFSET(arg0), %r8
+ mtctl %r8, ipsw
+
+ ldw SAR_OFFSET(arg0), %r9
+ mtctl %r9, sar
+
+# load both PC queue slots: front, then back
+ ldw PCOQFRONT_OFFSET(arg0), %r10
+ mtctl %r10, pcoq
+
+ ldw PCOQBACK_OFFSET(arg0), %r11
+ mtctl %r11, pcoq
+
+#
+# restore integer state
+#
+ ldw R1_OFFSET(arg0),%r1
+ ldw R2_OFFSET(arg0),%r2
+ ldw R3_OFFSET(arg0),%r3
+ ldw R4_OFFSET(arg0),%r4
+ ldw R5_OFFSET(arg0),%r5
+ ldw R6_OFFSET(arg0),%r6
+ ldw R7_OFFSET(arg0),%r7
+ ldw R8_OFFSET(arg0),%r8
+ ldw R9_OFFSET(arg0),%r9
+ ldw R10_OFFSET(arg0),%r10
+ ldw R11_OFFSET(arg0),%r11
+ ldw R12_OFFSET(arg0),%r12
+ ldw R13_OFFSET(arg0),%r13
+ ldw R14_OFFSET(arg0),%r14
+ ldw R15_OFFSET(arg0),%r15
+ ldw R16_OFFSET(arg0),%r16
+ ldw R17_OFFSET(arg0),%r17
+ ldw R18_OFFSET(arg0),%r18
+ ldw R19_OFFSET(arg0),%r19
+ ldw R20_OFFSET(arg0),%r20
+ ldw R21_OFFSET(arg0),%r21
+ ldw R22_OFFSET(arg0),%r22
+ ldw R23_OFFSET(arg0),%r23
+ ldw R24_OFFSET(arg0),%r24
+ ldw R25_OFFSET(arg0),%r25
+# skipping r26 (aka arg0) until we are done with it
+ ldw R27_OFFSET(arg0),%r27
+ ldw R28_OFFSET(arg0),%r28
+ ldw R29_OFFSET(arg0),%r29
+ ldw R30_OFFSET(arg0),%r30
+ ldw R31_OFFSET(arg0),%r31
+
+# Must load r26 last since it is arg0 (the pointer we are loading from)
+ ldw R26_OFFSET(arg0),%r26
+
+isr_exit:
+ rfi
+ .EXIT
+ .PROCEND
+
+#
+# This section is used to context switch floating point registers.
+# Ref: 6-35 of Architecture 1.1
+#
+# NOTE: since integer multiply uses the floating point unit,
+# we have to save/restore fp on every trap. We cannot
+# just try to keep track of fp usage.
+
+ .align 32
+ .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0
+# void _CPU_Save_float_context( Context_Control_fp *fp_context )
+# Stores fr0-fr31 (32 doubles) at arg0.  fr0-fr30 use the
+# post-increment-by-8 form; fr31 is stored at the final offset and
+# arg0 is stepped back to its entry value in the return delay slot.
+_CPU_Save_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ fstds,ma %fr0,8(%arg0)
+ fstds,ma %fr1,8(%arg0)
+ fstds,ma %fr2,8(%arg0)
+ fstds,ma %fr3,8(%arg0)
+ fstds,ma %fr4,8(%arg0)
+ fstds,ma %fr5,8(%arg0)
+ fstds,ma %fr6,8(%arg0)
+ fstds,ma %fr7,8(%arg0)
+ fstds,ma %fr8,8(%arg0)
+ fstds,ma %fr9,8(%arg0)
+ fstds,ma %fr10,8(%arg0)
+ fstds,ma %fr11,8(%arg0)
+ fstds,ma %fr12,8(%arg0)
+ fstds,ma %fr13,8(%arg0)
+ fstds,ma %fr14,8(%arg0)
+ fstds,ma %fr15,8(%arg0)
+ fstds,ma %fr16,8(%arg0)
+ fstds,ma %fr17,8(%arg0)
+ fstds,ma %fr18,8(%arg0)
+ fstds,ma %fr19,8(%arg0)
+ fstds,ma %fr20,8(%arg0)
+ fstds,ma %fr21,8(%arg0)
+ fstds,ma %fr22,8(%arg0)
+ fstds,ma %fr23,8(%arg0)
+ fstds,ma %fr24,8(%arg0)
+ fstds,ma %fr25,8(%arg0)
+ fstds,ma %fr26,8(%arg0)
+ fstds,ma %fr27,8(%arg0)
+ fstds,ma %fr28,8(%arg0)
+ fstds,ma %fr29,8(%arg0)
+ fstds,ma %fr30,8(%arg0)
+ fstds %fr31,0(%arg0)
+ bv 0(%r2)
+ addi -(31*8), %arg0, %arg0 ; restore arg0 just for fun
+ .EXIT
+ .PROCEND
+
+ .align 32
+ .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0
+# void _CPU_Restore_float_context( Context_Control_fp *fp_context )
+# Inverse of _CPU_Save_float_context: loads fr31 down to fr0 from the
+# 32-double area at arg0, walking backwards with pre-decrement-by-8;
+# the final fr0 load (in the return delay slot) leaves arg0 back at
+# its entry value.
+_CPU_Restore_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ addi (31*8), %arg0, %arg0 ; point at last double
+ fldds 0(%arg0),%fr31
+ fldds,mb -8(%arg0),%fr30
+ fldds,mb -8(%arg0),%fr29
+ fldds,mb -8(%arg0),%fr28
+ fldds,mb -8(%arg0),%fr27
+ fldds,mb -8(%arg0),%fr26
+ fldds,mb -8(%arg0),%fr25
+ fldds,mb -8(%arg0),%fr24
+ fldds,mb -8(%arg0),%fr23
+ fldds,mb -8(%arg0),%fr22
+ fldds,mb -8(%arg0),%fr21
+ fldds,mb -8(%arg0),%fr20
+ fldds,mb -8(%arg0),%fr19
+ fldds,mb -8(%arg0),%fr18
+ fldds,mb -8(%arg0),%fr17
+ fldds,mb -8(%arg0),%fr16
+ fldds,mb -8(%arg0),%fr15
+ fldds,mb -8(%arg0),%fr14
+ fldds,mb -8(%arg0),%fr13
+ fldds,mb -8(%arg0),%fr12
+ fldds,mb -8(%arg0),%fr11
+ fldds,mb -8(%arg0),%fr10
+ fldds,mb -8(%arg0),%fr9
+ fldds,mb -8(%arg0),%fr8
+ fldds,mb -8(%arg0),%fr7
+ fldds,mb -8(%arg0),%fr6
+ fldds,mb -8(%arg0),%fr5
+ fldds,mb -8(%arg0),%fr4
+ fldds,mb -8(%arg0),%fr3
+ fldds,mb -8(%arg0),%fr2
+ fldds,mb -8(%arg0),%fr1
+ bv 0(%r2)
+ fldds,mb -8(%arg0),%fr0
+ .EXIT
+ .PROCEND
+
+#
+# These 2 small routines are unused right now.
+# Normally we just go thru _CPU_Save_float_context (and Restore)
+#
+# Here we just deref the ptr and jump up, letting _CPU_Save_float_context
+# do the return for us.
+#
+ .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0
+# void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr )
+# Dereferences the pointer (in the delay slot) and branches with %r0
+# as the link register, so _CPU_Save_float_context's 'bv 0(%r2)'
+# returns straight to OUR caller.
+_CPU_Context_save_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Save_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+ .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0
+# void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr )
+# Same trick as _CPU_Context_save_fp: deref in the delay slot, link
+# through %r0 so the callee returns directly to our caller.
+_CPU_Context_restore_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Restore_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+
+# void _CPU_Context_switch( run_context, heir_context )
+#
+# This routine performs a normal non-FP context switch.
+#
+
+ .align 32
+ .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR
+# arg0 = running task's Context_Control, arg1 = heir's Context_Control
+_CPU_Context_switch:
+ .PROC
+ .CALLINFO FRAME=64
+ .ENTRY
+
+# Save the integer context
+ stw %r1,R1_OFFSET(arg0)
+ stw %r2,R2_OFFSET(arg0)
+ stw %r3,R3_OFFSET(arg0)
+ stw %r4,R4_OFFSET(arg0)
+ stw %r5,R5_OFFSET(arg0)
+ stw %r6,R6_OFFSET(arg0)
+ stw %r7,R7_OFFSET(arg0)
+ stw %r8,R8_OFFSET(arg0)
+ stw %r9,R9_OFFSET(arg0)
+ stw %r10,R10_OFFSET(arg0)
+ stw %r11,R11_OFFSET(arg0)
+ stw %r12,R12_OFFSET(arg0)
+ stw %r13,R13_OFFSET(arg0)
+ stw %r14,R14_OFFSET(arg0)
+ stw %r15,R15_OFFSET(arg0)
+ stw %r16,R16_OFFSET(arg0)
+ stw %r17,R17_OFFSET(arg0)
+ stw %r18,R18_OFFSET(arg0)
+ stw %r19,R19_OFFSET(arg0)
+ stw %r20,R20_OFFSET(arg0)
+ stw %r21,R21_OFFSET(arg0)
+ stw %r22,R22_OFFSET(arg0)
+ stw %r23,R23_OFFSET(arg0)
+ stw %r24,R24_OFFSET(arg0)
+ stw %r25,R25_OFFSET(arg0)
+ stw %r26,R26_OFFSET(arg0)
+ stw %r27,R27_OFFSET(arg0)
+ stw %r28,R28_OFFSET(arg0)
+ stw %r29,R29_OFFSET(arg0)
+ stw %r30,R30_OFFSET(arg0)
+ stw %r31,R31_OFFSET(arg0)
+
+# fill in interrupt context section
+# Resuming this context via _CPU_Context_restore will "return" to our
+# caller: rp (%r2) becomes the new PC queue front, rp+4 the back.
+ stw %r2, PCOQFRONT_OFFSET(%arg0)
+ ldo 4(%r2), %r2
+ stw %r2, PCOQBACK_OFFSET(%arg0)
+
+# Generate a suitable IPSW by using the system default psw
+# with the current low bits added in.
+# ('ssm 0, %arg2' reads the current PSW without changing any bits;
+# dep then merges its low 8 bits into the default value)
+
+ ldil L%CPU_PSW_DEFAULT, %r2
+ ldo R%CPU_PSW_DEFAULT(%r2), %r2
+ ssm 0, %arg2
+ dep %arg2, 31, 8, %r2
+ stw %r2, IPSW_OFFSET(%arg0)
+
+# at this point, the running task context is completely saved
+# Now jump to the bottom of the interrupt handler to load the
+# heir's context (the copy in the delay slot moves arg1 into arg0)
+
+ b _CPU_Context_restore
+ copy %arg1, %arg0
+
+ .EXIT
+ .PROCEND
+
+
+/*
+ * Find first bit
+ * NOTE:
+ * This is used (and written) only for the ready chain code and
+ * priority bit maps.
+ * Any other use constitutes fraud.
+ * Returns first bit from the least significant side.
+ * Eg: if input is 0x8001
+ * output will indicate the '1' bit and return 0.
+ * This is counter to HPPA bit numbering which calls this
+ * bit 31. This way simplifies the macros _CPU_Priority_Mask
+ * and _CPU_Priority_Bits_index.
+ *
+ * NOTE:
+ * We just use 16 bit version
+ * does not handle zero case
+ *
+ * Based on the UTAH Mach libc version of ffs.
+ */
+
+ .align 32
+ .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR
+# int hppa_rtems_ffs( unsigned bit_map )
+# Binary-search for the least significant set bit (see the block
+# comment above for numbering).  Each extru tests the low k bits;
+# the 'addi,tr' (add-and-trap... add with trailing branch-nullify)
+# pairs either adjust the bit position or shift the operand right.
+hppa_rtems_ffs:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+#ifdef RETURN_ERROR_ON_ZERO
+ comb,= %arg0,%r0,ffsdone ; If arg0 is 0
+ ldi -1,%ret0 ; return -1
+#endif
+
+#if BITFIELD_SIZE == 32
+ ldi 31,%ret0 ; Set return to high bit
+ extru,= %arg0,31,16,%r0 ; If low 16 bits are non-zero
+ addi,tr -16,%ret0,%ret0 ; subtract 16 from bitpos
+ shd %r0,%arg0,16,%arg0 ; else shift right 16 bits
+#else
+ ldi 15,%ret0 ; Set return to high bit
+#endif
+ extru,= %arg0,31,8,%r0 ; If low 8 bits are non-zero
+ addi,tr -8,%ret0,%ret0 ; subtract 8 from bitpos
+ shd %r0,%arg0,8,%arg0 ; else shift right 8 bits
+ extru,= %arg0,31,4,%r0 ; If low 4 bits are non-zero
+ addi,tr -4,%ret0,%ret0 ; subtract 4 from bitpos
+ shd %r0,%arg0,4,%arg0 ; else shift right 4 bits
+ extru,= %arg0,31,2,%r0 ; If low 2 bits are non-zero
+ addi,tr -2,%ret0,%ret0 ; subtract 2 from bitpos
+ shd %r0,%arg0,2,%arg0 ; else shift right 2 bits
+ extru,= %arg0,31,1,%r0 ; If low bit is non-zero
+ addi -1,%ret0,%ret0 ; subtract 1 from bitpos
+ffsdone:
+ bv,n 0(%r2) ; ,n nullifies the delay slot
+ nop ; never executed
+ .EXIT
+ .PROCEND
diff --git a/c/src/exec/score/cpu/hppa1.1/hppa.h b/c/src/exec/score/cpu/hppa1.1/hppa.h
new file mode 100644
index 0000000000..8829bb8c87
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/hppa.h
@@ -0,0 +1,704 @@
+/*
+ * @(#)hppa.h 1.5 - 95/04/25
+ *
+ *
+ * File: $RCSfile$
+ * Project: PixelFlow
+ * Created: 94/10/4
+ * RespEngr: tony bennett
+ * Revision: $Revision$
+ * Last Mod: $Date$
+ *
+ * Description:
+ *
+ * Definitions for HP PA Risc
+ * ref: PA RISC 1.1 Architecture and Instruction Set Reference Manual
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ *
+ * Note:
+ * This file is included by both C and assembler code ( -DASM )
+ *
+ * $Id$
+ */
+
+#ifndef _INCLUDE_HPPA_H
+#define _INCLUDE_HPPA_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define hppa1_1
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This section contains the information required to build
+ * RTEMS for a particular member of the Hewlett Packard
+ * PA-RISC family. It does this by setting variables to
+ * indicate which implementation dependent features are
+ * present in a particular member of the family.
+ */
+
+#if defined(hppa7100)
+
+#define RTEMS_MODEL_NAME "hppa 7100"
+
+#elif defined(hppa7200)
+
+#define RTEMS_MODEL_NAME "hppa 7200"
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "HP PA-RISC 1.1"
+
+#ifndef ASM
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+/* 16-bit priority bit map -- presumably sized to match the 16-bit
+ * ffs variant used by this port; verify against cpu_asm.s */
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef char signed8; /* 8-bit signed integer */
+typedef short signed16; /* 16-bit signed integer */
+typedef int signed32; /* 32-bit signed integer */
+typedef long long signed64; /* 64 bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+#endif /* !ASM */
+
+
+/*
+ * Processor Status Word (PSW) Masks
+ */
+
+#define HPPA_PSW_Y 0x80000000 /* Data Debug Trap Disable */
+#define HPPA_PSW_Z 0x40000000 /* Instruction Debug Trap Disable */
+#define HPPA_PSW_r2 0x20000000 /* reserved */
+#define HPPA_PSW_r3 0x10000000 /* reserved */
+#define HPPA_PSW_r4 0x08000000 /* reserved */
+#define HPPA_PSW_E 0x04000000 /* Little Endian on Memory References */
+#define HPPA_PSW_S 0x02000000 /* Secure Interval Timer */
+#define HPPA_PSW_T 0x01000000 /* Taken Branch Trap Enable */
+#define HPPA_PSW_H 0x00800000 /* Higher-Privilege Transfer Trap Enable*/
+#define HPPA_PSW_L 0x00400000 /* Lower-Privilege Transfer Trap Enable */
+#define HPPA_PSW_N 0x00200000 /* PC Queue Front Instruction Nullified */
+#define HPPA_PSW_X 0x00100000 /* Data Memory Break Disable */
+#define HPPA_PSW_B 0x00080000 /* Taken Branch in Previous Cycle */
+#define HPPA_PSW_C 0x00040000 /* Code Address Translation Enable */
+#define HPPA_PSW_V 0x00020000 /* Divide Step Correction */
+#define HPPA_PSW_M 0x00010000 /* High-Priority Machine Check Disable */
+#define HPPA_PSW_CB 0x0000ff00 /* Carry/Borrow Bits */
+#define HPPA_PSW_r24 0x00000080 /* reserved */
+#define HPPA_PSW_G 0x00000040 /* Debug trap Enable */
+#define HPPA_PSW_F 0x00000020 /* Performance monitor interrupt unmask */
+#define HPPA_PSW_R 0x00000010 /* Recovery Counter Enable */
+#define HPPA_PSW_Q 0x00000008 /* Interruption State Collection Enable */
+#define HPPA_PSW_P 0x00000004 /* Protection ID Validation Enable */
+#define HPPA_PSW_D 0x00000002 /* Data Address Translation Enable */
+#define HPPA_PSW_I 0x00000001 /* External, Power Failure, */
+ /* Low-Priority Machine Check */
+ /* Interruption Enable */
+
+/*
+ * HPPA traps and interrupts
+ * basic layout. Note numbers do not denote priority
+ *
+ * 0-31 basic traps and interrupts defined by HPPA architecture
+ * 32-63 32 external interrupts
+ * 64-... bsp defined
+ */
+
+#define HPPA_INTERRUPT_NON_EXISTENT 0
+/* group 1 */
+#define HPPA_INTERRUPT_HIGH_PRIORITY_MACHINE_CHECK 1
+/* group 2 */
+#define HPPA_INTERRUPT_POWER_FAIL 2
+#define HPPA_INTERRUPT_RECOVERY_COUNTER 3
+#define HPPA_INTERRUPT_EXTERNAL_INTERRUPT 4
+#define HPPA_INTERRUPT_LOW_PRIORITY_MACHINE_CHECK 5
+#define HPPA_INTERRUPT_PERFORMANCE_MONITOR 29
+/* group 3 */
+#define HPPA_INTERRUPT_INSTRUCTION_TLB_MISS 6
+#define HPPA_INTERRUPT_INSTRUCTION_MEMORY_PROTECTION 7
+#define HPPA_INTERRUPT_INSTRUCTION_DEBUG 30
+#define HPPA_INTERRUPT_ILLEGAL_INSTRUCTION 8
+#define HPPA_INTERRUPT_BREAK_INSTRUCTION 9
+#define HPPA_INTERRUPT_PRIVILEGED_OPERATION 10
+#define HPPA_INTERRUPT_PRIVILEGED_REGISTER 11
+#define HPPA_INTERRUPT_OVERFLOW 12
+#define HPPA_INTERRUPT_CONDITIONAL 13
+#define HPPA_INTERRUPT_ASSIST_EXCEPTION 14
+#define HPPA_INTERRUPT_DATA_TLB_MISS 15
+#define HPPA_INTERRUPT_NON_ACCESS_INSTRUCTION_TLB_MISS 16
+#define HPPA_INTERRUPT_NON_ACCESS_DATA_TLB_MISS 17
+#define HPPA_INTERRUPT_DATA_MEMORY_ACCESS_RIGHTS 26
+#define HPPA_INTERRUPT_DATA_MEMORY_PROTECTION_ID 27
+#define HPPA_INTERRUPT_UNALIGNED_DATA_REFERENCE 28
+#define HPPA_INTERRUPT_DATA_MEMORY_PROTECTION 18
+#define HPPA_INTERRUPT_DATA_MEMORY_BREAK 19
+#define HPPA_INTERRUPT_TLB_DIRTY_BIT 20
+#define HPPA_INTERRUPT_PAGE_REFERENCE 21
+#define HPPA_INTERRUPT_DATA_DEBUG 31
+#define HPPA_INTERRUPT_ASSIST_EMULATION 22
+/* group 4 */
+#define HPPA_INTERRUPT_HIGHER_PRIVILEGE_TRANSFER 23
+#define HPPA_INTERRUPT_LOWER_PRIVILEGE_TRANSFER 24
+#define HPPA_INTERRUPT_TAKEN_BRANCH 25
+
+#define HPPA_INTERRUPT_ON_CHIP_MAX 31
+
+/* External Interrupts via interrupt 4 */
+
+#define HPPA_INTERRUPT_EXTERNAL_BASE 32
+
+#define HPPA_INTERRUPT_EXTERNAL_0 32
+#define HPPA_INTERRUPT_EXTERNAL_1 33
+#define HPPA_INTERRUPT_EXTERNAL_2 34
+#define HPPA_INTERRUPT_EXTERNAL_3 35
+#define HPPA_INTERRUPT_EXTERNAL_4 36
+#define HPPA_INTERRUPT_EXTERNAL_5 37
+#define HPPA_INTERRUPT_EXTERNAL_6 38
+#define HPPA_INTERRUPT_EXTERNAL_7 39
+#define HPPA_INTERRUPT_EXTERNAL_8 40
+#define HPPA_INTERRUPT_EXTERNAL_9 41
+#define HPPA_INTERRUPT_EXTERNAL_10 42
+#define HPPA_INTERRUPT_EXTERNAL_11 43
+#define HPPA_INTERRUPT_EXTERNAL_12 44
+#define HPPA_INTERRUPT_EXTERNAL_13 45
+#define HPPA_INTERRUPT_EXTERNAL_14 46
+#define HPPA_INTERRUPT_EXTERNAL_15 47
+#define HPPA_INTERRUPT_EXTERNAL_16 48
+#define HPPA_INTERRUPT_EXTERNAL_17 49
+#define HPPA_INTERRUPT_EXTERNAL_18 50
+#define HPPA_INTERRUPT_EXTERNAL_19 51
+#define HPPA_INTERRUPT_EXTERNAL_20 52
+#define HPPA_INTERRUPT_EXTERNAL_21 53
+#define HPPA_INTERRUPT_EXTERNAL_22 54
+#define HPPA_INTERRUPT_EXTERNAL_23 55
+#define HPPA_INTERRUPT_EXTERNAL_24 56
+#define HPPA_INTERRUPT_EXTERNAL_25 57
+#define HPPA_INTERRUPT_EXTERNAL_26 58
+#define HPPA_INTERRUPT_EXTERNAL_27 59
+#define HPPA_INTERRUPT_EXTERNAL_28 60
+#define HPPA_INTERRUPT_EXTERNAL_29 61
+#define HPPA_INTERRUPT_EXTERNAL_30 62
+#define HPPA_INTERRUPT_EXTERNAL_31 63
+
+#define HPPA_INTERRUPT_EXTERNAL_INTERVAL_TIMER HPPA_INTERRUPT_EXTERNAL_0
+#define HPPA_EXTERNAL_INTERRUPTS 32
+
+/* BSP defined interrupts begin here */
+
+#define HPPA_INTERRUPT_MAX 64
+
+/*
+ * Inline macros for misc. interesting opcodes
+ */
+
+/* generate a global label */
+#define HPPA_ASM_LABEL(label) \
+ asm(".export " label ", ! .label " label);
+
+/* Return From Interrupt RFI */
+#define HPPA_ASM_RFI() asm volatile ("rfi")
+
+/* Set System Mask SSM i,t */
+#define HPPA_ASM_SSM(i,gr) asm volatile ("ssm %1, %0" \
+ : "=r" (gr) \
+ : "i" (i))
+/* Reset System Mask RSM i,t */
+#define HPPA_ASM_RSM(i,gr) asm volatile ("rsm %1, %0" \
+ : "=r" (gr) \
+ : "i" (i))
+/* Move To System Mask MTSM r */
+#define HPPA_ASM_MTSM(gr) asm volatile ("mtsm %0" \
+ : : "r" (gr))
+
+/* Load Space Identifier LDSID (s,b),t */
+#define HPPA_ASM_LDSID(sr,grb,grt) asm volatile ("ldsid (%1,%2),%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (grb))
+
+/*
+ * Gcc extended asm doesn't really allow for treatment of space registers
+ * as "registers", so we have to use "i" format.
+ * Unfortunately this means that the "=" constraint is not available.
+ */
+
+/* Move To Space Register MTSP r,sr */
+#define HPPA_ASM_MTSP(gr,sr) asm volatile ("mtsp %1,%0" \
+ : : "i" (sr), \
+ "r" (gr))
+
+/* Move From Space Register MFSP sr,t */
+#define HPPA_ASM_MFSP(sr,gr) asm volatile ("mfsp %1,%0" \
+ : "=r" (gr) \
+ : "i" (sr))
+
+/* Move To Control register MTCTL r,t */
+#define HPPA_ASM_MTCTL(gr,cr) asm volatile ("mtctl %1,%0" \
+ : : "i" (cr), \
+ "r" (gr))
+
+/* Move From Control register MFCTL r,t */
+#define HPPA_ASM_MFCTL(cr,gr) asm volatile ("mfctl %1,%0" \
+ : "=r" (gr) \
+ : "i" (cr))
+
+/* Synchronize caches SYNC */
+#define HPPA_ASM_SYNC() asm volatile ("sync")
+
+/* Probe Read Access PROBER (s,b),r,t */
+#define HPPA_ASM_PROBER(sr,groff,gracc,grt) \
+ asm volatile ("prober (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "r" (gracc))
+
+/* Probe Read Access Immediate PROBERI (s,b),i,t*/
+#define HPPA_ASM_PROBERI(sr,groff,iacc,grt) \
+ asm volatile ("proberi (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "i" (iacc))
+
+/* Probe Write Access PROBEW (s,b),r,t */
+#define HPPA_ASM_PROBEW(sr,groff,gracc,grt) \
+ asm volatile ("probew (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "r" (gracc))
+
+/* Probe Write Access Immediate PROBEWI (s,b),i,t */
+#define HPPA_ASM_PROBEWI(sr,groff,iacc,grt) \
+ asm volatile ("probewi (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "i" (iacc))
+
+/* Load Physical Address LPA x(s,b),t */
+#define HPPA_ASM_LPA(sr,grb,grt) asm volatile ("lpa %%r0(%1,%2),%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (grb))
+
+/* Load Coherence Index LCI x(s,b),t */
+/* AKA: Load Hash Address LHA x(s,b),t */
+#define HPPA_ASM_LCI(grx,sr,grb,grt) asm volatile ("lha %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "r" (grx),\
+ "i" (sr), \
+ "r" (grb))
+#define HPPA_ASM_LHA(grx,sr,grb,grt) HPPA_ASM_LCI(grx,sr,grb,grt)
+
+/* Purge Data Tlb PDTLB x(s,b) */
+#define HPPA_ASM_PDTLB(grx,sr,grb) asm volatile ("pdtlb %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Instruction Tlb PITLB x(s,b) */
+#define HPPA_ASM_PITLB(grx,sr,grb) asm volatile ("pitlb %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Data Tlb Entry PDTLBE x(s,b) */
+#define HPPA_ASM_PDTLBE(grx,sr,grb) asm volatile ("pdtlbe %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Instruction Tlb Entry PITLBE x(s,b) */
+#define HPPA_ASM_PITLBE(grx,sr,grb) asm volatile ("pitlbe %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+
+/* Insert Data TLB Address IDTLBA r,(s,b) */
+#define HPPA_ASM_IDTLBA(gr,sr,grb) asm volatile ("idtlba %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Instruction TLB Address IITLBA r,(s,b) */
+#define HPPA_ASM_IITLBA(gr,sr,grb) asm volatile ("iitlba %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Data TLB Protection IDTLBP r,(s,b) */
+#define HPPA_ASM_IDTLBP(gr,sr,grb) asm volatile ("idtlbp %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Instruction TLB Protection IITLBP r,(s,b) */
+#define HPPA_ASM_IITLBP(gr,sr,grb) asm volatile ("iitlbp %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Data Cache PDC x(s,b) */
+#define HPPA_ASM_PDC(grx,sr,grb) asm volatile ("pdc %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Data Cache FDC x(s,b) */
+#define HPPA_ASM_FDC(grx,sr,grb) asm volatile ("fdc %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Instruction Cache FIC x(s,b) */
+#define HPPA_ASM_FIC(grx,sr,grb) asm volatile ("fic %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Data Cache Entry FDCE x(s,b) */
+#define HPPA_ASM_FDCE(grx,sr,grb) asm volatile ("fdce %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Instruction Cache Entry FICE x(s,b) */
+#define HPPA_ASM_FICE(grx,sr,grb) asm volatile ("fice %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Break BREAK i5,i13 */
+#define HPPA_ASM_BREAK(i5,i13) asm volatile ("break %0,%1" \
+ : : "i" (i5), \
+ "i" (i13))
+
+/* Load and Clear Word Short LDCWS d(s,b),t */
+#define HPPA_ASM_LDCWS(i,sr,grb,grt) asm volatile ("ldcws %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "i" (i), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Load and Clear Word Indexed LDCWX x(s,b),t */
+#define HPPA_ASM_LDCWX(grx,sr,grb,grt) asm volatile ("ldcwx %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Load Word Absolute Short LDWAS d(b),t */
+/* NOTE: "short" here means "short displacement" */
+#define HPPA_ASM_LDWAS(disp,grbase,gr) asm volatile("ldwas %1(%2),%0" \
+ : "=r" (gr) \
+ : "i" (disp), \
+ "r" (grbase))
+
+/* Store Word Absolute Short STWAS r,d(b) */
+/* NOTE: "short" here means "short displacement" */
+#define HPPA_ASM_STWAS(gr,disp,grbase) asm volatile("stwas %0,%1(%2)" \
+ : : "r" (gr), \
+ "i" (disp), \
+ "r" (grbase))
+
+/*
+ * Swap bytes
+ * REFERENCE: PA72000 TRM -- Appendix C
+ */
+#define HPPA_ASM_SWAPBYTES(value, swapped) asm volatile( \
+ " shd %1,%1,16,%0 \n\
+ dep %0,15,8,%0 \n\
+ shd %1,%0,8,%0" \
+ : "=r" (swapped) \
+ : "r" (value) \
+ )
+
+
+/* 72000 Diagnose instructions follow
+ * These macros assume gas knows about these instructions.
+ * gas2.2.u1 did not.
+ * I added them to my copy and installed it locally.
+ *
+ * There are *very* special requirements for these guys
+ * ref: TRM 6.1.3 Programming Constraints
+ *
+ * The macros below handle the following rules
+ *
+ * Except for WIT, WDT, WDD, WIDO, WIDE, all DIAGNOSE must be doubled.
+ * Must never be nullified (hence the leading nop)
+ * NOP must precede every RDD,RDT,WDD,WDT,RDTLB
+ * Instruction preceding GR_SHDW must not set any of the GR's saved
+ *
+ * The macros do *NOT* deal with the following problems
+ * doubled DIAGNOSE instructions must not straddle a page boundary
+ * if code translation enabled. (since 2nd could trap on ITLB)
+ * If you care about DHIT and DPE bits of DR0, then
+ * No store instruction in the 2 insn window before RDD
+ */
+
+
+/* Move To CPU/DIAG register MTCPU r,t */
+#define HPPA_ASM_MTCPU(gr,dr) asm volatile (" nop \n" \
+ " mtcpu %1,%0 \n" \
+ " mtcpu %1,%0" \
+ : : "i" (dr), \
+ "r" (gr))
+
+/* Move From CPU/DIAG register MFCPU r,t */
+#define HPPA_ASM_MFCPU(dr,gr) asm volatile (" nop \n" \
+ " mfcpu %1,%0\n" \
+ " mfcpu %1,%0" \
+ : "=r" (gr) \
+ : "i" (dr))
+
+/* Transfer of Control Enable TOC_EN */
+#define HPPA_ASM_TOC_EN() asm volatile (" tocen \n" \
+ " tocen")
+
+/* Transfer of Control Disable TOC_DIS */
+#define HPPA_ASM_TOC_DIS() asm volatile (" tocdis \n" \
+ " tocdis")
+
+/* Shadow Registers to General Register SHDW_GR */
+/* Copies the shadow registers into GR 1,8,9,16,17,24,25; the clobber
+ * list tells the compiler those GRs are overwritten. NOTE: the clobbers
+ * must be separate comma-separated string literals -- adjacent literals
+ * ("r1" "r8" ...) concatenate into one invalid register name.
+ */
+#define HPPA_ASM_SHDW_GR() asm volatile (" shdwgr \n" \
+ " shdwgr" \
+ ::: "r1", "r8", "r9", "r16", \
+ "r17", "r24", "r25")
+
+/* General Registers to Shadow Register GR_SHDW */
+#define HPPA_ASM_GR_SHDW() asm volatile (" nop \n" \
+ " grshdw \n" \
+ " grshdw")
+
+/*
+ * Definitions of special registers for use by the above macros.
+ */
+
+/* Hardware Space Registers */
+#define SR0 0
+#define SR1 1
+#define SR2 2
+#define SR3 3
+#define SR4 4
+#define SR5 5
+#define SR6 6
+#define SR7 7
+
+/* Hardware Control Registers */
+#define CR0 0
+#define RCTR 0 /* Recovery Counter Register */
+
+#define CR8 8 /* Protection ID 1 */
+#define PIDR1 8
+
+#define CR9 9 /* Protection ID 2 */
+#define PIDR2 9
+
+#define CR10 10
+#define CCR 10 /* Coprocessor Configuration Register */
+
+#define CR11 11
+#define SAR 11 /* Shift Amount Register */
+
+#define CR12 12
+#define PIDR3 12 /* Protection ID 3 */
+
+#define CR13 13
+#define PIDR4 13 /* Protection ID 4 */
+
+#define CR14 14
+#define IVA 14 /* Interrupt Vector Address */
+
+#define CR15 15
+#define EIEM 15 /* External Interrupt Enable Mask */
+
+#define CR16 16
+#define ITMR 16 /* Interval Timer */
+
+#define CR17 17
+#define PCSQ 17 /* Program Counter Space queue */
+
+#define CR18 18
+#define PCOQ 18 /* Program Counter Offset queue */
+
+#define CR19 19
+#define IIR 19 /* Interruption Instruction Register */
+
+#define CR20 20
+#define ISR 20 /* Interruption Space Register */
+
+#define CR21 21
+#define IOR 21 /* Interruption Offset Register */
+
+#define CR22 22
+#define IPSW 22 /* Interruption Processor Status Word */
+
+#define CR23 23
+#define EIRR 23 /* External Interrupt Request */
+
+#define CR24 24
+#define PPDA 24 /* Physical Page Directory Address */
+#define TR0 24 /* Temporary register 0 */
+
+#define CR25 25
+#define HTA 25 /* Hash Table Address */
+#define TR1 25 /* Temporary register 1 */
+
+#define CR26 26
+#define TR2 26 /* Temporary register 2 */
+
+#define CR27 27
+#define TR3 27 /* Temporary register 3 */
+
+#define CR28 28
+#define TR4 28 /* Temporary register 4 */
+
+#define CR29 29
+#define TR5 29 /* Temporary register 5 */
+
+#define CR30 30
+#define TR6 30 /* Temporary register 6 */
+
+#define CR31 31
+#define CPUID 31 /* MP identifier */
+
+/*
+ * Diagnose registers
+ */
+
+#define DR0 0
+#define DR1 1
+#define DR8 8
+#define DR24 24
+#define DR25 25
+
+/*
+ * Tear apart a break instruction to find its type.
+ */
+#define HPPA_BREAK5(x) ((x) & 0x1F)
+#define HPPA_BREAK13(x) (((x) >> 13) & 0x1FFF)
+
+/* assemble a break instruction */
+#define HPPA_BREAK(i5,i13) (((i5) & 0x1F) | (((i13) & 0x1FFF) << 13))
+
+
+#ifndef ASM
+
+/*
+ * static inline utility functions to get at control registers
+ */
+
+/*
+ * EMIT_GET_CONTROL(name, reg): expands to a static inline function
+ * get_<name>() that reads control register 'reg' with MFCTL and
+ * returns its 32-bit contents.
+ */
+#define EMIT_GET_CONTROL(name, reg) \
+static __inline__ unsigned int \
+get_ ## name (void) \
+{ \
+ unsigned int value; \
+ HPPA_ASM_MFCTL(reg, value); \
+ return value; \
+}
+
+/*
+ * EMIT_SET_CONTROL(name, reg): expands to a static inline function
+ * set_<name>(new_value) that writes new_value into control register
+ * 'reg' with MTCTL and returns the value written. The original
+ * expansion declared a non-void return type but had no return
+ * statement, which is undefined behavior whenever a caller uses
+ * the result; returning new_value fixes that without changing the
+ * interface.
+ */
+#define EMIT_SET_CONTROL(name, reg) \
+static __inline__ unsigned int \
+set_ ## name (unsigned int new_value) \
+{ \
+ HPPA_ASM_MTCTL(new_value, reg); \
+ return new_value; \
+}
+
+#define EMIT_CONTROLS(name, reg) \
+ EMIT_GET_CONTROL(name, reg) \
+ EMIT_SET_CONTROL(name, reg)
+
+EMIT_CONTROLS(recovery, RCTR); /* CR0 */
+EMIT_CONTROLS(pid1, PIDR1); /* CR8 */
+EMIT_CONTROLS(pid2, PIDR2); /* CR9 */
+EMIT_CONTROLS(ccr, CCR); /* CR10; CCR and SCR share CR10 */
+EMIT_CONTROLS(scr, CCR); /* CR10; CCR and SCR share CR10 */
+EMIT_CONTROLS(sar, SAR); /* CR11 */
+EMIT_CONTROLS(pid3, PIDR3); /* CR12 */
+EMIT_CONTROLS(pid4, PIDR4); /* CR13 */
+EMIT_CONTROLS(iva, IVA); /* CR14 */
+EMIT_CONTROLS(eiem, EIEM); /* CR15 */
+EMIT_CONTROLS(itimer, ITMR); /* CR16 */
+EMIT_CONTROLS(pcsq, PCSQ); /* CR17 */
+EMIT_CONTROLS(pcoq, PCOQ); /* CR18 */
+EMIT_CONTROLS(iir, IIR); /* CR19 */
+EMIT_CONTROLS(isr, ISR); /* CR20 */
+EMIT_CONTROLS(ior, IOR); /* CR21 */
+EMIT_CONTROLS(ipsw, IPSW); /* CR22 */
+EMIT_CONTROLS(eirr, EIRR); /* CR23 */
+EMIT_CONTROLS(tr0, TR0); /* CR24 */
+EMIT_CONTROLS(tr1, TR1); /* CR25 */
+EMIT_CONTROLS(tr2, TR2); /* CR26 */
+EMIT_CONTROLS(tr3, TR3); /* CR27 */
+EMIT_CONTROLS(tr4, TR4); /* CR28 */
+EMIT_CONTROLS(tr5, TR5); /* CR29 */
+EMIT_CONTROLS(tr6, TR6); /* CR30 */
+EMIT_CONTROLS(tr7, CR31); /* CR31 */
+
+/*
+ * If and How to invoke the debugger (a ROM debugger generally)
+ */
+
+#ifdef SIMHPPA_ROM
+/* invoke the pflow debugger */
+#define CPU_INVOKE_DEBUGGER \
+ do { \
+ extern void debugger_break(void); \
+ debugger_break(); \
+ } while (0)
+#endif
+
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _INCLUDE_HPPA_H */
+
diff --git a/c/src/exec/score/cpu/hppa1.1/rtems.s b/c/src/exec/score/cpu/hppa1.1/rtems.s
new file mode 100644
index 0000000000..dd215c9af0
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/rtems.s
@@ -0,0 +1,53 @@
+/* rtems.S
+ *
+ * This file contains the single entry point code for
+ * the HPPA implementation of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <hppa.h>
+#include <rtems/cpu_asm.h>
+
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+ .align 32
+ .EXPORT cpu_jump_to_directive,ENTRY,PRIV_LEV=0
+cpu_jump_to_directive
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+# invoke user interrupt handler
+
+# XXX: look at register usage and code
+# XXX: this is not necessarily right!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# r9 = directive number
+
+ .import _Entry_points,data
+ ldil L%_Entry_points,%r8
+ ldo R%_Entry_points(%r8),%r8
+ ldwx,s %r9(%r8),%r8
+
+ .call ARGW0=GR
+ bv,n 0(%r8)
+ nop
+
+ .EXIT
+ .PROCEND
+
diff --git a/c/src/exec/score/cpu/i386/asm.h b/c/src/exec/score/cpu/i386/asm.h
new file mode 100644
index 0000000000..f123defcd9
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/asm.h
@@ -0,0 +1,131 @@
+/* asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted. This file is:
+ *
+ * COPYRIGHT (c) 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * $Id$
+ */
+
+#ifndef __i386_ASM_h
+#define __i386_ASM_h
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#define ASM
+#include <i386.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+/*
+ * Looks like there is a bug in gcc 2.6.2 where this is not
+ * defined correctly when configured as i386-coff and
+ * i386-aout.
+ */
+
+#undef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__ %
+
+/*
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+*/
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+#define eax REG (eax)
+#define ebx REG (ebx)
+#define ecx REG (ecx)
+#define edx REG (edx)
+#define esi REG (esi)
+#define edi REG (edi)
+#define esp REG (esp)
+#define ebp REG (ebp)
+
+#define ax REG (ax)
+#define bx REG (bx)
+#define cx REG (cx)
+#define dx REG (dx)
+#define si REG (si)
+#define di REG (di)
+#define sp REG (sp)
+#define bp REG (bp)
+
+#define ah REG (ah)
+#define al REG (al)
+
+#define cs REG (cs)
+#define ds REG (ds)
+#define es REG (es)
+#define fs REG (fs)
+#define gs REG (gs)
+#define ss REG (ss)
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * The following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+#endif
+/* end of include file */
+
+
diff --git a/c/src/exec/score/cpu/i386/cpu.c b/c/src/exec/score/cpu/i386/cpu.c
new file mode 100644
index 0000000000..05a836f7e3
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/cpu.c
@@ -0,0 +1,121 @@
+/*
+ * Intel i386 Dependent Source
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/wkspace.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ /* fp_status is pinned to AX so the fnstsw below can store into it */
+ register unsigned16 fp_status asm ("ax");
+ register unsigned8 *fp_context;
+
+ /* a CPU configuration table is mandatory */
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ _CPU_Table = *cpu_table;
+
+ /*
+ * The following code saves a NULL i387 context which is given
+ * to each task at start and restart time. The following code
+ * is based upon that provided in the i386 Programmer's
+ * Manual and should work on any coprocessor greater than
+ * the i80287.
+ *
+ * NOTE: The NO RTEMS_WAIT form of the coprocessor instructions
+ * MUST be used in case there is not a coprocessor
+ * to wait for.
+ */
+
+ /* seed fp_status with a nonzero pattern; if no FPU answers, the
+ * pattern survives and the fsave below is skipped
+ */
+ fp_status = 0xa5a5;
+ asm volatile( "fninit" );
+ asm volatile( "fnstsw %0" : "=a" (fp_status) : "0" (fp_status) );
+
+ /* NOTE(review): fp_status == 0 is taken to mean an FPU responded
+ * to fninit/fnstsw -- confirm against the i386 Programmer's Manual
+ * detection sequence
+ */
+ if ( fp_status == 0 ) {
+
+ /* capture the freshly initialized FP state as the template
+ * context handed to every new task
+ */
+ fp_context = _CPU_Null_fp_context;
+
+ asm volatile( "fsave (%0)" : "=r" (fp_context)
+ : "0" (fp_context)
+ );
+ }
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+/* The per-vector ISR entry stubs are generated contiguously; the
+ * distance between the first two gives the stride, so the stub for
+ * vector N is _ISR_Handler_0 + N * stride.
+ * NOTE(review): assumes every stub is the same size and laid out
+ * back-to-back -- confirm against the stub generator in cpu_asm.s.
+ */
+void _ISR_Handler_0(), _ISR_Handler_1();
+
+#define PER_ISR_ENTRY \
+ (((unsigned32) _ISR_Handler_1 - (unsigned32) _ISR_Handler_0))
+
+#define _Interrupt_Handler_entry( _vector ) \
+ (((unsigned32)_ISR_Handler_0) + ((_vector) * PER_ISR_ENTRY))
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ i386_IDT_slot idt;
+ unsigned32 unique_handler;
+
+ /* calculate the unique entry point for this vector */
+ unique_handler = _Interrupt_Handler_entry( vector );
+
+ /* build the IDT entry: 32-bit stub address split across the low
+ * and high halves of the descriptor, code segment from CS
+ */
+ idt.offset_0_15 = ((unsigned32) unique_handler) & 0xffff;
+ idt.segment_selector = i386_get_cs();
+ idt.reserved = 0x00;
+ idt.p_dpl = 0x8e; /* present, ISR */
+ idt.offset_16_31 = ((unsigned32) unique_handler) >> 16;
+
+ /* install the IDT entry; each descriptor is 8 bytes, hence the
+ * 8 * vector offset into the interrupt table
+ */
+ i386_Install_idt(
+ (unsigned32) &idt,
+ _CPU_Table.interrupt_table_segment,
+ (unsigned32) _CPU_Table.interrupt_table_offset + (8 * vector)
+ );
+
+ /* "portable" part: hand back the old RTEMS-level handler and
+ * record the new one
+ */
+ *old_handler = _ISR_Vector_table[ vector ];
+ _ISR_Vector_table[ vector ] = new_handler;
+}
diff --git a/c/src/exec/score/cpu/i386/cpu.h b/c/src/exec/score/cpu/i386/cpu.h
new file mode 100644
index 0000000000..a66cb37abc
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/cpu.h
@@ -0,0 +1,367 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the Intel
+ * i386 processor.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <i386.h>
+
+/* conditional compilation parameters */
+
+#define CPU_INLINE_ENABLE_DISPATCH TRUE
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * i386 has an RTEMS allocated and managed interrupt stack.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Some family members have no FP, some have an FPU such as the i387
+ * for the i386, others have it built in (i486DX, Pentium).
+ */
+
+#if ( I386_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE /* i387 for i386 */
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP FALSE
+#define CPU_STRUCTURE_ALIGNMENT
+
+/* structures */
+
+/*
+ * Basic integer context for the i386 family.
+ */
+
+typedef struct {
+ unsigned32 eflags; /* extended flags register */
+ void *esp; /* extended stack pointer register */
+ void *ebp; /* extended base pointer register */
+ unsigned32 ebx; /* extended bx register */
+ unsigned32 esi; /* extended source index register */
+ unsigned32 edi; /* extended destination index flags register */
+} Context_Control;
+
+/*
+ * FP context save area for the i387 numeric coprocessors.
+ */
+
+typedef struct {
+ unsigned8 fp_save_area[108]; /* context size area for I80387 */
+ /* 28 bytes for environment */
+} Context_Control_fp;
+
+/*
+ * The following structure defines the set of information saved
+ * on the current stack by RTEMS upon receipt of each interrupt.
+ */
+
+typedef struct {
+ unsigned32 TBD; /* XXX Fix for this CPU */
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the i386 specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+
+ unsigned32 interrupt_table_segment;
+ void *interrupt_table_offset;
+} rtems_cpu_table;
+
+/*
+ * context size area for floating point
+ *
+ * NOTE: This is out of place on the i386 to avoid a forward reference.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/* variables */
+
+EXTERN unsigned8 _CPU_Null_fp_context[ CPU_CONTEXT_FP_SIZE ];
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/* constants */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
+
+/*
+ * extra stack required by system initialization thread
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 1024
+
+/*
+ * i386 family supports 256 distinct vectors.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 256
+
+/*
+ * Minimum size of a thread's stack.
+ *
+ * NOTE: 256 bytes is probably too low in most cases.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE 256
+
+/*
+ * i386 is pretty tolerant of alignment. Just put things on 4 byte boundaries.
+ */
+
+#define CPU_ALIGNMENT 4
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * On i386 thread stacks require no further alignment after allocation
+ * from the Workspace.
+ */
+
+#define CPU_STACK_ALIGNMENT 0
+
+/* macros */
+
+/*
+ * ISR handler macros
+ *
+ * These macros perform the following functions:
+ * + disable all maskable CPU interrupts
+ * + restore previous interrupt level (enable)
+ * + temporarily restore interrupts (flash)
+ * + set a particular level
+ */
+
+#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )
+
+#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
+
+#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
+
+#define _CPU_ISR_Set_level( _new_level ) \
+ { \
+ if ( _new_level ) asm volatile ( "cli" ); \
+ else asm volatile ( "sti" ); \
+ }
+
+/* end of ISR handler macros */
+
+/*
+ * Context handler macros
+ *
+ * These macros perform the following functions:
+ * + initialize a context area
+ * + restart the current thread
+ * + calculate the initial pointer into a FP context area
+ * + initialize an FP context area
+ */
+
+#define CPU_EFLAGS_INTERRUPTS_ON 0x00003202
+#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ do { \
+ unsigned32 _stack; \
+ \
+ if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
+ else (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
+ \
+ _stack = ((unsigned32)(_stack_base)) + (_size) - 4; \
+ \
+ *((proc_ptr *)(_stack)) = (_entry_point); \
+ (_the_context)->ebp = (void *) _stack; \
+ (_the_context)->esp = (void *) _stack; \
+ } while (0)
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+#define _CPU_Context_Initialize_fp( _fp_area ) \
+ { \
+ unsigned32 *_source = (unsigned32 *) _CPU_Null_fp_context; \
+ unsigned32 *_destination = (unsigned32 *) *(_fp_area); \
+ unsigned32 _index; \
+ \
+ for ( _index=0 ; _index < CPU_CONTEXT_FP_SIZE/4 ; _index++ ) \
+ *_destination++ = *_source++; \
+ }
+
+/* end of Context handler macros */
+
+/*
+ * Fatal Error manager macros
+ *
+ * These macros perform the following functions:
+ * + disable interrupts and halt the CPU
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ { \
+ asm volatile ( "cli ; \
+ movl %0,%%eax ; \
+ hlt" \
+ : "=r" ((_error)) : "0" ((_error)) \
+ ); \
+ }
+
+/* end of Fatal Error manager macros */
+
+/*
+ * Bitfield handler macros
+ *
+ * These macros perform the following functions:
+ * + scan for the highest numbered (MSB) set in a 16 bit bitfield
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ register unsigned16 __value_in_register = (_value); \
+ \
+ _output = 0; \
+ \
+ asm volatile ( "bsfw %0,%1 " \
+ : "=r" (__value_in_register), "=r" (_output) \
+ : "0" (__value_in_register), "1" (_output) \
+ ); \
+ }
+
+/* end of Bitfield handler macros */
+
+/*
+ * Priority handler macros
+ *
+ * These macros perform the following functions:
+ * + return a mask with the bit for this major/minor portion of
+ * of thread priority set.
+ * + translate the bit number returned by "Bitfield_find_first_bit"
+ * into an index into the thread ready chain bit maps
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 1 << (_bit_number) )
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner and avoid stack conflicts.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/i386/cpu_asm.s b/c/src/exec/score/cpu/i386/cpu_asm.s
new file mode 100644
index 0000000000..121b4409d9
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/cpu_asm.s
@@ -0,0 +1,654 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the Intel i386 implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Format of i386 Register structure
+ */
+
+.set REG_EFLAGS, 0
+.set REG_ESP, REG_EFLAGS + 4
+.set REG_EBP, REG_ESP + 4
+.set REG_EBX, REG_EBP + 4
+.set REG_ESI, REG_EBX + 4
+.set REG_EDI, REG_ESI + 4
+.set SIZE_REGS, REG_EDI + 4
+
+ BEGIN_CODE
+
+/*
+ * void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 2
+ PUBLIC (_CPU_Context_switch)
+
+.set RUNCONTEXT_ARG, 4 # save context argument
+.set HEIRCONTEXT_ARG, 8 # restore context argument
+
+SYM (_CPU_Context_switch):
+ movl RUNCONTEXT_ARG(esp),eax # eax = running threads context
+ pushf # push eflags
+ popl REG_EFLAGS(eax) # save eflags
+ movl esp,REG_ESP(eax) # save stack pointer
+ movl ebp,REG_EBP(eax) # save base pointer
+ movl ebx,REG_EBX(eax) # save ebx
+ movl esi,REG_ESI(eax) # save source register
+ movl edi,REG_EDI(eax) # save destination register
+
+ movl HEIRCONTEXT_ARG(esp),eax # eax = heir threads context
+
+restore:
+ pushl REG_EFLAGS(eax) # push eflags
+ popf # restore eflags
+ movl REG_ESP(eax),esp # restore stack pointer
+ movl REG_EBP(eax),ebp # restore base pointer
+ movl REG_EBX(eax),ebx # restore ebx
+ movl REG_ESI(eax),esi # restore source register
+ movl REG_EDI(eax),edi # restore destination register
+ ret
+
+/*
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+/*
+ * void _CPU_Context_restore( new_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ PUBLIC (_CPU_Context_restore)
+
+.set NEWCONTEXT_ARG, 4 # context to restore argument
+
+SYM (_CPU_Context_restore):
+
+ movl NEWCONTEXT_ARG(esp),eax # eax = running threads context
+ jmp restore
+
+/*PAGE
+ * void _CPU_Context_save_fp_context( &fp_context_ptr )
+ * void _CPU_Context_restore_fp_context( &fp_context_ptr )
+ *
+ * This section is used to context switch an i80287, i80387,
+ * the built-in coprocessor or the i80486 or compatible.
+ */
+
+.set FPCONTEXT_ARG, 4 # FP context argument
+
+ .align 2
+ PUBLIC (_CPU_Context_save_fp)
+SYM (_CPU_Context_save_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ fsave (eax) # save FP context
+ ret
+
+ .align 2
+ PUBLIC (_CPU_Context_restore_fp)
+SYM (_CPU_Context_restore_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ frstor (eax) # restore FP context
+ ret
+
+/*PAGE
+ * void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * NOTE:
+ * Upon entry, the stack will contain a stack frame back to the
+ * interrupted task. If dispatching is enabled, this is the
+ * outer most interrupt, and (a context switch is necessary or
+ * the current task has signals), then set up the stack to
+ * transfer control to the interrupt dispatcher.
+ */
+
+.set SET_SEGMENT_REGISTERS_IN_INTERRUPT, 0
+
+.set SAVED_REGS , 32 # space consumed by saved regs
+.set EIP_OFFSET , SAVED_REGS # offset of tasks eip
+.set CS_OFFSET , EIP_OFFSET+4 # offset of tasks code segment
+.set EFLAGS_OFFSET , CS_OFFSET+4 # offset of tasks eflags
+
+ .align 2
+ PUBLIC (_ISR_Handler)
+
+SYM (_ISR_Handler):
+ /*
+   * Before this point was reached, the vector's unique
+ * entry point did the following:
+ *
+ * 1. saved all registers with a "pusha"
+ * 2. put the vector number in eax.
+ *
+ * BEGINNING OF ESTABLISH SEGMENTS
+ *
+ * WARNING: If an interrupt can occur when the segments are
+ * not correct, then this is where we should establish
+ * the segments. In addition to establishing the
+ * segments, it may be necessary to establish a stack
+ * in the current data area on the outermost interrupt.
+ *
+ * NOTE: If the previous values of the segment registers are
+ * pushed, do not forget to adjust SAVED_REGS.
+ *
+   * NOTE: Make sure the exit code restores these registers
+   *       when this type of code is added here.
+ */
+
+ /***** ESTABLISH SEGMENTS CODE GOES HERE ******/
+
+ /*
+ * END OF ESTABLISH SEGMENTS
+ */
+
+ /*
+ * Now switch stacks if necessary
+ */
+
+ movl esp, edx # edx = previous stack pointer
+ cmpl $0, SYM (_ISR_Nest_level) # is this the outermost interrupt?
+ jne nested # No, then continue
+ movl SYM (_CPU_Interrupt_stack_high), esp
+
+ /*
+ * We want to insure that the old stack pointer is on the
+ * stack we will be on at the end of the ISR when we restore it.
+ * By saving it on every interrupt, all we have to do is pop it
+ * near the end of every interrupt.
+ */
+
+nested:
+ pushl edx # save the previous stack pointer
+ incl SYM (_ISR_Nest_level) # one nest level deeper
+ incl SYM (_Thread_Dispatch_disable_level) # disable multitasking
+
+ # EAX is preloaded with the vector number.
+ push eax # push vector number
+ mov SYM (_ISR_Vector_table) (,eax,4),eax
+ # eax = Users handler
+ call eax # invoke user ISR
+ pop eax # eax = vector number
+
+ decl SYM (_ISR_Nest_level) # one less ISR nest level
+ # If interrupts are nested,
+ # then dispatching is disabled
+
+ decl SYM (_Thread_Dispatch_disable_level)
+ # unnest multitasking
+ # Is dispatch disabled
+ jne exit # Yes, then exit
+
+ cmpl $0, SYM (_Context_Switch_necessary)
+ # Is task switch necessary?
+ jne bframe # Yes, then build stack
+
+ cmpl $0, SYM (_ISR_Signals_to_thread_executing)
+ # signals sent to Run_thread
+ # while in interrupt handler?
+ je exit # No, exit
+
+bframe:
+ cli # DISABLE INTERRUPTS!!
+ popl esp # restore the stack pointer
+ movl $0, SYM (_ISR_Signals_to_thread_executing)
+ # push the isf for Isr_dispatch
+ push EFLAGS_OFFSET(esp) # push tasks eflags
+ push cs # cs of Isr_dispatch
+ push $ SYM (_ISR_Dispatch) # entry point
+ iret
+
+exit:
+ cli # DISABLE INTERRUPTS!!
+ popl esp # restore the stack pointer
+
+ /*
+ * BEGINNING OF DE-ESTABLISH SEGMENTS
+ *
+ * NOTE: Make sure there is code here if code is added to
+ * load the segment registers.
+ *
+ */
+
+ /******* DE-ESTABLISH SEGMENTS CODE GOES HERE ********/
+
+ /*
+ * END OF DE-ESTABLISH SEGMENTS
+ */
+
+ popa # restore general registers
+ iret
+
+/*PAGE
+ * Distinct Interrupt Entry Points
+ *
+ * The following macro and the 256 instantiations of the macro
+ * are necessary to determine which interrupt vector occurred.
+ * The following macro allows a unique entry point to be defined
+ * for each vector.
+ *
+ * NOTE: There are no spaces around the vector number argument
+ * to the DISTINCT_INTERRUPT_ENTRY macro because m4 will
+ * undesirably generate the symbol "_Isr_handler_ N"
+ * instead of "_Isr_handler_N" like we want.
+ */
+
+#define DISTINCT_INTERRUPT_ENTRY(_vector) \
+ .align 16 ; \
+ PUBLIC (_ISR_Handler_ ## _vector ) ; \
+SYM (_ISR_Handler_ ## _vector ): \
+ pusha ; \
+ xor eax, eax ; \
+ movb $ ## _vector, al ; \
+ jmp SYM (_ISR_Handler) ;
+
+DISTINCT_INTERRUPT_ENTRY(0)
+DISTINCT_INTERRUPT_ENTRY(1)
+DISTINCT_INTERRUPT_ENTRY(2)
+DISTINCT_INTERRUPT_ENTRY(3)
+DISTINCT_INTERRUPT_ENTRY(4)
+DISTINCT_INTERRUPT_ENTRY(5)
+DISTINCT_INTERRUPT_ENTRY(6)
+DISTINCT_INTERRUPT_ENTRY(7)
+DISTINCT_INTERRUPT_ENTRY(8)
+DISTINCT_INTERRUPT_ENTRY(9)
+DISTINCT_INTERRUPT_ENTRY(10)
+DISTINCT_INTERRUPT_ENTRY(11)
+DISTINCT_INTERRUPT_ENTRY(12)
+DISTINCT_INTERRUPT_ENTRY(13)
+DISTINCT_INTERRUPT_ENTRY(14)
+DISTINCT_INTERRUPT_ENTRY(15)
+DISTINCT_INTERRUPT_ENTRY(16)
+DISTINCT_INTERRUPT_ENTRY(17)
+DISTINCT_INTERRUPT_ENTRY(18)
+DISTINCT_INTERRUPT_ENTRY(19)
+DISTINCT_INTERRUPT_ENTRY(20)
+DISTINCT_INTERRUPT_ENTRY(21)
+DISTINCT_INTERRUPT_ENTRY(22)
+DISTINCT_INTERRUPT_ENTRY(23)
+DISTINCT_INTERRUPT_ENTRY(24)
+DISTINCT_INTERRUPT_ENTRY(25)
+DISTINCT_INTERRUPT_ENTRY(26)
+DISTINCT_INTERRUPT_ENTRY(27)
+DISTINCT_INTERRUPT_ENTRY(28)
+DISTINCT_INTERRUPT_ENTRY(29)
+DISTINCT_INTERRUPT_ENTRY(30)
+DISTINCT_INTERRUPT_ENTRY(31)
+DISTINCT_INTERRUPT_ENTRY(32)
+DISTINCT_INTERRUPT_ENTRY(33)
+DISTINCT_INTERRUPT_ENTRY(34)
+DISTINCT_INTERRUPT_ENTRY(35)
+DISTINCT_INTERRUPT_ENTRY(36)
+DISTINCT_INTERRUPT_ENTRY(37)
+DISTINCT_INTERRUPT_ENTRY(38)
+DISTINCT_INTERRUPT_ENTRY(39)
+DISTINCT_INTERRUPT_ENTRY(40)
+DISTINCT_INTERRUPT_ENTRY(41)
+DISTINCT_INTERRUPT_ENTRY(42)
+DISTINCT_INTERRUPT_ENTRY(43)
+DISTINCT_INTERRUPT_ENTRY(44)
+DISTINCT_INTERRUPT_ENTRY(45)
+DISTINCT_INTERRUPT_ENTRY(46)
+DISTINCT_INTERRUPT_ENTRY(47)
+DISTINCT_INTERRUPT_ENTRY(48)
+DISTINCT_INTERRUPT_ENTRY(49)
+DISTINCT_INTERRUPT_ENTRY(50)
+DISTINCT_INTERRUPT_ENTRY(51)
+DISTINCT_INTERRUPT_ENTRY(52)
+DISTINCT_INTERRUPT_ENTRY(53)
+DISTINCT_INTERRUPT_ENTRY(54)
+DISTINCT_INTERRUPT_ENTRY(55)
+DISTINCT_INTERRUPT_ENTRY(56)
+DISTINCT_INTERRUPT_ENTRY(57)
+DISTINCT_INTERRUPT_ENTRY(58)
+DISTINCT_INTERRUPT_ENTRY(59)
+DISTINCT_INTERRUPT_ENTRY(60)
+DISTINCT_INTERRUPT_ENTRY(61)
+DISTINCT_INTERRUPT_ENTRY(62)
+DISTINCT_INTERRUPT_ENTRY(63)
+DISTINCT_INTERRUPT_ENTRY(64)
+DISTINCT_INTERRUPT_ENTRY(65)
+DISTINCT_INTERRUPT_ENTRY(66)
+DISTINCT_INTERRUPT_ENTRY(67)
+DISTINCT_INTERRUPT_ENTRY(68)
+DISTINCT_INTERRUPT_ENTRY(69)
+DISTINCT_INTERRUPT_ENTRY(70)
+DISTINCT_INTERRUPT_ENTRY(71)
+DISTINCT_INTERRUPT_ENTRY(72)
+DISTINCT_INTERRUPT_ENTRY(73)
+DISTINCT_INTERRUPT_ENTRY(74)
+DISTINCT_INTERRUPT_ENTRY(75)
+DISTINCT_INTERRUPT_ENTRY(76)
+DISTINCT_INTERRUPT_ENTRY(77)
+DISTINCT_INTERRUPT_ENTRY(78)
+DISTINCT_INTERRUPT_ENTRY(79)
+DISTINCT_INTERRUPT_ENTRY(80)
+DISTINCT_INTERRUPT_ENTRY(81)
+DISTINCT_INTERRUPT_ENTRY(82)
+DISTINCT_INTERRUPT_ENTRY(83)
+DISTINCT_INTERRUPT_ENTRY(84)
+DISTINCT_INTERRUPT_ENTRY(85)
+DISTINCT_INTERRUPT_ENTRY(86)
+DISTINCT_INTERRUPT_ENTRY(87)
+DISTINCT_INTERRUPT_ENTRY(88)
+DISTINCT_INTERRUPT_ENTRY(89)
+DISTINCT_INTERRUPT_ENTRY(90)
+DISTINCT_INTERRUPT_ENTRY(91)
+DISTINCT_INTERRUPT_ENTRY(92)
+DISTINCT_INTERRUPT_ENTRY(93)
+DISTINCT_INTERRUPT_ENTRY(94)
+DISTINCT_INTERRUPT_ENTRY(95)
+DISTINCT_INTERRUPT_ENTRY(96)
+DISTINCT_INTERRUPT_ENTRY(97)
+DISTINCT_INTERRUPT_ENTRY(98)
+DISTINCT_INTERRUPT_ENTRY(99)
+DISTINCT_INTERRUPT_ENTRY(100)
+DISTINCT_INTERRUPT_ENTRY(101)
+DISTINCT_INTERRUPT_ENTRY(102)
+DISTINCT_INTERRUPT_ENTRY(103)
+DISTINCT_INTERRUPT_ENTRY(104)
+DISTINCT_INTERRUPT_ENTRY(105)
+DISTINCT_INTERRUPT_ENTRY(106)
+DISTINCT_INTERRUPT_ENTRY(107)
+DISTINCT_INTERRUPT_ENTRY(108)
+DISTINCT_INTERRUPT_ENTRY(109)
+DISTINCT_INTERRUPT_ENTRY(110)
+DISTINCT_INTERRUPT_ENTRY(111)
+DISTINCT_INTERRUPT_ENTRY(112)
+DISTINCT_INTERRUPT_ENTRY(113)
+DISTINCT_INTERRUPT_ENTRY(114)
+DISTINCT_INTERRUPT_ENTRY(115)
+DISTINCT_INTERRUPT_ENTRY(116)
+DISTINCT_INTERRUPT_ENTRY(117)
+DISTINCT_INTERRUPT_ENTRY(118)
+DISTINCT_INTERRUPT_ENTRY(119)
+DISTINCT_INTERRUPT_ENTRY(120)
+DISTINCT_INTERRUPT_ENTRY(121)
+DISTINCT_INTERRUPT_ENTRY(122)
+DISTINCT_INTERRUPT_ENTRY(123)
+DISTINCT_INTERRUPT_ENTRY(124)
+DISTINCT_INTERRUPT_ENTRY(125)
+DISTINCT_INTERRUPT_ENTRY(126)
+DISTINCT_INTERRUPT_ENTRY(127)
+DISTINCT_INTERRUPT_ENTRY(128)
+DISTINCT_INTERRUPT_ENTRY(129)
+DISTINCT_INTERRUPT_ENTRY(130)
+DISTINCT_INTERRUPT_ENTRY(131)
+DISTINCT_INTERRUPT_ENTRY(132)
+DISTINCT_INTERRUPT_ENTRY(133)
+DISTINCT_INTERRUPT_ENTRY(134)
+DISTINCT_INTERRUPT_ENTRY(135)
+DISTINCT_INTERRUPT_ENTRY(136)
+DISTINCT_INTERRUPT_ENTRY(137)
+DISTINCT_INTERRUPT_ENTRY(138)
+DISTINCT_INTERRUPT_ENTRY(139)
+DISTINCT_INTERRUPT_ENTRY(140)
+DISTINCT_INTERRUPT_ENTRY(141)
+DISTINCT_INTERRUPT_ENTRY(142)
+DISTINCT_INTERRUPT_ENTRY(143)
+DISTINCT_INTERRUPT_ENTRY(144)
+DISTINCT_INTERRUPT_ENTRY(145)
+DISTINCT_INTERRUPT_ENTRY(146)
+DISTINCT_INTERRUPT_ENTRY(147)
+DISTINCT_INTERRUPT_ENTRY(148)
+DISTINCT_INTERRUPT_ENTRY(149)
+DISTINCT_INTERRUPT_ENTRY(150)
+DISTINCT_INTERRUPT_ENTRY(151)
+DISTINCT_INTERRUPT_ENTRY(152)
+DISTINCT_INTERRUPT_ENTRY(153)
+DISTINCT_INTERRUPT_ENTRY(154)
+DISTINCT_INTERRUPT_ENTRY(155)
+DISTINCT_INTERRUPT_ENTRY(156)
+DISTINCT_INTERRUPT_ENTRY(157)
+DISTINCT_INTERRUPT_ENTRY(158)
+DISTINCT_INTERRUPT_ENTRY(159)
+DISTINCT_INTERRUPT_ENTRY(160)
+DISTINCT_INTERRUPT_ENTRY(161)
+DISTINCT_INTERRUPT_ENTRY(162)
+DISTINCT_INTERRUPT_ENTRY(163)
+DISTINCT_INTERRUPT_ENTRY(164)
+DISTINCT_INTERRUPT_ENTRY(165)
+DISTINCT_INTERRUPT_ENTRY(166)
+DISTINCT_INTERRUPT_ENTRY(167)
+DISTINCT_INTERRUPT_ENTRY(168)
+DISTINCT_INTERRUPT_ENTRY(169)
+DISTINCT_INTERRUPT_ENTRY(170)
+DISTINCT_INTERRUPT_ENTRY(171)
+DISTINCT_INTERRUPT_ENTRY(172)
+DISTINCT_INTERRUPT_ENTRY(173)
+DISTINCT_INTERRUPT_ENTRY(174)
+DISTINCT_INTERRUPT_ENTRY(175)
+DISTINCT_INTERRUPT_ENTRY(176)
+DISTINCT_INTERRUPT_ENTRY(177)
+DISTINCT_INTERRUPT_ENTRY(178)
+DISTINCT_INTERRUPT_ENTRY(179)
+DISTINCT_INTERRUPT_ENTRY(180)
+DISTINCT_INTERRUPT_ENTRY(181)
+DISTINCT_INTERRUPT_ENTRY(182)
+DISTINCT_INTERRUPT_ENTRY(183)
+DISTINCT_INTERRUPT_ENTRY(184)
+DISTINCT_INTERRUPT_ENTRY(185)
+DISTINCT_INTERRUPT_ENTRY(186)
+DISTINCT_INTERRUPT_ENTRY(187)
+DISTINCT_INTERRUPT_ENTRY(188)
+DISTINCT_INTERRUPT_ENTRY(189)
+DISTINCT_INTERRUPT_ENTRY(190)
+DISTINCT_INTERRUPT_ENTRY(191)
+DISTINCT_INTERRUPT_ENTRY(192)
+DISTINCT_INTERRUPT_ENTRY(193)
+DISTINCT_INTERRUPT_ENTRY(194)
+DISTINCT_INTERRUPT_ENTRY(195)
+DISTINCT_INTERRUPT_ENTRY(196)
+DISTINCT_INTERRUPT_ENTRY(197)
+DISTINCT_INTERRUPT_ENTRY(198)
+DISTINCT_INTERRUPT_ENTRY(199)
+DISTINCT_INTERRUPT_ENTRY(200)
+DISTINCT_INTERRUPT_ENTRY(201)
+DISTINCT_INTERRUPT_ENTRY(202)
+DISTINCT_INTERRUPT_ENTRY(203)
+DISTINCT_INTERRUPT_ENTRY(204)
+DISTINCT_INTERRUPT_ENTRY(205)
+DISTINCT_INTERRUPT_ENTRY(206)
+DISTINCT_INTERRUPT_ENTRY(207)
+DISTINCT_INTERRUPT_ENTRY(208)
+DISTINCT_INTERRUPT_ENTRY(209)
+DISTINCT_INTERRUPT_ENTRY(210)
+DISTINCT_INTERRUPT_ENTRY(211)
+DISTINCT_INTERRUPT_ENTRY(212)
+DISTINCT_INTERRUPT_ENTRY(213)
+DISTINCT_INTERRUPT_ENTRY(214)
+DISTINCT_INTERRUPT_ENTRY(215)
+DISTINCT_INTERRUPT_ENTRY(216)
+DISTINCT_INTERRUPT_ENTRY(217)
+DISTINCT_INTERRUPT_ENTRY(218)
+DISTINCT_INTERRUPT_ENTRY(219)
+DISTINCT_INTERRUPT_ENTRY(220)
+DISTINCT_INTERRUPT_ENTRY(221)
+DISTINCT_INTERRUPT_ENTRY(222)
+DISTINCT_INTERRUPT_ENTRY(223)
+DISTINCT_INTERRUPT_ENTRY(224)
+DISTINCT_INTERRUPT_ENTRY(225)
+DISTINCT_INTERRUPT_ENTRY(226)
+DISTINCT_INTERRUPT_ENTRY(227)
+DISTINCT_INTERRUPT_ENTRY(228)
+DISTINCT_INTERRUPT_ENTRY(229)
+DISTINCT_INTERRUPT_ENTRY(230)
+DISTINCT_INTERRUPT_ENTRY(231)
+DISTINCT_INTERRUPT_ENTRY(232)
+DISTINCT_INTERRUPT_ENTRY(233)
+DISTINCT_INTERRUPT_ENTRY(234)
+DISTINCT_INTERRUPT_ENTRY(235)
+DISTINCT_INTERRUPT_ENTRY(236)
+DISTINCT_INTERRUPT_ENTRY(237)
+DISTINCT_INTERRUPT_ENTRY(238)
+DISTINCT_INTERRUPT_ENTRY(239)
+DISTINCT_INTERRUPT_ENTRY(240)
+DISTINCT_INTERRUPT_ENTRY(241)
+DISTINCT_INTERRUPT_ENTRY(242)
+DISTINCT_INTERRUPT_ENTRY(243)
+DISTINCT_INTERRUPT_ENTRY(244)
+DISTINCT_INTERRUPT_ENTRY(245)
+DISTINCT_INTERRUPT_ENTRY(246)
+DISTINCT_INTERRUPT_ENTRY(247)
+DISTINCT_INTERRUPT_ENTRY(248)
+DISTINCT_INTERRUPT_ENTRY(249)
+DISTINCT_INTERRUPT_ENTRY(250)
+DISTINCT_INTERRUPT_ENTRY(251)
+DISTINCT_INTERRUPT_ENTRY(252)
+DISTINCT_INTERRUPT_ENTRY(253)
+DISTINCT_INTERRUPT_ENTRY(254)
+DISTINCT_INTERRUPT_ENTRY(255)
+
+/*PAGE
+ * void _ISR_Dispatch()
+ *
+ * Entry point from the outermost interrupt service routine exit.
+ * The current stack is the supervisor mode stack.
+ */
+
+ PUBLIC (_ISR_Dispatch)
+SYM (_ISR_Dispatch):
+
+ call SYM (_Thread_Dispatch) # invoke Dispatcher
+
+ /*
+ * BEGINNING OF DE-ESTABLISH SEGMENTS
+ *
+ * NOTE: Make sure there is code here if code is added to
+ * load the segment registers.
+ *
+ */
+
+ /***** DE-ESTABLISH SEGMENTS CODE GOES HERE ****/
+
+ /*
+ * END OF DE-ESTABLISH SEGMENTS
+ */
+
+ popa # restore general registers
+ iret # return to interrupted thread
+
+/*PAGE
+ *
+ * void i386_Install_idt(
+ * unsigned32 source_offset,
+ * unsigned16 destination_segment,
+ * unsigned32 destination_offset
+ * );
+ */
+
+ .align 2
+ PUBLIC (i386_Install_idt)
+
+.set INSTALL_IDT_SAVED_REGS, 8
+
+.set SOURCE_OFFSET_ARG, INSTALL_IDT_SAVED_REGS + 4
+.set DESTINATION_SEGMENT_ARG, INSTALL_IDT_SAVED_REGS + 8
+.set DESTINATION_OFFSET_ARG, INSTALL_IDT_SAVED_REGS + 12
+
+SYM (i386_Install_idt):
+ push esi
+ push edi
+
+ movl SOURCE_OFFSET_ARG(esp),esi
+ movl DESTINATION_OFFSET_ARG(esp),edi
+
+ pushf # save flags
+ cli # DISABLE INTERRUPTS!!!
+
+ movw DESTINATION_SEGMENT_ARG+4(esp),ax
+ push es # save es
+ movw ax,es
+ movsl # copy 1st half of IDT entry
+ movsl # copy 2nd half of IDT entry
+ pop es # restore es
+
+ popf # ENABLE INTERRUPTS!!!
+
+ pop edi
+ pop esi
+ ret
+
+/*
+ * void *i386_Logical_to_physical(
+ * rtems_unsigned16 segment,
+ * void *address
+ * );
+ *
+ * Returns thirty-two bit physical address for segment:address.
+ */
+
+.set SEGMENT_ARG, 4
+.set ADDRESS_ARG, 8
+
+ PUBLIC (i386_Logical_to_physical)
+
+SYM (i386_Logical_to_physical):
+
+ xorl eax,eax # clear eax
+ movzwl SEGMENT_ARG(esp),ecx # ecx = segment value
+ movl $ SYM (_Global_descriptor_table),edx
+ # edx = address of our GDT
+ addl ecx,edx # edx = address of desired entry
+ movb 7(edx),ah # ah = base 31:24
+ movb 4(edx),al # al = base 23:16
+ shll $16,eax # move ax into correct bits
+ movw 2(edx),ax # ax = base 0:15
+ movl ADDRESS_ARG(esp),ecx # ecx = address to convert
+ addl eax,ecx # ecx = physical address equivalent
+ movl ecx,eax # eax = ecx
+ ret
+
+/*
+ * void *i386_Physical_to_logical(
+ * rtems_unsigned16 segment,
+ * void *address
+ * );
+ *
+ * Returns thirty-two bit logical address for segment:address.
+ */
+
+/*
+ *.set SEGMENT_ARG, 4
+ *.set ADDRESS_ARG, 8 -- use sets from above
+ */
+
+ PUBLIC (i386_Physical_to_logical)
+
+SYM (i386_Physical_to_logical):
+ xorl eax,eax # clear eax
+ movzwl SEGMENT_ARG(esp),ecx # ecx = segment value
+ movl $ SYM (_Global_descriptor_table),edx
+ # edx = address of our GDT
+ addl ecx,edx # edx = address of desired entry
+ movb 7(edx),ah # ah = base 31:24
+ movb 4(edx),al # al = base 23:16
+ shll $16,eax # move ax into correct bits
+ movw 2(edx),ax # ax = base 0:15
+ movl ADDRESS_ARG(esp),ecx # ecx = address to convert
+ subl eax,ecx # ecx = logical address equivalent
+ movl ecx,eax # eax = ecx
+ ret
+
+END_CODE
+
+END
diff --git a/c/src/exec/score/cpu/i386/i386.h b/c/src/exec/score/cpu/i386/i386.h
new file mode 100644
index 0000000000..a8db759984
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/i386.h
@@ -0,0 +1,493 @@
+/* i386.h
+ *
+ * This include file contains information pertaining to the Intel
+ * i386 processor.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __i386_h
+#define __i386_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define i386
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This section contains the information required to build
+ * RTEMS for a particular member of the Intel i386
+ * family when executing in protected mode. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ *
+ * Currently recognized:
+ * i386_fp (i386 DX or SX w/i387)
+ *    i386_nofp  (i386 DX or SX w/o i387)
+ * i486dx
+ * i486sx
+ * pentium
+ *
+ * Floating point is the only feature which currently varies. Eventually
+ * the i486-plus level instruction for endian swapping should be added
+ * to this feature list.
+ */
+
+#if defined(i386_fp)
+
+#define RTEMS_MODEL_NAME "i386 with i387"
+#define I386_HAS_FPU 1
+
+#elif defined(i386_nofp)
+
+#define RTEMS_MODEL_NAME "i386 w/o i387"
+#define I386_HAS_FPU 0
+
+#elif defined(i486dx)
+
+#define RTEMS_MODEL_NAME "i486dx"
+#define I386_HAS_FPU 1
+
+#elif defined(i486sx)
+
+#define RTEMS_MODEL_NAME "i486sx"
+#define I386_HAS_FPU 0
+
+#elif defined(pentium)
+
+#define RTEMS_MODEL_NAME "Pentium"
+#define I386_HAS_FPU 1
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "Intel i386"
+
+#ifndef ASM
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef signed char        signed8;      /* 8-bit  signed integer */
+typedef signed short       signed16;     /* 16-bit signed integer */
+typedef signed int         signed32;     /* 32-bit signed integer */
+typedef long long signed64; /* 64-bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+/*
+ * Structure which makes it easier to deal with LxDT and SxDT instructions.
+ */
+
+typedef struct {
+ unsigned short limit;
+ unsigned short physical_address[ 2 ];
+} i386_DTR_load_save_format;
+
+/* See Chapter 5 - Memory Management in i386 manual */
+
+typedef struct {
+ unsigned short limit_0_15;
+ unsigned short base_0_15;
+ unsigned char base_16_23;
+ unsigned char type_dt_dpl_p;
+ unsigned char limit_16_19_granularity;
+ unsigned char base_24_31;
+} i386_GDT_slot;
+
+/* See Chapter 9 - Exceptions and Interrupts in i386 manual
+ *
+ * NOTE: This is the IDT entry for interrupt gates ONLY.
+ */
+
+typedef struct {
+ unsigned short offset_0_15;
+ unsigned short segment_selector;
+ unsigned char reserved;
+ unsigned char p_dpl;
+ unsigned short offset_16_31;
+} i386_IDT_slot;
+
+typedef void ( *i386_isr )( void );
+
+#define i386_disable_interrupts( _level ) \
+ { \
+ _level = 0; /* avoids warnings */ \
+ asm volatile ( "pushf ; \
+ cli ; \
+ pop %0" \
+ : "=r" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+
+#define i386_enable_interrupts( _level ) \
+ { \
+ asm volatile ( "push %0 ; \
+ popf" \
+ : "=r" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+
+#define i386_flash_interrupts( _level ) \
+ { \
+ asm volatile ( "push %0 ; \
+ popf ; \
+ cli" \
+ : "=r" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+
+/*
+ * The following routine swaps the endian format of an unsigned int.
+ * It must be static so it can be referenced indirectly.
+ */
+
+static inline unsigned int i386_swap_U32(
+ unsigned int value
+)
+{
+ asm volatile( "rorw $8,%%ax;"
+ "rorl $16,%0;"
+ "rorw $8,%%ax" : "=a" (value) : "0" (value) );
+
+ return( value );
+}
+
+/*
+ * Segment Access Routines
+ *
+ * NOTE: Unfortunately, these are still static inlines even when the
+ * "macro" implementation of the generic code is used.
+ */
+
+static inline unsigned short i386_get_cs()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%cs,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned short i386_get_ds()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%ds,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned short i386_get_es()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%es,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned short i386_get_ss()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%ss,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned short i386_get_fs()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%fs,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+static inline unsigned short i386_get_gs()
+{
+ register unsigned short segment = 0;
+
+ asm volatile ( "movw %%gs,%0" : "=r" (segment) : "0" (segment) );
+
+ return segment;
+}
+
+/*
+ * IO Port Access Routines
+ */
+
+#define i386_outport_byte( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned char __value = _value; \
+ \
+ asm volatile ( "outb %0,%1" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ }
+
+#define i386_outport_word( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned short __value = _value; \
+ \
+ asm volatile ( "outw %0,%1" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ }
+
+#define i386_outport_long( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned int __value = _value; \
+ \
+ asm volatile ( "outl %0,%1" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ }
+
+#define i386_inport_byte( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned char __value = 0; \
+ \
+ asm volatile ( "inb %1,%0" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ _value = __value; \
+ }
+
+#define i386_inport_word( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned short __value = 0; \
+ \
+ asm volatile ( "inw %1,%0" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ _value = __value; \
+ }
+
+#define i386_inport_long( _port, _value ) \
+ { register unsigned short __port = _port; \
+ register unsigned int __value = 0; \
+ \
+ asm volatile ( "inl %1,%0" : "=a" (__value), "=d" (__port) \
+ : "0" (__value), "1" (__port) \
+ ); \
+ _value = __value; \
+ }
+
+/*
+ * Descriptor Table helper routines
+ */
+
+
+#define i386_get_GDTR( _gdtr_address ) \
+ { \
+ void *_gdtr = (_gdtr_address); \
+ \
+ asm volatile( "sgdt (%0)" : "=r" (_gdtr) : "0" (_gdtr) ); \
+ }
+
+#define i386_get_GDT_slot( _gdtr_base, _segment, _slot_address ) \
+ { \
+ register unsigned int _gdt_slot = (_gdtr_base) + (_segment); \
+ register volatile void *_slot = (_slot_address); \
+ register unsigned int _temporary = 0; \
+ \
+ asm volatile( "movl %%gs:(%0),%1 ; \
+ movl %1,(%2) ; \
+ movl %%gs:4(%0),%1 ; \
+ movl %1,4(%2)" \
+ : "=r" (_gdt_slot), "=r" (_temporary), "=r" (_slot) \
+ : "0" (_gdt_slot), "1" (_temporary), "2" (_slot) \
+ ); \
+ }
+
+#define i386_set_GDT_slot( _gdtr_base, _segment, _slot_address ) \
+ { \
+ register unsigned int _gdt_slot = (_gdtr_base) + (_segment); \
+ register volatile void *_slot = (_slot_address); \
+ register unsigned int _temporary = 0; \
+ \
+ asm volatile( "movl (%2),%1 ; \
+ movl %1,%%gs:(%0) ; \
+ movl 4(%2),%1 ; \
+ movl %1,%%gs:4(%0) \
+ " \
+ : "=r" (_gdt_slot), "=r" (_temporary), "=r" (_slot) \
+ : "0" (_gdt_slot), "1" (_temporary), "2" (_slot) \
+ ); \
+ }
+
+static inline void i386_set_segment(
+ unsigned short segment,
+ unsigned int base,
+ unsigned int limit
+)
+{
+ i386_DTR_load_save_format gdtr;
+ volatile i386_GDT_slot Gdt_slot;
+ volatile i386_GDT_slot *gdt_slot = &Gdt_slot;
+ unsigned short tmp_segment = 0;
+ unsigned int limit_adjusted;
+
+ /* load physical address of the GDT */
+
+ i386_get_GDTR( &gdtr );
+
+ gdt_slot->type_dt_dpl_p = 0x92; /* present, dpl=0, */
+ /* application=1, */
+ /* type=data read/write */
+ gdt_slot->limit_16_19_granularity = 0x40; /* 32 bit segment */
+
+ limit_adjusted = limit;
+ if ( limit > 4095 ) {
+ gdt_slot->limit_16_19_granularity |= 0x80; /* set granularity bit */
+ limit_adjusted /= 4096;
+ }
+
+ gdt_slot->limit_16_19_granularity |= (limit_adjusted >> 16) & 0xff;
+ gdt_slot->limit_0_15 = limit_adjusted & 0xffff;
+
+ gdt_slot->base_0_15 = base & 0xffff;
+ gdt_slot->base_16_23 = (base >> 16) & 0xff;
+ gdt_slot->base_24_31 = (base >> 24);
+
+ i386_set_GDT_slot(
+ gdtr.physical_address[0] + (gdtr.physical_address[1] << 16),
+ segment,
+ gdt_slot
+ );
+
+ /* Now, reload all segment registers so the limit takes effect. */
+
+ asm volatile( "movw %%ds,%0 ; movw %0,%%ds
+ movw %%es,%0 ; movw %0,%%es
+ movw %%fs,%0 ; movw %0,%%fs
+ movw %%gs,%0 ; movw %0,%%gs
+ movw %%ss,%0 ; movw %0,%%ss"
+ : "=r" (tmp_segment)
+ : "0" (tmp_segment)
+ );
+
+}
+
+/* routines */
+
+/*
+ * i386_Logical_to_physical
+ *
+ * Converts logical address to physical address.
+ */
+
+void *i386_Logical_to_physical(
+ unsigned short segment,
+ void *address
+);
+
+/*
+ * i386_Physical_to_logical
+ *
+ * Converts physical address to logical address.
+ */
+
+void *i386_Physical_to_logical(
+ unsigned short segment,
+ void *address
+);
+
+/*
+ * i386_Install_idt
+ *
+ * This routine installs an IDT entry.
+ */
+
+void i386_Install_idt(
+ unsigned int source_offset,
+ unsigned short destination_segment,
+ unsigned int destination_offset
+);
+
+/*
+ * "Simpler" names for a lot of the things defined in this file
+ */
+
+/* segment access routines */
+
+#define get_cs() i386_get_cs()
+#define get_ds() i386_get_ds()
+#define get_es() i386_get_es()
+#define get_ss() i386_get_ss()
+#define get_fs() i386_get_fs()
+#define get_gs() i386_get_gs()
+
+#define CPU_swap_u32( _value ) i386_swap_U32( _value )
+
+/* i80x86 I/O instructions */
+
+#define outport_byte( _port, _value ) i386_outport_byte( _port, _value )
+#define outport_word( _port, _value ) i386_outport_word( _port, _value )
+#define outport_long( _port, _value ) i386_outport_long( _port, _value )
+#define inport_byte( _port, _value ) i386_inport_byte( _port, _value )
+#define inport_word( _port, _value ) i386_inport_word( _port, _value )
+#define inport_long( _port, _value ) i386_inport_long( _port, _value )
+
+/* complicated static inline functions */
+
+#define get_GDTR( _gdtr_address ) \
+ i386_get_GDTR( _gdtr_address )
+
+#define get_GDT_slot( _gdtr_base, _segment, _slot_address ) \
+ i386_get_GDT_slot( _gdtr_base, _segment, _slot_address )
+
+#define set_GDT_slot( _gdtr_base, _segment, _slot_address ) \
+ i386_set_GDT_slot( _gdtr_base, _segment, _slot_address )
+
+#define set_segment( _segment, _base, _limit ) \
+ i386_set_segment( _segment, _base, _limit )
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/i386/rtems.s b/c/src/exec/score/cpu/i386/rtems.s
new file mode 100644
index 0000000000..df65600e15
--- /dev/null
+++ b/c/src/exec/score/cpu/i386/rtems.s
@@ -0,0 +1,31 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the i386 implementation of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+ EXTERN (_Entry_points)
+
+ BEGIN_CODE
+
+ .align 2
+ PUBLIC (RTEMS)
+
+SYM (RTEMS):
+ jmpl SYM (_Entry_points)(,eax,4)
+
+ END_CODE
+
+END
diff --git a/c/src/exec/score/cpu/i960/asm.h b/c/src/exec/score/cpu/i960/asm.h
new file mode 100644
index 0000000000..1c40601473
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/asm.h
@@ -0,0 +1,107 @@
+/* asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted. This file is:
+ *
+ * COPYRIGHT (c) 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * $Id$
+ */
+
+#ifndef __i960_ASM_h
+#define __i960_ASM_h
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#define ASM
+#include <i960.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+#define g0 REG (g0)
+#define g1 REG (g1)
+#define g2 REG (g2)
+#define g3 REG (g3)
+#define g4 REG (g4)
+#define g5 REG (g5)
+#define g6 REG (g6)
+#define g7 REG (g7)
+#define g8 REG (g8)
+#define g9 REG (g9)
+#define g10 REG (g10)
+#define g11 REG (g11)
+#define g12 REG (g12)
+#define g13 REG (g13)
+#define g14 REG (g14)
+#define g15 REG (g15)
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * Following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/i960/cpu.c b/c/src/exec/score/cpu/i960/cpu.c
new file mode 100644
index 0000000000..68ecb0525c
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/cpu.c
@@ -0,0 +1,124 @@
+/*
+ * Intel i960CA Dependent Source
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
+#else
+#warning "*** ENTIRE FILE IMPLEMENTED & TESTED FOR CA ONLY ***"
+#warning "*** THIS FILE WILL NOT COMPILE ON ANOTHER FAMILY MEMBER ***"
+#endif
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ *
+ * OUTPUT PARAMETERS: NONE
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ _CPU_Table = *cpu_table;
+
+}
+
+/* _CPU__ISR_Install_vector
+ *
+ * Install the RTEMS vector wrapper in the CPU's interrupt table.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+#define _Is_vector_caching_enabled( _prcb ) \
+ ((_prcb)->control_tbl->icon & 0x2000)
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ i960ca_PRCB *prcb = _CPU_Table.Prcb;
+ proc_ptr *cached_intr_tbl = NULL;
+
+/* The i80960CA does not support vectors 0-7. The first 9 entries
+ * in the Interrupt Table are used to manage pending interrupts.
+ * Thus vector 8, the first valid vector number, is actually in
+ * slot 9 in the table.
+ */
+
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ _ISR_Vector_table[ vector ] = new_handler;
+
+ prcb->intr_tbl[ vector + 1 ] = _ISR_Handler;
+ if ( _Is_vector_caching_enabled( prcb ) )
+ if ( (vector & 0xf) == 0x2 ) /* cacheable? */
+ cached_intr_tbl[ vector >> 4 ] = _ISR_Handler;
+}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+#define soft_reset( prcb ) \
+ { register i960ca_PRCB *_prcb = (prcb); \
+ register unsigned32 *_next=0; \
+ register unsigned32 _cmd = 0x30000; \
+ asm volatile( "lda next,%1; \
+ sysctl %0,%1,%2; \
+ next: mov g0,g0" \
+ : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
+ : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+ }
+
+void _CPU_Install_interrupt_stack( void )
+{
+ i960ca_PRCB *prcb = _CPU_Table.Prcb;
+ unsigned32 level;
+
+ /*
+ * Set the Interrupt Stack in the PRCB and force a reload of it.
+ * Interrupts are disabled for safety.
+ */
+
+ _CPU_ISR_Disable( level );
+
+ prcb->intr_stack = _CPU_Interrupt_stack_low;
+
+ soft_reset( prcb );
+
+ _CPU_ISR_Enable( level );
+}
diff --git a/c/src/exec/score/cpu/i960/cpu.h b/c/src/exec/score/cpu/i960/cpu.h
new file mode 100644
index 0000000000..71a3341702
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/cpu.h
@@ -0,0 +1,424 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the Intel
+ * i960 processor family.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma align 4 /* for GNU C structure alignment */
+
+#include <i960.h>
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Use the i960's hardware interrupt stack support and have the
+ * interrupt manager allocate the memory for it.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Some family members have no FP (SA/KA/CA/CF), others have it built in
+ * (KB/MC/MX). There does not appear to be an external coprocessor
+ * for this family.
+ */
+
+#if ( I960_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#error "Floating point support for i960 family has been implemented!!!"
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP TRUE
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
+
+/* structures */
+
+/*
+ * Basic integer context for the i960 family.
+ */
+
+typedef struct {
+ void *r0_pfp; /* (r0) Previous Frame Pointer */
+ void *r1_sp; /* (r1) Stack Pointer */
+ unsigned32 pc; /* (pc) Processor Control */
+ void *g8; /* (g8) Global Register 8 */
+ void *g9; /* (g9) Global Register 9 */
+ void *g10; /* (g10) Global Register 10 */
+ void *g11; /* (g11) Global Register 11 */
+ void *g12; /* (g12) Global Register 12 */
+ void *g13; /* (g13) Global Register 13 */
+ unsigned32 g14; /* (g14) Global Register 14 */
+ void *g15_fp; /* (g15) Frame Pointer */
+} Context_Control;
+
+/*
+ * FP context save area for the i960 Numeric Extension
+ */
+
+typedef struct {
+ unsigned32 fp0_1; /* (fp0) first word */
+ unsigned32 fp0_2; /* (fp0) second word */
+ unsigned32 fp0_3; /* (fp0) third word */
+ unsigned32 fp1_1; /* (fp1) first word */
+ unsigned32 fp1_2; /* (fp1) second word */
+ unsigned32 fp1_3; /* (fp1) third word */
+ unsigned32 fp2_1; /* (fp2) first word */
+ unsigned32 fp2_2; /* (fp2) second word */
+ unsigned32 fp2_3; /* (fp2) third word */
+ unsigned32 fp3_1; /* (fp3) first word */
+ unsigned32 fp3_2; /* (fp3) second word */
+ unsigned32 fp3_3; /* (fp3) third word */
+} Context_Control_fp;
+
+/*
+ * The following structure defines the set of information saved
+ * on the current stack by RTEMS upon receipt of each interrupt.
+ */
+
+typedef struct {
+ unsigned32 TBD; /* XXX Fix for this CPU */
+} CPU_Interrupt_frame;
+
+/*
+ * Call frame for the i960 family.
+ */
+
+typedef struct {
+ void *r0_pfp; /* (r0) Previous Frame Pointer */
+ void *r1_sp; /* (r1) Stack Pointer */
+ void *r2_rip; /* (r2) Return Instruction Pointer */
+ void *r3; /* (r3) Local Register 3 */
+ void *r4; /* (r4) Local Register 4 */
+ void *r5; /* (r5) Local Register 5 */
+ void *r6; /* (r6) Local Register 6 */
+ void *r7; /* (r7) Local Register 7 */
+ void *r8; /* (r8) Local Register 8 */
+ void *r9; /* (r9) Local Register 9 */
+ void *r10; /* (r10) Local Register 10 */
+ void *r11; /* (r11) Local Register 11 */
+ void *r12; /* (r12) Local Register 12 */
+ void *r13; /* (r13) Local Register 13 */
+ void *r14; /* (r14) Local Register 14 */
+ void *r15; /* (r15) Local Register 15 */
+ /* XXX Looks like sometimes there is FP stuff here (MC manual)? */
+} CPU_Call_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the i960 specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
+ i960ca_PRCB *Prcb;
+#endif
+} rtems_cpu_table;
+
+/* variables */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/* constants */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x0000001f /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x0000001f /* interrupt level in mode */
+
+/*
+ * context size area for floating point
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * extra stack required by system initialization thread
+ *
+ * NOTE: Make sure this stays positive ...
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK \
+ (4096 - CPU_STACK_MINIMUM_SIZE)
+
+/*
+ * i960 family supports 256 distinct vectors.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 256
+
+/*
+ * Minimum size of a thread's stack.
+ *
+ * NOTE: See CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK
+ */
+
+#define CPU_STACK_MINIMUM_SIZE 1024
+
+/*
+ * i960 is pretty tolerant of alignment. Just put things on 4 byte boundaries.
+ */
+
+#define CPU_ALIGNMENT 4
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * i960ca stack requires 16 byte alignment
+ *
+ * NOTE: This factor may need to be family member dependent.
+ */
+
+#define CPU_STACK_ALIGNMENT 16
+
+/* macros */
+
+/*
+ * ISR handler macros
+ *
+ * These macros perform the following functions:
+ * + disable all maskable CPU interrupts
+ * + restore previous interrupt level (enable)
+ * + temporarily restore interrupts (flash)
+ * + set a particular level
+ */
+
+#define _CPU_ISR_Disable( _level ) i960_disable_interrupts( _level )
+#define _CPU_ISR_Enable( _level ) i960_enable_interrupts( _level )
+#define _CPU_ISR_Flash( _level ) i960_flash_interrupts( _level )
+
+#define _CPU_ISR_Set_level( newlevel ) \
+ { \
+ unsigned32 _mask, _level=(newlevel); \
+ \
+ __asm__ volatile ( "ldconst 0x1f0000,%0; \
+ modpc 0,%0,%1" : "=d" (_mask), "=d" (_level) \
+ : "0" (_mask), "1" (_level) \
+ ); \
+ }
+
+/* ISR handler section macros */
+
+/*
+ * Context handler macros
+ *
+ * These macros perform the following functions:
+ * + initialize a context area
+ * + restart the current thread
+ * + calculate the initial pointer into a FP context area
+ * + initialize an FP context area
+ */
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry ) \
+ { CPU_Call_frame *_texit_frame; \
+ unsigned32 _mask; \
+ unsigned32 _base_pc; \
+ unsigned32 _stack_tmp; \
+ void *_stack; \
+ \
+ _stack_tmp = (unsigned32)(_stack_base) + CPU_STACK_ALIGNMENT; \
+ _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
+ _stack = (void *) _stack_tmp; \
+ \
+ __asm__ volatile ( "flushreg" : : ); /* flush register cache */ \
+ \
+ (_the_context)->r0_pfp = _stack; \
+ (_the_context)->g15_fp = _stack + (1 * sizeof(CPU_Call_frame)); \
+ (_the_context)->r1_sp = _stack + (2 * sizeof(CPU_Call_frame)); \
+ __asm__ volatile ( "ldconst 0x1f0000,%0 ; " \
+ "modpc 0,0,%1 ; " \
+ "andnot %0,%1,%1 ; " \
+ : "=d" (_mask), "=d" (_base_pc) : ); \
+ (_the_context)->pc = _base_pc | ((_isr) << 16); \
+ (_the_context)->g14 = 0; \
+ \
+ _texit_frame = (CPU_Call_frame *)_stack; \
+ _texit_frame->r0_pfp = NULL; \
+ _texit_frame->r1_sp = (_the_context)->g15_fp; \
+ _texit_frame->r2_rip = (_entry); \
+ }
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+#define _CPU_Context_Fp_start( _base, _offset ) NULL
+
+#define _CPU_Context_Initialize_fp( _fp_area )
+
+/* end of Context handler macros */
+
+/*
+ * Fatal Error manager macros
+ *
+ * These macros perform the following functions:
+ * + disable interrupts and halt the CPU
+ */
+
+#define _CPU_Fatal_halt( _errorcode ) \
+ { unsigned32 _mask, _level; \
+ unsigned32 _error = (_errorcode); \
+ \
+ __asm__ volatile ( "ldconst 0x1f0000,%0 ; \
+ mov %0,%1 ; \
+ modpc 0,%0,%1 ; \
+ mov %2,g0 ; \
+ self: b self " \
+ : "=d" (_mask), "=d" (_level), "=d" (_error) : ); \
+ }
+
+/* end of Fatal Error Manager macros */
+
+/*
+ * Bitfield handler macros
+ *
+ * These macros perform the following functions:
+ * + scan for the highest numbered (MSB) set in a 16 bit bitfield
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { unsigned32 _search = (_value); \
+ \
+ __asm__ volatile ( "scanbit %0,%1 " \
+ : "=d" (_search), "=d" (_output) \
+ : "0" (_search), "1" (_output) ); \
+ }
+
+/* end of Bitfield handler macros */
+
+/*
+ * Priority handler macros
+ *
+ * These macros perform the following functions:
+ * + return a mask with the bit for this major/minor portion of
+ * of thread priority set.
+ * + translate the bit number returned by "Bitfield_find_first_bit"
+ * into an index into the thread ready chain bit maps
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x8000 >> (_bit_number) )
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ ( 15 - (_priority) )
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner and avoid stack conflicts.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/i960/cpu_asm.s b/c/src/exec/score/cpu/i960/cpu_asm.s
new file mode 100644
index 0000000000..eb11e14760
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/cpu_asm.s
@@ -0,0 +1,199 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the i960CA implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+ .text
+/*
+ * Format of i960ca Register structure
+ */
+
+.set REG_R0_PFP , 0 # (r0) Previous Frame Pointer
+.set REG_R1_SP , REG_R0_PFP+4 # (r1) Stack Pointer
+.set REG_PC , REG_R1_SP+4 # (pc) Processor Controls
+.set REG_G8 , REG_PC+4 # (g8) Global Register 8
+.set REG_G9 , REG_G8+4 # (g9) Global Register 9
+.set REG_G10 , REG_G9+4 # (g10) Global Register 10
+.set REG_G11 , REG_G10+4 # (g11) Global Register 11
+.set REG_G12 , REG_G11+4 # (g12) Global Register 12
+.set REG_G13 , REG_G12+4 # (g13) Global Register 13
+.set REG_G14 , REG_G13+4 # (g14) Global Register 14
+.set REG_G15_FP , REG_G14+4 # (g15) Global Register 15
+.set SIZE_REGS , REG_G15_FP+4 # size of cpu_context_registers
+ # structure
+
+/*
+ * void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ .align 4
+ .globl __CPU_Context_switch
+
+__CPU_Context_switch:
+ modpc 0,0,g2 # get old intr level (PC)
+ st g2,REG_PC(g0) # save pc
+ stq g8,REG_G8(g0) # save g8-g11
+ stq g12,REG_G12(g0) # save g12-g15
+ stl pfp,REG_R0_PFP(g0) # save pfp, sp
+
+restore: flushreg # flush register cache
+ ldconst 0x001f0000,g2 # g2 = PC mask
+ ld REG_PC(g1),g3 # thread->Regs.pc = pc;
+ ldq REG_G12(g1),g12 # restore g12-g15
+ ldl REG_R0_PFP(g1),pfp # restore pfp, sp
+ ldq REG_G8(g1),g8 # restore g8-g11
+ modpc 0,g2,g3 # restore PC register
+ ret
+
+/*
+ * void _CPU_Context_restore( new_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .globl __CPU_Context_restore
+__CPU_Context_restore:
+ mov g0,g1 # g0 = _Thread_executing
+ b restore
+
+/*PAGE
+ * void _CPU_Context_save_fp_context( &fp_context_ptr )
+ * void _CPU_Context_restore_fp_context( &fp_context_ptr )
+ *
+ * There is currently no hardware floating point for the i960.
+ */
+
+ .globl __CPU_Context_save_fp
+ .globl __CPU_Context_restore_fp
+__CPU_Context_save_fp:
+__CPU_Context_restore_fp:
+#if ( I960_HAS_FPU == 1 )
+#error "Floating point support for i960 family has been implemented!!!"
+#endif
+ ret
+
+/*PAGE
+ * void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * NOTE:
+ * Upon entry, the supervisor stack will contain a stack frame
+ * back to the interrupted thread and the interrupt stack will contain
+ * an interrupt stack frame. If dispatching is enabled, this
+ * is the outer most interrupt, and (a context switch is necessary or
+ * the current thread has signals), then set up the supervisor stack to
+ * transfer control to the interrupt dispatcher.
+ */
+
+ .globl __ISR_Handler
+__ISR_Handler:
+ #ldconst 1,r8
+ #modpc 0,r8,r8 # enable tracing
+
+ # r4 = &_Thread_Dispatch_disable_level
+ ld __Thread_Dispatch_disable_level,r4
+ movl g0,r8 # save g0-g1
+
+ ld -16+8(fp),g0 # g0 = vector number
+ movl g2,r10 # save g2-g3
+
+ ld __ISR_Nest_level,r5 # r5 = &_Isr_nest_level
+ mov g14,r7 # save g14
+
+ lda 0,g14 # NOT Branch and Link
+ movl g4,r12 # save g4-g5
+
+ lda 1(r4),r4 # increment dispatch disable level
+ movl g6,r14 # save g6-g7
+
+ ld __ISR_Vector_table[g0*4],g1 # g1 = Users handler
+ addo 1,r5,r5 # increment ISR level
+
+ st r4,__Thread_Dispatch_disable_level
+ # one ISR nest level deeper
+ subo 1,r4,r4 # decrement dispatch disable level
+
+ st r5,__ISR_Nest_level # disable multitasking
+ subo 1,r5,r5 # decrement ISR nest level
+
+ callx (g1) # invoke user ISR
+
+ st r4,__Thread_Dispatch_disable_level
+ # unnest multitasking
+ st r5,__ISR_Nest_level # one less ISR nest level
+ cmpobne.f 0,r4,exit # If dispatch disabled, exit
+ ldl -16(fp),g0 # g0 = threads PC reg
+ # g1 = threads AC reg
+ ld __Context_Switch_necessary,r6
+ # r6 = Is thread switch necessary?
+ bbs.f 13,g0,exit # not outer level, then exit
+ cmpobne.f 0,r6,bframe # Switch necessary?
+
+ ld __ISR_Signals_to_thread_executing,g2
+ # signals sent to Run_thread
+ # while in interrupt handler?
+ cmpobe.f 0,g2,exit # No, then exit
+
+bframe: mov 0,g2
+ st g2,__ISR_Signals_to_thread_executing
+
+ ldconst 0x1f0000,g2 # g2 = intr disable mask
+ mov g2,g3 # g3 = new intr level
+ modpc 0,g2,g3 # set new level
+
+ andnot 7,pfp,r4 # r4 = pfp without ret type
+ flushreg # flush registers
+ # push _Isr_dispatch ret frame
+ # build ISF in r4-r6
+ ldconst 64,g2 # g2 = size of stack frame
+ ld 4(r4),g3 # g3 = previous sp
+ addo g2,g3,r5 # r5 = _Isr_dispatch SP
+ lda __ISR_Dispatch,r6 # r6 = _Isr_dispatch entry
+ stt r4,(g3) # set _Isr_dispatch ret info
+ st g1,16(g3) # set r4 = AC for ISR disp
+ or 7,g3,pfp # pfp to _Isr_dispatch
+
+exit: mov r7,g14 # restore g14
+ movq r8,g0 # restore g0-g3
+ movq r12,g4 # restore g4-g7
+ ret
+
+
+/*PAGE
+ *
+ * void __ISR_Dispatch()
+ *
+ * Entry point from the outermost interrupt service routine exit.
+ * The current stack is the supervisor mode stack.
+ */
+
+__ISR_Dispatch:
+ mov g14,r7
+ mov 0,g14
+ movq g0,r8
+ movq g4,r12
+ call __Thread_Dispatch
+
+ ldconst -1,r5 # r5 = reload mask
+ modac r5,r4,r4 # restore threads AC register
+ mov r7,g14
+ movq r8,g0
+ movq r12,g4
+ ret
diff --git a/c/src/exec/score/cpu/i960/i960.h b/c/src/exec/score/cpu/i960/i960.h
new file mode 100644
index 0000000000..fe7e68e95f
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/i960.h
@@ -0,0 +1,289 @@
+/* i960.h
+ *
+ * This include file contains information pertaining to the Intel
+ * i960 processor family.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __i960_h
+#define __i960_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define i960
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the Intel i960
+ * family. It does this by setting variables to indicate
+ * which implementation dependent features are present
+ * in a particular member of the family.
+ *
+ * NOTE: For now i960 is really the i960ca. eventually need
+ * to put in at least support for FPU.
+ */
+
+#if defined(i960ca)
+
+#define RTEMS_MODEL_NAME "i960ca"
+#define I960_HAS_FPU 0
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "Intel i960"
+
+#ifndef ASM
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+typedef unsigned32 Priority_Bit_map_control;
+
+typedef char signed8; /* 8-bit signed integer */
+typedef short signed16; /* 16-bit signed integer */
+typedef int signed32; /* 32-bit signed integer */
+typedef long long signed64; /* 64-bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+/*
+ * XXX should have an ifdef here and have stuff for the other
+ * XXX family members...
+ */
+
+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
+
+/* i960CA control structures */
+
+/* Intel i960CA Control Table */
+
+typedef struct {
+ /* Control Group 0 */
+ unsigned int ipb0; /* IP breakpoint 0 */
+ unsigned int ipb1; /* IP breakpoint 1 */
+ unsigned int dab0; /* data address breakpoint 0 */
+ unsigned int dab1; /* data address breakpoint 1 */
+ /* Control Group 1 */
+ unsigned int imap0; /* interrupt map 0 */
+ unsigned int imap1; /* interrupt map 1 */
+ unsigned int imap2; /* interrupt map 2 */
+ unsigned int icon; /* interrupt control */
+ /* Control Group 2 */
+ unsigned int mcon0; /* memory region 0 configuration */
+ unsigned int mcon1; /* memory region 1 configuration */
+ unsigned int mcon2; /* memory region 2 configuration */
+ unsigned int mcon3; /* memory region 3 configuration */
+ /* Control Group 3 */
+ unsigned int mcon4; /* memory region 4 configuration */
+ unsigned int mcon5; /* memory region 5 configuration */
+ unsigned int mcon6; /* memory region 6 configuration */
+ unsigned int mcon7; /* memory region 7 configuration */
+ /* Control Group 4 */
+ unsigned int mcon8; /* memory region 8 configuration */
+ unsigned int mcon9; /* memory region 9 configuration */
+ unsigned int mcon10; /* memory region 10 configuration */
+ unsigned int mcon11; /* memory region 11 configuration */
+ /* Control Group 5 */
+ unsigned int mcon12; /* memory region 12 configuration */
+ unsigned int mcon13; /* memory region 13 configuration */
+ unsigned int mcon14; /* memory region 14 configuration */
+ unsigned int mcon15; /* memory region 15 configuration */
+ /* Control Group 6 */
+ unsigned int bpcon; /* breakpoint control */
+ unsigned int tc; /* trace control */
+ unsigned int bcon; /* bus configuration control */
+ unsigned int reserved; /* reserved */
+} i960ca_control_table;
+
+/* Intel i960CA Processor Control Block */
+
+typedef struct {
+ unsigned int *fault_tbl; /* fault table base address */
+ i960ca_control_table
+ *control_tbl; /* control table base address */
+ unsigned int initial_ac; /* AC register initial value */
+ unsigned int fault_config; /* fault configuration word */
+ void **intr_tbl; /* interrupt table base address */
+ void *sys_proc_tbl; /* system procedure table
+ base address */
+ unsigned int reserved; /* reserved */
+ unsigned int *intr_stack; /* interrupt stack pointer */
+ unsigned int ins_cache_cfg; /* instruction cache
+ configuration word */
+ unsigned int reg_cache_cfg; /* register cache configuration word */
+} i960ca_PRCB;
+
+#endif
+
+typedef void ( *i960_isr )( void );
+
+#define i960_disable_interrupts( oldlevel ) \
+ { (oldlevel) = 0x1f0000; \
+ asm volatile ( "modpc 0,%1,%1" \
+ : "=d" ((oldlevel)) \
+ : "0" ((oldlevel)) ); \
+ }
+
+#define i960_enable_interrupts( oldlevel ) \
+ { unsigned int _mask = 0x1f0000; \
+ asm volatile ( "modpc 0,%0,%1" \
+ : "=d" (_mask), "=d" ((oldlevel)) \
+ : "0" (_mask), "1" ((oldlevel)) ); \
+ }
+
+#define i960_flash_interrupts( oldlevel ) \
+ { unsigned int _mask = 0x1f0000; \
+ asm volatile ( "modpc 0,%0,%1 ; \
+ mov %0,%1 ; \
+ modpc 0,%0,%1" \
+ : "=d" (_mask), "=d" ((oldlevel)) \
+ : "0" (_mask), "1" ((oldlevel)) ); \
+ }
+
+#define i960_atomic_modify( mask, addr, prev ) \
+ { register unsigned int _mask = (mask); \
+ register unsigned int *_addr = (unsigned int *)(addr); \
+ asm volatile( "atmod %0,%1,%1" \
+ : "=d" (_addr), "=d" (_mask) \
+ : "0" (_addr), "1" (_mask) ); \
+ (prev) = _mask; \
+ }
+
+
+#define atomic_modify( _mask, _address, _previous ) \
+ i960_atomic_modify( _mask, _address, _previous )
+
+#define i960_enable_tracing() \
+ { register unsigned32 _pc = 0x1; \
+ asm volatile( "modpc 0,%0,%0" : "=d" (_pc) : "0" (_pc) ); \
+ }
+
+#define i960_unmask_intr( xint ) \
+ { register unsigned32 _mask= (1<<(xint)); \
+ asm volatile( "or sf1,%0,sf1" : "=d" (_mask) : "0" (_mask) ); \
+ }
+
+#define i960_mask_intr( xint ) \
+ { register unsigned32 _mask= (1<<(xint)); \
+ asm volatile( "andnot %0,sf1,sf1" : "=d" (_mask) : "0" (_mask) ); \
+ }
+
+#define i960_clear_intr( xint ) \
+ { register unsigned32 _xint=(xint); \
+ asm volatile( "loop_til_cleared:
+ clrbit %0,sf0,sf0 ; \
+ bbs %0,sf0,loop_til_cleared" \
+ : "=d" (_xint) : "0" (_xint) ); \
+ }
+
+#define i960_reload_ctl_group( group ) \
+ { register int _cmd = ((group)|0x400) ; \
+ asm volatile( "sysctl %0,%0,%0" : "=d" (_cmd) : "0" (_cmd) ); \
+ }
+
+#define i960_cause_intr( intr ) \
+ { register int _intr = (intr); \
+ asm volatile( "sysctl %0,%0,%0" : "=d" (_intr) : "0" (_intr) ); \
+ }
+
+#define i960_soft_reset( prcb ) \
+ { register i960ca_PRCB *_prcb = (prcb); \
+ register unsigned32 *_next=0; \
+ register unsigned32 _cmd = 0x30000; \
+ asm volatile( "lda next,%1; \
+ sysctl %0,%1,%2; \
+ next: mov g0,g0" \
+ : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
+ : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+ }
+
+static inline unsigned32 i960_pend_intrs()
+{ register unsigned32 _intr=0;
+ asm volatile( "mov sf0,%0" : "=d" (_intr) : "0" (_intr) );
+ return ( _intr );
+}
+
+static inline unsigned32 i960_mask_intrs()
+{ register unsigned32 _intr=0;
+ asm volatile( "mov sf1,%0" : "=d" (_intr) : "0" (_intr) );
+ return( _intr );
+}
+
+static inline unsigned32 i960_get_fp()
+{ register unsigned32 _fp=0;
+ asm volatile( "mov fp,%0" : "=d" (_fp) : "0" (_fp) );
+ return ( _fp );
+}
+
+/*
+ * The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version is based on code presented in Vol. 4, No. 4 of
+ * Insight 960. It is certainly something you wouldn't think
+ * of on your own.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ register unsigned int to_swap = value;
+ register unsigned int temp = 0xFF00FF00;
+ register unsigned int swapped = 0;
+
+ /* to_swap swapped */
+ asm volatile ( "rotate 16,%0,%2 ;" /* 0x12345678 0x56781234 */
+ "modify %1,%0,%2 ;" /* 0x12345678 0x12785634 */
+ "rotate 8,%2,%2" /* 0x12345678 0x78563412 */
+ : "=r" (to_swap), "=r" (temp), "=r" (swapped)
+ : "0" (to_swap), "1" (temp), "2" (swapped)
+ );
+ return( swapped );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/i960/rtems.s b/c/src/exec/score/cpu/i960/rtems.s
new file mode 100644
index 0000000000..8abf47a276
--- /dev/null
+++ b/c/src/exec/score/cpu/i960/rtems.s
@@ -0,0 +1,25 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the i960 implementation of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+ .text
+
+ .align 4
+ .globl RTEMS
+
+RTEMS:
+ ld __Entry_points[g7*4],r4
+ bx (r4)
+
diff --git a/c/src/exec/score/cpu/m68k/asm.h b/c/src/exec/score/cpu/m68k/asm.h
new file mode 100644
index 0000000000..068c58058c
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/asm.h
@@ -0,0 +1,127 @@
+/* asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted. This file is:
+ *
+ * COPYRIGHT (c) 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * $Id$
+ */
+
+#ifndef __M68k_ASM_h
+#define __M68k_ASM_h
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#define ASM
+#include <m68k.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+#define d0 REG (d0)
+#define d1 REG (d1)
+#define d2 REG (d2)
+#define d3 REG (d3)
+#define d4 REG (d4)
+#define d5 REG (d5)
+#define d6 REG (d6)
+#define d7 REG (d7)
+#define a0 REG (a0)
+#define a1 REG (a1)
+#define a2 REG (a2)
+#define a3 REG (a3)
+#define a4 REG (a4)
+#define a5 REG (a5)
+#define a6 REG (a6)
+#define a7 REG (a7)
+
+#define msp REG (msp)
+#define usp REG (usp)
+#define isp REG (isp)
+#define sr REG (sr)
+
+#define fp0 REG (fp0)
+#define fp1 REG (fp1)
+#define fp2 REG (fp2)
+#define fp3 REG (fp3)
+#define fp4 REG (fp4)
+#define fp5 REG (fp5)
+#define fp6 REG (fp6)
+#define fp7 REG (fp7)
+
+#define fpc REG (fpc)
+#define fpi REG (fpi)
+#define fps REG (fps)
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * Following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+#endif
+/* end of include file */
+
+
diff --git a/c/src/exec/score/cpu/m68k/cpu.c b/c/src/exec/score/cpu/m68k/cpu.c
new file mode 100644
index 0000000000..45484da1f4
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/cpu.c
@@ -0,0 +1,97 @@
+/*
+ * Motorola MC68020 Dependent Source
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - entry pointer to thread dispatcher
+ *
+ * OUTPUT PARAMETERS: NONE
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ _CPU_Table = *cpu_table;
+
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * new_handler - replacement ISR for this vector number
+ * old_handler - former ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ proc_ptr *interrupt_table = NULL;
+
+ m68k_get_vbr( interrupt_table );
+
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ _ISR_Vector_table[ vector ] = new_handler;
+ interrupt_table[ vector ] = _ISR_Handler;
+}
+
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+#if ( M68K_HAS_SEPARATE_STACKS == 1 )
+ void *isp = _CPU_Interrupt_stack_high;
+
+ asm volatile ( "movec %0,%%isp" : "=r" (isp) : "0" (isp) );
+#else
+#warning "FIX ME... HOW DO I INSTALL THE INTERRUPT STACK!!!"
+#endif
+}
+
diff --git a/c/src/exec/score/cpu/m68k/cpu.h b/c/src/exec/score/cpu/m68k/cpu.h
new file mode 100644
index 0000000000..a1dd27db57
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/cpu.h
@@ -0,0 +1,412 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the Motorola
+ * m68xxx processor family.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * If defined, this causes some of the macros to initialize their
+ * variables to zero before doing inline assembly. This gets rid
+ * of compile time warnings at the cost of a little execution time
+ * in some time critical routines.
+ */
+
+#define NO_UNINITIALIZED_WARNINGS
+
+#include <m68k.h>
+
+/* conditional compilation parameters */
+
+#define CPU_INLINE_ENABLE_DISPATCH TRUE
+#define CPU_UNROLL_ENQUEUE_PRIORITY FALSE
+
+/*
+ * Use the m68k's hardware interrupt stack support and have the
+ * interrupt manager allocate the memory for it.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Some family members have no FP, some have an FPU such as the
+ * MC68881/MC68882 for the MC68020, others have it built in (MC68030, 040).
+ */
+
+#if ( M68K_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * All tasks are not by default floating point tasks on this CPU.
+ * The IDLE task does not have a floating point context on this CPU.
+ * It is safe to use the deferred floating point context switch
+ * algorithm on this CPU.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP FALSE
+#define CPU_STRUCTURE_ALIGNMENT
+
+/* structures */
+
+/*
+ * Basic integer context for the m68k family.
+ */
+
+typedef struct {
+ unsigned32 sr; /* (sr) status register */
+ unsigned32 d2; /* (d2) data register 2 */
+ unsigned32 d3; /* (d3) data register 3 */
+ unsigned32 d4; /* (d4) data register 4 */
+ unsigned32 d5; /* (d5) data register 5 */
+ unsigned32 d6; /* (d6) data register 6 */
+ unsigned32 d7; /* (d7) data register 7 */
+ void *a2; /* (a2) address register 2 */
+ void *a3; /* (a3) address register 3 */
+ void *a4; /* (a4) address register 4 */
+ void *a5; /* (a5) address register 5 */
+ void *a6; /* (a6) address register 6 */
+ void *a7_msp; /* (a7) master stack pointer */
+} Context_Control;
+
+/*
+ * FP context save area for the M68881/M68882 numeric coprocessors.
+ */
+
+typedef struct {
+ unsigned8 fp_save_area[332]; /* 216 bytes for FSAVE/FRESTORE */
+ /* 96 bytes for FMOVEM FP0-7 */
+ /* 12 bytes for FMOVEM CREGS */
+ /* 4 bytes for non-null flag */
+} Context_Control_fp;
+
+/*
+ * The following structure defines the set of information saved
+ * on the current stack by RTEMS upon receipt of each interrupt.
+ */
+
+typedef struct {
+ unsigned32 TBD; /* XXX Fix for this CPU */
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the m68k specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+ m68k_isr *interrupt_vector_table;
+} rtems_cpu_table;
+
+/* variables */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/* constants */
+
+/*
+ * This defines the number of levels and the mask used to pick those
+ * bits out of a thread mode.
+ */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000007 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000007 /* interrupt level in mode */
+
+/*
+ * context size area for floating point
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * extra stack required by system initialization thread
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 1024
+
+/*
+ * m68k family supports 256 distinct vectors.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 256
+
+/*
+ * Minimum size of a thread's stack.
+ *
+ * NOTE: 256 bytes is probably too low in most cases.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE 256
+
+/*
+ * m68k is pretty tolerant of alignment. Just put things on 4 byte boundaries.
+ */
+
+#define CPU_ALIGNMENT 4
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * On m68k thread stacks require no further alignment after allocation
+ * from the Workspace.
+ */
+
+#define CPU_STACK_ALIGNMENT 0
+
+/* macros */
+
+/*
+ * ISR handler macros
+ *
+ * These macros perform the following functions:
+ * + disable all maskable CPU interrupts
+ * + restore previous interrupt level (enable)
+ * + temporarily restore interrupts (flash)
+ * + set a particular level
+ */
+
+#define _CPU_ISR_Disable( _level ) \
+ m68k_disable_interrupts( _level )
+
+#define _CPU_ISR_Enable( _level ) \
+ m68k_enable_interrupts( _level )
+
+#define _CPU_ISR_Flash( _level ) \
+ m68k_flash_interrupts( _level )
+
+#define _CPU_ISR_Set_level( _newlevel ) \
+ m68k_set_interrupt_level( _newlevel )
+
+/* end of ISR handler macros */
+
+/*
+ * Context handler macros
+ *
+ * These macros perform the following functions:
+ * + initialize a context area
+ * + restart the current thread
+ * + calculate the initial pointer into a FP context area
+ * + initialize an FP context area
+ */
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ do { \
+ void *_stack; \
+ \
+ (_the_context)->sr = 0x3000 | ((_isr) << 8); \
+ _stack = (void *)(_stack_base) + (_size) - 4; \
+ (_the_context)->a7_msp = _stack; \
+ *(void **)_stack = (_entry_point); \
+ } while ( 0 )
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ { asm volatile( "movew %0,%%sr ; " \
+ "moval %1,%%a7 ; " \
+ "rts" \
+ : "=d" ((_the_context)->sr), "=d" ((_the_context)->a7_msp) \
+ : "0" ((_the_context)->sr), "1" ((_the_context)->a7_msp) ); \
+ }
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ((void *) \
+ _Addresses_Add_offset( \
+ (_base), \
+ (_offset) + CPU_CONTEXT_FP_SIZE - 4 \
+ ) \
+ )
+
+#define _CPU_Context_Initialize_fp( _fp_area ) \
+ { unsigned32 *_fp_context = (unsigned32 *)*(_fp_area); \
+ \
+ *(--(_fp_context)) = 0; \
+ *(_fp_area) = (unsigned8 *)(_fp_context); \
+ }
+
+/* end of Context handler macros */
+
+/*
+ * Fatal Error manager macros
+ *
+ * These macros perform the following functions:
+ * + disable interrupts and halt the CPU
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ { asm volatile( "movl %0,%%d0; " \
+ "orw #0x0700,%%sr; " \
+ "stop #0x2700" : "=d" ((_error)) : "0" ((_error)) ); \
+ }
+
+/* end of Fatal Error manager macros */
+
+/*
+ * Bitfield handler macros
+ *
+ * These macros perform the following functions:
+ * + scan for the highest numbered (MSB) set in a 16 bit bitfield
+ *
+ * NOTE:
+ *
+ * It appears that on the M68020 bitfields are always 32 bits wide
+ * when in a register. This code forces the bitfield to be in
+ * memory (it really always is anyway). This allows us to
+ * have a real 16 bit wide bitfield which operates "correctly."
+ */
+
+#if ( M68K_HAS_BFFFO == 1 )
+#ifdef NO_UNINITIALIZED_WARNINGS
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ register void *__base = (void *)&(_value); \
+ \
+ (_output) = 0; /* avoids warnings */ \
+ asm volatile( "bfffo (%0),#0,#16,%1" \
+ : "=a" (__base), "=d" ((_output)) \
+ : "0" (__base), "1" ((_output)) ) ; \
+ }
+#else
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ register void *__base = (void *)&(_value); \
+ \
+ asm volatile( "bfffo (%0),#0,#16,%1" \
+ : "=a" (__base), "=d" ((_output)) \
+ : "0" (__base), "1" ((_output)) ) ; \
+ }
+#endif
+
+#else
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ (_output) = 0 /* avoids warnings */
+
+#warning "FIX ME... NEEDS A SOFTWARE BFFFO IMPLEMENTATION"
+#warning "SEE no_cpu/cpu.h FOR POSSIBLE ALGORITHMS"
+
+#endif
+
+/* end of Bitfield handler macros */
+
+/*
+ * Priority handler macros
+ *
+ * These macros perform the following functions:
+ * + return a mask with the bit for this major/minor portion of
+ * of thread priority set.
+ * + translate the bit number returned by "Bitfield_find_first_bit"
+ * into an index into the thread ready chain bit maps
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x8000 >> (_bit_number) )
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/m68k/cpu_asm.s b/c/src/exec/score/cpu/m68k/cpu_asm.s
new file mode 100644
index 0000000000..d8615627a0
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/cpu_asm.s
@@ -0,0 +1,202 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the MC68020 implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+
+#include <asm.h>
+
+ .text
+
+/* void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 4
+ .global SYM (_CPU_Context_switch)
+
+.set RUNCONTEXT_ARG, 4 | save context argument
+.set HEIRCONTEXT_ARG, 8 | restore context argument
+
+SYM (_CPU_Context_switch):
+ moval a7@(RUNCONTEXT_ARG),a0| a0 = running thread context
+ movw sr,d1 | d1 = status register
+ movml d1-d7/a2-a7,a0@ | save context
+
+ moval a7@(HEIRCONTEXT_ARG),a0| a0 = heir thread context
+restore: movml a0@,d1-d7/a2-a7 | restore context
+ movw d1,sr | restore status register
+ rts
+
+/*PAGE
+ * void __CPU_Context_save_fp_context( &fp_context_ptr )
+ * void __CPU_Context_restore_fp_context( &fp_context_ptr )
+ *
+ * These routines are used to context switch a MC68881 or MC68882.
+ *
+ * NOTE: Context save and restore code is based upon the code shown
+ * on page 6-38 of the MC68881/68882 Users Manual (rev 1).
+ *
+ * CPU_FP_CONTEXT_SIZE is higher than expected to account for the
+ * -1 pushed at end of this sequence.
+ */
+
+.set FPCONTEXT_ARG, 4 | save FP context argument
+
+ .align 4
+ .global SYM (_CPU_Context_save_fp)
+SYM (_CPU_Context_save_fp):
+#if ( M68K_HAS_FPU == 1 )
+ moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area
+ moval a1@,a0 | a0 = Save context area
+ fsave a0@- | save 68881/68882 state frame
+ tstb a0@ | check for a null frame
+ beq nosv | Yes, skip save of user model
+ fmovem fp0-fp7,a0@- | save data registers (fp0-fp7)
+ fmovem fpc/fps/fpi,a0@- | and save control registers
+ movl #-1,a0@- | place not-null flag on stack
+nosv: movl a0,a1@ | save pointer to saved context
+#endif
+ rts
+
+ .align 4
+ .global SYM (_CPU_Context_restore_fp)
+SYM (_CPU_Context_restore_fp):
+#if ( M68K_HAS_FPU == 1 )
+ moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area
+ moval a1@,a0 | a0 = address of saved context
+ tstb a0@ | Null context frame?
+ beq norst | Yes, skip fp restore
+ addql #4,a0 | throwaway non-null flag
+ fmovem a0@+,fpc/fps/fpi | restore control registers
+ fmovem a0@+,fp0-fp7 | restore data regs (fp0-fp7)
+norst: frestore a0@+ | restore the fp state frame
+ movl a0,a1@ | save pointer to saved context
+#endif
+ rts
+
+/*PAGE
+ * void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * NOTE:
+ * Upon entry, the master stack will contain an interrupt stack frame
+ * back to the interrupted thread and the interrupt stack will contain
+ * a throwaway interrupt stack frame. If dispatching is enabled, this
+ * is the outer most interrupt, and (a context switch is necessary or
+ * the current thread has signals), then set up the master stack to
+ * transfer control to the interrupt dispatcher.
+ */
+
+.set SR_OFFSET, 0 | Status register offset
+.set PC_OFFSET, 2 | Program Counter offset
+.set FVO_OFFSET, 6 | Format/vector offset
+
+.set SAVED, 16 | space for saved registers
+
+ .align 4
+ .global SYM (_ISR_Handler)
+
+SYM (_ISR_Handler):
+ moveml d0-d1/a0-a1,a7@- | save d0-d1,a0-a1
+ addql #1,SYM (_ISR_Nest_level) | one nest level deeper
+ addql #1,SYM (_Thread_Dispatch_disable_level)
+ | disable multitasking
+ movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO
+ andl #0x0fff,d0 | d0 = vector offset in vbr
+
+#if ( M68K_HAS_PREINDEXING == 1 )
+ movel @( SYM (_ISR_Vector_table),d0:w:1),a0| fetch the ISR
+#else
+ movel # SYM (_ISR_Vector_table),a0 | a0 = base of RTEMS table
+ addal d0,a0 | a0 = address of vector
+ movel @(a0),a0 | a0 = address of user routine
+#warning "UNTESTED CODE!!!"
+#endif
+
+ lsrl #2,d0 | d0 = vector number
+ movel d0,a7@- | push vector number
+ jbsr a0@ | invoke the user ISR
+ addql #4,a7 | remove vector number
+
+ subql #1,SYM (_ISR_Nest_level) | one less nest level
+ subql #1,SYM (_Thread_Dispatch_disable_level)
+ | unnest multitasking
+ bne exit | If dispatch disabled, exit
+
+ movew #0xf000,d0 | isolate format nibble
+ andw a7@(SAVED+FVO_OFFSET),d0 | get F/VO
+ cmpiw #0x1000,d0 | is it a throwaway isf?
+ bne exit | NOT outer level, so branch
+
+ tstl SYM (_Context_Switch_necessary)
+ | Is thread switch necessary?
+ bne bframe | Yes, invoke dispatcher
+
+ tstl SYM (_ISR_Signals_to_thread_executing)
+ | signals sent to Run_thread
+ | while in interrupt handler?
+ beq exit | No, then exit
+
+
+bframe: clrl SYM (_ISR_Signals_to_thread_executing)
+ | If sent, will be processed
+#if ( M68K_HAS_SEPARATE_STACKS == 1 )
+ movec msp,a0 | a0 = master stack pointer
+ movew #0,a0@- | push format word
+ movel # SYM (_ISR_Dispatch),a0@- | push return addr
+ movew a0@(6+SR_OFFSET),a0@- | push thread sr
+ movec a0,msp | set master stack pointer
+#else
+#warning "FIX ME ... HOW DO I DISPATCH FROM AN INTERRUPT?"
+/* probably will simply need to push the _ISR_Dispatch frame */
+#endif
+
+exit: moveml a7@+,d0-d1/a0-a1 | restore d0-d1,a0-a1
+ rte | return to thread
+ | OR _Isr_dispatch
+
+/*PAGE
+ * void _ISR_Dispatch()
+ *
+ * Entry point from the outermost interrupt service routine exit.
+ * The current stack is the supervisor mode stack if this processor
+ * has separate stacks.
+ *
+ * 1. save all registers not preserved across C calls.
+ * 2. invoke the _Thread_Dispatch routine to switch tasks
+ * or deliver a signal to the currently executing task.
+ * 3. restore all registers not preserved across C calls.
+ * 4. return from interrupt
+ */
+
+ .global SYM (_ISR_Dispatch)
+SYM (_ISR_Dispatch):
+ movml d0-d1/a0-a1,a7@-
+ jsr SYM (_Thread_Dispatch)
+ movml a7@+,d0-d1/a0-a1
+ rte
+
+
+
+
+
+
+
+
+
+
+
diff --git a/c/src/exec/score/cpu/m68k/m68k.h b/c/src/exec/score/cpu/m68k/m68k.h
new file mode 100644
index 0000000000..3a62b7553b
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/m68k.h
@@ -0,0 +1,282 @@
+/* m68k.h
+ *
+ * This include file contains information pertaining to the Motorola
+ * m68xxx processor family.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __M68k_h
+#define __M68k_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define m68k
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This section contains the information required to build
+ * RTEMS for a particular member of the Motorola MC68xxx
+ * family. It does this by setting variables to indicate
+ * which implementation dependent features are present in
+ * a particular member of the family.
+ *
+ * Currently recognized:
+ * m68000 (no FP)
+ * m68020 (implies FP)
+ * m68020_nofp (no FP)
+ * m68030 (implies FP)
+ * m68040 (implies FP)
+ * m68lc040 (no FP)
+ * m68ec040 (no FP)
+ *
+ * Primary difference (for RTEMS) between m68040, m68lc040, and
+ * m68ec040 is the presence or absence of the FPU.
+ *
+ * Here is some information on the 040 variants (courtesy of Doug McBride,
+ * mcbride@rodin.colorado.edu):
+ *
+ * "The 68040 is a superset of the 68EC040 and the 68LC040. The
+ * 68EC040 and 68LC040 do not have FPU's. The 68LC040 and the
+ * 68EC040 have renamed the DLE pin as JS0 which must be tied to
+ * Gnd or Vcc. The 68EC040 has renamed the MDIS pin as JS1. The
+ * 68EC040 has access control units instead of memory management units.
+ * The 68EC040 should not have the PFLUSH or PTEST instructions executed
+ * (cause an indeterminate result). The 68EC040 and 68LC040 do not
+ * implement the DLE or multiplexed bus modes. The 68EC040 does not
+ * implement the output buffer impedance selection mode of operation."
+ */
+
+#if defined(m68000)
+
+#define RTEMS_MODEL_NAME "m68000"
+#define M68K_HAS_VBR 0
+#define M68K_HAS_SEPARATE_STACKS 0
+#define M68K_HAS_FPU 0
+#define M68K_HAS_BFFFO 0
+#define M68K_HAS_PREINDEXING 0
+
+#elif defined(m68020)
+
+#define RTEMS_MODEL_NAME "m68020"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 1
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#elif defined(m68020_nofp)
+
+#define RTEMS_MODEL_NAME "m68020 w/o fp"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 0
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#elif defined(m68030)
+
+#define RTEMS_MODEL_NAME "m68030"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 1
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#elif defined(m68040)
+
+#define RTEMS_MODEL_NAME "m68040"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 1
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#elif defined(m68lc040)
+
+#define RTEMS_MODEL_NAME "m68lc040"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 0
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#elif defined(m68ec040)
+
+#define RTEMS_MODEL_NAME "m68ec040"
+#define M68K_HAS_VBR 1
+#define M68K_HAS_SEPARATE_STACKS 1
+#define M68K_HAS_FPU 0
+#define M68K_HAS_BFFFO 1
+#define M68K_HAS_PREINDEXING 1
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * If defined, this causes some of the macros to initialize their
+ * variables to zero before doing inline assembly. This gets rid
+ * of compile time warnings at the cost of a little execution time
+ * in some time critical routines.
+ */
+
+#define NO_UNINITIALIZED_WARNINGS
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "Motorola MC68xxx"
+
+#ifndef ASM
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* unsigned 8-bit integer */
+typedef unsigned short unsigned16; /* unsigned 16-bit integer */
+typedef unsigned int unsigned32; /* unsigned 32-bit integer */
+typedef unsigned long long unsigned64; /* unsigned 64-bit integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef char signed8; /* signed 8-bit integer */
+typedef short signed16; /* signed 16-bit integer */
+typedef int signed32; /* signed 32-bit integer */
+typedef long long signed64; /* signed 64-bit integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+/*
+ * The m68k_isr type is the entry-point type of an interrupt service routine.
+ */
+
+typedef void ( *m68k_isr )( void );
+
+#ifdef NO_UNINITIALIZED_WARNINGS
+#define m68k_disable_interrupts( _level ) \
+ { \
+ (_level) = 0; /* avoids warnings */ \
+ asm volatile ( "movew %%sr,%0 ; \
+ orw #0x0700,%%sr" \
+ : "=d" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+#else
+#define m68k_disable_interrupts( _level ) \
+ { \
+ asm volatile ( "movew %%sr,%0 ; \
+ orw #0x0700,%%sr" \
+ : "=d" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+#endif
+
+#define m68k_enable_interrupts( _level ) \
+ { \
+ asm volatile ( "movew %0,%%sr " \
+ : "=d" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+
+#define m68k_flash_interrupts( _level ) \
+ { \
+ asm volatile ( "movew %0,%%sr ; \
+ orw #0x0700,%%sr" \
+ : "=d" ((_level)) : "0" ((_level)) \
+ ); \
+ }
+
+#define m68k_set_interrupt_level( _newlevel ) \
+ { \
+ register unsigned32 _tmpsr = 0; \
+ \
+ asm volatile( "movw %%sr,%0" \
+ : "=d" (_tmpsr) : "0" (_tmpsr) \
+ ); \
+ \
+ _tmpsr = (_tmpsr & 0xf8ff) | ((_newlevel) << 8); \
+ \
+ asm volatile( "movw %0,%%sr" \
+ : "=d" (_tmpsr) : "0" (_tmpsr) \
+ ); \
+ }
+
+#if ( M68K_HAS_VBR == 1 )
+#define m68k_get_vbr( vbr ) \
+ { (vbr) = 0; \
+ asm volatile ( "movec %%vbr,%0 " \
+ : "=r" (vbr) : "0" (vbr) ); \
+ }
+
+#define m68k_set_vbr( vbr ) \
+ { register m68k_isr *_vbr= (m68k_isr *)(vbr); \
+ asm volatile ( "movec %0,%%vbr " \
+ : "=a" (_vbr) : "0" (_vbr) ); \
+ }
+#else
+#define m68k_get_vbr( _vbr ) _vbr = 0
+#define m68k_set_vbr( _vbr )
+#endif
+
+/*
+ * The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ */
+
+static inline unsigned int m68k_swap_u32(
+ unsigned int value
+)
+{
+ unsigned int swapped = value;
+
+ asm volatile( "rorw #8,%0" : "=d" (swapped) : "0" (swapped) );
+ asm volatile( "swap %0" : "=d" (swapped) : "0" (swapped) );
+ asm volatile( "rorw #8,%0" : "=d" (swapped) : "0" (swapped) );
+
+ return( swapped );
+}
+
+/* XXX this is only valid for some m68k family members and should be fixed */
+
+#define m68k_enable_caching() \
+ { register unsigned32 _ctl=0x01; \
+ asm volatile ( "movec %0,%%cacr" \
+ : "=d" (_ctl) : "0" (_ctl) ); \
+ }
+
+#define CPU_swap_u32( value ) m68k_swap_u32( value )
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif
+/* end of include file */
diff --git a/c/src/exec/score/cpu/m68k/rtems.s b/c/src/exec/score/cpu/m68k/rtems.s
new file mode 100644
index 0000000000..faae97e487
--- /dev/null
+++ b/c/src/exec/score/cpu/m68k/rtems.s
@@ -0,0 +1,46 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the m68k implementation of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+
+#include <asm.h>
+
+/*
+ * There seems to be no reason to have two versions of this.
+ * The following version should work across the entire family.
+ * The worst assumption is that gcc will put entry in a scratch
+ * register and not screw up the stack.
+ *
+ * NOTE: This is a 68020 version:
+ *
+ * jmpl @(%%d0:l:4)@(__Entry_points)
+ */
+
+ EXTERN (_Entry_points)
+
+ BEGIN_CODE
+
+ .align 4
+ .global SYM (RTEMS)
+
+SYM (RTEMS):
+ moveal SYM (_Entry_points), a0
+ lsll #2, d0
+ addal d0, a0
+ moveal @(a0),a0
+ jmpl @(a0)
+
+ END_CODE
+END
diff --git a/c/src/exec/score/cpu/no_cpu/asm.h b/c/src/exec/score/cpu/no_cpu/asm.h
new file mode 100644
index 0000000000..69b1f0f825
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/asm.h
@@ -0,0 +1,98 @@
+/* asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted. This file is:
+ *
+ * COPYRIGHT (c) 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * $Id$
+ */
+
+#ifndef __NO_CPU_ASM_h
+#define __NO_CPU_ASM_h
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#define ASM
+#include <no_cpu.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/*
+ * define macros for all of the registers on this CPU
+ *
+ * EXAMPLE: #define d0 REG (d0)
+ */
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * Following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+#endif
+/* end of include file */
+
+
diff --git a/c/src/exec/score/cpu/no_cpu/cpu.c b/c/src/exec/score/cpu/no_cpu/cpu.c
new file mode 100644
index 0000000000..f09d935c2d
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/cpu.c
@@ -0,0 +1,132 @@
+/*
+ * XXX CPU Dependent Source
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/wkspace.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+
+/*
+ * Perform CPU dependent initialization: validate the CPU table, record the
+ * thread dispatch entry point, and keep a private copy of the table.
+ *
+ * NOTE(review): thread_dispatch is declared without an argument list --
+ * presumably a void (*)(void); confirm against the declaration in cpu.h.
+ */
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ /* A NULL CPU table is a configuration error -- halt via the fatal error handler. */
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ /*
+ * The thread_dispatch argument is the address of the entry point
+ * for the routine called at the end of an ISR once it has been
+ * decided a context switch is necessary. On some compilation
+ * systems it is difficult to call a high-level language routine
+ * from assembly. This allows us to trick these systems.
+ *
+ * If you encounter this problem save the entry point in a CPU
+ * dependent variable.
+ */
+
+ _CPU_Thread_dispatch_pointer = thread_dispatch;
+
+ /*
+ * XXX; If there is not an easy way to initialize the FP context
+ * during Context_Initialize, then it is usually easier to
+ * save an "uninitialized" FP context here and copy it to
+ * the task's during Context_Initialize.
+ */
+
+ /* XXX: FP context initialization support */
+
+ /* Keep a private copy of the configuration table for later reference. */
+ _CPU_Table = *cpu_table;
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ /*
+ * Return the previous handler through *old_handler before it is
+ * overwritten below.
+ *
+ * NOTE(review): vector is not bounds-checked against
+ * CPU_INTERRUPT_NUMBER_OF_VECTORS; callers must pass a valid index.
+ */
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointer to isr entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler;
+}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+ /* Stub for the example port: no hardware interrupt stack pointer is loaded. */
+}
+
+/*PAGE
+ *
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * NOTES:
+ *
+ * 1. This is the same as the regular CPU independent algorithm.
+ *
+ * 2. If you implement this using a "halt", "idle", or "shutdown"
+ * instruction, then don't forget to put it in an infinite loop.
+ *
+ * 3. Be warned. Some processors with onboard DMA have been known
+ * to stop the DMA if the CPU were put in IDLE mode. This might
+ * also be a problem with other on-chip peripherals. So use this
+ * hook with caution.
+ */
+
+void _CPU_Internal_threads_Idle_thread_body( void )
+{
+
+ /* Spin forever; a real port would place its low-power/halt instruction
+ * inside this infinite loop (see the notes in the comment block above).
+ */
+ for( ; ; )
+ /* insert your "halt" instruction here */ ;
+}
diff --git a/c/src/exec/score/cpu/no_cpu/cpu.h b/c/src/exec/score/cpu/no_cpu/cpu.h
new file mode 100644
index 0000000000..cf38b64a4d
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/cpu.h
@@ -0,0 +1,818 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the XXX
+ * processor.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <no_cpu.h> /* pick up machine definitions */
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ * something calls _Thread_Enable_dispatch which in turns calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.]
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? In unrolled each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then the memory is not allocated during initialization.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "NO_CPU_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#if ( NO_CPU_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not think utilize the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP TRUE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Internal_threads_Idle_thread_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP TRUE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactically requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structures to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ */
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000001
+
+/*
+ * Processor defined structures
+ *
+ * Examples structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinction between the caller/callee saves registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it simply consist of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ unsigned32 some_integer_register;
+ unsigned32 some_system_register;
+} Context_Control;
+
+typedef struct {
+ double some_float_register;
+} Context_Control_fp;
+
+typedef struct {
+ unsigned32 special_interrupt_register;
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the XXX processor specific parameters.
+ *
+ * NOTE: The interrupt_stack_size field is required if
+ * CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
+ *
+ * The pretasking_hook, predriver_hook, and postdriver_hook,
+ * and the do_zero_of_workspace fields are required on ALL CPUs.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+ unsigned32 some_other_cpu_dependent_info;
+} rtems_cpu_table;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+EXTERN Context_Control_fp _CPU_Null_fp_context;
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this give the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+EXTERN void (*_CPU_Thread_dispatch_pointer)();
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+/* XXX: if needed, put more variables here */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * system initialization thread. Remember that in a multiprocessor
+ * system the system initialization thread becomes the MP server thread.
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 32
+
+/*
+ * Should be large enough to run all RTEMS tests. This insures
+ * that a "reasonable" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*4)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT 0
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _level.
+ */
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ { \
+ (_isr_cookie) = 0; /* do something to prevent warnings */ \
+ }
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _level is not modified.
+ */
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ }
+
+/*
+ * This temporarily restores the interrupt to _level before immediately
+ * disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _level is not
+ * modified.
+ */
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { \
+ }
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+#define _CPU_ISR_Set_level( new_level ) \
+ { \
+ }
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ */
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _isr, _entry_point ) \
+ { \
+ }
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which a "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ { \
+ }
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variables in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ (_output) = 0; /* do something to prevent warnings */ \
+ }
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 1 << (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * This routine is the CPU dependent IDLE thread body.
+ *
+ * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
+ * is TRUE.
+ */
+
+void _CPU_Internal_threads_Idle_thread_body( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to insure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+/*
+ * Portable 32-bit endian swap: extract the four bytes of the value and
+ * reassemble them in reverse order. Works on any CPU; ports with a
+ * byte-swap instruction should replace it (see the comment block above).
+ */
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 byte1, byte2, byte3, byte4, swapped;
+
+ /* Isolate each byte of the input (byte1 = least significant). */
+ byte4 = (value >> 24) & 0xff;
+ byte3 = (value >> 16) & 0xff;
+ byte2 = (value >> 8) & 0xff;
+ byte1 = value & 0xff;
+
+ /* Rebuild the word with the byte order reversed. */
+ swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ return( swapped );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/exec/score/cpu/no_cpu/cpu_asm.c b/c/src/exec/score/cpu/no_cpu/cpu_asm.c
new file mode 100644
index 0000000000..26246a93c2
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/cpu_asm.c
@@ -0,0 +1,152 @@
+/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s
+ *
+ * This file contains the basic algorithms for all assembly code used
 * in a specific CPU port of RTEMS.  These algorithms must be implemented
 * in assembly language.
+ *
+ * NOTE: This is supposed to be a .S or .s file NOT a C file.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+/*
+ * This is supposed to be an assembly file. This means that system.h
+ * and cpu.h should not be included in a "real" cpu_asm file. An
 * implementation in assembly should include "cpu_asm.h".
+ */
+
+#include <rtems/system.h>
+#include <rtems/cpu.h>
/* #include "cpu_asm.h" */
+
+/*
+ * _CPU_Context_save_fp_context
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the point to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
/*
 * _CPU_Context_save_fp
 *
 * no_cpu template stub: a real port saves the floating point register
 * set at *fp_context_ptr here (usually implemented in assembly).
 */
void _CPU_Context_save_fp(
  void **fp_context_ptr
)
{
}
+
+/*
+ * _CPU_Context_restore_fp_context
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the point to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
/*
 * _CPU_Context_restore_fp
 *
 * no_cpu template stub: a real port reloads the floating point register
 * set from *fp_context_ptr here (usually implemented in assembly).
 */
void _CPU_Context_restore_fp(
  void **fp_context_ptr
)
{
}
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
/*
 * _CPU_Context_switch
 *
 * no_cpu template stub: a real port saves the integer context of *run
 * and resumes execution with the context in *heir.
 */
void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
)
{
}
+
+/*
+ * _CPU_Context_restore
+ *
 * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
/*
 * _CPU_Context_restore
 *
 * no_cpu template stub: restores *new_context without saving the
 * current one -- often just a label inside _CPU_Context_switch.
 */
void _CPU_Context_restore(
  Context_Control *new_context
)
{
}
+
+/* void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ */
+
/*
 * _ISR_Handler
 *
 * no_cpu template for the common interrupt dispatcher: invoke the
 * user's C-language ISR for the active vector and, on exit from the
 * outermost interrupt, dispatch a context switch if one became
 * necessary.  The body below is pseudocode for a real port.
 */
void _ISR_Handler()
{
  /*
   * This discussion ignores a lot of the ugly details in a real
   * implementation such as saving enough registers/state to be
   * able to do something real.  Keep in mind that the goal is
   * to invoke a user's ISR handler which is written in C and
   * uses a certain set of registers.
   *
   * Also note that the exact order is to a large extent flexible.
   * Hardware will dictate a sequence for a certain subset of
   * _ISR_Handler, while requirements for setting up the software
   * interrupt stack and the dispatch disable level leave the rest
   * of the ordering up to the port.
   */

  /*
   * At entry to "common" _ISR_Handler, the vector number must be
   * available.  On some CPUs the hardware puts either the vector
   * number or the offset into the vector table for this ISR in a
   * known place.  If the hardware does not give us this information,
   * then the assembly portion of RTEMS for this port will contain
   * a set of distinct interrupt entry points which somehow place
   * the vector number in a known place (which is safe if another
   * interrupt nests this one) and branches to _ISR_Handler.
   *
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   *
   *  _ISR_Nest_level++;
   *
   *  _Thread_Dispatch_disable_level++;
   *
   *  (*_ISR_Vector_table[ vector ])( vector );
   *
   *  if ( --_ISR_Nest_level == 0 ) {
   *    if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing )
   *      call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   *    #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *      restore stack
   *    #endif
   *  }
   *
   *  prepare to get out of interrupt
   *  return from interrupt
   *
   */
}
+
diff --git a/c/src/exec/score/cpu/no_cpu/cpu_asm.h b/c/src/exec/score/cpu/no_cpu/cpu_asm.h
new file mode 100644
index 0000000000..0f4154a453
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/cpu_asm.h
@@ -0,0 +1,70 @@
+/*
+ * cpu_asm.h
+ *
+ * Very loose template for an include file for the cpu_asm.? file
+ * if it is implemented as a ".S" file (preprocessed by cpp) instead
+ * of a ".s" file (preprocessed by gm4 or gasp).
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef __CPU_ASM_h
+#define __CPU_ASM_h
+
+/* pull in the generated offsets */
+
+#include <offsets.h>
+
+/*
+ * Hardware General Registers
+ */
+
+/* put something here */
+
+/*
+ * Hardware Floating Point Registers
+ */
+
+/* put something here */
+
+/*
+ * Hardware Control Registers
+ */
+
+/* put something here */
+
+/*
+ * Calling Convention
+ */
+
+/* put something here */
+
+/*
+ * Temporary registers
+ */
+
+/* put something here */
+
+/*
+ * Floating Point Registers - SW Conventions
+ */
+
+/* put something here */
+
+/*
+ * Temporary floating point registers
+ */
+
+/* put something here */
+
+#endif
+
+/* end of file */
diff --git a/c/src/exec/score/cpu/no_cpu/no_cpu.h b/c/src/exec/score/cpu/no_cpu/no_cpu.h
new file mode 100644
index 0000000000..ec973dadcf
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/no_cpu.h
@@ -0,0 +1,86 @@
+/*
+ *
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef _INCLUDE_NO_CPU_h
+#define _INCLUDE_NO_CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define no_cpu
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the "no cpu"
+ * family when executing in protected mode. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ */
+
+#if defined(no_cpu)
+
+#define RTEMS_MODEL_NAME "no_cpu"
+#define NOCPU_HAS_FPU 1
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "NO CPU"
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef char signed8; /* 8-bit signed integer */
+typedef short signed16; /* 16-bit signed integer */
+typedef int signed32; /* 32-bit signed integer */
+typedef long long signed64; /* 64-bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+typedef void ( *no_cpu_isr_entry )( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _INCLUDE_NO_CPU_h */
+/* end of include file */
diff --git a/c/src/exec/score/cpu/no_cpu/rtems.c b/c/src/exec/score/cpu/no_cpu/rtems.c
new file mode 100644
index 0000000000..5415ae9852
--- /dev/null
+++ b/c/src/exec/score/cpu/no_cpu/rtems.c
@@ -0,0 +1,45 @@
+/* rtems.c ===> rtems.S or rtems.s
+ *
+ * This file contains the single entry point code for
+ * the XXX implementation of RTEMS.
+ *
+ * NOTE: This is supposed to be a .S or .s file NOT a C file.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+/*
+ * This is supposed to be an assembly file. This means that system.h
+ * and cpu.h should not be included in a "real" rtems file.
+ */
+
+#include <rtems/system.h>
+#include <rtems/cpu.h>
/* #include "asm.h" */
+
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in the
+ * CPU defined register. This routine is used when RTEMS is
+ * linked by itself and placed in ROM. This routine is the
+ * first address in the ROM space for RTEMS. The user "calls"
+ * this address with the directive arguments in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ */
+
/*
 * RTEMS
 *
 * no_cpu template stub for the single ROM entry point described above:
 * a real port jumps indirectly through _Entry_points to the requested
 * directive while preserving the caller's arguments.
 */
void RTEMS()
{
}
+
diff --git a/c/src/exec/score/cpu/unix/cpu.c b/c/src/exec/score/cpu/unix/cpu.c
new file mode 100644
index 0000000000..ed94953d58
--- /dev/null
+++ b/c/src/exec/score/cpu/unix/cpu.c
@@ -0,0 +1,529 @@
+/*
+ * HP PA-RISC CPU Dependent Source
+ *
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/wkspace.h>
+/*
+ * In order to get the types and prototypes used in this file under
+ * Solaris 2.3, it is necessary to pull the following magic.
+ */
+
+#if defined(solaris)
+#warning "Ignore the undefining __STDC__ warning"
+#undef __STDC__
+#define __STDC__ 0
+#undef _POSIX_C_SOURCE
+#endif
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <sys/time.h>
#include <time.h>
+
+extern void set_vector(proc_ptr, int, int);
+extern void _Thread_Dispatch(void);
+
+extern unsigned32 _Thread_Dispatch_disable_level;
+extern unsigned32 _SYSTEM_ID;
+extern boolean _Context_Switch_necessary;
+
+
+rtems_status_code signal_initialize(void);
+void Stray_signal(int);
+void signal_enable(unsigned32);
+void signal_disable(unsigned32);
+void interrupt_handler();
+
+sigset_t UNIX_SIGNAL_MASK;
+jmp_buf default_context;
+
+/*
+ * Which cpu are we? Used by libcpu and libbsp.
+ */
+
+int cpu_number;
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
 * thread_dispatch - address of dispatching routine
+ */
+
+
/*
 * _CPU_Initialize (UNIX port)
 *
 * Records the dispatch entry point and CPU table, captures a template
 * jmp_buf for _CPU_Context_Initialize, blocks the RTEMS signal set,
 * and installs Stray_signal on every vector before wiring up the real
 * signal-based "interrupt" machinery.
 */
void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)      /* ignored on this CPU */
)
{
  unsigned32 i;

  if ( cpu_table == NULL )
    rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );

  /*
   * The thread_dispatch argument is the address of the entry point
   * for the routine called at the end of an ISR once it has been
   * decided a context switch is necessary.  On some compilation
   * systems it is difficult to call a high-level language routine
   * from assembly.  This allows us to trick these systems.
   *
   * If you encounter this problem save the entry point in a CPU
   * dependent variable.
   */

  _CPU_Thread_dispatch_pointer = thread_dispatch;

  /*
   * XXX; If there is not an easy way to initialize the FP context
   *      during Context_Initialize, then it is usually easier to
   *      save an "uninitialized" FP context here and copy it to
   *      the task's during Context_Initialize.
   */

  /* XXX: FP context initialization support */

  _CPU_Table = *cpu_table;

#if defined(hppa1_1) && defined(RTEMS_UNIXLIB)
  /*
   * HACK - set the _SYSTEM_ID to 0x20c so that setjmp/longjmp
   * will handle the full 32 floating point registers.
   *
   *  NOTE: Is this a bug in HPUX9?
   */

  _SYSTEM_ID = 0x20c;
#endif

  /*
   * get default values to use in _CPU_Context_Initialize()
   * (the captured jmp_buf is the template every task context starts from)
   */

  setjmp(default_context);

  /*
   * Block all the signals except SIGTRAP for the debugger
   * and SIGABRT for fatal errors.
   */

  _CPU_ISR_Set_signal_level(1);

  sigfillset(&UNIX_SIGNAL_MASK);
  sigdelset(&UNIX_SIGNAL_MASK, SIGTRAP);
  sigdelset(&UNIX_SIGNAL_MASK, SIGABRT);
  sigdelset(&UNIX_SIGNAL_MASK, SIGIOT);
  sigdelset(&UNIX_SIGNAL_MASK, SIGCONT);

  sigprocmask(SIG_BLOCK, &UNIX_SIGNAL_MASK, 0);

  /*
   * Set the handler for all signals to be signal_handler
   * which will then vector out to the correct handler
   * for whichever signal actually happened.  Initially
   * set the vectors to the stray signal handler.
   */

  /* NOTE(review): assumes 32 signal vectors -- confirm against target OS */
  for (i = 0; i < 32; i++)
      (void)set_vector(Stray_signal, i, 1);

  signal_initialize();
}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+
void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
)
{
  /* hand the previously installed handler back to the caller first */
  *old_handler = _ISR_Vector_table[ vector ];

  /*
   * If the interrupt vector table is a table of pointer to isr entry
   * points, then we need to install the appropriate RTEMS interrupt
   * handler for this vector number.
   */

  /*
   * We put the actual user ISR address in '_ISR_Vector_table'.  This will
   * be used by the _ISR_Handler so the user gets control.
   */

  _ISR_Vector_table[ vector ] = new_handler;
}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
/*
 * _CPU_Install_interrupt_stack
 *
 * Intentionally empty: the UNIX port does not set up a dedicated
 * interrupt stack here.
 */
void _CPU_Install_interrupt_stack( void )
{
}
+
+/*PAGE
+ *
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * NOTES:
+ *
+ * 1. This is the same as the regular CPU independent algorithm.
+ *
+ * 2. If you implement this using a "halt", "idle", or "shutdown"
+ * instruction, then don't forget to put it in an infinite loop.
+ *
+ * 3. Be warned. Some processors with onboard DMA have been known
+ * to stop the DMA if the CPU were put in IDLE mode. This might
+ * also be a problem with other on-chip peripherals. So use this
+ * hook with caution.
+ */
+
/*
 * _CPU_Internal_threads_Idle_thread_body
 *
 * Idle body for the UNIX port: sleep in pause() until a signal
 * ("interrupt") arrives, forever.
 */
void _CPU_Internal_threads_Idle_thread_body( void )
{
  for ( ;; )
    pause();
}
+
/*
 * _CPU_Context_Initialize
 *
 * Builds an initial task context: copies the jmp_buf template captured
 * by _CPU_Initialize's setjmp(default_context), then patches the return
 * address and stack/frame pointer slots for the host jmp_buf layout
 * (offsets RP_OFF/SP_OFF/FP_OFF are defined per-libc in cpu.h).
 */
void _CPU_Context_Initialize(
  Context_Control  *_the_context,
  unsigned32       *_stack_base,
  unsigned32        _size,
  unsigned32        _new_level,
  proc_ptr         *_entry_point
)
{
  unsigned32  *addr;
  unsigned32   jmp_addr;
  unsigned32   _stack;
  unsigned32   _the_size;

  jmp_addr = (unsigned32) _entry_point;

  /* round the stack base up to the next CPU_STACK_ALIGNMENT boundary */
  _stack = ((unsigned32)(_stack_base) + CPU_STACK_ALIGNMENT);
  _stack &= ~(CPU_STACK_ALIGNMENT - 1);

  /* and truncate the usable size down to a multiple of the alignment */
  _the_size = _size & ~(CPU_STACK_ALIGNMENT - 1);

  /*
   * Slam our jmp_buf template into the context we are creating
   */

  memcpy(_the_context, default_context, sizeof(jmp_buf));

  addr = (unsigned32 *)_the_context;

#if defined(hppa1_1)
  *(addr + RP_OFF) = jmp_addr;
  *(addr + SP_OFF) = (unsigned32)(_stack + CPU_FRAME_SIZE);

  /*
   * See if we are using shared libraries by checking
   * bit 30 in 24 off of newp.  If bit 30 is set then
   * we are using shared libraries and the jump address
   * is at what 24 off of newp points to so shove that
   * into 24 off of newp instead.
   */

  if (jmp_addr & 0x40000000) {
    jmp_addr &= 0xfffffffc;
    *(addr + RP_OFF) = (unsigned32)*(unsigned32 *)jmp_addr;
  }
#elif defined(sparc)

  /*
   * See /usr/include/sys/stack.h in Solaris 2.3 for a nice
   * diagram of the stack.
   */

  asm ("ta 0x03");  /* trap: flush register windows to the stack */

  *(addr + RP_OFF) = jmp_addr + ADDR_ADJ_OFFSET;
  *(addr + SP_OFF) = (unsigned32)(_stack +_the_size - CPU_FRAME_SIZE);
  *(addr + FP_OFF) = (unsigned32)(_stack +_the_size);
#else
#error "UNKNOWN CPU!!!"
#endif

  /* a nonzero requested level means the task starts with signals blocked */
  if (_new_level)
    _CPU_ISR_Set_signal_level(1);
  else
    _CPU_ISR_Set_signal_level(0);

}
+
/*
 * _CPU_Context_restore
 *
 * Abandons the current context and resumes *next.  Per the C standard,
 * a longjmp value of 0 is replaced by 1, so the matching setjmp (in
 * _CPU_Context_switch or the context template) returns nonzero.
 */
void _CPU_Context_restore(
  Context_Control *next
)
{
  longjmp(next->regs, 0);
}
+
/*
 * _CPU_Context_switch
 *
 * Saves the running task's registers in current->regs via setjmp and
 * resumes *next via longjmp.  When the saved task is itself resumed
 * later, setjmp returns nonzero and control falls out to the caller.
 */
void _CPU_Context_switch(
  Context_Control  *current,
  Context_Control  *next
)
{
  /*
   * Save the current context
   */

  if (setjmp(current->regs) == 0) {

    /*
     * Switch to the new context
     */

    longjmp(next->regs, 0);
  }
}
+
/*
 * _CPU_Save_float_context
 *
 * Intentionally empty on UNIX: no FP state is saved beyond what
 * setjmp/longjmp already handle.
 */
void _CPU_Save_float_context(
  Context_Control_fp *fp_context
)
{
}
+
/*
 * _CPU_Restore_float_context
 *
 * Intentionally empty on UNIX: no FP state is restored beyond what
 * setjmp/longjmp already handle.
 */
void _CPU_Restore_float_context(
  Context_Control_fp *fp_context
)
{
}
+
+void _CPU_ISR_Set_signal_level(unsigned32 level)
+{
+ if (level)
+ _CPU_Disable_signal();
+ else
+ _CPU_Enable_signal(0);
+}
+
+
/*
 * _CPU_Disable_signal
 *
 * Blocks the RTEMS signal set and returns the previous "level":
 * 1 if any signals were already blocked, 0 if none were.
 *
 * NOTE(review): the memcmp() assumes sigset_t has no padding or unused
 * bits left indeterminate by sigprocmask() -- verify on the target libc.
 */
unsigned32 _CPU_Disable_signal(void)
{
  sigset_t  old_mask;
  sigset_t  empty_mask;

  sigemptyset(&empty_mask);
  sigemptyset(&old_mask);
  sigprocmask(SIG_BLOCK, &UNIX_SIGNAL_MASK, &old_mask);

  /* nonempty old mask => signals were already disabled before this call */
  if (memcmp((char *)&empty_mask, (char *)&old_mask, sizeof(sigset_t)) != 0)
    return 1;

  return 0;
}
+
+
+void _CPU_Enable_signal(unsigned32 level)
+{
+ if (level == 0)
+ sigprocmask(SIG_UNBLOCK, &UNIX_SIGNAL_MASK, 0);
+}
+
+
+/*
+ * Support for external and spurious interrupts on HPPA
+ *
+ * TODO:
+ * delete interrupt.c etc.
+ * Count interrupts
+ * make sure interrupts disabled properly
+ * should handler check again for more interrupts before exit?
+ * How to enable interrupts from an interrupt handler?
+ * Make sure there is an entry for everything in ISR_Vector_Table
+ */
+
+/*
+ * Init the external interrupt scheme
+ * called by bsp_start()
+ */
+
/*
 * signal_initialize
 *
 * Installs interrupt_handler as the action for every signal the port
 * treats as an external interrupt, leaving SIGTRAP/SIGABRT/SIGIOT/
 * SIGCONT free for the debugger and fatal-error paths.
 * Always returns RTEMS_SUCCESSFUL.
 */
rtems_status_code
signal_initialize(void)
{
  struct sigaction act;
  sigset_t         mask;

  /* mark them all active except for TraceTrap and Abort */

  sigfillset(&mask);
  sigdelset(&mask, SIGTRAP);
  sigdelset(&mask, SIGABRT);
  sigdelset(&mask, SIGIOT);
  sigdelset(&mask, SIGCONT);
  sigprocmask(SIG_UNBLOCK, &mask, 0);

  act.sa_handler = interrupt_handler;
  act.sa_mask = mask;          /* block the whole set while a handler runs */
#if defined(solaris)
  act.sa_flags = SA_RESTART;   /* restart interrupted system calls */
#else
  act.sa_flags = 0;
#endif

  /*
   * NOTE(review): SIGKILL and SIGSTOP cannot be caught, so those two
   * sigaction() calls will fail; all return codes below are ignored.
   * Several of these signals (SIGEMT, SIGCLD, SIGPWR, SIGLOST) are
   * platform specific -- confirm availability on the target OS.
   */

  sigaction(SIGHUP, &act, 0);
  sigaction(SIGINT, &act, 0);
  sigaction(SIGQUIT, &act, 0);
  sigaction(SIGILL, &act, 0);
  sigaction(SIGEMT, &act, 0);
  sigaction(SIGFPE, &act, 0);
  sigaction(SIGKILL, &act, 0);
  sigaction(SIGBUS, &act, 0);
  sigaction(SIGSEGV, &act, 0);
  sigaction(SIGSYS, &act, 0);
  sigaction(SIGPIPE, &act, 0);
  sigaction(SIGALRM, &act, 0);
  sigaction(SIGTERM, &act, 0);
  sigaction(SIGUSR1, &act, 0);
  sigaction(SIGUSR2, &act, 0);
  sigaction(SIGCHLD, &act, 0);
  sigaction(SIGCLD, &act, 0);
  sigaction(SIGPWR, &act, 0);
  sigaction(SIGVTALRM, &act, 0);
  sigaction(SIGPROF, &act, 0);
  sigaction(SIGIO, &act, 0);
  sigaction(SIGWINCH, &act, 0);
  sigaction(SIGSTOP, &act, 0);
  sigaction(SIGTTIN, &act, 0);
  sigaction(SIGTTOU, &act, 0);
  sigaction(SIGURG, &act, 0);
/*
 * XXX: Really should be on HPUX.
 */

#if defined(hppa1_1)
  sigaction(SIGLOST, &act, 0);
#endif

  return RTEMS_SUCCESSFUL;
}
+
+
+/*
+ * External interrupt handler.
+ * This is installed as cpu interrupt handler.
+ * It vectors out to specific external interrupt handlers.
+ */
+
+void
+interrupt_handler(int vector)
+{
+ if (_ISR_Nest_level++ == 0) {
+ /* switch to interrupt stack */
+ }
+
+ _Thread_Dispatch_disable_level++;
+
+ if (_ISR_Vector_table[vector]) {
+ _ISR_Vector_table[vector](vector);
+ }
+ else {
+ Stray_signal(vector);
+ }
+
+ if (_ISR_Nest_level-- == 0) {
+ /* switch back to original stack */
+ }
+
+ _Thread_Dispatch_disable_level--;
+
+ if (_Thread_Dispatch_disable_level == 0 &&
+ (_Context_Switch_necessary || _ISR_Signals_to_thread_executing)) {
+ _CPU_Enable_signal(0);
+ _Thread_Dispatch();
+ }
+}
+
+
+void
+Stray_signal(int sig_num)
+{
+ char buffer[ 80 ];
+
+ /*
+ * We avoid using the stdio section of the library.
+ * The following is generally safe.
+ */
+
+ write(
+ 2,
+ buffer,
+ sprintf( buffer, "Stray signal %d\n", sig_num )
+ );
+
+ /*
+ * If it was a "fatal" signal, then exit here
+ * If app code has installed a hander for one of these, then
+ * we won't call Stray_signal, so this is ok.
+ */
+
+ switch (sig_num)
+ {
+ case SIGINT:
+ case SIGHUP:
+ case SIGQUIT:
+ case SIGILL:
+ case SIGEMT:
+ case SIGKILL:
+ case SIGBUS:
+ case SIGSEGV:
+ case SIGTERM:
+ _CPU_Fatal_error(0x100 + sig_num);
+ }
+}
+
+
+void
+_CPU_Fatal_error(unsigned32 error)
+{
+ setitimer(ITIMER_REAL, 0, 0);
+
+ _exit(error);
+}
+
+int
+_CPU_ffs(unsigned32 value)
+{
+ int output;
+
+ output = ffs(value);
+ output = output - 1;
+
+ return(output);
+}
diff --git a/c/src/exec/score/cpu/unix/cpu.h b/c/src/exec/score/cpu/unix/cpu.h
new file mode 100644
index 0000000000..e6b29bcd74
--- /dev/null
+++ b/c/src/exec/score/cpu/unix/cpu.h
@@ -0,0 +1,929 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the HP
+ * PA-RISC processor (Level 1.1).
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <setjmp.h>
+#include <string.h>
+#include <unix.h>
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of RTEMS while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ * something calls _Thread_Enable_dispatch which in turns calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.]
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? In unrolled each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in RTEMS. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the
+ * stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
 * If FALSE, then the memory is not allocated during initialization.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "NO_CPU_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out of RTEMS.
+ */
+
+#define CPU_HARDWARE_FP TRUE
+
+/*
+ * Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not think utilize the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
 * necessary.  It will remain in the floating point registers and not be
 * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Internal_threads_Idle_thread_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the grows upward.
+ * If FALSE, then the grows toward smaller addresses.
+ */
+
+#if defined(hppa1_1)
+#define CPU_STACK_GROWS_UP TRUE
+#elif defined(sparc)
+#define CPU_STACK_GROWS_UP FALSE
+#else
+#error "unknown CPU!!"
+#endif
+
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical RTEMS structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactically requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structures to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ */
+
+#define CPU_MODES_INTERRUPT_MASK 0x00000001
+
+#define CPU_NAME "UNIX"
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* may need to put some structures here. */
+
+#if defined(hppa1_1)
+/*
+ * Word indices within a jmp_buf structure
+ */
+
+#ifdef RTEMS_NEWLIB
+#define RP_OFF 6
+#define SP_OFF 2
+#define R3_OFF 10
+#define R4_OFF 11
+#define R5_OFF 12
+#define R6_OFF 13
+#define R7_OFF 14
+#define R8_OFF 15
+#define R9_OFF 16
+#define R10_OFF 17
+#define R11_OFF 18
+#define R12_OFF 19
+#define R13_OFF 20
+#define R14_OFF 21
+#define R15_OFF 22
+#define R16_OFF 23
+#define R17_OFF 24
+#define R18_OFF 25
+#define DP_OFF 26
+#endif
+
+#ifdef RTEMS_UNIXLIB
+#define RP_OFF 0
+#define SP_OFF 1
+#define R3_OFF 4
+#define R4_OFF 5
+#define R5_OFF 6
+#define R6_OFF 7
+#define R7_OFF 8
+#define R8_OFF 9
+#define R9_OFF 10
+#define R10_OFF 11
+#define R11_OFF 12
+#define R12_OFF 13
+#define R13_OFF 14
+#define R14_OFF 15
+#define R15_OFF 16
+#define R16_OFF 17
+#define R17_OFF 18
+#define R18_OFF 19
+#define DP_OFF 20
+#endif
+#endif
+
+#if defined(sparc)
+
+/*
+ * Word indices within a jmp_buf structure
+ */
+
+#ifdef RTEMS_NEWLIB
+#define ADDR_ADJ_OFFSET -8
+#define SP_OFF 0
+#define RP_OFF 1
+#define FP_OFF 2
+#endif
+
+#ifdef RTEMS_UNIXLIB
+#define ADDR_ADJ_OFFSET 0
+#define G0_OFF 0
+#define SP_OFF 1
+#define RP_OFF 2
+#define FP_OFF 3
+#define I7_OFF 4
+#endif
+
+#endif
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinction between the caller/callee saved registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it simply consists of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for RTEMS, it is probably not enough for
+ * a debugger such as gdb. But that is another problem.
+ */
+
+typedef struct {
+ jmp_buf regs;
+} Context_Control;
+
+typedef struct {
+} Context_Control_fp;
+
+typedef struct {
+} CPU_Interrupt_frame;
+
+
+/*
+ * The following table contains the information required to configure
+ * the XXX processor specific parameters.
+ *
+ * NOTE: The interrupt_stack_size field is required if
+ * CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
+ *
+ * The pretasking_hook, predriver_hook, and postdriver_hook,
+ * and the do_zero_of_workspace fields are required on ALL CPUs.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+} rtems_cpu_table;
+
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+EXTERN Context_Control_fp _CPU_Null_fp_context;
+
+/*
+ * On some CPUs, RTEMS supports a software managed interrupt stack.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+EXTERN void (*_CPU_Thread_dispatch_pointer)();
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+/* XXX: if needed, put more variables here */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * The size of a frame on the stack
+ */
+
+#if defined(hppa1_1)
+#define CPU_FRAME_SIZE (32 * 4)
+#elif defined(sparc)
+#define CPU_FRAME_SIZE (112) /* based on disassembled test code */
+#else
+#error "Unknown CPU!!!"
+#endif
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * system initialization thread. Remember that in a multiprocessor
+ * system the system initialization thread becomes the MP server thread.
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by RTEMS.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 64
+
+/*
+ * Should be large enough to run all RTEMS tests. This ensures
+ * that a "reasonable" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (16 * 1024)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT 64
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for an RTEMS critical section. The previous
+ * level is returned in _level.
+ */
+
+#define _CPU_ISR_Disable( _level ) \
+ do { \
+ (_level) = _CPU_Disable_signal(); \
+ } while ( 0 )
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of an RTEMS critical section. The parameter
+ * _level is not modified.
+ */
+
+#define _CPU_ISR_Enable( _level ) \
+ do { \
+ _CPU_Enable_signal( (_level) ); \
+ } while ( 0 )
+
+/*
+ * This temporarily restores the interrupt to _level before immediately
+ * disabling them again. This is used to divide long RTEMS critical
+ * sections into two or more parts. The parameter _level is not
+ * modified.
+ */
+
+#define _CPU_ISR_Flash( _level ) \
+ do { \
+ register _ignored = 0; \
+ _CPU_ISR_Enable( (_level) ); \
+ _CPU_ISR_Disable( _ignored ); \
+ } while ( 0 )
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, m68k has 8 levels 0 - 7, levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+#define _CPU_ISR_Set_level( new_level ) \
+ { \
+ if ( new_level ) \
+ (void) _CPU_Disable_signal(); \
+ else \
+ _CPU_Enable_signal( 0 ); \
+ }
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which a "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
+ }
+
+#define _CPU_Context_save_fp( _fp_context ) \
+ _CPU_Save_float_context( *(Context_Control_fp **)(_fp_context))
+
+#define _CPU_Context_restore_fp( _fp_context ) \
+ _CPU_Restore_float_context( *(Context_Control_fp **)(_fp_context))
+
+extern void _CPU_Context_Initialize(
+ Context_Control *_the_context,
+ unsigned32 *_stack_base,
+ unsigned32 _size,
+ unsigned32 _new_level,
+ proc_ptr *_entry_point
+);
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ _CPU_Fatal_error( _error )
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of variables in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * RTEMS guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ _output = _CPU_ffs( _value )
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 1 << (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * This routine is the CPU dependent IDLE thread body.
+ *
+ * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
+ * is TRUE.
+ */
+
+void _CPU_Internal_threads_Idle_thread_body( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Save_float_context
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Save_float_context(
+ Context_Control_fp *fp_context_ptr
+);
+
+/*
+ * _CPU_Restore_float_context
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Restore_float_context(
+ Context_Control_fp *fp_context_ptr
+);
+
+
+void _CPU_ISR_Set_signal_level(
+ unsigned32 level
+);
+
+unsigned32 _CPU_Disable_signal( void );
+
+void _CPU_Enable_signal(
+ unsigned32 level
+);
+
+void _CPU_Fatal_error(
+ unsigned32 _error
+);
+
+int _CPU_ffs(
+ unsigned32 _value
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to insure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * changes the endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 byte1, byte2, byte3, byte4, swapped;
+
+ byte4 = (value >> 24) & 0xff;
+ byte3 = (value >> 16) & 0xff;
+ byte2 = (value >> 8) & 0xff;
+ byte1 = value & 0xff;
+
+ swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ return( swapped );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/exec/score/cpu/unix/unix.h b/c/src/exec/score/cpu/unix/unix.h
new file mode 100644
index 0000000000..823bbcfb34
--- /dev/null
+++ b/c/src/exec/score/cpu/unix/unix.h
@@ -0,0 +1,90 @@
+/* unix.h
+ *
+ * This include file contains the basic type definitions required by RTEMS
+ * which are typical for a modern UNIX computer using GCC.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#ifndef __UNIX_h
+#define __UNIX_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define unix
+#define REPLACE_THIS_WITH_THE_CPU_FAMILY
+#define REPLACE_THIS_WITH_THE_BSP
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_UNIX_FLAVOR
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the "unix"
+ * family when executing in protected mode. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ */
+
+#if defined(hpux)
+
+#define RTEMS_MODEL_NAME "hpux"
+
+#elif defined(solaris)
+
+#define RTEMS_MODEL_NAME "solaris"
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+#ifndef ASM
+
+/* type definitions */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef char signed8; /* 8-bit signed integer */
+typedef short signed16; /* 16-bit signed integer */
+typedef int signed32; /* 32-bit signed integer */
+typedef long long signed64; /* 64-bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+typedef void ( *unix_isr_entry )( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+#endif
+/* end of include file */
+