Diffstat (limited to 'c/src/exec/score/cpu/hppa1.1')
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/cpu.c       313
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/cpu.h       581
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/cpu_asm.h    73
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/cpu_asm.s   797
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/hppa.h      704
-rw-r--r--  c/src/exec/score/cpu/hppa1.1/rtems.s      53
6 files changed, 2521 insertions, 0 deletions
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu.c b/c/src/exec/score/cpu/hppa1.1/cpu.c
new file mode 100644
index 0000000000..b69a172b4e
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu.c
@@ -0,0 +1,313 @@
+/*
+ * HP PA-RISC Dependent Source
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/fatal.h>
+#include <rtems/isr.h>
+#include <rtems/intr.h>
+#include <rtems/wkspace.h>
+
+rtems_status_code hppa_external_interrupt_initialize(void);
+void hppa_external_interrupt_enable(unsigned32);
+void hppa_external_interrupt_disable(unsigned32);
+void hppa_external_interrupt(unsigned32, CPU_Interrupt_frame *);
+
+/*
+ * Our interrupt handlers take a 2nd argument:
+ * a pointer to a CPU_Interrupt_frame
+ * So we use our own prototype instead of rtems_isr_entry
+ */
+
+typedef rtems_isr ( *hppa_rtems_isr_entry )(
+ rtems_vector_number,
+ CPU_Interrupt_frame *
+ );
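+
+/*
+ * A minimal sketch (not part of the original file) of a handler with
+ * this signature.  The name `my_device_isr' and the vector chosen are
+ * hypothetical; a BSP would install it with rtems_interrupt_catch()
+ * and a cast, just as hppa_external_interrupt_initialize() does below.
+ * Note that hppa_external_interrupt() dismisses the EIRR bit before
+ * dispatching, so the handler only needs to service its device:
+ *
+ *   rtems_isr my_device_isr(rtems_vector_number vector,
+ *                           CPU_Interrupt_frame *iframe)
+ *   {
+ *       // service the device here
+ *   }
+ *
+ *   rtems_isr_entry old;
+ *   rtems_interrupt_catch((rtems_isr_entry) my_device_isr,
+ *                         HPPA_INTERRUPT_EXTERNAL_5, &old);
+ */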
+
+
+/*
+ * who are we? cpu number
+ * Not used by executive proper, just kept (or not) as a convenience
+ * for libcpu and libbsp stuff that wants it.
+ *
+ * Defaults to 0. If the BSP doesn't like it, it can change it.
+ */
+
+int cpu_number; /* from 0; cpu number in a multi cpu system */
+
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ *
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ register unsigned8 *fp_context;
+ unsigned32 iva;
+ unsigned32 iva_table;
+ int i;
+
+ extern void IVA_Table(void);
+
+ if ( cpu_table == NULL )
+ rtems_fatal_error_occurred( RTEMS_NOT_CONFIGURED );
+
+ /*
+ * XXX; need to setup fpsr smarter perhaps
+ */
+
+ fp_context = (unsigned8*) &_CPU_Null_fp_context;
+ for (i=0 ; i<sizeof(Context_Control_fp); i++)
+ *fp_context++ = 0;
+
+ /*
+ * Set _CPU_Default_gr27 here so it will hopefully be the correct
+ * global data pointer for the entire system.
+ */
+
+ asm volatile( "stw %%r27,%0" : "=m" (_CPU_Default_gr27): );
+
+ /*
+ * Stabilize the interrupt stuff
+ */
+
+ (void) hppa_external_interrupt_initialize();
+
+ /*
+ * Set the IVA to point to physical address of the IVA_Table
+ */
+
+ iva_table = (unsigned32) IVA_Table;
+ HPPA_ASM_LPA(0, iva_table, iva);
+ set_iva(iva);
+
+ _CPU_Table = *cpu_table;
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+/*
+ * HPPA has 8 words of instruction space for each vector instead of
+ * an address to jump to.  We put the actual ISR address in
+ * '_ISR_Vector_table'; it is pulled in by the code reached through
+ * the vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ *old_handler = _ISR_Vector_table[vector];
+
+ _ISR_Vector_table[vector] = new_handler;
+
+ if (vector >= HPPA_INTERRUPT_EXTERNAL_BASE)
+ {
+ unsigned32 external_vector;
+
+ external_vector = vector - HPPA_INTERRUPT_EXTERNAL_BASE;
+ if (new_handler)
+ hppa_external_interrupt_enable(external_vector);
+ else
+ /* XXX this can never happen due to _ISR_Is_valid_user_handler */
+ hppa_external_interrupt_disable(external_vector);
+ }
+}
+
+
+/*
+ * Support for external and spurious interrupts on HPPA
+ *
+ * TODO:
+ * delete interrupt.c etc.
+ * Count interrupts
+ * make sure interrupts disabled properly
+ * should handler check again for more interrupts before exit?
+ * How to enable interrupts from an interrupt handler?
+ * Make sure there is an entry for everything in ISR_Vector_Table
+ */
+
+#define DISMISS(mask) set_eirr(mask)
+#define DISABLE(mask) set_eiem(get_eiem() & ~(mask))
+#define ENABLE(mask) set_eiem(get_eiem() | (mask))
+#define VECTOR_TO_MASK(v) (1 << (31 - (v)))
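+
+/*
+ * Follows directly from the macro above: external vector 0 maps to the
+ * most significant EIEM/EIRR bit, so VECTOR_TO_MASK(0) == 0x80000000
+ * and VECTOR_TO_MASK(31) == 0x00000001.
+ */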
+
+/*
+ * Init the external interrupt scheme
+ * called by bsp_start()
+ */
+
+rtems_status_code
+hppa_external_interrupt_initialize(void)
+{
+ rtems_isr_entry ignore;
+
+ /* mark them all unused */
+
+ DISABLE(~0);
+ DISMISS(~0);
+
+ /* install the external interrupt handler */
+ rtems_interrupt_catch((rtems_isr_entry) hppa_external_interrupt,
+ HPPA_INTERRUPT_EXTERNAL_INTERRUPT, &ignore) ;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+/*
+ * Enable a specific external interrupt
+ */
+
+void
+hppa_external_interrupt_enable(unsigned32 v)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+ ENABLE(VECTOR_TO_MASK(v));
+ _CPU_ISR_Enable(isrlevel);
+}
+
+/*
+ * Does not clear or otherwise affect any pending requests
+ */
+
+void
+hppa_external_interrupt_disable(unsigned32 v)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+ DISABLE(VECTOR_TO_MASK(v));
+ _CPU_ISR_Enable(isrlevel);
+}
+
+void
+hppa_external_interrupt_spurious_handler(unsigned32 vector,
+ CPU_Interrupt_frame *iframe)
+{
+/* XXX should not be printing :)
+ printf("spurious external interrupt: %d at pc 0x%x; disabling\n",
+ vector, iframe->Interrupt.pcoqfront);
+*/
+ DISMISS(VECTOR_TO_MASK(vector));
+ DISABLE(VECTOR_TO_MASK(vector));
+}
+
+void
+hppa_external_interrupt_report_spurious(unsigned32 spurious,
+ CPU_Interrupt_frame *iframe)
+{
+ int v;
+ for (v=0; v < HPPA_EXTERNAL_INTERRUPTS; v++)
+ if (VECTOR_TO_MASK(v) & spurious)
+ hppa_external_interrupt_spurious_handler(v, iframe);
+ DISMISS(spurious);
+}
+
+
+/*
+ * External interrupt handler.
+ * This is installed as cpu interrupt handler for
+ * HPPA_INTERRUPT_EXTERNAL_INTERRUPT. It vectors out to
+ * specific external interrupt handlers.
+ */
+
+void
+hppa_external_interrupt(unsigned32 vector,
+ CPU_Interrupt_frame *iframe)
+{
+ unsigned32 mask;
+ unsigned32 *vp, *max_vp;
+ unsigned32 external_vector;
+ unsigned32 global_vector;
+ hppa_rtems_isr_entry handler;
+
+ max_vp = &_CPU_Table.external_interrupt[_CPU_Table.external_interrupts];
+ while ( (mask = (get_eirr() & get_eiem())) )
+ {
+ for (vp = _CPU_Table.external_interrupt; (vp < max_vp) && mask; vp++)
+ {
+ unsigned32 m;
+
+ external_vector = *vp;
+ global_vector = external_vector + HPPA_INTERRUPT_EXTERNAL_BASE;
+ m = VECTOR_TO_MASK(external_vector);
+ handler = (hppa_rtems_isr_entry) _ISR_Vector_table[global_vector];
+ if ((m & mask) && handler)
+ {
+ DISMISS(m);
+ mask &= ~m;
+ (*handler)(global_vector, iframe);
+ }
+ }
+
+ if (mask != 0) {
+ if ( _CPU_Table.spurious_handler )
+ (*((hppa_rtems_isr_entry) _CPU_Table.spurious_handler))(
+ mask,
+ iframe
+ );
+ else
+ hppa_external_interrupt_report_spurious(mask, iframe);
+ }
+ }
+}
+
+/*
+ * Halt the system.
+ * Called by the _CPU_Fatal_halt macro
+ *
+ * XXX
+ * Later on, this will allow us to return to the prom.
+ * For now, we just ignore 'type_of_halt'
+ */
+
+void
+hppa_cpu_halt(unsigned32 type_of_halt,
+ unsigned32 the_error)
+{
+ unsigned32 isrlevel;
+
+ _CPU_ISR_Disable(isrlevel);
+
+ asm volatile( "copy %0,%%r1" : : "r" (the_error) );
+ HPPA_ASM_BREAK(1, 0);
+}
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu.h b/c/src/exec/score/cpu/hppa1.1/cpu.h
new file mode 100644
index 0000000000..3e8f31fcf0
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu.h
@@ -0,0 +1,581 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the HP
+ * PA-RISC processor (Level 1.1).
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ *
+ * Note:
+ * This file is included by both C and assembler code ( -DASM )
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hppa.h> /* pick up machine definitions */
+
+/* conditional compilation parameters */
+
+#define CPU_INLINE_ENABLE_DISPATCH FALSE
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * RTEMS manages an interrupt stack in software for the HPPA.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * HPPA has hardware FP, it is assumed to exist by GCC so all tasks
+ * may implicitly use it (especially for integer multiplies). Because
+ * the FP context is technically part of the basic integer context
+ * on this CPU, we cannot use the deferred FP context switch algorithm.
+ */
+
+#define CPU_HARDWARE_FP TRUE
+#define CPU_ALL_TASKS_ARE_FP TRUE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH FALSE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP TRUE
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((__aligned__ (32)))
+
+/* constants */
+
+#define CPU_MODES_INTERRUPT_LEVEL 0x00000001 /* interrupt level in mode */
+#define CPU_MODES_INTERRUPT_MASK 0x00000001 /* interrupt level in mode */
+
+/*
+ * PSW constants
+ */
+
+#define CPU_PSW_BASE (HPPA_PSW_C | HPPA_PSW_Q | HPPA_PSW_P | HPPA_PSW_D)
+#define CPU_PSW_INTERRUPTS_ON (CPU_PSW_BASE | HPPA_PSW_I)
+#define CPU_PSW_INTERRUPTS_OFF (CPU_PSW_BASE)
+
+#define CPU_PSW_DEFAULT CPU_PSW_BASE
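+
+/*
+ * For reference, with the HPPA_PSW_* values from <hppa.h> these work
+ * out to (a worked example, not used by the code itself):
+ *   CPU_PSW_BASE           == 0x0004000E   (C | Q | P | D)
+ *   CPU_PSW_INTERRUPTS_ON  == 0x0004000F   (base | I)
+ *   CPU_PSW_INTERRUPTS_OFF == 0x0004000E
+ */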
+
+
+#ifndef ASM
+
+/*
+ * Contexts
+ *
+ * This means we have the following context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ *
+ * The PA-RISC is very fast so the expense of saving an extra register
+ * or two is not of great concern at the present. So we are not making
+ * a distinction between what is saved during a task switch and what is
+ * saved at each interrupt. Plus saving the entire context should make
+ * it easier to make gdb aware of RTEMS tasks.
+ */
+
+typedef struct {
+ unsigned32 flags; /* whatever */
+ unsigned32 gr1; /* scratch -- caller saves */
+ unsigned32 gr2; /* RP -- return pointer */
+ unsigned32 gr3; /* scratch -- callee saves */
+ unsigned32 gr4; /* scratch -- callee saves */
+ unsigned32 gr5; /* scratch -- callee saves */
+ unsigned32 gr6; /* scratch -- callee saves */
+ unsigned32 gr7; /* scratch -- callee saves */
+ unsigned32 gr8; /* scratch -- callee saves */
+ unsigned32 gr9; /* scratch -- callee saves */
+ unsigned32 gr10; /* scratch -- callee saves */
+ unsigned32 gr11; /* scratch -- callee saves */
+ unsigned32 gr12; /* scratch -- callee saves */
+ unsigned32 gr13; /* scratch -- callee saves */
+ unsigned32 gr14; /* scratch -- callee saves */
+ unsigned32 gr15; /* scratch -- callee saves */
+ unsigned32 gr16; /* scratch -- callee saves */
+ unsigned32 gr17; /* scratch -- callee saves */
+ unsigned32 gr18; /* scratch -- callee saves */
+ unsigned32 gr19; /* scratch -- caller saves */
+ unsigned32 gr20; /* scratch -- caller saves */
+ unsigned32 gr21; /* scratch -- caller saves */
+ unsigned32 gr22; /* scratch -- caller saves */
+ unsigned32 gr23; /* argument 3 */
+ unsigned32 gr24; /* argument 2 */
+ unsigned32 gr25; /* argument 1 */
+ unsigned32 gr26; /* argument 0 */
+ unsigned32 gr27; /* DP -- global data pointer */
+ unsigned32 gr28; /* return values -- caller saves */
+ unsigned32 gr29; /* return values -- caller saves */
+ unsigned32 sp; /* gr30 */
+ unsigned32 gr31;
+
+ /* Various control registers */
+
+ unsigned32 sar; /* cr11 */
+ unsigned32 ipsw; /* cr22; full 32 bits of psw */
+ unsigned32 iir; /* cr19; interrupt instruction register */
+ unsigned32 ior; /* cr21; interrupt offset register */
+ unsigned32 isr; /* cr20; interrupt space register (not used) */
+ unsigned32 pcoqfront; /* cr18; front queue offset */
+ unsigned32 pcoqback; /* cr18; back queue offset */
+ unsigned32 pcsqfront; /* cr17; front queue space (not used) */
+ unsigned32 pcsqback; /* cr17; back queue space (not used) */
+ unsigned32 itimer; /* cr16; itimer value */
+
+} Context_Control;
+
+
+/* Must be double word aligned.
+ * This will be ok since our allocator returns 8 byte aligned chunks
+ */
+
+typedef struct {
+ double fr0; /* status */
+ double fr1; /* exception information */
+ double fr2; /* exception information */
+ double fr3; /* exception information */
+ double fr4; /* argument */
+ double fr5; /* argument */
+ double fr6; /* argument */
+ double fr7; /* argument */
+ double fr8; /* scratch -- caller saves */
+ double fr9; /* scratch -- caller saves */
+ double fr10; /* scratch -- caller saves */
+ double fr11; /* scratch -- caller saves */
+ double fr12; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr13; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr14; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr15; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr16; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr17; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr18; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr19; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr20; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr21; /* callee saves -- (PA-RISC 1.1 CPUs) */
+ double fr22; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr23; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr24; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr25; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr26; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr27; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr28; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr29; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr30; /* caller saves -- (PA-RISC 1.1 CPUs) */
+ double fr31; /* caller saves -- (PA-RISC 1.1 CPUs) */
+} Context_Control_fp;
+
+/*
+ * The following structure defines the set of information saved
+ * on the current stack by RTEMS upon receipt of each interrupt.
+ */
+
+typedef struct {
+ Context_Control Integer;
+ Context_Control_fp Floating_Point;
+} CPU_Interrupt_frame;
+
+/*
+ * The following table contains the information required to configure
+ * the HPPA specific parameters.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+
+ /* HPPA simulator is slow enough; don't waste time
+ * zeroing memory that is already zero
+ */
+ boolean do_zero_of_workspace;
+
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+
+ /*
+ * Control of external interrupts.
+ * We keep a table of external vector numbers (0 - 31)
+ * The table is sorted by priority: the first entry in the
+ * table is the vector with the highest priority.
+ * The handler function is stored in _ISR_Vector_table[] and
+ * is set by rtems_interrupt_catch()
+ */
+
+ unsigned32 external_interrupts; /* # of external interrupts we use */
+ unsigned32 external_interrupt[HPPA_EXTERNAL_INTERRUPTS];
+
+ void (*spurious_handler)( unsigned32 mask, CPU_Interrupt_frame *);
+
+ unsigned32 itimer_clicks_per_microsecond; /* for use by Clock driver */
+} rtems_cpu_table;
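+
+/*
+ * A minimal sketch of how a BSP might fill in the external interrupt
+ * portion of this table.  The vector numbers and priority ordering
+ * shown are hypothetical:
+ *
+ *   rtems_cpu_table Cpu_table;
+ *
+ *   Cpu_table.external_interrupts   = 2;
+ *   Cpu_table.external_interrupt[0] = 0;  // interval timer; polled first
+ *   Cpu_table.external_interrupt[1] = 5;  // a device at lower priority
+ *   Cpu_table.spurious_handler      = 0;  // fall back to default report
+ */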
+
+/* variables */
+
+EXTERN Context_Control_fp _CPU_Null_fp_context;
+EXTERN unsigned32 _CPU_Default_gr27;
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+#endif /* ! ASM */
+
+/*
+ * context size area for floating point
+ */
+
+#ifndef ASM
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+#endif
+
+/*
+ * size of a frame on the stack
+ */
+
+#define CPU_FRAME_SIZE (16 * 4)
+
+/*
+ * (Optional) # of bytes for libmisc/stackchk to check
+ * If not specified, then it defaults to something reasonable
+ * for most architectures.
+ */
+
+#define CPU_STACK_CHECK_SIZE (CPU_FRAME_SIZE * 2)
+
+/*
+ * extra stack required by system initialization thread
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
+
+/*
+ * HPPA has 32 on-chip traps and interrupts, followed by 32 external
+ * interrupts.  RTEMS (_ISR_Vector_table) is aware of the first 64;
+ * a BSP may reserve more.
+ *
+ * External interrupts all come through the same trap vector (4).
+ * The external handler is the only code aware of the other
+ * interrupt sources (genie, rhino, etc.)
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS (HPPA_INTERRUPT_MAX)
+
+/*
+ * Don't be chintzy here; we don't want to debug these problems
+ * Some of the tests eat almost 4k.
+ * Plus, the HPPA always allocates chunks of 64 bytes for stack
+ * growth.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (8 * 1024)
+
+/*
+ * HPPA doubles must be on an 8 byte boundary
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * just follow the basic HPPA alignment for the heap and partition
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * HPPA stack is best when 64 byte aligned.
+ */
+
+#define CPU_STACK_ALIGNMENT 64
+
+#ifndef ASM
+
+/* macros */
+
+/*
+ * ISR handler macros
+ *
+ * These macros perform the following functions:
+ * + disable all maskable CPU interrupts
+ * + restore previous interrupt level (enable)
+ * + temporarily restore interrupts (flash)
+ * + set a particular level
+ */
+
+/* Disable interrupts; returning the previous level in _isr_cookie */
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ do { \
+ HPPA_ASM_RSM(HPPA_PSW_I, _isr_cookie); \
+ } while(0)
+
+/* Enable interrupts back to the level returned by _CPU_ISR_Disable;
+ * does not modify '_isr_cookie' */
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ { \
+ HPPA_ASM_MTSM( _isr_cookie ); \
+ }
+
+/* restore, then disable interrupts; does not change level */
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ { \
+ register int _ignore; \
+ _CPU_ISR_Enable( _isr_cookie ); \
+ _CPU_ISR_Disable( _ignore ); \
+ }
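+
+/*
+ * Typical use of the disable/enable pair above, as in
+ * hppa_external_interrupt_enable() in cpu.c (a sketch; the cookie is
+ * any unsigned32):
+ *
+ *   unsigned32 level;
+ *
+ *   _CPU_ISR_Disable( level );
+ *   // ... short critical section ...
+ *   _CPU_ISR_Enable( level );
+ */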
+
+/*
+ * Interrupt task levels
+ *
+ * Future scheme proposal
+ * level will be an index into an array.
+ * Each entry of array will be the interrupt bits
+ * enabled for that level. There will be 32 bits of external
+ * interrupts (to be placed in EIEM) and some (optional) bsp
+ * specific bits
+ *
+ * For pixel flow this *may* mean something like:
+ * level 0: all interrupts enabled (external + rhino)
+ * level 1: rhino disabled
+ * level 2: all io interrupts disabled (timer still enabled)
+ * level 7: *ALL* disabled (timer disabled)
+ */
+
+/* set interrupts on or off; does not return new level */
+#define _CPU_ISR_Set_level( new_level ) \
+ { \
+ volatile int ignore; \
+ if ( new_level ) HPPA_ASM_RSM(HPPA_PSW_I, ignore); \
+ else HPPA_ASM_SSM(HPPA_PSW_I, ignore); \
+ }
+
+/* end of ISR handler macros */
+
+/*
+ * Context handler macros
+ *
+ * These macros perform the following functions:
+ * + initialize a context area
+ * + restart the current thread
+ * + calculate the initial pointer into a FP context area
+ * + initialize an FP context area
+ *
+ * HPPA port adds two macros which hide the "indirectness" of the
+ * pointer passed to the save/restore FP context assembly routines.
+ */
+
+#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
+ _new_level, _entry_point ) \
+ do { \
+ unsigned32 _stack; \
+ \
+ (_the_context)->flags = 0xfeedf00d; \
+ (_the_context)->pcoqfront = (unsigned32)(_entry_point); \
+ (_the_context)->pcoqback = (unsigned32)(_entry_point) + 4; \
+ (_the_context)->pcsqfront = 0; \
+ (_the_context)->pcsqback = 0; \
+ if ( (_new_level) ) \
+ (_the_context)->ipsw = CPU_PSW_INTERRUPTS_OFF; \
+ else \
+ (_the_context)->ipsw = CPU_PSW_INTERRUPTS_ON; \
+ \
+ _stack = ((unsigned32)(_stack_base) + (CPU_STACK_ALIGNMENT - 1)); \
+ _stack &= ~(CPU_STACK_ALIGNMENT - 1); \
+ if ((_stack - (unsigned32) (_stack_base)) < CPU_FRAME_SIZE) \
+ _stack += CPU_FRAME_SIZE; \
+ \
+ (_the_context)->sp = (_stack); \
+ (_the_context)->gr27 = _CPU_Default_gr27; \
+ } while (0)
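+
+/*
+ * Worked example of the stack fixup above: with a hypothetical
+ * _stack_base of 0x1004, _stack rounds up to 0x1040; the 0x3C bytes
+ * lost to rounding are less than CPU_FRAME_SIZE (64), so another
+ * CPU_FRAME_SIZE is added, leaving sp == 0x1080.  This guarantees at
+ * least one full frame between _stack_base and the initial sp.
+ */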
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ do { \
+ _CPU_Context_restore( (_the_context) ); \
+ } while (0)
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ do { \
+ *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context;\
+ } while(0)
+
+#define _CPU_Context_save_fp( _fp_context ) \
+ _CPU_Save_float_context( *(Context_Control_fp **)(_fp_context) )
+
+#define _CPU_Context_restore_fp( _fp_context ) \
+ _CPU_Restore_float_context( *(Context_Control_fp **)(_fp_context) )
+
+/* end of Context handler macros */
+
+/*
+ * Fatal Error manager macros
+ *
+ * These macros perform the following functions:
+ * + disable interrupts and halt the CPU
+ */
+
+void hppa_cpu_halt(unsigned32 type_of_halt, unsigned32 the_error);
+#define _CPU_Fatal_halt( _error ) \
+ hppa_cpu_halt(0, _error)
+
+/* end of Fatal Error manager macros */
+
+/*
+ * Bitfield handler macros
+ *
+ * These macros perform the following functions:
+ * + scan for the highest numbered (MSB) set in a 16 bit bitfield
+ *
+ * NOTE:
+ *
+ * The HPPA does not have a scan instruction. This functionality
+ * is implemented in software.
+ */
+
+int hppa_rtems_ffs(unsigned int value);
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ _output = hppa_rtems_ffs(_value)
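+
+/*
+ * Per the notes with hppa_rtems_ffs in cpu_asm.s, the scan counts from
+ * the least significant side: hppa_rtems_ffs(0x8001) returns 0 (the
+ * LSB) and hppa_rtems_ffs(0x8000) returns 15.  The zero case is not
+ * handled.
+ */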
+
+/* end of Bitfield handler macros */
+
+/*
+ * Priority handler macros
+ *
+ * These macros perform the following functions:
+ * + return a mask with the bit for this major/minor portion of
+ * of thread priority set.
+ * + translate the bit number returned by "Bitfield_find_first_bit"
+ * into an index into the thread ready chain bit maps
+ *
+ * Note: 255 is the lowest priority
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 1 << (_bit_number) )
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (_priority)
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner and avoid stack conflicts.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Save_float_context
+ *
+ * This routine saves the floating point context passed to it.
+ *
+ * NOTE: _CPU_Context_save_fp is implemented as a macro on the HPPA
+ * which dereferences the pointer before calling this.
+ */
+
+void _CPU_Save_float_context(
+ Context_Control_fp *fp_context
+);
+
+/*
+ * _CPU_Restore_float_context
+ *
+ * This routine restores the floating point context passed to it.
+ *
+ * NOTE: _CPU_Context_restore_fp is implemented as a macro on the HPPA
+ * which dereferences the pointer before calling this.
+ */
+
+void _CPU_Restore_float_context(
+ Context_Control_fp *fp_context
+);
+
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static so it can be referenced indirectly.
+ */
+
+static inline unsigned int
+CPU_swap_u32(unsigned32 value)
+{
+ unsigned32 swapped;
+
+ HPPA_ASM_SWAPBYTES(value, swapped);
+
+ return( swapped );
+}
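+
+/*
+ * Example: CPU_swap_u32(0x12345678) returns 0x78563412.
+ */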
+
+/*
+ * Unused; I think it should go away
+ */
+
+#if 0
+#define enable_tracing()
+#endif
+
+#endif /* ! ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! __CPU_h */
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu_asm.h b/c/src/exec/score/cpu/hppa1.1/cpu_asm.h
new file mode 100644
index 0000000000..8e480c2a5c
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu_asm.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1990,1991 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the Center
+ * for Software Science at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Utah $Hdr: asm.h 1.6 91/12/03$
+ *
+ * RTEMS: $Id$
+ */
+
+/*
+ * Hardware Space Registers
+ */
+sr0 .reg %sr0
+sr1 .reg %sr1
+sr2 .reg %sr2
+sr3 .reg %sr3
+sr4 .reg %sr4
+sr5 .reg %sr5
+sr6 .reg %sr6
+sr7 .reg %sr7
+
+/*
+ * Control register aliases
+ */
+
+rctr .reg %cr0
+pidr1 .reg %cr8
+pidr2 .reg %cr9
+ccr .reg %cr10
+sar .reg %cr11
+pidr3 .reg %cr12
+pidr4 .reg %cr13
+iva .reg %cr14
+eiem .reg %cr15
+itmr .reg %cr16
+pcsq .reg %cr17
+pcoq .reg %cr18
+iir .reg %cr19
+isr .reg %cr20
+ior .reg %cr21
+ipsw .reg %cr22
+eirr .reg %cr23
+
+/*
+ * Calling Convention
+ */
+rp .reg %r2
+arg3 .reg %r23
+arg2 .reg %r24
+arg1 .reg %r25
+arg0 .reg %r26
+dp .reg %r27
+ret0 .reg %r28
+ret1 .reg %r29
+sl .reg %r29
+sp .reg %r30
+
+
diff --git a/c/src/exec/score/cpu/hppa1.1/cpu_asm.s b/c/src/exec/score/cpu/hppa1.1/cpu_asm.s
new file mode 100644
index 0000000000..43a5bb2499
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/cpu_asm.s
@@ -0,0 +1,797 @@
+# @(#)cpu_asm.S 1.5 - 95/04/24
+#
+#
+# TODO:
+# Context_switch needs to only save callee save registers
+# I think this means we can skip: r1, r2, r19-29, r31
+# Ref: p 3-2 of Procedure Calling Conventions Manual
+# This should be #ifndef DEBUG so that debugger has
+# accurate visibility into all registers
+#
+# This file contains the assembly code for the HPPA implementation
+# of RTEMS.
+#
+# COPYRIGHT (c) 1994,95 by Division Incorporated
+#
+# To anyone who acknowledges that this file is provided "AS IS"
+# without any express or implied warranty:
+# permission to use, copy, modify, and distribute this file
+# for any purpose is hereby granted without fee, provided that
+# the above copyright notice and this notice appears in all
+# copies, and that the name of Division Incorporated not be
+# used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# Division Incorporated makes no representations about the
+# suitability of this software for any purpose.
+#
+# $Id$
+#
+
+#include <rtems/hppa.h>
+#include <rtems/cpu_asm.h>
+#include <rtems/cpu.h>
+
+#include <offsets.h>
+
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+#
+# Special register usage for context switch and interrupts
+# Stay away from %cr28 which is used for TLB misses on 72000
+#
+
+isr_arg0 .reg %cr24
+isr_r9 .reg %cr25
+
+#
+# Interrupt stack frame looks like this
+#
+# offset item
+# -----------------------------------------------------------------
+# INTEGER_CONTEXT_OFFSET Context_Control
+# FP_CONTEXT_OFFSET Context_Control_fp
+#
+# It is padded out to a multiple of 64
+#
+
+
+# void __Generic_ISR_Handler()
+#
+# This routine provides the RTEMS interrupt management.
+#
+# NOTE:
+# Upon entry, the stack will contain a stack frame back to the
+# interrupted task.  If dispatching is enabled, this is the
+# outermost interrupt, and a context switch is necessary (or
+# the current task has signals), then the stack is set up to
+# transfer control to the interrupt dispatcher.
+#
+#
+# We jump here from the interrupt vector.
+# The hardware has done some stuff for us:
+# PSW saved in IPSW
+# PSW set to 0
+# PSW[E] set to default (0)
+# PSW[M] set to 1 iff this is HPMC
+#
+# IIA queue is frozen (since PSW[Q] is now 0)
+# privilege level promoted to 0
+# IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap
+# registers GR 1,8,9,16,17,24,25 copied to shadow regs
+# SHR 0 1 2 3 4 5 6
+#
+# Our vector stub did the following
+# placed the vector number in r1
+#
+# stub
+# r1 <- vector number
+# save ipsw under rock
+# ipsw = ipsw & ~1 -- disable ints
+# save qregs under rock
+# qra = _Generic_ISR_handler
+# rfi
+#
+################################################
+
+# Distinct Interrupt Entry Points
+#
+# The following macro and the 32 instantiations of the macro
+# are necessary to determine which interrupt vector occurred.
+# The following macro allows a unique entry point to be defined
+# for each vector.
+#
+# r9 was loaded with the vector before branching here
+# scratch registers available: gr1, gr8, gr9, gr16, gr17, gr24
+#
+# NOTE:
+# .align 32 does not seem to work in the continuation below
+# so just have to count 8 instructions
+#
+# NOTE:
+# this whole scheme needs to be rethought for TLB traps which
+# have requirements about what tlb faults they can incur.
+# ref: TLB Operation Requirements in 1.1 arch book
+
+#define THANDLER(vector) \
+ mtctl %r9, isr_r9 ! \
+ b _Generic_ISR_Handler! \
+ ldi vector, %r9! \
+ nop ! \
+ nop ! \
+ nop ! \
+ nop ! \
+ nop
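+
+# For example, THANDLER(4) expands to the following 8-instruction stub
+# (shown here for illustration only):
+#
+# mtctl %r9, isr_r9 ; stash r9 in a scratch control register
+# b _Generic_ISR_Handler ; branch to the common handler...
+# ldi 4, %r9 ; ...loading the vector number in the delay slot
+# nop ; five nops pad the stub to 8 instructions
+#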
+
+ .align 4096
+ .EXPORT IVA_Table,ENTRY,PRIV_LEV=0
+IVA_Table:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+ THANDLER(0) /* unused */
+
+ THANDLER(HPPA_INTERRUPT_HIGH_PRIORITY_MACHINE_CHECK)
+
+ THANDLER(HPPA_INTERRUPT_POWER_FAIL)
+
+ THANDLER(HPPA_INTERRUPT_RECOVERY_COUNTER)
+
+ THANDLER(HPPA_INTERRUPT_EXTERNAL_INTERRUPT)
+
+ THANDLER(HPPA_INTERRUPT_LOW_PRIORITY_MACHINE_CHECK)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_MEMORY_PROTECTION)
+
+ THANDLER(HPPA_INTERRUPT_ILLEGAL_INSTRUCTION)
+
+ THANDLER(HPPA_INTERRUPT_BREAK_INSTRUCTION)
+
+ THANDLER(HPPA_INTERRUPT_PRIVILEGED_OPERATION)
+
+ THANDLER(HPPA_INTERRUPT_PRIVILEGED_REGISTER)
+
+ THANDLER(HPPA_INTERRUPT_OVERFLOW)
+
+ THANDLER(HPPA_INTERRUPT_CONDITIONAL)
+
+ THANDLER(HPPA_INTERRUPT_ASSIST_EXCEPTION)
+
+ THANDLER(HPPA_INTERRUPT_DATA_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_NON_ACCESS_INSTRUCTION_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_NON_ACCESS_DATA_TLB_MISS)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_PROTECTION)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_BREAK)
+
+ THANDLER(HPPA_INTERRUPT_TLB_DIRTY_BIT)
+
+ THANDLER(HPPA_INTERRUPT_PAGE_REFERENCE)
+
+ THANDLER(HPPA_INTERRUPT_ASSIST_EMULATION)
+
+ THANDLER(HPPA_INTERRUPT_HIGHER_PRIVILEGE_TRANSFER)
+
+ THANDLER(HPPA_INTERRUPT_LOWER_PRIVILEGE_TRANSFER)
+
+ THANDLER(HPPA_INTERRUPT_TAKEN_BRANCH)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_ACCESS_RIGHTS)
+
+ THANDLER(HPPA_INTERRUPT_DATA_MEMORY_PROTECTION_ID)
+
+ THANDLER(HPPA_INTERRUPT_UNALIGNED_DATA_REFERENCE)
+
+ THANDLER(HPPA_INTERRUPT_PERFORMANCE_MONITOR)
+
+ THANDLER(HPPA_INTERRUPT_INSTRUCTION_DEBUG)
+
+ THANDLER(HPPA_INTERRUPT_DATA_DEBUG)
+
+ .EXIT
+ .PROCEND
+
+ .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0
+_Generic_ISR_Handler:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+# Turn on the D bit in psw so we can start saving stuff on stack
+# (interrupt context pieces that need to be saved before the RFI)
+
+ ssm HPPA_PSW_D, %r0
+ mtctl arg0, isr_arg0
+
+# save interrupt state
+ mfctl ipsw, arg0
+ stw arg0, IPSW_OFFSET(sp)
+
+ mfctl iir, arg0
+ stw arg0, IIR_OFFSET(sp)
+
+ mfctl ior, arg0
+ stw arg0, IOR_OFFSET(sp)
+
+ mfctl pcoq, arg0
+ stw arg0, PCOQFRONT_OFFSET(sp)
+
+ mtctl %r0, pcoq
+ mfctl pcoq, arg0
+ stw arg0, PCOQBACK_OFFSET(sp)
+
+ mfctl %sar, arg0
+ stw arg0, SAR_OFFSET(sp)
+
+# Prepare to re-enter virtual mode
+# We need Q in case the interrupt handler enables interrupts
+#
+
+ ldil L%CPU_PSW_DEFAULT, arg0
+ ldo R%CPU_PSW_DEFAULT(arg0), arg0
+ mtctl arg0, ipsw
+
+# Now jump to "rest_of_isr_handler" with the rfi
+# We are assuming the space queues are all correct already
+
+ ldil L%rest_of_isr_handler, arg0
+ ldo R%rest_of_isr_handler(arg0), arg0
+ mtctl arg0, pcoq
+ ldo 4(arg0), arg0
+ mtctl arg0, pcoq
+
+ rfi
+ nop
+
+# At this point we are back in virtual mode and all our
+# normal addressing is once again ok.
+
+rest_of_isr_handler:
+
+#
+# Build an interrupt frame to hold the contexts we will need.
+# We have already saved the interrupt items on the stack
+
+# At this point the following registers are damaged wrt the interrupt
+# reg current value saved value
+# ------------------------------------------------
+# arg0 scratch isr_arg0 (ctl)
+# r9 vector number isr_r9 (ctl)
+#
+# Point to beginning of integer context and
+# save the integer context
+ stw %r1,R1_OFFSET(sp)
+ stw %r2,R2_OFFSET(sp)
+ stw %r3,R3_OFFSET(sp)
+ stw %r4,R4_OFFSET(sp)
+ stw %r5,R5_OFFSET(sp)
+ stw %r6,R6_OFFSET(sp)
+ stw %r7,R7_OFFSET(sp)
+ stw %r8,R8_OFFSET(sp)
+ stw %r9,R9_OFFSET(sp)
+ stw %r10,R10_OFFSET(sp)
+ stw %r11,R11_OFFSET(sp)
+ stw %r12,R12_OFFSET(sp)
+ stw %r13,R13_OFFSET(sp)
+ stw %r14,R14_OFFSET(sp)
+ stw %r15,R15_OFFSET(sp)
+ stw %r16,R16_OFFSET(sp)
+ stw %r17,R17_OFFSET(sp)
+ stw %r18,R18_OFFSET(sp)
+ stw %r19,R19_OFFSET(sp)
+ stw %r20,R20_OFFSET(sp)
+ stw %r21,R21_OFFSET(sp)
+ stw %r22,R22_OFFSET(sp)
+ stw %r23,R23_OFFSET(sp)
+ stw %r24,R24_OFFSET(sp)
+ stw %r25,R25_OFFSET(sp)
+ stw %r26,R26_OFFSET(sp)
+ stw %r27,R27_OFFSET(sp)
+ stw %r28,R28_OFFSET(sp)
+ stw %r29,R29_OFFSET(sp)
+ stw %r30,R30_OFFSET(sp)
+ stw %r31,R31_OFFSET(sp)
+
+# Now most registers are available since they have been saved
+#
+# The following items are currently wrong in the integer context
+# reg current value saved value
+# ------------------------------------------------
+# arg0 scratch isr_arg0 (ctl)
+# r9 vector number isr_r9 (ctl)
+#
+# Fix them
+
+ mfctl isr_arg0,%r3
+ stw %r3,ARG0_OFFSET(sp)
+
+ mfctl isr_r9,%r3
+ stw %r3,R9_OFFSET(sp)
+
+#
+# At this point we are done with isr_arg0, and isr_r9 control registers
+#
+
+
+# Point to beginning of float context and
+# save the floating point context -- doing whatever patches are necessary
+ .call ARGW0=GR
+ bl _CPU_Save_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(sp),arg0
+
+# save the ptr to interrupt frame as an argument for the interrupt handler
+ copy sp, arg1
+
+# Advance the frame to point beyond all interrupt contexts (integer & float)
+# this also includes the pad to align to a 64 byte stack boundary
+ ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp
+
+# r3 -- &_ISR_Nest_level
+# r5 -- value _ISR_Nest_level
+# r4 -- &_Thread_Dispatch_disable_level
+# r6 -- value _Thread_Dispatch_disable_level
+# r9 -- vector number
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldo R%_ISR_Nest_level(%r3),%r3
+ ldw 0(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldo R%_Thread_Dispatch_disable_level(%r4),%r4
+ ldw 0(%r4),%r6
+
+# increment interrupt nest level counter. If outermost interrupt
+# switch the stack and squirrel away the previous sp.
+ addi 1,%r5,%r5
+ stw %r5, 0(%r3)
+
+# compute and save new stack (with frame)
+# just in case we are nested -- simpler this way
+ comibf,= 1,%r5,stack_done
+ ldo 128(sp),%r7
+
+#
+# Switch to interrupt stack allocated by the interrupt manager (intr.c)
+#
+ .import _CPU_Interrupt_stack_low,data
+ ldil L%_CPU_Interrupt_stack_low,%r7
+ ldw R%_CPU_Interrupt_stack_low(%r7),%r7
+ ldo 128(%r7),%r7
+
+stack_done:
+# save our current stack pointer where the "old sp" is supposed to be
+ stw sp, -4(%r7)
+# and switch stacks (or advance old stack in nested case)
+ copy %r7, sp
+
+# increment the dispatch disable level counter.
+ addi 1,%r6,%r6
+ stw %r6, 0(%r4)
+
+# load address of user handler
+ .import _ISR_Vector_table,data
+ ldil L%_ISR_Vector_table,%r8
+ ldo R%_ISR_Vector_table(%r8),%r8
+ ldwx,s %r9(%r8),%r8
+
+# invoke user interrupt handler
+# Interrupts are currently disabled, as per RTEMS convention
+# The handler has the option of re-enabling interrupts
+# NOTE: can not use 'bl' since it uses "pc-relative" addressing
+# and we are using a hard coded address from a table
+# So... we fudge r2 ourselves (ala dynacall)
+#
+ copy %r9, %r26
+ .call ARGW0=GR, ARGW1=GR
+ blr %r0, rp
+ bv,n 0(%r8)
+
+post_user_interrupt_handler:
+
+# Back from user handler(s)
+# Disable external interrupts (since the interrupt handler could
+# have turned them on) and return to the interrupted task stack (assuming
+# _ISR_Nest_level == 0)
+
+ rsm HPPA_PSW_I, %r0
+ ldw -4(sp), sp
+
+# r3 -- &_ISR_Nest_level
+# r5 -- value _ISR_Nest_level
+# r4 -- &_Thread_Dispatch_disable_level
+# r6 -- value _Thread_Dispatch_disable_level
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldo R%_ISR_Nest_level(%r3),%r3
+ ldw 0(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldo R%_Thread_Dispatch_disable_level(%r4),%r4
+ ldw 0(%r4), %r6
+
+# decrement isr nest level
+ addi -1, %r5, %r5
+ stw %r5, 0(%r3)
+
+# decrement dispatch disable level counter and, if not 0, go on
+ addi -1,%r6,%r6
+ comibf,= 0,%r6,isr_restore
+ stw %r6, 0(%r4)
+
+# check whether or not a context switch is necessary
+ .import _Context_Switch_necessary,data
+ ldil L%_Context_Switch_necessary,%r8
+ ldw R%_Context_Switch_necessary(%r8),%r8
+ comibf,=,n 0,%r8,ISR_dispatch
+
+# check whether or not a context switch is necessary because an ISR
+# sent signals to the interrupted task
+ .import _ISR_Signals_to_thread_executing,data
+ ldil L%_ISR_Signals_to_thread_executing,%r8
+ ldw R%_ISR_Signals_to_thread_executing(%r8),%r8
+ comibt,=,n 0,%r8,isr_restore
+
+# OK, something happened while in ISR and we need to switch to a task
+# other than the one which was interrupted or the
+# ISR_Signals_to_thread_executing case
+# We also turn on interrupts, since the interrupted task had them
+# on (obviously :-) and Thread_Dispatch is happy to leave ints on.
+#
+
+ISR_dispatch:
+ ssm HPPA_PSW_I, %r0
+
+ .import _Thread_Dispatch,code
+ .call
+ bl _Thread_Dispatch,%r2
+ ldo 128(sp),sp
+
+ ldo -128(sp),sp
+
+ rsm HPPA_PSW_I, %r0
+
+isr_restore:
+
+# Get a pointer to beginning of our stack frame
+ ldo -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1
+
+# restore float
+ .call ARGW0=GR
+ bl _CPU_Restore_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(%arg1), arg0
+
+ copy %arg1, %arg0
+
+# ********** FALL THRU **********
+
+# Jump here from bottom of Context_Switch
+# Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self
+# restore interrupt state
+#
+
+ .EXPORT _CPU_Context_restore
+_CPU_Context_restore:
+
+# Turn off Q & I so we can write pcoq
+ rsm HPPA_PSW_Q + HPPA_PSW_I, %r0
+
+ ldw IPSW_OFFSET(arg0), %r8
+ mtctl %r8, ipsw
+
+ ldw SAR_OFFSET(arg0), %r9
+ mtctl %r9, sar
+
+ ldw PCOQFRONT_OFFSET(arg0), %r10
+ mtctl %r10, pcoq
+
+ ldw PCOQBACK_OFFSET(arg0), %r11
+ mtctl %r11, pcoq
+
+#
+# restore integer state
+#
+ ldw R1_OFFSET(arg0),%r1
+ ldw R2_OFFSET(arg0),%r2
+ ldw R3_OFFSET(arg0),%r3
+ ldw R4_OFFSET(arg0),%r4
+ ldw R5_OFFSET(arg0),%r5
+ ldw R6_OFFSET(arg0),%r6
+ ldw R7_OFFSET(arg0),%r7
+ ldw R8_OFFSET(arg0),%r8
+ ldw R9_OFFSET(arg0),%r9
+ ldw R10_OFFSET(arg0),%r10
+ ldw R11_OFFSET(arg0),%r11
+ ldw R12_OFFSET(arg0),%r12
+ ldw R13_OFFSET(arg0),%r13
+ ldw R14_OFFSET(arg0),%r14
+ ldw R15_OFFSET(arg0),%r15
+ ldw R16_OFFSET(arg0),%r16
+ ldw R17_OFFSET(arg0),%r17
+ ldw R18_OFFSET(arg0),%r18
+ ldw R19_OFFSET(arg0),%r19
+ ldw R20_OFFSET(arg0),%r20
+ ldw R21_OFFSET(arg0),%r21
+ ldw R22_OFFSET(arg0),%r22
+ ldw R23_OFFSET(arg0),%r23
+ ldw R24_OFFSET(arg0),%r24
+ ldw R25_OFFSET(arg0),%r25
+# skipping r26 (aka arg0) until we are done with it
+ ldw R27_OFFSET(arg0),%r27
+ ldw R28_OFFSET(arg0),%r28
+ ldw R29_OFFSET(arg0),%r29
+ ldw R30_OFFSET(arg0),%r30
+ ldw R31_OFFSET(arg0),%r31
+
+# Must load r26 last since it is arg0
+ ldw R26_OFFSET(arg0),%r26
+
+isr_exit:
+ rfi
+ .EXIT
+ .PROCEND
+
+#
+# This section is used to context switch floating point registers.
+# Ref: 6-35 of Architecture 1.1
+#
+# NOTE: since integer multiply uses the floating point unit,
+# we have to save/restore fp on every trap. We cannot
+# just try to keep track of fp usage.
+
+ .align 32
+ .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0
+_CPU_Save_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ fstds,ma %fr0,8(%arg0)
+ fstds,ma %fr1,8(%arg0)
+ fstds,ma %fr2,8(%arg0)
+ fstds,ma %fr3,8(%arg0)
+ fstds,ma %fr4,8(%arg0)
+ fstds,ma %fr5,8(%arg0)
+ fstds,ma %fr6,8(%arg0)
+ fstds,ma %fr7,8(%arg0)
+ fstds,ma %fr8,8(%arg0)
+ fstds,ma %fr9,8(%arg0)
+ fstds,ma %fr10,8(%arg0)
+ fstds,ma %fr11,8(%arg0)
+ fstds,ma %fr12,8(%arg0)
+ fstds,ma %fr13,8(%arg0)
+ fstds,ma %fr14,8(%arg0)
+ fstds,ma %fr15,8(%arg0)
+ fstds,ma %fr16,8(%arg0)
+ fstds,ma %fr17,8(%arg0)
+ fstds,ma %fr18,8(%arg0)
+ fstds,ma %fr19,8(%arg0)
+ fstds,ma %fr20,8(%arg0)
+ fstds,ma %fr21,8(%arg0)
+ fstds,ma %fr22,8(%arg0)
+ fstds,ma %fr23,8(%arg0)
+ fstds,ma %fr24,8(%arg0)
+ fstds,ma %fr25,8(%arg0)
+ fstds,ma %fr26,8(%arg0)
+ fstds,ma %fr27,8(%arg0)
+ fstds,ma %fr28,8(%arg0)
+ fstds,ma %fr29,8(%arg0)
+ fstds,ma %fr30,8(%arg0)
+ fstds %fr31,0(%arg0)
+ bv 0(%r2)
+ addi -(31*8), %arg0, %arg0 ; restore arg0 just for fun
+ .EXIT
+ .PROCEND
+
+ .align 32
+ .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0
+_CPU_Restore_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ addi (31*8), %arg0, %arg0 ; point at last double
+ fldds 0(%arg0),%fr31
+ fldds,mb -8(%arg0),%fr30
+ fldds,mb -8(%arg0),%fr29
+ fldds,mb -8(%arg0),%fr28
+ fldds,mb -8(%arg0),%fr27
+ fldds,mb -8(%arg0),%fr26
+ fldds,mb -8(%arg0),%fr25
+ fldds,mb -8(%arg0),%fr24
+ fldds,mb -8(%arg0),%fr23
+ fldds,mb -8(%arg0),%fr22
+ fldds,mb -8(%arg0),%fr21
+ fldds,mb -8(%arg0),%fr20
+ fldds,mb -8(%arg0),%fr19
+ fldds,mb -8(%arg0),%fr18
+ fldds,mb -8(%arg0),%fr17
+ fldds,mb -8(%arg0),%fr16
+ fldds,mb -8(%arg0),%fr15
+ fldds,mb -8(%arg0),%fr14
+ fldds,mb -8(%arg0),%fr13
+ fldds,mb -8(%arg0),%fr12
+ fldds,mb -8(%arg0),%fr11
+ fldds,mb -8(%arg0),%fr10
+ fldds,mb -8(%arg0),%fr9
+ fldds,mb -8(%arg0),%fr8
+ fldds,mb -8(%arg0),%fr7
+ fldds,mb -8(%arg0),%fr6
+ fldds,mb -8(%arg0),%fr5
+ fldds,mb -8(%arg0),%fr4
+ fldds,mb -8(%arg0),%fr3
+ fldds,mb -8(%arg0),%fr2
+ fldds,mb -8(%arg0),%fr1
+ bv 0(%r2)
+ fldds,mb -8(%arg0),%fr0
+ .EXIT
+ .PROCEND
+
+#
+# These 2 small routines are unused right now.
+# Normally we just go through _CPU_Save_float_context (and Restore)
+#
+# Here we just deref the ptr and jump up, letting _CPU_Save_float_context
+# do the return for us.
+#
+ .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0
+_CPU_Context_save_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Save_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+ .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0
+_CPU_Context_restore_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Restore_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+
+# void _CPU_Context_switch( run_context, heir_context )
+#
+# This routine performs a normal non-FP context switch.
+#
+
+ .align 32
+ .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR
+_CPU_Context_switch:
+ .PROC
+ .CALLINFO FRAME=64
+ .ENTRY
+
+# Save the integer context
+ stw %r1,R1_OFFSET(arg0)
+ stw %r2,R2_OFFSET(arg0)
+ stw %r3,R3_OFFSET(arg0)
+ stw %r4,R4_OFFSET(arg0)
+ stw %r5,R5_OFFSET(arg0)
+ stw %r6,R6_OFFSET(arg0)
+ stw %r7,R7_OFFSET(arg0)
+ stw %r8,R8_OFFSET(arg0)
+ stw %r9,R9_OFFSET(arg0)
+ stw %r10,R10_OFFSET(arg0)
+ stw %r11,R11_OFFSET(arg0)
+ stw %r12,R12_OFFSET(arg0)
+ stw %r13,R13_OFFSET(arg0)
+ stw %r14,R14_OFFSET(arg0)
+ stw %r15,R15_OFFSET(arg0)
+ stw %r16,R16_OFFSET(arg0)
+ stw %r17,R17_OFFSET(arg0)
+ stw %r18,R18_OFFSET(arg0)
+ stw %r19,R19_OFFSET(arg0)
+ stw %r20,R20_OFFSET(arg0)
+ stw %r21,R21_OFFSET(arg0)
+ stw %r22,R22_OFFSET(arg0)
+ stw %r23,R23_OFFSET(arg0)
+ stw %r24,R24_OFFSET(arg0)
+ stw %r25,R25_OFFSET(arg0)
+ stw %r26,R26_OFFSET(arg0)
+ stw %r27,R27_OFFSET(arg0)
+ stw %r28,R28_OFFSET(arg0)
+ stw %r29,R29_OFFSET(arg0)
+ stw %r30,R30_OFFSET(arg0)
+ stw %r31,R31_OFFSET(arg0)
+
+# fill in interrupt context section
+ stw %r2, PCOQFRONT_OFFSET(%arg0)
+ ldo 4(%r2), %r2
+ stw %r2, PCOQBACK_OFFSET(%arg0)
+
+# Generate a suitable IPSW by using the system default psw
+# with the current low bits added in.
+
+ ldil L%CPU_PSW_DEFAULT, %r2
+ ldo R%CPU_PSW_DEFAULT(%r2), %r2
+ ssm 0, %arg2
+ dep %arg2, 31, 8, %r2
+ stw %r2, IPSW_OFFSET(%arg0)
+
+# at this point, the running task context is completely saved
+# Now jump to the bottom of the interrupt handler to load the
+# heirs context
+
+ b _CPU_Context_restore
+ copy %arg1, %arg0
+
+ .EXIT
+ .PROCEND
+
+
+/*
+ * Find first bit
+ * NOTE:
+ * This is used (and written) only for the ready chain code and
+ * priority bit maps.
+ * Any other use constitutes fraud.
+ * Returns first bit from the least significant side.
+ * Eg: if input is 0x8001
+ * output will indicate the '1' bit and return 0.
+ * This is counter to HPPA bit numbering, which calls this
+ * bit 31.  Numbering it this way simplifies the macros
+ * _CPU_Priority_Mask and _CPU_Priority_Bits_index.
+ *
+ * NOTE:
+ * We just use the 16 bit version;
+ * it does not handle the zero case.
+ *
+ * Based on the UTAH Mach libc version of ffs.
+ */
+
+ .align 32
+ .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR
+hppa_rtems_ffs:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+#ifdef RETURN_ERROR_ON_ZERO
+ comb,= %arg0,%r0,ffsdone ; If arg0 is 0
+ ldi -1,%ret0 ; return -1
+#endif
+
+#if BITFIELD_SIZE == 32
+ ldi 31,%ret0 ; Set return to high bit
+ extru,= %arg0,31,16,%r0 ; If low 16 bits are non-zero
+ addi,tr -16,%ret0,%ret0 ; subtract 16 from bitpos
+ shd %r0,%arg0,16,%arg0 ; else shift right 16 bits
+#else
+ ldi 15,%ret0 ; Set return to high bit
+#endif
+ extru,= %arg0,31,8,%r0 ; If low 8 bits are non-zero
+ addi,tr -8,%ret0,%ret0 ; subtract 8 from bitpos
+ shd %r0,%arg0,8,%arg0 ; else shift right 8 bits
+ extru,= %arg0,31,4,%r0 ; If low 4 bits are non-zero
+ addi,tr -4,%ret0,%ret0 ; subtract 4 from bitpos
+ shd %r0,%arg0,4,%arg0 ; else shift right 4 bits
+ extru,= %arg0,31,2,%r0 ; If low 2 bits are non-zero
+ addi,tr -2,%ret0,%ret0 ; subtract 2 from bitpos
+ shd %r0,%arg0,2,%arg0 ; else shift right 2 bits
+ extru,= %arg0,31,1,%r0 ; If low bit is non-zero
+ addi -1,%ret0,%ret0 ; subtract 1 from bitpos
+ffsdone:
+ bv,n 0(%r2)
+ nop
+ .EXIT
+ .PROCEND
diff --git a/c/src/exec/score/cpu/hppa1.1/hppa.h b/c/src/exec/score/cpu/hppa1.1/hppa.h
new file mode 100644
index 0000000000..8829bb8c87
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/hppa.h
@@ -0,0 +1,704 @@
+/*
+ * @(#)hppa.h 1.5 - 95/04/25
+ *
+ *
+ * File: $RCSfile$
+ * Project: PixelFlow
+ * Created: 94/10/4
+ * RespEngr: tony bennett
+ * Revision: $Revision$
+ * Last Mod: $Date$
+ *
+ * Description:
+ *
+ * Definitions for HP PA Risc
+ * ref: PA RISC 1.1 Architecture and Instruction Set Reference Manual
+ *
+ * COPYRIGHT (c) 1994 by Division Incorporated
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of Division Incorporated not be
+ * used in advertising or publicity pertaining to distribution
+ * of the software without specific, written prior permission.
+ * Division Incorporated makes no representations about the
+ * suitability of this software for any purpose.
+ *
+ *
+ * Note:
+ * This file is included by both C and assembler code ( -DASM )
+ *
+ * $Id$
+ */
+
+#ifndef _INCLUDE_HPPA_H
+#define _INCLUDE_HPPA_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#define hppa1_1
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This section contains the information required to build
+ * RTEMS for a particular member of the Hewlett Packard
+ * PA-RISC family. It does this by setting variables to
+ * indicate which implementation dependent features are
+ * present in a particular member of the family.
+ */
+
+#if defined(hppa7100)
+
+#define RTEMS_MODEL_NAME "hppa 7100"
+
+#elif defined(hppa7200)
+
+#define RTEMS_MODEL_NAME "hppa 7200"
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "HP PA-RISC 1.1"
+
+#ifndef ASM
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* 8-bit unsigned integer */
+typedef unsigned short unsigned16; /* 16-bit unsigned integer */
+typedef unsigned int unsigned32; /* 32-bit unsigned integer */
+typedef unsigned long long unsigned64; /* 64-bit unsigned integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef char signed8; /* 8-bit signed integer */
+typedef short signed16; /* 16-bit signed integer */
+typedef int signed32; /* 32-bit signed integer */
+typedef long long signed64; /* 64 bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+#endif /* !ASM */
+
+
+/*
+ * Processor Status Word (PSW) Masks
+ */
+
+#define HPPA_PSW_Y 0x80000000 /* Data Debug Trap Disable */
+#define HPPA_PSW_Z 0x40000000 /* Instruction Debug Trap Disable */
+#define HPPA_PSW_r2 0x20000000 /* reserved */
+#define HPPA_PSW_r3 0x10000000 /* reserved */
+#define HPPA_PSW_r4 0x08000000 /* reserved */
+#define HPPA_PSW_E 0x04000000 /* Little Endian on Memory References */
+#define HPPA_PSW_S 0x02000000 /* Secure Interval Timer */
+#define HPPA_PSW_T 0x01000000 /* Taken Branch Trap Enable */
+#define HPPA_PSW_H 0x00800000 /* Higher-Privilege Transfer Trap Enable*/
+#define HPPA_PSW_L 0x00400000 /* Lower-Privilege Transfer Trap Enable */
+#define HPPA_PSW_N 0x00200000 /* PC Queue Front Instruction Nullified */
+#define HPPA_PSW_X 0x00100000 /* Data Memory Break Disable */
+#define HPPA_PSW_B 0x00080000 /* Taken Branch in Previous Cycle */
+#define HPPA_PSW_C 0x00040000 /* Code Address Translation Enable */
+#define HPPA_PSW_V 0x00020000 /* Divide Step Correction */
+#define HPPA_PSW_M 0x00010000 /* High-Priority Machine Check Disable */
+#define HPPA_PSW_CB 0x0000ff00 /* Carry/Borrow Bits */
+#define HPPA_PSW_r24 0x00000080 /* reserved */
+#define HPPA_PSW_G 0x00000040 /* Debug trap Enable */
+#define HPPA_PSW_F 0x00000020 /* Performance monitor interrupt unmask */
+#define HPPA_PSW_R 0x00000010 /* Recovery Counter Enable */
+#define HPPA_PSW_Q 0x00000008 /* Interruption State Collection Enable */
+#define HPPA_PSW_P 0x00000004 /* Protection ID Validation Enable */
+#define HPPA_PSW_D 0x00000002 /* Data Address Translation Enable */
+#define HPPA_PSW_I 0x00000001 /* External, Power Failure, */
+ /* Low-Priority Machine Check */
+ /* Interruption Enable */
+
+/*
+ * HPPA traps and interrupts
+ * basic layout. Note numbers do not denote priority
+ *
+ * 0-31 basic traps and interrupts defined by HPPA architecture
+ * 32-63 32 external interrupts
+ * 64-... bsp defined
+ */
+
+#define HPPA_INTERRUPT_NON_EXISTENT 0
+/* group 1 */
+#define HPPA_INTERRUPT_HIGH_PRIORITY_MACHINE_CHECK 1
+/* group 2 */
+#define HPPA_INTERRUPT_POWER_FAIL 2
+#define HPPA_INTERRUPT_RECOVERY_COUNTER 3
+#define HPPA_INTERRUPT_EXTERNAL_INTERRUPT 4
+#define HPPA_INTERRUPT_LOW_PRIORITY_MACHINE_CHECK 5
+#define HPPA_INTERRUPT_PERFORMANCE_MONITOR 29
+/* group 3 */
+#define HPPA_INTERRUPT_INSTRUCTION_TLB_MISS 6
+#define HPPA_INTERRUPT_INSTRUCTION_MEMORY_PROTECTION 7
+#define HPPA_INTERRUPT_INSTRUCTION_DEBUG 30
+#define HPPA_INTERRUPT_ILLEGAL_INSTRUCTION 8
+#define HPPA_INTERRUPT_BREAK_INSTRUCTION 9
+#define HPPA_INTERRUPT_PRIVILEGED_OPERATION 10
+#define HPPA_INTERRUPT_PRIVILEGED_REGISTER 11
+#define HPPA_INTERRUPT_OVERFLOW 12
+#define HPPA_INTERRUPT_CONDITIONAL 13
+#define HPPA_INTERRUPT_ASSIST_EXCEPTION 14
+#define HPPA_INTERRUPT_DATA_TLB_MISS 15
+#define HPPA_INTERRUPT_NON_ACCESS_INSTRUCTION_TLB_MISS 16
+#define HPPA_INTERRUPT_NON_ACCESS_DATA_TLB_MISS 17
+#define HPPA_INTERRUPT_DATA_MEMORY_ACCESS_RIGHTS 26
+#define HPPA_INTERRUPT_DATA_MEMORY_PROTECTION_ID 27
+#define HPPA_INTERRUPT_UNALIGNED_DATA_REFERENCE 28
+#define HPPA_INTERRUPT_DATA_MEMORY_PROTECTION 18
+#define HPPA_INTERRUPT_DATA_MEMORY_BREAK 19
+#define HPPA_INTERRUPT_TLB_DIRTY_BIT 20
+#define HPPA_INTERRUPT_PAGE_REFERENCE 21
+#define HPPA_INTERRUPT_DATA_DEBUG 31
+#define HPPA_INTERRUPT_ASSIST_EMULATION 22
+/* group 4 */
+#define HPPA_INTERRUPT_HIGHER_PRIVILEGE_TRANSFER 23
+#define HPPA_INTERRUPT_LOWER_PRIVILEGE_TRANSFER 24
+#define HPPA_INTERRUPT_TAKEN_BRANCH 25
+
+#define HPPA_INTERRUPT_ON_CHIP_MAX 31
+
+/* External Interrupts via interrupt 4 */
+
+#define HPPA_INTERRUPT_EXTERNAL_BASE 32
+
+#define HPPA_INTERRUPT_EXTERNAL_0 32
+#define HPPA_INTERRUPT_EXTERNAL_1 33
+#define HPPA_INTERRUPT_EXTERNAL_2 34
+#define HPPA_INTERRUPT_EXTERNAL_3 35
+#define HPPA_INTERRUPT_EXTERNAL_4 36
+#define HPPA_INTERRUPT_EXTERNAL_5 37
+#define HPPA_INTERRUPT_EXTERNAL_6 38
+#define HPPA_INTERRUPT_EXTERNAL_7 39
+#define HPPA_INTERRUPT_EXTERNAL_8 40
+#define HPPA_INTERRUPT_EXTERNAL_9 41
+#define HPPA_INTERRUPT_EXTERNAL_10 42
+#define HPPA_INTERRUPT_EXTERNAL_11 43
+#define HPPA_INTERRUPT_EXTERNAL_12 44
+#define HPPA_INTERRUPT_EXTERNAL_13 45
+#define HPPA_INTERRUPT_EXTERNAL_14 46
+#define HPPA_INTERRUPT_EXTERNAL_15 47
+#define HPPA_INTERRUPT_EXTERNAL_16 48
+#define HPPA_INTERRUPT_EXTERNAL_17 49
+#define HPPA_INTERRUPT_EXTERNAL_18 50
+#define HPPA_INTERRUPT_EXTERNAL_19 51
+#define HPPA_INTERRUPT_EXTERNAL_20 52
+#define HPPA_INTERRUPT_EXTERNAL_21 53
+#define HPPA_INTERRUPT_EXTERNAL_22 54
+#define HPPA_INTERRUPT_EXTERNAL_23 55
+#define HPPA_INTERRUPT_EXTERNAL_24 56
+#define HPPA_INTERRUPT_EXTERNAL_25 57
+#define HPPA_INTERRUPT_EXTERNAL_26 58
+#define HPPA_INTERRUPT_EXTERNAL_27 59
+#define HPPA_INTERRUPT_EXTERNAL_28 60
+#define HPPA_INTERRUPT_EXTERNAL_29 61
+#define HPPA_INTERRUPT_EXTERNAL_30 62
+#define HPPA_INTERRUPT_EXTERNAL_31 63
+
+#define HPPA_INTERRUPT_EXTERNAL_INTERVAL_TIMER HPPA_INTERRUPT_EXTERNAL_0
+#define HPPA_EXTERNAL_INTERRUPTS 32
+
+/* BSP defined interrupts begin here */
+
+#define HPPA_INTERRUPT_MAX 64
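+
+/*
+ * The two numbering schemes are related by a constant offset; e.g.
+ * external interrupt 0 (the interval timer) is global vector 32.  A
+ * hypothetical helper (not defined here) makes the mapping explicit:
+ *
+ *   #define HPPA_EXTERNAL_TO_GLOBAL(ev) ((ev) + HPPA_INTERRUPT_EXTERNAL_BASE)
+ */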
+
+/*
+ * Inline macros for misc. interesting opcodes
+ */
+
+/* generate a global label */
+#define HPPA_ASM_LABEL(label) \
+ asm(".export " label ", ! .label " label);
+
+/* Return From Interrupt RFI */
+#define HPPA_ASM_RFI() asm volatile ("rfi")
+
+/* Set System Mask SSM i,t */
+#define HPPA_ASM_SSM(i,gr) asm volatile ("ssm %1, %0" \
+ : "=r" (gr) \
+ : "i" (i))
+/* Reset System Mask RSM i,t */
+#define HPPA_ASM_RSM(i,gr) asm volatile ("rsm %1, %0" \
+ : "=r" (gr) \
+ : "i" (i))
+/* Move To System Mask MTSM r */
+#define HPPA_ASM_MTSM(gr) asm volatile ("mtsm %0" \
+ : : "r" (gr))
+
+/* Load Space Identifier LDSID (s,b),t */
+#define HPPA_ASM_LDSID(sr,grb,grt) asm volatile ("ldsid (%1,%2),%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (grb))
+
+/*
+ * Gcc extended asm doesn't really allow for treatment of space registers
+ * as "registers", so we have to use "i" format.
+ * Unfortunately this means that the "=" constraint is not available.
+ */
+
+/* Move To Space Register MTSP r,sr */
+#define HPPA_ASM_MTSP(gr,sr) asm volatile ("mtsp %1,%0" \
+ : : "i" (sr), \
+ "r" (gr))
+
+/* Move From Space Register MFSP sr,t */
+#define HPPA_ASM_MFSP(sr,gr) asm volatile ("mfsp %1,%0" \
+ : "=r" (gr) \
+ : "i" (sr))
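+
+/*
+ * Consequence of the "i" constraint (sketch): the space register
+ * argument must be a compile-time constant such as the SR4 define
+ * later in this file; a variable will not satisfy the constraint.
+ *
+ *   unsigned32 space;
+ *   HPPA_ASM_MFSP(SR4, space);     OK: SR4 is a literal
+ */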
+
+/* Move To Control register MTCTL r,t */
+#define HPPA_ASM_MTCTL(gr,cr) asm volatile ("mtctl %1,%0" \
+ : : "i" (cr), \
+ "r" (gr))
+
+/* Move From Control register MFCTL r,t */
+#define HPPA_ASM_MFCTL(cr,gr) asm volatile ("mfctl %1,%0" \
+ : "=r" (gr) \
+ : "i" (cr))
+
+/* Synchronize caches SYNC */
+#define HPPA_ASM_SYNC() asm volatile ("sync")
+
+/* Probe Read Access PROBER (s,b),r,t */
+#define HPPA_ASM_PROBER(sr,groff,gracc,grt) \
+ asm volatile ("prober (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "r" (gracc))
+
+/* Probe Read Access Immediate PROBERI (s,b),i,t */
+#define HPPA_ASM_PROBERI(sr,groff,iacc,grt) \
+ asm volatile ("proberi (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "i" (iacc))
+
+/* Probe Write Access PROBEW (s,b),r,t */
+#define HPPA_ASM_PROBEW(sr,groff,gracc,grt) \
+ asm volatile ("probew (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "r" (gracc))
+
+/* Probe Write Access Immediate PROBEWI (s,b),i,t */
+#define HPPA_ASM_PROBEWI(sr,groff,iacc,grt) \
+ asm volatile ("probewi (%1,%2),%3,%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (groff), \
+ "i" (iacc))
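+
+/*
+ * Illustrative sketch (not from the original source): probe an
+ * address for read access at a given privilege level before touching
+ * it.  The target register is set to 1 when access is allowed.
+ *
+ *   unsigned32 readable;
+ *   unsigned32 addr = (unsigned32) user_pointer;   hypothetical
+ *   HPPA_ASM_PROBERI(SR0, addr, 3, readable);      privilege level 3
+ *   if ( readable == 0 )
+ *     reject the request;
+ */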
+
+/* Load Physical Address LPA x(s,b),t */
+#define HPPA_ASM_LPA(sr,grb,grt) asm volatile ("lpa %%r0(%1,%2),%0" \
+ : "=r" (grt) \
+ : "i" (sr), \
+ "r" (grb))
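+
+/*
+ * Typical LPA use (sketch): translate a virtual address into the
+ * physical address behind it, e.g. when a table must be handed to
+ * hardware that bypasses translation.
+ *
+ *   unsigned32 vaddr = (unsigned32) table;    hypothetical pointer
+ *   unsigned32 paddr;
+ *   HPPA_ASM_LPA(0, vaddr, paddr);
+ */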
+
+/* Load Coherence Index LCI x(s,b),t */
+/* AKA: Load Hash Address LHA x(s,b),t */
+#define HPPA_ASM_LCI(grx,sr,grb,grt) asm volatile ("lha %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "r" (grx),\
+ "i" (sr), \
+ "r" (grb))
+#define HPPA_ASM_LHA(grx,sr,grb,grt) HPPA_ASM_LCI(grx,sr,grb,grt)
+
+/* Purge Data Tlb PDTLB x(s,b) */
+#define HPPA_ASM_PDTLB(grx,sr,grb) asm volatile ("pdtlb %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Instruction Tlb PITLB x(s,b) */
+#define HPPA_ASM_PITLB(grx,sr,grb) asm volatile ("pitlb %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Data Tlb Entry PDTLBE x(s,b) */
+#define HPPA_ASM_PDTLBE(grx,sr,grb) asm volatile ("pdtlbe %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Instruction Tlb Entry PITLBE x(s,b) */
+#define HPPA_ASM_PITLBE(grx,sr,grb) asm volatile ("pitlbe %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+
+/* Insert Data TLB Address IDTLBA r,(s,b) */
+#define HPPA_ASM_IDTLBA(gr,sr,grb) asm volatile ("idtlba %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Instruction TLB Address IITLBA r,(s,b) */
+#define HPPA_ASM_IITLBA(gr,sr,grb) asm volatile ("iitlba %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Data TLB Protection IDTLBP r,(s,b) */
+#define HPPA_ASM_IDTLBP(gr,sr,grb) asm volatile ("idtlbp %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Insert Instruction TLB Protection IITLBP r,(s,b) */
+#define HPPA_ASM_IITLBP(gr,sr,grb) asm volatile ("iitlbp %0,(%1,%2)" \
+ : : "r" (gr), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Purge Data Cache PDC x(s,b) */
+#define HPPA_ASM_PDC(grx,sr,grb) asm volatile ("pdc %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Data Cache FDC x(s,b) */
+#define HPPA_ASM_FDC(grx,sr,grb) asm volatile ("fdc %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Instruction Cache FIC x(s,b) */
+#define HPPA_ASM_FIC(grx,sr,grb) asm volatile ("fic %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Data Cache Entry FDCE x(s,b) */
+#define HPPA_ASM_FDCE(grx,sr,grb) asm volatile ("fdce %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Flush Instruction Cache Entry FICE x(s,b) */
+#define HPPA_ASM_FICE(grx,sr,grb) asm volatile ("fice %0(%1,%2)" \
+ : : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Break BREAK i5,i13 */
+#define HPPA_ASM_BREAK(i5,i13) asm volatile ("break %0,%1" \
+ : : "i" (i5), \
+ "i" (i13))
+
+/* Load and Clear Word Short LDCWS d(s,b),t */
+#define HPPA_ASM_LDCWS(i,sr,grb,grt) asm volatile ("ldcws %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "i" (i), \
+ "i" (sr), \
+ "r" (grb))
+
+/* Load and Clear Word Indexed LDCWX x(s,b),t */
+#define HPPA_ASM_LDCWX(grx,sr,grb,grt) asm volatile ("ldcwx %1(%2,%3),%0" \
+ : "=r" (grt) \
+ : "r" (grx), \
+ "i" (sr), \
+ "r" (grb))
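+
+/*
+ * Load-and-clear is the PA-RISC atomic primitive: it reads a word and
+ * writes zero to it in one indivisible step.  A minimal spin-lock
+ * sketch (not part of this header) follows; the architecture requires
+ * the lock word to be 16-byte aligned, and the usual convention is
+ * nonzero == free, zero == held.
+ *
+ *   static volatile unsigned32 lock __attribute__ ((aligned (16))) = 1;
+ *
+ *   unsigned32 got;
+ *   do {
+ *     HPPA_ASM_LDCWS(0, SR0, (unsigned32) &lock, got);
+ *   } while ( got == 0 );        spin until we fetched the 1
+ *   ... critical section ...
+ *   lock = 1;                    release
+ */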
+
+/* Load Word Absolute Short LDWAS d(b),t */
+/* NOTE: "short" here means "short displacement" */
+#define HPPA_ASM_LDWAS(disp,grbase,gr) asm volatile("ldwas %1(%2),%0" \
+ : "=r" (gr) \
+ : "i" (disp), \
+ "r" (grbase))
+
+/* Store Word Absolute Short STWAS r,d(b) */
+/* NOTE: "short" here means "short displacement" */
+#define HPPA_ASM_STWAS(gr,disp,grbase) asm volatile("stwas %0,%1(%2)" \
+ : : "r" (gr), \
+ "i" (disp), \
+ "r" (grbase))
+
+/*
+ * Swap bytes
+ * REFERENCE: PA72000 TRM -- Appendix C
+ */
+#define HPPA_ASM_SWAPBYTES(value, swapped) asm volatile( \
+ " shd %1,%1,16,%0 \n\
+ dep %0,15,8,%0 \n\
+ shd %1,%0,8,%0" \
+ : "=r" (swapped) \
+ : "r" (value) \
+ )
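+
+/*
+ * Example (sketch): reverse the byte order of a 32-bit value, e.g.
+ * when exchanging data with little-endian hardware.
+ *
+ *   unsigned32 raw = 0x12345678;
+ *   unsigned32 swapped;
+ *   HPPA_ASM_SWAPBYTES(raw, swapped);    swapped == 0x78563412
+ */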
+
+
+/* 72000 Diagnose instructions follow
+ * These macros assume gas knows about these instructions.
+ * gas2.2.u1 did not.
+ * I added them to my copy and installed it locally.
+ *
+ * There are *very* special requirements for these guys
+ * ref: TRM 6.1.3 Programming Constraints
+ *
+ * The macros below handle the following rules
+ *
+ * Except for WIT, WDT, WDD, WIDO, WIDE, all DIAGNOSE must be doubled.
+ * Must never be nullified (hence the leading nop)
+ *  NOP must precede every RDD,RDT,WDD,WDT,RDTLB
+ *  Instruction preceding GR_SHDW must not set any of the GRs saved
+ *
+ * The macros do *NOT* deal with the following problems
+ * doubled DIAGNOSE instructions must not straddle a page boundary
+ * if code translation enabled. (since 2nd could trap on ITLB)
+ * If you care about DHIT and DPE bits of DR0, then
+ * No store instruction in the 2 insn window before RDD
+ */
+
+
+/* Move To CPU/DIAG register MTCPU r,t */
+#define HPPA_ASM_MTCPU(gr,dr) asm volatile (" nop \n" \
+ " mtcpu %1,%0 \n" \
+ " mtcpu %1,%0" \
+ : : "i" (dr), \
+ "r" (gr))
+
+/* Move From CPU/DIAG register MFCPU r,t */
+#define HPPA_ASM_MFCPU(dr,gr) asm volatile (" nop \n" \
+ " mfcpu %1,%0\n" \
+ " mfcpu %1,%0" \
+ : "=r" (gr) \
+ : "i" (dr))
+
+/* Transfer of Control Enable TOC_EN */
+#define HPPA_ASM_TOC_EN() asm volatile (" tocen \n" \
+ " tocen")
+
+/* Transfer of Control Disable TOC_DIS */
+#define HPPA_ASM_TOC_DIS() asm volatile (" tocdis \n" \
+ " tocdis")
+
+/* Shadow Registers to General Register SHDW_GR */
+#define HPPA_ASM_SHDW_GR() asm volatile (" shdwgr \n" \
+ " shdwgr" \
+                                          ::: "r1", "r8", "r9", "r16", \
+                                              "r17", "r24", "r25")
+
+/* General Registers to Shadow Register GR_SHDW */
+#define HPPA_ASM_GR_SHDW() asm volatile (" nop \n" \
+ " grshdw \n" \
+ " grshdw")
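+
+/*
+ * Sketch of a diagnose access, assuming an assembler that knows the
+ * 72000 diagnose opcodes (see the note above): read diagnose register
+ * DR0.  Mind the DHIT/DPE store-window rule listed above.
+ *
+ *   unsigned32 dr0_value;
+ *   HPPA_ASM_MFCPU(DR0, dr0_value);
+ */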
+
+/*
+ * Definitions of special registers for use by the above macros.
+ */
+
+/* Hardware Space Registers */
+#define SR0 0
+#define SR1 1
+#define SR2 2
+#define SR3 3
+#define SR4 4
+#define SR5 5
+#define SR6 6
+#define SR7 7
+
+/* Hardware Control Registers */
+#define CR0 0
+#define RCTR 0 /* Recovery Counter Register */
+
+#define CR8 8 /* Protection ID 1 */
+#define PIDR1 8
+
+#define CR9 9 /* Protection ID 2 */
+#define PIDR2 9
+
+#define CR10 10
+#define CCR 10 /* Coprocessor Configuration Register */
+
+#define CR11 11
+#define SAR 11 /* Shift Amount Register */
+
+#define CR12 12
+#define PIDR3 12 /* Protection ID 3 */
+
+#define CR13 13
+#define PIDR4 13 /* Protection ID 4 */
+
+#define CR14 14
+#define IVA 14 /* Interrupt Vector Address */
+
+#define CR15 15
+#define EIEM 15 /* External Interrupt Enable Mask */
+
+#define CR16 16
+#define ITMR 16 /* Interval Timer */
+
+#define CR17 17
+#define PCSQ 17 /* Program Counter Space queue */
+
+#define CR18 18
+#define PCOQ 18 /* Program Counter Offset queue */
+
+#define CR19 19
+#define IIR 19 /* Interruption Instruction Register */
+
+#define CR20 20
+#define ISR 20 /* Interruption Space Register */
+
+#define CR21 21
+#define IOR 21 /* Interruption Offset Register */
+
+#define CR22 22
+#define IPSW 22 /* Interruption Processor Status Word */
+
+#define CR23 23
+#define EIRR 23 /* External Interrupt Request */
+
+#define CR24 24
+#define PPDA 24 /* Physical Page Directory Address */
+#define TR0 24 /* Temporary register 0 */
+
+#define CR25 25
+#define HTA 25 /* Hash Table Address */
+#define TR1 25 /* Temporary register 1 */
+
+#define CR26 26
+#define TR2 26 /* Temporary register 2 */
+
+#define CR27 27
+#define TR3 27 /* Temporary register 3 */
+
+#define CR28 28
+#define TR4 28 /* Temporary register 4 */
+
+#define CR29 29
+#define TR5 29 /* Temporary register 5 */
+
+#define CR30 30
+#define TR6 30 /* Temporary register 6 */
+
+#define CR31 31
+#define CPUID 31 /* MP identifier */
+
+/*
+ * Diagnose registers
+ */
+
+#define DR0 0
+#define DR1 1
+#define DR8 8
+#define DR24 24
+#define DR25 25
+
+/*
+ * Tear apart a break instruction to find its type.
+ */
+#define HPPA_BREAK5(x) ((x) & 0x1F)
+#define HPPA_BREAK13(x) (((x) >> 13) & 0x1FFF)
+
+/* assemble a break instruction */
+#define HPPA_BREAK(i5,i13) (((i5) & 0x1F) | (((i13) & 0x1FFF) << 13))
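+
+/*
+ * The accessors above invert HPPA_BREAK; for example:
+ *
+ *   unsigned32 insn = HPPA_BREAK(5, 0x42);
+ *   HPPA_BREAK5(insn)  == 5
+ *   HPPA_BREAK13(insn) == 0x42
+ */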
+
+
+#ifndef ASM
+
+/*
+ * static inline utility functions to get at control registers
+ */
+
+#define EMIT_GET_CONTROL(name, reg) \
+static __inline__ unsigned int \
+get_ ## name (void) \
+{ \
+ unsigned int value; \
+ HPPA_ASM_MFCTL(reg, value); \
+ return value; \
+}
+
+#define EMIT_SET_CONTROL(name, reg) \
+static __inline__ void \
+set_ ## name (unsigned int new_value) \
+{ \
+ HPPA_ASM_MTCTL(new_value, reg); \
+}
+
+#define EMIT_CONTROLS(name, reg) \
+ EMIT_GET_CONTROL(name, reg) \
+ EMIT_SET_CONTROL(name, reg)
+
+EMIT_CONTROLS(recovery, RCTR); /* CR0 */
+EMIT_CONTROLS(pid1, PIDR1); /* CR8 */
+EMIT_CONTROLS(pid2, PIDR2); /* CR9 */
+EMIT_CONTROLS(ccr, CCR); /* CR10; CCR and SCR share CR10 */
+EMIT_CONTROLS(scr, CCR); /* CR10; CCR and SCR share CR10 */
+EMIT_CONTROLS(sar, SAR); /* CR11 */
+EMIT_CONTROLS(pid3, PIDR3); /* CR12 */
+EMIT_CONTROLS(pid4, PIDR4); /* CR13 */
+EMIT_CONTROLS(iva, IVA); /* CR14 */
+EMIT_CONTROLS(eiem, EIEM); /* CR15 */
+EMIT_CONTROLS(itimer, ITMR); /* CR16 */
+EMIT_CONTROLS(pcsq, PCSQ); /* CR17 */
+EMIT_CONTROLS(pcoq, PCOQ); /* CR18 */
+EMIT_CONTROLS(iir, IIR); /* CR19 */
+EMIT_CONTROLS(isr, ISR); /* CR20 */
+EMIT_CONTROLS(ior, IOR); /* CR21 */
+EMIT_CONTROLS(ipsw, IPSW); /* CR22 */
+EMIT_CONTROLS(eirr, EIRR); /* CR23 */
+EMIT_CONTROLS(tr0, TR0); /* CR24 */
+EMIT_CONTROLS(tr1, TR1); /* CR25 */
+EMIT_CONTROLS(tr2, TR2); /* CR26 */
+EMIT_CONTROLS(tr3, TR3); /* CR27 */
+EMIT_CONTROLS(tr4, TR4); /* CR28 */
+EMIT_CONTROLS(tr5, TR5); /* CR29 */
+EMIT_CONTROLS(tr6, TR6); /* CR30 */
+EMIT_CONTROLS(tr7, CR31); /* CR31 */
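+
+/*
+ * Each EMIT_CONTROLS() line above expands into a get_<name>() /
+ * set_<name>() inline pair.  For example (sketch):
+ *
+ *   unsigned int mask = get_eiem();    read CR15
+ *   set_eiem(0);                       mask all external interrupts
+ *   ...
+ *   set_eiem(mask);                    restore the previous mask
+ */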
+
+/*
+ * If and How to invoke the debugger (a ROM debugger generally)
+ */
+
+#ifdef SIMHPPA_ROM
+/* invoke the pflow debugger */
+#define CPU_INVOKE_DEBUGGER \
+ do { \
+ extern void debugger_break(void); \
+ debugger_break(); \
+ } while (0)
+#endif
+
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _INCLUDE_HPPA_H */
+
diff --git a/c/src/exec/score/cpu/hppa1.1/rtems.s b/c/src/exec/score/cpu/hppa1.1/rtems.s
new file mode 100644
index 0000000000..dd215c9af0
--- /dev/null
+++ b/c/src/exec/score/cpu/hppa1.1/rtems.s
@@ -0,0 +1,53 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the HPPA implementation of RTEMS.
+ *
+ * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
+ * On-Line Applications Research Corporation (OAR).
+ * All rights assigned to U.S. Government, 1994.
+ *
+ * This material may be reproduced by or for the U.S. Government pursuant
+ * to the copyright license under the clause at DFARS 252.227-7013. This
+ * notice must appear in all copies of this file and its derivatives.
+ *
+ * $Id$
+ */
+
+#include <hppa.h>
+#include <rtems/cpu_asm.h>
+
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+ .align 32
+ .EXPORT cpu_jump_to_directive,ENTRY,PRIV_LEV=0
+cpu_jump_to_directive
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+# jump to the directive entry point selected by the directive number in r9
+
+# XXX: look at register usage and code
+# XXX: this is not necessarily right!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# r9 = directive number
+
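+# The ldil/ldo pair below builds the 32-bit address of the
+# _Entry_points table; ldwx,s then fetches entry r9 with a
+# word-scaled indexed load, and bv,n branches to it.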
+ .import _Entry_points,data
+ ldil L%_Entry_points,%r8
+ ldo R%_Entry_points(%r8),%r8
+ ldwx,s %r9(%r8),%r8
+
+ .call ARGW0=GR
+ bv,n 0(%r8)
+ nop
+
+ .EXIT
+ .PROCEND
+