From f198c63d6a1d172aef422353e42b41f8cb128275 Mon Sep 17 00:00:00 2001 From: Joel Sherrill Date: Fri, 6 Sep 1996 18:11:41 +0000 Subject: new file for MIPS port by Craig Lebakken (lebakken@minn.net) and Derrick Ostertag (ostertag@transition.com). --- cpukit/score/cpu/mips/asm.h | 99 +++ cpukit/score/cpu/mips/cpu.c | 216 +++++++ cpukit/score/cpu/mips/cpu_asm.S | 972 +++++++++++++++++++++++++++++ cpukit/score/cpu/mips/idtcpu.h | 439 +++++++++++++ cpukit/score/cpu/mips/iregdef.h | 324 ++++++++++ cpukit/score/cpu/mips/rtems/asm.h | 99 +++ cpukit/score/cpu/mips/rtems/mips/idtcpu.h | 439 +++++++++++++ cpukit/score/cpu/mips/rtems/mips/iregdef.h | 324 ++++++++++ cpukit/score/cpu/mips64orion/asm.h | 99 +++ cpukit/score/cpu/mips64orion/cpu.c | 216 +++++++ cpukit/score/cpu/mips64orion/cpu_asm.S | 972 +++++++++++++++++++++++++++++ cpukit/score/cpu/mips64orion/cpu_asm.h | 115 ++++ cpukit/score/cpu/mips64orion/idtcpu.h | 439 +++++++++++++ cpukit/score/cpu/mips64orion/idtmon.h | 170 +++++ cpukit/score/cpu/mips64orion/iregdef.h | 324 ++++++++++ 15 files changed, 5247 insertions(+) create mode 100644 cpukit/score/cpu/mips/asm.h create mode 100644 cpukit/score/cpu/mips/cpu.c create mode 100644 cpukit/score/cpu/mips/cpu_asm.S create mode 100644 cpukit/score/cpu/mips/idtcpu.h create mode 100644 cpukit/score/cpu/mips/iregdef.h create mode 100644 cpukit/score/cpu/mips/rtems/asm.h create mode 100644 cpukit/score/cpu/mips/rtems/mips/idtcpu.h create mode 100644 cpukit/score/cpu/mips/rtems/mips/iregdef.h create mode 100644 cpukit/score/cpu/mips64orion/asm.h create mode 100644 cpukit/score/cpu/mips64orion/cpu.c create mode 100644 cpukit/score/cpu/mips64orion/cpu_asm.S create mode 100644 cpukit/score/cpu/mips64orion/cpu_asm.h create mode 100644 cpukit/score/cpu/mips64orion/idtcpu.h create mode 100644 cpukit/score/cpu/mips64orion/idtmon.h create mode 100644 cpukit/score/cpu/mips64orion/iregdef.h (limited to 'cpukit/score/cpu') diff --git a/cpukit/score/cpu/mips/asm.h b/cpukit/score/cpu/mips/asm.h new file mode 100644 index 0000000000..d5a5d69eaa --- /dev/null +++ b/cpukit/score/cpu/mips/asm.h @@ -0,0 +1,99 @@ +/* asm.h + * + * This include file attempts to address the problems + * caused by incompatible flavors of assemblers and + * toolsets. It primarily addresses variations in the + * use of leading underscores on symbols and the requirement + * that register names be preceded by a %. + * + * + * NOTE: The spacing in the use of these macros + * is critical to them working as advertised. + * + * COPYRIGHT: + * + * This file is based on similar code found in newlib available + * from ftp.cygnus.com. The file which was used had no copyright + * notice. This file is freely distributable as long as the source + * of the file is noted. This file is: + * + * COPYRIGHT (c) 1994. + * On-Line Applications Research Corporation (OAR). + * + * asm.h,v 1.4 1995/09/26 19:25:36 joel Exp + */ +/* @(#)asm.h 03/15/96 1.1 */ + +#ifndef __NO_CPU_ASM_h +#define __NO_CPU_ASM_h + +/* + * Indicate we are in an assembly file and get the basic CPU definitions. + */ + +#define ASM +#include + +/* + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + */ + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ _ +#endif + +#ifndef __REGISTER_PREFIX__ +#define __REGISTER_PREFIX__ +#endif + +/* ANSI concatenation macros. 
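+   (Two macro levels are needed so that an argument such as
+   __USER_LABEL_PREFIX__ is itself expanded before ## pastes the tokens:
+   SYM (foo) then yields _foo or plain foo depending on that prefix.)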
*/ + +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ + +#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) + +/* Use the right prefix for registers. */ + +#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) + +/* + * define macros for all of the registers on this CPU + * + * EXAMPLE: #define d0 REG (d0) + */ + +/* + * Define macros to handle section beginning and ends. + */ + + +#define BEGIN_CODE_DCL .text +#define END_CODE_DCL +#define BEGIN_DATA_DCL .data +#define END_DATA_DCL +#define BEGIN_CODE .text +#define END_CODE +#define BEGIN_DATA +#define END_DATA +#define BEGIN_BSS +#define END_BSS +#define END + +/* + * Following must be tailor for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ + +#define PUBLIC(sym) .globl SYM (sym) +#define EXTERN(sym) .globl SYM (sym) + +#endif +/* end of include file */ + + diff --git a/cpukit/score/cpu/mips/cpu.c b/cpukit/score/cpu/mips/cpu.c new file mode 100644 index 0000000000..ec8c73393e --- /dev/null +++ b/cpukit/score/cpu/mips/cpu.c @@ -0,0 +1,216 @@ +/* + * Mips CPU Dependent Source + * + * Author: Craig Lebakken + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu.c: + * + * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994. + * On-Line Applications Research Corporation (OAR). + * All rights assigned to U.S. Government, 1994. + * + * This material may be reproduced by or for the U.S. Government pursuant + * to the copyright license under the clause at DFARS 252.227-7013. This + * notice must appear in all copies of this file and its derivatives. + * + * cpu.c,v 1.7 1995/09/26 19:25:37 joel Exp + */ + +#ifndef lint +static char _sccsid[] = "@(#)cpu.c 08/20/96 1.5\n"; +#endif + +#include +#include +#include + + +ISR_Handler_entry _ISR_Vector_table[ ISR_NUMBER_OF_VECTORS ]; + +/* _CPU_Initialize + * + * This routine performs processor dependent initialization. + * + * INPUT PARAMETERS: + * cpu_table - CPU table to initialize + * thread_dispatch - address of disptaching routine + */ + + +void null_handler( void ) +{ +} + + +void _CPU_Initialize( + rtems_cpu_table *cpu_table, + void (*thread_dispatch) /* ignored on this CPU */ +) +{ + unsigned int i = ISR_NUMBER_OF_VECTORS; + + while ( i-- ) + { + _ISR_Vector_table[i] = (ISR_Handler_entry)null_handler; + } + + /* + * The thread_dispatch argument is the address of the entry point + * for the routine called at the end of an ISR once it has been + * decided a context switch is necessary. On some compilation + * systems it is difficult to call a high-level language routine + * from assembly. This allows us to trick these systems. + * + * If you encounter this problem save the entry point in a CPU + * dependent variable. 
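+   *
+   *  On this port that is exactly what is done: the entry point is
+   *  recorded in _CPU_Thread_dispatch_pointer immediately below.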
+ */ + + _CPU_Thread_dispatch_pointer = thread_dispatch; + + /* + * If there is not an easy way to initialize the FP context + * during Context_Initialize, then it is usually easier to + * save an "uninitialized" FP context here and copy it to + * the task's during Context_Initialize. + */ + + /* FP context initialization support goes here */ + + _CPU_Table = *cpu_table; + +} + +/*PAGE + * + * _CPU_ISR_Get_level + */ + +#if 0 /* located in cpu_asm.S */ +unsigned32 _CPU_ISR_Get_level( void ) +{ + /* + * This routine returns the current interrupt level. + */ +} +#endif + +/*PAGE + * + * _CPU_ISR_install_raw_handler + */ + +void _CPU_ISR_install_raw_handler( + unsigned32 vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + /* + * This is where we install the interrupt handler into the "raw" interrupt + * table used by the CPU to dispatch interrupt handlers. + */ + +#if 0 /* not necessary */ +/* use IDT/Sim to set interrupt vector. Needed to co-exist with debugger. */ + add_ext_int_func( vector, new_handler ); +#endif +} + +/*PAGE + * + * _CPU_ISR_install_vector + * + * This kernel routine installs the RTEMS handler for the + * specified vector. + * + * Input parameters: + * vector - interrupt vector number + * old_handler - former ISR for this vector number + * new_handler - replacement ISR for this vector number + * + * Output parameters: NONE + * + */ + +void _CPU_ISR_install_vector( + unsigned32 vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + *old_handler = _ISR_Vector_table[ vector ]; + + /* + * If the interrupt vector table is a table of pointer to isr entry + * points, then we need to install the appropriate RTEMS interrupt + * handler for this vector number. + */ + + _CPU_ISR_install_raw_handler( vector, _ISR_Handler, old_handler ); + + /* + * We put the actual user ISR address in '_ISR_vector_table'. This will + * be used by the _ISR_Handler so the user gets control. + */ + + _ISR_Vector_table[ vector ] = new_handler; +} + +/*PAGE + * + * _CPU_Install_interrupt_stack + */ + +void _CPU_Install_interrupt_stack( void ) +{ +/* we don't support this yet */ +} + +/*PAGE + * + * _CPU_Internal_threads_Idle_thread_body + * + * NOTES: + * + * 1. This is the same as the regular CPU independent algorithm. + * + * 2. If you implement this using a "halt", "idle", or "shutdown" + * instruction, then don't forget to put it in an infinite loop. + * + * 3. Be warned. Some processors with onboard DMA have been known + * to stop the DMA if the CPU were put in IDLE mode. This might + * also be a problem with other on-chip peripherals. So use this + * hook with caution. + */ + +#if 0 /* located in cpu_asm.S */ +void _CPU_Thread_Idle_body( void ) +{ + + for( ; ; ) + /* insert your "halt" instruction here */ ; +} +#endif + +extern void mips_break( int error ); + +#include + +void mips_fatal_error( int error ) +{ + printf("fatal error 0x%x %d\n",error,error); + mips_break( error ); +} diff --git a/cpukit/score/cpu/mips/cpu_asm.S b/cpukit/score/cpu/mips/cpu_asm.S new file mode 100644 index 0000000000..d9e38231bf --- /dev/null +++ b/cpukit/score/cpu/mips/cpu_asm.S @@ -0,0 +1,972 @@ +/* cpu_asm.S + * + * This file contains the basic algorithms for all assembly code used + * in an specific CPU port of RTEMS. These algorithms must be implemented + * in assembly language + * + * Author: Craig Lebakken + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. 
+ * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.s: + * + * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994. + * On-Line Applications Research Corporation (OAR). + * All rights assigned to U.S. Government, 1994. + * + * This material may be reproduced by or for the U.S. Government pursuant + * to the copyright license under the clause at DFARS 252.227-7013. This + * notice must appear in all copies of this file and its derivatives. + * + * cpu_asm.c,v 1.5 1995/09/26 19:25:39 joel Exp + */ +/* @(#)cpu_asm.S 08/20/96 1.15 */ + +#include "cpu_asm.h" + +#include "iregdef.h" +#include "idtcpu.h" + +#define FRAME(name,frm_reg,offset,ret_reg) \ + .globl name; \ + .ent name; \ +name:; \ + .frame frm_reg,offset,ret_reg +#define ENDFRAME(name) \ + .end name + + +#define EXCP_STACK_SIZE (NREGS*R_SZ) + +#if __ghs__ +#define sd sw +#define ld lw +#define dmtc0 mtc0 +#define dsll sll +#define dmfc0 mfc0 +#endif + +#if 1 /* 32 bit unsigned32 types */ +#define sint sw +#define lint lw +#define stackadd addiu +#define intadd addu +#define SZ_INT 4 +#define SZ_INT_POW2 2 +#else /* 64 bit unsigned32 types */ +#define sint dw +#define lint dw +#define stackadd daddiu +#define intadd daddu +#define SZ_INT 8 +#define SZ_INT_POW2 3 +#endif + +#ifdef __GNUC__ +#define EXTERN(x,size) .extern x,size +#else +#define EXTERN(x,size) +#endif + +/* NOTE: these constants must match the Context_Control structure in cpu.h */ +#define S0_OFFSET 0 +#define S1_OFFSET 1 +#define S2_OFFSET 2 +#define S3_OFFSET 3 +#define S4_OFFSET 4 +#define S5_OFFSET 5 +#define S6_OFFSET 6 +#define S7_OFFSET 7 +#define SP_OFFSET 8 +#define FP_OFFSET 9 +#define RA_OFFSET 10 +#define C0_SR_OFFSET 11 +#define C0_EPC_OFFSET 12 + +/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */ +#define FP0_OFFSET 0 +#define FP1_OFFSET 1 +#define FP2_OFFSET 2 +#define FP3_OFFSET 3 +#define FP4_OFFSET 4 +#define FP5_OFFSET 5 +#define FP6_OFFSET 6 +#define FP7_OFFSET 7 +#define FP8_OFFSET 8 +#define FP9_OFFSET 9 +#define FP10_OFFSET 10 +#define FP11_OFFSET 11 +#define FP12_OFFSET 12 +#define FP13_OFFSET 13 +#define FP14_OFFSET 14 +#define FP15_OFFSET 15 +#define FP16_OFFSET 16 +#define FP17_OFFSET 17 +#define FP18_OFFSET 18 +#define FP19_OFFSET 19 +#define FP20_OFFSET 20 +#define FP21_OFFSET 21 +#define FP22_OFFSET 22 +#define FP23_OFFSET 23 +#define FP24_OFFSET 24 +#define FP25_OFFSET 25 +#define FP26_OFFSET 26 +#define FP27_OFFSET 27 +#define FP28_OFFSET 28 +#define FP29_OFFSET 29 +#define FP30_OFFSET 30 +#define FP31_OFFSET 31 + + +/*PAGE + * + * _CPU_ISR_Get_level + */ + +#if 0 +unsigned32 _CPU_ISR_Get_level( void ) +{ + /* + * This routine returns the current interrupt level. 
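+ *
+ *  On this port the level is taken from the EXL bit of the status
+ *  register: the assembly implementation below returns 0 when the
+ *  exception level is clear (interrupts enabled) and 1 when it is set.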
+ */ +} +#endif +/* return the current exception level for the 4650 */ +FRAME(_CPU_ISR_Get_level,sp,0,ra) + mfc0 v0,C0_SR + nop + andi v0,SR_EXL + srl v0,1 + j ra +ENDFRAME(_CPU_ISR_Get_level) + +FRAME(_CPU_ISR_Set_level,sp,0,ra) + nop + mfc0 a0,C0_SR + nop + andi a0,SR_EXL + beqz a0,_CPU_ISR_Set_1 /* normalize a0 */ + nop + li a0,1 +_CPU_ISR_Set_1: + beq v0,a0,_CPU_ISR_Set_exit /* if (current_level != new_level ) */ + nop + bnez a0,_CPU_ISR_Set_2 + nop + nop + mfc0 t0,C0_SR + nop + li t1,~SR_EXL + and t0,t1 + nop + mtc0 t0,C0_SR /* disable exception level */ + nop + j ra + nop +_CPU_ISR_Set_2: + nop + mfc0 t0,C0_SR + nop + li t1,~SR_IE + and t0,t1 + nop + mtc0 t0,C0_SR /* first disable ie bit (recommended) */ + nop + ori t0,SR_EXL|SR_IE /* enable exception level */ + nop + mtc0 t0,C0_SR + nop +_CPU_ISR_Set_exit: + j ra + nop +ENDFRAME(_CPU_ISR_Set_level) + +/* + * _CPU_Context_save_fp_context + * + * This routine is responsible for saving the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). The general rule on making this decision + * is to avoid writing assembly language. + */ + +/* void _CPU_Context_save_fp( + * void **fp_context_ptr + * ) + * { + * } + */ + +FRAME(_CPU_Context_save_fp,sp,0,ra) + .set noat + ld a1,(a0) + swc1 $f0,FP0_OFFSET*4(a1) + swc1 $f1,FP1_OFFSET*4(a1) + swc1 $f2,FP2_OFFSET*4(a1) + swc1 $f3,FP3_OFFSET*4(a1) + swc1 $f4,FP4_OFFSET*4(a1) + swc1 $f5,FP5_OFFSET*4(a1) + swc1 $f6,FP6_OFFSET*4(a1) + swc1 $f7,FP7_OFFSET*4(a1) + swc1 $f8,FP8_OFFSET*4(a1) + swc1 $f9,FP9_OFFSET*4(a1) + swc1 $f10,FP10_OFFSET*4(a1) + swc1 $f11,FP11_OFFSET*4(a1) + swc1 $f12,FP12_OFFSET*4(a1) + swc1 $f13,FP13_OFFSET*4(a1) + swc1 $f14,FP14_OFFSET*4(a1) + swc1 $f15,FP15_OFFSET*4(a1) + swc1 $f16,FP16_OFFSET*4(a1) + swc1 $f17,FP17_OFFSET*4(a1) + swc1 $f18,FP18_OFFSET*4(a1) + swc1 $f19,FP19_OFFSET*4(a1) + swc1 $f20,FP20_OFFSET*4(a1) + swc1 $f21,FP21_OFFSET*4(a1) + swc1 $f22,FP22_OFFSET*4(a1) + swc1 $f23,FP23_OFFSET*4(a1) + swc1 $f24,FP24_OFFSET*4(a1) + swc1 $f25,FP25_OFFSET*4(a1) + swc1 $f26,FP26_OFFSET*4(a1) + swc1 $f27,FP27_OFFSET*4(a1) + swc1 $f28,FP28_OFFSET*4(a1) + swc1 $f29,FP29_OFFSET*4(a1) + swc1 $f30,FP30_OFFSET*4(a1) + swc1 $f31,FP31_OFFSET*4(a1) + j ra + nop + .set at +ENDFRAME(_CPU_Context_save_fp) + +/* + * _CPU_Context_restore_fp_context + * + * This routine is responsible for restoring the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). The general rule on making this decision + * is to avoid writing assembly language. 
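+ *
+ *  The implementation below dereferences the handle itself (ld a1,(a0))
+ *  and reloads all 32 floating point registers with lwc1 from 4 byte
+ *  slots at FPn_OFFSET*4 within the Context_Control_fp area.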
+ */ + +/* void _CPU_Context_restore_fp( + * void **fp_context_ptr + * ) + * { + * } + */ + +FRAME(_CPU_Context_restore_fp,sp,0,ra) + .set noat + ld a1,(a0) + lwc1 $f0,FP0_OFFSET*4(a1) + lwc1 $f1,FP1_OFFSET*4(a1) + lwc1 $f2,FP2_OFFSET*4(a1) + lwc1 $f3,FP3_OFFSET*4(a1) + lwc1 $f4,FP4_OFFSET*4(a1) + lwc1 $f5,FP5_OFFSET*4(a1) + lwc1 $f6,FP6_OFFSET*4(a1) + lwc1 $f7,FP7_OFFSET*4(a1) + lwc1 $f8,FP8_OFFSET*4(a1) + lwc1 $f9,FP9_OFFSET*4(a1) + lwc1 $f10,FP10_OFFSET*4(a1) + lwc1 $f11,FP11_OFFSET*4(a1) + lwc1 $f12,FP12_OFFSET*4(a1) + lwc1 $f13,FP13_OFFSET*4(a1) + lwc1 $f14,FP14_OFFSET*4(a1) + lwc1 $f15,FP15_OFFSET*4(a1) + lwc1 $f16,FP16_OFFSET*4(a1) + lwc1 $f17,FP17_OFFSET*4(a1) + lwc1 $f18,FP18_OFFSET*4(a1) + lwc1 $f19,FP19_OFFSET*4(a1) + lwc1 $f20,FP20_OFFSET*4(a1) + lwc1 $f21,FP21_OFFSET*4(a1) + lwc1 $f22,FP22_OFFSET*4(a1) + lwc1 $f23,FP23_OFFSET*4(a1) + lwc1 $f24,FP24_OFFSET*4(a1) + lwc1 $f25,FP25_OFFSET*4(a1) + lwc1 $f26,FP26_OFFSET*4(a1) + lwc1 $f27,FP27_OFFSET*4(a1) + lwc1 $f28,FP28_OFFSET*4(a1) + lwc1 $f29,FP29_OFFSET*4(a1) + lwc1 $f30,FP30_OFFSET*4(a1) + lwc1 $f31,FP31_OFFSET*4(a1) + j ra + nop + .set at +ENDFRAME(_CPU_Context_restore_fp) + +/* _CPU_Context_switch + * + * This routine performs a normal non-FP context switch. + */ + +/* void _CPU_Context_switch( + * Context_Control *run, + * Context_Control *heir + * ) + * { + * } + */ + +FRAME(_CPU_Context_switch,sp,0,ra) + + mfc0 t0,C0_SR + li t1,~SR_IE + sd t0,C0_SR_OFFSET*8(a0) /* save status register */ + and t0,t1 + mtc0 t0,C0_SR /* first disable ie bit (recommended) */ + ori t0,SR_EXL|SR_IE /* enable exception level to disable interrupts */ + mtc0 t0,C0_SR + + sd ra,RA_OFFSET*8(a0) /* save current context */ + sd sp,SP_OFFSET*8(a0) + sd fp,FP_OFFSET*8(a0) + sd s0,S0_OFFSET*8(a0) + sd s1,S1_OFFSET*8(a0) + sd s2,S2_OFFSET*8(a0) + sd s3,S3_OFFSET*8(a0) + sd s4,S4_OFFSET*8(a0) + sd s5,S5_OFFSET*8(a0) + sd s6,S6_OFFSET*8(a0) + sd s7,S7_OFFSET*8(a0) + dmfc0 t0,C0_EPC + sd t0,C0_EPC_OFFSET*8(a0) + +_CPU_Context_switch_restore: + ld s0,S0_OFFSET*8(a1) /* restore context */ + ld s1,S1_OFFSET*8(a1) + ld s2,S2_OFFSET*8(a1) + ld s3,S3_OFFSET*8(a1) + ld s4,S4_OFFSET*8(a1) + ld s5,S5_OFFSET*8(a1) + ld s6,S6_OFFSET*8(a1) + ld s7,S7_OFFSET*8(a1) + ld fp,FP_OFFSET*8(a1) + ld sp,SP_OFFSET*8(a1) + ld ra,RA_OFFSET*8(a1) + ld t0,C0_EPC_OFFSET*8(a1) + dmtc0 t0,C0_EPC + ld t0,C0_SR_OFFSET*8(a1) + andi t0,SR_EXL + bnez t0,_CPU_Context_1 /* set exception level from restore context */ + li t0,~SR_EXL + mfc0 t1,C0_SR + nop + and t1,t0 + mtc0 t1,C0_SR +_CPU_Context_1: + j ra + nop +ENDFRAME(_CPU_Context_switch) + +/* + * _CPU_Context_restore + * + * This routine is generally used only to restart self in an + * efficient manner. It may simply be a label in _CPU_Context_switch. + * + * NOTE: May be unnecessary to reload some registers. + */ + +#if 0 +void _CPU_Context_restore( + Context_Control *new_context +) +{ +} +#endif + +FRAME(_CPU_Context_restore,sp,0,ra) + dadd a1,a0,zero + j _CPU_Context_switch_restore + nop +ENDFRAME(_CPU_Context_restore) + +EXTERN(_ISR_Nest_level, SZ_INT) +EXTERN(_Thread_Dispatch_disable_level,SZ_INT) +EXTERN(_Context_Switch_necessary,SZ_INT) +EXTERN(_ISR_Signals_to_thread_executing,SZ_INT) +.extern _Thread_Dispatch +.extern _ISR_Vector_table + +/* void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + */ + +#if 0 +void _ISR_Handler() +{ + /* + * This discussion ignores a lot of the ugly details in a real + * implementation such as saving enough registers/state to be + * able to do something real. 
Keep in mind that the goal is + * to invoke a user's ISR handler which is written in C and + * uses a certain set of registers. + * + * Also note that the exact order is to a large extent flexible. + * Hardware will dictate a sequence for a certain subset of + * _ISR_Handler while requirements for setting + */ + + /* + * At entry to "common" _ISR_Handler, the vector number must be + * available. On some CPUs the hardware puts either the vector + * number or the offset into the vector table for this ISR in a + * known place. If the hardware does not give us this information, + * then the assembly portion of RTEMS for this port will contain + * a set of distinct interrupt entry points which somehow place + * the vector number in a known place (which is safe if another + * interrupt nests this one) and branches to _ISR_Handler. + * + */ +#endif +FRAME(_ISR_Handler,sp,0,ra) +.set noreorder +#if USE_IDTKIT +/* IDT/Kit incorrectly adds 4 to EPC before returning. This compensates */ + lreg k0, R_EPC*R_SZ(sp) + daddiu k0,k0,-4 + sreg k0, R_EPC*R_SZ(sp) + lreg k0, R_CAUSE*R_SZ(sp) + li k1, ~CAUSE_BD + and k0, k1 + sreg k0, R_CAUSE*R_SZ(sp) +#endif + +/* save registers not already saved by IDT/sim */ + stackadd sp,sp,-EXCP_STACK_SIZE /* store ra on the stack */ + + sreg ra, R_RA*R_SZ(sp) + sreg v0, R_V0*R_SZ(sp) + sreg v1, R_V1*R_SZ(sp) + sreg a0, R_A0*R_SZ(sp) + sreg a1, R_A1*R_SZ(sp) + sreg a2, R_A2*R_SZ(sp) + sreg a3, R_A3*R_SZ(sp) + sreg t0, R_T0*R_SZ(sp) + sreg t1, R_T1*R_SZ(sp) + sreg t2, R_T2*R_SZ(sp) + sreg t3, R_T3*R_SZ(sp) + sreg t4, R_T4*R_SZ(sp) + sreg t5, R_T5*R_SZ(sp) + sreg t6, R_T6*R_SZ(sp) + sreg t7, R_T7*R_SZ(sp) + mflo k0 + sreg t8, R_T8*R_SZ(sp) + sreg k0, R_MDLO*R_SZ(sp) + sreg t9, R_T9*R_SZ(sp) + mfhi k0 + sreg gp, R_GP*R_SZ(sp) + sreg fp, R_FP*R_SZ(sp) + sreg k0, R_MDHI*R_SZ(sp) + .set noat + sreg AT, R_AT*R_SZ(sp) + .set at + + stackadd sp,sp,-40 /* store ra on the stack */ + sd ra,32(sp) + +/* determine if an interrupt generated this exception */ + mfc0 k0,C0_CAUSE + and k1,k0,CAUSE_EXCMASK + bnez k1,_ISR_Handler_prom_exit /* not an external interrupt, pass exception to Monitor */ + mfc0 k1,C0_SR + and k0,k1 + and k0,CAUSE_IPMASK + beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */ + nop + + /* + * save some or all context on stack + * may need to save some special interrupt information for exit + * + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * if ( _ISR_Nest_level == 0 ) + * switch to software interrupt stack + * #endif + */ +#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + lint t0,_ISR_Nest_level + beq t0, zero, _ISR_Handler_1 + nop + /* switch stacks */ +_ISR_Handler_1: +#else + lint t0,_ISR_Nest_level +#endif + /* + * _ISR_Nest_level++; + */ + addi t0,t0,1 + sint t0,_ISR_Nest_level + /* + * _Thread_Dispatch_disable_level++; + */ + lint t1,_Thread_Dispatch_disable_level + addi t1,t1,1 + sint t1,_Thread_Dispatch_disable_level +#if 0 + nop + j _ISR_Handler_4 + nop + /* + * while ( interrupts_pending(cause_reg) ) { + * vector = BITFIELD_TO_INDEX(cause_reg); + * (*_ISR_Vector_table[ vector ])( vector ); + * } + */ +_ISR_Handler_2: +/* software interrupt priorities can be applied here */ + li t1,-1 +/* convert bit field into interrupt index */ +_ISR_Handler_3: + andi t2,t0,1 + addi t1,1 + beql t2,zero,_ISR_Handler_3 + dsrl t0,1 + li t1,7 + dsll t1,3 /* convert index to byte offset (*8) */ + la t3,_ISR_Vector_table + intadd t1,t3 + lint t1,(t1) + jalr t1 + nop + j _ISR_Handler_5 + nop +_ISR_Handler_4: + mfc0 t0,C0_CAUSE + andi 
t0,CAUSE_IPMASK + bne t0,zero,_ISR_Handler_2 + dsrl t0,t0,8 +_ISR_Handler_5: +#else + nop + li t1,7 + dsll t1,t1,SZ_INT_POW2 + la t3,_ISR_Vector_table + intadd t1,t3 + lint t1,(t1) + jalr t1 + nop +#endif + /* + * --_ISR_Nest_level; + */ + lint t2,_ISR_Nest_level + addi t2,t2,-1 + sint t2,_ISR_Nest_level + /* + * --_Thread_Dispatch_disable_level; + */ + lint t1,_Thread_Dispatch_disable_level + addi t1,t1,-1 + sint t1,_Thread_Dispatch_disable_level + /* + * if ( _Thread_Dispatch_disable_level || _ISR_Nest_level ) + * goto the label "exit interrupt (simple case)" + */ + or t0,t2,t1 + bne t0,zero,_ISR_Handler_exit + nop + /* + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * restore stack + * #endif + * + * if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing ) + * goto the label "exit interrupt (simple case)" + */ + lint t0,_Context_Switch_necessary + lint t1,_ISR_Signals_to_thread_executing + or t0,t0,t1 + beq t0,zero,_ISR_Handler_exit + nop + + /* + * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch + */ + jal _Thread_Dispatch + nop + /* + * prepare to get out of interrupt + * return from interrupt (maybe to _ISR_Dispatch) + * + * LABEL "exit interrupt (simple case): + * prepare to get out of interrupt + * return from interrupt + */ +_ISR_Handler_exit: + ld ra,32(sp) + stackadd sp,sp,40 + +/* restore interrupt context from stack */ + lreg k0, R_MDLO*R_SZ(sp) + mtlo k0 + lreg k0, R_MDHI*R_SZ(sp) + lreg a2, R_A2*R_SZ(sp) + mthi k0 + lreg a3, R_A3*R_SZ(sp) + lreg t0, R_T0*R_SZ(sp) + lreg t1, R_T1*R_SZ(sp) + lreg t2, R_T2*R_SZ(sp) + lreg t3, R_T3*R_SZ(sp) + lreg t4, R_T4*R_SZ(sp) + lreg t5, R_T5*R_SZ(sp) + lreg t6, R_T6*R_SZ(sp) + lreg t7, R_T7*R_SZ(sp) + lreg t8, R_T8*R_SZ(sp) + lreg t9, R_T9*R_SZ(sp) + lreg gp, R_GP*R_SZ(sp) + lreg fp, R_FP*R_SZ(sp) + lreg ra, R_RA*R_SZ(sp) + lreg a0, R_A0*R_SZ(sp) + lreg a1, R_A1*R_SZ(sp) + lreg v1, R_V1*R_SZ(sp) + lreg v0, R_V0*R_SZ(sp) + .set noat + lreg AT, R_AT*R_SZ(sp) + .set at + + stackadd sp,sp,EXCP_STACK_SIZE /* store ra on the stack */ + +#if USE_IDTKIT +/* we handled exception, so return non-zero value */ + li v0,1 +#endif + +_ISR_Handler_quick_exit: +#ifdef USE_IDTKIT + j ra +#else + eret +#endif + nop + +_ISR_Handler_prom_exit: +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + + .set reorder + +ENDFRAME(_ISR_Handler) + + +FRAME(mips_enable_interrupts,sp,0,ra) + mfc0 t0,C0_SR /* get status reg */ + nop + or t0,t0,a0 + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_enable_interrupts) + +FRAME(mips_disable_interrupts,sp,0,ra) + mfc0 v0,C0_SR /* get status reg */ + li t1,SR_IMASK /* t1 = load interrupt mask word */ + not t0,t1 /* t0 = ~t1 */ + and t0,v0 /* clear imask bits */ + mtc0 t0,C0_SR /* save status reg */ + and v0,t1 /* mask return value (only return imask bits) */ + jr ra + nop +ENDFRAME(mips_disable_interrupts) + +FRAME(mips_enable_global_interrupts,sp,0,ra) + mfc0 t0,C0_SR /* get status reg */ + nop + ori t0,SR_IE + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_enable_global_interrupts) + +FRAME(mips_disable_global_interrupts,sp,0,ra) + li t1,SR_IE + mfc0 t0,C0_SR /* get status reg */ + not t1 + and t0,t1 + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_disable_global_interrupts) + +/* return the value of the status register in v0. 
Used for debugging */ +FRAME(mips_get_sr,sp,0,ra) + mfc0 v0,C0_SR + j ra + nop +ENDFRAME(mips_get_sr) + +FRAME(mips_break,sp,0,ra) +#if 1 + break 0x0 + j mips_break +#else + j ra +#endif + nop +ENDFRAME(mips_break) + +/*PAGE + * + * _CPU_Internal_threads_Idle_thread_body + * + * NOTES: + * + * 1. This is the same as the regular CPU independent algorithm. + * + * 2. If you implement this using a "halt", "idle", or "shutdown" + * instruction, then don't forget to put it in an infinite loop. + * + * 3. Be warned. Some processors with onboard DMA have been known + * to stop the DMA if the CPU were put in IDLE mode. This might + * also be a problem with other on-chip peripherals. So use this + * hook with caution. + */ + +FRAME(_CPU_Thread_Idle_body,sp,0,ra) + wait /* enter low power mode */ + j _CPU_Thread_Idle_body + nop +ENDFRAME(_CPU_Thread_Idle_body) + +#define VEC_CODE_LENGTH 10*4 + +/************************************************************************** +** +** init_exc_vecs() - moves the exception code into the addresses +** reserved for exception vectors +** +** UTLB Miss exception vector at address 0x80000000 +** +** General exception vector at address 0x80000080 +** +** RESET exception vector is at address 0xbfc00000 +** +***************************************************************************/ + +#define INITEXCFRM ((2*4)+4) /* ra + 2 arguments */ +FRAME(init_exc_vecs,sp,0,ra) +/* This code yanked from SIM */ +#if defined(CPU_R3000) + .set noreorder + la t1,exc_utlb_code + la t2,exc_norm_code + li t3,UT_VEC + li t4,E_VEC + li t5,VEC_CODE_LENGTH +1: + lw t6,0(t1) + lw t7,0(t2) + sw t6,0(t3) + sw t7,0(t4) + addiu t1,4 + addiu t3,4 + addiu t4,4 + subu t5,4 + bne t5,zero,1b + addiu t2,4 + move t5,ra # assumes clear_cache doesnt use t5 + li a0,UT_VEC + jal clear_cache + li a1,VEC_CODE_LENGTH + nop + li a0,E_VEC + jal clear_cache + li a1,VEC_CODE_LENGTH + move ra,t5 # restore ra + j ra + nop + .set reorder +#endif +#if defined(CPU_R4000) + .set reorder + move t5,ra # assumes clear_cache doesnt use t5 + + /* TLB exception vector */ + la t1,exc_tlb_code + li t2,T_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,T_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + la t1,exc_xtlb_code + li t2,X_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + /* extended TLB exception vector */ + li a0,X_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + /* cache error exception vector */ + la t1,exc_cache_code + li t2,C_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,C_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + /* normal exception vector */ + la t1,exc_norm_code + li t2,E_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,E_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + move ra,t5 # restore ra + j ra +#endif +ENDFRAME(init_exc_vecs) + + +#if defined(CPU_R4000) +FRAME(exc_tlb_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + +ENDFRAME(exc_tlb_code) + + +FRAME(exc_xtlb_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + 
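+	/* an unhandled XTLB miss falls through to the IDT/SIM monitor's
+	   exception handler located via R_VEC */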
+ENDFRAME(exc_xtlb_code) + + +FRAME(exc_cache_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + +ENDFRAME(exc_cache_code) + + +FRAME(exc_norm_code,sp,0,ra) + la k0, _ISR_Handler /* generic external int hndlr */ + j k0 + nop + subu sp, EXCP_STACK_SIZE /* set up local stack frame */ +ENDFRAME(exc_norm_code) +#endif + +/************************************************************************** +** +** enable_int(mask) - enables interrupts - mask is positioned so it only +** needs to be or'ed into the status reg. This +** also does some other things !!!! caution should +** be used if invoking this while in the middle +** of a debugging session where the client may have +** nested interrupts. +** +****************************************************************************/ +FRAME(enable_int,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR + or a0,1 + or t0,a0 + mtc0 t0,C0_SR + j ra + nop + .set reorder +ENDFRAME(enable_int) + + +/*************************************************************************** +** +** disable_int(mask) - disable the interrupt - mask is the complement +** of the bits to be cleared - i.e. to clear ext int +** 5 the mask would be - 0xffff7fff +** +****************************************************************************/ +FRAME(disable_int,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR + nop + and t0,a0 + mtc0 t0,C0_SR + j ra + nop +ENDFRAME(disable_int) + + diff --git a/cpukit/score/cpu/mips/idtcpu.h b/cpukit/score/cpu/mips/idtcpu.h new file mode 100644 index 0000000000..833c5ebe02 --- /dev/null +++ b/cpukit/score/cpu/mips/idtcpu.h @@ -0,0 +1,439 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + +*/ + +/* +** idtcpu.h -- cpu related defines +*/ + +#ifndef _IDTCPU_H__ +#define _IDTCPU_H__ + +/* + * 950313: Ketan added Register definition for XContext reg. + * added define for WAIT instruction. 
+ * 950421: Ketan added Register definition for Config reg (R3081) + */ + +/* +** memory configuration and mapping +*/ +#define K0BASE 0x80000000 +#define K0SIZE 0x20000000 +#define K1BASE 0xa0000000 +#define K1SIZE 0x20000000 +#define K2BASE 0xc0000000 +#define K2SIZE 0x20000000 +#if defined(CPU_R4000) +#define KSBASE 0xe0000000 +#define KSSIZE 0x20000000 +#endif + +#define KUBASE 0 +#define KUSIZE 0x80000000 + +/* +** Exception Vectors +*/ +#if defined(CPU_R3000) +#define UT_VEC K0BASE /* utlbmiss vector */ +#define E_VEC (K0BASE+0x80) /* exception vevtor */ +#endif +#if defined(CPU_R4000) +#define T_VEC (K0BASE+0x000) /* tlbmiss vector */ +#define X_VEC (K0BASE+0x080) /* xtlbmiss vector */ +#define C_VEC (K0BASE+0x100) /* cache error vector */ +#define E_VEC (K0BASE+0x180) /* exception vector */ +#endif +#define R_VEC (K1BASE+0x1fc00000) /* reset vector */ + +/* +** Address conversion macros +*/ +#ifdef CLANGUAGE +#define CAST(as) (as) +#else +#define CAST(as) +#endif +#define K0_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* kseg0 to kseg1 */ +#define K1_TO_K0(x) (CAST(unsigned)(x)&0x9FFFFFFF) /* kseg1 to kseg0 */ +#define K0_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg0 to physical */ +#define K1_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg1 to physical */ +#define PHYS_TO_K0(x) (CAST(unsigned)(x)|0x80000000) /* physical to kseg0 */ +#define PHYS_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* physical to kseg1 */ + +/* +** Cache size constants +*/ +#define MINCACHE 0x200 /* 512 For 3041. */ +#define MAXCACHE 0x40000 /* 256*1024 256k */ + +#if defined(CPU_R4000) +/* R4000 configuration register definitions */ +#define CFG_CM 0x80000000 /* Master-Checker mode */ +#define CFG_ECMASK 0x70000000 /* System Clock Ratio */ +#define CFG_ECBY2 0x00000000 /* divide by 2 */ +#define CFG_ECBY3 0x10000000 /* divide by 3 */ +#define CFG_ECBY4 0x20000000 /* divide by 4 */ +#define CFG_EPMASK 0x0f000000 /* Transmit data pattern */ +#define CFG_EPD 0x00000000 /* D */ +#define CFG_EPDDX 0x01000000 /* DDX */ +#define CFG_EPDDXX 0x02000000 /* DDXX */ +#define CFG_EPDXDX 0x03000000 /* DXDX */ +#define CFG_EPDDXXX 0x04000000 /* DDXXX */ +#define CFG_EPDDXXXX 0x05000000 /* DDXXXX */ +#define CFG_EPDXXDXX 0x06000000 /* DXXDXX */ +#define CFG_EPDDXXXXX 0x07000000 /* DDXXXXX */ +#define CFG_EPDXXXDXXX 0x08000000 /* DXXXDXXX */ +#define CFG_SBMASK 0x00c00000 /* Secondary cache block size */ +#define CFG_SBSHIFT 22 +#define CFG_SB4 0x00000000 /* 4 words */ +#define CFG_SB8 0x00400000 /* 8 words */ +#define CFG_SB16 0x00800000 /* 16 words */ +#define CFG_SB32 0x00c00000 /* 32 words */ +#define CFG_SS 0x00200000 /* Split secondary cache */ +#define CFG_SW 0x00100000 /* Secondary cache port width */ +#define CFG_EWMASK 0x000c0000 /* System port width */ +#define CFG_EWSHIFT 18 +#define CFG_EW64 0x00000000 /* 64 bit */ +#define CFG_EW32 0x00010000 /* 32 bit */ +#define CFG_SC 0x00020000 /* Secondary cache absent */ +#define CFG_SM 0x00010000 /* Dirty Shared mode disabled */ +#define CFG_BE 0x00008000 /* Big Endian */ +#define CFG_EM 0x00004000 /* ECC mode enable */ +#define CFG_EB 0x00002000 /* Block ordering */ +#define CFG_ICMASK 0x00000e00 /* Instruction cache size */ +#define CFG_ICSHIFT 9 +#define CFG_DCMASK 0x000001c0 /* Data cache size */ +#define CFG_DCSHIFT 6 +#define CFG_IB 0x00000020 /* Instruction cache block size */ +#define CFG_DB 0x00000010 /* Data cache block size */ +#define CFG_CU 0x00000008 /* Update on Store Conditional */ +#define CFG_K0MASK 0x00000007 /* KSEG0 coherency algorithm */ + +/* + * 
R4000 primary cache mode + */ +#define CFG_C_UNCACHED 2 +#define CFG_C_NONCOHERENT 3 +#define CFG_C_COHERENTXCL 4 +#define CFG_C_COHERENTXCLW 5 +#define CFG_C_COHERENTUPD 6 + +/* + * R4000 cache operations (should be in assembler...?) + */ +#define Index_Invalidate_I 0x0 /* 0 0 */ +#define Index_Writeback_Inv_D 0x1 /* 0 1 */ +#define Index_Invalidate_SI 0x2 /* 0 2 */ +#define Index_Writeback_Inv_SD 0x3 /* 0 3 */ +#define Index_Load_Tag_I 0x4 /* 1 0 */ +#define Index_Load_Tag_D 0x5 /* 1 1 */ +#define Index_Load_Tag_SI 0x6 /* 1 2 */ +#define Index_Load_Tag_SD 0x7 /* 1 3 */ +#define Index_Store_Tag_I 0x8 /* 2 0 */ +#define Index_Store_Tag_D 0x9 /* 2 1 */ +#define Index_Store_Tag_SI 0xA /* 2 2 */ +#define Index_Store_Tag_SD 0xB /* 2 3 */ +#define Create_Dirty_Exc_D 0xD /* 3 1 */ +#define Create_Dirty_Exc_SD 0xF /* 3 3 */ +#define Hit_Invalidate_I 0x10 /* 4 0 */ +#define Hit_Invalidate_D 0x11 /* 4 1 */ +#define Hit_Invalidate_SI 0x12 /* 4 2 */ +#define Hit_Invalidate_SD 0x13 /* 4 3 */ +#define Hit_Writeback_Inv_D 0x15 /* 5 1 */ +#define Hit_Writeback_Inv_SD 0x17 /* 5 3 */ +#define Fill_I 0x14 /* 5 0 */ +#define Hit_Writeback_D 0x19 /* 6 1 */ +#define Hit_Writeback_SD 0x1B /* 6 3 */ +#define Hit_Writeback_I 0x18 /* 6 0 */ +#define Hit_Set_Virtual_SI 0x1E /* 7 2 */ +#define Hit_Set_Virtual_SD 0x1F /* 7 3 */ + +#ifndef WAIT +#define WAIT .word 0x42000020 +#endif WAIT + +#ifndef wait +#define wait .word 0x42000020 +#endif wait + +#endif + +/* +** TLB resource defines +*/ +#if defined(CPU_R3000) +#define N_TLB_ENTRIES 64 +#define TLB_PGSIZE 0x1000 +#define RANDBASE 8 +#define TLBLO_PFNMASK 0xfffff000 +#define TLBLO_PFNSHIFT 12 +#define TLBLO_N 0x800 /* non-cacheable */ +#define TLBLO_D 0x400 /* writeable */ +#define TLBLO_V 0x200 /* valid bit */ +#define TLBLO_G 0x100 /* global access bit */ + +#define TLBHI_VPNMASK 0xfffff000 +#define TLBHI_VPNSHIFT 12 +#define TLBHI_PIDMASK 0xfc0 +#define TLBHI_PIDSHIFT 6 +#define TLBHI_NPID 64 + +#define TLBINX_PROBE 0x80000000 +#define TLBINX_INXMASK 0x00003f00 +#define TLBINX_INXSHIFT 8 + +#define TLBRAND_RANDMASK 0x00003f00 +#define TLBRAND_RANDSHIFT 8 + +#define TLBCTXT_BASEMASK 0xffe00000 +#define TLBCTXT_BASESHIFT 21 + +#define TLBCTXT_VPNMASK 0x001ffffc +#define TLBCTXT_VPNSHIFT 2 +#endif +#if defined(CPU_R4000) +#define N_TLB_ENTRIES 48 + +#define TLBHI_VPN2MASK 0xffffe000 +#define TLBHI_PIDMASK 0x000000ff +#define TLBHI_NPID 256 + +#define TLBLO_PFNMASK 0x3fffffc0 +#define TLBLO_PFNSHIFT 6 +#define TLBLO_D 0x00000004 /* writeable */ +#define TLBLO_V 0x00000002 /* valid bit */ +#define TLBLO_G 0x00000001 /* global access bit */ +#define TLBLO_CMASK 0x00000038 /* cache algorithm mask */ +#define TLBLO_CSHIFT 3 + +#define TLBLO_UNCACHED (CFG_C_UNCACHED< k, 1 => u */ +#define SR_IEO 0x00000010 /* old interrupt enable, 1 => enable */ +#define SR_KUP 0x00000008 /* prev kernel/user, 0 => k, 1 => u */ +#define SR_IEP 0x00000004 /* prev interrupt enable, 1 => enable */ +#define SR_KUC 0x00000002 /* cur kernel/user, 0 => k, 1 => u */ +#define SR_IEC 0x00000001 /* cur interrupt enable, 1 => enable */ +#endif + +#if defined(CPU_R4000) +#define SR_CUMASK 0xf0000000 /* coproc usable bits */ +#define SR_CU3 0x80000000 /* Coprocessor 3 usable */ +#define SR_CU2 0x40000000 /* Coprocessor 2 usable */ +#define SR_CU1 0x20000000 /* Coprocessor 1 usable */ +#define SR_CU0 0x10000000 /* Coprocessor 0 usable */ + +#define SR_RP 0x08000000 /* Reduced power operation */ +#define SR_FR 0x04000000 /* Additional floating point registers */ +#define SR_RE 0x02000000 /* Reverse 
endian in user mode */ + +#define SR_BEV 0x00400000 /* Use boot exception vectors */ +#define SR_TS 0x00200000 /* TLB shutdown */ +#define SR_SR 0x00100000 /* Soft reset */ +#define SR_CH 0x00040000 /* Cache hit */ +#define SR_CE 0x00020000 /* Use cache ECC */ +#define SR_DE 0x00010000 /* Disable cache exceptions */ + +/* +** status register interrupt masks and bits +*/ + +#define SR_IMASK 0x0000ff00 /* Interrupt mask */ +#define SR_IMASK8 0x00000000 /* mask level 8 */ +#define SR_IMASK7 0x00008000 /* mask level 7 */ +#define SR_IMASK6 0x0000c000 /* mask level 6 */ +#define SR_IMASK5 0x0000e000 /* mask level 5 */ +#define SR_IMASK4 0x0000f000 /* mask level 4 */ +#define SR_IMASK3 0x0000f800 /* mask level 3 */ +#define SR_IMASK2 0x0000fc00 /* mask level 2 */ +#define SR_IMASK1 0x0000fe00 /* mask level 1 */ +#define SR_IMASK0 0x0000ff00 /* mask level 0 */ + +#define SR_IMASKSHIFT 8 + +#define SR_IBIT8 0x00008000 /* bit level 8 */ +#define SR_IBIT7 0x00004000 /* bit level 7 */ +#define SR_IBIT6 0x00002000 /* bit level 6 */ +#define SR_IBIT5 0x00001000 /* bit level 5 */ +#define SR_IBIT4 0x00000800 /* bit level 4 */ +#define SR_IBIT3 0x00000400 /* bit level 3 */ +#define SR_IBIT2 0x00000200 /* bit level 2 */ +#define SR_IBIT1 0x00000100 /* bit level 1 */ + +#define SR_KSMASK 0x00000018 /* Kernel mode mask */ +#define SR_KSUSER 0x00000010 /* User mode */ +#define SR_KSSUPER 0x00000008 /* Supervisor mode */ +#define SR_KSKERNEL 0x00000000 /* Kernel mode */ +#define SR_ERL 0x00000004 /* Error level */ +#define SR_EXL 0x00000002 /* Exception level */ +#define SR_IE 0x00000001 /* Interrupts enabled */ +#endif + + + +/* + * Cause Register + */ +#define CAUSE_BD 0x80000000 /* Branch delay slot */ +#define CAUSE_CEMASK 0x30000000 /* coprocessor error */ +#define CAUSE_CESHIFT 28 + + +#define CAUSE_IPMASK 0x0000FF00 /* Pending interrupt mask */ +#define CAUSE_IPSHIFT 8 + +#define CAUSE_EXCMASK 0x0000003C /* Cause code bits */ +#define CAUSE_EXCSHIFT 2 + +#ifndef XDS +/* +** Coprocessor 0 registers +*/ +#define C0_INX $0 /* tlb index */ +#define C0_RAND $1 /* tlb random */ +#if defined(CPU_R3000) +#define C0_TLBLO $2 /* tlb entry low */ +#endif +#if defined(CPU_R4000) +#define C0_TLBLO0 $2 /* tlb entry low 0 */ +#define C0_TLBLO1 $3 /* tlb entry low 1 */ +#endif + +#define C0_CTXT $4 /* tlb context */ + +#if defined(CPU_R4000) +#define C0_PAGEMASK $5 /* tlb page mask */ +#define C0_WIRED $6 /* number of wired tlb entries */ +#endif + +#define C0_BADVADDR $8 /* bad virtual address */ + +#if defined(CPU_R4000) +#define C0_COUNT $9 /* cycle count */ +#endif + +#define C0_TLBHI $10 /* tlb entry hi */ + +#if defined(CPU_R4000) +#define C0_COMPARE $11 /* cyccle count comparator */ +#endif + +#define C0_SR $12 /* status register */ +#define C0_CAUSE $13 /* exception cause */ +#define C0_EPC $14 /* exception pc */ +#define C0_PRID $15 /* revision identifier */ + +#if defined(CPU_R3000) +#define C0_CONFIG $3 /* configuration register R3081*/ +#endif + +#if defined(CPU_R4000) +#define C0_CONFIG $16 /* configuration register */ +#define C0_LLADDR $17 /* linked load address */ +#define C0_WATCHLO $18 /* watchpoint trap register */ +#define C0_WATCHHI $19 /* watchpoint trap register */ +#define C0_XCTXT $20 /* extended tlb context */ +#define C0_ECC $26 /* secondary cache ECC control */ +#define C0_CACHEERR $27 /* cache error status */ +#define C0_TAGLO $28 /* cache tag lo */ +#define C0_TAGHI $29 /* cache tag hi */ +#define C0_ERRPC $30 /* cache error pc */ +#endif + +#endif XDS + +#ifdef R4650 +#define IWATCH $18 
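+/*
+** R4650 specific coprocessor 0 registers: IWatch and DWatch are the
+** instruction and data watchpoint registers, IBase/IBound and
+** DBase/DBound hold the base-bounds ranges used in place of a TLB,
+** and CAlg selects the cache algorithm for each region.
+*/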
+#define DWATCH $19 +#define IBASE $0 +#define IBOUND $1 +#define DBASE $2 +#define DBOUND $3 +#define CALG $17 +#endif + +#endif /* _IDTCPU_H__ */ + diff --git a/cpukit/score/cpu/mips/iregdef.h b/cpukit/score/cpu/mips/iregdef.h new file mode 100644 index 0000000000..e7a4ba5ed1 --- /dev/null +++ b/cpukit/score/cpu/mips/iregdef.h @@ -0,0 +1,324 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + +*/ + +/* +** iregdef.h - IDT R3000 register structure header file +** +** Copyright 1989 Integrated Device Technology, Inc +** All Rights Reserved +** +*/ +#ifndef __IREGDEF_H__ +#define __IREGDEF_H__ + +/* + * 950313: Ketan added sreg/lreg and R_SZ for 64-bit saves + * added Register definition for XContext reg. + * Look towards end of this file. + */ +/* +** register names +*/ +#define r0 $0 +#define r1 $1 +#define r2 $2 +#define r3 $3 +#define r4 $4 +#define r5 $5 +#define r6 $6 +#define r7 $7 +#define r8 $8 +#define r9 $9 +#define r10 $10 +#define r11 $11 +#define r12 $12 +#define r13 $13 + +#define r14 $14 +#define r15 $15 +#define r16 $16 +#define r17 $17 +#define r18 $18 +#define r19 $19 +#define r20 $20 +#define r21 $21 +#define r22 $22 +#define r23 $23 +#define r24 $24 +#define r25 $25 +#define r26 $26 +#define r27 $27 +#define r28 $28 +#define r29 $29 +#define r30 $30 +#define r31 $31 + +#define fp0 $f0 +#define fp1 $f1 +#define fp2 $f2 +#define fp3 $f3 +#define fp4 $f4 +#define fp5 $f5 +#define fp6 $f6 +#define fp7 $f7 +#define fp8 $f8 +#define fp9 $f9 +#define fp10 $f10 +#define fp11 $f11 +#define fp12 $f12 +#define fp13 $f13 +#define fp14 $f14 +#define fp15 $f15 +#define fp16 $f16 +#define fp17 $f17 +#define fp18 $f18 +#define fp19 $f19 +#define fp20 $f20 +#define fp21 $f21 +#define fp22 $f22 +#define fp23 $f23 +#define fp24 $f24 +#define fp25 $f25 +#define fp26 $f26 +#define fp27 $f27 +#define fp28 $f28 +#define fp29 $f29 +#define fp30 $f30 +#define fp31 $f31 + +#define fcr0 $0 +#define fcr30 $30 +#define fcr31 $31 + +#define zero $0 /* wired zero */ +#define AT $at /* assembler temp */ +#define v0 $2 /* return value */ +#define v1 $3 +#define a0 $4 /* argument registers a0-a3 */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define t0 $8 /* caller saved t0-t9 */ +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define s0 $16 /* callee saved s0-s8 */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 +#define t9 $25 +#define k0 $26 /* kernel usage */ +#define k1 $27 /* kernel usage */ +#define gp $28 /* sdata pointer */ +#define sp $29 /* stack pointer */ +#define s8 $30 /* yet another 
saved reg for the callee */ +#define fp $30 /* frame pointer - this is being phased out by MIPS */ +#define ra $31 /* return address */ + + +/* +** relative position of registers in save reg area +*/ +#define R_R0 0 +#define R_R1 1 +#define R_R2 2 +#define R_R3 3 +#define R_R4 4 +#define R_R5 5 +#define R_R6 6 +#define R_R7 7 +#define R_R8 8 +#define R_R9 9 +#define R_R10 10 +#define R_R11 11 +#define R_R12 12 +#define R_R13 13 +#define R_R14 14 +#define R_R15 15 +#define R_R16 16 +#define R_R17 17 +#define R_R18 18 +#define R_R19 19 +#define R_R20 20 +#define R_R21 21 +#define R_R22 22 +#define R_R23 23 +#define R_R24 24 +#define R_R25 25 +#define R_R26 26 +#define R_R27 27 +#define R_R28 28 +#define R_R29 29 +#define R_R30 30 +#define R_R31 31 +#define R_F0 32 +#define R_F1 33 +#define R_F2 34 +#define R_F3 35 +#define R_F4 36 +#define R_F5 37 +#define R_F6 38 +#define R_F7 39 +#define R_F8 40 +#define R_F9 41 +#define R_F10 42 +#define R_F11 43 +#define R_F12 44 +#define R_F13 45 +#define R_F14 46 +#define R_F15 47 +#define R_F16 48 +#define R_F17 49 +#define R_F18 50 +#define R_F19 51 +#define R_F20 52 +#define R_F21 53 +#define R_F22 54 +#define R_F23 55 +#define R_F24 56 +#define R_F25 57 +#define R_F26 58 +#define R_F27 59 +#define R_F28 60 +#define R_F29 61 +#define R_F30 62 +#define R_F31 63 +#define NCLIENTREGS 64 +#define R_EPC 64 +#define R_MDHI 65 +#define R_MDLO 66 +#define R_SR 67 +#define R_CAUSE 68 +#define R_TLBHI 69 +#if defined(CPU_R3000) +#define R_TLBLO 70 +#endif +#if defined(CPU_R4000) +#define R_TLBLO0 70 +#endif +#define R_BADVADDR 71 +#define R_INX 72 +#define R_RAND 73 +#define R_CTXT 74 +#define R_EXCTYPE 75 +#define R_MODE 76 +#define R_PRID 77 +#define R_FCSR 78 +#define R_FEIR 79 +#if defined(CPU_R3000) +#define NREGS 80 +#endif +#if defined(CPU_R4000) +#define R_TLBLO1 80 +#define R_PAGEMASK 81 +#define R_WIRED 82 +#define R_COUNT 83 +#define R_COMPARE 84 +#define R_CONFIG 85 +#define R_LLADDR 86 +#define R_WATCHLO 87 +#define R_WATCHHI 88 +#define R_ECC 89 +#define R_CACHEERR 90 +#define R_TAGLO 91 +#define R_TAGHI 92 +#define R_ERRPC 93 +#define R_XCTXT 94 /* Ketan added from SIM64bit */ + +#define NREGS 95 +#endif + +/* +** For those who like to think in terms of the compiler names for the regs +*/ +#define R_ZERO R_R0 +#define R_AT R_R1 +#define R_V0 R_R2 +#define R_V1 R_R3 +#define R_A0 R_R4 +#define R_A1 R_R5 +#define R_A2 R_R6 +#define R_A3 R_R7 +#define R_T0 R_R8 +#define R_T1 R_R9 +#define R_T2 R_R10 +#define R_T3 R_R11 +#define R_T4 R_R12 +#define R_T5 R_R13 +#define R_T6 R_R14 +#define R_T7 R_R15 +#define R_S0 R_R16 +#define R_S1 R_R17 +#define R_S2 R_R18 +#define R_S3 R_R19 +#define R_S4 R_R20 +#define R_S5 R_R21 +#define R_S6 R_R22 +#define R_S7 R_R23 +#define R_T8 R_R24 +#define R_T9 R_R25 +#define R_K0 R_R26 +#define R_K1 R_R27 +#define R_GP R_R28 +#define R_SP R_R29 +#define R_FP R_R30 +#define R_RA R_R31 + +/* Ketan added the following */ +#ifdef CPU_R3000 +#define sreg sw +#define lreg lw +#define rmfc0 mfc0 +#define rmtc0 mtc0 +#define R_SZ 4 +#endif CPU_R3000 + +#ifdef CPU_R4000 +#if __mips < 3 +#define sreg sw +#define lreg lw +#define rmfc0 mfc0 +#define rmtc0 mtc0 +#define R_SZ 4 +#else +#define sreg sd +#define lreg ld +#define rmfc0 dmfc0 +#define rmtc0 dmtc0 +#define R_SZ 8 +#endif +#endif CPU_R4000 +/* Ketan till here */ + +#endif /* __IREGDEF_H__ */ + diff --git a/cpukit/score/cpu/mips/rtems/asm.h b/cpukit/score/cpu/mips/rtems/asm.h new file mode 100644 index 0000000000..d5a5d69eaa --- /dev/null +++ 
b/cpukit/score/cpu/mips/rtems/asm.h @@ -0,0 +1,99 @@ +/* asm.h + * + * This include file attempts to address the problems + * caused by incompatible flavors of assemblers and + * toolsets. It primarily addresses variations in the + * use of leading underscores on symbols and the requirement + * that register names be preceded by a %. + * + * + * NOTE: The spacing in the use of these macros + * is critical to them working as advertised. + * + * COPYRIGHT: + * + * This file is based on similar code found in newlib available + * from ftp.cygnus.com. The file which was used had no copyright + * notice. This file is freely distributable as long as the source + * of the file is noted. This file is: + * + * COPYRIGHT (c) 1994. + * On-Line Applications Research Corporation (OAR). + * + * asm.h,v 1.4 1995/09/26 19:25:36 joel Exp + */ +/* @(#)asm.h 03/15/96 1.1 */ + +#ifndef __NO_CPU_ASM_h +#define __NO_CPU_ASM_h + +/* + * Indicate we are in an assembly file and get the basic CPU definitions. + */ + +#define ASM +#include + +/* + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + */ + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ _ +#endif + +#ifndef __REGISTER_PREFIX__ +#define __REGISTER_PREFIX__ +#endif + +/* ANSI concatenation macros. */ + +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ + +#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) + +/* Use the right prefix for registers. */ + +#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) + +/* + * define macros for all of the registers on this CPU + * + * EXAMPLE: #define d0 REG (d0) + */ + +/* + * Define macros to handle section beginning and ends. + */ + + +#define BEGIN_CODE_DCL .text +#define END_CODE_DCL +#define BEGIN_DATA_DCL .data +#define END_DATA_DCL +#define BEGIN_CODE .text +#define END_CODE +#define BEGIN_DATA +#define END_DATA +#define BEGIN_BSS +#define END_BSS +#define END + +/* + * Following must be tailor for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ + +#define PUBLIC(sym) .globl SYM (sym) +#define EXTERN(sym) .globl SYM (sym) + +#endif +/* end of include file */ + + diff --git a/cpukit/score/cpu/mips/rtems/mips/idtcpu.h b/cpukit/score/cpu/mips/rtems/mips/idtcpu.h new file mode 100644 index 0000000000..833c5ebe02 --- /dev/null +++ b/cpukit/score/cpu/mips/rtems/mips/idtcpu.h @@ -0,0 +1,439 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. 
+ +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + +*/ + +/* +** idtcpu.h -- cpu related defines +*/ + +#ifndef _IDTCPU_H__ +#define _IDTCPU_H__ + +/* + * 950313: Ketan added Register definition for XContext reg. + * added define for WAIT instruction. + * 950421: Ketan added Register definition for Config reg (R3081) + */ + +/* +** memory configuration and mapping +*/ +#define K0BASE 0x80000000 +#define K0SIZE 0x20000000 +#define K1BASE 0xa0000000 +#define K1SIZE 0x20000000 +#define K2BASE 0xc0000000 +#define K2SIZE 0x20000000 +#if defined(CPU_R4000) +#define KSBASE 0xe0000000 +#define KSSIZE 0x20000000 +#endif + +#define KUBASE 0 +#define KUSIZE 0x80000000 + +/* +** Exception Vectors +*/ +#if defined(CPU_R3000) +#define UT_VEC K0BASE /* utlbmiss vector */ +#define E_VEC (K0BASE+0x80) /* exception vevtor */ +#endif +#if defined(CPU_R4000) +#define T_VEC (K0BASE+0x000) /* tlbmiss vector */ +#define X_VEC (K0BASE+0x080) /* xtlbmiss vector */ +#define C_VEC (K0BASE+0x100) /* cache error vector */ +#define E_VEC (K0BASE+0x180) /* exception vector */ +#endif +#define R_VEC (K1BASE+0x1fc00000) /* reset vector */ + +/* +** Address conversion macros +*/ +#ifdef CLANGUAGE +#define CAST(as) (as) +#else +#define CAST(as) +#endif +#define K0_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* kseg0 to kseg1 */ +#define K1_TO_K0(x) (CAST(unsigned)(x)&0x9FFFFFFF) /* kseg1 to kseg0 */ +#define K0_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg0 to physical */ +#define K1_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg1 to physical */ +#define PHYS_TO_K0(x) (CAST(unsigned)(x)|0x80000000) /* physical to kseg0 */ +#define PHYS_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* physical to kseg1 */ + +/* +** Cache size constants +*/ +#define MINCACHE 0x200 /* 512 For 3041. 
*/ +#define MAXCACHE 0x40000 /* 256*1024 256k */ + +#if defined(CPU_R4000) +/* R4000 configuration register definitions */ +#define CFG_CM 0x80000000 /* Master-Checker mode */ +#define CFG_ECMASK 0x70000000 /* System Clock Ratio */ +#define CFG_ECBY2 0x00000000 /* divide by 2 */ +#define CFG_ECBY3 0x10000000 /* divide by 3 */ +#define CFG_ECBY4 0x20000000 /* divide by 4 */ +#define CFG_EPMASK 0x0f000000 /* Transmit data pattern */ +#define CFG_EPD 0x00000000 /* D */ +#define CFG_EPDDX 0x01000000 /* DDX */ +#define CFG_EPDDXX 0x02000000 /* DDXX */ +#define CFG_EPDXDX 0x03000000 /* DXDX */ +#define CFG_EPDDXXX 0x04000000 /* DDXXX */ +#define CFG_EPDDXXXX 0x05000000 /* DDXXXX */ +#define CFG_EPDXXDXX 0x06000000 /* DXXDXX */ +#define CFG_EPDDXXXXX 0x07000000 /* DDXXXXX */ +#define CFG_EPDXXXDXXX 0x08000000 /* DXXXDXXX */ +#define CFG_SBMASK 0x00c00000 /* Secondary cache block size */ +#define CFG_SBSHIFT 22 +#define CFG_SB4 0x00000000 /* 4 words */ +#define CFG_SB8 0x00400000 /* 8 words */ +#define CFG_SB16 0x00800000 /* 16 words */ +#define CFG_SB32 0x00c00000 /* 32 words */ +#define CFG_SS 0x00200000 /* Split secondary cache */ +#define CFG_SW 0x00100000 /* Secondary cache port width */ +#define CFG_EWMASK 0x000c0000 /* System port width */ +#define CFG_EWSHIFT 18 +#define CFG_EW64 0x00000000 /* 64 bit */ +#define CFG_EW32 0x00010000 /* 32 bit */ +#define CFG_SC 0x00020000 /* Secondary cache absent */ +#define CFG_SM 0x00010000 /* Dirty Shared mode disabled */ +#define CFG_BE 0x00008000 /* Big Endian */ +#define CFG_EM 0x00004000 /* ECC mode enable */ +#define CFG_EB 0x00002000 /* Block ordering */ +#define CFG_ICMASK 0x00000e00 /* Instruction cache size */ +#define CFG_ICSHIFT 9 +#define CFG_DCMASK 0x000001c0 /* Data cache size */ +#define CFG_DCSHIFT 6 +#define CFG_IB 0x00000020 /* Instruction cache block size */ +#define CFG_DB 0x00000010 /* Data cache block size */ +#define CFG_CU 0x00000008 /* Update on Store Conditional */ +#define CFG_K0MASK 0x00000007 /* KSEG0 coherency algorithm */ + +/* + * R4000 primary cache mode + */ +#define CFG_C_UNCACHED 2 +#define CFG_C_NONCOHERENT 3 +#define CFG_C_COHERENTXCL 4 +#define CFG_C_COHERENTXCLW 5 +#define CFG_C_COHERENTUPD 6 + +/* + * R4000 cache operations (should be in assembler...?) 
+ */ +#define Index_Invalidate_I 0x0 /* 0 0 */ +#define Index_Writeback_Inv_D 0x1 /* 0 1 */ +#define Index_Invalidate_SI 0x2 /* 0 2 */ +#define Index_Writeback_Inv_SD 0x3 /* 0 3 */ +#define Index_Load_Tag_I 0x4 /* 1 0 */ +#define Index_Load_Tag_D 0x5 /* 1 1 */ +#define Index_Load_Tag_SI 0x6 /* 1 2 */ +#define Index_Load_Tag_SD 0x7 /* 1 3 */ +#define Index_Store_Tag_I 0x8 /* 2 0 */ +#define Index_Store_Tag_D 0x9 /* 2 1 */ +#define Index_Store_Tag_SI 0xA /* 2 2 */ +#define Index_Store_Tag_SD 0xB /* 2 3 */ +#define Create_Dirty_Exc_D 0xD /* 3 1 */ +#define Create_Dirty_Exc_SD 0xF /* 3 3 */ +#define Hit_Invalidate_I 0x10 /* 4 0 */ +#define Hit_Invalidate_D 0x11 /* 4 1 */ +#define Hit_Invalidate_SI 0x12 /* 4 2 */ +#define Hit_Invalidate_SD 0x13 /* 4 3 */ +#define Hit_Writeback_Inv_D 0x15 /* 5 1 */ +#define Hit_Writeback_Inv_SD 0x17 /* 5 3 */ +#define Fill_I 0x14 /* 5 0 */ +#define Hit_Writeback_D 0x19 /* 6 1 */ +#define Hit_Writeback_SD 0x1B /* 6 3 */ +#define Hit_Writeback_I 0x18 /* 6 0 */ +#define Hit_Set_Virtual_SI 0x1E /* 7 2 */ +#define Hit_Set_Virtual_SD 0x1F /* 7 3 */ + +#ifndef WAIT +#define WAIT .word 0x42000020 +#endif WAIT + +#ifndef wait +#define wait .word 0x42000020 +#endif wait + +#endif + +/* +** TLB resource defines +*/ +#if defined(CPU_R3000) +#define N_TLB_ENTRIES 64 +#define TLB_PGSIZE 0x1000 +#define RANDBASE 8 +#define TLBLO_PFNMASK 0xfffff000 +#define TLBLO_PFNSHIFT 12 +#define TLBLO_N 0x800 /* non-cacheable */ +#define TLBLO_D 0x400 /* writeable */ +#define TLBLO_V 0x200 /* valid bit */ +#define TLBLO_G 0x100 /* global access bit */ + +#define TLBHI_VPNMASK 0xfffff000 +#define TLBHI_VPNSHIFT 12 +#define TLBHI_PIDMASK 0xfc0 +#define TLBHI_PIDSHIFT 6 +#define TLBHI_NPID 64 + +#define TLBINX_PROBE 0x80000000 +#define TLBINX_INXMASK 0x00003f00 +#define TLBINX_INXSHIFT 8 + +#define TLBRAND_RANDMASK 0x00003f00 +#define TLBRAND_RANDSHIFT 8 + +#define TLBCTXT_BASEMASK 0xffe00000 +#define TLBCTXT_BASESHIFT 21 + +#define TLBCTXT_VPNMASK 0x001ffffc +#define TLBCTXT_VPNSHIFT 2 +#endif +#if defined(CPU_R4000) +#define N_TLB_ENTRIES 48 + +#define TLBHI_VPN2MASK 0xffffe000 +#define TLBHI_PIDMASK 0x000000ff +#define TLBHI_NPID 256 + +#define TLBLO_PFNMASK 0x3fffffc0 +#define TLBLO_PFNSHIFT 6 +#define TLBLO_D 0x00000004 /* writeable */ +#define TLBLO_V 0x00000002 /* valid bit */ +#define TLBLO_G 0x00000001 /* global access bit */ +#define TLBLO_CMASK 0x00000038 /* cache algorithm mask */ +#define TLBLO_CSHIFT 3 + +#define TLBLO_UNCACHED (CFG_C_UNCACHED< k, 1 => u */ +#define SR_IEO 0x00000010 /* old interrupt enable, 1 => enable */ +#define SR_KUP 0x00000008 /* prev kernel/user, 0 => k, 1 => u */ +#define SR_IEP 0x00000004 /* prev interrupt enable, 1 => enable */ +#define SR_KUC 0x00000002 /* cur kernel/user, 0 => k, 1 => u */ +#define SR_IEC 0x00000001 /* cur interrupt enable, 1 => enable */ +#endif + +#if defined(CPU_R4000) +#define SR_CUMASK 0xf0000000 /* coproc usable bits */ +#define SR_CU3 0x80000000 /* Coprocessor 3 usable */ +#define SR_CU2 0x40000000 /* Coprocessor 2 usable */ +#define SR_CU1 0x20000000 /* Coprocessor 1 usable */ +#define SR_CU0 0x10000000 /* Coprocessor 0 usable */ + +#define SR_RP 0x08000000 /* Reduced power operation */ +#define SR_FR 0x04000000 /* Additional floating point registers */ +#define SR_RE 0x02000000 /* Reverse endian in user mode */ + +#define SR_BEV 0x00400000 /* Use boot exception vectors */ +#define SR_TS 0x00200000 /* TLB shutdown */ +#define SR_SR 0x00100000 /* Soft reset */ +#define SR_CH 0x00040000 /* Cache hit */ +#define SR_CE 0x00020000 
/* Use cache ECC */ +#define SR_DE 0x00010000 /* Disable cache exceptions */ + +/* +** status register interrupt masks and bits +*/ + +#define SR_IMASK 0x0000ff00 /* Interrupt mask */ +#define SR_IMASK8 0x00000000 /* mask level 8 */ +#define SR_IMASK7 0x00008000 /* mask level 7 */ +#define SR_IMASK6 0x0000c000 /* mask level 6 */ +#define SR_IMASK5 0x0000e000 /* mask level 5 */ +#define SR_IMASK4 0x0000f000 /* mask level 4 */ +#define SR_IMASK3 0x0000f800 /* mask level 3 */ +#define SR_IMASK2 0x0000fc00 /* mask level 2 */ +#define SR_IMASK1 0x0000fe00 /* mask level 1 */ +#define SR_IMASK0 0x0000ff00 /* mask level 0 */ + +#define SR_IMASKSHIFT 8 + +#define SR_IBIT8 0x00008000 /* bit level 8 */ +#define SR_IBIT7 0x00004000 /* bit level 7 */ +#define SR_IBIT6 0x00002000 /* bit level 6 */ +#define SR_IBIT5 0x00001000 /* bit level 5 */ +#define SR_IBIT4 0x00000800 /* bit level 4 */ +#define SR_IBIT3 0x00000400 /* bit level 3 */ +#define SR_IBIT2 0x00000200 /* bit level 2 */ +#define SR_IBIT1 0x00000100 /* bit level 1 */ + +#define SR_KSMASK 0x00000018 /* Kernel mode mask */ +#define SR_KSUSER 0x00000010 /* User mode */ +#define SR_KSSUPER 0x00000008 /* Supervisor mode */ +#define SR_KSKERNEL 0x00000000 /* Kernel mode */ +#define SR_ERL 0x00000004 /* Error level */ +#define SR_EXL 0x00000002 /* Exception level */ +#define SR_IE 0x00000001 /* Interrupts enabled */ +#endif + + + +/* + * Cause Register + */ +#define CAUSE_BD 0x80000000 /* Branch delay slot */ +#define CAUSE_CEMASK 0x30000000 /* coprocessor error */ +#define CAUSE_CESHIFT 28 + + +#define CAUSE_IPMASK 0x0000FF00 /* Pending interrupt mask */ +#define CAUSE_IPSHIFT 8 + +#define CAUSE_EXCMASK 0x0000003C /* Cause code bits */ +#define CAUSE_EXCSHIFT 2 + +#ifndef XDS +/* +** Coprocessor 0 registers +*/ +#define C0_INX $0 /* tlb index */ +#define C0_RAND $1 /* tlb random */ +#if defined(CPU_R3000) +#define C0_TLBLO $2 /* tlb entry low */ +#endif +#if defined(CPU_R4000) +#define C0_TLBLO0 $2 /* tlb entry low 0 */ +#define C0_TLBLO1 $3 /* tlb entry low 1 */ +#endif + +#define C0_CTXT $4 /* tlb context */ + +#if defined(CPU_R4000) +#define C0_PAGEMASK $5 /* tlb page mask */ +#define C0_WIRED $6 /* number of wired tlb entries */ +#endif + +#define C0_BADVADDR $8 /* bad virtual address */ + +#if defined(CPU_R4000) +#define C0_COUNT $9 /* cycle count */ +#endif + +#define C0_TLBHI $10 /* tlb entry hi */ + +#if defined(CPU_R4000) +#define C0_COMPARE $11 /* cyccle count comparator */ +#endif + +#define C0_SR $12 /* status register */ +#define C0_CAUSE $13 /* exception cause */ +#define C0_EPC $14 /* exception pc */ +#define C0_PRID $15 /* revision identifier */ + +#if defined(CPU_R3000) +#define C0_CONFIG $3 /* configuration register R3081*/ +#endif + +#if defined(CPU_R4000) +#define C0_CONFIG $16 /* configuration register */ +#define C0_LLADDR $17 /* linked load address */ +#define C0_WATCHLO $18 /* watchpoint trap register */ +#define C0_WATCHHI $19 /* watchpoint trap register */ +#define C0_XCTXT $20 /* extended tlb context */ +#define C0_ECC $26 /* secondary cache ECC control */ +#define C0_CACHEERR $27 /* cache error status */ +#define C0_TAGLO $28 /* cache tag lo */ +#define C0_TAGHI $29 /* cache tag hi */ +#define C0_ERRPC $30 /* cache error pc */ +#endif + +#endif XDS + +#ifdef R4650 +#define IWATCH $18 +#define DWATCH $19 +#define IBASE $0 +#define IBOUND $1 +#define DBASE $2 +#define DBOUND $3 +#define CALG $17 +#endif + +#endif /* _IDTCPU_H__ */ + diff --git a/cpukit/score/cpu/mips/rtems/mips/iregdef.h 
b/cpukit/score/cpu/mips/rtems/mips/iregdef.h new file mode 100644 index 0000000000..e7a4ba5ed1 --- /dev/null +++ b/cpukit/score/cpu/mips/rtems/mips/iregdef.h @@ -0,0 +1,324 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + +*/ + +/* +** iregdef.h - IDT R3000 register structure header file +** +** Copyright 1989 Integrated Device Technology, Inc +** All Rights Reserved +** +*/ +#ifndef __IREGDEF_H__ +#define __IREGDEF_H__ + +/* + * 950313: Ketan added sreg/lreg and R_SZ for 64-bit saves + * added Register definition for XContext reg. + * Look towards end of this file. + */ +/* +** register names +*/ +#define r0 $0 +#define r1 $1 +#define r2 $2 +#define r3 $3 +#define r4 $4 +#define r5 $5 +#define r6 $6 +#define r7 $7 +#define r8 $8 +#define r9 $9 +#define r10 $10 +#define r11 $11 +#define r12 $12 +#define r13 $13 + +#define r14 $14 +#define r15 $15 +#define r16 $16 +#define r17 $17 +#define r18 $18 +#define r19 $19 +#define r20 $20 +#define r21 $21 +#define r22 $22 +#define r23 $23 +#define r24 $24 +#define r25 $25 +#define r26 $26 +#define r27 $27 +#define r28 $28 +#define r29 $29 +#define r30 $30 +#define r31 $31 + +#define fp0 $f0 +#define fp1 $f1 +#define fp2 $f2 +#define fp3 $f3 +#define fp4 $f4 +#define fp5 $f5 +#define fp6 $f6 +#define fp7 $f7 +#define fp8 $f8 +#define fp9 $f9 +#define fp10 $f10 +#define fp11 $f11 +#define fp12 $f12 +#define fp13 $f13 +#define fp14 $f14 +#define fp15 $f15 +#define fp16 $f16 +#define fp17 $f17 +#define fp18 $f18 +#define fp19 $f19 +#define fp20 $f20 +#define fp21 $f21 +#define fp22 $f22 +#define fp23 $f23 +#define fp24 $f24 +#define fp25 $f25 +#define fp26 $f26 +#define fp27 $f27 +#define fp28 $f28 +#define fp29 $f29 +#define fp30 $f30 +#define fp31 $f31 + +#define fcr0 $0 +#define fcr30 $30 +#define fcr31 $31 + +#define zero $0 /* wired zero */ +#define AT $at /* assembler temp */ +#define v0 $2 /* return value */ +#define v1 $3 +#define a0 $4 /* argument registers a0-a3 */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define t0 $8 /* caller saved t0-t9 */ +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define s0 $16 /* callee saved s0-s8 */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 +#define t9 $25 +#define k0 $26 /* kernel usage */ +#define k1 $27 /* kernel usage */ +#define gp $28 /* sdata pointer */ +#define sp $29 /* stack pointer */ +#define s8 $30 /* yet another saved reg for the callee */ +#define fp $30 /* frame pointer - this is being phased out by MIPS */ +#define ra $31 /* return address */ + + +/* +** relative position of 
registers in save reg area +*/ +#define R_R0 0 +#define R_R1 1 +#define R_R2 2 +#define R_R3 3 +#define R_R4 4 +#define R_R5 5 +#define R_R6 6 +#define R_R7 7 +#define R_R8 8 +#define R_R9 9 +#define R_R10 10 +#define R_R11 11 +#define R_R12 12 +#define R_R13 13 +#define R_R14 14 +#define R_R15 15 +#define R_R16 16 +#define R_R17 17 +#define R_R18 18 +#define R_R19 19 +#define R_R20 20 +#define R_R21 21 +#define R_R22 22 +#define R_R23 23 +#define R_R24 24 +#define R_R25 25 +#define R_R26 26 +#define R_R27 27 +#define R_R28 28 +#define R_R29 29 +#define R_R30 30 +#define R_R31 31 +#define R_F0 32 +#define R_F1 33 +#define R_F2 34 +#define R_F3 35 +#define R_F4 36 +#define R_F5 37 +#define R_F6 38 +#define R_F7 39 +#define R_F8 40 +#define R_F9 41 +#define R_F10 42 +#define R_F11 43 +#define R_F12 44 +#define R_F13 45 +#define R_F14 46 +#define R_F15 47 +#define R_F16 48 +#define R_F17 49 +#define R_F18 50 +#define R_F19 51 +#define R_F20 52 +#define R_F21 53 +#define R_F22 54 +#define R_F23 55 +#define R_F24 56 +#define R_F25 57 +#define R_F26 58 +#define R_F27 59 +#define R_F28 60 +#define R_F29 61 +#define R_F30 62 +#define R_F31 63 +#define NCLIENTREGS 64 +#define R_EPC 64 +#define R_MDHI 65 +#define R_MDLO 66 +#define R_SR 67 +#define R_CAUSE 68 +#define R_TLBHI 69 +#if defined(CPU_R3000) +#define R_TLBLO 70 +#endif +#if defined(CPU_R4000) +#define R_TLBLO0 70 +#endif +#define R_BADVADDR 71 +#define R_INX 72 +#define R_RAND 73 +#define R_CTXT 74 +#define R_EXCTYPE 75 +#define R_MODE 76 +#define R_PRID 77 +#define R_FCSR 78 +#define R_FEIR 79 +#if defined(CPU_R3000) +#define NREGS 80 +#endif +#if defined(CPU_R4000) +#define R_TLBLO1 80 +#define R_PAGEMASK 81 +#define R_WIRED 82 +#define R_COUNT 83 +#define R_COMPARE 84 +#define R_CONFIG 85 +#define R_LLADDR 86 +#define R_WATCHLO 87 +#define R_WATCHHI 88 +#define R_ECC 89 +#define R_CACHEERR 90 +#define R_TAGLO 91 +#define R_TAGHI 92 +#define R_ERRPC 93 +#define R_XCTXT 94 /* Ketan added from SIM64bit */ + +#define NREGS 95 +#endif + +/* +** For those who like to think in terms of the compiler names for the regs +*/ +#define R_ZERO R_R0 +#define R_AT R_R1 +#define R_V0 R_R2 +#define R_V1 R_R3 +#define R_A0 R_R4 +#define R_A1 R_R5 +#define R_A2 R_R6 +#define R_A3 R_R7 +#define R_T0 R_R8 +#define R_T1 R_R9 +#define R_T2 R_R10 +#define R_T3 R_R11 +#define R_T4 R_R12 +#define R_T5 R_R13 +#define R_T6 R_R14 +#define R_T7 R_R15 +#define R_S0 R_R16 +#define R_S1 R_R17 +#define R_S2 R_R18 +#define R_S3 R_R19 +#define R_S4 R_R20 +#define R_S5 R_R21 +#define R_S6 R_R22 +#define R_S7 R_R23 +#define R_T8 R_R24 +#define R_T9 R_R25 +#define R_K0 R_R26 +#define R_K1 R_R27 +#define R_GP R_R28 +#define R_SP R_R29 +#define R_FP R_R30 +#define R_RA R_R31 + +/* Ketan added the following */ +#ifdef CPU_R3000 +#define sreg sw +#define lreg lw +#define rmfc0 mfc0 +#define rmtc0 mtc0 +#define R_SZ 4 +#endif CPU_R3000 + +#ifdef CPU_R4000 +#if __mips < 3 +#define sreg sw +#define lreg lw +#define rmfc0 mfc0 +#define rmtc0 mtc0 +#define R_SZ 4 +#else +#define sreg sd +#define lreg ld +#define rmfc0 dmfc0 +#define rmtc0 dmtc0 +#define R_SZ 8 +#endif +#endif CPU_R4000 +/* Ketan till here */ + +#endif /* __IREGDEF_H__ */ + diff --git a/cpukit/score/cpu/mips64orion/asm.h b/cpukit/score/cpu/mips64orion/asm.h new file mode 100644 index 0000000000..d5a5d69eaa --- /dev/null +++ b/cpukit/score/cpu/mips64orion/asm.h @@ -0,0 +1,99 @@ +/* asm.h + * + * This include file attempts to address the problems + * caused by incompatible flavors of assemblers and + * toolsets. 
It primarily addresses variations in the + * use of leading underscores on symbols and the requirement + * that register names be preceded by a %. + * + * + * NOTE: The spacing in the use of these macros + * is critical to them working as advertised. + * + * COPYRIGHT: + * + * This file is based on similar code found in newlib available + * from ftp.cygnus.com. The file which was used had no copyright + * notice. This file is freely distributable as long as the source + * of the file is noted. This file is: + * + * COPYRIGHT (c) 1994. + * On-Line Applications Research Corporation (OAR). + * + * asm.h,v 1.4 1995/09/26 19:25:36 joel Exp + */ +/* @(#)asm.h 03/15/96 1.1 */ + +#ifndef __NO_CPU_ASM_h +#define __NO_CPU_ASM_h + +/* + * Indicate we are in an assembly file and get the basic CPU definitions. + */ + +#define ASM +#include + +/* + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + */ + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ _ +#endif + +#ifndef __REGISTER_PREFIX__ +#define __REGISTER_PREFIX__ +#endif + +/* ANSI concatenation macros. */ + +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ + +#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) + +/* Use the right prefix for registers. */ + +#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) + +/* + * define macros for all of the registers on this CPU + * + * EXAMPLE: #define d0 REG (d0) + */ + +/* + * Define macros to handle section beginning and ends. + */ + + +#define BEGIN_CODE_DCL .text +#define END_CODE_DCL +#define BEGIN_DATA_DCL .data +#define END_DATA_DCL +#define BEGIN_CODE .text +#define END_CODE +#define BEGIN_DATA +#define END_DATA +#define BEGIN_BSS +#define END_BSS +#define END + +/* + * Following must be tailor for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ + +#define PUBLIC(sym) .globl SYM (sym) +#define EXTERN(sym) .globl SYM (sym) + +#endif +/* end of include file */ + + diff --git a/cpukit/score/cpu/mips64orion/cpu.c b/cpukit/score/cpu/mips64orion/cpu.c new file mode 100644 index 0000000000..ec8c73393e --- /dev/null +++ b/cpukit/score/cpu/mips64orion/cpu.c @@ -0,0 +1,216 @@ +/* + * Mips CPU Dependent Source + * + * Author: Craig Lebakken + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu.c: + * + * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994. + * On-Line Applications Research Corporation (OAR). + * All rights assigned to U.S. Government, 1994. + * + * This material may be reproduced by or for the U.S. Government pursuant + * to the copyright license under the clause at DFARS 252.227-7013. 
This + * notice must appear in all copies of this file and its derivatives. + * + * cpu.c,v 1.7 1995/09/26 19:25:37 joel Exp + */ + +#ifndef lint +static char _sccsid[] = "@(#)cpu.c 08/20/96 1.5\n"; +#endif + +#include +#include +#include + + +ISR_Handler_entry _ISR_Vector_table[ ISR_NUMBER_OF_VECTORS ]; + +/* _CPU_Initialize + * + * This routine performs processor dependent initialization. + * + * INPUT PARAMETERS: + * cpu_table - CPU table to initialize + * thread_dispatch - address of disptaching routine + */ + + +void null_handler( void ) +{ +} + + +void _CPU_Initialize( + rtems_cpu_table *cpu_table, + void (*thread_dispatch) /* ignored on this CPU */ +) +{ + unsigned int i = ISR_NUMBER_OF_VECTORS; + + while ( i-- ) + { + _ISR_Vector_table[i] = (ISR_Handler_entry)null_handler; + } + + /* + * The thread_dispatch argument is the address of the entry point + * for the routine called at the end of an ISR once it has been + * decided a context switch is necessary. On some compilation + * systems it is difficult to call a high-level language routine + * from assembly. This allows us to trick these systems. + * + * If you encounter this problem save the entry point in a CPU + * dependent variable. + */ + + _CPU_Thread_dispatch_pointer = thread_dispatch; + + /* + * If there is not an easy way to initialize the FP context + * during Context_Initialize, then it is usually easier to + * save an "uninitialized" FP context here and copy it to + * the task's during Context_Initialize. + */ + + /* FP context initialization support goes here */ + + _CPU_Table = *cpu_table; + +} + +/*PAGE + * + * _CPU_ISR_Get_level + */ + +#if 0 /* located in cpu_asm.S */ +unsigned32 _CPU_ISR_Get_level( void ) +{ + /* + * This routine returns the current interrupt level. + */ +} +#endif + +/*PAGE + * + * _CPU_ISR_install_raw_handler + */ + +void _CPU_ISR_install_raw_handler( + unsigned32 vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + /* + * This is where we install the interrupt handler into the "raw" interrupt + * table used by the CPU to dispatch interrupt handlers. + */ + +#if 0 /* not necessary */ +/* use IDT/Sim to set interrupt vector. Needed to co-exist with debugger. */ + add_ext_int_func( vector, new_handler ); +#endif +} + +/*PAGE + * + * _CPU_ISR_install_vector + * + * This kernel routine installs the RTEMS handler for the + * specified vector. + * + * Input parameters: + * vector - interrupt vector number + * old_handler - former ISR for this vector number + * new_handler - replacement ISR for this vector number + * + * Output parameters: NONE + * + */ + +void _CPU_ISR_install_vector( + unsigned32 vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + *old_handler = _ISR_Vector_table[ vector ]; + + /* + * If the interrupt vector table is a table of pointer to isr entry + * points, then we need to install the appropriate RTEMS interrupt + * handler for this vector number. + */ + + _CPU_ISR_install_raw_handler( vector, _ISR_Handler, old_handler ); + + /* + * We put the actual user ISR address in '_ISR_vector_table'. This will + * be used by the _ISR_Handler so the user gets control. + */ + + _ISR_Vector_table[ vector ] = new_handler; +} + +/*PAGE + * + * _CPU_Install_interrupt_stack + */ + +void _CPU_Install_interrupt_stack( void ) +{ +/* we don't support this yet */ +} + +/*PAGE + * + * _CPU_Internal_threads_Idle_thread_body + * + * NOTES: + * + * 1. This is the same as the regular CPU independent algorithm. + * + * 2. 
If you implement this using a "halt", "idle", or "shutdown" + * instruction, then don't forget to put it in an infinite loop. + * + * 3. Be warned. Some processors with onboard DMA have been known + * to stop the DMA if the CPU were put in IDLE mode. This might + * also be a problem with other on-chip peripherals. So use this + * hook with caution. + */ + +#if 0 /* located in cpu_asm.S */ +void _CPU_Thread_Idle_body( void ) +{ + + for( ; ; ) + /* insert your "halt" instruction here */ ; +} +#endif + +extern void mips_break( int error ); + +#include + +void mips_fatal_error( int error ) +{ + printf("fatal error 0x%x %d\n",error,error); + mips_break( error ); +} diff --git a/cpukit/score/cpu/mips64orion/cpu_asm.S b/cpukit/score/cpu/mips64orion/cpu_asm.S new file mode 100644 index 0000000000..d9e38231bf --- /dev/null +++ b/cpukit/score/cpu/mips64orion/cpu_asm.S @@ -0,0 +1,972 @@ +/* cpu_asm.S + * + * This file contains the basic algorithms for all assembly code used + * in an specific CPU port of RTEMS. These algorithms must be implemented + * in assembly language + * + * Author: Craig Lebakken + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.s: + * + * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994. + * On-Line Applications Research Corporation (OAR). + * All rights assigned to U.S. Government, 1994. + * + * This material may be reproduced by or for the U.S. Government pursuant + * to the copyright license under the clause at DFARS 252.227-7013. This + * notice must appear in all copies of this file and its derivatives. 
+ * + * cpu_asm.c,v 1.5 1995/09/26 19:25:39 joel Exp + */ +/* @(#)cpu_asm.S 08/20/96 1.15 */ + +#include "cpu_asm.h" + +#include "iregdef.h" +#include "idtcpu.h" + +#define FRAME(name,frm_reg,offset,ret_reg) \ + .globl name; \ + .ent name; \ +name:; \ + .frame frm_reg,offset,ret_reg +#define ENDFRAME(name) \ + .end name + + +#define EXCP_STACK_SIZE (NREGS*R_SZ) + +#if __ghs__ +#define sd sw +#define ld lw +#define dmtc0 mtc0 +#define dsll sll +#define dmfc0 mfc0 +#endif + +#if 1 /* 32 bit unsigned32 types */ +#define sint sw +#define lint lw +#define stackadd addiu +#define intadd addu +#define SZ_INT 4 +#define SZ_INT_POW2 2 +#else /* 64 bit unsigned32 types */ +#define sint dw +#define lint dw +#define stackadd daddiu +#define intadd daddu +#define SZ_INT 8 +#define SZ_INT_POW2 3 +#endif + +#ifdef __GNUC__ +#define EXTERN(x,size) .extern x,size +#else +#define EXTERN(x,size) +#endif + +/* NOTE: these constants must match the Context_Control structure in cpu.h */ +#define S0_OFFSET 0 +#define S1_OFFSET 1 +#define S2_OFFSET 2 +#define S3_OFFSET 3 +#define S4_OFFSET 4 +#define S5_OFFSET 5 +#define S6_OFFSET 6 +#define S7_OFFSET 7 +#define SP_OFFSET 8 +#define FP_OFFSET 9 +#define RA_OFFSET 10 +#define C0_SR_OFFSET 11 +#define C0_EPC_OFFSET 12 + +/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */ +#define FP0_OFFSET 0 +#define FP1_OFFSET 1 +#define FP2_OFFSET 2 +#define FP3_OFFSET 3 +#define FP4_OFFSET 4 +#define FP5_OFFSET 5 +#define FP6_OFFSET 6 +#define FP7_OFFSET 7 +#define FP8_OFFSET 8 +#define FP9_OFFSET 9 +#define FP10_OFFSET 10 +#define FP11_OFFSET 11 +#define FP12_OFFSET 12 +#define FP13_OFFSET 13 +#define FP14_OFFSET 14 +#define FP15_OFFSET 15 +#define FP16_OFFSET 16 +#define FP17_OFFSET 17 +#define FP18_OFFSET 18 +#define FP19_OFFSET 19 +#define FP20_OFFSET 20 +#define FP21_OFFSET 21 +#define FP22_OFFSET 22 +#define FP23_OFFSET 23 +#define FP24_OFFSET 24 +#define FP25_OFFSET 25 +#define FP26_OFFSET 26 +#define FP27_OFFSET 27 +#define FP28_OFFSET 28 +#define FP29_OFFSET 29 +#define FP30_OFFSET 30 +#define FP31_OFFSET 31 + + +/*PAGE + * + * _CPU_ISR_Get_level + */ + +#if 0 +unsigned32 _CPU_ISR_Get_level( void ) +{ + /* + * This routine returns the current interrupt level. + */ +} +#endif +/* return the current exception level for the 4650 */ +FRAME(_CPU_ISR_Get_level,sp,0,ra) + mfc0 v0,C0_SR + nop + andi v0,SR_EXL + srl v0,1 + j ra +ENDFRAME(_CPU_ISR_Get_level) + +FRAME(_CPU_ISR_Set_level,sp,0,ra) + nop + mfc0 a0,C0_SR + nop + andi a0,SR_EXL + beqz a0,_CPU_ISR_Set_1 /* normalize a0 */ + nop + li a0,1 +_CPU_ISR_Set_1: + beq v0,a0,_CPU_ISR_Set_exit /* if (current_level != new_level ) */ + nop + bnez a0,_CPU_ISR_Set_2 + nop + nop + mfc0 t0,C0_SR + nop + li t1,~SR_EXL + and t0,t1 + nop + mtc0 t0,C0_SR /* disable exception level */ + nop + j ra + nop +_CPU_ISR_Set_2: + nop + mfc0 t0,C0_SR + nop + li t1,~SR_IE + and t0,t1 + nop + mtc0 t0,C0_SR /* first disable ie bit (recommended) */ + nop + ori t0,SR_EXL|SR_IE /* enable exception level */ + nop + mtc0 t0,C0_SR + nop +_CPU_ISR_Set_exit: + j ra + nop +ENDFRAME(_CPU_ISR_Set_level) + +/* + * _CPU_Context_save_fp_context + * + * This routine is responsible for saving the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). 
The general rule on making this decision + * is to avoid writing assembly language. + */ + +/* void _CPU_Context_save_fp( + * void **fp_context_ptr + * ) + * { + * } + */ + +FRAME(_CPU_Context_save_fp,sp,0,ra) + .set noat + ld a1,(a0) + swc1 $f0,FP0_OFFSET*4(a1) + swc1 $f1,FP1_OFFSET*4(a1) + swc1 $f2,FP2_OFFSET*4(a1) + swc1 $f3,FP3_OFFSET*4(a1) + swc1 $f4,FP4_OFFSET*4(a1) + swc1 $f5,FP5_OFFSET*4(a1) + swc1 $f6,FP6_OFFSET*4(a1) + swc1 $f7,FP7_OFFSET*4(a1) + swc1 $f8,FP8_OFFSET*4(a1) + swc1 $f9,FP9_OFFSET*4(a1) + swc1 $f10,FP10_OFFSET*4(a1) + swc1 $f11,FP11_OFFSET*4(a1) + swc1 $f12,FP12_OFFSET*4(a1) + swc1 $f13,FP13_OFFSET*4(a1) + swc1 $f14,FP14_OFFSET*4(a1) + swc1 $f15,FP15_OFFSET*4(a1) + swc1 $f16,FP16_OFFSET*4(a1) + swc1 $f17,FP17_OFFSET*4(a1) + swc1 $f18,FP18_OFFSET*4(a1) + swc1 $f19,FP19_OFFSET*4(a1) + swc1 $f20,FP20_OFFSET*4(a1) + swc1 $f21,FP21_OFFSET*4(a1) + swc1 $f22,FP22_OFFSET*4(a1) + swc1 $f23,FP23_OFFSET*4(a1) + swc1 $f24,FP24_OFFSET*4(a1) + swc1 $f25,FP25_OFFSET*4(a1) + swc1 $f26,FP26_OFFSET*4(a1) + swc1 $f27,FP27_OFFSET*4(a1) + swc1 $f28,FP28_OFFSET*4(a1) + swc1 $f29,FP29_OFFSET*4(a1) + swc1 $f30,FP30_OFFSET*4(a1) + swc1 $f31,FP31_OFFSET*4(a1) + j ra + nop + .set at +ENDFRAME(_CPU_Context_save_fp) + +/* + * _CPU_Context_restore_fp_context + * + * This routine is responsible for restoring the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). The general rule on making this decision + * is to avoid writing assembly language. + */ + +/* void _CPU_Context_restore_fp( + * void **fp_context_ptr + * ) + * { + * } + */ + +FRAME(_CPU_Context_restore_fp,sp,0,ra) + .set noat + ld a1,(a0) + lwc1 $f0,FP0_OFFSET*4(a1) + lwc1 $f1,FP1_OFFSET*4(a1) + lwc1 $f2,FP2_OFFSET*4(a1) + lwc1 $f3,FP3_OFFSET*4(a1) + lwc1 $f4,FP4_OFFSET*4(a1) + lwc1 $f5,FP5_OFFSET*4(a1) + lwc1 $f6,FP6_OFFSET*4(a1) + lwc1 $f7,FP7_OFFSET*4(a1) + lwc1 $f8,FP8_OFFSET*4(a1) + lwc1 $f9,FP9_OFFSET*4(a1) + lwc1 $f10,FP10_OFFSET*4(a1) + lwc1 $f11,FP11_OFFSET*4(a1) + lwc1 $f12,FP12_OFFSET*4(a1) + lwc1 $f13,FP13_OFFSET*4(a1) + lwc1 $f14,FP14_OFFSET*4(a1) + lwc1 $f15,FP15_OFFSET*4(a1) + lwc1 $f16,FP16_OFFSET*4(a1) + lwc1 $f17,FP17_OFFSET*4(a1) + lwc1 $f18,FP18_OFFSET*4(a1) + lwc1 $f19,FP19_OFFSET*4(a1) + lwc1 $f20,FP20_OFFSET*4(a1) + lwc1 $f21,FP21_OFFSET*4(a1) + lwc1 $f22,FP22_OFFSET*4(a1) + lwc1 $f23,FP23_OFFSET*4(a1) + lwc1 $f24,FP24_OFFSET*4(a1) + lwc1 $f25,FP25_OFFSET*4(a1) + lwc1 $f26,FP26_OFFSET*4(a1) + lwc1 $f27,FP27_OFFSET*4(a1) + lwc1 $f28,FP28_OFFSET*4(a1) + lwc1 $f29,FP29_OFFSET*4(a1) + lwc1 $f30,FP30_OFFSET*4(a1) + lwc1 $f31,FP31_OFFSET*4(a1) + j ra + nop + .set at +ENDFRAME(_CPU_Context_restore_fp) + +/* _CPU_Context_switch + * + * This routine performs a normal non-FP context switch. 
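+ *
+ * (Added note, inferred from the S0_OFFSET..C0_EPC_OFFSET constants
+ * above rather than copied from cpu.h:) run and heir point at a
+ * structure laid out in 8-byte slots roughly like
+ *
+ *     typedef struct {
+ *         unsigned64 s0, s1, s2, s3, s4, s5, s6, s7;
+ *         unsigned64 sp, fp, ra;
+ *         unsigned64 c0_sr;
+ *         unsigned64 c0_epc;
+ *     } Context_Control;
+ *
+ * where c0_sr holds the status register and c0_epc the exception PC, so
+ * each "sd reg,X_OFFSET*8(a0)" below fills the corresponding slot.  The
+ * authoritative definition is the Context_Control in cpu.h.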
+ */ + +/* void _CPU_Context_switch( + * Context_Control *run, + * Context_Control *heir + * ) + * { + * } + */ + +FRAME(_CPU_Context_switch,sp,0,ra) + + mfc0 t0,C0_SR + li t1,~SR_IE + sd t0,C0_SR_OFFSET*8(a0) /* save status register */ + and t0,t1 + mtc0 t0,C0_SR /* first disable ie bit (recommended) */ + ori t0,SR_EXL|SR_IE /* enable exception level to disable interrupts */ + mtc0 t0,C0_SR + + sd ra,RA_OFFSET*8(a0) /* save current context */ + sd sp,SP_OFFSET*8(a0) + sd fp,FP_OFFSET*8(a0) + sd s0,S0_OFFSET*8(a0) + sd s1,S1_OFFSET*8(a0) + sd s2,S2_OFFSET*8(a0) + sd s3,S3_OFFSET*8(a0) + sd s4,S4_OFFSET*8(a0) + sd s5,S5_OFFSET*8(a0) + sd s6,S6_OFFSET*8(a0) + sd s7,S7_OFFSET*8(a0) + dmfc0 t0,C0_EPC + sd t0,C0_EPC_OFFSET*8(a0) + +_CPU_Context_switch_restore: + ld s0,S0_OFFSET*8(a1) /* restore context */ + ld s1,S1_OFFSET*8(a1) + ld s2,S2_OFFSET*8(a1) + ld s3,S3_OFFSET*8(a1) + ld s4,S4_OFFSET*8(a1) + ld s5,S5_OFFSET*8(a1) + ld s6,S6_OFFSET*8(a1) + ld s7,S7_OFFSET*8(a1) + ld fp,FP_OFFSET*8(a1) + ld sp,SP_OFFSET*8(a1) + ld ra,RA_OFFSET*8(a1) + ld t0,C0_EPC_OFFSET*8(a1) + dmtc0 t0,C0_EPC + ld t0,C0_SR_OFFSET*8(a1) + andi t0,SR_EXL + bnez t0,_CPU_Context_1 /* set exception level from restore context */ + li t0,~SR_EXL + mfc0 t1,C0_SR + nop + and t1,t0 + mtc0 t1,C0_SR +_CPU_Context_1: + j ra + nop +ENDFRAME(_CPU_Context_switch) + +/* + * _CPU_Context_restore + * + * This routine is generally used only to restart self in an + * efficient manner. It may simply be a label in _CPU_Context_switch. + * + * NOTE: May be unnecessary to reload some registers. + */ + +#if 0 +void _CPU_Context_restore( + Context_Control *new_context +) +{ +} +#endif + +FRAME(_CPU_Context_restore,sp,0,ra) + dadd a1,a0,zero + j _CPU_Context_switch_restore + nop +ENDFRAME(_CPU_Context_restore) + +EXTERN(_ISR_Nest_level, SZ_INT) +EXTERN(_Thread_Dispatch_disable_level,SZ_INT) +EXTERN(_Context_Switch_necessary,SZ_INT) +EXTERN(_ISR_Signals_to_thread_executing,SZ_INT) +.extern _Thread_Dispatch +.extern _ISR_Vector_table + +/* void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + */ + +#if 0 +void _ISR_Handler() +{ + /* + * This discussion ignores a lot of the ugly details in a real + * implementation such as saving enough registers/state to be + * able to do something real. Keep in mind that the goal is + * to invoke a user's ISR handler which is written in C and + * uses a certain set of registers. + * + * Also note that the exact order is to a large extent flexible. + * Hardware will dictate a sequence for a certain subset of + * _ISR_Handler while requirements for setting + */ + + /* + * At entry to "common" _ISR_Handler, the vector number must be + * available. On some CPUs the hardware puts either the vector + * number or the offset into the vector table for this ISR in a + * known place. If the hardware does not give us this information, + * then the assembly portion of RTEMS for this port will contain + * a set of distinct interrupt entry points which somehow place + * the vector number in a known place (which is safe if another + * interrupt nests this one) and branches to _ISR_Handler. + * + */ +#endif +FRAME(_ISR_Handler,sp,0,ra) +.set noreorder +#if USE_IDTKIT +/* IDT/Kit incorrectly adds 4 to EPC before returning. 
This compensates */ + lreg k0, R_EPC*R_SZ(sp) + daddiu k0,k0,-4 + sreg k0, R_EPC*R_SZ(sp) + lreg k0, R_CAUSE*R_SZ(sp) + li k1, ~CAUSE_BD + and k0, k1 + sreg k0, R_CAUSE*R_SZ(sp) +#endif + +/* save registers not already saved by IDT/sim */ + stackadd sp,sp,-EXCP_STACK_SIZE /* store ra on the stack */ + + sreg ra, R_RA*R_SZ(sp) + sreg v0, R_V0*R_SZ(sp) + sreg v1, R_V1*R_SZ(sp) + sreg a0, R_A0*R_SZ(sp) + sreg a1, R_A1*R_SZ(sp) + sreg a2, R_A2*R_SZ(sp) + sreg a3, R_A3*R_SZ(sp) + sreg t0, R_T0*R_SZ(sp) + sreg t1, R_T1*R_SZ(sp) + sreg t2, R_T2*R_SZ(sp) + sreg t3, R_T3*R_SZ(sp) + sreg t4, R_T4*R_SZ(sp) + sreg t5, R_T5*R_SZ(sp) + sreg t6, R_T6*R_SZ(sp) + sreg t7, R_T7*R_SZ(sp) + mflo k0 + sreg t8, R_T8*R_SZ(sp) + sreg k0, R_MDLO*R_SZ(sp) + sreg t9, R_T9*R_SZ(sp) + mfhi k0 + sreg gp, R_GP*R_SZ(sp) + sreg fp, R_FP*R_SZ(sp) + sreg k0, R_MDHI*R_SZ(sp) + .set noat + sreg AT, R_AT*R_SZ(sp) + .set at + + stackadd sp,sp,-40 /* store ra on the stack */ + sd ra,32(sp) + +/* determine if an interrupt generated this exception */ + mfc0 k0,C0_CAUSE + and k1,k0,CAUSE_EXCMASK + bnez k1,_ISR_Handler_prom_exit /* not an external interrupt, pass exception to Monitor */ + mfc0 k1,C0_SR + and k0,k1 + and k0,CAUSE_IPMASK + beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */ + nop + + /* + * save some or all context on stack + * may need to save some special interrupt information for exit + * + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * if ( _ISR_Nest_level == 0 ) + * switch to software interrupt stack + * #endif + */ +#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + lint t0,_ISR_Nest_level + beq t0, zero, _ISR_Handler_1 + nop + /* switch stacks */ +_ISR_Handler_1: +#else + lint t0,_ISR_Nest_level +#endif + /* + * _ISR_Nest_level++; + */ + addi t0,t0,1 + sint t0,_ISR_Nest_level + /* + * _Thread_Dispatch_disable_level++; + */ + lint t1,_Thread_Dispatch_disable_level + addi t1,t1,1 + sint t1,_Thread_Dispatch_disable_level +#if 0 + nop + j _ISR_Handler_4 + nop + /* + * while ( interrupts_pending(cause_reg) ) { + * vector = BITFIELD_TO_INDEX(cause_reg); + * (*_ISR_Vector_table[ vector ])( vector ); + * } + */ +_ISR_Handler_2: +/* software interrupt priorities can be applied here */ + li t1,-1 +/* convert bit field into interrupt index */ +_ISR_Handler_3: + andi t2,t0,1 + addi t1,1 + beql t2,zero,_ISR_Handler_3 + dsrl t0,1 + li t1,7 + dsll t1,3 /* convert index to byte offset (*8) */ + la t3,_ISR_Vector_table + intadd t1,t3 + lint t1,(t1) + jalr t1 + nop + j _ISR_Handler_5 + nop +_ISR_Handler_4: + mfc0 t0,C0_CAUSE + andi t0,CAUSE_IPMASK + bne t0,zero,_ISR_Handler_2 + dsrl t0,t0,8 +_ISR_Handler_5: +#else + nop + li t1,7 + dsll t1,t1,SZ_INT_POW2 + la t3,_ISR_Vector_table + intadd t1,t3 + lint t1,(t1) + jalr t1 + nop +#endif + /* + * --_ISR_Nest_level; + */ + lint t2,_ISR_Nest_level + addi t2,t2,-1 + sint t2,_ISR_Nest_level + /* + * --_Thread_Dispatch_disable_level; + */ + lint t1,_Thread_Dispatch_disable_level + addi t1,t1,-1 + sint t1,_Thread_Dispatch_disable_level + /* + * if ( _Thread_Dispatch_disable_level || _ISR_Nest_level ) + * goto the label "exit interrupt (simple case)" + */ + or t0,t2,t1 + bne t0,zero,_ISR_Handler_exit + nop + /* + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * restore stack + * #endif + * + * if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing ) + * goto the label "exit interrupt (simple case)" + */ + lint t0,_Context_Switch_necessary + lint t1,_ISR_Signals_to_thread_executing + or t0,t0,t1 + beq t0,zero,_ISR_Handler_exit + nop + 
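+/*
+ * (Added note: a C paraphrase of the tests performed above, not code
+ * from the original port.)
+ *
+ *     if ( _ISR_Nest_level != 0 || _Thread_Dispatch_disable_level != 0 )
+ *         goto _ISR_Handler_exit;
+ *     if ( !_Context_Switch_necessary &&
+ *          !_ISR_Signals_to_thread_executing )
+ *         goto _ISR_Handler_exit;
+ *
+ * Only when both tests fall through (this is the outermost interrupt,
+ * dispatching is enabled, and a context switch or signal is pending)
+ * is _Thread_Dispatch invoked below.
+ */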
+ /* + * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch + */ + jal _Thread_Dispatch + nop + /* + * prepare to get out of interrupt + * return from interrupt (maybe to _ISR_Dispatch) + * + * LABEL "exit interrupt (simple case): + * prepare to get out of interrupt + * return from interrupt + */ +_ISR_Handler_exit: + ld ra,32(sp) + stackadd sp,sp,40 + +/* restore interrupt context from stack */ + lreg k0, R_MDLO*R_SZ(sp) + mtlo k0 + lreg k0, R_MDHI*R_SZ(sp) + lreg a2, R_A2*R_SZ(sp) + mthi k0 + lreg a3, R_A3*R_SZ(sp) + lreg t0, R_T0*R_SZ(sp) + lreg t1, R_T1*R_SZ(sp) + lreg t2, R_T2*R_SZ(sp) + lreg t3, R_T3*R_SZ(sp) + lreg t4, R_T4*R_SZ(sp) + lreg t5, R_T5*R_SZ(sp) + lreg t6, R_T6*R_SZ(sp) + lreg t7, R_T7*R_SZ(sp) + lreg t8, R_T8*R_SZ(sp) + lreg t9, R_T9*R_SZ(sp) + lreg gp, R_GP*R_SZ(sp) + lreg fp, R_FP*R_SZ(sp) + lreg ra, R_RA*R_SZ(sp) + lreg a0, R_A0*R_SZ(sp) + lreg a1, R_A1*R_SZ(sp) + lreg v1, R_V1*R_SZ(sp) + lreg v0, R_V0*R_SZ(sp) + .set noat + lreg AT, R_AT*R_SZ(sp) + .set at + + stackadd sp,sp,EXCP_STACK_SIZE /* store ra on the stack */ + +#if USE_IDTKIT +/* we handled exception, so return non-zero value */ + li v0,1 +#endif + +_ISR_Handler_quick_exit: +#ifdef USE_IDTKIT + j ra +#else + eret +#endif + nop + +_ISR_Handler_prom_exit: +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + + .set reorder + +ENDFRAME(_ISR_Handler) + + +FRAME(mips_enable_interrupts,sp,0,ra) + mfc0 t0,C0_SR /* get status reg */ + nop + or t0,t0,a0 + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_enable_interrupts) + +FRAME(mips_disable_interrupts,sp,0,ra) + mfc0 v0,C0_SR /* get status reg */ + li t1,SR_IMASK /* t1 = load interrupt mask word */ + not t0,t1 /* t0 = ~t1 */ + and t0,v0 /* clear imask bits */ + mtc0 t0,C0_SR /* save status reg */ + and v0,t1 /* mask return value (only return imask bits) */ + jr ra + nop +ENDFRAME(mips_disable_interrupts) + +FRAME(mips_enable_global_interrupts,sp,0,ra) + mfc0 t0,C0_SR /* get status reg */ + nop + ori t0,SR_IE + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_enable_global_interrupts) + +FRAME(mips_disable_global_interrupts,sp,0,ra) + li t1,SR_IE + mfc0 t0,C0_SR /* get status reg */ + not t1 + and t0,t1 + mtc0 t0,C0_SR /* save updated status reg */ + j ra + nop +ENDFRAME(mips_disable_global_interrupts) + +/* return the value of the status register in v0. Used for debugging */ +FRAME(mips_get_sr,sp,0,ra) + mfc0 v0,C0_SR + j ra + nop +ENDFRAME(mips_get_sr) + +FRAME(mips_break,sp,0,ra) +#if 1 + break 0x0 + j mips_break +#else + j ra +#endif + nop +ENDFRAME(mips_break) + +/*PAGE + * + * _CPU_Internal_threads_Idle_thread_body + * + * NOTES: + * + * 1. This is the same as the regular CPU independent algorithm. + * + * 2. If you implement this using a "halt", "idle", or "shutdown" + * instruction, then don't forget to put it in an infinite loop. + * + * 3. Be warned. Some processors with onboard DMA have been known + * to stop the DMA if the CPU were put in IDLE mode. This might + * also be a problem with other on-chip peripherals. So use this + * hook with caution. 
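+ *
+ * (Added note.)  The "wait" below is not a C call; it is the macro from
+ * idtcpu.h,
+ *
+ *     #define wait  .word 0x42000020
+ *
+ * i.e. the raw WAIT opcode emitted directly because the assembler in
+ * use may not accept the mnemonic.  The body is therefore just WAIT
+ * inside an infinite loop, matching the commented-out C version in
+ * cpu.c.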
+ */ + +FRAME(_CPU_Thread_Idle_body,sp,0,ra) + wait /* enter low power mode */ + j _CPU_Thread_Idle_body + nop +ENDFRAME(_CPU_Thread_Idle_body) + +#define VEC_CODE_LENGTH 10*4 + +/************************************************************************** +** +** init_exc_vecs() - moves the exception code into the addresses +** reserved for exception vectors +** +** UTLB Miss exception vector at address 0x80000000 +** +** General exception vector at address 0x80000080 +** +** RESET exception vector is at address 0xbfc00000 +** +***************************************************************************/ + +#define INITEXCFRM ((2*4)+4) /* ra + 2 arguments */ +FRAME(init_exc_vecs,sp,0,ra) +/* This code yanked from SIM */ +#if defined(CPU_R3000) + .set noreorder + la t1,exc_utlb_code + la t2,exc_norm_code + li t3,UT_VEC + li t4,E_VEC + li t5,VEC_CODE_LENGTH +1: + lw t6,0(t1) + lw t7,0(t2) + sw t6,0(t3) + sw t7,0(t4) + addiu t1,4 + addiu t3,4 + addiu t4,4 + subu t5,4 + bne t5,zero,1b + addiu t2,4 + move t5,ra # assumes clear_cache doesnt use t5 + li a0,UT_VEC + jal clear_cache + li a1,VEC_CODE_LENGTH + nop + li a0,E_VEC + jal clear_cache + li a1,VEC_CODE_LENGTH + move ra,t5 # restore ra + j ra + nop + .set reorder +#endif +#if defined(CPU_R4000) + .set reorder + move t5,ra # assumes clear_cache doesnt use t5 + + /* TLB exception vector */ + la t1,exc_tlb_code + li t2,T_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,T_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + la t1,exc_xtlb_code + li t2,X_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + /* extended TLB exception vector */ + li a0,X_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + /* cache error exception vector */ + la t1,exc_cache_code + li t2,C_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,C_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + /* normal exception vector */ + la t1,exc_norm_code + li t2,E_VEC |K1BASE + li t3,VEC_CODE_LENGTH +1: + lw t6,0(t1) + addiu t1,4 + subu t3,4 + sw t6,0(t2) + addiu t2,4 + bne t3,zero,1b + + li a0,E_VEC + li a1,VEC_CODE_LENGTH + jal clear_cache + + move ra,t5 # restore ra + j ra +#endif +ENDFRAME(init_exc_vecs) + + +#if defined(CPU_R4000) +FRAME(exc_tlb_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + +ENDFRAME(exc_tlb_code) + + +FRAME(exc_xtlb_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + +ENDFRAME(exc_xtlb_code) + + +FRAME(exc_cache_code,sp,0,ra) +#ifdef CPU_R3000 + la k0, (R_VEC+((48)*8)) +#endif + +#ifdef CPU_R4000 + la k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */ +#endif + j k0 + nop + +ENDFRAME(exc_cache_code) + + +FRAME(exc_norm_code,sp,0,ra) + la k0, _ISR_Handler /* generic external int hndlr */ + j k0 + nop + subu sp, EXCP_STACK_SIZE /* set up local stack frame */ +ENDFRAME(exc_norm_code) +#endif + +/************************************************************************** +** +** enable_int(mask) - enables interrupts - mask is positioned so it only +** needs to be or'ed into the status reg. This +** also does some other things !!!! 
caution should +** be used if invoking this while in the middle +** of a debugging session where the client may have +** nested interrupts. +** +****************************************************************************/ +FRAME(enable_int,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR + or a0,1 + or t0,a0 + mtc0 t0,C0_SR + j ra + nop + .set reorder +ENDFRAME(enable_int) + + +/*************************************************************************** +** +** disable_int(mask) - disable the interrupt - mask is the complement +** of the bits to be cleared - i.e. to clear ext int +** 5 the mask would be - 0xffff7fff +** +****************************************************************************/ +FRAME(disable_int,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR + nop + and t0,a0 + mtc0 t0,C0_SR + j ra + nop +ENDFRAME(disable_int) + + diff --git a/cpukit/score/cpu/mips64orion/cpu_asm.h b/cpukit/score/cpu/mips64orion/cpu_asm.h new file mode 100644 index 0000000000..9696204962 --- /dev/null +++ b/cpukit/score/cpu/mips64orion/cpu_asm.h @@ -0,0 +1,115 @@ +/* + * cpu_asm.h + * + * Author: Craig Lebakken + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.h: + * + * COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994. + * On-Line Applications Research Corporation (OAR). + * + * This material may be reproduced by or for the U.S. Government pursuant + * to the copyright license under the clause at DFARS 252.227-7013. This + * notice must appear in all copies of this file and its derivatives. 
+ * + * cpu_asm.h,v 1.4 1995/09/26 19:25:39 joel Exp + * + */ +/* @(#)cpu_asm.h 08/20/96 1.2 */ + +#ifndef __CPU_ASM_h +#define __CPU_ASM_h + +/* pull in the generated offsets */ + +/* #include */ + +/* + * Hardware General Registers + */ + +/* put something here */ + +/* + * Hardware Floating Point Registers + */ + +#define R_FP0 0 +#define R_FP1 1 +#define R_FP2 2 +#define R_FP3 3 +#define R_FP4 4 +#define R_FP5 5 +#define R_FP6 6 +#define R_FP7 7 +#define R_FP8 8 +#define R_FP9 9 +#define R_FP10 10 +#define R_FP11 11 +#define R_FP12 12 +#define R_FP13 13 +#define R_FP14 14 +#define R_FP15 15 +#define R_FP16 16 +#define R_FP17 17 +#define R_FP18 18 +#define R_FP19 19 +#define R_FP20 20 +#define R_FP21 21 +#define R_FP22 22 +#define R_FP23 23 +#define R_FP24 24 +#define R_FP25 25 +#define R_FP26 26 +#define R_FP27 27 +#define R_FP28 28 +#define R_FP29 29 +#define R_FP30 30 +#define R_FP31 31 + +/* + * Hardware Control Registers + */ + +/* put something here */ + +/* + * Calling Convention + */ + +/* put something here */ + +/* + * Temporary registers + */ + +/* put something here */ + +/* + * Floating Point Registers - SW Conventions + */ + +/* put something here */ + +/* + * Temporary floating point registers + */ + +/* put something here */ + +#endif + +/* end of file */ diff --git a/cpukit/score/cpu/mips64orion/idtcpu.h b/cpukit/score/cpu/mips64orion/idtcpu.h new file mode 100644 index 0000000000..833c5ebe02 --- /dev/null +++ b/cpukit/score/cpu/mips64orion/idtcpu.h @@ -0,0 +1,439 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + +*/ + +/* +** idtcpu.h -- cpu related defines +*/ + +#ifndef _IDTCPU_H__ +#define _IDTCPU_H__ + +/* + * 950313: Ketan added Register definition for XContext reg. + * added define for WAIT instruction. 
+ * 950421: Ketan added Register definition for Config reg (R3081) + */ + +/* +** memory configuration and mapping +*/ +#define K0BASE 0x80000000 +#define K0SIZE 0x20000000 +#define K1BASE 0xa0000000 +#define K1SIZE 0x20000000 +#define K2BASE 0xc0000000 +#define K2SIZE 0x20000000 +#if defined(CPU_R4000) +#define KSBASE 0xe0000000 +#define KSSIZE 0x20000000 +#endif + +#define KUBASE 0 +#define KUSIZE 0x80000000 + +/* +** Exception Vectors +*/ +#if defined(CPU_R3000) +#define UT_VEC K0BASE /* utlbmiss vector */ +#define E_VEC (K0BASE+0x80) /* exception vevtor */ +#endif +#if defined(CPU_R4000) +#define T_VEC (K0BASE+0x000) /* tlbmiss vector */ +#define X_VEC (K0BASE+0x080) /* xtlbmiss vector */ +#define C_VEC (K0BASE+0x100) /* cache error vector */ +#define E_VEC (K0BASE+0x180) /* exception vector */ +#endif +#define R_VEC (K1BASE+0x1fc00000) /* reset vector */ + +/* +** Address conversion macros +*/ +#ifdef CLANGUAGE +#define CAST(as) (as) +#else +#define CAST(as) +#endif +#define K0_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* kseg0 to kseg1 */ +#define K1_TO_K0(x) (CAST(unsigned)(x)&0x9FFFFFFF) /* kseg1 to kseg0 */ +#define K0_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg0 to physical */ +#define K1_TO_PHYS(x) (CAST(unsigned)(x)&0x1FFFFFFF) /* kseg1 to physical */ +#define PHYS_TO_K0(x) (CAST(unsigned)(x)|0x80000000) /* physical to kseg0 */ +#define PHYS_TO_K1(x) (CAST(unsigned)(x)|0xA0000000) /* physical to kseg1 */ + +/* +** Cache size constants +*/ +#define MINCACHE 0x200 /* 512 For 3041. */ +#define MAXCACHE 0x40000 /* 256*1024 256k */ + +#if defined(CPU_R4000) +/* R4000 configuration register definitions */ +#define CFG_CM 0x80000000 /* Master-Checker mode */ +#define CFG_ECMASK 0x70000000 /* System Clock Ratio */ +#define CFG_ECBY2 0x00000000 /* divide by 2 */ +#define CFG_ECBY3 0x10000000 /* divide by 3 */ +#define CFG_ECBY4 0x20000000 /* divide by 4 */ +#define CFG_EPMASK 0x0f000000 /* Transmit data pattern */ +#define CFG_EPD 0x00000000 /* D */ +#define CFG_EPDDX 0x01000000 /* DDX */ +#define CFG_EPDDXX 0x02000000 /* DDXX */ +#define CFG_EPDXDX 0x03000000 /* DXDX */ +#define CFG_EPDDXXX 0x04000000 /* DDXXX */ +#define CFG_EPDDXXXX 0x05000000 /* DDXXXX */ +#define CFG_EPDXXDXX 0x06000000 /* DXXDXX */ +#define CFG_EPDDXXXXX 0x07000000 /* DDXXXXX */ +#define CFG_EPDXXXDXXX 0x08000000 /* DXXXDXXX */ +#define CFG_SBMASK 0x00c00000 /* Secondary cache block size */ +#define CFG_SBSHIFT 22 +#define CFG_SB4 0x00000000 /* 4 words */ +#define CFG_SB8 0x00400000 /* 8 words */ +#define CFG_SB16 0x00800000 /* 16 words */ +#define CFG_SB32 0x00c00000 /* 32 words */ +#define CFG_SS 0x00200000 /* Split secondary cache */ +#define CFG_SW 0x00100000 /* Secondary cache port width */ +#define CFG_EWMASK 0x000c0000 /* System port width */ +#define CFG_EWSHIFT 18 +#define CFG_EW64 0x00000000 /* 64 bit */ +#define CFG_EW32 0x00010000 /* 32 bit */ +#define CFG_SC 0x00020000 /* Secondary cache absent */ +#define CFG_SM 0x00010000 /* Dirty Shared mode disabled */ +#define CFG_BE 0x00008000 /* Big Endian */ +#define CFG_EM 0x00004000 /* ECC mode enable */ +#define CFG_EB 0x00002000 /* Block ordering */ +#define CFG_ICMASK 0x00000e00 /* Instruction cache size */ +#define CFG_ICSHIFT 9 +#define CFG_DCMASK 0x000001c0 /* Data cache size */ +#define CFG_DCSHIFT 6 +#define CFG_IB 0x00000020 /* Instruction cache block size */ +#define CFG_DB 0x00000010 /* Data cache block size */ +#define CFG_CU 0x00000008 /* Update on Store Conditional */ +#define CFG_K0MASK 0x00000007 /* KSEG0 coherency algorithm */ + +/* + * 
R4000 primary cache mode + */ +#define CFG_C_UNCACHED 2 +#define CFG_C_NONCOHERENT 3 +#define CFG_C_COHERENTXCL 4 +#define CFG_C_COHERENTXCLW 5 +#define CFG_C_COHERENTUPD 6 + +/* + * R4000 cache operations (should be in assembler...?) + */ +#define Index_Invalidate_I 0x0 /* 0 0 */ +#define Index_Writeback_Inv_D 0x1 /* 0 1 */ +#define Index_Invalidate_SI 0x2 /* 0 2 */ +#define Index_Writeback_Inv_SD 0x3 /* 0 3 */ +#define Index_Load_Tag_I 0x4 /* 1 0 */ +#define Index_Load_Tag_D 0x5 /* 1 1 */ +#define Index_Load_Tag_SI 0x6 /* 1 2 */ +#define Index_Load_Tag_SD 0x7 /* 1 3 */ +#define Index_Store_Tag_I 0x8 /* 2 0 */ +#define Index_Store_Tag_D 0x9 /* 2 1 */ +#define Index_Store_Tag_SI 0xA /* 2 2 */ +#define Index_Store_Tag_SD 0xB /* 2 3 */ +#define Create_Dirty_Exc_D 0xD /* 3 1 */ +#define Create_Dirty_Exc_SD 0xF /* 3 3 */ +#define Hit_Invalidate_I 0x10 /* 4 0 */ +#define Hit_Invalidate_D 0x11 /* 4 1 */ +#define Hit_Invalidate_SI 0x12 /* 4 2 */ +#define Hit_Invalidate_SD 0x13 /* 4 3 */ +#define Hit_Writeback_Inv_D 0x15 /* 5 1 */ +#define Hit_Writeback_Inv_SD 0x17 /* 5 3 */ +#define Fill_I 0x14 /* 5 0 */ +#define Hit_Writeback_D 0x19 /* 6 1 */ +#define Hit_Writeback_SD 0x1B /* 6 3 */ +#define Hit_Writeback_I 0x18 /* 6 0 */ +#define Hit_Set_Virtual_SI 0x1E /* 7 2 */ +#define Hit_Set_Virtual_SD 0x1F /* 7 3 */ + +#ifndef WAIT +#define WAIT .word 0x42000020 +#endif WAIT + +#ifndef wait +#define wait .word 0x42000020 +#endif wait + +#endif + +/* +** TLB resource defines +*/ +#if defined(CPU_R3000) +#define N_TLB_ENTRIES 64 +#define TLB_PGSIZE 0x1000 +#define RANDBASE 8 +#define TLBLO_PFNMASK 0xfffff000 +#define TLBLO_PFNSHIFT 12 +#define TLBLO_N 0x800 /* non-cacheable */ +#define TLBLO_D 0x400 /* writeable */ +#define TLBLO_V 0x200 /* valid bit */ +#define TLBLO_G 0x100 /* global access bit */ + +#define TLBHI_VPNMASK 0xfffff000 +#define TLBHI_VPNSHIFT 12 +#define TLBHI_PIDMASK 0xfc0 +#define TLBHI_PIDSHIFT 6 +#define TLBHI_NPID 64 + +#define TLBINX_PROBE 0x80000000 +#define TLBINX_INXMASK 0x00003f00 +#define TLBINX_INXSHIFT 8 + +#define TLBRAND_RANDMASK 0x00003f00 +#define TLBRAND_RANDSHIFT 8 + +#define TLBCTXT_BASEMASK 0xffe00000 +#define TLBCTXT_BASESHIFT 21 + +#define TLBCTXT_VPNMASK 0x001ffffc +#define TLBCTXT_VPNSHIFT 2 +#endif +#if defined(CPU_R4000) +#define N_TLB_ENTRIES 48 + +#define TLBHI_VPN2MASK 0xffffe000 +#define TLBHI_PIDMASK 0x000000ff +#define TLBHI_NPID 256 + +#define TLBLO_PFNMASK 0x3fffffc0 +#define TLBLO_PFNSHIFT 6 +#define TLBLO_D 0x00000004 /* writeable */ +#define TLBLO_V 0x00000002 /* valid bit */ +#define TLBLO_G 0x00000001 /* global access bit */ +#define TLBLO_CMASK 0x00000038 /* cache algorithm mask */ +#define TLBLO_CSHIFT 3 + +#define TLBLO_UNCACHED (CFG_C_UNCACHED< k, 1 => u */ +#define SR_IEO 0x00000010 /* old interrupt enable, 1 => enable */ +#define SR_KUP 0x00000008 /* prev kernel/user, 0 => k, 1 => u */ +#define SR_IEP 0x00000004 /* prev interrupt enable, 1 => enable */ +#define SR_KUC 0x00000002 /* cur kernel/user, 0 => k, 1 => u */ +#define SR_IEC 0x00000001 /* cur interrupt enable, 1 => enable */ +#endif + +#if defined(CPU_R4000) +#define SR_CUMASK 0xf0000000 /* coproc usable bits */ +#define SR_CU3 0x80000000 /* Coprocessor 3 usable */ +#define SR_CU2 0x40000000 /* Coprocessor 2 usable */ +#define SR_CU1 0x20000000 /* Coprocessor 1 usable */ +#define SR_CU0 0x10000000 /* Coprocessor 0 usable */ + +#define SR_RP 0x08000000 /* Reduced power operation */ +#define SR_FR 0x04000000 /* Additional floating point registers */ +#define SR_RE 0x02000000 /* Reverse 
endian in user mode */ + +#define SR_BEV 0x00400000 /* Use boot exception vectors */ +#define SR_TS 0x00200000 /* TLB shutdown */ +#define SR_SR 0x00100000 /* Soft reset */ +#define SR_CH 0x00040000 /* Cache hit */ +#define SR_CE 0x00020000 /* Use cache ECC */ +#define SR_DE 0x00010000 /* Disable cache exceptions */ + +/* +** status register interrupt masks and bits +*/ + +#define SR_IMASK 0x0000ff00 /* Interrupt mask */ +#define SR_IMASK8 0x00000000 /* mask level 8 */ +#define SR_IMASK7 0x00008000 /* mask level 7 */ +#define SR_IMASK6 0x0000c000 /* mask level 6 */ +#define SR_IMASK5 0x0000e000 /* mask level 5 */ +#define SR_IMASK4 0x0000f000 /* mask level 4 */ +#define SR_IMASK3 0x0000f800 /* mask level 3 */ +#define SR_IMASK2 0x0000fc00 /* mask level 2 */ +#define SR_IMASK1 0x0000fe00 /* mask level 1 */ +#define SR_IMASK0 0x0000ff00 /* mask level 0 */ + +#define SR_IMASKSHIFT 8 + +#define SR_IBIT8 0x00008000 /* bit level 8 */ +#define SR_IBIT7 0x00004000 /* bit level 7 */ +#define SR_IBIT6 0x00002000 /* bit level 6 */ +#define SR_IBIT5 0x00001000 /* bit level 5 */ +#define SR_IBIT4 0x00000800 /* bit level 4 */ +#define SR_IBIT3 0x00000400 /* bit level 3 */ +#define SR_IBIT2 0x00000200 /* bit level 2 */ +#define SR_IBIT1 0x00000100 /* bit level 1 */ + +#define SR_KSMASK 0x00000018 /* Kernel mode mask */ +#define SR_KSUSER 0x00000010 /* User mode */ +#define SR_KSSUPER 0x00000008 /* Supervisor mode */ +#define SR_KSKERNEL 0x00000000 /* Kernel mode */ +#define SR_ERL 0x00000004 /* Error level */ +#define SR_EXL 0x00000002 /* Exception level */ +#define SR_IE 0x00000001 /* Interrupts enabled */ +#endif + + + +/* + * Cause Register + */ +#define CAUSE_BD 0x80000000 /* Branch delay slot */ +#define CAUSE_CEMASK 0x30000000 /* coprocessor error */ +#define CAUSE_CESHIFT 28 + + +#define CAUSE_IPMASK 0x0000FF00 /* Pending interrupt mask */ +#define CAUSE_IPSHIFT 8 + +#define CAUSE_EXCMASK 0x0000003C /* Cause code bits */ +#define CAUSE_EXCSHIFT 2 + +#ifndef XDS +/* +** Coprocessor 0 registers +*/ +#define C0_INX $0 /* tlb index */ +#define C0_RAND $1 /* tlb random */ +#if defined(CPU_R3000) +#define C0_TLBLO $2 /* tlb entry low */ +#endif +#if defined(CPU_R4000) +#define C0_TLBLO0 $2 /* tlb entry low 0 */ +#define C0_TLBLO1 $3 /* tlb entry low 1 */ +#endif + +#define C0_CTXT $4 /* tlb context */ + +#if defined(CPU_R4000) +#define C0_PAGEMASK $5 /* tlb page mask */ +#define C0_WIRED $6 /* number of wired tlb entries */ +#endif + +#define C0_BADVADDR $8 /* bad virtual address */ + +#if defined(CPU_R4000) +#define C0_COUNT $9 /* cycle count */ +#endif + +#define C0_TLBHI $10 /* tlb entry hi */ + +#if defined(CPU_R4000) +#define C0_COMPARE $11 /* cyccle count comparator */ +#endif + +#define C0_SR $12 /* status register */ +#define C0_CAUSE $13 /* exception cause */ +#define C0_EPC $14 /* exception pc */ +#define C0_PRID $15 /* revision identifier */ + +#if defined(CPU_R3000) +#define C0_CONFIG $3 /* configuration register R3081*/ +#endif + +#if defined(CPU_R4000) +#define C0_CONFIG $16 /* configuration register */ +#define C0_LLADDR $17 /* linked load address */ +#define C0_WATCHLO $18 /* watchpoint trap register */ +#define C0_WATCHHI $19 /* watchpoint trap register */ +#define C0_XCTXT $20 /* extended tlb context */ +#define C0_ECC $26 /* secondary cache ECC control */ +#define C0_CACHEERR $27 /* cache error status */ +#define C0_TAGLO $28 /* cache tag lo */ +#define C0_TAGHI $29 /* cache tag hi */ +#define C0_ERRPC $30 /* cache error pc */ +#endif + +#endif XDS + +#ifdef R4650 +#define IWATCH $18 
+#define DWATCH		$19
+#define IBASE		$0
+#define IBOUND		$1
+#define DBASE		$2
+#define DBOUND		$3
+#define CALG		$17
+#endif
+
+#endif /* _IDTCPU_H__ */
+
diff --git a/cpukit/score/cpu/mips64orion/idtmon.h b/cpukit/score/cpu/mips64orion/idtmon.h
new file mode 100644
index 0000000000..1cddbcb2a3
--- /dev/null
+++ b/cpukit/score/cpu/mips64orion/idtmon.h
@@ -0,0 +1,170 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceeding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+
+*/
+
+/*
+** idtmon.h - General header file for the IDT Prom Monitor
+**
+** Copyright 1989 Integrated Device Technology, Inc.
+** All Rights Reserved.
+**
+** June 1989 - D.Cahoon
+*/
+#ifndef __IDTMON_H__
+#define __IDTMON_H__
+
+/*
+** P_STACKSIZE is the size of the Prom Stack.
+** the prom stack grows downward
+*/
+#define P_STACKSIZE	0x2000	/* sets stack size to 8k */
+
+/*
+** M_BUSWIDTH
+** Memory bus width (including bank interleaving) in bytes
+** used when doing memory sizing to prevent bus capacitance
+** reporting ghost memory locations
+*/
+#if defined(CPU_R3000)
+#define M_BUSWIDTH	8	/* 32bit memory bank interleaved */
+#endif
+#if defined(CPU_R4000)
+#define M_BUSWIDTH	16	/* 64 bit memory bank interleaved */
+#endif
+
+/*
+** this is the default value for the number of bytes to add in calculating
+** the checksums in the checksum command
+*/
+#define CHK_SUM_CNT	0x20000	/* number of bytes to calc chksum for */
+
+/*
+** Monitor modes
+*/
+#define MODE_MONITOR	5	/* IDT Prom Monitor is executing */
+#define MODE_USER	0xa	/* USER is executing */
+
+/*
+** memory reference widths
+*/
+#define SW_BYTE		1
+#define SW_HALFWORD	2
+#define SW_WORD		4
+#define SW_TRIBYTEL	12
+#define SW_TRIBYTER	20
+
+#ifdef CPU_R4000
+/*
+** definitions for select_cache call
+*/
+#define DCACHE	0
+#define ICACHE	1
+#define SCACHE	2
+
+#endif
+
+#if defined (CLANGUAGE) || defined(_LANGUAGE_C)
+typedef struct {
+	unsigned int	mem_size;
+	unsigned int	icache_size;
+	unsigned int	dcache_size;
+#ifdef CPU_R4000
+	unsigned int	scache_size;
+#endif
+
+	} mem_config;
+
+#endif /* CLANGUAGE || _LANGUAGE_C */
+
+/*
+** general equates for diagnostics and boolean functions
+*/
+#define PASS	0
+#define FAIL	1
+
+#ifndef TRUE
+#define TRUE	1
+#endif /* TRUE */
+#ifndef NULL
+#define NULL	0
+#endif /* NULL */
+
+#ifndef FALSE
+#define FALSE	0
+#endif /* FALSE */
+
+
+/*
+** portability equates
+*/
+
+#ifndef BOOL
+#define BOOL	unsigned int
+#endif /* BOOL */
+
+#ifndef GLOBAL
+#define GLOBAL	/**/
+#endif /* GLOBAL */
+
+#ifndef MLOCAL
+#define MLOCAL	static
+#endif /* MLOCAL */
+
+
+#ifdef XDS
+#define CONST	const
+#else
+#define CONST
+#endif /* XDS */
+
+#define u_char	unsigned char
+#define u_short	unsigned short
+#define u_int	unsigned int
+/*
+** assembly instructions for compatibility between xds and mips
+*/
+#ifndef XDS
+#define sllv	sll
+#define srlv	srl
+#endif /* XDS */
+/*
+** debugger macros for assembly language routines. Allows the
+** programmer to set up the necessary stack frame info
+** required by debuggers to do stack traces.
+*/
+
+#ifndef XDS
+#define FRAME(name,frm_reg,offset,ret_reg)	\
+	.globl	name;		\
+	.ent	name;		\
+name:;				\
+	.frame	frm_reg,offset,ret_reg
+#define ENDFRAME(name)		\
+	.end name
+#else
+#define FRAME(name,frm_reg,offset,ret_reg)	\
+	.globl	_##name;\
+_##name:
+#define ENDFRAME(name)
+#endif /* XDS */
+#endif /* __IDTMON_H__ */
diff --git a/cpukit/score/cpu/mips64orion/iregdef.h b/cpukit/score/cpu/mips64orion/iregdef.h
new file mode 100644
index 0000000000..e7a4ba5ed1
--- /dev/null
+++ b/cpukit/score/cpu/mips64orion/iregdef.h
@@ -0,0 +1,324 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceeding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+
+*/
+
+/*
+** iregdef.h - IDT R3000 register structure header file
+**
+** Copyright 1989 Integrated Device Technology, Inc
+** All Rights Reserved
+**
+*/
+#ifndef __IREGDEF_H__
+#define __IREGDEF_H__
+
+/*
+ * 950313: Ketan added sreg/lreg and R_SZ for 64-bit saves
+ *	   added Register definition for XContext reg.
+ *	   Look towards end of this file.
+ */ +/* +** register names +*/ +#define r0 $0 +#define r1 $1 +#define r2 $2 +#define r3 $3 +#define r4 $4 +#define r5 $5 +#define r6 $6 +#define r7 $7 +#define r8 $8 +#define r9 $9 +#define r10 $10 +#define r11 $11 +#define r12 $12 +#define r13 $13 + +#define r14 $14 +#define r15 $15 +#define r16 $16 +#define r17 $17 +#define r18 $18 +#define r19 $19 +#define r20 $20 +#define r21 $21 +#define r22 $22 +#define r23 $23 +#define r24 $24 +#define r25 $25 +#define r26 $26 +#define r27 $27 +#define r28 $28 +#define r29 $29 +#define r30 $30 +#define r31 $31 + +#define fp0 $f0 +#define fp1 $f1 +#define fp2 $f2 +#define fp3 $f3 +#define fp4 $f4 +#define fp5 $f5 +#define fp6 $f6 +#define fp7 $f7 +#define fp8 $f8 +#define fp9 $f9 +#define fp10 $f10 +#define fp11 $f11 +#define fp12 $f12 +#define fp13 $f13 +#define fp14 $f14 +#define fp15 $f15 +#define fp16 $f16 +#define fp17 $f17 +#define fp18 $f18 +#define fp19 $f19 +#define fp20 $f20 +#define fp21 $f21 +#define fp22 $f22 +#define fp23 $f23 +#define fp24 $f24 +#define fp25 $f25 +#define fp26 $f26 +#define fp27 $f27 +#define fp28 $f28 +#define fp29 $f29 +#define fp30 $f30 +#define fp31 $f31 + +#define fcr0 $0 +#define fcr30 $30 +#define fcr31 $31 + +#define zero $0 /* wired zero */ +#define AT $at /* assembler temp */ +#define v0 $2 /* return value */ +#define v1 $3 +#define a0 $4 /* argument registers a0-a3 */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define t0 $8 /* caller saved t0-t9 */ +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 +#define s0 $16 /* callee saved s0-s8 */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 +#define t9 $25 +#define k0 $26 /* kernel usage */ +#define k1 $27 /* kernel usage */ +#define gp $28 /* sdata pointer */ +#define sp $29 /* stack pointer */ +#define s8 $30 /* yet another saved reg for the callee */ +#define fp $30 /* frame pointer - this is being phased out by MIPS */ +#define ra $31 /* return address */ + + +/* +** relative position of registers in save reg area +*/ +#define R_R0 0 +#define R_R1 1 +#define R_R2 2 +#define R_R3 3 +#define R_R4 4 +#define R_R5 5 +#define R_R6 6 +#define R_R7 7 +#define R_R8 8 +#define R_R9 9 +#define R_R10 10 +#define R_R11 11 +#define R_R12 12 +#define R_R13 13 +#define R_R14 14 +#define R_R15 15 +#define R_R16 16 +#define R_R17 17 +#define R_R18 18 +#define R_R19 19 +#define R_R20 20 +#define R_R21 21 +#define R_R22 22 +#define R_R23 23 +#define R_R24 24 +#define R_R25 25 +#define R_R26 26 +#define R_R27 27 +#define R_R28 28 +#define R_R29 29 +#define R_R30 30 +#define R_R31 31 +#define R_F0 32 +#define R_F1 33 +#define R_F2 34 +#define R_F3 35 +#define R_F4 36 +#define R_F5 37 +#define R_F6 38 +#define R_F7 39 +#define R_F8 40 +#define R_F9 41 +#define R_F10 42 +#define R_F11 43 +#define R_F12 44 +#define R_F13 45 +#define R_F14 46 +#define R_F15 47 +#define R_F16 48 +#define R_F17 49 +#define R_F18 50 +#define R_F19 51 +#define R_F20 52 +#define R_F21 53 +#define R_F22 54 +#define R_F23 55 +#define R_F24 56 +#define R_F25 57 +#define R_F26 58 +#define R_F27 59 +#define R_F28 60 +#define R_F29 61 +#define R_F30 62 +#define R_F31 63 +#define NCLIENTREGS 64 +#define R_EPC 64 +#define R_MDHI 65 +#define R_MDLO 66 +#define R_SR 67 +#define R_CAUSE 68 +#define R_TLBHI 69 +#if defined(CPU_R3000) +#define R_TLBLO 70 +#endif +#if defined(CPU_R4000) +#define R_TLBLO0 70 +#endif +#define R_BADVADDR 71 +#define R_INX 72 +#define R_RAND 73 
+#define R_CTXT		74
+#define R_EXCTYPE	75
+#define R_MODE		76
+#define R_PRID		77
+#define R_FCSR		78
+#define R_FEIR		79
+#if defined(CPU_R3000)
+#define NREGS		80
+#endif
+#if defined(CPU_R4000)
+#define R_TLBLO1	80
+#define R_PAGEMASK	81
+#define R_WIRED		82
+#define R_COUNT		83
+#define R_COMPARE	84
+#define R_CONFIG	85
+#define R_LLADDR	86
+#define R_WATCHLO	87
+#define R_WATCHHI	88
+#define R_ECC		89
+#define R_CACHEERR	90
+#define R_TAGLO		91
+#define R_TAGHI		92
+#define R_ERRPC		93
+#define R_XCTXT		94	/* Ketan added from SIM64bit */
+
+#define NREGS		95
+#endif
+
+/*
+** For those who like to think in terms of the compiler names for the regs
+*/
+#define R_ZERO		R_R0
+#define R_AT		R_R1
+#define R_V0		R_R2
+#define R_V1		R_R3
+#define R_A0		R_R4
+#define R_A1		R_R5
+#define R_A2		R_R6
+#define R_A3		R_R7
+#define R_T0		R_R8
+#define R_T1		R_R9
+#define R_T2		R_R10
+#define R_T3		R_R11
+#define R_T4		R_R12
+#define R_T5		R_R13
+#define R_T6		R_R14
+#define R_T7		R_R15
+#define R_S0		R_R16
+#define R_S1		R_R17
+#define R_S2		R_R18
+#define R_S3		R_R19
+#define R_S4		R_R20
+#define R_S5		R_R21
+#define R_S6		R_R22
+#define R_S7		R_R23
+#define R_T8		R_R24
+#define R_T9		R_R25
+#define R_K0		R_R26
+#define R_K1		R_R27
+#define R_GP		R_R28
+#define R_SP		R_R29
+#define R_FP		R_R30
+#define R_RA		R_R31
+
+/* Ketan added the following */
+#ifdef CPU_R3000
+#define sreg		sw
+#define lreg		lw
+#define rmfc0		mfc0
+#define rmtc0		mtc0
+#define R_SZ		4
+#endif /* CPU_R3000 */
+
+#ifdef CPU_R4000
+#if __mips < 3
+#define sreg		sw
+#define lreg		lw
+#define rmfc0		mfc0
+#define rmtc0		mtc0
+#define R_SZ		4
+#else
+#define sreg		sd
+#define lreg		ld
+#define rmfc0		dmfc0
+#define rmtc0		dmtc0
+#define R_SZ		8
+#endif
+#endif /* CPU_R4000 */
+/* Ketan till here */
+
+#endif /* __IREGDEF_H__ */
+
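Usage note: the status-register and coprocessor 0 definitions in idtcpu.h are meant to be
pulled into assembly sources that are run through the C preprocessor. The fragment below is
a minimal sketch, not part of the files added by this patch, showing how SR_IBIT3, SR_IE and
C0_SR might be combined to unmask interrupt level 3; the routine name enable_int3 and the
choice of t0 as a scratch register are invented for the example, and iregdef.h is included
only to provide the symbolic register names.

	/* Hypothetical sketch -- not part of the patch.  Assumes a .S file
	 * preprocessed with cpp and assembled for an R3000-class CPU.
	 */
	#include "iregdef.h"		/* symbolic register names (t0, ra, ...) */
	#include "idtcpu.h"		/* C0_SR, SR_IBIT3, SR_IE */

		.text
		.set	noreorder
		.globl	enable_int3		/* illustrative name */
	enable_int3:
		mfc0	t0, C0_SR		/* read current status register */
		nop				/* mfc0 load hazard slot */
		ori	t0, t0, (SR_IBIT3 | SR_IE) /* unmask level 3, global enable */
		mtc0	t0, C0_SR		/* write it back */
		j	ra
		nop				/* branch delay slot */
		.set	reorder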
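Usage note: the FRAME/ENDFRAME macros from idtmon.h and the R_* save-area indices, R_SZ and
the sreg/lreg aliases from iregdef.h are designed to work together in context-save code. The
sketch below is illustrative only; it assumes CPU_R3000 is defined on the command line (so
R_SZ is 4 and sreg expands to sw), and the routine name and the convention that a0 points to
the save area are made up for the example.

	/* Hypothetical sketch -- not part of the patch.  Stores a few registers
	 * into a save area laid out according to the R_* indices.
	 */
	#include "iregdef.h"		/* register names, R_* indices, R_SZ, sreg */
	#include "idtcpu.h"		/* C0_SR */
	#include "idtmon.h"		/* FRAME/ENDFRAME */

		.text
		.set	noreorder
	FRAME(save_partial_context,sp,0,ra)	/* illustrative leaf routine */
		sreg	s0, R_S0*R_SZ(a0)	/* callee-saved registers ... */
		sreg	s1, R_S1*R_SZ(a0)
		sreg	ra, R_RA*R_SZ(a0)	/* ... and the return address */
		mfc0	t0, C0_SR		/* capture the status register */
		nop				/* mfc0 load hazard slot */
		sreg	t0, R_SR*R_SZ(a0)
		j	ra
		nop				/* branch delay slot */
	ENDFRAME(save_partial_context)
		.set	reorder

Indexing the save area as R_xx*R_SZ is what lets the same source assemble for both register
widths: when CPU_R4000 and __mips >= 3 are in effect, sreg/lreg become sd/ld and R_SZ becomes
8 without any change to the save/restore code.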