author     Ralf Corsepius <ralf.corsepius@rtems.org>  2006-03-16 17:34:05 +0000
committer  Ralf Corsepius <ralf.corsepius@rtems.org>  2006-03-16 17:34:05 +0000
commit     15ebdf1f6b05f829c783298ad39ba3214ed42515 (patch)
tree       7187ba8a5c804ecad37f2c00c5e2445304975406 /c/src/lib/libbsp/mips/rbtx4925/startup
parent     New. (diff)
download   rtems-15ebdf1f6b05f829c783298ad39ba3214ed42515.tar.bz2
New (Submission by Bruce Robinson <brucer@pmccorp.com>)
Diffstat (limited to 'c/src/lib/libbsp/mips/rbtx4925/startup')
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/bspclean.c  |  16
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/bspstart.c  | 104
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/exception.S | 631
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/idtmem.S    | 922
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/idttlb.S    | 390
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/inittlb.c   |  16
-rw-r--r-- | c/src/lib/libbsp/mips/rbtx4925/startup/linkcmds    | 163
7 files changed, 2242 insertions, 0 deletions
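Note on the heap setup in the bspstart.c diff below: bsp_pretasking_hook() takes the linker-provided end symbol, rounds it up to CPU_ALIGNMENT, and hands it to bsp_libc_init() with a fixed 64 KB heap. A minimal host-side sketch of that rounding step follows; the align_up() helper, the sample address, and the alignment value of 8 are illustrative assumptions, not part of the BSP.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round an address up to the next alignment boundary, the same way
 * bsp_pretasking_hook() treats the linker "end" symbol before calling
 * bsp_libc_init(). Assumes the alignment is a power of two. */
static uint32_t align_up(uint32_t addr, uint32_t alignment)
{
  if (addr & (alignment - 1))
    addr = (addr + alignment) & ~(alignment - 1);
  return addr;
}

int main(void)
{
  /* Hypothetical end-of-image address; on the target this is (uint32_t)&end. */
  uint32_t end_of_image = 0x80041234;

  printf("heap start: 0x%08" PRIx32 "\n", align_up(end_of_image, 8));
  return 0;
}

The BSP code adds the full alignment before masking, which is equivalent to rounding up because the addition only happens when the address is already known to be misaligned.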
diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/bspclean.c b/c/src/lib/libbsp/mips/rbtx4925/startup/bspclean.c new file mode 100644 index 0000000000..f9ba70b0c2 --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/bspclean.c @@ -0,0 +1,16 @@ +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.com/license/LICENSE. + * + * bspclean.c,v 1.2.2.1 2003/09/04 18:44:49 joel Exp + */ + +void bsp_cleanup( void ) +{ + extern void _sys_exit(int); + _sys_exit(0); +} diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/bspstart.c b/c/src/lib/libbsp/mips/rbtx4925/startup/bspstart.c new file mode 100644 index 0000000000..0cda9b5f36 --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/bspstart.c @@ -0,0 +1,104 @@ +/* + * This routine starts the application. It includes application, + * board, and monitor specific initialization and configuration. + * The generic CPU dependent initialization has been performed + * before this routine is invoked. + * + * COPYRIGHT (c) 1989-2000. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.com/license/LICENSE. + * + * bspstart.c,v 1.4.2.1 2003/09/04 18:44:49 joel Exp + */ + +#include <string.h> + +#include <bsp.h> +#include <rtems/libio.h> +#include <rtems/libcsupport.h> + +#define LIBC_HEAP_SIZE (64 * 1024) + +extern int end; /* defined by linker */ + +/* + * The original table from the application and our copy of it with + * some changes. + */ + +extern rtems_configuration_table Configuration; + +rtems_configuration_table BSP_Configuration; + +rtems_cpu_table Cpu_table; + +char *rtems_progname; + +/* + * Use the shared implementations of the following routines + */ + +void bsp_postdriver_hook(void); +void bsp_libc_init( void *, uint32_t, int ); + +void init_exc_vecs(void); + +/* + * Function: bsp_pretasking_hook + * Created: 95/03/10 + * + * Description: + * BSP pretasking hook. Called just before drivers are initialized. + * Used to setup libc and install any BSP extensions. + * + * NOTES: + * Must not use libc (to do io) from here, since drivers are + * not yet initialized. + * + */ + +void bsp_pretasking_hook(void) +{ + uint32_t heap_start; + + heap_start = (uint32_t) &end; + if (heap_start & (CPU_ALIGNMENT-1)) + heap_start = (heap_start + CPU_ALIGNMENT) & ~(CPU_ALIGNMENT-1); + + bsp_libc_init((void *) heap_start, LIBC_HEAP_SIZE, 0); + +#ifdef RTEMS_DEBUG + rtems_debug_enable( RTEMS_DEBUG_ALL_MASK ); +#endif + +} + +/* + * bsp_start + * + * This routine does the bulk of the system initialization. + */ + +void bsp_start( void ) +{ + extern int WorkspaceBase; + extern void mips_install_isr_entries(void); + + /* Configure Number of Register Caches */ + + Cpu_table.pretasking_hook = bsp_pretasking_hook; /* init libc, etc. 
*/ + Cpu_table.postdriver_hook = bsp_postdriver_hook; + Cpu_table.interrupt_stack_size = 4096; + + BSP_Configuration.work_space_start = + (void *)((uint64_t)((&end) + LIBC_HEAP_SIZE + 0x100) & ~0x7); + + mips_install_isr_entries(); /* Install generic MIPS exception handler */ + +/* init_exc_vecs(); */ /* Install BSP specific exception handler */ + +} + diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/exception.S b/c/src/lib/libbsp/mips/rbtx4925/startup/exception.S new file mode 100644 index 0000000000..c9ea273e5b --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/exception.S @@ -0,0 +1,631 @@ +/* exception.S + * + * This file contains a customized MIPS exception handler. + * It hooks into the exception handler present in the resident + * PMON debug monitor. + * + * Author: Bruce Robinson + * + * This code was derived from cpu_asm.S with the following copyright: + * + * COPYRIGHT (c) 1996 by Transition Networks Inc. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Transition Networks not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * Transition Networks makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.s: + * + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ +/* @(#)exception.S 7/27/04 1.00 */ + +#include <rtems/mips/iregdef.h> +#include <rtems/mips/idtcpu.h> + + +#define FRAME(name,frm_reg,offset,ret_reg) \ + .globl name; \ + .ent name; \ +name:; \ + .frame frm_reg,offset,ret_reg +#define ENDFRAME(name) \ + .end name + + +#if __mips == 3 +/* 64 bit register operations */ +#define NOP nop +#define ADD dadd +#define STREG sd +#define LDREG ld +#define ADDU addu +#define ADDIU addiu +#define STREGC1 sdc1 +#define LDREGC1 ldc1 +#define R_SZ 8 +#define F_SZ 8 +#define SZ_INT 8 +#define SZ_INT_POW2 3 + +/* XXX if we don't always want 64 bit register ops, then another ifdef */ + +#elif __mips == 1 +/* 32 bit register operations*/ +#define NOP nop +#define ADD add +#define STREG sw +#define LDREG lw +#define ADDU add +#define ADDIU addi +#define STREGC1 swc1 +#define LDREGC1 lwc1 +#define R_SZ 4 +#define F_SZ 4 +#define SZ_INT 4 +#define SZ_INT_POW2 2 +#else +#error "mips assembly: what size registers do I deal with?" +#endif + + +#define ISR_VEC_SIZE 4 +#define EXCP_STACK_SIZE (NREGS*R_SZ) + + +#ifdef __GNUC__ +#define EXTERN(x,size) .extern x,size +#else +#define EXTERN(x,size) +#endif + + +EXTERN(_ISR_Nest_level, 4) +EXTERN(_Thread_Dispatch_disable_level,4) +EXTERN(_Context_Switch_necessary,4) +EXTERN(_ISR_Signals_to_thread_executing,4) +.extern _Thread_Dispatch +.extern _ISR_Vector_table + +/* void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + */ + +#if 0 +void _ISR_Handler() +{ + /* + * This discussion ignores a lot of the ugly details in a real + * implementation such as saving enough registers/state to be + * able to do something real. 
Keep in mind that the goal is + * to invoke a user's ISR handler which is written in C and + * uses a certain set of registers. + * + * Also note that the exact order is to a large extent flexible. + * Hardware will dictate a sequence for a certain subset of + * _ISR_Handler while requirements for setting + */ + + /* + * At entry to "common" _ISR_Handler, the vector number must be + * available. On some CPUs the hardware puts either the vector + * number or the offset into the vector table for this ISR in a + * known place. If the hardware does not give us this information, + * then the assembly portion of RTEMS for this port will contain + * a set of distinct interrupt entry points which somehow place + * the vector number in a known place (which is safe if another + * interrupt nests this one) and branches to _ISR_Handler. + * + */ +#endif +FRAME(rbtx4925_ISR_Handler,sp,0,ra) + .set noreorder + +#if 0 +/* Activate TX4925 PIO19 signal for diagnostics */ + lui k0,0xff1f + ori k0,k0,0xf500 + lw k0,(k0) + lui k1,0x8 + or k1,k1,k0 + lui k0,0xff1f + ori k0,k0,0xf500 + sw k1,(k0) +#endif + + mfc0 k0,C0_CAUSE /* Determine if an interrupt generated this exception */ + nop + and k1,k0,CAUSE_EXCMASK + beq k1,zero,_chk_int /* If so, branch to service here */ + nop + la k0,_int_esr_link /* Otherwise, jump to next exception handler in PMON exception chain */ + lw k0,(k0) + lw k0,4(k0) + j k0 + nop +_chk_int: + mfc0 k1,C0_SR + nop + and k0,k1 + and k0,(SR_IBIT1 | SR_IBIT2 | SR_IBIT3) + beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */ + nop + +/* For debugging interrupts, clear EXL to allow breakpoints */ +#if 0 + MFC0 k0, C0_SR + li k1,SR_EXL /* Clear EXL and Set IE to enable interrupts */ + not k1 + and k0,k1 + li k1,SR_IE + or k0, k1 + mtc0 k0, C0_SR + NOP +#endif + + + /* + * save some or all context on stack + * may need to save some special interrupt information for exit + */ + + /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */ + + /* wastes a lot of stack space for context?? 
*/ + ADDIU sp,sp,-EXCP_STACK_SIZE + + STREG ra, R_RA*R_SZ(sp) /* store ra on the stack */ + STREG v0, R_V0*R_SZ(sp) + STREG v1, R_V1*R_SZ(sp) + STREG a0, R_A0*R_SZ(sp) + STREG a1, R_A1*R_SZ(sp) + STREG a2, R_A2*R_SZ(sp) + STREG a3, R_A3*R_SZ(sp) + STREG t0, R_T0*R_SZ(sp) + STREG t1, R_T1*R_SZ(sp) + STREG t2, R_T2*R_SZ(sp) + STREG t3, R_T3*R_SZ(sp) + STREG t4, R_T4*R_SZ(sp) + STREG t5, R_T5*R_SZ(sp) + STREG t6, R_T6*R_SZ(sp) + STREG t7, R_T7*R_SZ(sp) + mflo t0 + STREG t8, R_T8*R_SZ(sp) + STREG t0, R_MDLO*R_SZ(sp) + STREG t9, R_T9*R_SZ(sp) + mfhi t0 + STREG gp, R_GP*R_SZ(sp) + STREG t0, R_MDHI*R_SZ(sp) + STREG fp, R_FP*R_SZ(sp) + + .set noat + STREG AT, R_AT*R_SZ(sp) + .set at + + mfc0 t0,C0_SR + dmfc0 t1,C0_EPC + STREG t0,R_SR*R_SZ(sp) + STREG t1,R_EPC*R_SZ(sp) + + /* + * + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * if ( _ISR_Nest_level == 0 ) + * switch to software interrupt stack + * #endif + */ + + /* + * _ISR_Nest_level++; + */ + lw t0,_ISR_Nest_level + NOP + add t0,t0,1 + sw t0,_ISR_Nest_level + /* + * _Thread_Dispatch_disable_level++; + */ + lw t1,_Thread_Dispatch_disable_level + NOP + add t1,t1,1 + sw t1,_Thread_Dispatch_disable_level + + + /* DEBUG - Add the following code to disable interrupts and clear EXL in status register, this will + allow memory exceptions to occur while servicing the current interrupt */ +#if 0 + li t0,~CAUSE_IP2_MASK /* Disable interrupts from internal interrupt controller */ + mfc0 t1,C0_SR + nop + and t1,t0 + mtc0 t1,C0_SR + nop + li t0,~SR_EXL /* Clear EXL in status register to allow memory exceptions to occur */ + mfc0 t1,C0_SR + nop + and t1,t0 + mtc0 t1,C0_SR + nop +#endif + + /* + * Call the CPU model or BSP specific routine to decode the + * interrupt source and actually vector to device ISR handlers. + */ + move a0,sp + jal mips_vector_isr_handlers + NOP + + /* Add the following code to disable interrupts (see DEBUG above) */ +#if 0 + li t0,SR_EXL /* Set EXL to hold off interrupts */ + mfc0 t1,C0_SR + nop + or t1,t0 + mtc0 t1,C0_SR + nop + li t0,CAUSE_IP2_MASK /* Enable interrupts from internal interrupt controller */ + mfc0 t1,C0_SR + nop + or t1,t0 + mtc0 t1,C0_SR + nop +#endif + +_ISR_Handler_cleanup: + + /* + * --_ISR_Nest_level; + */ + lw t2,_ISR_Nest_level + NOP + add t2,t2,-1 + sw t2,_ISR_Nest_level + /* + * --_Thread_Dispatch_disable_level; + */ + lw t1,_Thread_Dispatch_disable_level + NOP + add t1,t1,-1 + sw t1,_Thread_Dispatch_disable_level + /* + * if ( _Thread_Dispatch_disable_level || _ISR_Nest_level ) + * goto the label "exit interrupt (simple case)" + */ + or t0,t2,t1 + bne t0,zero,_ISR_Handler_exit + NOP + + + /* + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * restore stack + * #endif + * + * if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing ) + * goto the label "exit interrupt (simple case)" + */ + lw t0,_Context_Switch_necessary + lw t1,_ISR_Signals_to_thread_executing + NOP + or t0,t0,t1 + beq t0,zero,_ISR_Handler_exit + NOP + +/* +** Turn on interrupts before entering Thread_Dispatch which +** will run for a while, thus allowing new interrupts to +** be serviced. Observe the Thread_Dispatch_disable_level interlock +** that prevents recursive entry into Thread_Dispatch. 
+*/ + + mfc0 t0, C0_SR +#if __mips == 3 + li t1,SR_EXL /* Clear EXL and Set IE to enable interrupts */ + not t1 + and t0,t1 + li t1,SR_IE +#elif __mips == 1 + li t1,SR_IEC +#endif + or t0, t1 + mtc0 t0, C0_SR + NOP + + /* save off our stack frame so the context switcher can get to it */ + la t0,__exceptionStackFrame + STREG sp,(t0) + + jal _Thread_Dispatch + NOP + + /* and make sure its clear in case we didn't dispatch. if we did, its + ** already cleared */ + la t0,__exceptionStackFrame + STREG zero,(t0) + NOP + +/* +** turn interrupts back off while we restore context so +** a badly timed interrupt won't accidentally mess things up +*/ + mfc0 t0, C0_SR + li t1,SR_IE /* Clear IE first (recommended) */ + not t1 + and t0,t1 + mtc0 t0, C0_SR + li t1,SR_EXL | SR_IE /* Set EXL and IE, this puts status register bits back to interrupted state */ + or t0,t1 + + mtc0 t0, C0_SR + NOP + + /* + * prepare to get out of interrupt + * return from interrupt (maybe to _ISR_Dispatch) + * + * LABEL "exit interrupt (simple case):" + * prepare to get out of interrupt + * return from interrupt + */ + +_ISR_Handler_exit: + +/* restore interrupt context from stack */ + LDREG t8, R_MDLO*R_SZ(sp) + LDREG t0, R_T0*R_SZ(sp) + mtlo t8 + LDREG t8, R_MDHI*R_SZ(sp) + LDREG t1, R_T1*R_SZ(sp) + mthi t8 + LDREG t2, R_T2*R_SZ(sp) + LDREG t3, R_T3*R_SZ(sp) + LDREG t4, R_T4*R_SZ(sp) + LDREG t5, R_T5*R_SZ(sp) + LDREG t6, R_T6*R_SZ(sp) + LDREG t7, R_T7*R_SZ(sp) + LDREG t8, R_T8*R_SZ(sp) + LDREG t9, R_T9*R_SZ(sp) + LDREG gp, R_GP*R_SZ(sp) + LDREG fp, R_FP*R_SZ(sp) + LDREG ra, R_RA*R_SZ(sp) + LDREG a0, R_A0*R_SZ(sp) + LDREG a1, R_A1*R_SZ(sp) + LDREG a2, R_A2*R_SZ(sp) + LDREG a3, R_A3*R_SZ(sp) + LDREG v1, R_V1*R_SZ(sp) + LDREG v0, R_V0*R_SZ(sp) + + LDREG k1, R_EPC*R_SZ(sp) + mtc0 k1,C0_EPC + + .set noat + LDREG AT, R_AT*R_SZ(sp) + .set at + + ADDIU sp,sp,EXCP_STACK_SIZE + +_ISR_Handler_quick_exit: + eret + nop + + +#if 0 + .global int7_isr + .extern Interrupt_7_isr +int7_isr: + /* Verify interrupt is from Timer */ + la k0,IRCS /* read Interrupt Current Status register */ + lw k0,(k0) + nop /* reading from external device */ + li k1,IRCS_CAUSE_MASK + and k0,k0,k1 /* isolate interrupt cause */ + + li k1,INT7INT /* test for interrupt 7 */ + subu k1,k0,k1 + beq k1,zero,int7_isr1 + nop + j ra /* interrupt 7 no longer valid, return without doing anything */ + nop +int7_isr1: + j Interrupt_7_isr /* Jump to Interrupt 7 isr */ + nop +#endif + + .set reorder + +ENDFRAME(rbtx4925_ISR_Handler) + + +FRAME(_BRK_Handler,sp,0,ra) + .set noreorder + +#ifdef USC + la k0,INT_CFG3 /* Disable heartbeat interrupt in USC320, it interferes with PMON exception handler */ + lw k1,(k0) + li k0,~HBI_MASK + and k1,k1,k0 + la k0,INT_CFG3 + sw k1,(k0) +#endif + + la k0,_brk_esr_link /* Jump to next exception handler in PMON exception chain */ + lw k0,(k0) + lw k0,4(k0) + j k0 + nop + + .set reorder +ENDFRAME(_BRK_Handler) + + +/************************************************************************** +** +** init_exc_vecs() - moves the exception code into the addresses +** reserved for exception vectors +** +** UTLB Miss exception vector at address 0x80000000 +** +** General exception vector at address 0x80000080 +** +** RESET exception vector is at address 0xbfc00000 +** +***************************************************************************/ + +FRAME(init_exc_vecs,sp,0,ra) + .set noreorder + + .extern mon_onintr + +/* Install interrupt handler in PMON exception handling chain */ + + addiu sp,sp,-8 + sw ra,(sp) /* Save ra contents on stack */ + move 
a0,zero + la a1,_int_esr_link + jal mon_onintr /* Make PMON system call to install interrupt exception handler */ + nop + li a0,9 + la a1,_brk_esr_link + jal mon_onintr /* Make PMON system call to install break exception handler */ + nop + lw ra,(sp) + addiu sp,sp,8 /* Restore ra contents from stack */ + j ra + nop + + .set reorder +ENDFRAME(init_exc_vecs) + + +#if 0 /* Unused code below */ + +/************************************************************* +* enable_int7(ints) +* Enable interrupt 7 +*/ +FRAME(enable_int7,sp,0,ra) + .set noreorder + + la t0,IRDM1 # Set interrupt controller detection mode (bits 2-3 = 0 for int 7 active low) + li t1,0x0 + sw t1,(t0) + + la t0,IRLVL4 # Set interrupt controller level (bit 8-10 = 2 for int 7 at level 2) + li t1,0x200 + sw t1,(t0) + + la t0,IRMSK # Set interrupt controller mask + li t1,0x0 + sw t1,(t0) + + la t0,IRDEN # Enable interrupts from controller + li t1,0x1 + sw t1,(t0) + + j ra + nop + .set reorder +ENDFRAME(enable_int7) + +/************************************************************* +* disable_int7(ints) +* Disable interrupt 7 +*/ +FRAME(disable_int7,sp,0,ra) + .set noreorder + + la t0,IRLVL4 # Set interrupt controller level (bit 8-10 = 0 to diasble int 7) + li t1,0x200 + sw t1,(t0) + + j ra + nop + .set reorder +ENDFRAME(disable_int7) + +#endif + +/************************************************************* +* tx4925exception: +* Diagnostic code that can be hooked to PMON interrupt handler. +* Generates pulse on PIO22 pin. +* Called from _exception code in PMON (see mips.s of PMON). +* Return address is located in k1. +*/ +FRAME(tx4925exception,sp,0,ra) + .set noreorder + la k0,k1tmp + sw k1,(k0) + +/* Activate TX4925 PIO22 signal for diagnostics */ + lui k0,0xff1f + ori k0,k0,0xf500 + lw k0,(k0) + lui k1,0x40 + or k1,k1,k0 + lui k0,0xff1f + ori k0,k0,0xf500 + sw k1,(k0) + nop + +/* De-activate TX4925 PIO22 signal for diagnostics */ + lui k0,0xff1f + ori k0,k0,0xf500 + lw k0,(k0) + lui k1,0x40 + not k1 + and k1,k1,k0 + lui k0,0xff1f + ori k0,k0,0xf500 + sw k1,(k0) + nop + + la k0,k1tmp + lw k1,(k0) + j k1 + .set reorder +ENDFRAME(tx4925exception) + + + + + .data + +k1tmp: .word 0 /* Temporary strage for K1 during interrupt service */ + +/************************************************************* +* +* Exception handler links, used in PMON exception handler chains +*/ + /* Interrupt exception service routine link */ + .global _int_esr_link +_int_esr_link: + .word 0 + .word rbtx4925_ISR_Handler + + /* Break exception service routine link */ + .global _brk_esr_link +_brk_esr_link: + .word 0 + .word _BRK_Handler + + + + diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/idtmem.S b/c/src/lib/libbsp/mips/rbtx4925/startup/idtmem.S new file mode 100644 index 0000000000..4dde9b5716 --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/idtmem.S @@ -0,0 +1,922 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. 
+ +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + + $Id$ + +*/ + +/************************************************************************ +** +** idtmem.s - memory and cache functions +** +** Copyright 1991 Integrated Device Technology, Inc. +** All Rights Reserved +** +**************************************************************************/ + +/* + * 950313: Ketan fixed bugs in mfc0/mtc0 hazards, and removed hack + * to set mem_size. + */ + +#include <rtems/mips/iregdef.h> +#include <rtems/mips/idtcpu.h> +#include <rtems/asm.h> + + .data +mem_size: + .word 0 +dcache_size: + .word 0 +icache_size: +#if __mips == 1 + .word MINCACHE +#endif +#if __mips == 3 + .word 0 +#endif + +#if __mips == 3 + .data +scache_size: + .word 0 +icache_linesize: + .word 0 +dcache_linesize: + .word 0 +scache_linesize: + .word 0 +#endif + + .text + +#if __mips == 1 +#define CONFIGFRM ((2*4)+4) + +/************************************************************************* +** +** Config_Dcache() -- determine size of Data cache +** +**************************************************************************/ + +FRAME(config_Dcache,sp, CONFIGFRM, ra) + .set noreorder + subu sp,CONFIGFRM + sw ra,CONFIGFRM-4(sp) /* save return address */ + sw s0,4*4(sp) /* save s0 in first regsave slot */ + mfc0 s0,C0_SR /* save SR */ + nop + mtc0 zero,C0_SR /* disable interrupts */ + .set reorder + jal _size_cache /* returns Data cache size in v0 */ + sw v0, dcache_size /* save it */ + and s0, ~SR_PE /* do not clear PE */ + .set noreorder + mtc0 s0,C0_SR /* restore SR */ + nop + .set reorder + lw s0, 4*4(sp) /* restore s0 */ + lw ra,CONFIGFRM-4(sp) /* restore ra */ + addu sp,CONFIGFRM /* pop stack */ + j ra +ENDFRAME(config_Dcache) + +/************************************************************************* +** +** Config_Icache() -- determine size of Instruction cache +** MUST be run in uncached mode/handled in idt_csu.s +** +**************************************************************************/ + +FRAME(config_Icache,sp, CONFIGFRM, ra) + .set noreorder + subu sp,CONFIGFRM + sw ra,CONFIGFRM-4(sp) /* save return address */ + sw s0,4*4(sp) /* save s0 in first regsave slot */ + mfc0 s0,C0_SR /* save SR */ + nop + mtc0 zero, C0_SR /* disable interrupts */ + li v0,SR_SWC /* swap caches/disable ints */ + mtc0 v0,C0_SR + nop + .set reorder + jal _size_cache /* returns instruction cache size */ + .set noreorder + mtc0 zero,C0_SR /* swap back caches */ + nop + and s0,~SR_PE /* do not inadvertantly clear PE */ + mtc0 s0,C0_SR /* restore SR */ + nop + .set reorder + sw v0, icache_size /* save it AFTER caches back */ + lw s0,4*4(sp) /* restore s0 */ + lw ra,CONFIGFRM-4(sp) /* restore ra */ + addu sp,CONFIGFRM /* pop stack */ + j ra +ENDFRAME(config_Icache) + +/************************************************************************ +** +** _size_cache() +** returns cache size in v0 +** +************************************************************************/ + +FRAME(_size_cache,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR /* save current sr */ + nop + and t0,~SR_PE /* do not inadvertently clear PE */ + or v0,t0,SR_ISC /* isolate cache */ + mtc0 v0,C0_SR + /* + * First check if there is a cache there at all + */ + move v0,zero + li v1,0xa5a5a5a5 /* distinctive pattern */ + sw v1,K0BASE /* try to write into cache */ + lw 
t1,K0BASE /* try to read from cache */ + nop + mfc0 t2,C0_SR + nop + .set reorder + and t2,SR_CM + bne t2,zero,3f /* cache miss, must be no cache */ + bne v1,t1,3f /* data not equal -> no cache */ + /* + * Clear cache size boundries to known state. + */ + li v0,MINCACHE +1: + sw zero,K0BASE(v0) + sll v0,1 + ble v0,MAXCACHE,1b + + li v0,-1 + sw v0,K0BASE(zero) /* store marker in cache */ + li v0,MINCACHE /* MIN cache size */ + +2: lw v1,K0BASE(v0) /* Look for marker */ + bne v1,zero,3f /* found marker */ + sll v0,1 /* cache size * 2 */ + ble v0,MAXCACHE,2b /* keep looking */ + move v0,zero /* must be no cache */ + .set noreorder +3: mtc0 t0,C0_SR /* restore sr */ + j ra + nop +ENDFRAME(_size_cache) + .set reorder + +#define FLUSHFRM (2*4) + +/*************************************************************************** +** +** flush_Dcache() - flush entire Data cache +** +****************************************************************************/ +FRAME(flush_Dcache,sp,FLUSHFRM,ra) + lw t2, dcache_size + .set noreorder + mfc0 t3,C0_SR /* save SR */ + nop + and t3,~SR_PE /* dont inadvertently clear PE */ + beq t2,zero,_Dflush_done /* no D cache, get out! */ + nop + li v0, SR_ISC /* isolate cache */ + mtc0 v0, C0_SR + nop + .set reorder + li t0,K0BASE /* set loop registers */ + or t1,t0,t2 + +2: sb zero,0(t0) + sb zero,4(t0) + sb zero,8(t0) + sb zero,12(t0) + sb zero,16(t0) + sb zero,20(t0) + sb zero,24(t0) + addu t0,32 + sb zero,-4(t0) + bne t0,t1,2b + + .set noreorder +_Dflush_done: + mtc0 t3,C0_SR /* restore Status Register */ + .set reorder + j ra +ENDFRAME(flush_Dcache) + +/*************************************************************************** +** +** flush_Icache() - flush entire Instruction cache +** +** NOTE: Icache can only be flushed/cleared when uncached +** Code forces into uncached memory regardless of calling mode +** +****************************************************************************/ +FRAME(flush_Icache,sp,FLUSHFRM,ra) + lw t1,icache_size + .set noreorder + mfc0 t3,C0_SR /* save SR */ + nop + la v0,1f + li v1,K1BASE + or v0,v1 + j v0 /* force into non-cached space */ + nop +1: + and t3,~SR_PE /* dont inadvertently clear PE */ + beq t1,zero,_Iflush_done /* no i-cache get out */ + nop + li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */ + mtc0 v0,C0_SR + li t0,K0BASE + .set reorder + or t1,t0,t1 + +1: sb zero,0(t0) + sb zero,4(t0) + sb zero,8(t0) + sb zero,12(t0) + sb zero,16(t0) + sb zero,20(t0) + sb zero,24(t0) + addu t0,32 + sb zero,-4(t0) + bne t0,t1,1b + .set noreorder +_Iflush_done: + mtc0 t3,C0_SR /* un-isolate, enable interrupts */ + .set reorder + j ra +ENDFRAME(flush_Icache) + +/************************************************************************** +** +** clear_Dcache(base_addr, byte_count) - flush portion of Data cache +** +** a0 = base address of portion to be cleared +** a1 = byte count of length +** +***************************************************************************/ +FRAME(clear_Dcache,sp,0,ra) + + lw t2, dcache_size /* Data cache size */ + .set noreorder + mfc0 t3,C0_SR /* save SR */ + nop + and t3,~SR_PE /* dont inadvertently clear PE */ + nop + nop + .set reorder + /* + * flush data cache + */ + + .set noreorder + nop + li v0,SR_ISC /* isolate data cache */ + mtc0 v0,C0_SR + .set reorder + bltu t2,a1,1f /* cache is smaller than region */ + move t2,a1 +1: addu t2,a0 /* ending address + 1 */ + move t0,a0 + +1: sb zero,0(t0) + sb zero,4(t0) + sb zero,8(t0) + sb zero,12(t0) + sb zero,16(t0) + sb zero,20(t0) + sb zero,24(t0) + addu 
t0,32 + sb zero,-4(t0) + bltu t0,t2,1b + + .set noreorder + mtc0 t3,C0_SR /* un-isolate, enable interrupts */ + nop + .set reorder + j ra +ENDFRAME(clear_Dcache) + +/************************************************************************** +** +** clear_Icache(base_addr, byte_count) - flush portion of Instruction cache +** +** a0 = base address of portion to be cleared +** a1 = byte count of length +** +** NOTE: Icache can only be flushed/cleared when uncached +** Code forces into uncached memory regardless of calling mode +** +***************************************************************************/ +FRAME(clear_Icache,sp,0,ra) + + lw t1, icache_size /* Instruction cache size */ + /* + * flush text cache + */ + .set noreorder + mfc0 t3,C0_SR /* save SR */ + nop + la v0,1f + li v1,K1BASE + or v0,v1 + j v0 /* force into non-cached space */ + nop +1: + and t3,~SR_PE /* dont inadvertently clear PE */ + nop + nop + li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */ + mtc0 v0,C0_SR + .set reorder + bltu t1,a1,1f /* cache is smaller than region */ + move t1,a1 +1: addu t1,a0 /* ending address + 1 */ + move t0,a0 + + sb zero,0(t0) + sb zero,4(t0) + sb zero,8(t0) + sb zero,12(t0) + sb zero,16(t0) + sb zero,20(t0) + sb zero,24(t0) + addu t0,32 + sb zero,-4(t0) + bltu t0,t1,1b + .set noreorder + mtc0 t3,C0_SR /* un-isolate, enable interrupts */ + nop + nop + nop /* allow time for caches to swap */ + .set reorder + j ra +ENDFRAME(clear_Icache) + +/************************************************************************** +** +** get_mem_conf - get memory configuration +** +***************************************************************************/ + +FRAME(get_mem_conf,sp,0,ra) + + lw t6, mem_size + sw t6, 0(a0) + lw t7, icache_size + sw t7, 4(a0) + lw t8, dcache_size + sw t8, 8(a0) + j ra + +ENDFRAME(get_mem_conf) +#endif /* __mips == 1 */ + +#if __mips == 3 +#define LEAF(label) FRAME(label,sp,0,ra) +#define XLEAF(label) \ + .globl label ; \ +label: + +/* + * cacheop macro to automate cache operations + * first some helpers... 
+ */ +#define _mincache(size, maxsize) \ + bltu size,maxsize,8f ; \ + move size,maxsize ; \ +8: + +#define _align(tmp, minaddr, maxaddr, linesize) \ + subu tmp,linesize,1 ; \ + not tmp ; \ + and minaddr,tmp ; \ + addu maxaddr,-1 ; \ + and maxaddr,tmp + +/* This is a bit of a hack really because it relies on minaddr=a0 */ +#define _doop1(op1) \ + cache op1,0(a0) + +#define _doop2(op1, op2) \ + cache op1,0(a0) ; \ + cache op2,0(a0) + +/* specials for cache initialisation */ +#define _doop1lw1(op1) \ + cache op1,0(a0) ; \ + lw zero,0(a0) ; \ + cache op1,0(a0) + +#define _doop121(op1,op2) \ + cache op1,0(a0) ; \ + nop; \ + cache op2,0(a0) ; \ + nop; \ + cache op1,0(a0) + +#define _oploopn(minaddr, maxaddr, linesize, tag, ops) \ + .set noreorder ; \ +7: _doop##tag##ops ; \ + bne minaddr,maxaddr,7b ; \ + addu minaddr,linesize ; \ + .set reorder + +/* finally the cache operation macros */ +#define icacheopn(kva, n, cache_size, cache_linesize, tag, ops) \ + _mincache(n, cache_size); \ + blez n,9f ; \ + addu n,kva ; \ + _align(t1, kva, n, cache_linesize) ; \ + _oploopn(kva, n, cache_linesize, tag, ops) ; \ +9: + +#define vcacheopn(kva, n, cache_size, cache_linesize, tag, ops) \ + blez n,9f ; \ + addu n,kva ; \ + _align(t1, kva, n, cache_linesize) ; \ + _oploopn(kva, n, cache_linesize, tag, ops) ; \ +9: + +#define icacheop(kva, n, cache_size, cache_linesize, op) \ + icacheopn(kva, n, cache_size, cache_linesize, 1, (op)) + +#define vcacheop(kva, n, cache_size, cache_linesize, op) \ + vcacheopn(kva, n, cache_size, cache_linesize, 1, (op)) + + .text + +/* + * static void _size_cache() R4000 + * + * Internal routine to determine cache sizes by looking at R4000 config + * register. Sizes are returned in registers, as follows: + * t2 icache size + * t3 dcache size + * t6 scache size + * t4 icache line size + * t5 dcache line size + * t7 scache line size + */ +LEAF(_size_cache) + mfc0 t0,C0_CONFIG + + and t1,t0,CFG_ICMASK + srl t1,CFG_ICSHIFT + li t2,0x1000 + sll t2,t1 + + and t1,t0,CFG_DCMASK + srl t1,CFG_DCSHIFT + li t3,0x1000 + sll t3,t1 + + li t4,32 + and t1,t0,CFG_IB + bnez t1,1f + li t4,16 +1: + + li t5,32 + and t1,t0,CFG_DB + bnez t1,1f + li t5,16 +1: + + move t6,zero # default to no scache + move t7,zero # + + and t1,t0,CFG_C_UNCACHED # test config register + bnez t1,1f # no scache if uncached/non-coherent + + li t6,0x100000 # assume 1Mb scache <<-NOTE + and t1,t0,CFG_SBMASK + srl t1,CFG_SBSHIFT + li t7,16 + sll t7,t1 +1: j ra +ENDFRAME(_size_cache) + +/* + * void config_cache() R4000 + * + * Work out size of I, D & S caches, assuming they are already initialised. + */ +LEAF(config_cache) + lw t0,icache_size + bgtz t0,8f # already known? + move v0,ra + bal _size_cache + move ra,v0 + + sw t2,icache_size + sw t3,dcache_size + sw t6,scache_size + sw t4,icache_linesize + sw t5,dcache_linesize + sw t7,scache_linesize +8: j ra +ENDFRAME(config_cache) + +/* + * void _init_cache() R4000 + */ +LEAF(_init_cache) + /* + * First work out the sizes + */ + move v0,ra + bal _size_cache + move ra,v0 + + /* + * The caches may be in an indeterminate state, + * so we force good parity into them by doing an + * invalidate, load/fill, invalidate for each line. 
+ */ + + /* disable all i/u and cache exceptions */ + mfc0 v0,C0_SR + and v1,v0,~SR_IE + or v1,SR_DE + mtc0 v1,C0_SR + + mtc0 zero,C0_TAGLO + mtc0 zero,C0_TAGHI + + /* assume bottom of RAM will generate good parity for the cache */ + li a0,PHYS_TO_K0(0) + move a2,t2 # icache_size + move a3,t4 # icache_linesize + move a1,a2 + icacheopn(a0,a1,a2,a3,121,(Index_Store_Tag_I,Fill_I)) + + li a0,PHYS_TO_K0(0) + move a2,t3 # dcache_size + move a3,t5 # dcache_linesize + move a1,a2 + icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_D)) + + /* assume unified I & D in scache <<-NOTE */ + blez t6,1f + li a0,PHYS_TO_K0(0) + move a2,t6 + move a3,t7 + move a1,a2 + icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_SD)) + +1: mtc0 v0,C0_SR + j ra +ENDFRAME(_init_cache) + +/* + * void flush_cache (void) R4000 + * + * Flush and invalidate all caches + */ +LEAF(flush_cache) + /* secondary cacheops do all the work if present */ + lw a2,scache_size + blez a2,1f + lw a3,scache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD) + b 2f + +1: + lw a2,icache_size + blez a2,2f + lw a3,icache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Invalidate_I) + + lw a2,dcache_size + lw a3,dcache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D) + +2: j ra +ENDFRAME(flush_cache) + +/* + * void flush_cache_nowrite (void) R4000 + * + * Invalidate all caches + */ +LEAF(flush_cache_nowrite) + mfc0 v0,C0_SR + and v1,v0,~SR_IE + mtc0 v1,C0_SR + + mtc0 zero,C0_TAGLO + mtc0 zero,C0_TAGHI + + lw a2,icache_size + blez a2,2f + lw a3,icache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Invalidate_I) + + lw a2,dcache_size + lw a3,dcache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Store_Tag_D) + + lw a2,scache_size + blez a2,2f + lw a3,scache_linesize + li a0,PHYS_TO_K0(0) + move a1,a2 + icacheop(a0,a1,a2,a3,Index_Store_Tag_SD) + +2: mtc0 v0,C0_SR + j ra +ENDFRAME(flush_cache_nowrite) + +/* + * void clean_cache (unsigned kva, size_t n) R4000 + * + * Writeback and invalidate address range in all caches + */ +LEAF(clean_cache) +XLEAF(clear_cache) + + /* secondary cacheops do all the work (if fitted) */ + lw a2,scache_size + blez a2,1f + lw a3,scache_linesize + vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD) + b 2f + +1: lw a2,icache_size + blez a2,2f + lw a3,icache_linesize + /* save kva & n for subsequent loop */ + move t8,a0 + move t9,a1 + vcacheop(a0,a1,a2,a3,Hit_Invalidate_I) + + lw a2,dcache_size + lw a3,dcache_linesize + /* restore kva & n */ + move a0,t8 + move a1,t9 + vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D) + +2: j ra +ENDFRAME(clean_cache) + +/* + * void clean_dcache (unsigned kva, size_t n) R4000 + * + * Writeback and invalidate address range in primary data cache + */ +LEAF(clean_dcache) + lw a2,dcache_size + blez a2,2f + lw a3,dcache_linesize + + vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D) + +2: j ra +ENDFRAME(clean_dcache) + +/* + * void clean_dcache_indexed (unsigned kva, size_t n) R4000 + * + * Writeback and invalidate indexed range in primary data cache + */ +LEAF(clean_dcache_indexed) + lw a2,dcache_size + blez a2,2f + lw a3,dcache_linesize + +#ifdef CPU_ORION + srl a2,1 # do one set (half cache) at a time + move t8,a0 # save kva & n + move t9,a1 + icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D) + + addu a0,t8,a2 # do next set + move a1,t9 # restore n +#endif + icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D) + +2: j ra +ENDFRAME(clean_dcache_indexed) + +/* + * void clean_dcache_nowrite 
(unsigned kva, size_t n) R4000 + * + * Invalidate an address range in primary data cache + */ +LEAF(clean_dcache_nowrite) + lw a2,dcache_size + blez a2,2f + lw a3,dcache_linesize + + vcacheop(a0,a1,a2,a3,Hit_Invalidate_D) + +2: j ra +ENDFRAME(clean_dcache_nowrite) + +/* + * void clean_dcache_nowrite_indexed (unsigned kva, size_t n) R4000 + * + * Invalidate indexed range in primary data cache + */ +LEAF(clean_dcache_nowrite_indexed) + mfc0 v0,C0_SR + and v1,v0,~SR_IE + mtc0 v1,C0_SR + + mtc0 zero,C0_TAGLO + mtc0 zero,C0_TAGHI + + lw a2,dcache_size + blez a2,2f + lw a3,dcache_linesize + +#ifdef CPU_ORION + srl a2,1 # do one set (half cache) at a time + move t8,a0 # save kva & n + move t9,a1 + icacheop(a0,a1,a2,a3,Index_Store_Tag_D) + + addu a0,t8,a2 # do next set + move a1,t9 # restore n +#endif + icacheop(a0,a1,a2,a3,Index_Store_Tag_D) + +2: mtc0 v0,C0_SR + j ra +ENDFRAME(clean_dcache_nowrite_indexed) + +/* + * void clean_icache (unsigned kva, size_t n) R4000 + * + * Invalidate address range in primary instruction cache + */ +LEAF(clean_icache) + lw a2,icache_size + blez a2,2f + lw a3,icache_linesize + + vcacheop(a0,a1,a2,a3,Hit_Invalidate_I) + +2: j ra +ENDFRAME(clean_icache) + +/* + * void clean_icache_indexed (unsigned kva, size_t n) R4000 + * + * Invalidate indexed range in primary instruction cache + */ +LEAF(clean_icache_indexed) + lw a2,icache_size + blez a2,2f + lw a3,icache_linesize + +#ifdef CPU_ORION + srl a2,1 # do one set (half cache) at a time + move t8,a0 # save kva & n + move t9,a1 + icacheop(a0,a1,a2,a3,Index_Invalidate_I) + + addu a0,t8,a2 # do next set + move a1,t9 # restore n +#endif + icacheop(a0,a1,a2,a3,Index_Invalidate_I) + +2: j ra +ENDFRAME(clean_icache_indexed) + +/* + * void clean_scache (unsigned kva, size_t n) R4000 + * + * Writeback and invalidate address range in secondary cache + */ +LEAF(clean_scache) + lw a2,scache_size + blez a2,2f + lw a3,scache_linesize + vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD) + +2: j ra +ENDFRAME(clean_scache) + +/* + * void clean_scache_indexed (unsigned kva, size_t n) R4000 + * + * Writeback and invalidate indexed range in secondary cache + */ +LEAF(clean_scache_indexed) + lw a2,scache_size + blez a2,2f + lw a3,scache_linesize + + icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD) + +2: j ra +ENDFRAME(clean_scache_indexed) + +/* + * void clean_scache_nowrite (unsigned kva, size_t n) R4000 + * + * Invalidate an address range in secondary cache + */ +LEAF(clean_scache_nowrite) + lw a2,scache_size + blez a2,2f + lw a3,scache_linesize + + vcacheop(a0,a1,a2,a3,Hit_Invalidate_SD) + +2: j ra +ENDFRAME(clean_scache_nowrite) + +/* + * void clean_scache_nowrite_indexed (unsigned kva, size_t n) R4000 + * + * Invalidate indexed range in secondary cache + */ +LEAF(clean_scache_nowrite_indexed) + mfc0 v0,C0_SR + and v1,v0,~SR_IE + mtc0 v1,C0_SR + + mtc0 zero,C0_TAGLO + mtc0 zero,C0_TAGHI + + lw a2,scache_size + blez a2,2f + lw a3,scache_linesize + + icacheop(a0,a1,a2,a3,Index_Store_Tag_SD) + +2: mtc0 v0,C0_SR + j ra +ENDFRAME(clean_scache_nowrite_indexed) + +/************************************************************************** +** +** get_mem_conf - get memory configuration R4000 +** +***************************************************************************/ + +FRAME(get_mem_conf,sp,0,ra) + + lw t6, mem_size + sw t6, 0(a0) + lw t7, icache_size + sw t7, 4(a0) + lw t8, dcache_size + sw t8, 8(a0) + lw t7, scache_size + sw t7, 12(a0) + j ra + +ENDFRAME(get_mem_conf) + +#endif /* __mips == 3 */ + +/* + * void set_mem_size (mem_size) + * + * 
config_memory()'s memory size gets written into mem_size here. + * Now we don't need to call config_cache() with memory size - New to IDTC6.0 + */ +FRAME(set_memory_size,sp,0,ra) + sw a0, mem_size + j ra +ENDFRAME(set_memory_size) diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/idttlb.S b/c/src/lib/libbsp/mips/rbtx4925/startup/idttlb.S new file mode 100644 index 0000000000..ed8517363c --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/idttlb.S @@ -0,0 +1,390 @@ +/* + +Based upon IDT provided code with the following release: + +This source code has been made available to you by IDT on an AS-IS +basis. Anyone receiving this source is licensed under IDT copyrights +to use it in any way he or she deems fit, including copying it, +modifying it, compiling it, and redistributing it either with or +without modifications. No license under IDT patents or patent +applications is to be implied by the copyright license. + +Any user of this software should understand that IDT cannot provide +technical support for this software and will not be responsible for +any consequences resulting from the use of this software. + +Any person who transfers this source code or any derivative work must +include the IDT copyright notice, this paragraph, and the preceeding +two paragraphs in the transferred software. + +COPYRIGHT IDT CORPORATION 1996 +LICENSED MATERIAL - PROGRAM PROPERTY OF IDT + + idttlb.S,v 1.3 2000/10/24 21:50:37 joel Exp +*/ + + +/* +** idttlb.s - fetch the registers associated with and the contents +** of the tlb. +** +*/ +/* 950308: Ketan patched a few tlb functions that would not have worked.*/ +#include <rtems/mips/iregdef.h> +#include <rtems/mips/idtcpu.h> +#include <rtems/asm.h> + + + .text + +#if __mips == 1 +/* +** ret_tlblo -- returns the 'entrylo' contents for the TLB +** 'c' callable - as ret_tlblo(index) - where index is the +** tlb entry to return the lo value for - if called from assembly +** language then index should be in register a0. +*/ +FRAME(ret_tlblo,sp,0,ra) + .set noreorder + mfc0 t0,C0_SR # save sr + nop + and t0,~SR_PE # dont inadvertantly clear PE + mtc0 zero,C0_SR # clear interrupts + mfc0 t1,C0_TLBHI # save pid + sll a0,TLBINX_INXSHIFT # position index + mtc0 a0,C0_INX # write to index register + nop + tlbr # put tlb entry in entrylo and hi + nop + mfc0 v0,C0_TLBLO # get the requested entry lo + mtc0 t1,C0_TLBHI # restore pid + mtc0 t0,C0_SR # restore status register + j ra + nop + .set reorder +ENDFRAME(ret_tlblo) +#endif +#if __mips == 3 +/* +** ret_tlblo[01] -- returns the 'entrylo' contents for the TLB +** 'c' callable - as ret_tlblo(index) - where index is the +** tlb entry to return the lo value for - if called from assembly +** language then index should be in register a0. 
+*/ +FRAME(ret_tlblo0,sp,0,ra) + mfc0 t0,C0_SR # save sr + mtc0 zero,C0_SR # clear interrupts + mfc0 t1,C0_TLBHI # save pid + mtc0 a0,C0_INX # write to index register + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbr # put tlb entry in entrylo and hi + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mfc0 v0,C0_TLBLO0 # get the requested entry lo + mtc0 t1,C0_TLBHI # restore pid + mtc0 t0,C0_SR # restore status register + j ra +ENDFRAME(ret_tlblo0) + +FRAME(ret_tlblo1,sp,0,ra) + mfc0 t0,C0_SR # save sr + mtc0 zero,C0_SR # clear interrupts + mfc0 t1,C0_TLBHI # save pid + mtc0 a0,C0_INX # write to index register + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbr # put tlb entry in entrylo and hi + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mfc0 v0,C0_TLBLO1 # get the requested entry lo + mtc0 t1,C0_TLBHI # restore pid + mtc0 t0,C0_SR # restore status register + j ra +ENDFRAME(ret_tlblo1) + +/* +** ret_pagemask(index) -- return pagemask contents of tlb entry "index" +*/ +FRAME(ret_pagemask,sp,0,ra) + mfc0 t0,C0_SR # save sr + mtc0 zero,C0_SR # disable interrupts + mfc0 t1,C0_TLBHI # save current pid + mtc0 a0,C0_INX # drop it in C0 register + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbr # read entry to entry hi/lo + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mfc0 v0,C0_PAGEMASK # to return value + mtc0 t1,C0_TLBHI # restore current pid + mtc0 t0,C0_SR # restore sr + j ra +ENDFRAME(ret_pagemask) + +/* +** ret_tlbwired(void) -- return wired register +*/ +FRAME(ret_tlbwired,sp,0,ra) + mfc0 v0,C0_WIRED + j ra +ENDFRAME(ret_tlbwired) +#endif + +/* +** ret_tlbhi -- return the tlb entry high content for tlb entry +** index +*/ +FRAME(ret_tlbhi,sp,0,ra) +#if __mips == 1 + .set noreorder + mfc0 t0,C0_SR # save sr + nop + and t0,~SR_PE + mtc0 zero,C0_SR # disable interrupts + mfc0 t1,C0_TLBHI # save current pid + sll a0,TLBINX_INXSHIFT # position index + mtc0 a0,C0_INX # drop it in C0 register + nop + tlbr # read entry to entry hi/lo + nop + mfc0 v0,C0_TLBHI # to return value + mtc0 t1,C0_TLBHI # restore current pid + mtc0 t0,C0_SR # restore sr + j ra + nop + .set reorder +#endif +#if __mips == 3 + mfc0 t0,C0_SR # save sr + mtc0 zero,C0_SR # disable interrupts + mfc0 t1,C0_TLBHI # save current pid + mtc0 a0,C0_INX # drop it in C0 register + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbr # read entry to entry hi/lo0/lo1/mask + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mfc0 v0,C0_TLBHI # to return value + mtc0 t1,C0_TLBHI # restore current pid + mtc0 t0,C0_SR # restore sr + j ra +#endif +ENDFRAME(ret_tlbhi) + +/* +** ret_tlbpid() -- return tlb pid contained in the current entry hi +*/ +FRAME(ret_tlbpid,sp,0,ra) +#if __mips == 1 + .set noreorder + mfc0 v0,C0_TLBHI # fetch tlb high + nop + and v0,TLBHI_PIDMASK # isolate and position + srl v0,TLBHI_PIDSHIFT + j ra + nop + .set reorder +#endif +#if __mips == 3 + mfc0 v0,C0_TLBHI # to return value + nop + and v0,TLBHI_PIDMASK + j ra +#endif +ENDFRAME(ret_tlbpid) + +/* +** tlbprobe(address, pid) -- probe the tlb to see if address is currently +** mapped +** a0 = vpn - virtual page numbers are 0=0 1=0x1000, 2=0x2000... 
+** virtual page numbers for the r3000 are in +** entry hi bits 31-12 +** a1 = pid - this is a process id ranging from 0 to 63 +** this process id is shifted left 6 bits and or'ed into +** the entry hi register +** returns an index value (0-63) if successful -1 -f not +*/ +FRAME(tlbprobe,sp,0,ra) +#if __mips == 1 + .set noreorder + mfc0 t0,C0_SR /* fetch status reg */ + and a0,TLBHI_VPNMASK /* isolate just the vpn */ + and t0,~SR_PE /* don't inadvertantly clear pe */ + mtc0 zero,C0_SR + mfc0 t1,C0_TLBHI + sll a1,TLBHI_PIDSHIFT /* possition the pid */ + and a1,TLBHI_PIDMASK + or a0,a1 /* build entry hi value */ + mtc0 a0,C0_TLBHI + nop + tlbp /* do the probe */ + nop + mfc0 v1,C0_INX + li v0,-1 + bltz v1,1f + nop + sra v0,v1,TLBINX_INXSHIFT /* get index positioned for return */ +1: + mtc0 t1,C0_TLBHI /* restore tlb hi */ + mtc0 t0,C0_SR /* restore the status reg */ + j ra + nop + .set reorder +#endif +#if __mips == 3 + mfc0 t0,C0_SR # save sr + mtc0 zero,C0_SR # disable interrupts + mfc0 t1,C0_TLBHI # save current pid + and a0,TLBHI_VPN2MASK # construct tlbhi for probe + and a1,TLBHI_PIDMASK + or a0,a1 + mtc0 a0,C0_TLBHI + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbp # probe entry to entry hi/lo0/lo1/mask + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mfc0 v1,C0_INX + li v0,-1 + bltz v1,1f + move v0,v1 +1: mtc0 t1,C0_TLBHI # restore current pid + mtc0 t0,C0_SR # restore sr + j ra +#endif +ENDFRAME(tlbprobe) + +/* +** resettlb(index) Invalidate the TLB entry specified by index +*/ +FRAME(resettlb,sp,0,ra) +#if __mips == 1 + .set noreorder + mfc0 t0,C0_TLBHI # fetch the current hi + mfc0 v0,C0_SR # fetch the status reg. + li t2,K0BASE&TLBHI_VPNMASK + and v0,~SR_PE # dont inadvertantly clear PE + mtc0 zero,C0_SR + mtc0 t2,C0_TLBHI # set up tlbhi + mtc0 zero,C0_TLBLO + sll a0,TLBINX_INXSHIFT + mtc0 a0,C0_INX + nop + tlbwi # do actual invalidate + nop + mtc0 t0,C0_TLBHI + mtc0 v0,C0_SR + j ra + nop + .set reorder +#endif +#if __mips == 3 + li t2,K0BASE&TLBHI_VPN2MASK + mfc0 t0,C0_TLBHI # save current TLBHI + mfc0 v0,C0_SR # save SR and disable interrupts + mtc0 zero,C0_SR + mtc0 t2,C0_TLBHI # invalidate entry + mtc0 zero,C0_TLBLO0 + mtc0 zero,C0_TLBLO1 + mtc0 a0,C0_INX + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbwi + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mtc0 t0,C0_TLBHI + mtc0 v0,C0_SR + j ra +#endif +ENDFRAME(resettlb) + +#if __mips == 1 +/* +** Setup TLB entry +** +** map_tlb(index, tlbhi, phypage) +** a0 = TLB entry index +** a1 = virtual page number and PID +** a2 = physical page +*/ +FRAME(map_tlb,sp,0,ra) + .set noreorder + sll a0,TLBINX_INXSHIFT + mfc0 v0,C0_SR # fetch the current status + mfc0 a3,C0_TLBHI # save the current hi + and v0,~SR_PE # dont inadvertantly clear parity + + mtc0 zero,C0_SR + mtc0 a1,C0_TLBHI # set the hi entry + mtc0 a2,C0_TLBLO # set the lo entry + mtc0 a0,C0_INX # load the index + nop + tlbwi # put the hi/lo in tlb entry indexed + nop + mtc0 a3,C0_TLBHI # put back the tlb hi reg + mtc0 v0,C0_SR # restore the status register + j ra + nop + .set reorder +ENDFRAME(map_tlb) +#endif +#if __mips == 3 +/* +** Setup R4000 TLB entry +** +** map_tlb4000(mask_index, tlbhi, pte_even, pte_odd) +** a0 = TLB entry index and page mask +** a1 = virtual page number and PID +** a2 = pte -- contents of even pte +** a3 = pte -- contents of odd pte +*/ +FRAME(map_tlb4000,sp,0,ra) + and t2,a0,TLBPGMASK_MASK + and a0,TLBINX_INXMASK + mfc0 t1,C0_TLBHI # save 
current TLBPID + mfc0 v0,C0_SR # save SR and disable interrupts + mtc0 zero,C0_SR + mtc0 t2,C0_PAGEMASK # set + mtc0 a1,C0_TLBHI # set VPN and TLBPID + mtc0 a2,C0_TLBLO0 # set PPN and access bits + mtc0 a3,C0_TLBLO1 # set PPN and access bits + mtc0 a0,C0_INX # set INDEX to wired entry + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + tlbwi # drop it in + .set noreorder + nop; nop; nop; nop; nop; nop; nop; nop + .set reorder + mtc0 t1,C0_TLBHI # restore TLBPID + mtc0 v0,C0_SR # restore SR + j ra +ENDFRAME(map_tlb4000) +#endif + + +/* +** Set current TLBPID. This assumes PID is positioned correctly in reg. +** a0. +*/ +FRAME(set_tlbpid,sp,0,ra) + .set noreorder + mtc0 a0,C0_TLBHI + j ra + nop + .set reorder +ENDFRAME(set_tlbpid) + diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/inittlb.c b/c/src/lib/libbsp/mips/rbtx4925/startup/inittlb.c new file mode 100644 index 0000000000..40df161286 --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/inittlb.c @@ -0,0 +1,16 @@ +/* + * inittlb.c,v 1.2 1999/03/31 23:21:19 joel Exp + */ + +#include <rtems/mips/idtcpu.h> + +extern void resettlb( int i ); + +void init_tlb(void) +{ + int i; + + for (i = 0; i < N_TLB_ENTRIES; i++ ) + resettlb(i); +} + diff --git a/c/src/lib/libbsp/mips/rbtx4925/startup/linkcmds b/c/src/lib/libbsp/mips/rbtx4925/startup/linkcmds new file mode 100644 index 0000000000..70b8f60df8 --- /dev/null +++ b/c/src/lib/libbsp/mips/rbtx4925/startup/linkcmds @@ -0,0 +1,163 @@ +/* + * linkcmds,v 1.10 2003/01/20 20:20:11 joel Exp + * + */ + +/* + * Declare some sizes. + */ +_RamBase = DEFINED(_RamBase) ? _RamBase : 0x80000000; +_RamSize = DEFINED(_RamSize) ? _RamSize : 4M; +_StackSize = DEFINED(_StackSize) ? _StackSize : 0x1000; + +SECTIONS +{ + /* 0x80000000 - 0x8001FFFF used by PMON (with 0x80010000 - 0x8001FFFF as heap for symbol storage) + 0x80020000 - 0x8002FFFF reserved for shared memory + 0x80030000 beginning of text (code) section + */ + .text 0x80030000 : + { + _ftext = . ; + eprol = .; + *(.text) + *(.text.*) + *(.gnu.linkonce.t*) + *(.mips16.fn.*) + *(.mips16.call.*) + PROVIDE (__runtime_reloc_start = .); + *(.rel.sdata) + PROVIDE (__runtime_reloc_stop = .); + + /* + * Special FreeBSD sysctl sections. + */ + . = ALIGN (16); + __start_set_sysctl_set = .; + *(set_sysctl_*); + __stop_set_sysctl_set = ABSOLUTE(.); + *(set_domain_*); + *(set_pseudo_*); + + *(.gcc_except_table) + *(.eh_frame_hdr) + *(.eh_frame) + } + + .init : + { + KEEP(*crti.o(.init)) + KEEP(*(.init)) + KEEP(*crtn.o(.init)) + } + + .fini : + { + KEEP(*crti.o(.fini)) + KEEP(*(.fini)) + KEEP(*crtn.o(.fini)) + } + + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + + KEEP (*crtbegin.o(.ctors)) + + /* We don't want to include the .ctor section from + from the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + + KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + + etext = .; + _etext = .; + } + + .rdata : { + *(.rdata) + *(.rodata) + *(.rodata.*) + *(.gnu.linkonce.r*) + } + _fdata = ALIGN(16); + + .data : { + *(.data) + *(.data.*) + *(.gnu.linkonce.d*) + SORT(CONSTRUCTORS) + } + . = ALIGN(8); + + .jcr : { + KEEP (*(.jcr)) + } + + _gp = ALIGN(16) + 0x8000; + __global = _gp; + + .sdata : { + *(.sdata) + *(.sdata.*) + *(.gnu.linkonce.s*) + } + .lit8 : { + *(.lit8) + } + .lit4 : { + *(.lit4) + } + + edata = .; + _edata = .; + _fbss = .; + + .sbss : { + *(.sbss) + *(.scommon) + } + .bss : { + _bss_start = . ; + *(.bss) + *(.reginfo) + *(COMMON) + . = ALIGN (64); + _stack_limit = .; + . += _StackSize; + __stack = .; + _stack_init = .; + end = .; + _end = .; + } + + + /* Debug sections. These should never be loadable, but they must have + zero addresses for the debuggers to work correctly. */ + .line 0 : { *(.line) } + .debug 0 : { *(.debug) } + .debug_sfnames 0 : { *(.debug_sfnames) } + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_macinfo 0 : { *(.debug_macinfo) } + .debug_pubnames 0 : { *(.debug_pubnames) } + .debug_aranges 0 : { *(.debug_aranges) } +} |
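For orientation, exception.S in this commit hooks rbtx4925_ISR_Handler and _BRK_Handler into PMON's exception chain through two-word link records (_int_esr_link and _brk_esr_link) installed via mon_onintr(). A rough C sketch of that record layout and of the fall-through dispatch the handler performs for non-interrupt exceptions is given below; the struct layout is inferred from the lw 0(k0)/lw 4(k0) accesses and is an assumption, not a documented PMON interface, and the handler names are stand-ins for the assembly entry points.

#include <stddef.h>
#include <stdio.h>

/* Inferred layout of a PMON exception-chain link: exception.S stores a zero
 * word followed by the handler address, and the dispatch path reads word 0
 * as the next link and word 4 of that link as the handler to jump to. */
struct pmon_esr_link {
  struct pmon_esr_link *next;   /* filled in when PMON chains the link */
  void (*handler)(void);        /* e.g. rbtx4925_ISR_Handler */
};

/* Hypothetical stand-ins for the real assembly entry points. */
static void rbtx4925_isr_handler_stub(void) { puts("BSP interrupt handler"); }
static void next_monitor_handler(void)      { puts("forwarded to PMON"); }

static struct pmon_esr_link pmon_tail    = { NULL, next_monitor_handler };
static struct pmon_esr_link int_esr_link = { &pmon_tail, rbtx4925_isr_handler_stub };

/* Equivalent of "la k0,_int_esr_link; lw k0,(k0); lw k0,4(k0); j k0":
 * an exception the BSP does not handle is passed on to the next link. */
static void forward_to_next(const struct pmon_esr_link *link)
{
  if (link->next != NULL && link->next->handler != NULL)
    link->next->handler();
}

int main(void)
{
  forward_to_next(&int_esr_link);
  return 0;
}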