author    Joel Sherrill <joel.sherrill@OARcorp.com>  2010-05-13 20:51:39 +0000
committer Joel Sherrill <joel.sherrill@OARcorp.com>  2010-05-13 20:51:39 +0000
commit    a690c003e0f103041f49c065243d651821897605 (patch)
tree      4753b371c8f1254c367a43f7a4ceebe7ff827a41 /c/src/lib/libbsp/mips
parent    2010-05-13 Joel Sherrill <joel.sherrill@oarcorp.com> (diff)
2010-05-13 Joel Sherrill <joel.sherrill@oarcorp.com>
* shared/liblnk/lnklib.S, shared/liblnk/pmon.S, shared/liblnk/regs.h, shared/startup/idtmem.S, shared/startup/idttlb.S: New files.
Diffstat (limited to 'c/src/lib/libbsp/mips')
-rw-r--r--  c/src/lib/libbsp/mips/ChangeLog                |   5
-rw-r--r--  c/src/lib/libbsp/mips/shared/liblnk/lnklib.S   |  62
-rw-r--r--  c/src/lib/libbsp/mips/shared/liblnk/pmon.S     | 180
-rw-r--r--  c/src/lib/libbsp/mips/shared/liblnk/regs.h     | 137
-rw-r--r--  c/src/lib/libbsp/mips/shared/startup/idtmem.S  | 922
-rw-r--r--  c/src/lib/libbsp/mips/shared/startup/idttlb.S  | 390
6 files changed, 1696 insertions(+), 0 deletions(-)
diff --git a/c/src/lib/libbsp/mips/ChangeLog b/c/src/lib/libbsp/mips/ChangeLog
index 8970a093ea..8a6b78b311 100644
--- a/c/src/lib/libbsp/mips/ChangeLog
+++ b/c/src/lib/libbsp/mips/ChangeLog
@@ -1,5 +1,10 @@
2010-05-13 Joel Sherrill <joel.sherrill@oarcorp.com>
+ * shared/liblnk/lnklib.S, shared/liblnk/pmon.S, shared/liblnk/regs.h,
+ shared/startup/idtmem.S, shared/startup/idttlb.S: New files.
+
+2010-05-13 Joel Sherrill <joel.sherrill@oarcorp.com>
+
* shared/irq/exception.S: rbtx4925, rbtx4938, and hurricane had very
similar versions of exception. Now all use shared/irq/exception.S
diff --git a/c/src/lib/libbsp/mips/shared/liblnk/lnklib.S b/c/src/lib/libbsp/mips/shared/liblnk/lnklib.S
new file mode 100644
index 0000000000..0800e25078
--- /dev/null
+++ b/c/src/lib/libbsp/mips/shared/liblnk/lnklib.S
@@ -0,0 +1,62 @@
+/*
+ * lnklib.S,v 1.4 1999/03/31 23:21:19 joel Exp
+ */
+
+#include <rtems/mips/iregdef.h>
+#include <rtems/mips/idtcpu.h>
+
+#define FRAME(name,frm_reg,offset,ret_reg) \
+ .globl name; \
+ .ent name; \
+name:; \
+ .frame frm_reg,offset,ret_reg
+
+#define ENDFRAME(name) \
+ .end name
+
+#define PROM_LINK(name,entry) \
+ .globl name; \
+ .ent name; \
+name: la $2,+entry; \
+ j $2; \
+ .end name
+
+#define PROM_ENTRY(x) (0xbfc00000+((x)*8))
+
+#define PROM_RESET PROM_ENTRY(0)
+#define PROM_NOT_IMP PROM_ENTRY(1)
+#define PROM_RESTART PROM_ENTRY(2)
+#define PROM_REINIT PROM_ENTRY(3)
+#define PROM_GETCHAR PROM_ENTRY(11)
+#define PROM_PUTCHAR PROM_ENTRY(12)
+#define PROM_SHOWCHAR PROM_ENTRY(13)
+#define PROM_PRINTF PROM_ENTRY(16)
+#define PROM_RETURN PROM_ENTRY(17)
+
+#define PROM_RGETS PROM_ENTRY(25)
+#define PROM_FLUSHCACHE PROM_ENTRY(28)
+#define PROM_CLEARCACHE PROM_ENTRY(29)
+#define PROM_SETJMP PROM_ENTRY(30)
+#define PROM_LONGJMP PROM_ENTRY(31)
+
+ .text
+
+PROM_LINK(idtsim_putchar, PROM_PUTCHAR)
+PROM_LINK(idtsim_getchar, PROM_GETCHAR)
+PROM_LINK(idtsim_showchar, PROM_SHOWCHAR)
+PROM_LINK(idtsim__exit, PROM_RETURN)
+PROM_LINK(idtsim_reinit, PROM_REINIT)
+PROM_LINK(idtsim_restart, PROM_RESTART)
+PROM_LINK(idtsim_reset, PROM_RESET)
+PROM_LINK(idtsim_promexit, PROM_RETURN)
+PROM_LINK(idtsim_setjmp, PROM_SETJMP)
+PROM_LINK(idtsim_longjmp, PROM_LONGJMP)
+
+FRAME(idtsim_init_sbrk,sp,0,ra)
+ j ra
+ENDFRAME(idtsim_init_sbrk)
+
+FRAME(idtsim_init_file,sp,0,ra)
+ j ra
+ENDFRAME(idtsim_init_file)
+
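
The PROM_ENTRY() table above works because each IDT/SIM monitor service
occupies an 8-byte slot (presumably a jump and its delay slot) starting at
the boot ROM base 0xbfc00000, and PROM_LINK() simply loads that fixed
address and jumps to it. A minimal C sketch of the slot arithmetic,
assuming nothing beyond the macro itself; the slot numbers are the ones
listed above:

    #include <assert.h>

    #define PROM_ENTRY(x) (0xbfc00000u + ((x) * 8u))

    int main(void)
    {
        assert(PROM_ENTRY(0)  == 0xbfc00000u);  /* PROM_RESET   */
        assert(PROM_ENTRY(12) == 0xbfc00060u);  /* PROM_PUTCHAR */
        assert(PROM_ENTRY(17) == 0xbfc00088u);  /* PROM_RETURN  */
        return 0;
    }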
diff --git a/c/src/lib/libbsp/mips/shared/liblnk/pmon.S b/c/src/lib/libbsp/mips/shared/liblnk/pmon.S
new file mode 100644
index 0000000000..206cfe9a57
--- /dev/null
+++ b/c/src/lib/libbsp/mips/shared/liblnk/pmon.S
@@ -0,0 +1,180 @@
+/*
+ * pmon.S -- low-level entry points into PMON monitor.
+ *
+ * Copyright (c) 1996, 1997 Cygnus Support
+ *
+ * The authors hereby grant permission to use, copy, modify, distribute,
+ * and license this software and its documentation for any purpose, provided
+ * that existing copyright notices are retained in all copies and that this
+ * notice is included verbatim in any distributions. No written agreement,
+ * license, or royalty fee is required for any of the authorized uses.
+ * Modifications to this software may be copyrighted by their authors
+ * and need not follow the licensing terms described here, provided that
+ * the new terms are clearly indicated on the first page of each file where
+ * they apply.
+ */
+
+#ifdef __mips16
+/* This file contains 32 bit assembly code. */
+ .set nomips16
+#endif
+
+#if __mips < 3
+ /* This machine does not support 64-bit operations. */
+ #define ADDU addu
+ #define SUBU subu
+#else
+ /* This machine supports 64-bit operations. */
+ #define ADDU daddu
+ #define SUBU dsubu
+#endif
+
+#include <bsp/regs.h>
+
+ .text
+ .align 2
+
+#ifdef LSI
+ #define PMON_VECTOR 0xbfc00200
+#else
+ #define PMON_VECTOR 0xbfc00500
+#endif
+
+#ifndef __mips_eabi
+/* Provide named functions for entry into the monitor: */
+#define INDIRECT(name,index) \
+ .globl name; \
+ .ent name; \
+ .set noreorder; \
+name: la $2,+(PMON_VECTOR+((index)*4)); \
+ lw $2,0($2); \
+ j $2; \
+ nop; \
+ .set reorder; \
+ .end name
+
+#else
+#define INDIRECT(name,index) \
+ .globl name; \
+ .ent name; \
+ .set noreorder; \
+name: la $2,+(PMON_VECTOR+((index)*4)); \
+ lw $2,0($2); \
+ SUBU sp,sp,0x40; \
+ sd ra,0x38(sp); \
+ sd fp,0x30(sp); \
+ jal $2; \
+ move fp,sp; \
+ ld ra,0x38(sp); \
+ ld fp,0x30(sp); \
+ j ra; \
+ ADDU sp,sp,0x40; \
+ .set reorder; \
+ .end name
+#endif
+
+
+/* The following magic numbers are for the slots into the PMON monitor */
+/* The first are used as the lo-level library run-time: */
+INDIRECT(mon_read,0)
+INDIRECT(mon_write,1)
+INDIRECT(mon_open,2)
+INDIRECT(mon_close,3)
+/* The following are useful monitor routines: */
+INDIRECT(mon_ioctl,4)
+INDIRECT(mon_printf,5)
+INDIRECT(mon_vsprintf,6)
+INDIRECT(mon_ttctl,7)
+INDIRECT(mon_cliexit,8)
+INDIRECT(mon_getenv,9)
+INDIRECT(mon_onintr,10)
+INDIRECT(mon_flush_cache,11)
+INDIRECT(mon_exception,12)
+INDIRECT(mon_fpgaconfig,21)
+
+#if 0
+
+/* The following routine is required by the "print()" function: */
+ .globl pmon_outbyte
+ .ent pmon_outbyte
+ .set noreorder
+pmon_outbyte:
+ subu sp,sp,0x20 /* allocate stack space for string */
+ sd ra,0x18(sp) /* stack return address */
+ sd fp,0x10(sp) /* stack frame-pointer */
+ move fp,sp /* take a copy of the stack pointer */
+ /* We leave so much space on the stack for the string (16
+ characters), since the call to mon_printf seems to corrupt
+	   the 8 bytes at offset 8 into the string/stack. */
+ sb a0,0x00(sp) /* character to print */
+ sb z0,0x01(sp) /* NUL terminator */
+ jal mon_printf /* and output the string */
+ move a0,sp /* take a copy of the string pointer {DELAY SLOT} */
+
+ move sp,fp /* recover stack pointer */
+ ld ra,0x18(sp) /* recover return address */
+ ld fp,0x10(sp) /* recover frame-pointer */
+ j ra /* return to the caller */
+ addu sp,sp,0x20 /* dump the stack space {DELAY SLOT} */
+ .set reorder
+ .end pmon_outbyte
+
+/* The following routine is required by the "sbrk()" function: */
+ .globl get_mem_info
+ .ent get_mem_info
+ .set noreorder
+get_mem_info:
+ # in: a0 = pointer to 3 word structure
+ # out: void
+ subu sp,sp,0x18 /* create some stack space */
+ sd ra,0x00(sp) /* stack return address */
+ sd fp,0x08(sp) /* stack frame-pointer */
+ sd a0,0x10(sp) /* stack structure pointer */
+ move fp,sp /* take a copy of the stack pointer */
+
+ # The monitor has already sized memory, but unfortunately we
+ # do not have access to the data location containing the
+ # memory size.
+
+ jal __sizemem
+ nop
+
+ ld a0,0x10(sp) # recover structure pointer
+ sw v0,0(a0) # amount of memory available
+
+ # Deal with getting the cache size information:
+ mfc0 a1, C0_CONFIG
+ nop
+ nop
+ andi a2,a1,0x7 << 9 # bits 11..9 for instruction cache size
+ sll a2,a2,12 - 8
+ sw a2,4(a0)
+ andi a2,a1,0x7 << 6 # bits 8..6 for data cache size
+ sll a2,a2,12 - 5
+ sw a2,8(a0) # data cache size
+ #
+ move sp,fp /* recover stack pointer */
+ ld ra,0x00(sp) /* recover return address */
+ ld fp,0x08(sp) /* recover frame-pointer */
+ j ra /* return to the caller */
+ addu sp,sp,0x18 /* restore stack pointer {DELAY SLOT} */
+ .set reorder
+ .end get_mem_info
+
+#ifdef LSI
+
+# For the LSI MiniRISC board, we can safely assume that we have
+# at least one megabyte of RAM.
+
+ .globl __sizemem
+ .ent __sizemem
+__sizemem:
+ li v0,0x100000
+ j ra
+ .end __sizemem
+#else
+
+#endif
+
+#endif
+/* EOF pmon.S */
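
INDIRECT() above dispatches through a table of 4-byte function pointers
that PMON publishes at PMON_VECTOR, one slot per service. The same lookup
in C is sketched below, assuming a 32-bit MIPS environment where a
function pointer fits in a uint32_t; the slot number and the varargs
signature for mon_printf come from the INDIRECT list above:

    #include <stdint.h>

    #define PMON_VECTOR 0xbfc00500u   /* non-LSI value, as in pmon.S */

    typedef int (*mon_printf_t)(const char *fmt, ...);

    static int call_mon_printf(const char *msg)
    {
        /* slot 5 of the PMON dispatch table holds mon_printf */
        volatile uint32_t *table = (volatile uint32_t *)PMON_VECTOR;
        mon_printf_t fn = (mon_printf_t)(uintptr_t)table[5];
        return fn("%s", msg);
    }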
diff --git a/c/src/lib/libbsp/mips/shared/liblnk/regs.h b/c/src/lib/libbsp/mips/shared/liblnk/regs.h
new file mode 100644
index 0000000000..1befa8dc79
--- /dev/null
+++ b/c/src/lib/libbsp/mips/shared/liblnk/regs.h
@@ -0,0 +1,137 @@
+/*
+ * regs.h -- standard MIPS register names.
+ *
+ * Copyright (c) 1995 Cygnus Support
+ *
+ * The authors hereby grant permission to use, copy, modify, distribute,
+ * and license this software and its documentation for any purpose, provided
+ * that existing copyright notices are retained in all copies and that this
+ * notice is included verbatim in any distributions. No written agreement,
+ * license, or royalty fee is required for any of the authorized uses.
+ * Modifications to this software may be copyrighted by their authors
+ * and need not follow the licensing terms described here, provided that
+ * the new terms are clearly indicated on the first page of each file where
+ * they apply.
+ */
+
+/* Standard MIPS register names: */
+#define zero $0
+#define z0 $0
+#define v0 $2
+#define v1 $3
+#define a0 $4
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+#define s0 $16
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24
+#define t9 $25
+#define k0 $26 /* kernel private register 0 */
+#define k1 $27 /* kernel private register 1 */
+#define gp $28 /* global data pointer */
+#define sp $29 /* stack-pointer */
+#define fp $30 /* frame-pointer */
+#define ra $31 /* return address */
+#define pc $pc /* pc, used on mips16 */
+
+#define fp0 $f0
+#define fp1 $f1
+
+/* Useful memory constants: */
+#define K0BASE 0x80000000
+#ifndef __mips64
+#define K1BASE 0xA0000000
+#else
+#define K1BASE 0xFFFFFFFFA0000000LL
+#endif
+
+#define PHYS_TO_K1(a) ((unsigned)(a) | K1BASE)
+
+/* Standard Co-Processor 0 register numbers: */
+#define C0_COUNT $9 /* Count Register */
+#define C0_SR $12 /* Status Register */
+#define C0_CAUSE $13 /* last exception description */
+#define C0_EPC $14 /* Exception error address */
+#define C0_CONFIG $16 /* CPU configuration */
+
+/* Standard Status Register bitmasks: */
+#define SR_CU1 0x20000000 /* Mark CP1 as usable */
+#define SR_FR 0x04000000 /* Enable MIPS III FP registers */
+#define SR_BEV 0x00400000 /* Controls location of exception vectors */
+#define SR_PE 0x00100000 /* Mark soft reset (clear parity error) */
+
+#define SR_KX 0x00000080 /* Kernel extended addressing enabled */
+#define SR_SX 0x00000040 /* Supervisor extended addressing enabled */
+#define SR_UX 0x00000020 /* User extended addressing enabled */
+
+/* Standard (R4000) cache operations. Taken from "MIPS R4000
+ Microprocessor User's Manual" 2nd edition: */
+
+#define CACHE_I (0) /* primary instruction */
+#define CACHE_D (1) /* primary data */
+#define CACHE_SI (2) /* secondary instruction */
+#define CACHE_SD (3) /* secondary data (or combined instruction/data) */
+
+#define INDEX_INVALIDATE (0) /* also encodes WRITEBACK if CACHE_D or CACHE_SD */
+#define INDEX_LOAD_TAG (1)
+#define INDEX_STORE_TAG (2)
+#define CREATE_DIRTY_EXCLUSIVE (3) /* CACHE_D and CACHE_SD only */
+#define HIT_INVALIDATE (4)
+#define CACHE_FILL (5) /* CACHE_I only */
+#define HIT_WRITEBACK_INVALIDATE (5) /* CACHE_D and CACHE_SD only */
+#define HIT_WRITEBACK (6) /* CACHE_I, CACHE_D and CACHE_SD only */
+#define HIT_SET_VIRTUAL (7) /* CACHE_SI and CACHE_SD only */
+
+#define BUILD_CACHE_OP(o,c) (((o) << 2) | (c))
+
+/* Individual cache operations: */
+#define INDEX_INVALIDATE_I BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_I)
+#define INDEX_WRITEBACK_INVALIDATE_D BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_D)
+#define INDEX_INVALIDATE_SI BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_SI)
+#define INDEX_WRITEBACK_INVALIDATE_SD BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_SD)
+
+#define INDEX_LOAD_TAG_I BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_I)
+#define INDEX_LOAD_TAG_D BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_D)
+#define INDEX_LOAD_TAG_SI BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_SI)
+#define INDEX_LOAD_TAG_SD BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_SD)
+
+#define INDEX_STORE_TAG_I BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_I)
+#define INDEX_STORE_TAG_D BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_D)
+#define INDEX_STORE_TAG_SI BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_SI)
+#define INDEX_STORE_TAG_SD BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_SD)
+
+#define CREATE_DIRTY_EXCLUSIVE_D BUILD_CACHE_OP(CREATE_DIRTY_EXCLUSIVE,CACHE_D)
+#define CREATE_DIRTY_EXCLUSIVE_SD BUILD_CACHE_OP(CREATE_DIRTY_EXCLUSIVE,CACHE_SD)
+
+#define HIT_INVALIDATE_I BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_I)
+#define HIT_INVALIDATE_D BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_D)
+#define HIT_INVALIDATE_SI BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_SI)
+#define HIT_INVALIDATE_SD BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_SD)
+
+#define CACHE_FILL_I BUILD_CACHE_OP(CACHE_FILL,CACHE_I)
+#define HIT_WRITEBACK_INVALIDATE_D BUILD_CACHE_OP(HIT_WRITEBACK_INVALIDATE,CACHE_D)
+#define HIT_WRITEBACK_INVALIDATE_SD BUILD_CACHE_OP(HIT_WRITEBACK_INVALIDATE,CACHE_SD)
+
+#define HIT_WRITEBACK_I BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_I)
+#define HIT_WRITEBACK_D BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_D)
+#define HIT_WRITEBACK_SD BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_SD)
+
+#define HIT_SET_VIRTUAL_SI BUILD_CACHE_OP(HIT_SET_VIRTUAL,CACHE_SI)
+#define HIT_SET_VIRTUAL_SD BUILD_CACHE_OP(HIT_SET_VIRTUAL,CACHE_SD)
+
+/*> EOF regs.h <*/
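
BUILD_CACHE_OP() above packs the CACHE instruction's 5-bit op field as
(operation << 2) | target. A small self-contained check of that encoding,
restating only constants already defined in this header:

    #include <assert.h>

    #define CACHE_D                  1
    #define HIT_WRITEBACK_INVALIDATE 5
    #define BUILD_CACHE_OP(o,c)      (((o) << 2) | (c))

    int main(void)
    {
        /* HIT_WRITEBACK_INVALIDATE_D encodes as (5 << 2) | 1 == 0x15 */
        assert(BUILD_CACHE_OP(HIT_WRITEBACK_INVALIDATE, CACHE_D) == 0x15);
        return 0;
    }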
diff --git a/c/src/lib/libbsp/mips/shared/startup/idtmem.S b/c/src/lib/libbsp/mips/shared/startup/idtmem.S
new file mode 100644
index 0000000000..4dde9b5716
--- /dev/null
+++ b/c/src/lib/libbsp/mips/shared/startup/idtmem.S
@@ -0,0 +1,922 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+
+ $Id$
+
+*/
+
+/************************************************************************
+**
+** idtmem.s - memory and cache functions
+**
+** Copyright 1991 Integrated Device Technology, Inc.
+** All Rights Reserved
+**
+**************************************************************************/
+
+/*
+ * 950313: Ketan fixed bugs in mfc0/mtc0 hazards, and removed hack
+ * to set mem_size.
+ */
+
+#include <rtems/mips/iregdef.h>
+#include <rtems/mips/idtcpu.h>
+#include <rtems/asm.h>
+
+ .data
+mem_size:
+ .word 0
+dcache_size:
+ .word 0
+icache_size:
+#if __mips == 1
+ .word MINCACHE
+#endif
+#if __mips == 3
+ .word 0
+#endif
+
+#if __mips == 3
+ .data
+scache_size:
+ .word 0
+icache_linesize:
+ .word 0
+dcache_linesize:
+ .word 0
+scache_linesize:
+ .word 0
+#endif
+
+ .text
+
+#if __mips == 1
+#define CONFIGFRM ((2*4)+4)
+
+/*************************************************************************
+**
+** Config_Dcache() -- determine size of Data cache
+**
+**************************************************************************/
+
+FRAME(config_Dcache,sp, CONFIGFRM, ra)
+ .set noreorder
+ subu sp,CONFIGFRM
+ sw ra,CONFIGFRM-4(sp) /* save return address */
+ sw s0,4*4(sp) /* save s0 in first regsave slot */
+ mfc0 s0,C0_SR /* save SR */
+ nop
+ mtc0 zero,C0_SR /* disable interrupts */
+ .set reorder
+ jal _size_cache /* returns Data cache size in v0 */
+ sw v0, dcache_size /* save it */
+ and s0, ~SR_PE /* do not clear PE */
+ .set noreorder
+ mtc0 s0,C0_SR /* restore SR */
+ nop
+ .set reorder
+ lw s0, 4*4(sp) /* restore s0 */
+ lw ra,CONFIGFRM-4(sp) /* restore ra */
+ addu sp,CONFIGFRM /* pop stack */
+ j ra
+ENDFRAME(config_Dcache)
+
+/*************************************************************************
+**
+** Config_Icache() -- determine size of Instruction cache
+** MUST be run in uncached mode/handled in idt_csu.s
+**
+**************************************************************************/
+
+FRAME(config_Icache,sp, CONFIGFRM, ra)
+ .set noreorder
+ subu sp,CONFIGFRM
+ sw ra,CONFIGFRM-4(sp) /* save return address */
+ sw s0,4*4(sp) /* save s0 in first regsave slot */
+ mfc0 s0,C0_SR /* save SR */
+ nop
+ mtc0 zero, C0_SR /* disable interrupts */
+ li v0,SR_SWC /* swap caches/disable ints */
+ mtc0 v0,C0_SR
+ nop
+ .set reorder
+ jal _size_cache /* returns instruction cache size */
+ .set noreorder
+ mtc0 zero,C0_SR /* swap back caches */
+ nop
+	and	s0,~SR_PE		/* do not inadvertently clear PE */
+ mtc0 s0,C0_SR /* restore SR */
+ nop
+ .set reorder
+ sw v0, icache_size /* save it AFTER caches back */
+ lw s0,4*4(sp) /* restore s0 */
+ lw ra,CONFIGFRM-4(sp) /* restore ra */
+ addu sp,CONFIGFRM /* pop stack */
+ j ra
+ENDFRAME(config_Icache)
+
+/************************************************************************
+**
+** _size_cache()
+** returns cache size in v0
+**
+************************************************************************/
+
+FRAME(_size_cache,sp,0,ra)
+ .set noreorder
+ mfc0 t0,C0_SR /* save current sr */
+ nop
+ and t0,~SR_PE /* do not inadvertently clear PE */
+ or v0,t0,SR_ISC /* isolate cache */
+ mtc0 v0,C0_SR
+ /*
+ * First check if there is a cache there at all
+ */
+ move v0,zero
+ li v1,0xa5a5a5a5 /* distinctive pattern */
+ sw v1,K0BASE /* try to write into cache */
+ lw t1,K0BASE /* try to read from cache */
+ nop
+ mfc0 t2,C0_SR
+ nop
+ .set reorder
+ and t2,SR_CM
+ bne t2,zero,3f /* cache miss, must be no cache */
+ bne v1,t1,3f /* data not equal -> no cache */
+ /*
+	 * Clear cache size boundaries to known state.
+ */
+ li v0,MINCACHE
+1:
+ sw zero,K0BASE(v0)
+ sll v0,1
+ ble v0,MAXCACHE,1b
+
+ li v0,-1
+ sw v0,K0BASE(zero) /* store marker in cache */
+ li v0,MINCACHE /* MIN cache size */
+
+2: lw v1,K0BASE(v0) /* Look for marker */
+ bne v1,zero,3f /* found marker */
+ sll v0,1 /* cache size * 2 */
+ ble v0,MAXCACHE,2b /* keep looking */
+ move v0,zero /* must be no cache */
+ .set noreorder
+3: mtc0 t0,C0_SR /* restore sr */
+ j ra
+ nop
+ENDFRAME(_size_cache)
+ .set reorder
+
+#define FLUSHFRM (2*4)
+
+/***************************************************************************
+**
+** flush_Dcache() - flush entire Data cache
+**
+****************************************************************************/
+FRAME(flush_Dcache,sp,FLUSHFRM,ra)
+ lw t2, dcache_size
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+	and	t3,~SR_PE		/* don't inadvertently clear PE */
+ beq t2,zero,_Dflush_done /* no D cache, get out! */
+ nop
+ li v0, SR_ISC /* isolate cache */
+ mtc0 v0, C0_SR
+ nop
+ .set reorder
+ li t0,K0BASE /* set loop registers */
+ or t1,t0,t2
+
+2: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bne t0,t1,2b
+
+ .set noreorder
+_Dflush_done:
+ mtc0 t3,C0_SR /* restore Status Register */
+ .set reorder
+ j ra
+ENDFRAME(flush_Dcache)
+
+/***************************************************************************
+**
+** flush_Icache() - flush entire Instruction cache
+**
+** NOTE: Icache can only be flushed/cleared when uncached
+** Code forces into uncached memory regardless of calling mode
+**
+****************************************************************************/
+FRAME(flush_Icache,sp,FLUSHFRM,ra)
+ lw t1,icache_size
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ la v0,1f
+ li v1,K1BASE
+ or v0,v1
+ j v0 /* force into non-cached space */
+ nop
+1:
+	and	t3,~SR_PE		/* don't inadvertently clear PE */
+ beq t1,zero,_Iflush_done /* no i-cache get out */
+ nop
+ li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */
+ mtc0 v0,C0_SR
+ li t0,K0BASE
+ .set reorder
+ or t1,t0,t1
+
+1: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bne t0,t1,1b
+ .set noreorder
+_Iflush_done:
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ .set reorder
+ j ra
+ENDFRAME(flush_Icache)
+
+/**************************************************************************
+**
+** clear_Dcache(base_addr, byte_count) - flush portion of Data cache
+**
+** a0 = base address of portion to be cleared
+** a1 = byte count of length
+**
+***************************************************************************/
+FRAME(clear_Dcache,sp,0,ra)
+
+ lw t2, dcache_size /* Data cache size */
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+	and	t3,~SR_PE		/* don't inadvertently clear PE */
+ nop
+ nop
+ .set reorder
+ /*
+ * flush data cache
+ */
+
+ .set noreorder
+ nop
+ li v0,SR_ISC /* isolate data cache */
+ mtc0 v0,C0_SR
+ .set reorder
+ bltu t2,a1,1f /* cache is smaller than region */
+ move t2,a1
+1: addu t2,a0 /* ending address + 1 */
+ move t0,a0
+
+1: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bltu t0,t2,1b
+
+ .set noreorder
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ nop
+ .set reorder
+ j ra
+ENDFRAME(clear_Dcache)
+
+/**************************************************************************
+**
+** clear_Icache(base_addr, byte_count) - flush portion of Instruction cache
+**
+** a0 = base address of portion to be cleared
+** a1 = byte count of length
+**
+** NOTE: Icache can only be flushed/cleared when uncached
+** Code forces into uncached memory regardless of calling mode
+**
+***************************************************************************/
+FRAME(clear_Icache,sp,0,ra)
+
+ lw t1, icache_size /* Instruction cache size */
+ /*
+ * flush text cache
+ */
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ la v0,1f
+ li v1,K1BASE
+ or v0,v1
+ j v0 /* force into non-cached space */
+ nop
+1:
+	and	t3,~SR_PE		/* don't inadvertently clear PE */
+ nop
+ nop
+ li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */
+ mtc0 v0,C0_SR
+ .set reorder
+ bltu t1,a1,1f /* cache is smaller than region */
+ move t1,a1
+1: addu t1,a0 /* ending address + 1 */
+ move t0,a0
+
+ sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bltu t0,t1,1b
+ .set noreorder
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ nop
+ nop
+ nop /* allow time for caches to swap */
+ .set reorder
+ j ra
+ENDFRAME(clear_Icache)
+
+/**************************************************************************
+**
+** get_mem_conf - get memory configuration
+**
+***************************************************************************/
+
+FRAME(get_mem_conf,sp,0,ra)
+
+ lw t6, mem_size
+ sw t6, 0(a0)
+ lw t7, icache_size
+ sw t7, 4(a0)
+ lw t8, dcache_size
+ sw t8, 8(a0)
+ j ra
+
+ENDFRAME(get_mem_conf)
+#endif /* __mips == 1 */
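
The R3000 _size_cache() above sizes an isolated cache by zeroing a word
at every power-of-two offset, planting a marker at offset 0, and
returning the first offset whose read aliases back onto that marker. A C
sketch of the same probe, illustrative only; it assumes the cache has
been isolated via SR_ISC exactly as the assembly arranges:

    /* With the cache isolated, offset == cache size indexes the same
       cache line as offset 0, so the marker reappears there. */
    static unsigned size_cache_probe(volatile char *base,
                                     unsigned mincache, unsigned maxcache)
    {
        unsigned sz;
        for (sz = mincache; sz <= maxcache; sz <<= 1)
            *(volatile unsigned *)(base + sz) = 0;
        *(volatile unsigned *)base = ~0u;      /* marker in line 0 */
        for (sz = mincache; sz <= maxcache; sz <<= 1)
            if (*(volatile unsigned *)(base + sz) != 0)
                return sz;                     /* first alias found */
        return 0;                              /* no cache present */
    }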
+
+#if __mips == 3
+#define LEAF(label) FRAME(label,sp,0,ra)
+#define XLEAF(label) \
+ .globl label ; \
+label:
+
+/*
+ * cacheop macro to automate cache operations
+ * first some helpers...
+ */
+#define _mincache(size, maxsize) \
+ bltu size,maxsize,8f ; \
+ move size,maxsize ; \
+8:
+
+#define _align(tmp, minaddr, maxaddr, linesize) \
+ subu tmp,linesize,1 ; \
+ not tmp ; \
+ and minaddr,tmp ; \
+ addu maxaddr,-1 ; \
+ and maxaddr,tmp
+
+/* This is a bit of a hack really because it relies on minaddr=a0 */
+#define _doop1(op1) \
+ cache op1,0(a0)
+
+#define _doop2(op1, op2) \
+ cache op1,0(a0) ; \
+ cache op2,0(a0)
+
+/* specials for cache initialisation */
+#define _doop1lw1(op1) \
+ cache op1,0(a0) ; \
+ lw zero,0(a0) ; \
+ cache op1,0(a0)
+
+#define _doop121(op1,op2) \
+ cache op1,0(a0) ; \
+ nop; \
+ cache op2,0(a0) ; \
+ nop; \
+ cache op1,0(a0)
+
+#define _oploopn(minaddr, maxaddr, linesize, tag, ops) \
+ .set noreorder ; \
+7: _doop##tag##ops ; \
+ bne minaddr,maxaddr,7b ; \
+ addu minaddr,linesize ; \
+ .set reorder
+
+/* finally the cache operation macros */
+#define icacheopn(kva, n, cache_size, cache_linesize, tag, ops) \
+ _mincache(n, cache_size); \
+ blez n,9f ; \
+ addu n,kva ; \
+ _align(t1, kva, n, cache_linesize) ; \
+ _oploopn(kva, n, cache_linesize, tag, ops) ; \
+9:
+
+#define vcacheopn(kva, n, cache_size, cache_linesize, tag, ops) \
+ blez n,9f ; \
+ addu n,kva ; \
+ _align(t1, kva, n, cache_linesize) ; \
+ _oploopn(kva, n, cache_linesize, tag, ops) ; \
+9:
+
+#define icacheop(kva, n, cache_size, cache_linesize, op) \
+ icacheopn(kva, n, cache_size, cache_linesize, 1, (op))
+
+#define vcacheop(kva, n, cache_size, cache_linesize, op) \
+ vcacheopn(kva, n, cache_size, cache_linesize, 1, (op))
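
The _align() helper above rounds the working range to cache-line
boundaries before _oploopn() walks it. The same arithmetic in C, assuming
a power-of-two line size as on the R4000:

    static void align_range(unsigned *minaddr, unsigned *maxaddr,
                            unsigned linesize)
    {
        unsigned mask = ~(linesize - 1u);
        *minaddr &= mask;                   /* first line in the range */
        *maxaddr = (*maxaddr - 1u) & mask;  /* last line actually touched */
    }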
+
+ .text
+
+/*
+ * static void _size_cache() R4000
+ *
+ * Internal routine to determine cache sizes by looking at R4000 config
+ * register. Sizes are returned in registers, as follows:
+ * t2 icache size
+ * t3 dcache size
+ * t6 scache size
+ * t4 icache line size
+ * t5 dcache line size
+ * t7 scache line size
+ */
+LEAF(_size_cache)
+ mfc0 t0,C0_CONFIG
+
+ and t1,t0,CFG_ICMASK
+ srl t1,CFG_ICSHIFT
+ li t2,0x1000
+ sll t2,t1
+
+ and t1,t0,CFG_DCMASK
+ srl t1,CFG_DCSHIFT
+ li t3,0x1000
+ sll t3,t1
+
+ li t4,32
+ and t1,t0,CFG_IB
+ bnez t1,1f
+ li t4,16
+1:
+
+ li t5,32
+ and t1,t0,CFG_DB
+ bnez t1,1f
+ li t5,16
+1:
+
+ move t6,zero # default to no scache
+ move t7,zero #
+
+ and t1,t0,CFG_C_UNCACHED # test config register
+ bnez t1,1f # no scache if uncached/non-coherent
+
+ li t6,0x100000 # assume 1Mb scache <<-NOTE
+ and t1,t0,CFG_SBMASK
+ srl t1,CFG_SBSHIFT
+ li t7,16
+ sll t7,t1
+1: j ra
+ENDFRAME(_size_cache)
+
+/*
+ * void config_cache() R4000
+ *
+ * Work out size of I, D & S caches, assuming they are already initialised.
+ */
+LEAF(config_cache)
+ lw t0,icache_size
+ bgtz t0,8f # already known?
+ move v0,ra
+ bal _size_cache
+ move ra,v0
+
+ sw t2,icache_size
+ sw t3,dcache_size
+ sw t6,scache_size
+ sw t4,icache_linesize
+ sw t5,dcache_linesize
+ sw t7,scache_linesize
+8: j ra
+ENDFRAME(config_cache)
+
+/*
+ * void _init_cache() R4000
+ */
+LEAF(_init_cache)
+ /*
+ * First work out the sizes
+ */
+ move v0,ra
+ bal _size_cache
+ move ra,v0
+
+ /*
+ * The caches may be in an indeterminate state,
+ * so we force good parity into them by doing an
+ * invalidate, load/fill, invalidate for each line.
+ */
+
+ /* disable all i/u and cache exceptions */
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ or v1,SR_DE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ /* assume bottom of RAM will generate good parity for the cache */
+ li a0,PHYS_TO_K0(0)
+ move a2,t2 # icache_size
+ move a3,t4 # icache_linesize
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,121,(Index_Store_Tag_I,Fill_I))
+
+ li a0,PHYS_TO_K0(0)
+ move a2,t3 # dcache_size
+ move a3,t5 # dcache_linesize
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_D))
+
+ /* assume unified I & D in scache <<-NOTE */
+ blez t6,1f
+ li a0,PHYS_TO_K0(0)
+ move a2,t6
+ move a3,t7
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_SD))
+
+1: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(_init_cache)
+
+/*
+ * void flush_cache (void) R4000
+ *
+ * Flush and invalidate all caches
+ */
+LEAF(flush_cache)
+ /* secondary cacheops do all the work if present */
+ lw a2,scache_size
+ blez a2,1f
+ lw a3,scache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD)
+ b 2f
+
+1:
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(flush_cache)
+
+/*
+ * void flush_cache_nowrite (void) R4000
+ *
+ * Invalidate all caches
+ */
+LEAF(flush_cache_nowrite)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_SD)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(flush_cache_nowrite)
+
+/*
+ * void clean_cache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in all caches
+ */
+LEAF(clean_cache)
+XLEAF(clear_cache)
+
+ /* secondary cacheops do all the work (if fitted) */
+ lw a2,scache_size
+ blez a2,1f
+ lw a3,scache_linesize
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD)
+ b 2f
+
+1: lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ /* save kva & n for subsequent loop */
+ move t8,a0
+ move t9,a1
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ /* restore kva & n */
+ move a0,t8
+ move a1,t9
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_cache)
+
+/*
+ * void clean_dcache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in primary data cache
+ */
+LEAF(clean_dcache)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_dcache)
+
+/*
+ * void clean_dcache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate indexed range in primary data cache
+ */
+LEAF(clean_dcache_indexed)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_dcache_indexed)
+
+/*
+ * void clean_dcache_nowrite (unsigned kva, size_t n) R4000
+ *
+ * Invalidate an address range in primary data cache
+ */
+LEAF(clean_dcache_nowrite)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_D)
+
+2: j ra
+ENDFRAME(clean_dcache_nowrite)
+
+/*
+ * void clean_dcache_nowrite_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in primary data cache
+ */
+LEAF(clean_dcache_nowrite_indexed)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(clean_dcache_nowrite_indexed)
+
+/*
+ * void clean_icache (unsigned kva, size_t n) R4000
+ *
+ * Invalidate address range in primary instruction cache
+ */
+LEAF(clean_icache)
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_I)
+
+2: j ra
+ENDFRAME(clean_icache)
+
+/*
+ * void clean_icache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in primary instruction cache
+ */
+LEAF(clean_icache_indexed)
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+2: j ra
+ENDFRAME(clean_icache_indexed)
+
+/*
+ * void clean_scache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in secondary cache
+ */
+LEAF(clean_scache)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD)
+
+2: j ra
+ENDFRAME(clean_scache)
+
+/*
+ * void clean_scache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate indexed range in secondary cache
+ */
+LEAF(clean_scache_indexed)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD)
+
+2: j ra
+ENDFRAME(clean_scache_indexed)
+
+/*
+ * void clean_scache_nowrite (unsigned kva, size_t n) R4000
+ *
+ * Invalidate an address range in secondary cache
+ */
+LEAF(clean_scache_nowrite)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_SD)
+
+2: j ra
+ENDFRAME(clean_scache_nowrite)
+
+/*
+ * void clean_scache_nowrite_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in secondary cache
+ */
+LEAF(clean_scache_nowrite_indexed)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_SD)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(clean_scache_nowrite_indexed)
+
+/**************************************************************************
+**
+** get_mem_conf - get memory configuration R4000
+**
+***************************************************************************/
+
+FRAME(get_mem_conf,sp,0,ra)
+
+ lw t6, mem_size
+ sw t6, 0(a0)
+ lw t7, icache_size
+ sw t7, 4(a0)
+ lw t8, dcache_size
+ sw t8, 8(a0)
+ lw t7, scache_size
+ sw t7, 12(a0)
+ j ra
+
+ENDFRAME(get_mem_conf)
+
+#endif /* __mips == 3 */
+
+/*
+ * void set_memory_size (mem_size)
+ *
+ * config_memory()'s memory size gets written into mem_size here, so
+ * config_cache() no longer needs to be passed the memory size (new with IDT/C 6.0).
+ */
+FRAME(set_memory_size,sp,0,ra)
+ sw a0, mem_size
+ j ra
+ENDFRAME(set_memory_size)
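
On the __mips == 3 path, _size_cache() derives every size from the R4000
Config register rather than probing memory. A C rendering of that decode
follows; it is a sketch that hard-codes the standard R4000 field
positions (IC in bits 11:9, DC in bits 8:6, IB bit 5, DB bit 4) instead
of the CFG_* names from idtcpu.h:

    #include <stdint.h>

    struct r4k_cache_info { uint32_t isize, dsize, iline, dline; };

    static struct r4k_cache_info decode_r4k_config(uint32_t cfg)
    {
        struct r4k_cache_info ci;
        ci.isize = 0x1000u << ((cfg >> 9) & 0x7);  /* primary I-cache bytes */
        ci.dsize = 0x1000u << ((cfg >> 6) & 0x7);  /* primary D-cache bytes */
        ci.iline = (cfg & (1u << 5)) ? 32u : 16u;  /* IB: I-cache line size */
        ci.dline = (cfg & (1u << 4)) ? 32u : 16u;  /* DB: D-cache line size */
        return ci;
    }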
diff --git a/c/src/lib/libbsp/mips/shared/startup/idttlb.S b/c/src/lib/libbsp/mips/shared/startup/idttlb.S
new file mode 100644
index 0000000000..d8800ae7ee
--- /dev/null
+++ b/c/src/lib/libbsp/mips/shared/startup/idttlb.S
@@ -0,0 +1,390 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+
+ idttlb.S,v 1.3 2000/10/24 21:50:37 joel Exp
+*/
+
+
+/*
+** idttlb.s - fetch the registers associated with the TLB and the
+** contents of the TLB entries.
+**
+*/
+/* 950308: Ketan patched a few tlb functions that would not have worked.*/
+#include <rtems/mips/iregdef.h>
+#include <rtems/mips/idtcpu.h>
+#include <rtems/asm.h>
+
+
+ .text
+
+#if __mips == 1
+/*
+** ret_tlblo -- returns the 'entrylo' contents for the TLB
+** 'c' callable - as ret_tlblo(index) - where index is the
+** tlb entry to return the lo value for - if called from assembly
+** language then index should be in register a0.
+*/
+FRAME(ret_tlblo,sp,0,ra)
+ .set noreorder
+ mfc0 t0,C0_SR # save sr
+ nop
+	and	t0,~SR_PE		# don't inadvertently clear PE
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ sll a0,TLBINX_INXSHIFT # position index
+ mtc0 a0,C0_INX # write to index register
+ nop
+ tlbr # put tlb entry in entrylo and hi
+ nop
+ mfc0 v0,C0_TLBLO # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ nop
+ .set reorder
+ENDFRAME(ret_tlblo)
+#endif
+#if __mips == 3
+/*
+** ret_tlblo[01] -- returns the 'entrylo' contents for the TLB
+** 'c' callable - as ret_tlblo(index) - where index is the
+** tlb entry to return the lo value for - if called from assembly
+** language then index should be in register a0.
+*/
+FRAME(ret_tlblo0,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ mtc0 a0,C0_INX # write to index register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # put tlb entry in entrylo and hi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBLO0 # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ENDFRAME(ret_tlblo0)
+
+FRAME(ret_tlblo1,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ mtc0 a0,C0_INX # write to index register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # put tlb entry in entrylo and hi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBLO1 # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ENDFRAME(ret_tlblo1)
+
+/*
+** ret_pagemask(index) -- return pagemask contents of tlb entry "index"
+*/
+FRAME(ret_pagemask,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ mtc0 a0,C0_INX # drop it in C0 register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # read entry to entry hi/lo
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_PAGEMASK # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+ENDFRAME(ret_pagemask)
+
+/*
+** ret_tlbwired(void) -- return wired register
+*/
+FRAME(ret_tlbwired,sp,0,ra)
+ mfc0 v0,C0_WIRED
+ j ra
+ENDFRAME(ret_tlbwired)
+#endif
+
+/*
+** ret_tlbhi -- return the tlb entry high content for tlb entry
+** index
+*/
+FRAME(ret_tlbhi,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_SR # save sr
+ nop
+ and t0,~SR_PE
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ sll a0,TLBINX_INXSHIFT # position index
+ mtc0 a0,C0_INX # drop it in C0 register
+ nop
+ tlbr # read entry to entry hi/lo
+ nop
+ mfc0 v0,C0_TLBHI # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ mtc0 a0,C0_INX # drop it in C0 register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # read entry to entry hi/lo0/lo1/mask
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBHI # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+#endif
+ENDFRAME(ret_tlbhi)
+
+/*
+** ret_tlbpid() -- return tlb pid contained in the current entry hi
+*/
+FRAME(ret_tlbpid,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 v0,C0_TLBHI # fetch tlb high
+ nop
+ and v0,TLBHI_PIDMASK # isolate and position
+ srl v0,TLBHI_PIDSHIFT
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 v0,C0_TLBHI # to return value
+ nop
+ and v0,TLBHI_PIDMASK
+ j ra
+#endif
+ENDFRAME(ret_tlbpid)
+
+/*
+** tlbprobe(address, pid) -- probe the tlb to see if address is currently
+** mapped
+** a0 = vpn - virtual page numbers are 0=0 1=0x1000, 2=0x2000...
+** virtual page numbers for the r3000 are in
+** entry hi bits 31-12
+** a1 = pid - this is a process id ranging from 0 to 63
+** this process id is shifted left 6 bits and or'ed into
+** the entry hi register
+** returns an index value (0-63) if successful, -1 if not
+*/
+FRAME(tlbprobe,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_SR /* fetch status reg */
+ and a0,TLBHI_VPNMASK /* isolate just the vpn */
+	and	t0,~SR_PE		/* don't inadvertently clear PE */
+ mtc0 zero,C0_SR
+ mfc0 t1,C0_TLBHI
+	sll	a1,TLBHI_PIDSHIFT	/* position the pid */
+ and a1,TLBHI_PIDMASK
+ or a0,a1 /* build entry hi value */
+ mtc0 a0,C0_TLBHI
+ nop
+ tlbp /* do the probe */
+ nop
+ mfc0 v1,C0_INX
+ li v0,-1
+ bltz v1,1f
+ nop
+ sra v0,v1,TLBINX_INXSHIFT /* get index positioned for return */
+1:
+ mtc0 t1,C0_TLBHI /* restore tlb hi */
+ mtc0 t0,C0_SR /* restore the status reg */
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ and a0,TLBHI_VPN2MASK # construct tlbhi for probe
+ and a1,TLBHI_PIDMASK
+ or a0,a1
+ mtc0 a0,C0_TLBHI
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbp # probe entry to entry hi/lo0/lo1/mask
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v1,C0_INX
+ li v0,-1
+ bltz v1,1f
+ move v0,v1
+1: mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+#endif
+ENDFRAME(tlbprobe)
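
tlbprobe() builds its EntryHi probe value by masking the VPN into bits
31:12 and placing the shifted PID in bits 11:6, per the R3000 layout the
comment above describes. The same construction in C, as a sketch:

    /* EntryHi = VPN (bits 31:12) | PID (bits 11:6), R3000 layout */
    static unsigned make_entryhi(unsigned vaddr, unsigned pid)
    {
        return (vaddr & 0xfffff000u) | ((pid << 6) & 0x00000fc0u);
    }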
+
+/*
+** resettlb(index) Invalidate the TLB entry specified by index
+*/
+FRAME(resettlb,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_TLBHI # fetch the current hi
+ mfc0 v0,C0_SR # fetch the status reg.
+ li t2,K0BASE&TLBHI_VPNMASK
+	and	v0,~SR_PE		# don't inadvertently clear PE
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_TLBHI # set up tlbhi
+ mtc0 zero,C0_TLBLO
+ sll a0,TLBINX_INXSHIFT
+ mtc0 a0,C0_INX
+ nop
+ tlbwi # do actual invalidate
+ nop
+ mtc0 t0,C0_TLBHI
+ mtc0 v0,C0_SR
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ li t2,K0BASE&TLBHI_VPN2MASK
+ mfc0 t0,C0_TLBHI # save current TLBHI
+ mfc0 v0,C0_SR # save SR and disable interrupts
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_TLBHI # invalidate entry
+ mtc0 zero,C0_TLBLO0
+ mtc0 zero,C0_TLBLO1
+ mtc0 a0,C0_INX
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbwi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mtc0 t0,C0_TLBHI
+ mtc0 v0,C0_SR
+ j ra
+#endif
+ENDFRAME(resettlb)
+
+#if __mips == 1
+/*
+** Setup TLB entry
+**
+** map_tlb(index, tlbhi, phypage)
+** a0 = TLB entry index
+** a1 = virtual page number and PID
+** a2 = physical page
+*/
+FRAME(map_tlb,sp,0,ra)
+ .set noreorder
+ sll a0,TLBINX_INXSHIFT
+ mfc0 v0,C0_SR # fetch the current status
+ mfc0 a3,C0_TLBHI # save the current hi
+	and	v0,~SR_PE		# don't inadvertently clear parity
+
+ mtc0 zero,C0_SR
+ mtc0 a1,C0_TLBHI # set the hi entry
+ mtc0 a2,C0_TLBLO # set the lo entry
+ mtc0 a0,C0_INX # load the index
+ nop
+ tlbwi # put the hi/lo in tlb entry indexed
+ nop
+ mtc0 a3,C0_TLBHI # put back the tlb hi reg
+ mtc0 v0,C0_SR # restore the status register
+ j ra
+ nop
+ .set reorder
+ENDFRAME(map_tlb)
+#endif
+#if __mips == 3
+/*
+** Setup R4000 TLB entry
+**
+** map_tlb4000(mask_index, tlbhi, pte_even, pte_odd)
+** a0 = TLB entry index and page mask
+** a1 = virtual page number and PID
+** a2 = pte -- contents of even pte
+** a3 = pte -- contents of odd pte
+*/
+FRAME(map_tlb4000,sp,0,ra)
+ and t2,a0,TLBPGMASK_MASK
+ and a0,TLBINX_INXMASK
+ mfc0 t1,C0_TLBHI # save current TLBPID
+ mfc0 v0,C0_SR # save SR and disable interrupts
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_PAGEMASK # set
+ mtc0 a1,C0_TLBHI # set VPN and TLBPID
+ mtc0 a2,C0_TLBLO0 # set PPN and access bits
+ mtc0 a3,C0_TLBLO1 # set PPN and access bits
+ mtc0 a0,C0_INX # set INDEX to wired entry
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbwi # drop it in
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mtc0 t1,C0_TLBHI # restore TLBPID
+ mtc0 v0,C0_SR # restore SR
+ j ra
+ENDFRAME(map_tlb4000)
+#endif
+
+
+/*
+** Set the current TLBPID. This assumes the PID is already positioned
+** correctly in register a0.
+*/
+FRAME(set_tlbpid,sp,0,ra)
+ .set noreorder
+ mtc0 a0,C0_TLBHI
+ j ra
+ nop
+ .set reorder
+ENDFRAME(set_tlbpid)
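
A hypothetical C caller of the R3000 helpers above might wire a mapping
as follows. The map_page() wrapper is illustrative and not part of this
commit, and the flag positions assume the standard R3000 EntryLo layout
(V in bit 9, D in bit 10):

    extern void map_tlb(unsigned index, unsigned tlbhi, unsigned phypage);
    extern void resettlb(unsigned index);

    #define TLB_V (1u << 9)    /* valid */
    #define TLB_D (1u << 10)   /* dirty, i.e. writable */

    /* Wire TLB entry idx so vaddr maps to paddr, valid and writable,
       with PID 0. */
    static void map_page(unsigned idx, unsigned vaddr, unsigned paddr)
    {
        map_tlb(idx, vaddr & 0xfffff000u,
                (paddr & 0xfffff000u) | TLB_V | TLB_D);
    }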
+