author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2018-04-20 10:35:35 +0200
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2018-04-20 13:52:14 +0200
commit	99648958668d3a33ee57974479b36201fe303f34 (patch)
tree	6f27ea790e2823c6156e71219a4f54680263fac6 /bsps/mips/shared
parent	bsps: Move start files to bsps (diff)
bsps: Move startup files to bsps
Adjust build support files to the new directory layout. This patch is part of the BSP source reorganization. Update #3285.
Diffstat (limited to 'bsps/mips/shared')
-rw-r--r--	bsps/mips/shared/start/idtmem.S	920
-rw-r--r--	bsps/mips/shared/start/idttlb.S	388
2 files changed, 1308 insertions, 0 deletions
diff --git a/bsps/mips/shared/start/idtmem.S b/bsps/mips/shared/start/idtmem.S
new file mode 100644
index 0000000000..ed51e67362
--- /dev/null
+++ b/bsps/mips/shared/start/idtmem.S
@@ -0,0 +1,920 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+
+*/
+
+/************************************************************************
+**
+** idtmem.s - memory and cache functions
+**
+** Copyright 1991 Integrated Device Technology, Inc.
+** All Rights Reserved
+**
+**************************************************************************/
+
+/*
+ * 950313: Ketan fixed bugs in mfc0/mtc0 hazards, and removed hack
+ * to set mem_size.
+ */
+
+#include <rtems/mips/iregdef.h>
+#include <rtems/mips/idtcpu.h>
+#include <rtems/asm.h>
+
+ .data
+mem_size:
+ .word 0
+dcache_size:
+ .word 0
+icache_size:
+#if __mips == 1
+ .word MINCACHE
+#endif
+#if __mips == 3
+ .word 0
+#endif
+
+#if __mips == 3
+ .data
+scache_size:
+ .word 0
+icache_linesize:
+ .word 0
+dcache_linesize:
+ .word 0
+scache_linesize:
+ .word 0
+#endif
+
+ .text
+
+#if __mips == 1
+#define CONFIGFRM ((2*4)+4)
+
+/*************************************************************************
+**
+** Config_Dcache() -- determine size of Data cache
+**
+**************************************************************************/
+
+FRAME(config_Dcache,sp, CONFIGFRM, ra)
+ .set noreorder
+ subu sp,CONFIGFRM
+ sw ra,CONFIGFRM-4(sp) /* save return address */
+ sw s0,4*4(sp) /* save s0 in first regsave slot */
+ mfc0 s0,C0_SR /* save SR */
+ nop
+ mtc0 zero,C0_SR /* disable interrupts */
+ .set reorder
+ jal _size_cache /* returns Data cache size in v0 */
+ sw v0, dcache_size /* save it */
+ and s0, ~SR_PE /* do not clear PE */
+ .set noreorder
+ mtc0 s0,C0_SR /* restore SR */
+ nop
+ .set reorder
+ lw s0, 4*4(sp) /* restore s0 */
+ lw ra,CONFIGFRM-4(sp) /* restore ra */
+ addu sp,CONFIGFRM /* pop stack */
+ j ra
+ENDFRAME(config_Dcache)
+
+/*************************************************************************
+**
+** Config_Icache() -- determine size of Instruction cache
+**	MUST be run in uncached mode (handled in idt_csu.s)
+**
+**************************************************************************/
+
+FRAME(config_Icache,sp, CONFIGFRM, ra)
+ .set noreorder
+ subu sp,CONFIGFRM
+ sw ra,CONFIGFRM-4(sp) /* save return address */
+ sw s0,4*4(sp) /* save s0 in first regsave slot */
+ mfc0 s0,C0_SR /* save SR */
+ nop
+ mtc0 zero, C0_SR /* disable interrupts */
+ li v0,SR_SWC /* swap caches/disable ints */
+ mtc0 v0,C0_SR
+ nop
+ .set reorder
+ jal _size_cache /* returns instruction cache size */
+ .set noreorder
+ mtc0 zero,C0_SR /* swap back caches */
+ nop
+ and s0,~SR_PE /* do not inadvertently clear PE */
+ mtc0 s0,C0_SR /* restore SR */
+ nop
+ .set reorder
+ sw v0, icache_size /* save it AFTER caches back */
+ lw s0,4*4(sp) /* restore s0 */
+ lw ra,CONFIGFRM-4(sp) /* restore ra */
+ addu sp,CONFIGFRM /* pop stack */
+ j ra
+ENDFRAME(config_Icache)
+
+/************************************************************************
+**
+** _size_cache()
+** returns cache size in v0
+**
+************************************************************************/
+
+FRAME(_size_cache,sp,0,ra)
+ .set noreorder
+ mfc0 t0,C0_SR /* save current sr */
+ nop
+ and t0,~SR_PE /* do not inadvertently clear PE */
+ or v0,t0,SR_ISC /* isolate cache */
+ mtc0 v0,C0_SR
+ /*
+ * First check if there is a cache there at all
+ */
+ move v0,zero
+ li v1,0xa5a5a5a5 /* distinctive pattern */
+ sw v1,K0BASE /* try to write into cache */
+ lw t1,K0BASE /* try to read from cache */
+ nop
+ mfc0 t2,C0_SR
+ nop
+ .set reorder
+ and t2,SR_CM
+ bne t2,zero,3f /* cache miss, must be no cache */
+ bne v1,t1,3f /* data not equal -> no cache */
+ /*
+ * Clear cache size boundaries to known state.
+ */
+ li v0,MINCACHE
+1:
+ sw zero,K0BASE(v0)
+ sll v0,1
+ ble v0,MAXCACHE,1b
+
+ li v0,-1
+ sw v0,K0BASE(zero) /* store marker in cache */
+ li v0,MINCACHE /* MIN cache size */
+
+2: lw v1,K0BASE(v0) /* Look for marker */
+ bne v1,zero,3f /* found marker */
+ sll v0,1 /* cache size * 2 */
+ ble v0,MAXCACHE,2b /* keep looking */
+ move v0,zero /* must be no cache */
+ .set noreorder
+3: mtc0 t0,C0_SR /* restore sr */
+ j ra
+ nop
+ENDFRAME(_size_cache)
+ .set reorder
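
The probe above works because an isolated R3000 cache is direct-mapped: once the offset reaches the cache size, K0BASE+offset indexes the same line as K0BASE, so the marker stored at offset 0 reads back. A minimal C rendering of the algorithm, assuming that aliasing and with MINCACHE/MAXCACHE standing in for the idtcpu.h constants:

/* Sketch of _size_cache(): assumes the isolated-cache aliasing described
 * above; mincache/maxcache mirror the idtcpu.h constants.
 */
static unsigned size_cache_sketch(volatile unsigned *k0base,
                                  unsigned mincache, unsigned maxcache)
{
    unsigned size;

    /* Zero the word at every candidate power-of-two boundary. */
    for (size = mincache; size <= maxcache; size <<= 1)
        k0base[size / sizeof(unsigned)] = 0;

    k0base[0] = ~0u;                       /* marker at offset 0 */

    /* The first boundary that aliases offset 0 is the cache size. */
    for (size = mincache; size <= maxcache; size <<= 1)
        if (k0base[size / sizeof(unsigned)] != 0)
            return size;

    return 0;                              /* no cache present */
}
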
+
+#define FLUSHFRM (2*4)
+
+/***************************************************************************
+**
+** flush_Dcache() - flush entire Data cache
+**
+****************************************************************************/
+FRAME(flush_Dcache,sp,FLUSHFRM,ra)
+ lw t2, dcache_size
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ and t3,~SR_PE /* don't inadvertently clear PE */
+ beq t2,zero,_Dflush_done /* no D cache, get out! */
+ nop
+ li v0, SR_ISC /* isolate cache */
+ mtc0 v0, C0_SR
+ nop
+ .set reorder
+ li t0,K0BASE /* set loop registers */
+ or t1,t0,t2
+
+2: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bne t0,t1,2b
+
+ .set noreorder
+_Dflush_done:
+ mtc0 t3,C0_SR /* restore Status Register */
+ .set reorder
+ j ra
+ENDFRAME(flush_Dcache)
+
+/***************************************************************************
+**
+** flush_Icache() - flush entire Instruction cache
+**
+** NOTE: Icache can only be flushed/cleared when uncached
+** Code forces into uncached memory regardless of calling mode
+**
+****************************************************************************/
+FRAME(flush_Icache,sp,FLUSHFRM,ra)
+ lw t1,icache_size
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ la v0,1f
+ li v1,K1BASE
+ or v0,v1
+ j v0 /* force into non-cached space */
+ nop
+1:
+ and t3,~SR_PE /* don't inadvertently clear PE */
+ beq t1,zero,_Iflush_done /* no I-cache, get out */
+ nop
+ li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */
+ mtc0 v0,C0_SR
+ li t0,K0BASE
+ .set reorder
+ or t1,t0,t1
+
+1: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bne t0,t1,1b
+ .set noreorder
+_Iflush_done:
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ .set reorder
+ j ra
+ENDFRAME(flush_Icache)
+
+/**************************************************************************
+**
+** clear_Dcache(base_addr, byte_count) - flush portion of Data cache
+**
+** a0 = base address of portion to be cleared
+** a1 = byte count of length
+**
+***************************************************************************/
+FRAME(clear_Dcache,sp,0,ra)
+
+ lw t2, dcache_size /* Data cache size */
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ and t3,~SR_PE /* don't inadvertently clear PE */
+ nop
+ nop
+ .set reorder
+ /*
+ * flush data cache
+ */
+
+ .set noreorder
+ nop
+ li v0,SR_ISC /* isolate data cache */
+ mtc0 v0,C0_SR
+ .set reorder
+ bltu t2,a1,1f /* cache is smaller than region */
+ move t2,a1
+1: addu t2,a0 /* ending address + 1 */
+ move t0,a0
+
+1: sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bltu t0,t2,1b
+
+ .set noreorder
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ nop
+ .set reorder
+ j ra
+ENDFRAME(clear_Dcache)
+
+/**************************************************************************
+**
+** clear_Icache(base_addr, byte_count) - flush portion of Instruction cache
+**
+** a0 = base address of portion to be cleared
+** a1 = byte count of length
+**
+** NOTE: Icache can only be flushed/cleared when uncached
+** Code forces into uncached memory regardless of calling mode
+**
+***************************************************************************/
+FRAME(clear_Icache,sp,0,ra)
+
+ lw t1, icache_size /* Instruction cache size */
+ /*
+ * flush text cache
+ */
+ .set noreorder
+ mfc0 t3,C0_SR /* save SR */
+ nop
+ la v0,1f
+ li v1,K1BASE
+ or v0,v1
+ j v0 /* force into non-cached space */
+ nop
+1:
+ and t3,~SR_PE /* don't inadvertently clear PE */
+ nop
+ nop
+ li v0,SR_ISC|SR_SWC /* disable intr, isolate and swap */
+ mtc0 v0,C0_SR
+ .set reorder
+ bltu t1,a1,1f /* cache is smaller than region */
+ move t1,a1
+1: addu t1,a0 /* ending address + 1 */
+ move t0,a0
+
+ sb zero,0(t0)
+ sb zero,4(t0)
+ sb zero,8(t0)
+ sb zero,12(t0)
+ sb zero,16(t0)
+ sb zero,20(t0)
+ sb zero,24(t0)
+ addu t0,32
+ sb zero,-4(t0)
+ bltu t0,t1,1b
+ .set noreorder
+ mtc0 t3,C0_SR /* un-isolate, enable interrupts */
+ nop
+ nop
+ nop /* allow time for caches to swap */
+ .set reorder
+ j ra
+ENDFRAME(clear_Icache)
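
The two routines above share a calling convention (a0 = base address, a1 = byte count) and clamp the count to the cache size. On the R3000 the caches are write-through, so "flushing" here amounts to invalidation. A hedged caller-side sketch; the prototypes are inferred from the comment blocks, not taken from a header:

/* Illustrative callers for the R3000 partial-flush routines above. */
extern void clear_Dcache(unsigned base_addr, unsigned byte_count);
extern void clear_Icache(unsigned base_addr, unsigned byte_count);

/* e.g. after copying code into RAM at 'dst', make both caches coherent */
static void flush_region(void *dst, unsigned len)
{
    clear_Dcache((unsigned)dst, len);  /* invalidate stale data lines */
    clear_Icache((unsigned)dst, len);  /* discard stale instructions */
}
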
+
+/**************************************************************************
+**
+** get_mem_conf - get memory configuration
+**
+***************************************************************************/
+
+FRAME(get_mem_conf,sp,0,ra)
+
+ lw t6, mem_size
+ sw t6, 0(a0)
+ lw t7, icache_size
+ sw t7, 4(a0)
+ lw t8, dcache_size
+ sw t8, 8(a0)
+ j ra
+
+ENDFRAME(get_mem_conf)
+#endif /* __mips == 1 */
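
get_mem_conf() stores three words through a0 on this R3000 build; the R4000 build later in this file appends scache_size at offset 12. A caller's-eye sketch, with an illustrative struct name:

/* Caller's view of get_mem_conf(); struct name is illustrative. */
struct mem_conf {
    unsigned mem_size;      /* offset 0 */
    unsigned icache_size;   /* offset 4 */
    unsigned dcache_size;   /* offset 8 */
};

extern void get_mem_conf(struct mem_conf *mc);
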
+
+#if __mips == 3
+#define LEAF(label) FRAME(label,sp,0,ra)
+#define XLEAF(label) \
+ .globl label ; \
+label:
+
+/*
+ * cacheop macro to automate cache operations
+ * first some helpers...
+ */
+#define _mincache(size, maxsize) \
+ bltu size,maxsize,8f ; \
+ move size,maxsize ; \
+8:
+
+#define _align(tmp, minaddr, maxaddr, linesize) \
+ subu tmp,linesize,1 ; \
+ not tmp ; \
+ and minaddr,tmp ; \
+ addu maxaddr,-1 ; \
+ and maxaddr,tmp
+
+/* This is a bit of a hack really because it relies on minaddr=a0 */
+#define _doop1(op1) \
+ cache op1,0(a0)
+
+#define _doop2(op1, op2) \
+ cache op1,0(a0) ; \
+ cache op2,0(a0)
+
+/* specials for cache initialisation */
+#define _doop1lw1(op1) \
+ cache op1,0(a0) ; \
+ lw zero,0(a0) ; \
+ cache op1,0(a0)
+
+#define _doop121(op1,op2) \
+ cache op1,0(a0) ; \
+ nop; \
+ cache op2,0(a0) ; \
+ nop; \
+ cache op1,0(a0)
+
+#define _oploopn(minaddr, maxaddr, linesize, tag, ops) \
+ .set noreorder ; \
+7: _doop##tag##ops ; \
+ bne minaddr,maxaddr,7b ; \
+ addu minaddr,linesize ; \
+ .set reorder
+
+/* finally the cache operation macros */
+#define icacheopn(kva, n, cache_size, cache_linesize, tag, ops) \
+ _mincache(n, cache_size); \
+ blez n,9f ; \
+ addu n,kva ; \
+ _align(t1, kva, n, cache_linesize) ; \
+ _oploopn(kva, n, cache_linesize, tag, ops) ; \
+9:
+
+#define vcacheopn(kva, n, cache_size, cache_linesize, tag, ops) \
+ blez n,9f ; \
+ addu n,kva ; \
+ _align(t1, kva, n, cache_linesize) ; \
+ _oploopn(kva, n, cache_linesize, tag, ops) ; \
+9:
+
+#define icacheop(kva, n, cache_size, cache_linesize, op) \
+ icacheopn(kva, n, cache_size, cache_linesize, 1, (op))
+
+#define vcacheop(kva, n, cache_size, cache_linesize, op) \
+ vcacheopn(kva, n, cache_size, cache_linesize, 1, (op))
+
+ .text
+
+/*
+ * static void _size_cache() R4000
+ *
+ * Internal routine to determine cache sizes by looking at R4000 config
+ * register. Sizes are returned in registers, as follows:
+ * t2 icache size
+ * t3 dcache size
+ * t6 scache size
+ * t4 icache line size
+ * t5 dcache line size
+ * t7 scache line size
+ */
+LEAF(_size_cache)
+ mfc0 t0,C0_CONFIG
+
+ and t1,t0,CFG_ICMASK
+ srl t1,CFG_ICSHIFT
+ li t2,0x1000
+ sll t2,t1
+
+ and t1,t0,CFG_DCMASK
+ srl t1,CFG_DCSHIFT
+ li t3,0x1000
+ sll t3,t1
+
+ li t4,32
+ and t1,t0,CFG_IB
+ bnez t1,1f
+ li t4,16
+1:
+
+ li t5,32
+ and t1,t0,CFG_DB
+ bnez t1,1f
+ li t5,16
+1:
+
+ move t6,zero # default to no scache
+ move t7,zero #
+
+ and t1,t0,CFG_C_UNCACHED # test config register
+ bnez t1,1f # no scache if uncached/non-coherent
+
+ li t6,0x100000 # assume 1Mb scache <<-NOTE
+ and t1,t0,CFG_SBMASK
+ srl t1,CFG_SBSHIFT
+ li t7,16
+ sll t7,t1
+1: j ra
+ENDFRAME(_size_cache)
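
Unlike the R3000 probe, this routine just decodes C0_CONFIG: primary cache sizes are 4 KB shifted left by the IC/DC field, and line sizes are 32 bytes when the IB/DB bit is set, else 16. A hedged C equivalent for the primary caches, with the CFG_* masks and shifts standing in for the idtcpu.h constants:

/* Decode a primary cache size/line size from the R4000 config word. */
static unsigned cache_size_from_config(unsigned cfg, unsigned mask,
                                       unsigned shift)
{
    return 0x1000u << ((cfg & mask) >> shift);   /* 4 KB << field */
}

static unsigned line_size_from_config(unsigned cfg, unsigned block_bit)
{
    return (cfg & block_bit) ? 32 : 16;          /* IB/DB set => 32 bytes */
}
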
+
+/*
+ * void config_cache() R4000
+ *
+ * Work out size of I, D & S caches, assuming they are already initialised.
+ */
+LEAF(config_cache)
+ lw t0,icache_size
+ bgtz t0,8f # already known?
+ move v0,ra
+ bal _size_cache
+ move ra,v0
+
+ sw t2,icache_size
+ sw t3,dcache_size
+ sw t6,scache_size
+ sw t4,icache_linesize
+ sw t5,dcache_linesize
+ sw t7,scache_linesize
+8: j ra
+ENDFRAME(config_cache)
+
+/*
+ * void _init_cache() R4000
+ */
+LEAF(_init_cache)
+ /*
+ * First work out the sizes
+ */
+ move v0,ra
+ bal _size_cache
+ move ra,v0
+
+ /*
+ * The caches may be in an indeterminate state,
+ * so we force good parity into them by doing an
+ * invalidate, load/fill, invalidate for each line.
+ */
+
+ /* disable all i/u and cache exceptions */
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ or v1,SR_DE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ /* assume bottom of RAM will generate good parity for the cache */
+ li a0,PHYS_TO_K0(0)
+ move a2,t2 # icache_size
+ move a3,t4 # icache_linesize
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,121,(Index_Store_Tag_I,Fill_I))
+
+ li a0,PHYS_TO_K0(0)
+ move a2,t3 # dcache_size
+ move a3,t5 # dcache_linesize
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_D))
+
+ /* assume unified I & D in scache <<-NOTE */
+ blez t6,1f
+ li a0,PHYS_TO_K0(0)
+ move a2,t6
+ move a3,t7
+ move a1,a2
+ icacheopn(a0,a1,a2,a3,1lw1,(Index_Store_Tag_SD))
+
+1: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(_init_cache)
+
+/*
+ * void flush_cache (void) R4000
+ *
+ * Flush and invalidate all caches
+ */
+LEAF(flush_cache)
+ /* secondary cacheops do all the work if present */
+ lw a2,scache_size
+ blez a2,1f
+ lw a3,scache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD)
+ b 2f
+
+1:
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(flush_cache)
+
+/*
+ * void flush_cache_nowrite (void) R4000
+ *
+ * Invalidate all caches
+ */
+LEAF(flush_cache_nowrite)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+ li a0,PHYS_TO_K0(0)
+ move a1,a2
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_SD)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(flush_cache_nowrite)
+
+/*
+ * void clean_cache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in all caches
+ */
+LEAF(clean_cache)
+XLEAF(clear_cache)
+
+ /* secondary cacheops do all the work (if fitted) */
+ lw a2,scache_size
+ blez a2,1f
+ lw a3,scache_linesize
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD)
+ b 2f
+
+1: lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+ /* save kva & n for subsequent loop */
+ move t8,a0
+ move t9,a1
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_I)
+
+ lw a2,dcache_size
+ lw a3,dcache_linesize
+ /* restore kva & n */
+ move a0,t8
+ move a1,t9
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_cache)
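
A typical use of clean_cache() is to make a freshly written buffer safe for device DMA: dirty lines are written back to memory and then invalidated. An illustrative caller, using the prototype documented in the comment block above:

#include <stddef.h>

extern void clean_cache(unsigned kva, size_t n);

/* e.g. hand a freshly written buffer to a device for DMA */
static void dma_sync_for_device(void *buf, size_t len)
{
    clean_cache((unsigned)buf, len);  /* write back, then invalidate */
}
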
+
+/*
+ * void clean_dcache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in primary data cache
+ */
+LEAF(clean_dcache)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_dcache)
+
+/*
+ * void clean_dcache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate indexed range in primary data cache
+ */
+LEAF(clean_dcache_indexed)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_D)
+
+2: j ra
+ENDFRAME(clean_dcache_indexed)
+
+/*
+ * void clean_dcache_nowrite (unsigned kva, size_t n) R4000
+ *
+ * Invalidate an address range in primary data cache
+ */
+LEAF(clean_dcache_nowrite)
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_D)
+
+2: j ra
+ENDFRAME(clean_dcache_nowrite)
+
+/*
+ * void clean_dcache_nowrite_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in primary data cache
+ */
+LEAF(clean_dcache_nowrite_indexed)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,dcache_size
+ blez a2,2f
+ lw a3,dcache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_D)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(clean_dcache_nowrite_indexed)
+
+/*
+ * void clean_icache (unsigned kva, size_t n) R4000
+ *
+ * Invalidate address range in primary instruction cache
+ */
+LEAF(clean_icache)
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_I)
+
+2: j ra
+ENDFRAME(clean_icache)
+
+/*
+ * void clean_icache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in primary instruction cache
+ */
+LEAF(clean_icache_indexed)
+ lw a2,icache_size
+ blez a2,2f
+ lw a3,icache_linesize
+
+#ifdef CPU_ORION
+ srl a2,1 # do one set (half cache) at a time
+ move t8,a0 # save kva & n
+ move t9,a1
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+ addu a0,t8,a2 # do next set
+ move a1,t9 # restore n
+#endif
+ icacheop(a0,a1,a2,a3,Index_Invalidate_I)
+
+2: j ra
+ENDFRAME(clean_icache_indexed)
+
+/*
+ * void clean_scache (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate address range in secondary cache
+ */
+LEAF(clean_scache)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+ vcacheop(a0,a1,a2,a3,Hit_Writeback_Inv_SD)
+
+2: j ra
+ENDFRAME(clean_scache)
+
+/*
+ * void clean_scache_indexed (unsigned kva, size_t n) R4000
+ *
+ * Writeback and invalidate indexed range in secondary cache
+ */
+LEAF(clean_scache_indexed)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ icacheop(a0,a1,a2,a3,Index_Writeback_Inv_SD)
+
+2: j ra
+ENDFRAME(clean_scache_indexed)
+
+/*
+ * void clean_scache_nowrite (unsigned kva, size_t n) R4000
+ *
+ * Invalidate an address range in secondary cache
+ */
+LEAF(clean_scache_nowrite)
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ vcacheop(a0,a1,a2,a3,Hit_Invalidate_SD)
+
+2: j ra
+ENDFRAME(clean_scache_nowrite)
+
+/*
+ * void clean_scache_nowrite_indexed (unsigned kva, size_t n) R4000
+ *
+ * Invalidate indexed range in secondary cache
+ */
+LEAF(clean_scache_nowrite_indexed)
+ mfc0 v0,C0_SR
+ and v1,v0,~SR_IE
+ mtc0 v1,C0_SR
+
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ lw a2,scache_size
+ blez a2,2f
+ lw a3,scache_linesize
+
+ icacheop(a0,a1,a2,a3,Index_Store_Tag_SD)
+
+2: mtc0 v0,C0_SR
+ j ra
+ENDFRAME(clean_scache_nowrite_indexed)
+
+/**************************************************************************
+**
+** get_mem_conf - get memory configuration R4000
+**
+***************************************************************************/
+
+FRAME(get_mem_conf,sp,0,ra)
+
+ lw t6, mem_size
+ sw t6, 0(a0)
+ lw t7, icache_size
+ sw t7, 4(a0)
+ lw t8, dcache_size
+ sw t8, 8(a0)
+ lw t7, scache_size
+ sw t7, 12(a0)
+ j ra
+
+ENDFRAME(get_mem_conf)
+
+#endif /* __mips == 3 */
+
+/*
+ * void set_memory_size (mem_size)
+ *
+ * config_memory()'s memory size gets written into mem_size here.
+ * Now we don't need to call config_cache() with memory size - New to IDTC6.0
+ */
+FRAME(set_memory_size,sp,0,ra)
+ sw a0, mem_size
+ j ra
+ENDFRAME(set_memory_size)
diff --git a/bsps/mips/shared/start/idttlb.S b/bsps/mips/shared/start/idttlb.S
new file mode 100644
index 0000000000..2574027dc9
--- /dev/null
+++ b/bsps/mips/shared/start/idttlb.S
@@ -0,0 +1,388 @@
+/*
+
+Based upon IDT provided code with the following release:
+
+This source code has been made available to you by IDT on an AS-IS
+basis. Anyone receiving this source is licensed under IDT copyrights
+to use it in any way he or she deems fit, including copying it,
+modifying it, compiling it, and redistributing it either with or
+without modifications. No license under IDT patents or patent
+applications is to be implied by the copyright license.
+
+Any user of this software should understand that IDT cannot provide
+technical support for this software and will not be responsible for
+any consequences resulting from the use of this software.
+
+Any person who transfers this source code or any derivative work must
+include the IDT copyright notice, this paragraph, and the preceding
+two paragraphs in the transferred software.
+
+COPYRIGHT IDT CORPORATION 1996
+LICENSED MATERIAL - PROGRAM PROPERTY OF IDT
+*/
+
+
+/*
+** idttlb.s - fetch the registers associated with, and the contents
+** of, the tlb.
+**
+*/
+/* 950308: Ketan patched a few tlb functions that would not have worked.*/
+#include <rtems/mips/iregdef.h>
+#include <rtems/mips/idtcpu.h>
+#include <rtems/asm.h>
+
+
+ .text
+
+#if __mips == 1
+/*
+** ret_tlblo -- returns the 'entrylo' contents for the TLB
+** 'c' callable - as ret_tlblo(index) - where index is the
+** tlb entry to return the lo value for - if called from assembly
+** language then index should be in register a0.
+*/
+FRAME(ret_tlblo,sp,0,ra)
+ .set noreorder
+ mfc0 t0,C0_SR # save sr
+ nop
+ and t0,~SR_PE # don't inadvertently clear PE
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ sll a0,TLBINX_INXSHIFT # position index
+ mtc0 a0,C0_INX # write to index register
+ nop
+ tlbr # put tlb entry in entrylo and hi
+ nop
+ mfc0 v0,C0_TLBLO # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ nop
+ .set reorder
+ENDFRAME(ret_tlblo)
+#endif
+#if __mips == 3
+/*
+** ret_tlblo[01] -- returns the 'entrylo' contents for the TLB
+** 'c' callable - as ret_tlblo(index) - where index is the
+** tlb entry to return the lo value for - if called from assembly
+** language then index should be in register a0.
+*/
+FRAME(ret_tlblo0,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ mtc0 a0,C0_INX # write to index register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # put tlb entry in entrylo and hi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBLO0 # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ENDFRAME(ret_tlblo0)
+
+FRAME(ret_tlblo1,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # clear interrupts
+ mfc0 t1,C0_TLBHI # save pid
+ mtc0 a0,C0_INX # write to index register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # put tlb entry in entrylo and hi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBLO1 # get the requested entry lo
+ mtc0 t1,C0_TLBHI # restore pid
+ mtc0 t0,C0_SR # restore status register
+ j ra
+ENDFRAME(ret_tlblo1)
+
+/*
+** ret_pagemask(index) -- return pagemask contents of tlb entry "index"
+*/
+FRAME(ret_pagemask,sp,0,ra)
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ mtc0 a0,C0_INX # drop it in C0 register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # read entry to entry hi/lo
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_PAGEMASK # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+ENDFRAME(ret_pagemask)
+
+/*
+** ret_tlbwired(void) -- return wired register
+*/
+FRAME(ret_tlbwired,sp,0,ra)
+ mfc0 v0,C0_WIRED
+ j ra
+ENDFRAME(ret_tlbwired)
+#endif
+
+/*
+** ret_tlbhi -- return the tlb entry high content for tlb entry
+** index
+*/
+FRAME(ret_tlbhi,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_SR # save sr
+ nop
+ and t0,~SR_PE
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ sll a0,TLBINX_INXSHIFT # position index
+ mtc0 a0,C0_INX # drop it in C0 register
+ nop
+ tlbr # read entry to entry hi/lo
+ nop
+ mfc0 v0,C0_TLBHI # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ mtc0 a0,C0_INX # drop it in C0 register
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbr # read entry to entry hi/lo0/lo1/mask
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v0,C0_TLBHI # to return value
+ mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+#endif
+ENDFRAME(ret_tlbhi)
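
Combined, the accessors above let C code inspect a TLB entry without touching CP0 directly. A hedged sketch for an R4000 build; the prototypes are inferred from the comment blocks, not from a header:

#include <stdio.h>

extern unsigned ret_tlbhi(int index);
extern unsigned ret_tlblo0(int index);
extern unsigned ret_tlblo1(int index);
extern unsigned ret_pagemask(int index);

/* Print one TLB entry: hi, even/odd lo words, and page mask. */
static void dump_tlb_entry(int i)
{
    printf("tlb[%d]: hi=%08x lo0=%08x lo1=%08x mask=%08x\n",
           i, ret_tlbhi(i), ret_tlblo0(i), ret_tlblo1(i), ret_pagemask(i));
}
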
+
+/*
+** ret_tlbpid() -- return tlb pid contained in the current entry hi
+*/
+FRAME(ret_tlbpid,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 v0,C0_TLBHI # fetch tlb high
+ nop
+ and v0,TLBHI_PIDMASK # isolate and position
+ srl v0,TLBHI_PIDSHIFT
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 v0,C0_TLBHI # to return value
+ nop
+ and v0,TLBHI_PIDMASK
+ j ra
+#endif
+ENDFRAME(ret_tlbpid)
+
+/*
+** tlbprobe(address, pid) -- probe the tlb to see if address is currently
+** mapped
+** a0 = vpn - virtual page numbers are 0=0, 1=0x1000, 2=0x2000, ...
+** virtual page numbers for the r3000 are in
+** entry hi bits 31-12
+** a1 = pid - this is a process id ranging from 0 to 63
+** this process id is shifted left 6 bits and or'ed into
+** the entry hi register
+** returns an index value (0-63) if successful, -1 if not
+*/
+FRAME(tlbprobe,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_SR /* fetch status reg */
+ and a0,TLBHI_VPNMASK /* isolate just the vpn */
+ and t0,~SR_PE /* don't inadvertently clear PE */
+ mtc0 zero,C0_SR
+ mfc0 t1,C0_TLBHI
+ sll a1,TLBHI_PIDSHIFT /* position the pid */
+ and a1,TLBHI_PIDMASK
+ or a0,a1 /* build entry hi value */
+ mtc0 a0,C0_TLBHI
+ nop
+ tlbp /* do the probe */
+ nop
+ mfc0 v1,C0_INX
+ li v0,-1
+ bltz v1,1f
+ nop
+ sra v0,v1,TLBINX_INXSHIFT /* get index positioned for return */
+1:
+ mtc0 t1,C0_TLBHI /* restore tlb hi */
+ mtc0 t0,C0_SR /* restore the status reg */
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ mfc0 t0,C0_SR # save sr
+ mtc0 zero,C0_SR # disable interrupts
+ mfc0 t1,C0_TLBHI # save current pid
+ and a0,TLBHI_VPN2MASK # construct tlbhi for probe
+ and a1,TLBHI_PIDMASK
+ or a0,a1
+ mtc0 a0,C0_TLBHI
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbp # probe entry to entry hi/lo0/lo1/mask
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mfc0 v1,C0_INX
+ li v0,-1
+ bltz v1,1f
+ move v0,v1
+1: mtc0 t1,C0_TLBHI # restore current pid
+ mtc0 t0,C0_SR # restore sr
+ j ra
+#endif
+ENDFRAME(tlbprobe)
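
tlbprobe() returns the matching TLB index, or -1 on a probe miss, so a mapping check reduces to a sign test. An illustrative caller, with the prototype inferred from the comment block:

extern int tlbprobe(unsigned address, unsigned pid);

/* Nonzero if the page containing 'vaddr' is mapped for 'pid'. */
static int page_is_mapped(unsigned vaddr, unsigned pid)
{
    return tlbprobe(vaddr, pid) >= 0;
}
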
+
+/*
+** resettlb(index) Invalidate the TLB entry specified by index
+*/
+FRAME(resettlb,sp,0,ra)
+#if __mips == 1
+ .set noreorder
+ mfc0 t0,C0_TLBHI # fetch the current hi
+ mfc0 v0,C0_SR # fetch the status reg.
+ li t2,K0BASE&TLBHI_VPNMASK
+ and v0,~SR_PE # don't inadvertently clear PE
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_TLBHI # set up tlbhi
+ mtc0 zero,C0_TLBLO
+ sll a0,TLBINX_INXSHIFT
+ mtc0 a0,C0_INX
+ nop
+ tlbwi # do actual invalidate
+ nop
+ mtc0 t0,C0_TLBHI
+ mtc0 v0,C0_SR
+ j ra
+ nop
+ .set reorder
+#endif
+#if __mips == 3
+ li t2,K0BASE&TLBHI_VPN2MASK
+ mfc0 t0,C0_TLBHI # save current TLBHI
+ mfc0 v0,C0_SR # save SR and disable interrupts
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_TLBHI # invalidate entry
+ mtc0 zero,C0_TLBLO0
+ mtc0 zero,C0_TLBLO1
+ mtc0 a0,C0_INX
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbwi
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mtc0 t0,C0_TLBHI
+ mtc0 v0,C0_SR
+ j ra
+#endif
+ENDFRAME(resettlb)
+
+#if __mips == 1
+/*
+** Setup TLB entry
+**
+** map_tlb(index, tlbhi, phypage)
+** a0 = TLB entry index
+** a1 = virtual page number and PID
+** a2 = physical page
+*/
+FRAME(map_tlb,sp,0,ra)
+ .set noreorder
+ sll a0,TLBINX_INXSHIFT
+ mfc0 v0,C0_SR # fetch the current status
+ mfc0 a3,C0_TLBHI # save the current hi
+ and v0,~SR_PE # don't inadvertently clear parity
+
+ mtc0 zero,C0_SR
+ mtc0 a1,C0_TLBHI # set the hi entry
+ mtc0 a2,C0_TLBLO # set the lo entry
+ mtc0 a0,C0_INX # load the index
+ nop
+ tlbwi # put the hi/lo in tlb entry indexed
+ nop
+ mtc0 a3,C0_TLBHI # put back the tlb hi reg
+ mtc0 v0,C0_SR # restore the status register
+ j ra
+ nop
+ .set reorder
+ENDFRAME(map_tlb)
+#endif
+#if __mips == 3
+/*
+** Setup R4000 TLB entry
+**
+** map_tlb4000(mask_index, tlbhi, pte_even, pte_odd)
+** a0 = TLB entry index and page mask
+** a1 = virtual page number and PID
+** a2 = pte -- contents of even pte
+** a3 = pte -- contents of odd pte
+*/
+FRAME(map_tlb4000,sp,0,ra)
+ and t2,a0,TLBPGMASK_MASK
+ and a0,TLBINX_INXMASK
+ mfc0 t1,C0_TLBHI # save current TLBPID
+ mfc0 v0,C0_SR # save SR and disable interrupts
+ mtc0 zero,C0_SR
+ mtc0 t2,C0_PAGEMASK # set page mask
+ mtc0 a1,C0_TLBHI # set VPN and TLBPID
+ mtc0 a2,C0_TLBLO0 # set PPN and access bits
+ mtc0 a3,C0_TLBLO1 # set PPN and access bits
+ mtc0 a0,C0_INX # set INDEX to wired entry
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ tlbwi # drop it in
+ .set noreorder
+ nop; nop; nop; nop; nop; nop; nop; nop
+ .set reorder
+ mtc0 t1,C0_TLBHI # restore TLBPID
+ mtc0 v0,C0_SR # restore SR
+ j ra
+ENDFRAME(map_tlb4000)
+#endif
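
Each R4000 TLB entry maps an even/odd pair of pages, which is why the routine takes two PTEs. A hedged C view of the calling convention documented above; the prototype is inferred from the comment block, not from a header:

extern void map_tlb4000(unsigned mask_index,  /* a0: TLB index | page mask */
                        unsigned tlbhi,       /* a1: VPN2 | PID */
                        unsigned pte_even,    /* a2: even-page PTE */
                        unsigned pte_odd);    /* a3: odd-page PTE */
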
+
+
+/*
+** Set current TLBPID. This assumes the PID is positioned correctly in
+** register a0.
+*/
+FRAME(set_tlbpid,sp,0,ra)
+ .set noreorder
+ mtc0 a0,C0_TLBHI
+ j ra
+ nop
+ .set reorder
+ENDFRAME(set_tlbpid)
+