Diffstat (limited to 'c/src/lib/libcpu/arm/shared/arm920/mmu.c')
 c/src/lib/libcpu/arm/shared/arm920/mmu.c | 242 ++++++++++++++++++++++++++++++
 1 file changed, 242 insertions(+), 0 deletions(-)
diff --git a/c/src/lib/libcpu/arm/shared/arm920/mmu.c b/c/src/lib/libcpu/arm/shared/arm920/mmu.c
new file mode 100644
index 0000000000..967768d911
--- /dev/null
+++ b/c/src/lib/libcpu/arm/shared/arm920/mmu.c
@@ -0,0 +1,242 @@
+/*
+ * ARM920 MMU functions
+ *
+ * Copyright (c) 2004 by Cogent Computer Systems
+ * Written by Jay Monkman <jtm@lopingdog.com>
+ *
+ * $Id$
+ */
+#include <libcpu/mmu.h>
+
+typedef uint32_t mmu_lvl1_t;
+
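+/*
+ * First level translation table. The symbol is expected to come from
+ * the linker script and must sit on a 16 KB boundary, since CP15
+ * register 2 ignores the low 14 bits of the value written to it.
+ */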
+extern uint32_t _ttbl_base;
+
+static inline uint32_t mmu_get_id(void);
+static inline uint32_t mmu_get_ctrl(void);
+static inline void mmu_set_ctrl(uint32_t val);
+static inline uint32_t mmu_get_trans_tbl(void);
+static inline void mmu_set_trans_tbl(uint32_t val);
+static inline uint32_t mmu_get_domain_ctrl(void);
+static inline void mmu_set_domain_ctrl(uint32_t val);
+static inline uint32_t mmu_get_fault_stat(void);
+static inline void mmu_set_fault_stat(uint32_t val);
+static inline uint32_t mmu_get_fault_addr(void);
+static inline void mmu_set_fault_addr(uint32_t val);
+static inline void mmu_set_cache_inval(void);
+static inline void mmu_set_tlb_inval(void);
+static inline uint32_t mmu_get_proc_id(void);
+static inline void mmu_set_proc_id(uint32_t val);
+static void mmu_set_map_inval(mmu_lvl1_t *base);
+
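+/*
+ * Bits in CP15 register 1 (the control register). MMU_CTRL_DEFAULT
+ * keeps bits [6:3] set, which the ARM920T expects to read as one.
+ */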
+#define MMU_CTRL_MMU_EN (1 << 0)
+#define MMU_CTRL_ALIGN_FAULT_EN (1 << 1)
+#define MMU_CTRL_D_CACHE_EN (1 << 2)
+#define MMU_CTRL_DEFAULT (0xf << 3)
+#define MMU_CTRL_LITTLE_ENDIAN (0 << 7)
+#define MMU_CTRL_BIG_ENDIAN (1 << 7)
+#define MMU_CTRL_SYS_PROT (1 << 8)
+#define MMU_CTRL_ROM_PROT (1 << 9)
+#define MMU_CTRL_I_CACHE_EN (1 << 12)
+#define MMU_CTRL_LOW_VECT (0 << 13)
+#define MMU_CTRL_HIGH_VECT (1 << 13)
+
+
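+/*
+ * Build a level 1 section descriptor: bits [31:20] are the section
+ * base address, [11:10] the access permissions, [8:5] the domain,
+ * bit 3 cacheable and bit 2 bufferable. The ap and dom arguments
+ * must already be shifted into position. The constant 0x12 supplies
+ * the "section" descriptor type (bits [1:0] = 0b10) plus bit 4,
+ * which should be one on ARMv4 parts.
+ */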
+#define MMU_SET_LVL1_SECT(addr, ap, dom, ce, be) \
+ (((addr) & 0xfff00000) | \
+ (ap) | \
+ (dom) | \
+ ((ce) << 3) | \
+ ((be) << 2) | \
+ 0x12)
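+/*
+ * For example (hypothetical values), a cacheable, bufferable section
+ * mapping physical address 0x20000000 with full access in domain 0:
+ *
+ *   MMU_SET_LVL1_SECT(0x20000000, MMU_SECT_AP_ALL, 0, 1, 1) == 0x20000c1e
+ */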
+
+#define MMU_SET_LVL1_INVAL (0x0)
+
+#define MMU_SECT_AP_ALL (0x3 << 10)
+
+#define NOP asm volatile ("nop")
+
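+/*
+ * A minimal usage sketch with hypothetical regions. The real table
+ * comes from the BSP; the initializer order assumed here is paddr,
+ * vaddr, size (in 1 MB sections), cache_flags, matching the fields
+ * read below.
+ *
+ *   mmu_sect_map_t mem_map[] = {
+ *       { 0x00000000, 0x00000000, 32, MMU_CACHE_WBACK },   32 MB SDRAM
+ *       { 0x80000000, 0x80000000,  1, MMU_CACHE_NONE  },   device registers
+ *       { 0x00000000, 0x00000000,  0, 0 }                  terminator
+ *   };
+ *
+ *   mmu_init(mem_map);
+ */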
+void mmu_init(mmu_sect_map_t *map)
+{
+ mmu_lvl1_t *lvl1_base;
+ int i;
+
+ /* flush the cache and TLB */
+ mmu_set_cache_inval();
+ mmu_set_tlb_inval();
+
+ /* grant manager access to all 16 domains (0x3 in each 2-bit
+ * field), so accesses are never checked against the APs */
+ mmu_set_domain_ctrl(0xffffffff);
+
+ lvl1_base = (mmu_lvl1_t *)&_ttbl_base;
+
+ /* set up the trans table */
+ mmu_set_map_inval(lvl1_base);
+ mmu_set_trans_tbl((uint32_t) lvl1_base);
+
+ /* install a section mapping for each region in the map table;
+ * BSPs typically use this to describe a 1:1 mapping */
+ i = 0;
+ while (map[i].size != 0) {
+ int c;
+ int b;
+ int pbase;
+ int vbase;
+ int sects;
+
+ switch (map[i].cache_flags) {
+ case MMU_CACHE_NONE:
+ c = 0;
+ b = 0;
+ break;
+ case MMU_CACHE_BUFFERED:
+ c = 0;
+ b = 1;
+ break;
+ case MMU_CACHE_WTHROUGH:
+ c = 1;
+ b = 0;
+ break;
+ case MMU_CACHE_WBACK:
+ c = 1;
+ b = 1;
+ break;
+ default:
+ /* unknown flags: fall back to uncached, unbuffered so c and b
+ * are never used uninitialized */
+ c = 0;
+ b = 0;
+ break;
+ }
+
+ pbase = (map[i].paddr & 0xfff00000) >> 20;
+ vbase = (map[i].vaddr & 0xfff00000) >> 20;
+ sects = map[i].size;
+
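+ /* write one level 1 section descriptor per 1 MB of the region */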
+ while (sects > 0) {
+ lvl1_base[vbase] = MMU_SET_LVL1_SECT(pbase << 20,
+ MMU_SECT_AP_ALL,
+ 0,
+ c,
+ b);
+ pbase++;
+ vbase++;
+ sects--;
+ }
+ i++;
+ }
+
+ /* flush the cache and TLB */
+ mmu_set_cache_inval();
+ mmu_set_tlb_inval();
+
+ NOP;
+ NOP;
+
+ /* enable the MMU, alignment fault checking, and the I & D caches */
+ mmu_set_ctrl(MMU_CTRL_DEFAULT |
+ MMU_CTRL_D_CACHE_EN |
+ MMU_CTRL_I_CACHE_EN |
+ MMU_CTRL_ALIGN_FAULT_EN |
+ MMU_CTRL_LITTLE_ENDIAN |
+ MMU_CTRL_MMU_EN);
+
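+ /* a couple of NOPs to cover instructions already fetched before
+ * the new translation and cache state takes effect */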
+ NOP;
+ NOP;
+
+ return;
+}
+
+
+static inline uint32_t mmu_get_id(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr0, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline uint32_t mmu_get_ctrl(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr1, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_ctrl(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr1, cr0, 0\n" : :"r" (val));
+}
+
+static inline uint32_t mmu_get_trans_tbl(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr2, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_trans_tbl(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr2, cr0, 0\n" : :"r" (val));
+}
+
+static inline uint32_t mmu_get_domain_ctrl(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr3, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_domain_ctrl(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr3, cr0, 0\n" : :"r" (val));
+}
+
+static inline uint32_t mmu_get_fault_stat(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr5, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_fault_stat(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr5, cr0, 0\n" : :"r" (val));
+}
+
+static inline uint32_t mmu_get_fault_addr(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr6, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_fault_addr(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr6, cr0, 0\n" : :"r" (val));
+}
+
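+/* MCR c7, c7, 0 invalidates both the ICache and the DCache on the
+ * ARM920T; the register value written is ignored */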
+static inline void mmu_set_cache_inval(void)
+{
+ uint32_t val = 0;
+ asm volatile ("mcr 15, 0, %0, cr7, cr7, 0\n" : :"r" (val));
+}
+
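+/* MCR c8, c7, 0 invalidates both the instruction and data TLBs */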
+static inline void mmu_set_tlb_inval(void)
+{
+ uint32_t val = 0;
+ asm volatile ("mcr 15, 0, %0, cr8, cr7, 0\n" : :"r" (val));
+}
+
+static inline uint32_t mmu_get_proc_id(void)
+{
+ uint32_t val;
+ asm volatile ("mrc 15, 0, %0, cr13, cr0, 0\n" : "=r" (val));
+ return val;
+}
+
+static inline void mmu_set_proc_id(uint32_t val)
+{
+ asm volatile ("mcr 15, 0, %0, cr13, cr0, 0\n" : :"r" (val));
+}
+
+/* set all 4096 level 1 entries (16 KB, one per 1 MB section) to
+ * invalid descriptors */
+static void mmu_set_map_inval(mmu_lvl1_t *base)
+{
+ int i;
+ for (i = 0; i < (0x4000 / 4); i++) {
+ base[i] = MMU_SET_LVL1_INVAL;
+ }
+}