summaryrefslogtreecommitdiffstats
path: root/c/src/lib/libcpu/arm/shared/arm920/mmu.c
blob: 752314723de948a7f442fe243a72afc46063fc16 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
/*
 *  ARM920 MMU functions
 *
 *  Copyright (c) 2004 by Cogent Computer Systems
 *  Written by Jay Monkman <jtm@lopingdog.com>
 *
 *  $Id$
 */
#include <libcpu/mmu.h>

/* A level 1 translation table entry is a single 32-bit descriptor. */
typedef uint32_t mmu_lvl1_t;

/* First address of the level 1 translation table; provided by the
   linker script. */
extern uint32_t _ttbl_base;

/* Forward declarations for the CP15 coprocessor register accessors
   defined below. */
static inline uint32_t mmu_get_id(void);
static inline uint32_t mmu_get_ctrl(void);
static inline void mmu_set_ctrl(uint32_t val);
static inline uint32_t mmu_get_trans_tbl(void);
static inline void mmu_set_trans_tbl(uint32_t val);
static inline uint32_t mmu_get_domain_ctrl(void);
static inline void mmu_set_domain_ctrl(uint32_t val);
static inline uint32_t mmu_get_fault_stat(void);
static inline void mmu_set_fault_stat(uint32_t val);
static inline uint32_t mmu_get_fault_addr(void);
static inline void mmu_set_fault_addr(uint32_t val);
static inline void mmu_set_cache_inval(void);
static inline void mmu_set_tlb_inval(void);
static inline uint32_t mmu_get_proc_id(void);
static inline void mmu_set_proc_id(uint32_t val);
static void mmu_set_map_inval(mmu_lvl1_t *base);

/* Bit assignments of the CP15 control register (c1). */
#define MMU_CTRL_MMU_EN             (1 << 0)   /* M: enable the MMU */
#define MMU_CTRL_ALIGN_FAULT_EN     (1 << 1)   /* A: fault on unaligned access */
#define MMU_CTRL_D_CACHE_EN         (1 << 2)   /* C: enable the data cache */
#define MMU_CTRL_DEFAULT            (0xf << 3) /* bits 3-6: presumably the
                                                  "should be one" bits -- TODO
                                                  confirm against the TRM */
#define MMU_CTRL_LITTLE_ENDIAN      (0 << 7)   /* B: byte order select */
#define MMU_CTRL_BIG_ENDIAN         (1 << 7)
#define MMU_CTRL_SYS_PROT           (1 << 8)   /* S: system protection */
#define MMU_CTRL_ROM_PROT           (1 << 9)   /* R: ROM protection */
#define MMU_CTRL_I_CACHE_EN         (1 << 12)  /* I: enable the instruction cache */
#define MMU_CTRL_LOW_VECT           (0 << 13)  /* V: vectors at 0x00000000 ... */
#define MMU_CTRL_HIGH_VECT          (1 << 13)  /* ... or at 0xffff0000 */


/*
 * Build a level 1 "section" descriptor: section base address in bits
 * 31-20, access permissions (ap), C (cacheable) bit 3, B (bufferable)
 * bit 2, and 0x12 for the section descriptor type.
 *
 * NOTE(review): (dom) is OR'd in unshifted, but the domain field of a
 * section descriptor occupies bits 8-5; every caller in this file
 * passes 0 so this currently has no effect -- confirm the shift
 * before passing a nonzero domain.
 */
#define MMU_SET_LVL1_SECT(addr, ap, dom, ce, be) \
          (((addr) & 0xfff00000) |     \
           (ap)                  |     \
           (dom)                 |     \
           ((ce) << 3)           |     \
           ((be) << 2)           |     \
           0x12)

/* An all-zero level 1 entry is an invalid (fault-generating) descriptor. */
#define MMU_SET_LVL1_INVAL (0x0)

/* Section access permission bits: read/write access for all modes. */
#define MMU_SECT_AP_ALL (0x3 << 10)

/* Single no-op instruction, used to let the pipeline settle around
   the MMU enable (GNU statement expression). */
#define NOP ( { asm volatile ("nop\n" ); } )

/*
 * Build a section-granularity mapping from the caller-supplied map,
 * install it as the active translation table, and enable the MMU
 * together with the I and D caches and alignment faulting.
 *
 * map: array of section map entries, terminated by an entry whose
 *      size field is 0.  Each entry maps 'size' 1 MB sections from
 *      paddr to vaddr with the requested cache policy.
 */
void mmu_init(mmu_sect_map_t *map)
{
    mmu_lvl1_t *lvl1_base;
    int i;

    /* flush the cache and TLB */
    mmu_set_cache_inval();
    mmu_set_tlb_inval();

    /* set manage mode access for all domains */
    mmu_set_domain_ctrl(0xffffffff);

    /* the level 1 table lives at the linker-provided _ttbl_base */
    lvl1_base = (mmu_lvl1_t *)&_ttbl_base;

    /* set up the trans table: invalidate every entry first so that
       anything the map does not cover generates a fault */
    mmu_set_map_inval(lvl1_base);
    mmu_set_trans_tbl((uint32_t) lvl1_base);

    /* create a 1:1 mapping of the entire address space */
    i = 0;
    while(map[i].size != 0) {
        int c = 0;  /* to avoid uninitialized warnings */
        int b = 0;  /* to avoid uninitialized warnings */
        int pbase;
        int vbase;
        int sects;

        /* translate the cache policy into the section descriptor's
           C (cacheable) and B (bufferable) bits; an unrecognized
           flag leaves the defaults c = b = 0 (uncached, unbuffered) */
        switch (map[i].cache_flags) {
        case MMU_CACHE_NONE:
            c = 0;
            b = 0;
            break;
        case MMU_CACHE_BUFFERED:
            c = 0;
            b = 1;
            break;
        case MMU_CACHE_WTHROUGH:
            c = 1;
            b = 0;
            break;
        case MMU_CACHE_WBACK:
            c = 1;
            b = 1;
            break;
        }

        /* convert the physical/virtual base addresses into 1 MB
           section indices into the level 1 table */
        pbase = (map[i].paddr & 0xfff00000) >> 20;
        vbase = (map[i].vaddr & 0xfff00000) >> 20;
        sects = map[i].size;

        /* write one section descriptor per 1 MB covered */
        while (sects > 0) {
            lvl1_base[vbase] = MMU_SET_LVL1_SECT(pbase << 20,
                                                 MMU_SECT_AP_ALL,
                                                 0,
                                                 c,
                                                 b);
            pbase++;
            vbase++;
            sects--;
        }
        i++;
    }

    /* flush the cache and TLB */
    mmu_set_cache_inval();
    mmu_set_tlb_inval();

    NOP;
    NOP;

    /*  I & D caches turned on */
    mmu_set_ctrl(MMU_CTRL_DEFAULT |
                 MMU_CTRL_D_CACHE_EN |
                 MMU_CTRL_I_CACHE_EN |
                 MMU_CTRL_ALIGN_FAULT_EN |
                 MMU_CTRL_LITTLE_ENDIAN |
                 MMU_CTRL_MMU_EN);

    NOP;
    NOP;

    return;
}


/*
 * Read the CP15 ID register (c0).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register (and does not
 * assemble with this operand list); coprocessor reads require "mrc",
 * matching mmu_get_ctrl() below.
 */
static inline uint32_t mmu_get_id(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr0, cr0\n" : "=r" (val));
    return val;
}

/* Read the current value of the CP15 control register (c1). */
static inline uint32_t mmu_get_ctrl(void)
{
    uint32_t ctrl;

    asm volatile ("mrc 15, 0, %0, cr1, cr0\n" : "=r" (ctrl));

    return ctrl;
}

/* Write the CP15 control register (c1), which configures the MMU,
   caches, endianness and vector base (see MMU_CTRL_* above). */
static inline void mmu_set_ctrl(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr1, cr0, 0\n" : :"r" (val));
}

/*
 * Read the CP15 translation table base register (c2).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register; coprocessor
 * reads require "mrc", matching mmu_get_ctrl() above.
 */
static inline uint32_t mmu_get_trans_tbl(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr2, cr0\n" : "=r" (val));
    return val;
}

/* Write the CP15 translation table base register (c2); val must be
   the physical address of the level 1 table. */
static inline void mmu_set_trans_tbl(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr2, cr0, 0\n" : :"r" (val));
}

/*
 * Read the CP15 domain access control register (c3).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register; coprocessor
 * reads require "mrc", matching mmu_get_ctrl() above.
 */
static inline uint32_t mmu_get_domain_ctrl(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr3, cr0\n" : "=r" (val));
    return val;
}

/* Write the CP15 domain access control register (c3); two bits per
   domain (mmu_init() sets all domains to manager access). */
static inline void mmu_set_domain_ctrl(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr3, cr0, 0\n" : :"r" (val));
}

/*
 * Read the CP15 fault status register (c5).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register; coprocessor
 * reads require "mrc", matching mmu_get_ctrl() above.
 */
static inline uint32_t mmu_get_fault_stat(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr5, cr0\n" : "=r" (val));
    return val;
}

/* Write the CP15 fault status register (c5). */
static inline void mmu_set_fault_stat(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr5, cr0, 0\n" : :"r" (val));
}

/*
 * Read the CP15 fault address register (c6).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register; coprocessor
 * reads require "mrc", matching mmu_get_ctrl() above.
 */
static inline uint32_t mmu_get_fault_addr(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr6, cr0\n" : "=r" (val));
    return val;
}

/* Write the CP15 fault address register (c6). */
static inline void mmu_set_fault_addr(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr6, cr0, 0\n" : :"r" (val));
}

/* Issue the CP15 c7 cache maintenance operation (crm = c7) to
   invalidate the caches; the written value is ignored by the
   operation and is zero by convention. */
static inline void mmu_set_cache_inval(void)
{
    uint32_t val = 0;
    asm volatile ("mcr 15, 0, %0, cr7, cr7, 0\n" : :"r" (val));
}

/* Issue the CP15 c8 TLB maintenance operation (crm = c7) to
   invalidate the TLB; the written value is ignored by the operation
   and is zero by convention. */
static inline void mmu_set_tlb_inval(void)
{
    uint32_t val = 0;
    asm volatile ("mcr 15, 0, %0, cr8, cr7, 0\n" : :"r" (val));
}

/*
 * Read the CP15 process ID register (c13).
 *
 * Fix: the original used "msr", which targets the program status
 * registers and cannot read a coprocessor register; coprocessor
 * reads require "mrc", matching mmu_get_ctrl() above.
 */
static inline uint32_t mmu_get_proc_id(void)
{
    uint32_t val;
    asm volatile ("mrc 15, 0, %0, cr13, cr0\n" : "=r" (val));
    return val;
}

/* Write the CP15 process ID register (c13). */
static inline void mmu_set_proc_id(uint32_t val)
{
    asm volatile ("mcr 15, 0, %0, cr13, cr0, 0\n" : :"r" (val));
}

/* Set all the level 1 entries to be invalid descriptors, so that any
   address not explicitly mapped afterwards generates a fault.  The
   table is 16 KB (0x4000 bytes) of 32-bit descriptors. */
static void mmu_set_map_inval(mmu_lvl1_t *base)
{
    mmu_lvl1_t *entry = base;
    mmu_lvl1_t * const end = base + (0x4000 / sizeof(mmu_lvl1_t));

    while (entry != end) {
        *entry++ = MMU_SET_LVL1_INVAL;
    }
}


/* Set the top two bits (31:30) of the CP15 control register via a
   read-modify-write; on the ARM920 these select the CPU clocking
   mode (asynchronous). */
void mmu_set_cpu_async_mode(void)
{
    mmu_set_ctrl(mmu_get_ctrl() | 0xc0000000);
}