author     Joel Sherrill <joel.sherrill@OARcorp.com>    2002-05-14 16:56:44 +0000
committer  Joel Sherrill <joel.sherrill@OARcorp.com>    2002-05-14 16:56:44 +0000
commit     0d776cd24743625f2888d04d72188b2e3f416a3e (patch)
tree       3f146379e2bd7bdd9f4ee85485fb5039fc036b71 /c/src/lib/libcpu/powerpc/mpc6xx
parent     2001-05-14 Till Straumann <strauman@slac.stanford.edu> (diff)
2001-05-14 Till Straumann <strauman@slac.stanford.edu>
	* rtems/powerpc/registers.h, rtems/score/ppc.h: Per PR213, add the following:
	- support for the MPC7400 (AKA G4); there is no AltiVec support yet, however.
	- the cache flushing assembly code uses the hardware flush feature on the G4. Also, a couple of hardcoded numerical values were replaced by more readable symbolic constants.
	- extended the interrupt-disabled code section to enclose the entire cache flush/invalidate procedure (as recommended by the book). This is not (latency) critical, as it is only used by init code, but it prevents possible corruption.
	- trivial page table support has been added (a 1:1 effective = virtual = physical address mapping), which is useful only on CPUs that feature hardware TLB replacement, e.g. >604. This allows for write-protecting memory regions, e.g. text/ro-data, which makes catching corruptors a lot easier. It also frees one DBAT/IBAT and gives more flexibility for setting up address maps :-)
	- setdbat() now allows changing BAT0 as well (since the BSP may use a page table, BAT0 could be available...).
	- asm_setdbatX() violated the SVR4 ABI by using r20 as a scratch register; changed to r0.
	- according to the book, a context-synchronizing instruction is necessary before and after changing a DBAT -> isync added.
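
For context, a BSP consumes these routines through setdbat(); the sketch below shows a plausible call site. Only the (bat_index, virt, phys) prefix of the signature is visible in the bat.c hunk further down; the remaining parameters and their meaning are assumptions for illustration, not the verbatim libcpu interface.

    /* Hypothetical BSP fragment: map the first 256MB of RAM 1:1 through
     * DBAT0 -- possible after this patch, since setdbat() now accepts
     * index 0. The 'size' and 'flags' parameters are assumed.
     */
    extern void setdbat(int bat_index, unsigned long virt,
                        unsigned long phys, unsigned int size, int flags);

    void bsp_map_ram(void)
    {
      setdbat(0,            /* BAT index, usable since this change */
              0x00000000,   /* effective (virtual) base            */
              0x00000000,   /* physical base                       */
              0x10000000,   /* 256MB block, power of two           */
              0);           /* access flags, assumed               */
    }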
Diffstat (limited to 'c/src/lib/libcpu/powerpc/mpc6xx')
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c |   1
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.am            |   6
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c                  |   1
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S               | 200
4 files changed, 153 insertions, 55 deletions
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
index 7c6730a81b..d964cbbe59 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
@@ -113,6 +113,7 @@ int mpc604_vector_is_valid(rtems_vector vector)
int mpc60x_vector_is_valid(rtems_vector vector)
{
switch (current_ppc_cpu) {
+ case PPC_7400:
case PPC_750:
if (!mpc750_vector_is_valid(vector)) {
return 0;
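
The single added case works because, per this patch, the 7400 is validated like a 750: C switch fall-through lets it share the 750's check. Condensed, the resulting shape is (other CPU cases and the tail of the function are elided/assumed):

    int mpc60x_vector_is_valid(rtems_vector vector)
    {
      switch (current_ppc_cpu) {
        case PPC_7400:    /* falls through: same vector set as the 750 */
        case PPC_750:
          if (!mpc750_vector_is_valid(vector))
            return 0;
          break;
        /* ... cases for 601/603/604 variants elided ... */
      }
      return 1;  /* assumed tail of the real function */
    }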
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.am b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.am
index c55d3cdd81..fa40244040 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.am
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/Makefile.am
@@ -5,13 +5,13 @@
PGM = $(ARCH)/mmu.rel
-C_FILES = bat.c
+C_FILES = bat.c pte121.c
S_FILES = mmuAsm.S
include_libcpudir = $(includedir)/libcpu
-include_libcpu_HEADERS = bat.h
+include_libcpu_HEADERS = bat.h pte121.h
mmu_rel_OBJECTS = $(C_FILES:%.c=$(ARCH)/%.o) $(S_FILES:%.S=$(ARCH)/%.o)
@@ -38,6 +38,6 @@ all-local: $(ARCH) $(PREINSTALL_FILES) $(mmu_rel_OBJECTS) $(PGM)
.PRECIOUS: $(PGM)
-EXTRA_DIST = bat.c bat.h mmuAsm.S
+EXTRA_DIST = bat.c bat.h mmuAsm.S pte121.c pte121.h
include $(top_srcdir)/../../../../../automake/local.am
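
The pte121.c/pte121.h pair added to the build here implements the trivial 1:1 page table from the ChangeLog entry. Its interface is not part of this diff, so the sketch below uses invented names purely to illustrate the intended use (write-protecting text/ro-data); see pte121.h for the real API.

    /* All identifiers here (pgtbl_setup, pgtbl_protect, the linker
     * symbols) are hypothetical placeholders, not the pte121 API.
     */
    extern char _text_start[], _etext[];                 /* linker symbols, assumed */

    extern void pgtbl_setup(void);                       /* invented */
    extern void pgtbl_protect(void *start, long size);   /* invented */

    void bsp_protect_text(void)
    {
      pgtbl_setup();  /* build the 1:1 effective-to-physical map  */
      /* mark text/ro-data read-only so corruptors fault early    */
      pgtbl_protect(_text_start, _etext - _text_start);
    }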
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
index e39ab96ec5..8f54024bc5 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
@@ -55,6 +55,7 @@ void setdbat(int bat_index, unsigned long virt, unsigned long phys,
bat_addrs[bat_index].limit = virt + ((bl + 1) << 17) - 1;
bat_addrs[bat_index].phys = phys;
switch (bat_index) {
+ case 0 : asm_setdbat0(bat.word[0], bat.word[1]); break;
case 1 : asm_setdbat1(bat.word[0], bat.word[1]); break;
case 2 : asm_setdbat2(bat.word[0], bat.word[1]); break;
case 3 : asm_setdbat3(bat.word[0], bat.word[1]); break;
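
The 'limit' computation visible as context in this hunk encodes the BAT block length (BL): each BL step covers one 128KB (1<<17) granule. A minimal sketch of the correspondence, assuming a power-of-two size of at least 128KB:

    /* BAT block-length (BL) arithmetic behind bat.c's limit field. */
    static unsigned long bat_limit(unsigned long virt, unsigned long size)
    {
      unsigned long bl = (size >> 17) - 1;   /* BL field value         */
      return virt + ((bl + 1) << 17) - 1;    /* last byte of the block */
      /* e.g. size = 256MB: bl = 0x7FF, limit = virt + 0x0FFFFFFF */
    }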
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
index 470bf45ae7..cbeb40be32 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
@@ -12,55 +12,115 @@
* found in the file LICENSE in this distribution or at
* http://www.OARcorp.com/rtems/license.html.
*
+ * T. Straumann - 11/2001: added support for 7400 (no AltiVec yet)
*/
#include <asm.h>
#include <rtems/score/cpu.h>
#include <libcpu/io.h>
+/* Unfortunately, the CPU types defined in cpu.h are
+ * an 'enum' type and hence not available :-(
+ */
+#define PPC_601 0x1
+#define PPC_603 0x3
+#define PPC_604 0x4
+#define PPC_603e 0x6
+#define PPC_603ev 0x7
+#define PPC_750 0x8
+#define PPC_604e 0x9
+#define PPC_604r 0xA
+#define PPC_7400 0xC
+#define PPC_620 0x16
+#define PPC_860 0x50
+#define PPC_821 PPC_860
+#define PPC_8260 0x81
+
+/* AltiVec instructions (not recognized by off-the-shelf gcc yet) */
+#define DSSALL .long 0x7e00066c /* DSSALL altivec instruction opcode */
+
+/* A couple of defines to make the code more readable */
+#define CACHE_LINE_SIZE 32
+
+#ifndef MSSCR0
+#define MSSCR0 1014
+#else
+#warning MSSCR0 seems to be known; update __FILE__
+#endif
+
+#define DL1HWF (1<<(31-8))
+#define L2HWF (1<<(31-20))
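+/* Note: PowerPC numbers bits from the MSB (bit 0), so 1<<(31-n) selects
+ * big-endian bit n: DL1HWF is MSSCR0 bit 8 (L1 hardware flush) and
+ * L2HWF is L2CR bit 20 (L2 hardware flush).
+ */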
+
+
+
/*
* Each setdbat routine starts by invalidating the DBAT, as some
* processors (e.g. the 604e) require the valid bit to be cleared
* before they accept a write to the BAT.
*/
+ .globl asm_setdbat0
+ .type asm_setdbat0,@function
+asm_setdbat0:
+ li r0,0
+ sync
+ isync
+ mtspr DBAT0U,r0
+ mtspr DBAT0L,r0
+ sync
+ isync
+ mtspr DBAT0L, r4
+ mtspr DBAT0U, r3
+ sync
+ isync
+ blr
+
.globl asm_setdbat1
.type asm_setdbat1,@function
asm_setdbat1:
- li r20,0
- SYNC
- mtspr DBAT1U,r20
- mtspr DBAT1L,r20
- SYNC
+ li r0,0
+ sync
+ isync
+ mtspr DBAT1U,r0
+ mtspr DBAT1L,r0
+ sync
+ isync
mtspr DBAT1L, r4
mtspr DBAT1U, r3
- SYNC
+ sync
+ isync
blr
.globl asm_setdbat2
.type asm_setdbat2,@function
asm_setdbat2:
- li r20,0
- SYNC
- mtspr DBAT2U,r20
- mtspr DBAT2L,r20
- SYNC
+ li r0,0
+ sync
+ isync
+ mtspr DBAT2U,r0
+ mtspr DBAT2L,r0
+ sync
+ isync
mtspr DBAT2L, r4
mtspr DBAT2U, r3
- SYNC
+ sync
+ isync
blr
.globl asm_setdbat3
.type asm_setdbat3,@function
asm_setdbat3:
- li r20,0
- SYNC
- mtspr DBAT3U,r20
- mtspr DBAT3L,r20
- SYNC
+ li r0,0
+ sync
+ isync
+ mtspr DBAT3U,r0
+ mtspr DBAT3L,r0
+ sync
+ isync
mtspr DBAT3L, r4
mtspr DBAT3U, r3
- SYNC
+ sync
+ isync
blr
.globl L1_caches_enables
@@ -72,7 +132,7 @@ L1_caches_enables:
*/
mfspr r9,PVR
rlwinm r9,r9,16,16,31
- cmpi 0,r9,1
+ cmpi 0,r9,PPC_601
beq 4f /* not needed for 601 */
mfspr r11,HID0
andi. r0,r11,HID0_DCE
@@ -87,12 +147,19 @@ L1_caches_enables:
mtspr HID0,r11 /* enable caches */
sync
isync
- cmpi 0,r9,4 /* check for 604 */
- cmpi 1,r9,9 /* or 604e */
- cmpi 2,r9,10 /* or mach5 */
+ cmpi 0,r9,PPC_604 /* check for 604 */
+ cmpi 1,r9,PPC_604e /* or 604e */
+ cmpi 2,r9,PPC_604r /* or mach5 */
cror 2,2,6
cror 2,2,10
+ cmpi 1,r9,PPC_750 /* or 750 */
+ cror 2,2,6
+ cmpi 1,r9,PPC_7400 /* or 7400 */
+ bne 3f
+ ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 */
+3: cror 2,2,6
bne 4f
+ /* on 7400 SIED is actually SGE (store gathering enable) */
ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
bne 2,5f
ori r11,r11,HID0_BTCD
@@ -103,13 +170,17 @@ L1_caches_enables:
.globl get_L2CR
.type get_L2CR, @function
get_L2CR:
- /* Make sure this is a 750 chip */
+ /* Make sure this is a 750 or 7400 chip */
mfspr r3,PVR
rlwinm r3,r3,16,16,31
- cmplwi r3,0x0008
+ cmplwi r3,PPC_750 /* it's a 750 */
+ beq 1f
+ cmplwi r3,PPC_7400 /* it's a 7400 */
+ beq 1f
li r3,0
- bnelr
+ blr
+1:
/* Return the L2CR contents */
mfspr r3,L2CR
blr
@@ -146,10 +217,12 @@ set_L2CR:
*the L2 cache instead of to main memory.
*/
- /* Make sure this is a 750 chip */
- mfspr r4,PVR
- rlwinm r4,r4,16,16,31
- cmplwi r4,0x0008
+ /* Make sure this is a 750 or 7400 chip */
+ mfspr r0,PVR
+ rlwinm r0,r0,16,16,31
+ cmplwi r0,PPC_750
+ beq thisIs750
+ cmplwi r0,PPC_7400
beq thisIs750
li r3,-1
blr
@@ -161,53 +234,74 @@ thisIs750:
/* See if we want to perform a global inval this time. */
rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
- rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
+ rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
- or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
- bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
+ or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
+ mfmsr r7 /* shut off interrupts around critical flush/invalidate sections */
+ rlwinm r4,r7,0,17,15 /* Turn off EE bit - an external exception while we are flushing
+ the cache is fatal (comment this line and see!) */
+ mtmsr r4
+ bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
+
+ cmplwi r0,PPC_7400 /* a 7400 ? */
+ bne disableCache /* use traditional method */
+
+ /* On the 7400, they recommend using the hardware flush feature */
+ DSSALL /* stop all data streams */
+ sync
+ /* we wouldn't have to flush the L1, but for the sake of consistency with the other code we do it anyway */
+ mfspr r4, MSSCR0
+ oris r4, r4, DL1HWF@h
+ mtspr MSSCR0, r4
+ sync
+ /* L1 flushed */
+ mfspr r4, L2CR
+ ori r4, r4, L2HWF
+ mtspr L2CR, r4
+ sync
+ /* L2 flushed */
+ b flushDone
disableCache:
/* Disable the cache. First, we turn off data relocation. */
- mfmsr r7
- rlwinm r4,r7,0,28,26 /* Turn off DR bit */
- rlwinm r4,r4,0,17,15 /* Turn off EE bit - an external exception while we are flushing
- the cache is fatal (comment this line and see!) */
- sync
+ rlwinm r4,r4,0,28,26 /* Turn off DR bit */
mtmsr r4
- sync
+ isync /* make sure memory accesses have completed */
/*
Now, read the first 2MB of memory to put new data in the cache.
(Actually we only need the size of the L2 cache plus
the size of the L1 cache, but 2MB will cover everything just to be safe).
*/
- lis r4,0x0001
+ lis r4,0x0001
mtctr r4
- li r4,0
+ li r4,0
loadLoop:
lwzx r0,r0,r4
- addi r4,r4,0x0020 /* Go to start of next cache line */
+ addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
bdnz loadLoop
/* Now, flush the first 2MB of memory */
- lis r4,0x0001
+ lis r4,0x0001
mtctr r4
- li r4,0
+ li r4,0
sync
flushLoop:
dcbf r0,r4
- addi r4,r4,0x0020 /* Go to start of next cache line */
+ addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
bdnz flushLoop
+ sync
+
+ rlwinm r4,r7,0,17,15 /* still mask EE but reenable data relocation */
+ mtmsr r4
+ isync
+
+flushDone:
/* Turn off the L2CR enable bit. */
rlwinm r3,r3,0,1,31
- /* Reenable data relocation. */
- sync
- mtmsr r7
- sync
-
dontDisableCache:
/* Set up the L2CR configuration bits */
sync
@@ -219,10 +313,10 @@ dontDisableCache:
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
- mtspr 1017,r3
+ mtspr L2CR,r3
sync
-invalCompleteLoop: /* Wait for the invalidation to complete */
- mfspr r3,1017
+invalCompleteLoop: /* Wait for the invalidation to complete */
+ mfspr r3,L2CR
rlwinm. r4,r3,0,31,31
bne invalCompleteLoop
@@ -232,6 +326,8 @@ invalCompleteLoop: /* Wait for the invalidation to complete */
sync
noInval:
+ /* re-enable interrupts, i.e. restore original MSR */
+ mtmsr r7 /* (no sync needed) */
/* See if we need to enable the cache */
cmplwi r5,0
beqlr
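
A closing note on the rlwinm masks used for the interrupt fencing above: PowerPC numbers bits from the MSB, so rlwinm rD,rS,0,17,15 keeps every bit except big-endian bit 16, which is MSR[EE] (0x8000). Rendered in C under that reading (read_msr/write_msr stand in for mfmsr/mtmsr and are assumed helpers, not libcpu API):

    #define MSR_EE 0x8000UL  /* big-endian bit 16 of the MSR: 1 << (31 - 16) */

    extern unsigned long read_msr(void);     /* assumed mfmsr wrapper */
    extern void write_msr(unsigned long v);  /* assumed mtmsr wrapper */

    void flush_with_externals_masked(void)
    {
      unsigned long msr = read_msr();  /* mfmsr r7                        */
      write_msr(msr & ~MSR_EE);        /* rlwinm r4,r7,0,17,15 ; mtmsr r4 */
      /* ... flush/invalidate L1 and L2 while EE is masked ...            */
      write_msr(msr);                  /* mtmsr r7: restore original MSR  */
    }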