author     Till Straumann <strauman@slac.stanford.edu>   2005-11-03 01:54:59 +0000
committer  Till Straumann <strauman@slac.stanford.edu>   2005-11-03 01:54:59 +0000
commit     912ab10e29e318802da485772252d8d9962b5363 (patch)
tree       8cc907627297e181ce249b5ab6f454ec03059de2 /c/src/lib/libcpu
parent     2005-11-02 straumanatslacdotstanford.edu (diff)
2005-11-02 straumanatslacdotstanford.edu
* mpc6xx/mmu/bat.c, mpc6xx/mmu/bat.h, mpc6xx/mmu/mmuAsm.S: moved assembly code to C; setdbat now supports high bats on 7450 CPUs; added argument checking to setdbat; added getdbat; moved early initialization code (clear_bats) from BSP to libcpu (CPU_clear_bats_early)
Diffstat (limited to 'c/src/lib/libcpu')
-rw-r--r--  c/src/lib/libcpu/powerpc/ChangeLog            |   8
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c     | 473
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h     |  41
-rw-r--r--  c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S  | 157
4 files changed, 575 insertions, 104 deletions
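
For orientation before the diff itself: the patch reworks the BAT support so that setdbat() validates its arguments and works on the high BATs of 745x CPUs, and it adds getdbat(). A minimal usage sketch follows; the BAT index, addresses, and size are invented for the example, and only the setdbat()/getdbat() signatures and the IO_PAGE flag come from the patch.

#include <rtems.h>
#include <libcpu/bat.h>

/* Hypothetical BSP fragment (not part of the patch): map 256 MB of
 * PCI memory space 1:1 as cache-inhibited, guarded I/O space through
 * DBAT1, then let getdbat() dump the register to the console.
 */
void bsp_map_pci_space (void)
{
  /* virt == phys == 0x80000000, size 1<<28, attributes from IO_PAGE */
  setdbat (1, 0x80000000, 0x80000000, 1 << 28, IO_PAGE);

  /* NULL pointers for both value arguments ask getdbat() to print a
   * human-readable dump of DBAT1 instead of returning the raw words
   */
  getdbat (1, 0, 0);
}
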
diff --git a/c/src/lib/libcpu/powerpc/ChangeLog b/c/src/lib/libcpu/powerpc/ChangeLog
index 28487a72d8..134d9b4fe3 100644
--- a/c/src/lib/libcpu/powerpc/ChangeLog
+++ b/c/src/lib/libcpu/powerpc/ChangeLog
@@ -1,3 +1,11 @@
+2005-11-02 straumanatslacdotstanford.edu
+
+ * mpc6xx/mmu/bat.c, mpc6xx/mmu/bat.h, mpc6xx/mmu/mmuAsm.S: moved
+ assembly code to C; setdbat now supports high bats on 7450 CPUs;
+ added argument checking to setdbat; added getdbat; moved early
+ initialization code (clear_bats) from BSP to libcpu
+ (CPU_clear_bats_early)
+
2005-11-02 straumanatslacdotstanford.edu
* configure.ac, mpc6xx/exceptions/raw_exception.c,
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
index e706256535..42071a7642 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.c
@@ -19,52 +19,469 @@
*
* $Id$
*/
-
+#include <rtems.h>
#include <libcpu/bat.h>
+#include <libcpu/spr.h>
+#include <rtems/bspIo.h>
+
+#include <libcpu/cpuIdent.h>
+
+typedef union
+{ /* BAT register values to be loaded */
+ BAT bat;
+ struct
+ {
+ unsigned int u, l;
+ } words;
+} ubat;
+
+typedef struct batrange
+{ /* stores address ranges mapped by BATs */
+ unsigned long start;
+ unsigned long limit;
+ unsigned long phys;
+} batrange;
+
+batrange bat_addrs[8] = { {0,} };
+
+/* could encode this in bat_addrs but I don't touch that one for bwds compat. reasons */
+/* bitmask of used bats */
+static unsigned bat_in_use = 0;
+
+/* define a few macros */
+
+#define CLRBAT_ASM(batu,r) \
+ " sync \n" \
+ " isync \n" \
+ " li "#r", 0 \n" \
+ " mtspr "#batu", "#r"\n" \
+ " sync \n" \
+ " isync \n"
+
+#define SETBAT_ASM(batu, batl, u, l)\
+ " mtspr "#batl", "#l" \n" \
+ " sync \n" \
+ " isync \n" \
+ " mtspr "#batu", "#u" \n" \
+ " sync \n" \
+ " isync \n"
+
+#define CLRBAT(bat) \
+ asm volatile( \
+ CLRBAT_ASM(%0, 0) \
+ : \
+ :"i"(bat##U) \
+ :"0")
+
+#define GETBAT(bat,u,l) \
+ asm volatile( \
+ " mfspr %0, %2 \n" \
+ " mfspr %1, %3 \n" \
+ :"=r"(u),"=r"(l) \
+ :"i"(bat##U),"i"(bat##L) \
+ )
+
+#define DECL_SETBAT(lcbat,bat) \
+void \
+asm_set##lcbat(unsigned int upper, unsigned int lower) \
+{ \
+asm volatile( \
+ CLRBAT_ASM(%0,0) \
+ SETBAT_ASM(%0,%1,%2,%3) \
+ : \
+ :"i"(bat##U), \
+ "i"(bat##L), \
+ "r"(upper),"r"(lower) \
+ :"0"); \
+}
+
+/* export the 'asm' versions for historic reasons */
+DECL_SETBAT (dbat0, DBAT0)
+DECL_SETBAT (dbat1, DBAT1)
+DECL_SETBAT (dbat2, DBAT2)
+DECL_SETBAT (dbat3, DBAT3)
+
+static DECL_SETBAT (dbat4, DBAT4)
+static DECL_SETBAT (dbat5, DBAT5)
+static DECL_SETBAT (dbat6, DBAT6)
+static DECL_SETBAT (dbat7, DBAT7)
+
+SPR_RO (HID0);
+
+static void
+set_hid0_sync (unsigned long val)
+{
+ asm volatile (
+ " sync \n"
+ " isync \n"
+ " mtspr %0, %1 \n"
+ " sync \n"
+ " isync \n"
+ :
+ :"i" (HID0), "r" (val)
+ );
+}
+
+static void
+bat_addrs_put (ubat * bat, int idx)
+{
+ unsigned long bl;
+ if (bat->bat.batu.vp || bat->bat.batu.vs) {
+ bat_addrs[idx].start = bat->bat.batu.bepi << 17;
+ bat_addrs[idx].phys = bat->bat.batl.brpn << 17;
+
+ /* extended BL cannot be extracted using BAT union
+ * - let's just hope the upper bits read 0 on pre 745x
+ * CPUs.
+ */
+ bl = (bat->words.u << 15) | ((1 << 17) - 1);
+ bat_addrs[idx].limit = bat_addrs[idx].start + bl;
-typedef union { /* BAT register values to be loaded */
- BAT bat;
- unsigned int word[2];
-}ubat;
+ bat_in_use |= (1 << idx);
+ }
+}
+
+/* We don't know how the board was initialized. Therefore,
+ * when 'setdbat' is first used we must initialize our
+ * cache.
+ */
+static void
+bat_addrs_init ()
+{
+ ppc_cpu_id_t cpu = get_ppc_cpu_type ();
+
+ ubat bat;
+
+ GETBAT (DBAT0, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 0);
+ GETBAT (DBAT1, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 1);
+ GETBAT (DBAT2, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 2);
+ GETBAT (DBAT3, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 3);
+
+ if ((cpu == PPC_7455 || cpu == PPC_7457)
+ && (HID0_7455_HIGH_BAT_EN & _read_HID0 ())) {
+ GETBAT (DBAT4, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 4);
+ GETBAT (DBAT5, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 5);
+ GETBAT (DBAT6, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 6);
+ GETBAT (DBAT7, bat.words.u, bat.words.l);
+ bat_addrs_put (&bat, 7);
+ }
+}
-typedef struct batrange { /* stores address ranges mapped by BATs */
- unsigned long start;
- unsigned long limit;
- unsigned long phys;
-}batrange;
+static void
+do_dssall ()
+{
+ /* Before changing BATs, 'dssall' must be issued.
+ * We check MSR for MSR_VE and issue a 'dssall' if
+ * MSR_VE is set hoping that
+ * a) on non-altivec CPUs MSR_VE reads as zero
+ * b) all altivec CPUs use the same bit
+ */
+ if (_read_MSR () & MSR_VE) {
+ /* this construct is needed because we don't know
+ * if this file is compiled with -maltivec.
+ * (I plan to add altivec support outside of
+ * RTEMS core and hence I'd rather not
+ * rely on consistent compiler flags).
+ */
+#define DSSALL 0x7e00066c /* dssall opcode */
+ asm volatile (" .long %0"::"i" (DSSALL));
+#undef DSSALL
+ }
+}
-batrange bat_addrs[4];
+/* Clear I/D bats 4..7 ONLY ON 7455 etc. */
+static void
+clear_hi_bats ()
+{
+ do_dssall ();
+ CLRBAT (DBAT4);
+ CLRBAT (DBAT5);
+ CLRBAT (DBAT6);
+ CLRBAT (DBAT7);
+ CLRBAT (IBAT4);
+ CLRBAT (IBAT5);
+ CLRBAT (IBAT6);
+ CLRBAT (IBAT7);
+}
-void asm_setdbat0(unsigned int, unsigned int);
-void setdbat(int bat_index, unsigned long virt, unsigned long phys,
- unsigned int size, int flags)
+static int
+check_bat_index (int i)
+{
+ unsigned long hid0;
+
+ if (i >= 0 && i < 4)
+ return 0;
+ if (i >= 4 && i < 8) {
+ /* don't use current_ppc_cpu because we don't know if it has been set already */
+ ppc_cpu_id_t cpu = get_ppc_cpu_type ();
+ if (cpu != PPC_7455 && cpu != PPC_7457)
+ return -1;
+ /* OK, we're on the right hardware;
+ * check if we are already enabled
+ */
+ hid0 = _read_HID0 ();
+ if (HID0_7455_HIGH_BAT_EN & hid0)
+ return 0;
+ /* No; enable now */
+ clear_hi_bats ();
+ set_hid0_sync (hid0 | HID0_7455_HIGH_BAT_EN);
+ return 0;
+ }
+ return -1;
+}
+
+/* size argument check:
+ * - must be a power of two or zero
+ * - must be <= 1<<28 ( non 745x cpu )
+ * - can be 1<<29..1<<31 or 0xffffffff on 745x
+ * - size < 1<<17 means 0
+ * computes and returns the block mask
+ * RETURNS:
+ * block mask on success or -1 on error
+ */
+static int
+check_bat_size (unsigned long size)
{
+ unsigned long bit;
+ unsigned long hid0;
+
+ /* First of all, it must be a power of two */
+ if (0 == size)
+ return 0;
+
+ if (0xffffffff == size) {
+ bit = 32;
+ } else {
+ asm volatile (" cntlzw %0, %1":"=r" (bit):"r" (size));
+ bit = 31 - bit;
+ if (1 << bit != size)
+ return -1;
+ }
+ /* bit < 17 is not really legal but we aliased it to 0 in the past */
+ if (bit > (11 + 17)) {
+ /* don't use current_ppc_cpu because we don't know if it has been set already */
+ ppc_cpu_id_t cpu = get_ppc_cpu_type ();
+ if (cpu != PPC_7455 && cpu != PPC_7457)
+ return -1;
+
+ hid0 = _read_HID0 ();
+ /* Let's enable the larger block size if necessary */
+ if (!(HID0_7455_XBSEN & hid0))
+ set_hid0_sync (hid0 | HID0_7455_XBSEN);
+ }
+
+ return (1 << (bit - 17)) - 1;
+}
+
+static int
+check_overlap (unsigned long start, unsigned long size)
+{
+ int i;
+ unsigned long limit = start + size - 1;
+ for (i = 0; i < sizeof (bat_addrs) / sizeof (bat_addrs[0]); i++) {
+ if (!((1 << i) & bat_in_use))
+ continue; /* unused bat */
+ /* safe is 'limit < bat_addrs[i].start || start > bat_addrs[i].limit' */
+ if (limit >= bat_addrs[i].start && start <= bat_addrs[i].limit)
+ return i;
+ }
+ return -1;
+}
+
+
+/* Take no risks -- the essential parts of this routine run with
+ * interrupts disabled!
+ */
+
+void
+setdbat (int bat_index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags)
+{
+ unsigned long level;
unsigned int bl;
+ int err;
int wimgxpp;
ubat bat;
- bl = (size >= (1<<17)) ? (size >> 17) - 1 : 0;
+ if (check_bat_index (bat_index)) {
+ printk ("Invalid BAT index\n", bat_index);
+ return;
+ }
+
+ if ((int) (bl = check_bat_size (size)) < 0) {
+ printk ("Invalid BAT size\n", size);
+ return;
+ }
+
+ if (virt & (size - 1)) {
+ printk ("BAT effective address 0x%08x misaligned (size is 0x%08x)\n",
+ virt, size);
+ return;
+ }
+
+ if (phys & (size - 1)) {
+ printk ("BAT physical address 0x%08x misaligned (size is 0x%08x)\n", phys,
+ size);
+ return;
+ }
+
+ if (virt + size - 1 < virt) {
+ printk ("BAT range invalid: wraps around zero 0x%08x..0x%08x\n", virt,
+ virt + size - 1);
+ return;
+ }
+
+/* must protect the bat_addrs table -- since this routine is only used for board setup
+ * or similar special purposes we don't bother about interrupt latency too much.
+ */
+ rtems_interrupt_disable (level);
+
+ { /* might have to initialize our cached data */
+ static char init_done = 0;
+ if (!init_done) {
+ bat_addrs_init ();
+ init_done = 1;
+ }
+ }
+
+ if (size >= (1 << 17) && (err = check_overlap (virt, size)) >= 0) {
+ rtems_interrupt_enable (level);
+ printk ("BATs must not overlap; area 0x%08x..0x%08x hits BAT %i\n",
+ virt, virt + size, err);
+ return;
+ }
+
/* 603, 604, etc. */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
- | _PAGE_COHERENT | _PAGE_GUARDED);
- wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
- bat.word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
- bat.word[1] = phys | wimgxpp;
+ | _PAGE_COHERENT | _PAGE_GUARDED);
+ wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
+ bat.words.u = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+ bat.words.l = phys | wimgxpp;
if (flags & _PAGE_USER)
bat.bat.batu.vp = 1;
bat_addrs[bat_index].start = virt;
- bat_addrs[bat_index].limit = virt + (bl ? ((bl + 1) << 17) - 1 : 0);
+ bat_addrs[bat_index].limit = virt + ((bl + 1) << 17) - 1;
bat_addrs[bat_index].phys = phys;
- if ( 0 == bl ) {
+ bat_in_use |= 1 << bat_index;
+ if (size < (1 << 17)) {
/* size of 0 tells us to switch it off */
- bat.bat.batu.vp = 0;
- bat.bat.batu.vs = 0;
+ bat.bat.batu.vp = 0;
+ bat.bat.batu.vs = 0;
+ bat_in_use &= ~(1 << bat_index);
+ /* mimic old behavior when bl was 0 (bl==0 is actually legal; it doesn't
+ * indicate a size of zero). We now accept bl==0 and look at the size.
+ */
+ bat_addrs[bat_index].limit = virt;
}
+ do_dssall ();
switch (bat_index) {
- case 0 : asm_setdbat0(bat.word[0], bat.word[1]); break;
- case 1 : asm_setdbat1(bat.word[0], bat.word[1]); break;
- case 2 : asm_setdbat2(bat.word[0], bat.word[1]); break;
- case 3 : asm_setdbat3(bat.word[0], bat.word[1]); break;
- default: printk("bat.c : invalid BAT bat_index\n");
+ case 0:
+ asm_setdbat0 (bat.words.u, bat.words.l);
+ break;
+ case 1:
+ asm_setdbat1 (bat.words.u, bat.words.l);
+ break;
+ case 2:
+ asm_setdbat2 (bat.words.u, bat.words.l);
+ break;
+ case 3:
+ asm_setdbat3 (bat.words.u, bat.words.l);
+ break;
+ /* cpu check already done in check_bat_index */
+ case 4:
+ asm_setdbat4 (bat.words.u, bat.words.l);
+ break;
+ case 5:
+ asm_setdbat5 (bat.words.u, bat.words.l);
+ break;
+ case 6:
+ asm_setdbat6 (bat.words.u, bat.words.l);
+ break;
+ case 7:
+ asm_setdbat7 (bat.words.u, bat.words.l);
+ break;
+ default: /* should never get here anyways */
+ break;
+ }
+ rtems_interrupt_enable (level);
+}
+
+int
+getdbat (int idx, unsigned long *pu, unsigned long *pl)
+{
+ unsigned long u, l;
+
+ if (check_bat_index (idx)) {
+ printk ("Invalid BAT #%i\n", idx);
+ return -1;
+ }
+ switch (idx) {
+ case 0:
+ GETBAT (DBAT0, u, l);
+ break;
+ case 1:
+ GETBAT (DBAT1, u, l);
+ break;
+ case 2:
+ GETBAT (DBAT2, u, l);
+ break;
+ case 3:
+ GETBAT (DBAT3, u, l);
+ break;
+ /* cpu check already done in check_bat_index */
+ case 4:
+ GETBAT (DBAT4, u, l);
+ break;
+ case 5:
+ GETBAT (DBAT5, u, l);
+ break;
+ case 6:
+ GETBAT (DBAT6, u, l);
+ break;
+ case 7:
+ GETBAT (DBAT7, u, l);
+ break;
+ default: /* should never get here anyways */
+ return -1;
+ }
+ if (pu) {
+ *pu = u;
+ }
+ if (pl) {
+ *pl = l;
+ }
+
+ if (!pu && !pl) {
+ /* dump */
+ ubat b;
+ b.words.u = u;
+ b.words.l = l;
+ printk ("Raw DBAT %i contents; UPPER: (0x%08x)", idx, u);
+ printk (" BEPI: 0x%08x", b.bat.batu.bepi);
+ printk (" BL: 0x%08x", (u >> 2) & ((1 << 15) - 1));
+ printk (" VS: 0b%i", b.bat.batu.vs);
+ printk (" VP: 0b%i", b.bat.batu.vp);
+ printk ("\n");
+ printk (" LOWER: (0x%08x)", l);
+ printk (" RPN: 0x%08x", b.bat.batl.brpn);
+ printk (" wimg: 0b%1i%1i%1i%1i", b.bat.batl.w, b.bat.batl.i,
+ b.bat.batl.m, b.bat.batl.g);
+ printk (" PP: 0x%1x", b.bat.batl.pp);
+ printk ("\n");
+ printk ("Covering EA Range: ");
+ if (bat_in_use & (1 << idx))
+ printk ("0x%08x .. 0x%08x\n", bat_addrs[idx].start,
+ bat_addrs[idx].limit);
+ else
+ printk ("<none> (BAT off)\n");
+
}
+ return u;
}
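
As a side note on the new bat.c above: the BATU word written by setdbat() encodes the block length as a mask. A small stand-alone sketch of that encoding for the ordinary power-of-two case; it is not part of the patch, and the helper name and example values are invented.

#include <stdio.h>

/* For a power-of-two size >= 1<<17, check_bat_size() returns
 * bl = (size >> 17) - 1 and setdbat() stores it in BATU as
 * (bl << 2) | 2, i.e. Vs=1, Vp=0.
 */
static unsigned long batu_for (unsigned long virt, unsigned long size)
{
  unsigned long bl = (size >> 17) - 1;
  return virt | (bl << 2) | 2;
}

int main (void)
{
  /* 256 MB block at 0x80000000 -> BATU reads 0x80001ffe */
  printf ("0x%08lx\n", batu_for (0x80000000UL, 1UL << 28));
  return 0;
}
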
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
index acd7509d8a..4748f5e6ee 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/bat.h
@@ -25,16 +25,53 @@
#include <libcpu/mmu.h>
#include <libcpu/pgtable.h>
-#include <rtems/bspIo.h>
#define IO_PAGE (_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
+#ifndef ASM
+/* Take no risks -- the essential parts of this routine run with
+ * interrupts disabled!
+ *
+ * The routine does basic parameter checks:
+ * - Index must be 0..3 (0..7 on 7455, 7457).
+ * If an index > 3 is requested the 745x is
+ * programmed to enable the higher BATs.
+ * - Size must be a power of two and <= 1<<28
+ * (<=1<<31 on 7455, 7457. Also, on these processors
+ * the special value 0xffffffff is allowed which stands
+ * for 1<<32).
+ * If a size > 1<<28 is requested, the 745x is
+ * programmed to enable the larger block sizes.
+ * - Bat ranges must not overlap.
+ * - Physical & virtual addresses must be aligned
+ * to the size.
+ */
extern void setdbat(int bat_index, unsigned long virt, unsigned long phys,
unsigned int size, int flags);
+/* read DBAT # 'idx' into *pu/*pl. NULL pointers may be passed.
+ * If pu and pl are NULL, the bat contents are dumped to the console (printk).
+ *
+ * RETURNS: upper BAT contents or (-1) if index is invalid
+ */
+extern int getdbat(int bat_index, unsigned long *pu, unsigned long *pl);
+
+extern void asm_setdbat0(unsigned int uperPart, unsigned int lowerPart);
extern void asm_setdbat1(unsigned int uperPart, unsigned int lowerPart);
extern void asm_setdbat2(unsigned int uperPart, unsigned int lowerPart);
extern void asm_setdbat3(unsigned int uperPart, unsigned int lowerPart);
-extern void asm_setdbat4(unsigned int uperPart, unsigned int lowerPart);
+#else
+
+/* Initialize all bats (upper and lower) to zero. This routine should *only*
+ * be called during early BSP initialization when no C-ABI is available
+ * yet.
+ * This routine clobbers r3 and r4.
+ * NOTE: on 7450 CPUs all 8 dbat/ibat units are cleared. On 601 CPUs only
+ * 4 ibats.
+ */
+ .globl CPU_clear_bats_early
+ .type CPU_clear_bats_early,@function
+
+#endif
#endif /* _LIBCPU_BAT_H */
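
To illustrate the argument checks documented in the new bat.h comment above, here are calls the reworked setdbat() rejects; each produces a printk() and leaves the BATs untouched. The addresses and sizes are invented for the example.

#include <libcpu/bat.h>

/* none of these calls changes a BAT; each fails one of the documented checks */
void bat_check_examples (void)
{
  setdbat (2, 0x10000000, 0x10000000, 0x00300000, IO_PAGE); /* size is not a power of two */
  setdbat (2, 0x10010000, 0x10010000, 1 << 20,    IO_PAGE); /* virt not aligned to the size */
  setdbat (9, 0x10000000, 0x10000000, 1 << 20,    IO_PAGE); /* index out of range even on 745x */
}
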
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
index 55ebcc8b1e..89651d2218 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
@@ -10,7 +10,7 @@
*
* The license and distribution terms for this file may be
* found in found in the file LICENSE in this distribution or at
- * http://www.rtems.com/license/LICENSE.
+ * http://www.OARcorp.com/rtems/license.html.
*
* T. Straumann - 11/2001: added support for 7400 (no AltiVec yet)
* S.K. Feng - 10/2003: added support for 7455 (no AltiVec yet)
@@ -20,6 +20,7 @@
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <libcpu/io.h>
+#include <libcpu/bat.h>
/* Unfortunately, the CPU types defined in cpu.h are
* an 'enum' type and hence not available :-(
@@ -34,6 +35,7 @@
#define PPC_604r 0xA
#define PPC_7400 0xC
#define PPC_7455 0x8001
+#define PPC_7457 0x8002
#define PPC_620 0x16
#define PPC_860 0x50
#define PPC_821 PPC_860
@@ -53,76 +55,8 @@
#define DL1HWF (1<<(31-8))
#define L2HWF (1<<(31-20))
-/*
- * Each setdbat routine start by invalidating the DBAT as some
- * proc (604e) request the valid bit set to 0 before accepting
- * to write in BAT
- */
-
- .globl asm_setdbat0
- .type asm_setdbat0,@function
-asm_setdbat0:
- li r0,0
- sync
- isync
- mtspr DBAT0U,r0
- mtspr DBAT0L,r0
- sync
- isync
- mtspr DBAT0L, r4
- mtspr DBAT0U, r3
- sync
- isync
- blr
-
- .globl asm_setdbat1
- .type asm_setdbat1,@function
-asm_setdbat1:
- li r0,0
- sync
- isync
- mtspr DBAT1U,r0
- mtspr DBAT1L,r0
- sync
- isync
- mtspr DBAT1L, r4
- mtspr DBAT1U, r3
- sync
- isync
- blr
-
- .globl asm_setdbat2
- .type asm_setdbat2,@function
-asm_setdbat2:
- li r0,0
- sync
- isync
- mtspr DBAT2U,r0
- mtspr DBAT2L,r0
- sync
- isync
- mtspr DBAT2L, r4
- mtspr DBAT2U, r3
- sync
- isync
- blr
+#FIXME Should really move this to C code
- .globl asm_setdbat3
- .type asm_setdbat3,@function
-asm_setdbat3:
- li r0,0
- sync
- isync
- mtspr DBAT3U,r0
- mtspr DBAT3L,r0
- sync
- isync
- mtspr DBAT3L, r4
- mtspr DBAT3U, r3
- sync
- isync
- blr
-
.globl L1_caches_enables
.type L1_caches_enables, @function
@@ -157,7 +91,10 @@ L1_caches_enables:
cmpi 2,r9,PPC_7400 /* or 7400 */
cror 6,6,10
cmpli 0,r9,PPC_7455 /* or 7455 */
- bne 2f
+ beq 1f
+ cmpli 0,r9,PPC_7457 /* or 7457 */
+ bne 2f
+1:
/* 7455:link register stack,branch folding &
* TBEN : enable the time base and decrementer.
* EMCP bit is defined in HID1. However, it's not used
@@ -168,7 +105,7 @@ L1_caches_enables:
ori r11,r11,(HID0_LRSTK|HID0_FOLD|HID0_TBEN)@l
2: cror 2,2,10
bne 3f
- ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 & 7455 */
+ ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 , 7455 , 7457 */
3: cror 2,2,6
bne 4f
/* on 7400 SIED is actually SGE (store gathering enable) */
@@ -199,6 +136,8 @@ get_L2CR:
beq 1f
cmplwi r3,PPC_7455 /* it's a 7455 */
beq 1f
+ cmplwi r3,PPC_7457 /* it's a 7457 */
+ beq 1f
li r3,-1
blr
@@ -248,6 +187,8 @@ set_L2CR:
beq thisIs750
cmplwi r0,PPC_7455
beq thisIs750
+ cmplwi r0,PPC_7457
+ beq thisIs750
li r3,-1
blr
@@ -293,9 +234,12 @@ disableCache:
mtmsr r4
isync /* make sure memory accesses have completed */
cmplwi r0,PPC_7455 /* 7455 ? */
+ beq 1f
+ cmplwi r0,PPC_7457 /* 7457 ? */
bne not745x
- /* 7455:L1 Load/Flush, L2, L3 : hardware flush */
- /* If not using AltiVec data streaming instructions,DSSALL not necessary */
+1:
+ /* 745x:L1 Load/Flush, L2, L3 : hardware flush */
+ DSSALL
sync
mfspr r4, MSSCR0
rlwinm r4,r4,0,29,0 /* Turn off the L2PFE bits */
@@ -406,6 +350,8 @@ get_L3CR:
rlwinm r3,r3,16,16,31
cmplwi r3,PPC_7455 /* it's a 7455 */
beq 1f
+ cmplwi r3,PPC_7457 /* it's a 7457 */
+ beq 1f
li r3,-1
blr
@@ -434,6 +380,8 @@ set_L3CR:
rlwinm r0,r0,16,16,31
cmplwi r0,PPC_7455
beq thisIs7455
+ cmplwi r0,PPC_7457
+ beq thisIs7455
li r3,-1
blr
@@ -515,3 +463,64 @@ enableL3Cache:
mtspr L3CR,r3
sync
blr
+
+/*
+ * An undocumented "feature" of 604e requires that the v bit
+ * be cleared before changing BAT values.
+ *
+ * Also, newer IBM firmware does not clear bat3 and 4 so
+ * this makes sure it's done.
+ * -- Cort
+ */
+ .globl CPU_clear_bats_early
+ .type CPU_clear_bats_early,@function
+CPU_clear_bats_early:
+ li r3,0
+ mfspr r4,PVR
+ rlwinm r4,r4,16,16,31 /* r4 = 1 for 601, 4 for 604 */
+ cmpwi r4, 1
+ sync
+ isync
+ beq 1f
+ cmplwi r4,0x8001 /* 7445, 7455 (0x8001), 7447, 7457 (0x8002) */
+ blt 2f /* 7447a (0x8003) and 7448 (0x8004) have 16 bats */
+ cmplwi r4,0x8004
+ bgt 2f
+ mtspr DBAT4U,r3
+ mtspr DBAT4L,r3
+ mtspr DBAT5U,r3
+ mtspr DBAT5L,r3
+ mtspr DBAT6U,r3
+ mtspr DBAT6L,r3
+ mtspr DBAT7U,r3
+ mtspr DBAT7L,r3
+ mtspr IBAT4U,r3
+ mtspr IBAT4L,r3
+ mtspr IBAT5U,r3
+ mtspr IBAT5L,r3
+ mtspr IBAT6U,r3
+ mtspr IBAT6L,r3
+ mtspr IBAT7U,r3
+ mtspr IBAT7L,r3
+2:
+ mtspr DBAT0U,r3
+ mtspr DBAT0L,r3
+ mtspr DBAT1U,r3
+ mtspr DBAT1L,r3
+ mtspr DBAT2U,r3
+ mtspr DBAT2L,r3
+ mtspr DBAT3U,r3
+ mtspr DBAT3L,r3
+1:
+ mtspr IBAT0U,r3
+ mtspr IBAT0L,r3
+ mtspr IBAT1U,r3
+ mtspr IBAT1L,r3
+ mtspr IBAT2U,r3
+ mtspr IBAT2L,r3
+ mtspr IBAT3U,r3
+ mtspr IBAT3L,r3
+ sync
+ isync
+ blr
+
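
A closing remark on the do_dssall() helper in the new bat.c above: it emits the dssall opcode as a raw word so the file need not be built with -maltivec. The same pattern works for any instruction the toolchain flags might not accept; a minimal sketch, where only the 0x7e00066c encoding is taken from the patch.

/* emit 'dssall' by its raw encoding, as bat.c's do_dssall() does */
#define DSSALL_OPCODE 0x7e00066c

static inline void dssall (void)
{
  __asm__ volatile (" .long %0" : : "i" (DSSALL_OPCODE) : "memory");
}
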