summaryrefslogtreecommitdiffstats
path: root/c
diff options
context:
space:
mode:
authorEric Norum <WENorum@lbl.gov>2004-10-20 15:42:24 +0000
committerEric Norum <WENorum@lbl.gov>2004-10-20 15:42:24 +0000
commit83d7232232cc6ecaa8defb9b5658c21a7f32c86a (patch)
tree3c822dceabe3072add629a589b76eb5a44be1463 /c
parentAdd MVME5500 BSP. (diff)
downloadrtems-83d7232232cc6ecaa8defb9b5658c21a7f32c86a.tar.bz2
Add Kate Feng's MVME5500 BSP.
Diffstat (limited to 'c')
-rw-r--r--c/src/lib/libcpu/powerpc/ChangeLog8
-rw-r--r--c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c1
-rw-r--r--c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S230
-rw-r--r--c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c21
-rw-r--r--c/src/lib/libcpu/powerpc/rtems/powerpc/powerpc.h12
-rw-r--r--c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c2
-rw-r--r--c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h1
7 files changed, 240 insertions, 35 deletions
diff --git a/c/src/lib/libcpu/powerpc/ChangeLog b/c/src/lib/libcpu/powerpc/ChangeLog
index d98b3913c3..12f4a2502b 100644
--- a/c/src/lib/libcpu/powerpc/ChangeLog
+++ b/c/src/lib/libcpu/powerpc/ChangeLog
@@ -1,6 +1,12 @@
2004-10-20 Eric Norum <norume@aps.anl.gov>
- * configure.ac: Add MPC7455 support
+ Add Kate Feng's MPC7455 support
+ * configure.ac
+ * mpc6xx/exceptions/raw_exception.c
+ * mpc6xx/mmu/mmuAsm.S
+ * mpc6xx/mmu/pte121.c
+ * shared/include/cpuIdent.c
+ * shared/include/cpuIdent.h
2004-10-19 Ralf Corsepius <ralf_corsepius@rtems.org>
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
index 3ea32b1829..1989a9bfb0 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/exceptions/raw_exception.c
@@ -126,6 +126,7 @@ int mpc60x_vector_is_valid(rtems_vector vector)
case PPC_604:
case PPC_604e:
case PPC_604r:
+ case PPC_7455: /* Kate Feng */
if (!mpc604_vector_is_valid(vector)) {
return 0;
}
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
index b69d9eb8bf..e97ac653fb 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/mmuAsm.S
@@ -13,6 +13,8 @@
* http://www.rtems.com/license/LICENSE.
*
* T. Straumann - 11/2001: added support for 7400 (no AltiVec yet)
+ * S.K. Feng - 10/2003: added support for 7455 (no AltiVec yet)
+ *
*/
#include <rtems/asm.h>
@@ -31,6 +33,7 @@
#define PPC_604e 0x9
#define PPC_604r 0xA
#define PPC_7400 0xC
+#define PPC_7455 0x8001
#define PPC_620 0x16
#define PPC_860 0x50
#define PPC_821 PPC_860
@@ -50,9 +53,7 @@
#define DL1HWF (1<<(31-8))
#define L2HWF (1<<(31-20))
-
-
-
+
/*
* Each setdbat routine start by invalidating the DBAT as some
* proc (604e) request the valid bit set to 0 before accepting
@@ -147,16 +148,28 @@ L1_caches_enables:
mtspr HID0,r11 /* enable caches */
sync
isync
- cmpi 0,r9,PPC_604 /* check for 604 */
- cmpi 1,r9,PPC_604e /* or 604e */
- cmpi 2,r9,PPC_604r /* or mach5 */
- cror 2,2,6
- cror 2,2,10
- cmpi 1,r9,PPC_750 /* or 750 */
- cror 2,2,6
- cmpi 1,r9,PPC_7400 /* or 7400 */
+ cmpi 1,r9,PPC_604 /* check for 604 */
+ cmpi 2,r9,PPC_604e /* or 604e */
+ cmpi 3,r9,PPC_604r /* or mach5 */
+ cror 6,6,10
+ cror 6,6,14
+ cmpi 2,r9,PPC_750 /* or 750 */
+ cror 6,6,10
+ cmpi 2,r9,PPC_7400 /* or 7400 */
+ cror 6,6,10
+ cmpli 0,r9,PPC_7455 /* or 7455 */
+ bne 2f
+ /* 7455:link register stack,branch folding &
+ * TBEN : enable the time base and decrementer.
+ * EMCP bit is defined in HID1. However, it's not used
+ * on the MVME5500 board because of the GT64260 (it is connected
+ * to a pull-up).
+ */
+ oris r11,r11,(HID0_LRSTK|HID0_FOLD|HID0_TBEN)@h
+ ori r11,r11,(HID0_LRSTK|HID0_FOLD|HID0_TBEN)@l
+2: cror 2,2,10
bne 3f
- ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 */
+ ori r11,r11,HID0_BTIC /* enable branch tgt cache on 7400 & 7455 */
3: cror 2,2,6
bne 4f
/* on 7400 SIED is actually SGE (store gathering enable) */
@@ -164,9 +177,17 @@ L1_caches_enables:
bne 2,5f
ori r11,r11,HID0_BTCD
5: mtspr HID0,r11 /* superscalar exec & br history tbl */
+ sync /* for SGE bit */
+ isync /* P2-17 to 2-22 in MPC7450UM */
4:
blr
-
+
+ .globl get_L1CR
+.type get_L1CR, @function
+get_L1CR:
+ mfspr r3,HID0
+ blr
+
.globl get_L2CR
.type get_L2CR, @function
get_L2CR:
@@ -177,7 +198,9 @@ get_L2CR:
beq 1f
cmplwi r3,PPC_7400 /* it's a 7400 */
beq 1f
- li r3,0
+ cmplwi r3,PPC_7455 /* it's a 7455 */
+ beq 1f
+ li r3,-1
blr
1:
@@ -224,6 +247,8 @@ set_L2CR:
beq thisIs750
cmplwi r0,PPC_7400
beq thisIs750
+ cmplwi r0,PPC_7455
+ beq thisIs750
li r3,-1
blr
@@ -234,18 +259,18 @@ thisIs750:
/* See if we want to perform a global inval this time. */
rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
- rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
+ rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
- or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
- mfmsr r7 /* shut off interrupts around critical flush/invalidate sections */
+ or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
+ mfmsr r7 /* shut off interrupts around critical flush/invalidate sections */
rlwinm r4,r7,0,17,15 /* Turn off EE bit - an external exception while we are flushing
the cache is fatal (comment this line and see!) */
mtmsr r4
- bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
+ bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
- cmplwi r0,PPC_7400 /* > 7400 ? */
- bne disableCache /* use traditional method */
+ cmplwi r0,PPC_7400 /* 7400 ? */
+ bne disableCache /* use traditional method */
/* On the 7400, they recommend using the hardware flush feature */
DSSALL /* stop all data streams */
@@ -257,26 +282,60 @@ thisIs750:
sync
/* L1 flushed */
mfspr r4, L2CR
- ori r4, r4, L2HWF
+ ori r4, r4, L2HWF
mtspr L2CR, r4
sync
/* L2 flushed */
- b flushDone
+ b flushDone
disableCache:
/* Disable the cache. First, we turn off data relocation. */
rlwinm r4,r4,0,28,26 /* Turn off DR bit */
mtmsr r4
- isync /* make sure memory accesses have completed */
+ isync /* make sure memory accesses have completed */
+ cmplwi r0,PPC_7455 /* 7455 ? */
+ bne not745x
+ /* 7455:L1 Load/Flush, L2, L3 : hardware flush */
+ /* If not using AltiVec data streaming instructions, DSSALL is not necessary */
+ sync
+ mfspr r4, MSSCR0
+ rlwinm r4,r4,0,29,0 /* Turn off the L2PFE bits */
+ mtspr MSSCR0, r4
+ sync
+ /* flush L1 first */
+ lis r4,0x0001
+ mtctr r4
+ li r4,0
+ li r0,0
+loadFlush:
+ lwzx r0,r0,r4
+ dcbf r0,r4
+ addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
+ bdnz loadFlush
+ sync
+ /* Set the L2CR[L2IO & L2DO] bits to completely lock the L2 cache */
+ mfspr r0, L2CR
+ lis r4,L2CR_LOCK_745x@h
+ ori r4,r4,L2CR_LOCK_745x@l
+ or r4,r0,r4
+ rlwinm r4,r4,0,11,9 /* make sure the invalidate bit is off */
+ mtspr L2CR, r4
+ sync
+ ori r4, r4, L2HWF
+ mtspr L2CR, r4
+ sync
+ /* L2 flushed,L2IO & L2DO got cleared in the dontDisableCache: */
+ b reenableDR
+not745x:
/*
Now, read the first 2MB of memory to put new data in the cache.
(Actually we only need the size of the L2 cache plus
the size of the L1 cache, but 2MB will cover everything just to be safe).
*/
- lis r4,0x0001
+ lis r4,0x0001
mtctr r4
- li r4,0
+ li r4,0
loadLoop:
lwzx r0,r0,r4
addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
@@ -292,7 +351,7 @@ flushLoop:
addi r4,r4,CACHE_LINE_SIZE /* Go to start of next cache line */
bdnz flushLoop
sync
-
+reenableDR:
rlwinm r4,r7,0,17,15 /* still mask EE but reenable data relocation */
mtmsr r4
isync
@@ -338,3 +397,122 @@ enableCache:
mtspr L2CR,r3
sync
blr
+
+
+ .globl get_L3CR
+ .type get_L3CR, @function
+get_L3CR:
+ /* Make sure this is a 7455 chip */
+ mfspr r3,PVR
+ rlwinm r3,r3,16,16,31
+ cmplwi r3,PPC_7455 /* it's a 7455 */
+ beq 1f
+ li r3,-1
+ blr
+
+1:
+ /* Return the L3CR contents */
+ mfspr r3,L3CR
+ blr
+
+ .globl set_L3CR
+ .type set_L3CR, @function
+set_L3CR:
+ /* Usage:
+ * When setting the L3CR register, you must do a few special things.
+ * If you are enabling the cache, you must perform a global invalidate.
+ * Then call cpu_enable_l3cr(l3cr).
+ * If you are disabling the cache, you must flush the cache contents first.
+ * This routine takes care of doing these things. If you
+ * want to modify the L3CR contents after the cache has been enabled,
+ * the recommended procedure is to first call __setL3CR(0) to disable
+ * the cache and then call cpu_enable_l3cr with the new values for
+ * L3CR.
+ */
+
+ /* Make sure this is a 7455 chip */
+ mfspr r0,PVR
+ rlwinm r0,r0,16,16,31
+ cmplwi r0,PPC_7455
+ beq thisIs7455
+ li r3,-1
+ blr
+
+thisIs7455:
+ /* Get the current enable bit of the L3CR into r4 */
+ mfspr r4,L3CR
+ rlwinm r4,r4,0,0,0
+
+ /* See if we want to perform a global inval this time. */
+ rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
+ rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
+ rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
+ rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
+ or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
+ mfmsr r7 /* shut off interrupts around critical flush/invalidate sections */
+ rlwinm r4,r7,0,17,15 /* Turn off EE bit - an external exception while we are flushing
+ the cache is fatal (comment this line and see!) */
+ mtmsr r4
+ bne dontDisableL3Cache /* Only disable the cache if L3CRApply has the enable bit off */
+ /* Before the L3 is disabled, it must be flushed to prevent coherency problems */
+ /* First, we turn off data relocation. */
+ rlwinm r4,r4,0,28,26 /* Turn off DR bit */
+ mtmsr r4
+ isync /* make sure memory accesses have completed */
+ /* 7455: L3 : hardware flush
+ * Set the L3CR[L3IO & L3DO] bits to completely lock the L3 cache */
+ mfspr r0, L3CR
+ lis r4, L3CR_LOCK_745x@h
+ ori r4,r4, L3CR_LOCK_745x@l
+ or r4,r0,r4
+ rlwinm r4,r4,0,11,9 /* make sure the invalidate bit is off */
+ mtspr L3CR, r4
+ sync
+ ori r4, r4, L3CR_L3HWF
+ mtspr L3CR, r4
+ sync
+ /* L3 flushed,L3IO & L3DO got cleared in the dontDisableL3Cache: */
+ rlwinm r4,r7,0,17,15 /* still mask EE but reenable data relocation */
+ mtmsr r4
+ isync
+
+ /* Turn off the L3CR enable bit. */
+ rlwinm r3,r3,0,1,31
+
+dontDisableL3Cache:
+ /* Set up the L3CR configuration bits */
+ sync
+ mtspr L3CR,r3
+ sync
+ifL3Inval:
+ cmplwi r6,0
+ beq noL3Inval
+
+ /* Perform a global invalidation */
+ oris r3,r3,0x0020
+ sync
+ mtspr L3CR,r3
+ sync
+invalCompleteL3: /* Wait for the invalidation to complete */
+ mfspr r3,L3CR
+ rlwinm. r4,r3,0,31,31
+ bne invalCompleteL3
+
+ rlwinm r3,r3,0,11,9; /* Turn off the L3I bit */
+ sync
+ mtspr L3CR,r3
+ sync
+
+noL3Inval:
+ /* re-enable interrupts, i.e. restore original MSR */
+ mtmsr r7 /* (no sync needed) */
+ /* See if we need to enable the cache */
+ cmplwi r5,0
+ beqlr
+
+enableL3Cache:
+ /* Enable the cache */
+ oris r3,r3,0x8000
+ mtspr L3CR,r3
+ sync
+ blr
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
index 37d2eb2891..5edc2cf82e 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
@@ -382,7 +382,8 @@ triv121PgTblInit(unsigned long base, unsigned ldSize)
PPC_604e !=current_ppc_cpu &&
PPC_604r !=current_ppc_cpu &&
PPC_750 !=current_ppc_cpu &&
- PPC_7400 !=current_ppc_cpu )
+ PPC_7400 !=current_ppc_cpu &&
+ PPC_7455 !=current_ppc_cpu )
return 0; /* unsupported by this CPU */
pgTbl.base=(PTE)base;
@@ -425,24 +426,24 @@ triv121PgTblGet(void)
long
triv121PgTblMap(
Triv121PgTbl pt,
- long vsid,
+ long ovsid,
unsigned long start,
unsigned long numPages,
unsigned attributes,
unsigned protection
)
{
-int i,pass;
+int i,pass;
unsigned long pi;
-PTE pte;
-
+PTE pte;
+long vsid;
/* already activated - no change allowed */
if (pt->active)
return -1;
- if (vsid < 0) {
+ if (ovsid < 0) {
/* use 1:1 mapping */
- vsid = VSID121(start);
+ ovsid = VSID121(start);
}
#ifdef DEBUG
@@ -465,7 +466,11 @@ PTE pte;
*/
for (pass=0; pass<2; pass++) {
/* check if we would succeed during the first pass */
- for (i=0, pi=PI121(start); i<numPages; i++,pi++) {
+ for (i=0, pi=PI121(start), vsid = ovsid; i<numPages; i++,pi++) {
+ if ( pi >= 1<<LD_PI_SIZE ) {
+ vsid++;
+ pi = 0;
+ }
/* leave alone existing mappings for this EA */
if (!alreadyMapped(pt, vsid, pi)) {
if (!(pte=slotFor(pt, vsid, pi))) {
diff --git a/c/src/lib/libcpu/powerpc/rtems/powerpc/powerpc.h b/c/src/lib/libcpu/powerpc/rtems/powerpc/powerpc.h
index 061772b656..15914bbf3e 100644
--- a/c/src/lib/libcpu/powerpc/rtems/powerpc/powerpc.h
+++ b/c/src/lib/libcpu/powerpc/rtems/powerpc/powerpc.h
@@ -263,6 +263,18 @@ extern "C" {
#define PPC_I_CACHE 32768
#define PPC_D_CACHE 32768
+#elif defined(mpc7455)
+/*
+ * Added by S.K. Feng <feng1@bnl.gov> 10/03
+ */
+
+#define CPU_MODEL_NAME "PowerPC 7455"
+
+#define PPC_ALIGNMENT 8
+#define PPC_CACHE_ALIGNMENT 32
+#define PPC_I_CACHE 32768
+#define PPC_D_CACHE 32768
+
#elif defined(mpc8260)
/*
* Added by Andy Dachs <a.dachs@sstl.co.uk> 23/11/2000
diff --git a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
index 8c8cd8e559..74e18078cf 100644
--- a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
+++ b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.c
@@ -36,6 +36,7 @@ char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu)
case PPC_604: return "MPC604";
case PPC_750: return "MPC750";
case PPC_7400: return "MPC7400";
+ case PPC_7455: return "MPC7455";
case PPC_604e: return "MPC604e";
case PPC_604r: return "MPC604r";
case PPC_620: return "MPC620";
@@ -60,6 +61,7 @@ ppc_cpu_id_t get_ppc_cpu_type()
case PPC_604r:
case PPC_750:
case PPC_7400:
+ case PPC_7455:
case PPC_604e:
case PPC_620:
case PPC_860:
diff --git a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
index d68a4bfeb2..03205c772f 100644
--- a/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
+++ b/c/src/lib/libcpu/powerpc/shared/include/cpuIdent.h
@@ -29,6 +29,7 @@ typedef enum
PPC_604e = 0x9,
PPC_604r = 0xA,
PPC_7400 = 0xC,
+ PPC_7455 = 0x8001, /* Kate Feng */
PPC_620 = 0x16,
PPC_860 = 0x50,
PPC_821 = PPC_860,