summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTill Straumann <strauman@slac.stanford.edu>2005-11-03 02:26:08 +0000
committerTill Straumann <strauman@slac.stanford.edu>2005-11-03 02:26:08 +0000
commit1b1b43ccc1a6d654199ddb6329366d2e44bdaea3 (patch)
tree54e658a14060013c6858ba635f0cb43f1814a198
parent2005-11-02 straumanatslacdotstanford.edu (diff)
downloadrtems-1b1b43ccc1a6d654199ddb6329366d2e44bdaea3.tar.bz2
2005-11-02 straumanatslacdotstanford.edu
* mpc6xx/mmu/pte121.c, mpc6xx/mmu/pte121.h: enhancements to mpc6xx page table support - PTEs can now be modified even if the page table is already active; bugfix: address range crossing 256MB boundary was not handled correctly
-rw-r--r--c/src/lib/libcpu/powerpc/ChangeLog7
-rw-r--r--c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c1257
-rw-r--r--c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.h163
3 files changed, 842 insertions, 585 deletions
diff --git a/c/src/lib/libcpu/powerpc/ChangeLog b/c/src/lib/libcpu/powerpc/ChangeLog
index 134d9b4fe3..8121787325 100644
--- a/c/src/lib/libcpu/powerpc/ChangeLog
+++ b/c/src/lib/libcpu/powerpc/ChangeLog
@@ -1,5 +1,12 @@
2005-11-02 straumanatslacdotstanford.edu
+ * mpc6xx/mmu/pte121.c, mpc6xx/mmu/pte121.h: enhancements to mpc6xx page
+ table support - PTEs can now be modified even if the page table is
+ already active; bugfix: address range crossing 256MB boundary was not
+ handled correctly
+
+2005-11-02 straumanatslacdotstanford.edu
+
* mpc6xx/mmu/bat.c, mpc6xx/mmu/bat.h, mpc6xx/mmu/mmuAsm.S: moved
assembly code to C; setdbat now supports high bats on 7450 CPUs;
added argument checking to setdbat; added getdbat; moved early
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
index 5edc2cf82e..86e0d8df43 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c
@@ -7,12 +7,12 @@
*/
/* Chose debugging options */
-#undef DEBUG_MAIN /* create a standalone (host) program for basic testing */
-#undef DEBUG /* target debugging and consistency checking */
-#undef DEBUG_EXC /* add exception handler which reenables BAT0 and recovers from a page fault */
+#undef DEBUG_MAIN /* create a standalone (host) program for basic testing */
+#undef DEBUG /* target debugging and consistency checking */
+#undef DEBUG_EXC /* add exception handler which reenables BAT0 and recovers from a page fault */
#ifdef DEBUG_MAIN
-#undef DEBUG /* must not use these together with DEBUG_MAIN */
+#undef DEBUG /* must not use these together with DEBUG_MAIN */
#undef DEBUG_EXC
#endif
@@ -22,6 +22,7 @@
#include <rtems.h>
#include <rtems/bspIo.h>
#include <libcpu/cpuIdent.h>
+#include <libcpu/spr.h>
#ifdef DEBUG_EXC
#include <bsp.h>
#include <bsp/vectors.h>
@@ -41,48 +42,49 @@
#ifndef DEBUG_MAIN
-#define LD_PHYS_SIZE 32 /* physical address space */
-#define LD_PG_SIZE 12 /* page size */
-#define LD_PTEG_SIZE 6 /* PTEG size */
-#define LD_PTE_SIZE 3 /* PTE size */
-#define LD_SEG_SIZE 28 /* segment size */
-#define LD_MIN_PT_SIZE 16 /* minimal size of a page table */
-#define LD_HASH_SIZE 19 /* lengh of a hash */
+#define LD_PHYS_SIZE 32 /* physical address space */
+#define LD_PG_SIZE 12 /* page size */
+#define LD_PTEG_SIZE 6 /* PTEG size */
+#define LD_PTE_SIZE 3 /* PTE size */
+#define LD_SEG_SIZE 28 /* segment size */
+#define LD_MIN_PT_SIZE 16 /* minimal size of a page table */
+#define LD_HASH_SIZE 19 /* length of a hash */
+#define LD_VSID_SIZE 24 /* vsid bits in seg. register */
#else /* DEBUG_MAIN */
/* Reduced 'fantasy' sizes for testing */
-#define LD_PHYS_SIZE 32 /* physical address space */
-#define LD_PG_SIZE 6 /* page size */
-#define LD_PTEG_SIZE 5 /* PTEG size */
-#define LD_PTE_SIZE 3 /* PTE size */
-#define LD_SEG_SIZE 28 /* segment size */
-#define LD_MIN_PT_SIZE 7 /* minimal size of a page table */
-#define LD_HASH_SIZE 19 /* lengh of a hash */
+#define LD_PHYS_SIZE 32 /* physical address space */
+#define LD_PG_SIZE 6 /* page size */
+#define LD_PTEG_SIZE 5 /* PTEG size */
+#define LD_PTE_SIZE 3 /* PTE size */
+#define LD_SEG_SIZE 28 /* segment size */
+#define LD_MIN_PT_SIZE 7 /* minimal size of a page table */
+#define LD_HASH_SIZE 19 /* length of a hash */
#endif /* DEBUG_MAIN */
/* Derived sizes */
/* Size of a page index */
-#define LD_PI_SIZE ((LD_SEG_SIZE) - (LD_PG_SIZE))
+#define LD_PI_SIZE ((LD_SEG_SIZE) - (LD_PG_SIZE))
/* Number of PTEs in a PTEG */
#define PTE_PER_PTEG (1<<((LD_PTEG_SIZE)-(LD_PTE_SIZE)))
/* Segment register bits */
-#define KEY_SUP (1<<30) /* supervisor mode key */
-#define KEY_USR (1<<29) /* user mode key */
+#define KEY_SUP (1<<30) /* supervisor mode key */
+#define KEY_USR (1<<29) /* user mode key */
/* The range of effective addresses to scan with 'tlbie'
* instructions in order to flush all TLBs.
* On the 750 and 7400, there are 128 two way I and D TLBs,
* indexed by EA[14:19]. Hence calling
- * tlbie rx
+ * tlbie rx
* where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
* is sufficient to do the job
*/
-#define NUM_TLB_PER_WAY 64 /* 750 and 7400 have 128 two way TLBs */
+#define NUM_TLB_PER_WAY 64 /* 750 and 7400 have 128 two way TLBs */
#define FLUSH_EA_RANGE (NUM_TLB_PER_WAY<<LD_PG_SIZE)
/*************************** MACRO DEFINITIONS ****************************/
@@ -108,6 +110,17 @@
/* page index of an EA */
#define PI121(ea) (((ea)>>LD_PG_SIZE) & ((1<<LD_PI_SIZE)-1))
+/* read VSID from segment register */
+#ifndef DEBUG_MAIN
+static unsigned32
+seg2vsid (unsigned32 ea)
+{
+ asm volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
+ return ea & ((1 << LD_VSID_SIZE) - 1);
+}
+#else
+#define seg2vsid(ea) VSID121(ea)
+#endif
/* Primary and secondary PTE hash functions */
@@ -125,28 +138,24 @@
/* Horrible Macros */
-#ifdef DEBUG
#ifdef __rtems__
/* must not use printf until multitasking is up */
-typedef void (*PrintF)(char *,...);
-static PrintF whatPrintf(void)
+typedef void (*PrintF) (char *, ...);
+static PrintF
+whatPrintf (void)
{
- return _Thread_Executing ?
- (PrintF)printf :
- printk;
+ return _Thread_Executing ? (PrintF) printf : printk;
}
#define PRINTF(args...) ((void)(whatPrintf())(args))
#else
#define PRINTF(args...) printf(args)
#endif
-#endif
#ifdef DEBUG
-unsigned long
-triv121PgTblConsistency(Triv121PgTbl pt, int pass, int expect);
+unsigned long triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expect);
-static int consistencyPass=0;
+static int consistencyPass = 0;
#define CONSCHECK(expect) triv121PgTblConsistency(&pgTbl,consistencyPass++,(expect))
#else
#define CONSCHECK(expect) do {} while (0)
@@ -154,58 +163,50 @@ static int consistencyPass=0;
/**************************** TYPE DEFINITIONS ****************************/
-/* A PTE entry */
-typedef struct PTERec_ {
- unsigned long v:1, vsid:24, h:1, api: 6;
- unsigned long rpn:20, pad: 3, r:1, c:1, wimg:4, marked:1, pp:2;
-} PTERec, *PTE;
-
/* internal description of a trivial page table */
-typedef struct Triv121PgTblRec_ {
- PTE base;
- unsigned long size;
- int active;
+typedef struct Triv121PgTblRec_
+{
+ APte base;
+ unsigned long size;
+ int active;
} Triv121PgTblRec;
/************************** FORWARD DECLARATIONS *************************/
#ifdef DEBUG_EXC
-static void
-myhdl(BSP_Exception_frame* excPtr);
+static void myhdl (BSP_Exception_frame * excPtr);
#endif
-#if defined(DEBUG_MAIN) || defined(DEBUG)
-static void
-dumpPte(PTE pte);
-#endif
+static void dumpPte (APte pte);
-#ifdef DEBUG
static void
-dumpPteg(unsigned long vsid, unsigned long pi, unsigned long hash);
+dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash);
unsigned long
-triv121IsRangeMapped(unsigned long start, unsigned long end);
-#endif
+triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end);
+
+static void do_dssall ();
/**************************** STATIC VARIABLES ****************************/
/* dont malloc - we might have to use this before
* we have malloc or even RTEMS workspace available
*/
-static Triv121PgTblRec pgTbl={0};
+static Triv121PgTblRec pgTbl = { 0 };
#ifdef DEBUG_EXC
-static void *ohdl; /* keep a pointer to the original handler */
+static void *ohdl; /* keep a pointer to the original handler */
#endif
/*********************** INLINES & PRIVATE ROUTINES ***********************/
/* compute the page table entry group (PTEG) of a hash */
-static inline PTE
-ptegOf(Triv121PgTbl pt, unsigned long hash)
+static inline APte
+ptegOf (Triv121PgTbl pt, unsigned long hash)
{
- hash &= ((1<<LD_HASH_SIZE)-1);
- return (PTE)(((unsigned long)pt->base) | ((hash<<LD_PTEG_SIZE) & (pt->size-1)));
+ hash &= ((1 << LD_HASH_SIZE) - 1);
+ return (APte) (((unsigned long) pt->
+ base) | ((hash << LD_PTEG_SIZE) & (pt->size - 1)));
}
/* see if a vsid/pi combination is already mapped
@@ -216,32 +217,35 @@ ptegOf(Triv121PgTbl pt, unsigned long hash)
* routine that 'pi' is actually an EA to
* be split into vsid and pi...
*/
-static PTE
-alreadyMapped(Triv121PgTbl pt, long vsid, unsigned long pi)
+static APte
+alreadyMapped (Triv121PgTbl pt, long vsid, unsigned long pi)
{
-int i;
-unsigned long hash,api;
-PTE pte;
-
- if (!pt->size)
- return 0;
-
- if (vsid<0) {
- vsid=VSID121(pi);
- pi=PI121(pi);
- }
-
- hash = PTE_HASH1(vsid,pi);
- api=API(pi);
- for (i=0, pte=ptegOf(pt,hash); i<PTE_PER_PTEG; i++,pte++)
- if (pte->v && pte->vsid==vsid && pte->api==api && 0==pte->h)
- return pte;
- /* try the secondary hash table */
- hash = PTE_HASH2(hash);
- for (i=0, pte=ptegOf(pt,hash); i<PTE_PER_PTEG; i++,pte++)
- if (pte->v && pte->vsid==vsid && pte->api==api && 1==pte->h)
- return pte;
- return 0;
+ int i;
+ unsigned long hash, api;
+ APte pte;
+
+ if (!pt->size)
+ return 0;
+
+ if (TRIV121_121_VSID == vsid) {
+ vsid = VSID121 (pi);
+ pi = PI121 (pi);
+ } else if (TRIV121_SEG_VSID == vsid) {
+ vsid = seg2vsid (pi);
+ pi = PI121 (pi);
+ }
+
+ hash = PTE_HASH1 (vsid, pi);
+ api = API (pi);
+ for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
+ if (pte->v && pte->vsid == vsid && pte->api == api && 0 == pte->h)
+ return pte;
+ /* try the secondary hash table */
+ hash = PTE_HASH2 (hash);
+ for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
+ if (pte->v && pte->vsid == vsid && pte->api == api && 1 == pte->h)
+ return pte;
+ return 0;
}
/* find the first available slot for vsid/pi
@@ -253,64 +257,64 @@ PTE pte;
* the slot was allocated by using the primary or
* the secondary hash, respectively.
*/
-static PTE
-slotFor(Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
+static APte
+slotFor (Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
{
-int i;
-unsigned long hash,api;
-PTE pte;
-
- /* primary hash */
- hash = PTE_HASH1(vsid,pi);
- api=API(pi);
- /* linear search thru all buckets for this hash */
- for (i=0, pte=ptegOf(pt,hash); i<PTE_PER_PTEG; i++,pte++) {
- if (!pte->v && !pte->marked) {
- /* found a free PTE; mark it as potentially used and return */
- pte->h=0; /* found by the primary hash fn */
- pte->marked=1;
- return pte;
- }
- }
+ int i;
+ unsigned long hash, api;
+ APte pte;
+
+ /* primary hash */
+ hash = PTE_HASH1 (vsid, pi);
+ api = API (pi);
+ /* linear search thru all buckets for this hash */
+ for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
+ if (!pte->v && !pte->marked) {
+ /* found a free PTE; mark it as potentially used and return */
+ pte->h = 0; /* found by the primary hash fn */
+ pte->marked = 1;
+ return pte;
+ }
+ }
#ifdef DEBUG
- /* Strange: if the hash table was allocated big enough,
- * this should not happen (when using a 1:1 mapping)
- * Give them some information...
- */
- PRINTF("## First hash bucket full - ");
- dumpPteg(vsid,pi,hash);
+ /* Strange: if the hash table was allocated big enough,
+ * this should not happen (when using a 1:1 mapping)
+ * Give them some information...
+ */
+ PRINTF ("## First hash bucket full - ");
+ dumpPteg (vsid, pi, hash);
#endif
- hash = PTE_HASH2(hash);
+ hash = PTE_HASH2 (hash);
#ifdef DEBUG
- PRINTF(" Secondary pteg is 0x%08x\n", (unsigned)ptegOf(pt,hash));
+ PRINTF (" Secondary pteg is 0x%08x\n", (unsigned) ptegOf (pt, hash));
#endif
- for (i=0, pte=ptegOf(pt,hash); i<PTE_PER_PTEG; i++,pte++) {
- if (!pte->v && !pte->marked) {
- /* mark this pte as potentially used */
- pte->marked=1;
- pte->h=1;
- return pte;
- }
- }
+ for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
+ if (!pte->v && !pte->marked) {
+ /* mark this pte as potentially used */
+ pte->marked = 1;
+ pte->h = 1;
+ return pte;
+ }
+ }
#ifdef DEBUG
- /* Even more strange - most likely, something is REALLY messed up */
- PRINTF("## Second hash bucket full - ");
- dumpPteg(vsid,pi,hash);
+ /* Even more strange - most likely, something is REALLY messed up */
+ PRINTF ("## Second hash bucket full - ");
+ dumpPteg (vsid, pi, hash);
#endif
- return 0;
+ return 0;
}
/* unmark all entries */
static void
-unmarkAll(Triv121PgTbl pt)
+unmarkAll (Triv121PgTbl pt)
{
-unsigned long n=pt->size / sizeof(PTERec);
-unsigned long i;
-PTE pte;
- for (i=0,pte=pt->base; i<n; i++,pte++)
- pte->marked=0;
+ unsigned long n = pt->size / sizeof (PTERec);
+ unsigned long i;
+ APte pte;
+ for (i = 0, pte = pt->base; i < n; i++, pte++)
+ pte->marked = 0;
}
@@ -324,25 +328,25 @@ PTE pte;
* (64k).
*/
unsigned long
-triv121PgTblLdMinSize(unsigned long size)
+triv121PgTblLdMinSize (unsigned long size)
{
-unsigned long i;
- /* round 'size' up to the next page boundary */
- size += (1<<LD_PG_SIZE)-1;
- size &= ~((1<<LD_PG_SIZE)-1);
- /* divide by number of PTEs and multiply
- * by the size of a PTE.
- */
- size >>= LD_PG_SIZE - LD_PTE_SIZE;
- /* find the next power of 2 >= size */
- for (i=0; i<LD_PHYS_SIZE; i++) {
- if ((1<<i) >= size)
- break;
- }
- /* pop up to the allowed minimum, if necessary */
- if (i<LD_MIN_PT_SIZE)
- i=LD_MIN_PT_SIZE;
- return i;
+ unsigned long i;
+ /* round 'size' up to the next page boundary */
+ size += (1 << LD_PG_SIZE) - 1;
+ size &= ~((1 << LD_PG_SIZE) - 1);
+ /* divide by number of PTEs and multiply
+ * by the size of a PTE.
+ */
+ size >>= LD_PG_SIZE - LD_PTE_SIZE;
+ /* find the next power of 2 >= size */
+ for (i = 0; i < LD_PHYS_SIZE; i++) {
+ if ((1 << i) >= size)
+ break;
+ }
+ /* pop up to the allowed minimum, if necessary */
+ if (i < LD_MIN_PT_SIZE)
+ i = LD_MIN_PT_SIZE;
+ return i;
}
/* initialize a trivial page table of 2^ldSize bytes
@@ -352,68 +356,69 @@ unsigned long i;
* or NULL on failure.
*/
Triv121PgTbl
-triv121PgTblInit(unsigned long base, unsigned ldSize)
+triv121PgTblInit (unsigned long base, unsigned ldSize)
{
- if (pgTbl.size) {
- /* already initialized */
- return 0;
- }
-
- if (ldSize < LD_MIN_PT_SIZE)
- return 0; /* too small */
-
- if (base & ((1<<ldSize)-1))
- return 0; /* misaligned */
-
- /* This was tested on 604r, 750 and 7400.
- * On other CPUs, verify that the TLB invalidation works
- * for a new CPU variant and that it has hardware PTE lookup/
- * TLB replacement before adding it to this list.
- *
- * NOTE: The 603 features no hardware PTE lookup - and
- * hence the page tables should NOT be used.
- * Although lookup could be implemented in
- * software this is probably not desirable
- * as it could have an impact on hard realtime
- * performance, screwing deterministic latency!
- * (Could still be useful for debugging, though)
- */
- if ( PPC_604 !=current_ppc_cpu &&
- PPC_604e !=current_ppc_cpu &&
- PPC_604r !=current_ppc_cpu &&
- PPC_750 !=current_ppc_cpu &&
- PPC_7400 !=current_ppc_cpu &&
- PPC_7455 !=current_ppc_cpu )
- return 0; /* unsupported by this CPU */
-
- pgTbl.base=(PTE)base;
- pgTbl.size=1<<ldSize;
- /* clear all page table entries */
- memset(pgTbl.base, 0, pgTbl.size);
-
- CONSCHECK(0);
-
- /* map the page table itself 'm' and 'readonly' */
- if (triv121PgTblMap(&pgTbl,
- TRIV121_121_VSID,
- base,
- (pgTbl.size >> LD_PG_SIZE),
- TRIV121_ATTR_M,
- TRIV121_PP_RO_PAGE) >= 0)
- return 0;
-
- CONSCHECK((pgTbl.size>>LD_PG_SIZE));
-
- return &pgTbl;
+ if (pgTbl.size) {
+ /* already initialized */
+ return 0;
+ }
+
+ if (ldSize < LD_MIN_PT_SIZE)
+ return 0; /* too small */
+
+ if (base & ((1 << ldSize) - 1))
+ return 0; /* misaligned */
+
+ /* This was tested on 604r, 750 and 7400.
+ * On other CPUs, verify that the TLB invalidation works
+ * for a new CPU variant and that it has hardware PTE lookup/
+ * TLB replacement before adding it to this list.
+ *
+ * NOTE: The 603 features no hardware PTE lookup - and
+ * hence the page tables should NOT be used.
+ * Although lookup could be implemented in
+ * software this is probably not desirable
+ * as it could have an impact on hard realtime
+ * performance, screwing deterministic latency!
+ * (Could still be useful for debugging, though)
+ */
+ if ( PPC_604 != current_ppc_cpu
+ && PPC_604e != current_ppc_cpu
+ && PPC_604r != current_ppc_cpu
+ && PPC_750 != current_ppc_cpu
+ && PPC_7400 != current_ppc_cpu
+ && PPC_7455 != current_ppc_cpu
+ && PPC_7457 != current_ppc_cpu
+ )
+ return 0; /* unsupported by this CPU */
+
+ pgTbl.base = (APte) base;
+ pgTbl.size = 1 << ldSize;
+ /* clear all page table entries */
+ memset (pgTbl.base, 0, pgTbl.size);
+
+ CONSCHECK (0);
+
+ /* map the page table itself 'm' and 'readonly' */
+ if (triv121PgTblMap (&pgTbl,
+ TRIV121_121_VSID,
+ base,
+ (pgTbl.size >> LD_PG_SIZE),
+ TRIV121_ATTR_M, TRIV121_PP_RO_PAGE) >= 0)
+ return 0;
+
+ CONSCHECK ((pgTbl.size >> LD_PG_SIZE));
+
+ return &pgTbl;
}
/* return the handle of the (one and only) page table
* or NULL if none has been initialized yet.
*/
Triv121PgTbl
-triv121PgTblGet(void)
+triv121PgTblGet (void)
{
- return pgTbl.size ? &pgTbl : 0;
+ return pgTbl.size ? &pgTbl : 0;
}
/* NOTE: this routine returns -1 on success;
@@ -424,179 +429,196 @@ triv121PgTblGet(void)
* description)
*/
long
-triv121PgTblMap(
- Triv121PgTbl pt,
- long ovsid,
- unsigned long start,
- unsigned long numPages,
- unsigned attributes,
- unsigned protection
- )
+triv121PgTblMap (Triv121PgTbl pt,
+ long ovsid,
+ unsigned long start,
+ unsigned long numPages,
+ unsigned attributes, unsigned protection)
{
-int i,pass;
-unsigned long pi;
-PTE pte;
-long vsid;
- /* already activated - no change allowed */
- if (pt->active)
- return -1;
-
- if (ovsid < 0) {
- /* use 1:1 mapping */
- ovsid = VSID121(start);
- }
+ int i, pass;
+ unsigned long pi;
+ APte pte;
+ long vsid;
+#ifdef DEBUG
+ long saved_vsid = ovsid;
+#endif
+
+ if (TRIV121_121_VSID == ovsid) {
+ /* use 1:1 mapping */
+ ovsid = VSID121 (start);
+ } else if (TRIV121_SEG_VSID == ovsid) {
+ ovsid = seg2vsid (start);
+ }
#ifdef DEBUG
- PRINTF("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
- (unsigned)numPages, (unsigned)numPages,
- (unsigned)start, (unsigned)vsid);
+ PRINTF ("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
+ (unsigned) numPages, (unsigned) numPages,
+ (unsigned) start, (unsigned) ovsid);
#endif
- /* map in two passes. During the first pass, we try
- * to claim entries as needed. The 'slotFor()' routine
- * will 'mark' the claimed entries without 'valid'ating
- * them.
- * If the mapping fails, all claimed entries are unmarked
- * and we return the PI for which allocation failed.
- *
- * Once we know that the allocation would succeed, we
- * do a second pass; during the second pass, the PTE
- * is actually written.
- *
- */
- for (pass=0; pass<2; pass++) {
- /* check if we would succeed during the first pass */
- for (i=0, pi=PI121(start), vsid = ovsid; i<numPages; i++,pi++) {
- if ( pi >= 1<<LD_PI_SIZE ) {
- vsid++;
- pi = 0;
- }
- /* leave alone existing mappings for this EA */
- if (!alreadyMapped(pt, vsid, pi)) {
- if (!(pte=slotFor(pt, vsid, pi))) {
- /* no free slot found for page index 'pi' */
- unmarkAll(pt);
- return pi;
- } else {
- /* have a free slot; marked by slotFor() */
- if (pass) {
- /* second pass; do the real work */
- pte->vsid=vsid;
- /* H was set by slotFor() */
- pte->api =API(pi);
- /* set up 1:1 mapping */
- pte->rpn =((((unsigned long)vsid)&((1<<(LD_PHYS_SIZE-LD_SEG_SIZE))-1))<<LD_PI_SIZE) | pi;
- pte->wimg=attributes & 0xf;
- pte->pp=protection&0x3;
- /* mark it valid */
- pte->v=1;
- pte->marked=0;
+ /* map in two passes. During the first pass, we try
+ * to claim entries as needed. The 'slotFor()' routine
+ * will 'mark' the claimed entries without 'valid'ating
+ * them.
+ * If the mapping fails, all claimed entries are unmarked
+ * and we return the PI for which allocation failed.
+ *
+ * Once we know that the allocation would succeed, we
+ * do a second pass; during the second pass, the PTE
+ * is actually written.
+ *
+ */
+ for (pass = 0; pass < 2; pass++) {
+ /* check if we would succeed during the first pass */
+ for (i = 0, pi = PI121 (start), vsid = ovsid; i < numPages; i++, pi++) {
+ if (pi >= 1 << LD_PI_SIZE) {
+ vsid++;
+ pi = 0;
+ }
+ /* leave alone existing mappings for this EA */
+ if (!alreadyMapped (pt, vsid, pi)) {
+ if (!(pte = slotFor (pt, vsid, pi))) {
+ /* no free slot found for page index 'pi' */
+ unmarkAll (pt);
+ return pi;
+ } else {
+ /* have a free slot; marked by slotFor() */
+ if (pass) {
+ /* second pass; do the real work */
+ pte->vsid = vsid;
+ /* H was set by slotFor() */
+ pte->api = API (pi);
+ /* set up 1:1 mapping */
+ pte->rpn =
+ ((((unsigned long) vsid) &
+ ((1 << (LD_PHYS_SIZE - LD_SEG_SIZE)) -
+ 1)) << LD_PI_SIZE) | pi;
+ pte->wimg = attributes & 0xf;
+ pte->pp = protection & 0x3;
+ /* mark it valid */
+ pte->marked = 0;
+ if (pt->active) {
+ unsigned32 flags;
+ rtems_interrupt_disable (flags);
+ /* order setting 'v' after writing everything else */
+ asm volatile ("eieio");
+ pte->v = 1;
+ asm volatile ("sync");
+ rtems_interrupt_enable (flags);
+ } else {
+ pte->v = 1;
+ }
+
#ifdef DEBUG
- /* add paranoia */
- assert(alreadyMapped(pt, vsid, pi) == pte);
+ /* add paranoia */
+ assert (alreadyMapped (pt, vsid, pi) == pte);
#endif
- }
- }
- }
- }
- unmarkAll(pt);
- }
+ }
+ }
+ }
+ }
+ unmarkAll (pt);
+ }
#ifdef DEBUG
- {
- unsigned long failedat;
- CONSCHECK(-1);
- /* double check that the requested range is mapped */
- failedat=triv121IsRangeMapped(start, start + (1<<LD_PG_SIZE)*numPages);
- if (0x0C0C != failedat) {
- PRINTF("triv121 mapping failed at 0x%08x\n",(unsigned)failedat);
- return PI121(failedat);
- }
- }
+ {
+ unsigned long failedat;
+ CONSCHECK (-1);
+ /* double check that the requested range is mapped */
+ failedat =
+ triv121IsRangeMapped (saved_vsid, start,
+ start + (1 << LD_PG_SIZE) * numPages);
+ if (0x0C0C != failedat) {
+ PRINTF ("triv121 mapping failed at 0x%08x\n", (unsigned) failedat);
+ return PI121 (failedat);
+ }
+ }
#endif
- return TRIV121_MAP_SUCCESS; /* -1 !! */
+ return TRIV121_MAP_SUCCESS; /* -1 !! */
}
unsigned long
-triv121PgTblSDR1(Triv121PgTbl pt)
+triv121PgTblSDR1 (Triv121PgTbl pt)
{
- return (((unsigned long)pt->base) & ~(LD_MIN_PT_SIZE-1)) |
- ( ((pt->size-1) >> LD_MIN_PT_SIZE) &
- ((1<<(LD_HASH_SIZE-(LD_MIN_PT_SIZE-LD_PTEG_SIZE)))-1)
- );
+ return (((unsigned long) pt->base) & ~((1 << LD_MIN_PT_SIZE) - 1)) |
+ (((pt->size - 1) >> LD_MIN_PT_SIZE) &
+ ((1 << (LD_HASH_SIZE - (LD_MIN_PT_SIZE - LD_PTEG_SIZE))) - 1)
+ );
}
void
-triv121PgTblActivate(Triv121PgTbl pt)
+triv121PgTblActivate (Triv121PgTbl pt)
{
#ifndef DEBUG_MAIN
-unsigned long sdr1=triv121PgTblSDR1(pt);
+ unsigned long sdr1 = triv121PgTblSDR1 (pt);
#endif
- pt->active=1;
+ pt->active = 1;
#ifndef DEBUG_MAIN
#ifdef DEBUG_EXC
- /* install our exception handler */
- ohdl=globalExceptHdl;
- globalExceptHdl=myhdl;
- __asm__ __volatile__ ("sync");
+ /* install our exception handler */
+ ohdl = globalExceptHdl;
+ globalExceptHdl = myhdl;
+ __asm__ __volatile__ ("sync");
#endif
- /* This section of assembly code takes care of the
- * following:
- * - get MSR and switch interrupts + MMU off
- *
- * - load up the segment registers with a
- * 1:1 effective <-> virtual mapping;
- * give user & supervisor keys
- *
- * - flush all TLBs;
- * NOTE: the TLB flushing code is probably
- * CPU dependent!
- *
- * - setup SDR1
- *
- * - restore original MSR
- */
- __asm__ __volatile(
- " mtctr %0\n"
- /* Get MSR and switch interrupts off - just in case.
- * Also switch the MMU off; the book
- * says that SDR1 must not be changed with either
- * MSR_IR or MSR_DR set. I would guess that it could
- * be safe as long as the IBAT & DBAT mappings override
- * the page table...
- */
- " mfmsr %0\n"
- " andc %6, %0, %6\n"
- " mtmsr %6\n"
- " isync \n"
- /* set up the segment registers */
- " li %6, 0\n"
- "1: mtsrin %1, %6\n"
- " addis %6, %6, 0x1000\n" /* address next SR */
- " addi %1, %1, 1\n" /* increment VSID */
- " bdnz 1b\n"
- /* Now flush all TLBs, starting with the topmost index */
- " lis %6, %2@h\n"
- "2: addic. %6, %6, -%3\n" /* address the next one (decrementing) */
- " tlbie %6\n" /* invalidate & repeat */
- " bgt 2b\n"
- " tlbsync\n"
- " sync\n"
- /* set up SDR1 */
- " mtspr %4, %5\n"
- /* restore original MSR */
- " mtmsr %0\n"
- " isync \n"
- ::"r"(16), "b"(KEY_USR | KEY_SUP),
- "i"(FLUSH_EA_RANGE), "i"(1<<LD_PG_SIZE),
- "i"(SDR1), "r"(sdr1),
- "b"(MSR_EE | MSR_IR | MSR_DR)
- : "ctr","cc");
-
- /* At this point, BAT0 is probably still active; it's the
- * caller's job to deactivate it...
- */
+ /* This section of assembly code takes care of the
+ * following:
+ * - get MSR and switch interrupts + MMU off
+ *
+ * - load up the segment registers with a
+ * 1:1 effective <-> virtual mapping;
+ * give user & supervisor keys
+ *
+ * - flush all TLBs;
+ * NOTE: the TLB flushing code is probably
+ * CPU dependent!
+ *
+ * - setup SDR1
+ *
+ * - restore original MSR
+ */
+ __asm__ __volatile (
+ " mtctr %0\n"
+ /* Get MSR and switch interrupts off - just in case.
+ * Also switch the MMU off; the book
+ * says that SDR1 must not be changed with either
+ * MSR_IR or MSR_DR set. I would guess that it could
+ * be safe as long as the IBAT & DBAT mappings override
+ * the page table...
+ */
+ " mfmsr %0\n"
+ " andc %6, %0, %6\n"
+ " mtmsr %6\n"
+ " isync \n"
+ /* set up the segment registers */
+ " li %6, 0\n"
+ "1: mtsrin %1, %6\n"
+ " addis %6, %6, 0x1000\n" /* address next SR */
+ " addi %1, %1, 1\n" /* increment VSID */
+ " bdnz 1b\n"
+ /* Now flush all TLBs, starting with the topmost index */
+ " lis %6, %2@h\n"
+ "2: addic. %6, %6, -%3\n" /* address the next one (decrementing) */
+ " tlbie %6\n" /* invalidate & repeat */
+ " bgt 2b\n"
+ " eieio \n"
+ " tlbsync \n"
+ " sync \n"
+ /* set up SDR1 */
+ " mtspr %4, %5\n"
+ /* restore original MSR */
+ " mtmsr %0\n"
+ " isync \n"
+ :
+ :"r" (16), "b" (KEY_USR | KEY_SUP),
+ "i" (FLUSH_EA_RANGE), "i" (1 << LD_PG_SIZE),
+ "i" (SDR1), "r" (sdr1), "b" (MSR_EE | MSR_IR | MSR_DR)
+ :"ctr", "cc"
+ );
+
+ /* At this point, BAT0 is probably still active; it's the
+ * caller's job to deactivate it...
+ */
#endif
}
@@ -605,49 +627,47 @@ unsigned long sdr1=triv121PgTblSDR1(pt);
/* Exception handler to catch page faults */
#ifdef DEBUG_EXC
-#define BAT_VALID_BOTH 3 /* allow user + super access */
+#define BAT_VALID_BOTH 3 /* allow user + super access */
static void
-myhdl(BSP_Exception_frame* excPtr)
+myhdl (BSP_Exception_frame * excPtr)
{
-if (3==excPtr->_EXC_number) {
- unsigned long dsisr;
-
- /* reactivate DBAT0 and read DSISR */
- __asm__ __volatile__(
- "mfspr %0, %1\n"
- "ori %0,%0,3\n"
- "mtspr %1, %0\n"
- "sync\n"
- "mfspr %0, %2\n"
- :"=r"(dsisr)
- :"i"(DBAT0U),"i"(DSISR),"i"(BAT_VALID_BOTH)
- );
-
- printk("Data Access Exception (DSI) # 3\n");
- printk("Reactivated DBAT0 mapping\n");
-
-
- printk("DSISR 0x%08x\n",dsisr);
-
- printk("revectoring to prevent default handler panic().\n");
- printk("NOTE: exception number %i below is BOGUS\n",
- ASM_DEC_VECTOR);
- /* make this exception 'recoverable' for
- * the default handler by faking a decrementer
- * exception.
- * Note that the default handler's message will be
- * wrong about the exception number.
- */
- excPtr->_EXC_number = ASM_DEC_VECTOR;
-}
+ if (3 == excPtr->_EXC_number) {
+ unsigned long dsisr;
+
+ /* reactivate DBAT0 and read DSISR */
+ __asm__ __volatile__ (
+ "mfspr %0, %1 \n"
+ "ori %0, %0, 3\n"
+ "mtspr %1, %0 \n"
+ "sync\n"
+ "mfspr %0, %2\n"
+ :"=&r" (dsisr)
+ :"i" (DBAT0U), "i" (DSISR), "i" (BAT_VALID_BOTH)
+ );
+
+ printk ("Data Access Exception (DSI) # 3\n");
+ printk ("Reactivated DBAT0 mapping\n");
+
+
+ printk ("DSISR 0x%08x\n", dsisr);
+
+ printk ("revectoring to prevent default handler panic().\n");
+ printk ("NOTE: exception number %i below is BOGUS\n", ASM_DEC_VECTOR);
+ /* make this exception 'recoverable' for
+ * the default handler by faking a decrementer
+ * exception.
+ * Note that the default handler's message will be
+ * wrong about the exception number.
+ */
+ excPtr->_EXC_number = ASM_DEC_VECTOR;
+ }
/* now call the original handler */
-((void(*)())ohdl)(excPtr);
+ ((void (*)()) ohdl) (excPtr);
}
#endif
-#ifdef DEBUG
/* test the consistency of the page table
*
@@ -668,117 +688,284 @@ if (3==excPtr->_EXC_number) {
* RETURNS: total number of valid plus 'marked' slots.
*/
unsigned long
-triv121PgTblConsistency(Triv121PgTbl pt, int pass, int expected)
+triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expected)
{
-PTE pte;
-int i;
-unsigned v,m;
-int warn=0;
-static int maxw=20; /* mute after detecting this many errors */
+ APte pte;
+ int i;
+ unsigned v, m;
+ int warn = 0;
+ static int maxw = 20; /* mute after detecting this many errors */
- PRINTF("Checking page table at 0x%08x (size %i==0x%x)\n",
- (unsigned)pt->base, (unsigned)pt->size, (unsigned)pt->size);
+ PRINTF ("Checking page table at 0x%08x (size %i==0x%x)\n",
+ (unsigned) pt->base, (unsigned) pt->size, (unsigned) pt->size);
- if (!pt->base || !pt->size) {
- PRINTF("Uninitialized Page Table!\n");
- return 0;
- }
+ if (!pt->base || !pt->size) {
+ PRINTF ("Uninitialized Page Table!\n");
+ return 0;
+ }
- v=m=0;
+ v = m = 0;
#if 1
- /* 10/9/2002: I had machine checks crashing after this loop
- * terminated. Maybe caused by speculative loads
- * from beyond the valid memory area (since the
- * page hash table sits at the top of physical
- * memory).
- * Very bizarre - the other loops in this file
- * seem to be fine. Maybe there is a compiler bug??
- * For the moment, I let the loop run backwards...
- *
- * Also see the comment a couple of lines down.
- */
- for (i=pt->size/sizeof(PTERec)-1, pte=pt->base + i; i>=0; i--,pte--)
+ /* 10/9/2002: I had machine checks crashing after this loop
+ * terminated. Maybe caused by speculative loads
+ * from beyond the valid memory area (since the
+ * page hash table sits at the top of physical
+ * memory).
+ * Very bizarre - the other loops in this file
+ * seem to be fine. Maybe there is a compiler bug??
+ * For the moment, I let the loop run backwards...
+ *
+ * Also see the comment a couple of lines down.
+ */
+ for (i = pt->size / sizeof (PTERec) - 1, pte = pt->base + i; i >= 0;
+ i--, pte--)
#else
- for (i=0, pte=pt->base; i<pt->size/sizeof(PTERec); i++,pte++)
+ for (i = 0, pte = pt->base; i < pt->size / sizeof (PTERec); i++, pte++)
#endif
- {
- int err=0;
- char buf[500];
- unsigned long *lp=(unsigned long*)pte;
+ {
+ int err = 0;
+ char buf[500];
+ unsigned long *lp = (unsigned long *) pte;
#if 0
- /* If I put this bogus while statement here (the body is
- * never reached), the original loop works OK
- */
- while (pte >= pt->base + pt->size/sizeof(PTERec))
- /* never reached */;
+ /* If I put this bogus while statement here (the body is
+ * never reached), the original loop works OK
+ */
+ while (pte >= pt->base + pt->size / sizeof (PTERec))
+ /* never reached */ ;
#endif
- if ( (*lp & (0xfffff0<<7)) || *(lp+1) & 0xe00 || (pte->v && pte->marked)) {
- /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
- sprintf(buf,"invalid VSID , unused bits or v && m");
- err=1;
- } else {
- if (pte->v) v++;
- if (pte->marked) m++;
- }
- if (err && maxw) {
- PRINTF("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
- pass,(unsigned)pte,i,i);
- PRINTF("Reason: %s\n",buf);
- dumpPte(pte);
- warn++;
- maxw--;
- }
- }
- if (warn) {
- PRINTF("%i errors found; currently %i entries marked, %i are valid\n",
- warn, m, v);
- }
- v+=m;
- if (maxw && expected>=0 && expected != v) {
- /* number of occupied slots not what they expected */
- PRINTF("Wrong # of occupied slots detected during pass");
- PRINTF("%i; should be %i (0x%x) is %i (0x%x)\n",
- pass, expected, (unsigned)expected, v, (unsigned)v);
- maxw--;
- }
- return v;
+ if ((*lp & (0xfffff0 << 7)) || *(lp + 1) & 0xe00
+ || (pte->v && pte->marked)) {
+ /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
+ sprintf (buf, "invalid VSID , unused bits or v && m");
+ err = 1;
+ } else {
+ if (pte->v)
+ v++;
+ if (pte->marked)
+ m++;
+ }
+ if (err && maxw) {
+ PRINTF
+ ("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
+ pass, (unsigned) pte, i, i);
+ PRINTF ("Reason: %s\n", buf);
+ dumpPte (pte);
+ warn++;
+ maxw--;
+ }
+ }
+ if (warn) {
+ PRINTF ("%i errors found; currently %i entries marked, %i are valid\n",
+ warn, m, v);
+ }
+ v += m;
+ if (maxw && expected >= 0 && expected != v) {
+    /* number of occupied slots is not what was expected */
+ PRINTF ("Wrong # of occupied slots detected during pass");
+ PRINTF ("%i; should be %i (0x%x) is %i (0x%x)\n",
+ pass, expected, (unsigned) expected, v, (unsigned) v);
+ maxw--;
+ }
+ return v;
}
/* Find the PTE for a EA and print its contents
* RETURNS: pte for EA or NULL if no entry was found.
*/
-PTE
-triv121DumpPte(unsigned long ea)
+APte
+triv121DumpEa (unsigned long ea)
+{
+ APte pte;
+
+ pte =
+ alreadyMapped (&pgTbl, pgTbl.active ? TRIV121_SEG_VSID : TRIV121_121_VSID,
+ ea);
+
+ if (pte)
+ dumpPte (pte);
+ return pte;
+}
+
+APte
+triv121FindPte (unsigned long vsid, unsigned long pi)
+{
+ return alreadyMapped (&pgTbl, vsid, pi);
+}
+
+APte
+triv121UnmapEa (unsigned long ea)
+{
+ unsigned32 flags;
+ APte pte;
+
+ if (!pgTbl.active) {
+ pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
+ if (pte) /* alreadyMapped checks for pte->v */
+ pte->v = 0;
+ return pte;
+ }
+
+ pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);
+
+ if (!pte)
+ return 0;
+
+ rtems_interrupt_disable (flags);
+ pte->v = 0;
+ do_dssall ();
+ asm volatile (" sync \n\t"
+ " tlbie %0 \n\t"
+ " eieio \n\t"
+ " tlbsync \n\t"
+ " sync \n\t"::"r" (ea));
+ rtems_interrupt_enable (flags);
+ return pte;
+}
+
+/* A context synchronizing jump */
+#define SYNC_LONGJMP(msr) \
+ asm volatile( \
+ " mtsrr1 %0 \n\t" \
+ " bl 1f \n\t" \
+ "1: mflr 3 \n\t" \
+ " addi 3,3,1f-1b \n\t" \
+ " mtsrr0 3 \n\t" \
+ " rfi \n\t" \
+ "1: \n\t" \
+ : \
+ :"r"(msr) \
+ :"3","lr")
+
+/* The book doesn't mention dssall when changing PTEs
+ * but they require it for BAT changes and I guess
+ * it makes sense in the case of PTEs as well.
+ * Just do it to be on the safe side...
+ */
+static void
+do_dssall ()
+{
+ /* Before changing BATs, 'dssall' must be issued.
+ * We check MSR for MSR_VE and issue a 'dssall' if
+ * MSR_VE is set hoping that
+ * a) on non-altivec CPUs MSR_VE reads as zero
+ * b) all altivec CPUs use the same bit
+ */
+ if (_read_MSR () & MSR_VE) {
+ /* this construct is needed because we don't know
+ * if this file is compiled with -maltivec.
+ * (I plan to add altivec support outside of
+ * RTEMS core and hence I'd rather not
+ * rely on consistent compiler flags).
+ */
+#define DSSALL 0x7e00066c /* dssall opcode */
+ asm volatile (" .long %0"::"i" (DSSALL));
+#undef DSSALL
+ }
+}
+
+APte
+triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
+{
+ APte pte;
+ unsigned long msr;
+
+ if (!pgTbl.active) {
+ pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
+ if (!pte)
+ return 0;
+ if (wimg > 0)
+ pte->wimg = wimg;
+ if (pp > 0)
+ pte->pp = pp;
+ return pte;
+ }
+
+ pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);
+
+ if (!pte)
+ return 0;
+
+ if (wimg < 0 && pp < 0)
+ return pte;
+
+ asm volatile ("mfmsr %0":"=r" (msr));
+
+ /* switch MMU and IRQs off */
+ SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));
+
+ pte->v = 0;
+ do_dssall ();
+ asm volatile ("sync");
+ if (wimg >= 0)
+ pte->wimg = wimg;
+ if (pp >= 0)
+ pte->pp = pp;
+ asm volatile ("tlbie %0; eieio"::"r" (ea));
+ pte->v = 1;
+ asm volatile ("tlbsync; sync");
+
+ /* restore, i.e., switch MMU and IRQs back on */
+ SYNC_LONGJMP (msr);
+
+ return pte;
+}
+
+static void
+pgtblChangePP (Triv121PgTbl pt, int pp)
+{
+ unsigned long n = pt->size >> LD_PG_SIZE;
+ unsigned long b, i;
+
+ for (i = 0, b = (unsigned long) pt->base; i < n;
+ i++, b += (1 << LD_PG_SIZE)) {
+ triv121ChangeEaAttributes (b, -1, pp);
+ }
+}
+
+void
+triv121MakePgTblRW ()
{
-PTE pte;
+ pgtblChangePP (&pgTbl, TRIV121_PP_RW_PAGE);
+}
- pte=alreadyMapped(&pgTbl,TRIV121_121_VSID,ea);
+void
+triv121MakePgTblRO ()
+{
+ pgtblChangePP (&pgTbl, TRIV121_PP_RO_PAGE);
+}
- if (pte)
- dumpPte(pte);
- return pte;
+long
+triv121DumpPte (APte pte)
+{
+ if (pte)
+ dumpPte (pte);
+ return 0;
}
+
+#ifdef DEBUG
/* Dump an entire PTEG */
static void
-dumpPteg(unsigned long vsid, unsigned long pi, unsigned long hash)
+dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash)
{
-PTE pte=ptegOf(&pgTbl,hash);
-int i;
- PRINTF("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
- (unsigned)hash, (unsigned)pte,
- (unsigned)vsid, (unsigned)pi);
- for (i=0; i<PTE_PER_PTEG; i++,pte++) {
- PRINTF("pte 0x%08x is 0x%08x : 0x%08x\n",
- (unsigned)pte,
- (unsigned)*(unsigned long*)pte,
- (unsigned)*(((unsigned long*)pte)+1));
- }
+ APte pte = ptegOf (&pgTbl, hash);
+ int i;
+ PRINTF ("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
+ (unsigned) hash, (unsigned) pte, (unsigned) vsid, (unsigned) pi);
+ for (i = 0; i < PTE_PER_PTEG; i++, pte++) {
+ PRINTF ("pte 0x%08x is 0x%08x : 0x%08x\n",
+ (unsigned) pte,
+ (unsigned) *(unsigned long *) pte,
+ (unsigned) *(((unsigned long *) pte) + 1));
+ }
}
-
+#endif
+
/* Verify that a range of EAs is mapped the page table
+ * (if vsid has one of the special values -- otherwise,
+ * start/end are page indices).
*
* RETURNS: address of the first page for which no
* PTE was found (i.e. page index * page size)
@@ -788,42 +975,38 @@ int i;
* a valid page address].
*/
unsigned long
-triv121IsRangeMapped(unsigned long start, unsigned long end)
+triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end)
{
- start&=~((1<<LD_PG_SIZE)-1);
- while (start < end) {
- if (!alreadyMapped(&pgTbl,TRIV121_121_VSID,start))
- return start;
- start+=1<<LD_PG_SIZE;
- }
- return 0x0C0C; /* OKOK - not on a page boundary */
+ start &= ~((1 << LD_PG_SIZE) - 1);
+ while (start < end) {
+ if (!alreadyMapped (&pgTbl, vsid, start))
+ return start;
+ start += 1 << LD_PG_SIZE;
+ }
+ return 0x0C0C; /* OKOK - not on a page boundary */
}
-#endif
-
-#if defined(DEBUG_MAIN) || defined(DEBUG)
#include <stdlib.h>
/* print a PTE */
static void
-dumpPte(PTE pte)
+dumpPte (APte pte)
{
- if (0==((unsigned long)pte & ((1<<LD_PTEG_SIZE)-1)))
- PRINTF("PTEG--");
- else
- PRINTF("......");
- if (pte->v) {
- PRINTF("VSID: 0x%08x H:%1i API: 0x%02x\n",
- pte->vsid, pte->h, pte->api);
- PRINTF(" ");
- PRINTF("RPN: 0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
- pte->rpn, pte->wimg, pte->marked, pte->pp);
- } else {
- PRINTF("xxxxxx\n");
- PRINTF(" ");
- PRINTF("xxxxxx\n");
- }
+ if (0 == ((unsigned long) pte & ((1 << LD_PTEG_SIZE) - 1)))
+ PRINTF ("PTEG--");
+ else
+ PRINTF ("......");
+ if (pte->v) {
+ PRINTF ("VSID: 0x%08x H:%1i API: 0x%02x\n", pte->vsid, pte->h, pte->api);
+ PRINTF (" ");
+ PRINTF ("RPN: 0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
+ pte->rpn, pte->wimg, pte->marked, pte->pp);
+ } else {
+ PRINTF ("xxxxxx\n");
+ PRINTF (" ");
+ PRINTF ("xxxxxx\n");
+ }
}
@@ -834,20 +1017,20 @@ dumpPte(PTE pte)
* RETURNS 0
*/
int
-triv121PgTblDump(Triv121PgTbl pt, unsigned from, unsigned to)
+triv121PgTblDump (Triv121PgTbl pt, unsigned from, unsigned to)
{
-int i;
-PTE pte;
- PRINTF("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
- (unsigned)pt->size, (unsigned)pt->size, (unsigned)pt->base);
- if (from> pt->size>>LD_PTE_SIZE)
- from=0;
- if (to > pt->size>>LD_PTE_SIZE)
- to=(pt->size>>LD_PTE_SIZE);
- for (i=from,pte=pt->base+from; i<(long)to; i++, pte++) {
- dumpPte(pte);
- }
- return 0;
+ int i;
+ APte pte;
+ PRINTF ("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
+ (unsigned) pt->size, (unsigned) pt->size, (unsigned) pt->base);
+ if (from > pt->size >> LD_PTE_SIZE)
+ from = 0;
+ if (to > pt->size >> LD_PTE_SIZE)
+ to = (pt->size >> LD_PTE_SIZE);
+ for (i = from, pte = pt->base + from; i < (long) to; i++, pte++) {
+ dumpPte (pte);
+ }
+ return 0;
}
@@ -856,33 +1039,35 @@ PTE pte;
#define LD_DBG_PT_SIZE LD_MIN_PT_SIZE
int
-main(int argc, char **argv)
+main (int argc, char **argv)
{
-unsigned long base,start,numPages;
-unsigned long size=1<<LD_DBG_PT_SIZE;
-Triv121PgTbl pt;
-
- base=(unsigned long)malloc(size<<1);
-
- assert(base);
-
- /* align pt */
- base += size-1;
- base &= ~(size-1);
-
- assert(pt=triv121PgTblInit(base,LD_DBG_PT_SIZE));
-
- triv121PgTblDump(pt,(unsigned)-1, (unsigned)-1);
- do {
- do {
- PRINTF("Start Address:"); fflush(stdout);
- } while (1!=scanf("%i",&start));
- do {
- PRINTF("# pages:"); fflush(stdout);
- } while (1!=scanf("%i",&numPages));
- } while (TRIV121_MAP_SUCCESS==triv121PgTblMap(pt,TRIV121_121_VSID,start,numPages,
- TRIV121_ATTR_IO_PAGE,2) &&
- 0==triv121PgTblDump(pt,(unsigned)-1,(unsigned)-1));
+ unsigned long base, start, numPages;
+ unsigned long size = 1 << LD_DBG_PT_SIZE;
+ Triv121PgTbl pt;
+
+ base = (unsigned long) malloc (size << 1);
+
+ assert (base);
+
+ /* align pt */
+ base += size - 1;
+ base &= ~(size - 1);
+
+ assert (pt = triv121PgTblInit (base, LD_DBG_PT_SIZE));
+
+ triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1);
+ do {
+ do {
+ PRINTF ("Start Address:");
+ fflush (stdout);
+ } while (1 != scanf ("%i", &start));
+ do {
+ PRINTF ("# pages:");
+ fflush (stdout);
+ } while (1 != scanf ("%i", &numPages));
+ } while (TRIV121_MAP_SUCCESS ==
+ triv121PgTblMap (pt, TRIV121_121_VSID, start, numPages,
+ TRIV121_ATTR_IO_PAGE, 2)
+ && 0 == triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1));
}
#endif
-#endif
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.h b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.h
index 2cf62fe9d6..6fd9121446 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.h
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.h
@@ -13,19 +13,34 @@
* 1) allow write-protection of text/read-only data areas
* 2) provide more effective-address space in case
* the BATs are not enough
+ * 3) allow 'alias' mappings. Such aliases can only use
+ * the upper bits of the VSID since VSID & 0xf and the
+ * PI are always mapped 1:1 to the RPN.
* LIMITATIONS:
- * - once activated, the page table cannot be changed
* - no PTE replacement (makes no sense in a real-time
* environment, anyway) -> the page table just MUST
* be big enough!.
* - only one page table supported.
+ * - no locking implemented. If multiple threads modify
+ * the page table, it is the user's responsibility to
+ * implement exclusive access.
*/
-/* Author: Till Straumann <strauman@slac.stanford.edu>, 4/2002 */
+/* Author: Till Straumann <strauman@slac.stanford.edu>, 4/2002 - 2004 */
+
+/* I don't include mmu.h here because it says it's derived from linux
+ * and I want to avoid licensing problems
+ */
/* Abstract handle for a page table */
typedef struct Triv121PgTblRec_ *Triv121PgTbl;
+/* A PTE entry */
+typedef struct PTERec_ {
+ volatile unsigned long v:1, vsid:24, h:1, api: 6;
+ volatile unsigned long rpn:20, pad: 3, r:1, c:1, wimg:4, marked:1, pp:2;
+} PTERec, *APte;
+
/* Initialize a trivial page table
* using 2^ldSize bytes of memory starting at
* 'base'.
@@ -41,8 +56,8 @@ typedef struct Triv121PgTblRec_ *Triv121PgTbl;
* the CPU from overwriting the page table,
* it can still be corrupted by PCI bus masters
* (like DMA engines, [VME] bridges etc.) and
- * even by this CPU if either the MMU is off
- * or if there is a DBAT mapping granting write
+ * even by this CPU if either the MMU is off
+ * or if there is a DBAT mapping granting write
* access...
*/
Triv121PgTbl
@@ -56,8 +71,8 @@ triv121PgTblInit(unsigned long base, unsigned ldSize);
* be allocated at the top of the available
* memory (assuming 'memsize' is a power of two):
*
- * ldSize = triv121PgTblLdMinSize(memsize);
- * memsize -= (1<<ldSize); / * reduce memory available to RTEMS * /
+ * ldSize = triv121PgTblLdMinSize(memsize);
+ * memsize -= (1<<ldSize); / * reduce memory available to RTEMS * /
* pgTbl = triv121PgTblInit(memsize,ldSize);
*
*/
@@ -81,55 +96,56 @@ triv121PgTblLdMinSize(unsigned long size);
*/
long
triv121PgTblMap(
- Triv121PgTbl pgTbl, /* handle, returned by Init or Get */
-
- long vsid, /* vsid for this mapping (contains topmost 4 bits of EA);
- *
- * NOTE: it is allowed to pass a VSID < 0 to tell this
- * routine it should use a VSID corresponding to a
- * 1:1:1 effective - virtual - physical mapping
- */
-
- unsigned long start, /* segment offset (lowermost 28 bits of EA) of address range
- *
- * NOTE: if VSID < 0 (TRIV121_121_VSID), 'start' is inter-
- * preted as an effective address (EA), i.e. all 32
- * bits are used - the most significant four going into
- * to the VSID...
- */
-
- unsigned long numPages, /* number of pages to map */
-
- unsigned wimgAttr, /* 'wimg' attributes
- * (Write thru, cache Inhibit, coherent Memory,
- * Guarded memory)
- */
-
- unsigned protection /* 'pp' access protection: Super User
- *
- * 0 r/w none
- * 1 r/w ro
- * 2 r/w r/w
- * 3 ro ro
- */
- );
-
-#define TRIV121_ATTR_W 8
-#define TRIV121_ATTR_I 4
-#define TRIV121_ATTR_M 2
-#define TRIV121_ATTR_G 1
+ Triv121PgTbl pgTbl, /* handle, returned by Init or Get */
+
+ long vsid, /* vsid for this mapping (contains topmost 4 bits of EA);
+ *
+ * NOTE: it is allowed to pass a VSID < 0 to tell this
+ * routine it should use a VSID corresponding to a
+ * 1:1:1 effective - virtual - physical mapping
+ */
+
+ unsigned long start, /* segment offset (lowermost 28 bits of EA) of address range
+ *
+ * NOTE: if VSID < 0 (TRIV121_121_VSID), 'start' is inter-
+ * preted as an effective address (EA), i.e. all 32
+ * bits are used - the most significant four going into
+ * the VSID...
+ */
+
+ unsigned long numPages, /* number of pages to map */
+
+ unsigned wimgAttr, /* 'wimg' attributes
+ * (Write thru, cache Inhibit, coherent Memory,
+ * Guarded memory)
+ */
+
+ unsigned protection /* 'pp' access protection: Super User
+ *
+ * 0 r/w none
+ * 1 r/w ro
+ * 2 r/w r/w
+ * 3 ro ro
+ */
+);
+
+#define TRIV121_ATTR_W 8
+#define TRIV121_ATTR_I 4
+#define TRIV121_ATTR_M 2
+#define TRIV121_ATTR_G 1
/* for I/O pages (e.g. PCI, VME addresses) use cache inhibited
* and guarded pages. RTM about the 'eieio' instruction!
*/
-#define TRIV121_ATTR_IO_PAGE (TRIV121_ATTR_I|TRIV121_ATTR_G)
+#define TRIV121_ATTR_IO_PAGE (TRIV121_ATTR_I|TRIV121_ATTR_G)
-#define TRIV121_PP_RO_PAGE (3) /* read-only for everyone */
-#define TRIV121_PP_RW_PAGE (2) /* read-write for everyone */
+#define TRIV121_PP_RO_PAGE (1) /* read-only for key = 1, unlocked by key=0 */
+#define TRIV121_PP_RW_PAGE (2) /* read-write for key = 1/0 */
-#define TRIV121_121_VSID (-1) /* use 1:1 effective<->virtual address mapping */
+#define TRIV121_121_VSID (-1) /* use 1:1 effective<->virtual address mapping */
+#define TRIV121_SEG_VSID (-2) /* lookup VSID in the segment register */
-#define TRIV121_MAP_SUCCESS (-1) /* triv121PgTblMap() returns this on SUCCESS */
+#define TRIV121_MAP_SUCCESS (-1) /* triv121PgTblMap() returns this on SUCCESS */
/* get a handle to the one and only page table
* (must have been initialized/allocated)
@@ -148,7 +164,7 @@ triv121PgTblSDR1(Triv121PgTbl pgTbl);
/*
* Activate the page table:
- * - set up the segment registers for a 1:1 effective <-> virtual address mapping,
+ * - set up the segment registers for a 1:1 effective <-> virtual address mapping,
* give user and supervisor keys.
* - set up the SDR1 register
* - flush all tlbs
@@ -162,4 +178,53 @@ triv121PgTblSDR1(Triv121PgTbl pgTbl);
void
triv121PgTblActivate(Triv121PgTbl pgTbl);
+/* Find the PTE for an EA and print its contents to stdout
+ * RETURNS: pte for EA or NULL if no entry was found.
+ */
+APte
+triv121DumpEa(unsigned long ea);
+
+/* Find and return a PTE for a vsid/pi combination
+ * RETURNS: pte or NULL if no entry was found
+ */
+APte
+triv121FindPte(unsigned long vsid, unsigned long pi);
+
+/*
+ * Unmap an effective address
+ *
+ * RETURNS: pte that mapped the ea or NULL if no
+ * mapping existed.
+ */
+APte
+triv121UnmapEa(unsigned long ea);
+
+/*
+ * Change the WIMG and PP attributes of the page containing 'ea'
+ *
+ * NOTES: The 'wimg' and 'pp' may be <0 to indicate that no
+ * change is desired.
+ *
+ * RETURNS: Pointer to modified PTE or NULL if 'ea' is not mapped.
+ */
+APte
+triv121ChangeEaAttributes(unsigned long ea, int wimg, int pp);
+
+/* Make the whole page table writable
+ * NOTES: If the page table has not been initialized yet,
+ * this routine has no effect (i.e., after
+ * initialization the page table will still be read-only).
+ */
+void
+triv121MakePgTblRW();
+
+/* Make the whole page table read-only
+ */
+void
+triv121MakePgTblRO();
+
+/* Dump a pte to stdout */
+long
+triv121DumpPte(APte pte);
+
#endif