Diffstat (limited to 'c/src/lib/libcpu/powerpc/shared/include/pgtable.h')
-rw-r--r--   c/src/lib/libcpu/powerpc/shared/include/pgtable.h   144
1 file changed, 0 insertions, 144 deletions
diff --git a/c/src/lib/libcpu/powerpc/shared/include/pgtable.h b/c/src/lib/libcpu/powerpc/shared/include/pgtable.h
deleted file mode 100644
index 5be5874b4f..0000000000
--- a/c/src/lib/libcpu/powerpc/shared/include/pgtable.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * pgtable.h
- *
- * PowerPC memory management structures
- *
- * It is a stripped down version of linux ppc file...
- *
- * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
- *                    Canon Centre Recherche France.
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifndef _LIBCPU_PGTABLE_H
-#define _LIBCPU_PGTABLE_H
-
-/*
- * The PowerPC MMU uses a hash table containing PTEs, together with
- * a set of 16 segment registers (on 32-bit implementations), to define
- * the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much like
- * that used by the i386, for the sake of the Linux memory management code.
- * Low-level assembler code in head.S (procedure hash_page) is responsible
- * for extracting ptes from the tree and putting them into the hash table
- * when necessary, and updating the accessed and modified bits in the
- * page table tree.
- *
- * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
- * We also use the two level tables, but we can put the real bits in them
- * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
- * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
- * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
- * based upon user/super access. The TLB does not have accessed nor write
- * protect. We assume that if the TLB get loaded with an entry it is
- * accessed, and overload the changed bit for write protect. We use
- * two bits in the software pte that are supposed to be set to zero in
- * the TLB entry (24 and 25) for these indicators. Although the level 1
- * descriptor contains the guarded and writethrough/copyback bits, we can
- * set these at the page level since they get copied from the Mx_TWC
- * register when the TLB entry is loaded. We will use bit 27 for guard, since
- * that is where it exists in the MD_TWC, and bit 26 for writethrough.
- * These will get masked from the level 2 descriptor at TLB load time, and
- * copied to the MD_TWC before it gets loaded.
- */
-
-/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
-#define PMD_SHIFT 22
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT 22
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 1024
-#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 64MB value just means that there will be a 64MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- *
- * We no longer map larger than phys RAM with the BATs so we don't have
- * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
- * about clashes between our early calls to ioremap() that start growing down
- * from ioremap_base being run into the VM area allocations (growing upwards
- * from VMALLOC_START). For this reason we have ioremap_bot to check when
- * we actually run into our mappings setup in the early boot with the VM
- * system. This really does become a problem for machines with good amounts
- * of RAM. -- Cort
- */
-#define VMALLOC_OFFSET (0x4000000) /* 64M */
-#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END ioremap_bot
-
-/*
- * Bits in a linux-style PTE. These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible.
- */
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_USER 0x002 /* matches one of the PP bits */
-#define _PAGE_RW 0x004 /* software: user write access allowed */
-#define _PAGE_GUARDED 0x008
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
-#define _PAGE_SHARED 0
-
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED
-#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE
-
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
-                             _PAGE_SHARED)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
-#define PAGE_KERNEL_CI __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
-                                _PAGE_NO_CACHE )
-
-/*
- * The PowerPC can only do execute protection on a segment (256MB) basis,
- * not on a page basis. So we consider execute permission the same as read.
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-#endif /* _LIBCPU_PGTABLE_H */
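For reference, the two-level layout the deleted header describes splits a 32-bit virtual address into a 10-bit page directory index, a 10-bit page table index, and a 12-bit page offset (PGDIR_SHIFT = 22, PTRS_PER_PGD = PTRS_PER_PTE = 1024). A minimal sketch of that decomposition, assuming the usual ppc32 4 KiB page size; the vaddr_split() helper is purely illustrative and not part of the header:

#include <stdio.h>

#define PAGE_SHIFT   12     /* assumed: 4 KiB pages, as on ppc32 */
#define PGDIR_SHIFT  22     /* from the deleted pgtable.h */
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024

/* Hypothetical helper: print the indices a two-level walk would use. */
static void vaddr_split(unsigned long va)
{
    unsigned long pgd_idx = (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
    unsigned long pte_idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    unsigned long offset  = va & ((1UL << PAGE_SHIFT) - 1);

    printf("va 0x%08lx -> pgd[%lu] pte[%lu] offset 0x%03lx\n",
           va, pgd_idx, pte_idx, offset);
}

int main(void)
{
    vaddr_split(0xC0001234UL);  /* prints pgd[768] pte[1] offset 0x234 */
    return 0;
}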
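The VMALLOC_START expression rounds high_memory + VMALLOC_OFFSET down to a 64 MB boundary, producing the guard hole the comment describes. A worked example under an assumed high_memory of 24 MB (high_memory and ioremap_bot are defined elsewhere in the Linux-derived code, not in this header):

#include <stdio.h>

#define VMALLOC_OFFSET (0x4000000)  /* 64M, from the deleted header */

int main(void)
{
    /* Assumed value for illustration only: 24 MiB of physical RAM. */
    unsigned long high_memory = 0x01800000UL;

    unsigned long vmalloc_start =
        (high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1UL);

    /* (0x01800000 + 0x04000000) & 0xfc000000 == 0x04000000 */
    printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);
    return 0;
}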
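The _PAGE_* bits compose by OR into the PAGE_* protection words; for instance PAGE_KERNEL is _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED. A small sketch using the flag values copied from the header that prints the composed values (the parentheses around _PAGE_BASE and _PAGE_WRENABLE are added here for standalone safety; the original header leaves them unparenthesized):

#include <stdio.h>

/* Flag values copied from the deleted header. */
#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x004
#define _PAGE_NO_CACHE 0x020
#define _PAGE_DIRTY    0x080
#define _PAGE_ACCESSED 0x100
#define _PAGE_HWWRITE  0x200
#define _PAGE_SHARED   0

#define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

int main(void)
{
    unsigned int kernel    = _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED;
    unsigned int kernel_ci = kernel | _PAGE_NO_CACHE;

    printf("PAGE_KERNEL    = 0x%03x\n", kernel);     /* 0x385 */
    printf("PAGE_KERNEL_CI = 0x%03x\n", kernel_ci);  /* 0x3a5 */
    return 0;
}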
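The __P/__S macros follow the Linux convention (this header is a stripped-down Linux ppc file) of populating a 16-entry protection table indexed by a mapping's read, write, exec, and shared bits, with the digits in each name encoding exec/write/read. A simplified sketch of that lookup, with placeholder numeric pgprot values since __pgprot() and the real values live elsewhere:

#include <stdio.h>

/* Simplified stand-ins so the table is self-contained. */
typedef unsigned long pgprot_t;
#define PAGE_NONE     0x0UL
#define PAGE_READONLY 0x1UL
#define PAGE_COPY     0x2UL
#define PAGE_SHARED   0x3UL

/* Index bits, mirroring Linux's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED. */
#define PM_READ   0x1
#define PM_WRITE  0x2
#define PM_EXEC   0x4
#define PM_SHARED 0x8

/* The table Linux builds from __P000..__P111 and __S000..__S111:
 * private mappings fill entries 0-7, shared mappings entries 8-15. */
static const pgprot_t protection_map[16] = {
    /* __P000 .. __P111 */
    PAGE_NONE, PAGE_READONLY, PAGE_COPY, PAGE_COPY,
    PAGE_READONLY, PAGE_READONLY, PAGE_COPY, PAGE_COPY,
    /* __S000 .. __S111 */
    PAGE_NONE, PAGE_READONLY, PAGE_SHARED, PAGE_SHARED,
    PAGE_READONLY, PAGE_READONLY, PAGE_SHARED, PAGE_SHARED,
};

int main(void)
{
    /* A shared read-write mapping selects __S011, i.e. PAGE_SHARED. */
    unsigned int idx = PM_READ | PM_WRITE | PM_SHARED;
    printf("prot = 0x%lx\n", protection_map[idx]);  /* 0x3 (PAGE_SHARED) */
    return 0;
}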