author     Chris Johns <chrisj@rtems.org>                        2017-12-23 18:18:56 +1100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-25 08:45:26 +0100
commit     2afb22b7e1ebcbe40373ff7e0efae7d207c655a9 (patch)
tree       44759efe9374f13200a97e96d91bd9a2b7e5ce2a /bsps/powerpc/include/libcpu
parent     MAINTAINERS: Add myself to Write After Approval. (diff)
download   rtems-2afb22b7e1ebcbe40373ff7e0efae7d207c655a9.tar.bz2
Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step.  It
copied header files from arbitrary locations into the build tree.  The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.

* Errors in header files show up in the build tree copy.  This makes it
  hard for editors to open the right file to fix the error.

* There is no clear relationship between source and build tree header
  files.  This makes an audit of the build process difficult.

* The visibility of all header files in the build tree makes it
  difficult to enforce API barriers.  For example, it is discouraged to
  use BSP-specifics in the cpukit.

* An introduction of a new build system is difficult.

* Include paths specified by the -B option are system headers.  This
  may suppress warnings.

* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step.  All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc.  Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

* cpukit/include

* cpukit/score/cpu/@RTEMS_CPU@/include

* cpukit/libnetworking

The new BSP include directories are:

* bsps/include

* bsps/@RTEMS_CPU@/include

* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g.
it is not possible to override general header files via the include
path order.

The "bootstrap -p" option was removed.  The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.
Diffstat (limited to 'bsps/powerpc/include/libcpu')
-rw-r--r--  bsps/powerpc/include/libcpu/bat.h                85
-rw-r--r--  bsps/powerpc/include/libcpu/byteorder.h          54
-rw-r--r--  bsps/powerpc/include/libcpu/c_clock.h            56
-rwxr-xr-x  bsps/powerpc/include/libcpu/cpuIdent.h          158
-rw-r--r--  bsps/powerpc/include/libcpu/e500_mmu.h          230
-rw-r--r--  bsps/powerpc/include/libcpu/io.h                139
-rw-r--r--  bsps/powerpc/include/libcpu/irq.h               199
-rw-r--r--  bsps/powerpc/include/libcpu/mmu.h               304
-rw-r--r--  bsps/powerpc/include/libcpu/page.h               66
-rw-r--r--  bsps/powerpc/include/libcpu/pgtable.h           144
-rw-r--r--  bsps/powerpc/include/libcpu/powerpc-utility.h   985
-rw-r--r--  bsps/powerpc/include/libcpu/pte121.h            265
-rw-r--r--  bsps/powerpc/include/libcpu/raw_exception.h     161
-rw-r--r--  bsps/powerpc/include/libcpu/spr.h                78
-rw-r--r--  bsps/powerpc/include/libcpu/stackTrace.h          8
-rw-r--r--  bsps/powerpc/include/libcpu/vectors.h           115
16 files changed, 3047 insertions, 0 deletions
diff --git a/bsps/powerpc/include/libcpu/bat.h b/bsps/powerpc/include/libcpu/bat.h
new file mode 100644
index 0000000000..2a27e810ee
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/bat.h
@@ -0,0 +1,85 @@
+/*
+ * bat.h
+ *
+ * This file contains the declarations of the C functions used to
+ * instantiate the 60x/7xx PowerPC Block Address Translation (BAT)
+ * registers. More detailed information can be found on Motorola's
+ * site, and more precisely in the following book:
+ *
+ * MPC750
+ * RISC Microprocessor User's Manual
+ * Motorola REF : MPC750UM/AD 8/97
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_BAT_H
+#define _LIBCPU_BAT_H
+
+#include <libcpu/mmu.h>
+#include <libcpu/pgtable.h>
+
+#define IO_PAGE (_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
+
+#ifndef ASM
+/* Take no risks -- the essential parts of this routine run with
+ * interrupts disabled!
+ *
+ * The routine does basic parameter checks:
+ * - Index must be 0..3 (0..7 on 7455, 7457).
+ * If an index > 3 is requested the 745x is
+ * programmed to enable the higher BATs.
+ * - Size must be a power of two and <= 1<<28
+ * (<=1<<31 on 7455, 7457. Also, on these processors
+ * the special value 0xffffffff is allowed which stands
+ * for 1<<32).
+ * If a size > 1<<28 is requested, the 745x is
+ * programmed to enable the larger block sizes.
+ * - BAT ranges must not overlap.
+ * - Physical & virtual addresses must be aligned
+ * to the size.
+ *
+ * RETURNS: zero on success, nonzero on failure.
+ */
+extern int setdbat(int bat_index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags);
+
+/* Same as setdbat but sets IBAT */
+extern int setibat(int bat_index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags);
+
+/* Read DBAT # 'bat_index' into *pu / *pl. NULL pointers may be passed.
+ * If pu and pl are NULL, the bat contents are dumped to the console (printk).
+ *
+ * RETURNS: upper BAT contents or (-1) if index is invalid
+ */
+extern int getdbat(int bat_index, unsigned long *pu, unsigned long *pl);
+
+/* Same as getdbat but reads IBAT */
+extern int getibat(int bat_index, unsigned long *pu, unsigned long *pl);
+
+/* Do not use the asm-routines; they are obsolete; use setdbat() instead */
+extern void asm_setdbat0(unsigned int uperPart, unsigned int lowerPart);
+extern void asm_setdbat1(unsigned int uperPart, unsigned int lowerPart);
+extern void asm_setdbat2(unsigned int uperPart, unsigned int lowerPart);
+extern void asm_setdbat3(unsigned int uperPart, unsigned int lowerPart);
+#else
+
+/* Initialize all bats (upper and lower) to zero. This routine should *only*
+ * be called during early BSP initialization when no C-ABI is available
+ * yet.
+ * This routine clobbers r3 and r4.
+ * NOTE: on 7450 CPUs all 8 dbat/ibat units are cleared. On 601 CPUs only
+ * 4 ibats.
+ */
+ .globl CPU_clear_bats_early
+ .type CPU_clear_bats_early,@function
+
+#endif
+
+#endif /* _LIBCPU_BAT_H */
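
As a usage illustration for the declarations above, a minimal sketch of driving setdbat() from early BSP code; the BAT index, addresses and error handling are hypothetical:

#include <libcpu/bat.h>

/* Hypothetical early-init hook: map a 256 MiB device window 1:1 through
 * DBAT3. IO_PAGE marks the block cache-inhibited, guarded and writable.
 */
void bsp_map_io_window(void)
{
  /* size must be a power of two; 1 << 28 is the classic 256 MiB limit */
  if (setdbat(3, 0xf0000000, 0xf0000000, 1 << 28, IO_PAGE) != 0) {
    /* nonzero: bad index, size, alignment, or overlapping BAT range */
  }
}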
diff --git a/bsps/powerpc/include/libcpu/byteorder.h b/bsps/powerpc/include/libcpu/byteorder.h
new file mode 100644
index 0000000000..0654fefb58
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/byteorder.h
@@ -0,0 +1,54 @@
+/*
+ * byteorder.h
+ *
+ * This file contains inline implementations of functions for
+ * endian conversion.
+ *
+ * It is a stripped-down version of the Linux PPC file...
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_BYTEORDER_H
+#define _LIBCPU_BYTEORDER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline unsigned ld_le16(volatile uint16_t *addr)
+{
+ unsigned val;
+
+ __asm__ volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le16(volatile uint16_t *addr, unsigned val)
+{
+ __asm__ volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline unsigned ld_le32(volatile uint32_t *addr)
+{
+ unsigned val;
+
+ __asm__ volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static inline void st_le32(volatile uint32_t *addr, unsigned val)
+{
+ __asm__ volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBCPU_BYTEORDER_H */
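
A short sketch of the byte-reversing accessors in use; the device register address below is made up:

#include <stdint.h>
#include <libcpu/byteorder.h>

/* Hypothetical little-endian device control register */
#define DEV_CTRL ((volatile uint32_t *)0x80000400)

static void dev_set_enable_bit(void)
{
  uint32_t v = ld_le32(DEV_CTRL); /* byte-reversing load (lwbrx) */
  st_le32(DEV_CTRL, v | 0x1);     /* byte-reversing store (stwbrx) */
}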
diff --git a/bsps/powerpc/include/libcpu/c_clock.h b/bsps/powerpc/include/libcpu/c_clock.h
new file mode 100644
index 0000000000..bd4918fb44
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/c_clock.h
@@ -0,0 +1,56 @@
+/*
+ * Clock Tick Device Driver
+ *
+ * This routine utilizes the Decrementer Register common to the PPC family.
+ *
+ * The tick frequency is directly programmed to the configured number of
+ * microseconds per tick.
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * Modified to support the MPC750.
+ * Modifications Copyright (c) 1999 Eric Valette valette@crf.canon.fr
+ */
+
+#ifndef _LIBCPU_C_CLOCK_H
+#define _LIBCPU_C_CLOCK_H
+
+#include <rtems.h>
+
+/*
+ * These functions and variables represent the API exported by the
+ * CPU to the BSP.
+ */
+
+extern void clockOff (void* unused);
+extern void clockOn (void* unused);
+extern void clockIsr (void* unused);
+/* bookE decrementer is slightly different */
+extern void clockIsrBookE (void *unused);
+extern int clockIsOn (void* unused);
+
+/*
+ * These functions and variables represent the assumptions of this
+ * driver on the BSP.
+ */
+
+extern int BSP_disconnect_clock_handler (void);
+/*
+ * PCI Bus Frequency
+ */
+extern unsigned int BSP_bus_frequency;
+/*
+ * processor clock frequency
+ */
+extern unsigned int BSP_processor_frequency;
+/*
+ * Time base divisor (how many ticks for 1 second).
+ */
+extern unsigned int BSP_time_base_divisor;
+
+#endif
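
A sketch of how the exported clock parameters are typically combined, assuming (as the classic clock driver does) that BSP_bus_frequency / BSP_time_base_divisor yields time-base ticks per millisecond:

#include <stdint.h>
#include <libcpu/c_clock.h>

/* Decrementer reload value for a clock tick of the given length */
static uint32_t decrementer_reload(uint32_t microseconds_per_tick)
{
  uint32_t tb_ticks_per_ms = BSP_bus_frequency / BSP_time_base_divisor;

  return (tb_ticks_per_ms * microseconds_per_tick) / 1000;
}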
diff --git a/bsps/powerpc/include/libcpu/cpuIdent.h b/bsps/powerpc/include/libcpu/cpuIdent.h
new file mode 100755
index 0000000000..e051deba92
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/cpuIdent.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * Added MPC8260 Andy Dachs <a.dachs@sstl.co.uk>
+ * Surrey Satellite Technology Limited
+ *
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_CPUIDENT_H
+#define _LIBCPU_CPUIDENT_H
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef ASM
+typedef enum
+{
+ PPC_601 = 0x1,
+ PPC_5XX = 0x2,
+ PPC_603 = 0x3,
+ PPC_604 = 0x4,
+ PPC_603e = 0x6,
+ PPC_603ev = 0x7,
+ PPC_750 = 0x8,
+ PPC_750_IBM = 0x7000,
+ PPC_604e = 0x9,
+ PPC_604r = 0xA,
+ PPC_7400 = 0xC,
+ PPC_405 = 0x2001, /* Xilinx Virtex-II Pro or -4 */
+ PPC_405EX = 0x1291, /* + 405EXr */
+ PPC_405GP = 0x4011, /* + 405CR */
+ PPC_405GPr = 0x5091,
+ PPC_405EZ = 0x4151,
+ PPC_405EP = 0x5121,
+ PPC_440 = 0x7ff2, /* Xilinx Virtex-5 */
+ PPC_7455 = 0x8001, /* Kate Feng */
+ PPC_7457 = 0x8002,
+ PPC_620 = 0x16,
+ PPC_860 = 0x50,
+ PPC_821 = PPC_860,
+ PPC_823 = PPC_860,
+ PPC_8260 = 0x81,
+ PPC_8240 = PPC_8260,
+ PPC_8245 = 0x8081,
+ PPC_8540 = 0x8020,
+ PPC_e500v2 = 0x8021,
+ PPC_e6500 = 0x8040,
+ PPC_603le = 0x8082, /* 603le core, in MGT5100 and MPC5200 */
+ PPC_e300c1 = 0x8083, /* e300c1 core, in MPC83xx */
+ PPC_e300c2 = 0x8084, /* e300c2 core */
+ PPC_e300c3 = 0x8085, /* e300c3 core */
+ PPC_e200z0 = 0x8170,
+ PPC_e200z1 = 0x8140,
+ PPC_e200z4 = 0x8150,
+ PPC_e200z6 = 0x8110,
+ PPC_e200z7 = 0x8160,
+ PPC_PSIM = 0xfffe, /* GDB PowerPC simulator -- fake version */
+ PPC_UNKNOWN = 0xffff
+} ppc_cpu_id_t;
+
+/* Bitfield for identifying features or groups of CPU flavors.
+ * DO NOT USE DIRECTLY (as the implementation may change);
+ * only use the 'ppc_is_xxx() / ppc_has_xxx()' macros/inlines
+ * below.
+ */
+
+typedef struct {
+ unsigned has_altivec : 1;
+ unsigned has_fpu : 1;
+ unsigned has_hw_ptbl_lkup : 1;
+#define PPC_BOOKE_405 1 /* almost like booke but with some significant differences */
+#define PPC_BOOKE_STD 2
+#define PPC_BOOKE_E500 3 /* bookE with extensions */
+ unsigned is_bookE : 2;
+ unsigned has_16byte_clne : 1;
+ unsigned is_60x : 1;
+ unsigned has_8_bats : 1;
+ unsigned has_epic : 1;
+ unsigned has_shadowed_gprs : 1;
+} ppc_feature_t;
+
+extern ppc_feature_t current_ppc_features;
+extern ppc_cpu_id_t current_ppc_cpu;
+
+typedef unsigned short ppc_cpu_revision_t;
+
+extern ppc_cpu_id_t get_ppc_cpu_type (void);
+extern const char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu);
+extern ppc_cpu_revision_t get_ppc_cpu_revision (void);
+extern ppc_cpu_revision_t current_ppc_revision;
+
+/* PUBLIC ACCESS ROUTINES */
+#define _PPC_FEAT_DECL(x) \
+static inline unsigned ppc_cpu_##x(void) { \
+ if ( PPC_UNKNOWN == current_ppc_cpu ) \
+ get_ppc_cpu_type(); \
+ return current_ppc_features.x; \
+}
+
+_PPC_FEAT_DECL(has_altivec)
+/* has_fpu not implemented yet */
+_PPC_FEAT_DECL(has_hw_ptbl_lkup)
+_PPC_FEAT_DECL(is_bookE)
+_PPC_FEAT_DECL(is_60x)
+_PPC_FEAT_DECL(has_8_bats)
+_PPC_FEAT_DECL(has_epic)
+_PPC_FEAT_DECL(has_shadowed_gprs)
+
+#undef _PPC_FEAT_DECL
+
+static inline ppc_cpu_id_t ppc_cpu_current(void)
+{
+ return current_ppc_cpu;
+}
+
+static inline bool ppc_cpu_is_e200(void)
+{
+ return (ppc_cpu_current() & 0xff80) == 0x8100;
+}
+
+static inline bool ppc_cpu_is_specific_e200(ppc_cpu_id_t id)
+{
+ return (ppc_cpu_current() & 0xfff0) == id;
+}
+
+static inline bool ppc_cpu_is_e300(void)
+{
+ return ppc_cpu_current() == PPC_e300c1
+ || ppc_cpu_current() == PPC_e300c2
+ || ppc_cpu_current() == PPC_e300c3;
+}
+
+static inline bool ppc_cpu_is_e500(void)
+{
+ return ppc_cpu_current() == PPC_8540
+ || ppc_cpu_current() == PPC_e500v2;
+}
+
+static inline bool ppc_cpu_is(ppc_cpu_id_t cpu)
+{
+ return ppc_cpu_current() == cpu;
+}
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
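
A small sketch of the probe API in use; the branch bodies are placeholders:

#include <stdio.h>
#include <libcpu/cpuIdent.h>

static void report_cpu(void)
{
  printf("CPU: %s\n", get_ppc_cpu_type_name(get_ppc_cpu_type()));

  if (ppc_cpu_is_e500()) {
    /* e500 core: use the TLB1 driver from e500_mmu.h */
  } else if (ppc_cpu_has_8_bats()) {
    /* 745x-style core: BATs 4..7 are available to setdbat() */
  }
}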
diff --git a/bsps/powerpc/include/libcpu/e500_mmu.h b/bsps/powerpc/include/libcpu/e500_mmu.h
new file mode 100644
index 0000000000..84920e0d14
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/e500_mmu.h
@@ -0,0 +1,230 @@
+#ifndef RTEMS_E500_MMU_DRIVER_H
+#define RTEMS_E500_MMU_DRIVER_H
+
+/*
+ * Routines to manipulate e500 TLBs; TLB0 (fixed 4k page size)
+ * is not very useful so we mostly focus on TLB1 (variable page size)
+ */
+
+/*
+ * Authorship
+ * ----------
+ * This software was created by
+ * Till Straumann <strauman@slac.stanford.edu>, 2005-2007,
+ * Stanford Linear Accelerator Center, Stanford University.
+ *
+ * Acknowledgement of sponsorship
+ * ------------------------------
+ * This software was produced by
+ * the Stanford Linear Accelerator Center, Stanford University,
+ * under Contract DE-AC03-76SFO0515 with the Department of Energy.
+ *
+ * Government disclaimer of liability
+ * ----------------------------------
+ * Neither the United States nor the United States Department of Energy,
+ * nor any of their employees, makes any warranty, express or implied, or
+ * assumes any legal liability or responsibility for the accuracy,
+ * completeness, or usefulness of any data, apparatus, product, or process
+ * disclosed, or represents that its use would not infringe privately owned
+ * rights.
+ *
+ * Stanford disclaimer of liability
+ * --------------------------------
+ * Stanford University makes no representations or warranties, express or
+ * implied, nor assumes any liability for the use of this software.
+ *
+ * Stanford disclaimer of copyright
+ * --------------------------------
+ * Stanford University, owner of the copyright, hereby disclaims its
+ * copyright and all other rights in this software. Hence, anyone may
+ * freely use it for any purpose without restriction.
+ *
+ * Maintenance of notices
+ * ----------------------
+ * In the interest of clarity regarding the origin and status of this
+ * SLAC software, this and all the preceding Stanford University notices
+ * are to remain affixed to any copy or derivative of this software made
+ * or distributed by the recipient and are to be affixed to any copy of
+ * software made or distributed by the recipient that contains a copy or
+ * derivative of this software.
+ *
+ * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
+ */
+
+#include <rtems.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Some routines require or return an index 'key'. This
+ * is simply the TLB entry # ORed with E500_SELTLB_0
+ * or E500_SELTLB_1 specifying an entry in TLB0 or TLB1,
+ * respectively.
+ */
+typedef int rtems_e500_tlb_idx;
+#define E500_SELTLB_0 0x0000
+#define E500_SELTLB_1 0x1000
+
+/* Cache the relevant TLB1 entries so that we can
+ * make sure the user cannot create conflicting
+ * (overlapping) entries.
+ * Keep them public for informational purposes.
+ */
+typedef struct {
+ struct {
+ uint32_t va_epn: 20;
+ uint32_t va_tid: 12;
+ } va;
+ uint32_t rpn;
+ struct {
+ uint32_t sz: 4;
+ uint32_t ts: 1;
+ uint32_t v: 1;
+ uint32_t perm: 10;
+ uint32_t wimge: 7;
+ } att;
+} E500_tlb_va_cache_t;
+
+extern E500_tlb_va_cache_t rtems_e500_tlb_va_cache[16];
+
+/*
+ * Dump (cleartext) content info from cached TLB entries
+ * to a file (stdout if f==NULL).
+ */
+void
+rtems_e500_dmptlbc(FILE *f);
+
+/*
+ * Read a TLB entry from the hardware; if it is a TLB1 entry
+ * then the current settings are stored in the
+ * rtems_e500_tlb_va_cache[] structure.
+ *
+ * The routine can perform this operation quietly or
+ * print information to a file.
+ *
+ * 'key': TLB entry index ORed with selector bit
+ * (E500_SELTLB_0 for TLB0, E500_SELTLB_1 for TLB1).
+ * 'quiet': perform operation silently (no info printed)
+ * if nonzero.
+ * 'f': open FILE where to print information. May be
+ * NULL in which case 'stdout' is used.
+ *
+ * RETURNS:
+ * 0: success; TLB entry is VALID
+ * +1: success but TLB entry is INVALID
+ * < 0: error (-1: invalid argument)
+ */
+int
+rtems_e500_prtlb(rtems_e500_tlb_idx key, int quiet, FILE *f);
+
+/* Initialize cache; verify that TLB0 is unused;
+ *
+ * RETURNS: zero on success, nonzero on error (TLB0
+ * seems to be in use); in this case the
+ * driver will refuse to change TLB1 entries
+ * (other than disabling them).
+ */
+int
+rtems_e500_initlb(void);
+
+/*
+ * Write TLB1 entry (can also be used to disable an entry).
+ *
+ * The routine checks against the cached data in
+ * rtems_e500_tlb_va_cache[] to prevent the user from generating
+ * overlapping entries.
+ *
+ * 'idx': TLB 1 entry # to manipulate
+ * 'ea': Effective address (must be page aligned)
+ * 'pa': Physical address (must be page aligned)
+ * 'sz': Page size selector; page size is
+ * 1024 * 2^(2*sz) bytes.
+ * 'sz' may also be one of the following:
+ * - page size in bytes ( >= 1024 ); the selector
+ * value is then computed by this routine.
+ * However, 'sz' must be a valid page size
+ * or -1 will be returned.
+ * - a value < 0 to invalidate/disable the
+ * TLB entry.
+ * 'attr': Page attributes; ORed combination of WIMGE,
+ * PERMissions, TID and TS. Use ATTR_xxx macros
+ *
+ * RETURNS: 0 on success, nonzero on error:
+ *
+ * >0: requested mapping would overlap with
+ * existing mapping in other entry. Return
+ * value gives conflicting entry + 1; i.e.,
+ * if a value of 4 is returned then the request
+ * conflicts with existing mapping in entry 3.
+ * -1: invalid argument
+ * -3: driver not initialized (or initialization
+ * failed because TLB0 is in use).
+ * <0: other error
+ *
+ */
+#define E500_TLB_ATTR_WIMGE(x) ((x)&0x7f) /* includes user bits */
+#define E500_TLB_ATTR_WIMGE_GET(x) ((x)&0x7f)
+#define E500_TLB_ATTR_TS (1<<7)
+#define E500_TLB_ATTR_PERM(x) (((x)&0x3ff)<<8)
+#define E500_TLB_ATTR_PERM_GET(x) (((x)>>8)&0x3ff)
+#define E500_TLB_ATTR_TID(x) (((x)&0xfff)<<20)
+#define E500_TLB_ATTR_TID_GET(x) (((x)>>20)&0xfff)
+
+int
+rtems_e500_wrtlb(int idx, uint32_t ea, uint32_t pa, int sz, uint32_t attr);
+
+/*
+ * Check if a ts/tid/ea/sz mapping overlaps
+ * with an existing entry.
+ *
+ * ASSUMPTION: all TLB0 (fixed 4k pages) are invalid and always unused.
+ *
+ * NOTE: 'sz' is the 'logarithmic' size selector; the page size
+ * is 1024*2^(2*sz).
+ *
+ * RETURNS:
+ * >= 0: index of TLB1 entry that already provides a mapping
+ * which overlaps within the ea range.
+ * -1: SUCCESS (no conflicting entry found)
+ * <=-2: ERROR (invalid input)
+ */
+int
+rtems_e500_matchtlb(uint32_t ea, uint32_t tid, int ts, int sz);
+
+/* Find TLB index that maps the 'ea/as' combination
+ *
+ * RETURNS: index 'key'; i.e., the index number plus
+ * a bit (E500_SELTLB_1) which indicates whether
+ * the mapping was found in TLB0 (4k fixed page
+ * size) or in TLB1 (variable page size).
+ *
+ * On error (no mapping) -1 is returned.
+ */
+rtems_e500_tlb_idx
+rtems_e500_ftlb(uint32_t ea, int as);
+
+/* Mark TLB entry as invalid ('disabled'). Unlike
+ * rtems_e500_wrtlb() with a negative size argument
+ * this routine also can disable TLB0 entries.
+ *
+ * 'key': TLB entry index ORed with selector bit
+ * (E500_SELTLB_0 for TLB0, E500_SELTLB_1 for TLB1).
+ *
+ * RETURNS: zero on success, nonzero on error (TLB
+ * unchanged).
+ *
+ * NOTE: If a TLB1 entry is disabled the associated
+ * entry in rtems_e500_tlb_va_cache[] is also
+ * marked as disabled.
+ */
+int
+rtems_e500_clrtlb(rtems_e500_tlb_idx key);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
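
A sketch of creating one TLB1 mapping with the driver above; the entry number, addresses and attribute bits are illustrative, and the WIMGE value assumes the MAS2-style W I M G E bit order:

#include <libcpu/e500_mmu.h>

/* Map 1 MiB at 0xe0000000 (virt == phys) as guarded, cache-inhibited
 * device memory in TLB1 entry 5. A byte count >= 1024 may be passed as
 * 'sz'; the driver derives the selector (1 MiB == 1024 * 2^(2*5)).
 */
static int map_device_window(void)
{
  uint32_t attr = E500_TLB_ATTR_PERM(0x3ff)  /* all permissions (sketch) */
                | E500_TLB_ATTR_WIMGE(0x0a); /* I and G bits, assumed */

  return rtems_e500_wrtlb(5, 0xe0000000, 0xe0000000, 0x00100000, attr);
}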
diff --git a/bsps/powerpc/include/libcpu/io.h b/bsps/powerpc/include/libcpu/io.h
new file mode 100644
index 0000000000..841df81f47
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/io.h
@@ -0,0 +1,139 @@
+/*
+ * io.h
+ *
+ * This file contains inline implementations of functions for
+ * dealing with I/O.
+ *
+ * It is a stripped-down version of the Linux PPC file...
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+#ifndef _LIBCPU_IO_H
+#define _LIBCPU_IO_H
+
+
+#define PREP_ISA_IO_BASE 0x80000000
+#define PREP_ISA_MEM_BASE 0xc0000000
+#define PREP_PCI_DRAM_OFFSET 0x80000000
+
+#define CHRP_ISA_IO_BASE 0xfe000000
+#define CHRP_ISA_MEM_BASE 0xfd000000
+#define CHRP_PCI_DRAM_OFFSET 0x00000000
+
+/* _IO_BASE, _ISA_MEM_BASE, PCI_DRAM_OFFSET are now defined by bsp.h */
+
+#ifndef ASM
+
+#include <bsp.h> /* for _IO_BASE & friends */
+#include <stdint.h>
+
+/* NOTE: The use of these macros is DISCOURAGED.
+ * You should consider e.g. using in_xxx / out_xxx
+ * with a device specific base address that is
+ * defined by the BSP. This makes drivers easier
+ * to port.
+ */
+#define inb(port) in_8((uint8_t *)((port)+_IO_BASE))
+#define outb(val, port) out_8((uint8_t *)((port)+_IO_BASE), (val))
+#define inw(port) in_le16((uint16_t *)((port)+_IO_BASE))
+#define outw(val, port) out_le16((uint16_t *)((port)+_IO_BASE), (val))
+#define inl(port) in_le32((uint32_t *)((port)+_IO_BASE))
+#define outl(val, port) out_le32((uint32_t *)((port)+_IO_BASE), (val))
+
+/*
+ * Enforce In-order Execution of I/O:
+ * Acts as a barrier to ensure all previous I/O accesses have
+ * completed before any further ones are issued.
+ */
+static inline void eieio(void)
+{
+ __asm__ __volatile__ ("eieio");
+}
+
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r() eieio()
+#define iobarrier_w() eieio()
+
+/*
+ * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
+ */
+static inline uint8_t in_8(const volatile uint8_t *addr)
+{
+ uint8_t ret;
+
+ __asm__ __volatile__("lbz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+static inline void out_8(volatile uint8_t *addr, uint8_t val)
+{
+ __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
+}
+
+static inline uint16_t in_le16(const volatile uint16_t *addr)
+{
+ uint16_t ret;
+
+ __asm__ __volatile__("lhbrx %0,0,%1; eieio" : "=r" (ret) :
+ "r" (addr), "m" (*addr));
+ return ret;
+}
+
+static inline uint16_t in_be16(const volatile uint16_t *addr)
+{
+ uint16_t ret;
+
+ __asm__ __volatile__("lhz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+static inline void out_le16(volatile uint16_t *addr, uint16_t val)
+{
+ __asm__ __volatile__("sthbrx %1,0,%2; eieio" : "=m" (*addr) :
+ "r" (val), "r" (addr));
+}
+
+static inline void out_be16(volatile uint16_t *addr, uint16_t val)
+{
+ __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
+}
+
+static inline uint32_t in_le32(const volatile uint32_t *addr)
+{
+ uint32_t ret;
+
+ __asm__ __volatile__("lwbrx %0,0,%1; eieio" : "=r" (ret) :
+ "r" (addr), "m" (*addr));
+ return ret;
+}
+
+static inline uint32_t in_be32(const volatile uint32_t *addr)
+{
+ uint32_t ret;
+
+ __asm__ __volatile__("lwz%U1%X1 %0,%1; eieio" : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+static inline void out_le32(volatile uint32_t *addr, uint32_t val)
+{
+ __asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
+ "r" (val), "r" (addr));
+}
+
+static inline void out_be32(volatile uint32_t *addr, uint32_t val)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
+}
+
+#endif /* ASM */
+#endif /* _LIBCPU_IO_H */
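
A short sketch of the barriered accessors in use; the device layout is hypothetical:

#include <stdint.h>
#include <libcpu/io.h>

/* Poll a little-endian 32-bit status register until a ready bit is set */
static void wait_until_ready(volatile uint32_t *status, uint32_t ready_bit)
{
  while ((in_le32(status) & ready_bit) == 0) {
    /* each in_le32 ends with eieio, so successive polls stay ordered */
  }
}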
diff --git a/bsps/powerpc/include/libcpu/irq.h b/bsps/powerpc/include/libcpu/irq.h
new file mode 100644
index 0000000000..ab06041d24
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/irq.h
@@ -0,0 +1,199 @@
+/*
+ * irq.h
+ *
+ * This include file describes the data structures and the functions
+ * implemented by RTEMS for writing interrupt handlers.
+ *
+ *
+ * MPC5xx port sponsored by Defence Research and Development Canada - Suffield
+ * Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
+ *
+ * Derived from libbsp/powerpc/mbx8xx/irq/irq.h:
+ *
+ * Copyright (C) 1999 valette@crf.canon.fr
+ *
+ * This code is heavily inspired by the public specification of STREAM V2,
+ * which can be found at:
+ *
+ * <http://www.chorus.com/Documentation/index.html> by following
+ * the STREAM API Specification Document link.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_IRQ_H
+#define _LIBCPU_IRQ_H
+
+#include <rtems/irq.h>
+
+#define CPU_ASM_IRQ_VECTOR_BASE 0x0
+
+#ifndef ASM
+
+extern volatile unsigned int ppc_cached_irq_mask;
+
+/*
+ * Symbolic IRQ names and related definitions.
+ */
+
+ /*
+ * Base vector for our USIU IRQ handlers.
+ */
+#define CPU_USIU_VECTOR_BASE (CPU_ASM_IRQ_VECTOR_BASE)
+ /*
+ * USIU IRQ handler related definitions
+ */
+#define CPU_USIU_IRQ_COUNT (16) /* 16 reserved but in the future... */
+#define CPU_USIU_IRQ_MIN_OFFSET (0)
+#define CPU_USIU_IRQ_MAX_OFFSET (CPU_USIU_IRQ_MIN_OFFSET + CPU_USIU_IRQ_COUNT - 1)
+ /*
+ * UIMB IRQ handlers related definitions
+ */
+#define CPU_UIMB_IRQ_COUNT (32 - 8) /* first 8 overlap USIU */
+#define CPU_UIMB_IRQ_MIN_OFFSET (CPU_USIU_IRQ_COUNT + CPU_USIU_VECTOR_BASE)
+#define CPU_UIMB_IRQ_MAX_OFFSET (CPU_UIMB_IRQ_MIN_OFFSET + CPU_UIMB_IRQ_COUNT - 1)
+ /*
+ * PowerPC exceptions handled as interrupts, where an RTEMS-managed
+ * interrupt handler might be connected
+ */
+#define CPU_PROC_IRQ_COUNT (1)
+#define CPU_PROC_IRQ_MIN_OFFSET (CPU_UIMB_IRQ_MAX_OFFSET + 1)
+#define CPU_PROC_IRQ_MAX_OFFSET (CPU_PROC_IRQ_MIN_OFFSET + CPU_PROC_IRQ_COUNT - 1)
+ /*
+ * Summary
+ */
+#define CPU_IRQ_COUNT (CPU_PROC_IRQ_MAX_OFFSET + 1)
+#define CPU_MIN_OFFSET (CPU_USIU_IRQ_MIN_OFFSET)
+#define CPU_MAX_OFFSET (CPU_PROC_IRQ_MAX_OFFSET)
+ /*
+ * USIU IRQ symbolic name definitions.
+ */
+#define CPU_USIU_EXT_IRQ_0 (CPU_USIU_IRQ_MIN_OFFSET + 0)
+#define CPU_USIU_INT_IRQ_0 (CPU_USIU_IRQ_MIN_OFFSET + 1)
+
+#define CPU_USIU_EXT_IRQ_1 (CPU_USIU_IRQ_MIN_OFFSET + 2)
+#define CPU_USIU_INT_IRQ_1 (CPU_USIU_IRQ_MIN_OFFSET + 3)
+
+#define CPU_USIU_EXT_IRQ_2 (CPU_USIU_IRQ_MIN_OFFSET + 4)
+#define CPU_USIU_INT_IRQ_2 (CPU_USIU_IRQ_MIN_OFFSET + 5)
+
+#define CPU_USIU_EXT_IRQ_3 (CPU_USIU_IRQ_MIN_OFFSET + 6)
+#define CPU_USIU_INT_IRQ_3 (CPU_USIU_IRQ_MIN_OFFSET + 7)
+
+#define CPU_USIU_EXT_IRQ_4 (CPU_USIU_IRQ_MIN_OFFSET + 8)
+#define CPU_USIU_INT_IRQ_4 (CPU_USIU_IRQ_MIN_OFFSET + 9)
+
+#define CPU_USIU_EXT_IRQ_5 (CPU_USIU_IRQ_MIN_OFFSET + 10)
+#define CPU_USIU_INT_IRQ_5 (CPU_USIU_IRQ_MIN_OFFSET + 11)
+
+#define CPU_USIU_EXT_IRQ_6 (CPU_USIU_IRQ_MIN_OFFSET + 12)
+#define CPU_USIU_INT_IRQ_6 (CPU_USIU_IRQ_MIN_OFFSET + 13)
+
+#define CPU_USIU_EXT_IRQ_7 (CPU_USIU_IRQ_MIN_OFFSET + 14)
+#define CPU_USIU_INT_IRQ_7 (CPU_USIU_IRQ_MIN_OFFSET + 15)
+
+ /*
+ * Symbolic names for USIU interrupt sources.
+ */
+#define CPU_PERIODIC_TIMER (CPU_USIU_INT_IRQ_6)
+#define CPU_UIMB_INTERRUPT (CPU_USIU_INT_IRQ_7)
+
+ /*
+ * UIMB IRQ symbolic name definitions. The first 8 sources are aliases to
+ * the USIU interrupts of the same number, because they are detected in
+ * the USIU pending register rather than the UIMB pending register.
+ */
+#define CPU_UIMB_IRQ_0 (CPU_USIU_INT_IRQ_0)
+#define CPU_UIMB_IRQ_1 (CPU_USIU_INT_IRQ_1)
+#define CPU_UIMB_IRQ_2 (CPU_USIU_INT_IRQ_2)
+#define CPU_UIMB_IRQ_3 (CPU_USIU_INT_IRQ_3)
+#define CPU_UIMB_IRQ_4 (CPU_USIU_INT_IRQ_4)
+#define CPU_UIMB_IRQ_5 (CPU_USIU_INT_IRQ_5)
+#define CPU_UIMB_IRQ_6 (CPU_USIU_INT_IRQ_6)
+#define CPU_UIMB_IRQ_7 (CPU_USIU_INT_IRQ_7)
+
+#define CPU_UIMB_IRQ_8 (CPU_UIMB_IRQ_MIN_OFFSET+ 0)
+#define CPU_UIMB_IRQ_9 (CPU_UIMB_IRQ_MIN_OFFSET+ 1)
+#define CPU_UIMB_IRQ_10 (CPU_UIMB_IRQ_MIN_OFFSET+ 2)
+#define CPU_UIMB_IRQ_11 (CPU_UIMB_IRQ_MIN_OFFSET+ 3)
+#define CPU_UIMB_IRQ_12 (CPU_UIMB_IRQ_MIN_OFFSET+ 4)
+#define CPU_UIMB_IRQ_13 (CPU_UIMB_IRQ_MIN_OFFSET+ 5)
+#define CPU_UIMB_IRQ_14 (CPU_UIMB_IRQ_MIN_OFFSET+ 6)
+#define CPU_UIMB_IRQ_15 (CPU_UIMB_IRQ_MIN_OFFSET+ 7)
+#define CPU_UIMB_IRQ_16 (CPU_UIMB_IRQ_MIN_OFFSET+ 8)
+#define CPU_UIMB_IRQ_17 (CPU_UIMB_IRQ_MIN_OFFSET+ 9)
+#define CPU_UIMB_IRQ_18 (CPU_UIMB_IRQ_MIN_OFFSET+10)
+#define CPU_UIMB_IRQ_19 (CPU_UIMB_IRQ_MIN_OFFSET+11)
+#define CPU_UIMB_IRQ_20 (CPU_UIMB_IRQ_MIN_OFFSET+12)
+#define CPU_UIMB_IRQ_21 (CPU_UIMB_IRQ_MIN_OFFSET+13)
+#define CPU_UIMB_IRQ_22 (CPU_UIMB_IRQ_MIN_OFFSET+14)
+#define CPU_UIMB_IRQ_23 (CPU_UIMB_IRQ_MIN_OFFSET+15)
+#define CPU_UIMB_IRQ_24 (CPU_UIMB_IRQ_MIN_OFFSET+16)
+#define CPU_UIMB_IRQ_25 (CPU_UIMB_IRQ_MIN_OFFSET+17)
+#define CPU_UIMB_IRQ_26 (CPU_UIMB_IRQ_MIN_OFFSET+18)
+#define CPU_UIMB_IRQ_27 (CPU_UIMB_IRQ_MIN_OFFSET+19)
+#define CPU_UIMB_IRQ_28 (CPU_UIMB_IRQ_MIN_OFFSET+20)
+#define CPU_UIMB_IRQ_29 (CPU_UIMB_IRQ_MIN_OFFSET+21)
+#define CPU_UIMB_IRQ_30 (CPU_UIMB_IRQ_MIN_OFFSET+22)
+#define CPU_UIMB_IRQ_31 (CPU_UIMB_IRQ_MIN_OFFSET+23)
+
+ /*
+ * Symbolic names for UIMB interrupt sources.
+ */
+#define CPU_IRQ_SCI (CPU_UIMB_IRQ_5)
+
+ /*
+ * Symbolic names for processor exceptions handled as RTEMS IRQs.
+ */
+#define CPU_DECREMENTER (CPU_PROC_IRQ_MIN_OFFSET)
+
+/*
+ * Convert an rtems_irq_number constant to an interrupt level
+ * suitable for programming into an I/O device's interrupt level field.
+ */
+int CPU_irq_level_from_symbolic_name(const rtems_irq_number name);
+
+/*-------------------------------------------------------------------------+
+| Function Prototypes.
++--------------------------------------------------------------------------*/
+
+extern void CPU_rtems_irq_mng_init(unsigned cpuId);
+
+typedef struct MPC5XX_Interrupt_frame {
+ uint32_t stacklink; /* Ensure this is a real frame (also reg1 save) */
+ uint32_t calleeLr; /* link register used by callees: SVR4/EABI */
+
+ /* This is what is left out of the primary contexts */
+ uint32_t gpr0;
+ uint32_t gpr2; /* play safe */
+ uint32_t gpr3;
+ uint32_t gpr4;
+ uint32_t gpr5;
+ uint32_t gpr6;
+ uint32_t gpr7;
+ uint32_t gpr8;
+ uint32_t gpr9;
+ uint32_t gpr10;
+ uint32_t gpr11;
+ uint32_t gpr12;
+ uint32_t gpr13; /* Play safe */
+ uint32_t gpr28; /* For internal use by the IRQ handler */
+ uint32_t gpr29; /* For internal use by the IRQ handler */
+ uint32_t gpr30; /* For internal use by the IRQ handler */
+ uint32_t gpr31; /* For internal use by the IRQ handler */
+ uint32_t cr; /* Bits of this are volatile, so no-one may save */
+ uint32_t ctr;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t pc;
+ uint32_t msr;
+ uint32_t pad[3];
+} MPC5XX_Interrupt_frame;
+
+void C_dispatch_irq_handler(MPC5XX_Interrupt_frame *frame, unsigned int excNum);
+
+#endif
+
+#endif
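
A tiny sketch of the level-conversion helper declared above; the device register is hypothetical:

#include <stdint.h>
#include <libcpu/irq.h>

/* Program a device's interrupt-level field from a symbolic IRQ name */
static void set_device_irq_level(volatile uint8_t *level_reg)
{
  *level_reg = (uint8_t) CPU_irq_level_from_symbolic_name(CPU_IRQ_SCI);
}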
diff --git a/bsps/powerpc/include/libcpu/mmu.h b/bsps/powerpc/include/libcpu/mmu.h
new file mode 100644
index 0000000000..d3081316eb
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/mmu.h
@@ -0,0 +1,304 @@
+/*
+ * mmu.h
+ *
+ * PowerPC memory management structures
+ *
+ * It is a stripped-down version of the Linux PPC file...
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_MMU_H
+#define _LIBCPU_MMU_H
+
+#ifndef ASM
+/* Hardware Page Table Entry */
+typedef struct _PTE {
+ unsigned long v:1; /* Entry is valid */
+ unsigned long vsid:24; /* Virtual segment identifier */
+ unsigned long h:1; /* Hash algorithm indicator */
+ unsigned long api:6; /* Abbreviated page index */
+ unsigned long rpn:20; /* Real (physical) page number */
+ unsigned long :3; /* Unused */
+ unsigned long r:1; /* Referenced */
+ unsigned long c:1; /* Changed */
+ unsigned long w:1; /* Write-thru cache mode */
+ unsigned long i:1; /* Cache inhibited */
+ unsigned long m:1; /* Memory coherence */
+ unsigned long g:1; /* Guarded */
+ unsigned long :1; /* Unused */
+ unsigned long pp:2; /* Page protection */
+} PTE;
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+#define PP_RWXX 0 /* Supervisor read/write, User none */
+#define PP_RWRX 1 /* Supervisor read/write, User read */
+#define PP_RWRW 2 /* Supervisor read/write, User read/write */
+#define PP_RXRX 3 /* Supervisor read, User read */
+
+/* Segment Register */
+typedef struct _SEGREG {
+ unsigned long t:1; /* Normal or I/O type */
+ unsigned long ks:1; /* Supervisor 'key' (normally 0) */
+ unsigned long kp:1; /* User 'key' (normally 1) */
+ unsigned long n:1; /* No-execute */
+ unsigned long :4; /* Unused */
+ unsigned long vsid:24; /* Virtual Segment Identifier */
+} SEGREG;
+
+/* Block Address Translation (BAT) Registers */
+typedef struct _P601_BATU { /* Upper part of BAT for 601 processor */
+ unsigned long bepi:15; /* Effective page index (virtual address) */
+ unsigned long :8; /* unused */
+ unsigned long w:1;
+ unsigned long i:1; /* Cache inhibit */
+ unsigned long m:1; /* Memory coherence */
+ unsigned long ks:1; /* Supervisor key (normally 0) */
+ unsigned long kp:1; /* User key (normally 1) */
+ unsigned long pp:2; /* Page access protections */
+} P601_BATU;
+
+typedef struct _BATU { /* Upper part of BAT (all except 601) */
+ unsigned long bepi:15; /* Effective page index (virtual address) */
+ unsigned long :4; /* Unused */
+ unsigned long bl:11; /* Block size mask */
+ unsigned long vs:1; /* Supervisor valid */
+ unsigned long vp:1; /* User valid */
+} BATU;
+
+typedef struct _P601_BATL { /* Lower part of BAT for 601 processor */
+ unsigned long brpn:15; /* Real page index (physical address) */
+ unsigned long :10; /* Unused */
+ unsigned long v:1; /* Valid bit */
+ unsigned long bl:6; /* Block size mask */
+} P601_BATL;
+
+typedef struct _BATL { /* Lower part of BAT (all except 601) */
+ unsigned long brpn:15; /* Real page index (physical address) */
+ unsigned long :10; /* Unused */
+ unsigned long w:1; /* Write-thru cache */
+ unsigned long i:1; /* Cache inhibit */
+ unsigned long m:1; /* Memory coherence */
+ unsigned long g:1; /* Guarded (MBZ in IBAT) */
+ unsigned long :1; /* Unused */
+ unsigned long pp:2; /* Page access protections */
+} BATL;
+
+typedef struct _BAT {
+ BATU batu; /* Upper register */
+ BATL batl; /* Lower register */
+} BAT;
+
+typedef struct _P601_BAT {
+ P601_BATU batu; /* Upper register */
+ P601_BATL batl; /* Lower register */
+} P601_BAT;
+
+/* Block size masks */
+#define BL_128K 0x000
+#define BL_256K 0x001
+#define BL_512K 0x003
+#define BL_1M 0x007
+#define BL_2M 0x00F
+#define BL_4M 0x01F
+#define BL_8M 0x03F
+#define BL_16M 0x07F
+#define BL_32M 0x0FF
+#define BL_64M 0x1FF
+#define BL_128M 0x3FF
+#define BL_256M 0x7FF
+
+/* BAT Access Protection */
+#define BPP_XX 0x00 /* No access */
+#define BPP_RX 0x01 /* Read only */
+#define BPP_RW 0x02 /* Read/write */
+
+/*
+ * Simulated two-level MMU. This structure is used by the kernel
+ * to keep track of MMU mappings and is used to update/maintain
+ * the hardware HASH table which is really a cache of mappings.
+ *
+ * The simulated structures mimic the hardware available on other
+ * platforms, notably the 80x86 and 680x0.
+ */
+
+typedef struct _pte {
+ unsigned long page_num:20;
+ unsigned long flags:12; /* Page flags (some unused bits) */
+} pte;
+
+#define PD_SHIFT (10+12) /* Page directory */
+#define PD_MASK 0x03FF
+#define PT_SHIFT (12) /* Page Table */
+#define PT_MASK 0x03FF
+#define PG_SHIFT (12) /* Page Entry */
+
+
+/* MMU context */
+
+typedef struct _MMU_context {
+ SEGREG segs[16]; /* Segment registers */
+ pte **pmap; /* Two-level page-map structure */
+} MMU_context;
+
+/* Used to set up SDR1 register */
+#define HASH_TABLE_SIZE_64K 0x00010000
+#define HASH_TABLE_SIZE_128K 0x00020000
+#define HASH_TABLE_SIZE_256K 0x00040000
+#define HASH_TABLE_SIZE_512K 0x00080000
+#define HASH_TABLE_SIZE_1M 0x00100000
+#define HASH_TABLE_SIZE_2M 0x00200000
+#define HASH_TABLE_SIZE_4M 0x00400000
+#define HASH_TABLE_MASK_64K 0x000
+#define HASH_TABLE_MASK_128K 0x001
+#define HASH_TABLE_MASK_256K 0x003
+#define HASH_TABLE_MASK_512K 0x007
+#define HASH_TABLE_MASK_1M 0x00F
+#define HASH_TABLE_MASK_2M 0x01F
+#define HASH_TABLE_MASK_4M 0x03F
+
+/* invalidate a TLB entry */
+static inline void _tlbie(unsigned long va)
+{
+ asm volatile ("tlbie %0" : : "r"(va));
+}
+
+extern void _tlbia(void); /* invalidate all TLB entries */
+#endif /* ASM */
+
+/* Control/status registers for the MPC8xx.
+ * A write operation to these registers causes serialized access.
+ * During software tablewalk, the registers used perform mask/shift-add
+ * operations when written/read. A TLB entry is created when the Mx_RPN
+ * is written, and the contents of several registers are used to
+ * create the entry.
+ */
+#define MI_CTR 784 /* Instruction TLB control register */
+#define MI_GPM 0x80000000 /* Set domain manager mode */
+#define MI_PPM 0x40000000 /* Set subpage protection */
+#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
+#define MI_RESETVAL 0x00000000 /* Value of register at reset */
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define MI_AP 786
+#define MI_Ks 0x80000000 /* Should not be set */
+#define MI_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MI_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define MI_EPN 787
+#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MI_EVALID 0x00000200 /* Entry is valid */
+#define MI_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the instruction TLB, it contains bits that get loaded into the
+ * TLB entry when the MI_RPN is written.
+ */
+#define MI_TWC 789
+#define MI_APG 0x000001e0 /* Access protection group (0) */
+#define MI_GUARDED 0x00000010 /* Guarded storage */
+#define MI_PSMASK 0x0000000c /* Mask of page size bits */
+#define MI_PS8MEG 0x0000000c /* 8M page size */
+#define MI_PS512K 0x00000004 /* 512K page size */
+#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MI_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the instruction TLB, using
+ * additional information from the MI_EPN, and MI_TWC registers.
+ */
+#define MI_RPN 790
+
+/* Define an RPN value for mapping kernel memory to large virtual
+ * pages for boot initialization. This has real page number of 0,
+ * large page size, shared page, cache enabled, and valid.
+ * Also mark all subpages valid and write access.
+ */
+#define MI_BOOTINIT 0x000001fd
+
+#define MD_CTR 792 /* Data TLB control register */
+#define MD_GPM 0x80000000 /* Set domain manager mode */
+#define MD_PPM 0x40000000 /* Set subpage protection */
+#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
+#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
+#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
+#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
+#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
+#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
+#define MD_RESETVAL 0x04000000 /* Value of register at reset */
+
+#define M_CASID 793 /* Address space ID (context) to match */
+#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
+
+
+/* These are the Ks and Kp from the PowerPC books. For proper operation,
+ * Ks = 0, Kp = 1.
+ */
+#define MD_AP 794
+#define MD_Ks 0x80000000 /* Should not be set */
+#define MD_Kp 0x40000000 /* Should always be set */
+
+/* The effective page number register. When read, contains the information
+ * about the last instruction TLB miss. When MD_RPN is written, bits in
+ * this register are used to create the TLB entry.
+ */
+#define MD_EPN 795
+#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
+#define MD_EVALID 0x00000200 /* Entry is valid */
+#define MD_ASIDMASK 0x0000000f /* ASID match value */
+ /* Reset value is undefined */
+
+/* The pointer to the base address of the first level page table.
+ * During a software tablewalk, reading this register provides the address
+ * of the entry associated with MD_EPN.
+ */
+#define M_TWB 796
+#define M_L1TB 0xfffff000 /* Level 1 table base address */
+#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
+ /* Reset value is undefined */
+
+/* A "level 1" or "segment" or whatever you want to call it register.
+ * For the data TLB, it contains bits that get loaded into the TLB entry
+ * when the MD_RPN is written. It also provides the hardware assist
+ * for finding the PTE address during software tablewalk.
+ */
+#define MD_TWC 797
+#define MD_L2TB 0xfffff000 /* Level 2 table base address */
+#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
+#define MD_APG 0x000001e0 /* Access protection group (0) */
+#define MD_GUARDED 0x00000010 /* Guarded storage */
+#define MD_PSMASK 0x0000000c /* Mask of page size bits */
+#define MD_PS8MEG 0x0000000c /* 8M page size */
+#define MD_PS512K 0x00000004 /* 512K page size */
+#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
+#define MD_WT 0x00000002 /* Use writethrough page attribute */
+#define MD_SVALID 0x00000001 /* Segment entry is valid */
+ /* Reset value is undefined */
+
+
+/* Real page number. Defined by the pte. Writing this register
+ * causes a TLB entry to be created for the data TLB, using
+ * additional information from the MD_EPN, and MD_TWC registers.
+ */
+#define MD_RPN 798
+
+/* This is a temporary storage register that could be used to save
+ * a processor working register during a tablewalk.
+ */
+#define M_TW 799
+#endif /* _LIBCPU_MMU_H */
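
A sketch of filling a (non-601) BAT pair with the structures above; the address and attributes are illustrative, and writing the pair into the actual SPRs is left to the helpers in libcpu/bat.h:

#include <libcpu/mmu.h>

/* Describe a 16 MiB cache-inhibited, guarded, supervisor read/write
 * block at 0xf0000000 (virt == phys).
 */
static void fill_device_bat(BAT *bat)
{
  bat->batu.bepi = 0xf0000000 >> 17; /* top 15 effective address bits */
  bat->batu.bl   = BL_16M;
  bat->batu.vs   = 1;                /* valid for supervisor accesses */
  bat->batu.vp   = 0;                /* not valid for user accesses */

  bat->batl.brpn = 0xf0000000 >> 17; /* 1:1 physical mapping */
  bat->batl.w    = 0;
  bat->batl.i    = 1;                /* cache-inhibited */
  bat->batl.m    = 0;
  bat->batl.g    = 1;                /* guarded device memory */
  bat->batl.pp   = BPP_RW;
}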
diff --git a/bsps/powerpc/include/libcpu/page.h b/bsps/powerpc/include/libcpu/page.h
new file mode 100644
index 0000000000..3efbdef5bc
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/page.h
@@ -0,0 +1,66 @@
+/*
+ * page.h
+ *
+ * PowerPC memory management structures
+ *
+ * It is a stripped-down version of the Linux PPC file...
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_PAGE_H
+#define _LIBCPU_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PAGE_OFFSET 0xc0000000
+
+
+#ifndef ASM
+/*
+ * These simple typedefs make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+
+/* align addr on a size boundary - adjust address up if needed -- Cort */
+#define _ALIGN(addr,size) (((addr)+size-1)&(~(size-1)))
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+/* map phys->virtual and virtual->phys for RAM pages */
+
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+
+#define MAP_NR(addr) (((unsigned long)addr-PAGE_OFFSET) >> PAGE_SHIFT)
+#define MAP_PAGE_RESERVED (1<<15)
+
+extern unsigned long get_zero_page_fast(void);
+#endif /* ASM */
+#endif /* _LIBCPU_PAGE_H */
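
Two worked values for the alignment macros above:

/* PAGE_ALIGN rounds up to the next 4 KiB page; _ALIGN to any power of two */
static void alignment_examples(void)
{
  unsigned long a = PAGE_ALIGN(0x00012345UL);  /* yields 0x00013000 */
  unsigned long b = _ALIGN(0x00012345UL, 256); /* yields 0x00012400 */

  (void) a;
  (void) b;
}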
diff --git a/bsps/powerpc/include/libcpu/pgtable.h b/bsps/powerpc/include/libcpu/pgtable.h
new file mode 100644
index 0000000000..5be5874b4f
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/pgtable.h
@@ -0,0 +1,144 @@
+/*
+ * pgtable.h
+ *
+ * PowerPC memory management structures
+ *
+ * It is a stripped-down version of the Linux PPC file...
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_PGTABLE_H
+#define _LIBCPU_PGTABLE_H
+
+/*
+ * The PowerPC MMU uses a hash table containing PTEs, together with
+ * a set of 16 segment registers (on 32-bit implementations), to define
+ * the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much like
+ * that used by the i386, for the sake of the Linux memory management code.
+ * Low-level assembler code in head.S (procedure hash_page) is responsible
+ * for extracting ptes from the tree and putting them into the hash table
+ * when necessary, and updating the accessed and modified bits in the
+ * page table tree.
+ *
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB has neither accessed nor write-
+ * protect bits. We assume that if the TLB gets loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ */
+
+/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
+#define PMD_SHIFT 22
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 1024
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 64MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START). For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system. This really does become a problem for machines with good amounts
+ * of RAM. -- Cort
+ */
+#define VMALLOC_OFFSET (0x4000000) /* 64M */
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END ioremap_bot
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_USER 0x002 /* matches one of the PP bits */
+#define _PAGE_RW 0x004 /* software: user write access allowed */
+#define _PAGE_GUARDED 0x008
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */
+#define _PAGE_SHARED 0
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_BASE _PAGE_PRESENT | _PAGE_ACCESSED
+#define _PAGE_WRENABLE _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
+ _PAGE_SHARED)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
+ _PAGE_NO_CACHE )
+
+/*
+ * The PowerPC can only do execute protection on a segment (256MB) basis,
+ * not on a page basis. So we consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+#endif /* _LIBCPU_PGTABLE_H */
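
As a small illustration, the cache-inhibited device mapping that bat.h exports as IO_PAGE is built from exactly these bits:

/* Cache-inhibited, guarded, writable -- the IO_PAGE combination */
unsigned long io_page_flags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW;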
diff --git a/bsps/powerpc/include/libcpu/powerpc-utility.h b/bsps/powerpc/include/libcpu/powerpc-utility.h
new file mode 100644
index 0000000000..4d6af38485
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/powerpc-utility.h
@@ -0,0 +1,985 @@
+/**
+ * @file
+ *
+ * @ingroup powerpc_shared
+ *
+ * @brief General purpose assembler macros, linker command file support and
+ * some inline functions for direct register access.
+ */
+
+/*
+ * Copyright (c) 2008-2015 embedded brains GmbH.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * access function for Device Control Registers inspired by "ppc405common.h"
+ * from Michael Hamel ADInstruments May 2008
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/**
+ * @defgroup powerpc_shared Shared PowerPC Code
+ */
+
+#ifndef __LIBCPU_POWERPC_UTILITY_H
+#define __LIBCPU_POWERPC_UTILITY_H
+
+#if !defined(ASM)
+ #include <rtems.h>
+#endif
+
+#include <rtems/score/cpu.h>
+#include <rtems/powerpc/registers.h>
+#include <rtems/powerpc/powerpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(ASM)
+
+#include <rtems/bspIo.h>
+#include <rtems/system.h>
+
+#include <libcpu/cpuIdent.h>
+
+#define LINKER_SYMBOL(sym) extern char sym [];
+
+/**
+ * @brief Read one byte from @a src.
+ */
+static inline uint8_t ppc_read_byte(const volatile void *src)
+{
+ uint8_t value;
+
+ __asm__ volatile (
+ "lbz %0, 0(%1)"
+ : "=r" (value)
+ : "b" (src)
+ );
+
+ return value;
+}
+
+/**
+ * @brief Read one half word from @a src.
+ */
+static inline uint16_t ppc_read_half_word(const volatile void *src)
+{
+ uint16_t value;
+
+ __asm__ volatile (
+ "lhz %0, 0(%1)"
+ : "=r" (value)
+ : "b" (src)
+ );
+
+ return value;
+}
+
+/**
+ * @brief Read one word from @a src.
+ */
+static inline uint32_t ppc_read_word(const volatile void *src)
+{
+ uint32_t value;
+
+ __asm__ volatile (
+ "lwz %0, 0(%1)"
+ : "=r" (value)
+ : "b" (src)
+ );
+
+ return value;
+}
+
+/**
+ * @brief Write one byte @a value to @a dest.
+ */
+static inline void ppc_write_byte(uint8_t value, volatile void *dest)
+{
+ __asm__ volatile (
+ "stb %0, 0(%1)"
+ :
+ : "r" (value), "b" (dest)
+ );
+}
+
+/**
+ * @brief Write one half word @a value to @a dest.
+ */
+static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
+{
+ __asm__ volatile (
+ "sth %0, 0(%1)"
+ :
+ : "r" (value), "b" (dest)
+ );
+}
+
+/**
+ * @brief Write one word @a value to @a dest.
+ */
+static inline void ppc_write_word(uint32_t value, volatile void *dest)
+{
+ __asm__ volatile (
+ "stw %0, 0(%1)" :
+ : "r" (value), "b" (dest)
+ );
+}
+
+
+static inline void *ppc_stack_pointer(void)
+{
+ void *sp;
+
+ __asm__ volatile (
+ "mr %0, 1"
+ : "=r" (sp)
+ );
+
+ return sp;
+}
+
+static inline void ppc_set_stack_pointer(void *sp)
+{
+ __asm__ volatile (
+ "mr 1, %0"
+ :
+ : "r" (sp)
+ );
+}
+
+static inline void *ppc_link_register(void)
+{
+ void *lr;
+
+ __asm__ volatile (
+ "mflr %0"
+ : "=r" (lr)
+ );
+
+ return lr;
+}
+
+static inline void ppc_set_link_register(void *lr)
+{
+ __asm__ volatile (
+ "mtlr %0"
+ :
+ : "r" (lr)
+ );
+}
+
+static inline uint32_t ppc_machine_state_register(void)
+{
+ uint32_t msr;
+
+ __asm__ volatile (
+ "mfmsr %0"
+ : "=r" (msr)
+ );
+
+ return msr;
+}
+
+static inline void ppc_set_machine_state_register(uint32_t msr)
+{
+ __asm__ volatile (
+ "mtmsr %0"
+ :
+ : "r" (msr)
+ );
+}
+
+static inline void ppc_synchronize_data(void)
+{
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile ("sync");
+}
+
+static inline void ppc_light_weight_synchronize(void)
+{
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile ("lwsync");
+}
+
+static inline void ppc_synchronize_instructions(void)
+{
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile ("isync");
+}
+
+static inline void ppc_enforce_in_order_execution_of_io(void)
+{
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile ("eieio");
+}
+
+static inline void ppc_data_cache_block_flush(void *addr)
+{
+ __asm__ volatile (
+ "dcbf 0, %0"
+ :
+ : "r" (addr)
+ : "memory"
+ );
+}
+
+static inline void ppc_data_cache_block_flush_2(
+ void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbf %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ : "memory"
+ );
+}
+
+static inline void ppc_data_cache_block_invalidate(void *addr)
+{
+ __asm__ volatile (
+ "dcbi 0, %0"
+ :
+ : "r" (addr)
+ : "memory"
+ );
+}
+
+static inline void ppc_data_cache_block_invalidate_2(
+ void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbi %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ : "memory"
+ );
+}
+
+static inline void ppc_data_cache_block_store(const void *addr)
+{
+ __asm__ volatile (
+ "dcbst 0, %0"
+ :
+ : "r" (addr)
+ );
+}
+
+static inline void ppc_data_cache_block_store_2(
+ const void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbst %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ );
+}
+
+static inline void ppc_data_cache_block_touch(const void *addr)
+{
+ __asm__ volatile (
+ "dcbt 0, %0"
+ :
+ : "r" (addr)
+ );
+}
+
+static inline void ppc_data_cache_block_touch_2(
+ const void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbt %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ );
+}
+
+static inline void ppc_data_cache_block_touch_for_store(const void *addr)
+{
+ __asm__ volatile (
+ "dcbtst 0, %0"
+ :
+ : "r" (addr)
+ );
+}
+
+static inline void ppc_data_cache_block_touch_for_store_2(
+ const void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbtst %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ );
+}
+
+static inline void ppc_data_cache_block_clear_to_zero(void *addr)
+{
+ __asm__ volatile (
+ "dcbz 0, %0"
+ :
+ : "r" (addr)
+ : "memory"
+ );
+}
+
+static inline void ppc_data_cache_block_clear_to_zero_2(
+ void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "dcbz %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ : "memory"
+ );
+}
+
+static inline void ppc_instruction_cache_block_invalidate(void *addr)
+{
+ __asm__ volatile (
+ "icbi 0, %0"
+ :
+ : "r" (addr)
+ );
+}
+
+static inline void ppc_instruction_cache_block_invalidate_2(
+ void *base,
+ uintptr_t offset
+)
+{
+ __asm__ volatile (
+ "icbi %0, %1"
+ :
+ : "b" (base), "r" (offset)
+ );
+}
+
+/**
+ * @brief Enables external exceptions.
+ *
+ * You can use this function to enable the external exceptions and restore the
+ * machine state with ppc_external_exceptions_disable() later.
+ */
+static inline uint32_t ppc_external_exceptions_enable(void)
+{
+ uint32_t current_msr;
+ uint32_t new_msr;
+
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile (
+ "mfmsr %0;"
+ "ori %1, %0, 0x8000;"
+ "mtmsr %1"
+ : "=r" (current_msr), "=r" (new_msr)
+ );
+
+ return current_msr;
+}
+
+/**
+ * @brief Restores machine state.
+ *
+ * @see ppc_external_exceptions_enable()
+ */
+static inline void ppc_external_exceptions_disable(uint32_t msr)
+{
+ ppc_set_machine_state_register(msr);
+
+ RTEMS_COMPILER_MEMORY_BARRIER();
+}
+
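+/* A minimal sketch of the intended pairing: save the previous machine
+ * state, run with external exceptions enabled, then restore it.
+ *
+ *   uint32_t msr = ppc_external_exceptions_enable();
+ *
+ *   (code that may take external interrupts)
+ *
+ *   ppc_external_exceptions_disable(msr);
+ */
+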
+static inline uint32_t ppc_count_leading_zeros(uint32_t value)
+{
+ uint32_t count;
+
+ __asm__ (
+ "cntlzw %0, %1;"
+ : "=r" (count)
+ : "r" (value)
+ );
+
+ return count;
+}
+
+/*
+ * Simple spin delay in microsecond units for device drivers.
+ * This is very dependent on the clock speed of the target.
+ */
+
+#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
+/* The wonderful Book E parts do not have mftb/mftbu; they only
+ * define the TBRU/TBRL SPRs, so we use these.  Luckily, we run in
+ * supervisor mode, so that should work on all CPUs.  In user mode
+ * we would have a problem...
+ * 2007/11/30, T.S.
+ *
+ * OTOH, PSIM currently lacks support for reading SPRs 268/269.
+ * You need GDB patch sim/2376 to avoid a crash...
+ *
+ * OTOH, the MPC8xx does not allow reading the time base registers
+ * via mfspr; we NEED mftb to access the time base.
+ * 2009/10/30 Th. D.
+ */
+#define CPU_Get_timebase_low( _value ) \
+ __asm__ volatile( "mftb %0" : "=r" (_value) )
+#else
+#define CPU_Get_timebase_low( _value ) \
+ __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
+#endif
+
+/* Must be provided for rtems_bsp_delay to work */
+extern uint32_t bsp_clicks_per_usec;
+
+#define rtems_bsp_delay( _microseconds ) \
+ do { \
+ uint32_t start, ticks, now; \
+ CPU_Get_timebase_low( start ) ; \
+ ticks = (_microseconds) * bsp_clicks_per_usec; \
+ do \
+ CPU_Get_timebase_low( now ) ; \
+ while (now - start < ticks); \
+ } while (0)
+
+#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
+ do { \
+ uint32_t start, now; \
+ CPU_Get_timebase_low( start ); \
+ do \
+ CPU_Get_timebase_low( now ); \
+ while (now - start < (_cycles)); \
+ } while (0)
+
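+/* Example: busy-wait roughly ten microseconds, e.g. to satisfy a device
+ * settling time.  This assumes the BSP has initialized
+ * bsp_clicks_per_usec.
+ *
+ *   rtems_bsp_delay( 10 );
+ */
+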
+/*
+ * Routines to access the decrementer register
+ */
+
+#define PPC_Set_decrementer( _clicks ) \
+ do { \
+ __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
+ } while (0)
+
+#define PPC_Get_decrementer( _clicks ) \
+ __asm__ volatile( "mfdec %0" : "=r" (_clicks) )
+
+/*
+ * Routines to access the time base register
+ */
+
+static inline uint64_t PPC_Get_timebase_register( void )
+{
+ uint32_t tbr_low;
+ uint32_t tbr_high;
+ uint32_t tbr_high_old;
+ uint64_t tbr;
+
+ do {
+#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
+/* See comment above (CPU_Get_timebase_low) */
+ __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
+ __asm__ volatile( "mftb %0" : "=r" (tbr_low));
+ __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
+#else
+ __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
+ __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
+ __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
+#endif
+ } while ( tbr_high_old != tbr_high );
+
+ tbr = tbr_high;
+ tbr <<= 32;
+ tbr |= tbr_low;
+ return tbr;
+}
+
+static inline void PPC_Set_timebase_register (uint64_t tbr)
+{
+ uint32_t tbr_low;
+ uint32_t tbr_high;
+
+ tbr_low = (uint32_t) tbr;
+ tbr_high = (uint32_t) (tbr >> 32);
+ __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
+ __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));
+}
+
+static inline uint32_t ppc_decrementer_register(void)
+{
+ uint32_t dec;
+
+ PPC_Get_decrementer(dec);
+
+ return dec;
+}
+
+static inline void ppc_set_decrementer_register(uint32_t dec)
+{
+ PPC_Set_decrementer(dec);
+}
+
+/**
+ * @brief Preprocessor magic for stringification of @a x.
+ */
+#define PPC_STRINGOF(x) #x
+
+/**
+ * @brief Returns the value of the Special Purpose Register with number @a spr.
+ *
+ * @note This macro uses a GNU C extension.
+ */
+#define PPC_SPECIAL_PURPOSE_REGISTER(spr) \
+ ({ \
+ uint32_t val; \
+ __asm__ volatile (\
+ "mfspr %0, " PPC_STRINGOF(spr) \
+ : "=r" (val) \
+ ); \
+ val;\
+ } )
+
+/**
+ * @brief Sets the Special Purpose Register with number @a spr to the value in
+ * @a val.
+ */
+#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
+ do { \
+ __asm__ volatile (\
+ "mtspr " PPC_STRINGOF(spr) ", %0" \
+ : \
+ : "r" (val) \
+ ); \
+ } while (0)
+
+/**
+ * @brief Sets in the Special Purpose Register with number @a spr all bits
+ * which are set in @a bits.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ _ISR_Local_disable(level); \
+ val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
+ val |= mybits; \
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
+/**
+ * @brief Sets in the Special Purpose Register with number @a spr all bits
+ * which are set in @a bits. The previous register value will be masked with
+ * @a mask.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ uint32_t mymask = mask; \
+ _ISR_Local_disable(level); \
+ val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
+ val &= ~mymask; \
+ val |= mybits; \
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
+/**
+ * @brief Clears in the Special Purpose Register with number @a spr all bits
+ * which are set in @a bits.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ _ISR_Local_disable(level); \
+ val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
+ val &= ~mybits; \
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
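+/* Usage sketch: the SPR number must be a compile-time constant, since
+ * it is stringified into the instruction.  SPRG0 (SPR 272) serves here
+ * as a harmless example of a writable scratch register.
+ *
+ *   PPC_SET_SPECIAL_PURPOSE_REGISTER(272, 0x12345678);
+ *   uint32_t sprg0 = PPC_SPECIAL_PURPOSE_REGISTER(272);
+ *   PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(272, 0x1);
+ */
+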
+/**
+ * @brief Returns the value of the Thread Management Register with number @a tmr.
+ *
+ * @note This macro uses a GNU C extension.
+ */
+#define PPC_THREAD_MGMT_REGISTER(tmr) \
+ ({ \
+ uint32_t val; \
+ __asm__ volatile (\
+ "mftmr %0, " PPC_STRINGOF(tmr) \
+ : "=r" (val) \
+ ); \
+ val;\
+ } )
+
+/**
+ * @brief Sets the Thread Management Register with number @a tmr to the value in
+ * @a val.
+ */
+#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
+ do { \
+ __asm__ volatile (\
+ "mttmr " PPC_STRINGOF(tmr) ", %0" \
+ : \
+ : "r" (val) \
+ ); \
+ } while (0)
+
+/**
+ * @brief Returns the value of the Device Control Register with number @a dcr.
+ *
+ * The PowerPC 4XX family has Device Control Registers.
+ *
+ * @note This macro uses a GNU C extension.
+ */
+#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
+ ({ \
+ uint32_t val; \
+ __asm__ volatile (\
+ "mfdcr %0, " PPC_STRINGOF(dcr) \
+ : "=r" (val) \
+ ); \
+ val;\
+ } )
+
+/**
+ * @brief Sets the Device Control Register with number @a dcr to the value in
+ * @a val.
+ *
+ * The PowerPC 4XX family has Device Control Registers.
+ */
+#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
+ do { \
+ __asm__ volatile (\
+ "mtdcr " PPC_STRINGOF(dcr) ", %0" \
+ : \
+ : "r" (val) \
+ ); \
+ } while (0)
+
+/**
+ * @brief Sets in the Device Control Register with number @a dcr all bits
+ * which are set in @a bits.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ _ISR_Local_disable(level); \
+ val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
+ val |= mybits; \
+ PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
+/**
+ * @brief Sets in the Device Control Register with number @a dcr all bits
+ * which are set in @a bits. The previous register value will be masked with
+ * @a mask.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ uint32_t mymask = mask; \
+ _ISR_Local_disable(level); \
+ val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
+ val &= ~mymask; \
+ val |= mybits; \
+ PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
+/**
+ * @brief Clears in the Device Control Register with number @a dcr all bits
+ * which are set in @a bits.
+ *
+ * Interrupts are disabled throughout this operation.
+ */
+#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
+ do { \
+ ISR_Level level; \
+ uint32_t val; \
+ uint32_t mybits = bits; \
+ _ISR_Local_disable(level); \
+ val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
+ val &= ~mybits; \
+ PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
+ _ISR_Local_enable(level); \
+ } while (0)
+
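+/* Usage sketch (PowerPC 4XX only): the DCR number must likewise be a
+ * compile-time constant.  The register number below is hypothetical.
+ *
+ *   uint32_t cfg = PPC_DEVICE_CONTROL_REGISTER(0x80);
+ *   PPC_SET_DEVICE_CONTROL_REGISTER_BITS(0x80, 0x1);
+ */
+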
+static inline uint32_t ppc_time_base(void)
+{
+ uint32_t val;
+
+ CPU_Get_timebase_low(val);
+
+ return val;
+}
+
+static inline void ppc_set_time_base(uint32_t val)
+{
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWL, val);
+}
+
+static inline uint32_t ppc_time_base_upper(void)
+{
+ return PPC_SPECIAL_PURPOSE_REGISTER(TBRU);
+}
+
+static inline void ppc_set_time_base_upper(uint32_t val)
+{
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWU, val);
+}
+
+static inline uint64_t ppc_time_base_64(void)
+{
+ return PPC_Get_timebase_register();
+}
+
+static inline void ppc_set_time_base_64(uint64_t val)
+{
+ PPC_Set_timebase_register(val);
+}
+
+static inline uint32_t ppc_alternate_time_base(void)
+{
+ return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL);
+}
+
+static inline uint32_t ppc_alternate_time_base_upper(void)
+{
+ return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU);
+}
+
+static inline uint64_t ppc_alternate_time_base_64(void)
+{
+ uint32_t atbl;
+ uint32_t atbu_0;
+ uint32_t atbu_1;
+
+ do {
+ atbu_0 = ppc_alternate_time_base_upper();
+ atbl = ppc_alternate_time_base();
+ atbu_1 = ppc_alternate_time_base_upper();
+ } while (atbu_0 != atbu_1);
+
+ return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
+}
+
+static inline uint32_t ppc_processor_id(void)
+{
+ return PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR);
+}
+
+static inline void ppc_set_processor_id(uint32_t val)
+{
+ PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
+}
+
+static inline uint32_t ppc_fsl_system_version(void)
+{
+ return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR);
+}
+
+static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
+{
+ return (svr >> 28) & 0xf;
+}
+
+static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
+{
+ return (svr >> 16) & 0xfff;
+}
+
+static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
+{
+ return (svr >> 12) & 0xf;
+}
+
+static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
+{
+ return (svr >> 8) & 0xf;
+}
+
+static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
+{
+ return (svr >> 4) & 0xf;
+}
+
+static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
+{
+ return (svr >> 0) & 0xf;
+}
+
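+/* Decoding sketch: split a Freescale system version register value into
+ * its fields.
+ *
+ *   uint32_t svr = ppc_fsl_system_version();
+ *   uint32_t mjrev = ppc_fsl_system_version_mjrev(svr);
+ *   uint32_t mnrev = ppc_fsl_system_version_mnrev(svr);
+ */
+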
+void ppc_code_copy(void *dest, const void *src, size_t n);
+
+/* FIXME: Do not use this function */
+void printBAT(int bat, uint32_t upper, uint32_t lower);
+
+/* FIXME: Do not use this function */
+void ShowBATS(void);
+
+#endif /* ifndef ASM */
+
+#if defined(ASM)
+#include <rtems/asm.h>
+
+.macro LA reg, addr
+#if defined(__powerpc64__)
+ lis \reg, (\addr)@highest
+ ori \reg, \reg, (\addr)@higher
+ rldicr \reg, \reg, 32, 31
+ oris \reg, \reg, (\addr)@h
+ ori \reg, \reg, (\addr)@l
+#else
+ lis \reg, (\addr)@h
+ ori \reg, \reg, (\addr)@l
+#endif
+.endm
+
+.macro LA32 reg, addr
+ lis \reg, (\addr)@h
+ ori \reg, \reg, (\addr)@l
+.endm
+
+.macro LWI reg, value
+ lis \reg, (\value)@h
+ ori \reg, \reg, (\value)@l
+.endm
+
+.macro LW reg, addr
+ lis \reg, \addr@ha
+ lwz \reg, \addr@l(\reg)
+.endm
+
+/*
+ * Tests the bits in reg1 against the bits set in mask.  A match is indicated
+ * by EQ = 1 in CR0.  A mismatch is indicated by EQ = 0 in CR0.  The register
+ * reg2 is used to load the mask.
+ */
+.macro TSTBITS reg1, reg2, mask
+ LWI \reg2, \mask
+ and \reg1, \reg1, \reg2
+ cmplw \reg1, \reg2
+.endm
+
+.macro SETBITS reg1, reg2, mask
+ LWI \reg2, \mask
+ or \reg1, \reg1, \reg2
+.endm
+
+.macro CLRBITS reg1, reg2, mask
+ LWI \reg2, \mask
+ andc \reg1, \reg1, \reg2
+.endm
+
+.macro GLOBAL_FUNCTION name
+ .global \name
+ .type \name, @function
+\name:
+.endm
+
+/*
+ * Obtain interrupt mask
+ */
+.macro GET_INTERRUPT_MASK mask
+ lis \mask, _PPC_INTERRUPT_DISABLE_MASK@h
+ ori \mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
+.endm
+
+/*
+ * Disables all asynchronous exceptions (interrupts) which may cause a context
+ * switch.
+ */
+.macro INTERRUPT_DISABLE level, mask
+ mfmsr \level
+ GET_INTERRUPT_MASK mask=\mask
+ andc \mask, \level, \mask
+ mtmsr \mask
+.endm
+
+/*
+ * Restore previous machine state.
+ */
+.macro INTERRUPT_ENABLE level
+ mtmsr \level
+.endm
+
+.macro SET_SELF_CPU_CONTROL reg_0, reg_1
+#if defined(RTEMS_SMP)
+ /* Use Book E Processor ID Register (PIR) */
+ mfspr \reg_0, 286
+ slwi \reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
+#if defined(__powerpc64__)
+ LA \reg_1, _Per_CPU_Information
+ add \reg_0, \reg_0, \reg_1
+#else
+ addis \reg_0, \reg_0, _Per_CPU_Information@ha
+ addi \reg_0, \reg_0, _Per_CPU_Information@l
+#endif
+ mtspr PPC_PER_CPU_CONTROL_REGISTER, \reg_0
+#endif
+.endm
+
+.macro GET_SELF_CPU_CONTROL reg
+#if defined(RTEMS_SMP)
+ mfspr \reg, PPC_PER_CPU_CONTROL_REGISTER
+#else
+ lis \reg, _Per_CPU_Information@h
+ ori \reg, \reg, _Per_CPU_Information@l
+#endif
+.endm
+
+#define LINKER_SYMBOL(sym) .extern sym
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __LIBCPU_POWERPC_UTILITY_H */
diff --git a/bsps/powerpc/include/libcpu/pte121.h b/bsps/powerpc/include/libcpu/pte121.h
new file mode 100644
index 0000000000..a07e063f03
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/pte121.h
@@ -0,0 +1,265 @@
+#ifndef _LIBCPU_PTE121_H
+#define _LIBCPU_PTE121_H
+
+/*
+ * Authorship
+ * ----------
+ * This software was created by
+ * Till Straumann <strauman@slac.stanford.edu>, 4/2002, 2003, 2004,
+ * Stanford Linear Accelerator Center, Stanford University.
+ *
+ * Acknowledgement of sponsorship
+ * ------------------------------
+ * This software was produced by
+ * the Stanford Linear Accelerator Center, Stanford University,
+ * under Contract DE-AC03-76SFO0515 with the Department of Energy.
+ *
+ * Government disclaimer of liability
+ * ----------------------------------
+ * Neither the United States nor the United States Department of Energy,
+ * nor any of their employees, makes any warranty, express or implied, or
+ * assumes any legal liability or responsibility for the accuracy,
+ * completeness, or usefulness of any data, apparatus, product, or process
+ * disclosed, or represents that its use would not infringe privately owned
+ * rights.
+ *
+ * Stanford disclaimer of liability
+ * --------------------------------
+ * Stanford University makes no representations or warranties, express or
+ * implied, nor assumes any liability for the use of this software.
+ *
+ * Stanford disclaimer of copyright
+ * --------------------------------
+ * Stanford University, owner of the copyright, hereby disclaims its
+ * copyright and all other rights in this software. Hence, anyone may
+ * freely use it for any purpose without restriction.
+ *
+ * Maintenance of notices
+ * ----------------------
+ * In the interest of clarity regarding the origin and status of this
+ * SLAC software, this and all the preceding Stanford University notices
+ * are to remain affixed to any copy or derivative of this software made
+ * or distributed by the recipient and are to be affixed to any copy of
+ * software made or distributed by the recipient that contains a copy or
+ * derivative of this software.
+ *
+ * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
+ */
+
+/* Rudimentary page/hash table support for PowerPC
+ *
+ * A simple, static (i.e. no 'per-process' virtual
+ * address space etc.) page table providing
+ * one-to-one effective <-> virtual <-> physical
+ * address mapping.
+ *
+ * PURPOSE:
+ * 1) allow write-protection of text/read-only data areas
+ * 2) provide more effective-address space in case
+ * the BATs are not enough
+ * 3) allow 'alias' mappings. Such aliases can only use
+ * the upper bits of the VSID since VSID & 0xf and the
+ * PI are always mapped 1:1 to the RPN.
+ * LIMITATIONS:
+ * - no PTE replacement (makes no sense in a real-time
+ * environment, anyway) -> the page table just MUST
+ * be big enough!
+ * - only one page table supported.
+ * - no locking implemented. If multiple threads modify
+ * the page table, it is the user's responsibility to
+ * implement exclusive access.
+ */
+
+
+/* I don't include mmu.h here because it says it's derived from linux
+ * and I want to avoid licensing problems
+ */
+
+/* Abstract handle for a page table */
+typedef struct Triv121PgTblRec_ *Triv121PgTbl;
+
+/* A PTE entry */
+typedef struct PTERec_ {
+ volatile unsigned long v:1, vsid:24, h:1, api: 6;
+ volatile unsigned long rpn:20, pad: 3, r:1, c:1, wimg:4, marked:1, pp:2;
+} PTERec, *APte;
+
+/* Initialize a trivial page table
+ * using 2^ldSize bytes of memory starting at
+ * 'base'.
+ *
+ * RETURNS: a handle to the internal data structure
+ * used to manage the page table. NULL on
+ * error.
+ *
+ * NOTES: - 'base' must be aligned to the size
+ * - minimal ldSize is 16 (== 64k)
+ * - this routine maps the page table itself
+ * with read-only access. While this prevents
+ * the CPU from overwriting the page table,
+ * it can still be corrupted by PCI bus masters
+ * (like DMA engines, [VME] bridges etc.) and
+ * even by this CPU if either the MMU is off
+ * or if there is a DBAT mapping granting write
+ * access...
+ */
+Triv121PgTbl
+triv121PgTblInit(unsigned long base, unsigned ldSize);
+
+/* get the log2 of the minimal page table size needed
+ * for mapping 'size' bytes.
+ *
+ * EXAMPLE: create a page table which maps the entire
+ * physical memory. The page table itself shall
+ * be allocated at the top of the available
+ * memory (assuming 'memsize' is a power of two):
+ *
+ * ldSize = triv121PgTblLdMinSize(memsize);
+ * memsize -= (1<<ldSize); / * reduce memory available to RTEMS * /
+ * pgTbl = triv121PgTblInit(memsize,ldSize);
+ *
+ */
+unsigned long
+triv121PgTblLdMinSize(unsigned long size);
+
+/* Map an address range 1:1 in pgTbl with the given protection;
+ *
+ * RETURNS: -1 (TRIV121_MAP_SUCCESS) on success; the page index
+ * for which no PTE could be allocated, on failure.
+ *
+ * NOTES: - This routine returns MINUS ONE ON SUCCESS
+ * - (parts) of a mapping which overlap with
+ * already existing PTEs are silently ignored.
+ *
+ * Therefore, you can e.g. first create
+ * a couple of write protected maps and
+ * finally map the entire memory r/w. This
+ * will leave the write protected maps
+ * intact.
+ */
+long
+triv121PgTblMap(
+ Triv121PgTbl pgTbl, /* handle, returned by Init or Get */
+
+ long vsid, /* vsid for this mapping (contains topmost 4 bits of EA);
+ *
+ * NOTE: it is allowed to pass a VSID < 0 to tell this
+ * routine it should use a VSID corresponding to a
+ * 1:1:1 effective - virtual - physical mapping
+ */
+
+ unsigned long start, /* segment offset (lowermost 28 bits of EA) of address range
+ *
+ * NOTE: if VSID < 0 (TRIV121_121_VSID), 'start' is inter-
+ * preted as an effective address (EA), i.e. all 32
+ * bits are used - the most significant four going into
+ * the VSID...
+ */
+
+ unsigned long numPages, /* number of pages to map */
+
+ unsigned wimgAttr, /* 'wimg' attributes
+ * (Write thru, cache Inhibit, coherent Memory,
+ * Guarded memory)
+ */
+
+ unsigned protection /* 'pp' access protection: Super User
+ *
+ * 0 r/w none
+ * 1 r/w ro
+ * 2 r/w r/w
+ * 3 ro ro
+ */
+);
+
+#define TRIV121_ATTR_W 8
+#define TRIV121_ATTR_I 4
+#define TRIV121_ATTR_M 2
+#define TRIV121_ATTR_G 1
+
+/* for I/O pages (e.g. PCI, VME addresses) use cache inhibited
+ * and guarded pages. RTM about the 'eieio' instruction!
+ */
+#define TRIV121_ATTR_IO_PAGE (TRIV121_ATTR_I|TRIV121_ATTR_G)
+
+#define TRIV121_PP_RO_PAGE (1) /* read-only for key = 1, unlocked by key=0 */
+#define TRIV121_PP_RW_PAGE (2) /* read-write for key = 1/0 */
+
+#define TRIV121_121_VSID (-1) /* use 1:1 effective<->virtual address mapping */
+#define TRIV121_SEG_VSID (-2) /* lookup VSID in the segment register */
+
+#define TRIV121_MAP_SUCCESS (-1) /* triv121PgTblMap() returns this on SUCCESS */
+
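+/* Usage sketch: given a handle pgTbl from triv121PgTblInit() or
+ * triv121PgTblGet(), map a hypothetical 1 MiB PCI register window 1:1
+ * as a cache-inhibited, guarded, supervisor read/write area (4 kiB
+ * pages, hence 256 pages):
+ *
+ *   if ( triv121PgTblMap( pgTbl, TRIV121_121_VSID, 0xc0000000,
+ *                         256, TRIV121_ATTR_IO_PAGE,
+ *                         TRIV121_PP_RW_PAGE ) != TRIV121_MAP_SUCCESS )
+ *     / * handle the returned page index * /
+ */
+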
+/* get a handle to the one and only page table
+ * (must have been initialized/allocated)
+ *
+ * RETURNS: NULL if the page table has not been initialized/allocated.
+ */
+Triv121PgTbl
+triv121PgTblGet(void);
+
+/*
+ * compute the SDR1 register value for the page table
+ */
+
+unsigned long
+triv121PgTblSDR1(Triv121PgTbl pgTbl);
+
+/*
+ * Activate the page table:
+ * - set up the segment registers for a 1:1 effective <-> virtual address mapping,
+ * give user and supervisor keys.
+ * - set up the SDR1 register
+ * - flush all tlbs
+ * - 'lock' pgTbl, i.e. prevent all further modifications.
+ *
+ * NOTE: This routine does not change any BATs. Since these
+ * have priority over the page table, the user
+ * may have to switch overlapping BATs OFF in order
+ * for the page table mappings to take effect.
+ */
+void triv121PgTblActivate(Triv121PgTbl pgTbl);
+
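+/* A minimal bring-up sketch, continuing the triv121PgTblLdMinSize()
+ * example above (page table at the top of RAM, all memory mapped 1:1
+ * read/write with 4 kiB pages):
+ *
+ *   pgTbl = triv121PgTblInit( memsize, ldSize );
+ *   triv121PgTblMap( pgTbl, TRIV121_121_VSID, 0,
+ *                    memsize >> 12, 0, TRIV121_PP_RW_PAGE );
+ *   triv121PgTblActivate( pgTbl );
+ */
+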
+/* Find the PTE for an EA and print its contents to stdout
+ * RETURNS: pte for EA or NULL if no entry was found.
+ */
+APte triv121DumpEa(unsigned long ea);
+
+/* Find and return a PTE for a vsid/pi combination
+ * RETURNS: pte or NULL if no entry was found
+ */
+APte triv121FindPte(unsigned long vsid, unsigned long pi);
+
+/*
+ * Unmap an effective address
+ *
+ * RETURNS: pte that mapped the ea or NULL if no
+ * mapping existed.
+ */
+APte triv121UnmapEa(unsigned long ea);
+
+/*
+ * Change the WIMG and PP attributes of the page containing 'ea'
+ *
+ * NOTES: The 'wimg' and 'pp' may be <0 to indicate that no
+ * change is desired.
+ *
+ * RETURNS: Pointer to modified PTE or NULL if 'ea' is not mapped.
+ */
+APte triv121ChangeEaAttributes(unsigned long ea, int wimg, int pp);
+
+/* Make the whole page table writable
+ * NOTES: If the page table has not been initialized yet,
+ * this routine has no effect (i.e., after
+ * initialization the page table will still be read-only).
+ */
+void triv121MakePgTblRW(void);
+
+/* Make the whole page table read-only
+ */
+void triv121MakePgTblRO(void);
+
+/* Dump a pte to stdout */
+long triv121DumpPte(APte pte);
+
+#endif
diff --git a/bsps/powerpc/include/libcpu/raw_exception.h b/bsps/powerpc/include/libcpu/raw_exception.h
new file mode 100644
index 0000000000..dd0c483b0d
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/raw_exception.h
@@ -0,0 +1,161 @@
+/*
+ * raw_exception.h
+ *
+ * This file contains declarations of C functions to
+ * instantiate mpc5xx primary exception entries.
+ * More detailed information can be found on the Motorola
+ * site, more precisely in the following book:
+ *
+ * MPC555/MPC556 User's Manual
+ * Motorola REF : MPC555UM/D Rev. 3, 2000 October 15
+ *
+ *
+ * MPC5xx port sponsored by Defence Research and Development Canada - Suffield
+ * Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
+ *
+ * Derived from libcpu/powerpc/mpc8xx/exceptions/raw_exception.h:
+ *
+ * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
+ * Canon Centre Recherche France.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _LIBCPU_RAW_EXCEPTION_H
+#define _LIBCPU_RAW_EXCEPTION_H
+
+#include <libcpu/vectors.h>
+
+/*
+ * Exception Vectors as defined in the MPC555 User's Manual
+ */
+
+#define ASM_RESET_VECTOR 0x01
+#define ASM_MACH_VECTOR 0x02
+
+#define ASM_EXT_VECTOR 0x05
+#define ASM_ALIGN_VECTOR 0x06
+#define ASM_PROG_VECTOR 0x07
+#define ASM_FLOAT_VECTOR 0x08
+#define ASM_DEC_VECTOR 0x09
+
+#define ASM_SYS_VECTOR 0x0C
+#define ASM_TRACE_VECTOR 0x0D
+#define ASM_FLOATASSIST_VECTOR 0x0E
+
+#define ASM_SOFTEMUL_VECTOR 0x10
+
+#define ASM_IPROT_VECTOR 0x13
+#define ASM_DPROT_VECTOR 0x14
+
+#define ASM_DBREAK_VECTOR 0x1C
+#define ASM_IBREAK_VECTOR 0x1D
+#define ASM_MEBREAK_VECTOR 0x1E
+#define ASM_NMEBREAK_VECTOR 0x1F
+
+#define LAST_VALID_EXC ASM_NMEBREAK_VECTOR
+
+#ifndef ASM
+
+/*
+ * Type definition for raw exceptions.
+ */
+
+typedef unsigned char rtems_vector;
+struct __rtems_raw_except_connect_data__;
+typedef unsigned char rtems_raw_except_hdl_size;
+
+typedef struct {
+ rtems_vector vector;
+ rtems_exception_handler_t* raw_hdl;
+}rtems_raw_except_hdl;
+
+typedef void (*rtems_raw_except_enable) (const struct __rtems_raw_except_connect_data__*);
+typedef void (*rtems_raw_except_disable) (const struct __rtems_raw_except_connect_data__*);
+typedef int (*rtems_raw_except_is_enabled) (const struct __rtems_raw_except_connect_data__*);
+
+typedef struct __rtems_raw_except_connect_data__{
+ /*
+ * Exception vector (As defined in the manual)
+ */
+ rtems_vector exceptIndex;
+ /*
+ * Exception raw handler. See comment on handler properties below in function prototype.
+ */
+ rtems_raw_except_hdl hdl;
+  /*
+   * Function for enabling raw exceptions.  To be consistent with the
+   * fact that the raw connection can be defined in the libcpu library,
+   * this library should have no knowledge of board-specific hardware
+   * to manage exceptions, and thus the "on" routine must enable the
+   * exception at the processor level only.
+   */
+  rtems_raw_except_enable on;
+  /*
+   * Function for disabling raw exceptions.  For the same reason, the
+   * "off" routine must disable the exception at the processor level
+   * only.
+   */
+  rtems_raw_except_disable off;
+  /*
+   * Function to determine whether the exception is currently enabled.
+   */
+  rtems_raw_except_is_enabled isOn;
+}rtems_raw_except_connect_data;
+
+typedef struct {
+ /*
+ * size of all the table fields (*Tbl) described below.
+ */
+ unsigned int exceptSize;
+ /*
+ * Default handler used when disconnecting exceptions.
+ */
+ rtems_raw_except_connect_data defaultRawEntry;
+ /*
+   * Table containing the initial/current entries.
+ */
+ rtems_raw_except_connect_data* rawExceptHdlTbl;
+}rtems_raw_except_global_settings;
+
+/*
+ * C callable function to set up one raw exception table entry
+ */
+extern int mpc5xx_set_exception (const rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to get one current raw exception table entry
+ */
+extern int mpc5xx_get_current_exception (rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to remove one current raw exception table entry
+ */
+extern int mpc5xx_delete_exception (const rtems_raw_except_connect_data*);
+
+/*
+ * C callable function to check whether a vector is valid
+ */
+extern int mpc5xx_vector_is_valid(rtems_vector vector);
+
+inline static void* mpc5xx_get_vector_addr(rtems_vector vector)
+{
+ return ((void*) (((unsigned) vector) << 8));
+}
+/*
+ * Exception global init.
+ */
+extern int mpc5xx_init_exceptions (rtems_raw_except_global_settings* config);
+extern int mpc5xx_get_exception_config (rtems_raw_except_global_settings** config);
+
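+/* Connection sketch (the handler and the on/off/isOn routines are
+ * hypothetical; error handling is omitted):
+ *
+ *   rtems_raw_except_connect_data d = {
+ *     .exceptIndex = ASM_ALIGN_VECTOR,
+ *     .hdl         = { ASM_ALIGN_VECTOR, my_handler },
+ *     .on          = my_enable,
+ *     .off         = my_disable,
+ *     .isOn        = my_is_enabled,
+ *   };
+ *
+ *   mpc5xx_set_exception( &d );
+ */
+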
+# endif /* ASM */
+
+#define SIZEOF_
+
+#endif
diff --git a/bsps/powerpc/include/libcpu/spr.h b/bsps/powerpc/include/libcpu/spr.h
new file mode 100644
index 0000000000..6c81d0ee91
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/spr.h
@@ -0,0 +1,78 @@
+/*
+ * spr.h -- Access to special purpose registers.
+ *
+ * Copyright (C) 1998 Gabriel Paubert, paubert@iram.es
+ *
+ * Modified to compile in RTEMS development environment
+ * by Eric Valette
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#ifndef _LIBCPU_SPR_H
+#define _LIBCPU_SPR_H
+
+#include <rtems/powerpc/registers.h>
+
+#define __MFSPR(reg, val) \
+ __asm__ __volatile__("mfspr %0,"#reg : "=r" (val))
+
+#define __MTSPR(val, reg) \
+ __asm__ __volatile__("mtspr "#reg",%0" : : "r" (val))
+
+
+#define SPR_RW(reg) \
+static inline unsigned long _read_##reg(void) \
+{\
+ unsigned long val;\
+ __MFSPR(reg, val);\
+ return val;\
+}\
+static inline void _write_##reg(unsigned long val)\
+{\
+ __MTSPR(val,reg);\
+ return;\
+}
+
+#define SPR_RO(reg) \
+static inline unsigned long _read_##reg(void) \
+{\
+ unsigned long val;\
+ __MFSPR(reg,val);\
+ return val;\
+}
+
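+/* Usage sketch: instantiating a macro generates typed accessors, e.g.
+ * assuming PVR expands to its SPR number (287):
+ *
+ *   SPR_RO(PVR)
+ *
+ *   unsigned long pvr = _read_PVR();
+ */
+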
+static inline unsigned long _read_MSR(void)
+{
+ unsigned long val;
+ asm volatile("mfmsr %0" : "=r" (val));
+ return val;
+}
+
+static inline void _write_MSR(unsigned long val)
+{
+ asm volatile("mtmsr %0" : : "r" (val));
+ return;
+}
+
+static inline unsigned long _read_SR(void * va)
+{
+ unsigned long val;
+ asm volatile("mfsrin %0,%1" : "=r" (val): "r" (va));
+ return val;
+}
+
+static inline void _write_SR(unsigned long val, void * va)
+{
+ asm volatile("mtsrin %0,%1" : : "r"(val), "r" (va): "memory");
+ return;
+}
+
+#endif
diff --git a/bsps/powerpc/include/libcpu/stackTrace.h b/bsps/powerpc/include/libcpu/stackTrace.h
new file mode 100644
index 0000000000..f73dc2eff2
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/stackTrace.h
@@ -0,0 +1,8 @@
+#ifndef _LIBCPU_STACKTRACE_H
+#define _LIBCPU_STACKTRACE_H
+
+void CPU_stack_take_snapshot(void **stack, int size, void *pc, void *lr, void *r1);
+
+void CPU_print_stack(void);
+
+#endif
diff --git a/bsps/powerpc/include/libcpu/vectors.h b/bsps/powerpc/include/libcpu/vectors.h
new file mode 100644
index 0000000000..2c8914e2a4
--- /dev/null
+++ b/bsps/powerpc/include/libcpu/vectors.h
@@ -0,0 +1,115 @@
+/*
+ * vectors.h  Exception frame related constants and API.
+ *
+ * This include file describes the data structures and the functions
+ * implemented by RTEMS to handle exceptions.
+ *
+ *
+ * MPC5xx port sponsored by Defence Research and Development Canada - Suffield
+ * Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
+ *
+ * Derived from libbsp/powerpc/mbx8xx/vectors/vectors.h:
+ *
+ * CopyRight (C) 1999 valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+#ifndef _LIBCPU_VECTORS_H
+#define _LIBCPU_VECTORS_H
+
+
+/*
+ * Size of hardware vector table.
+ */
+#define NUM_EXCEPTIONS 0x20
+
+/*
+ * The callee (high level exception code written in C)
+ * will store the link register (return address) at r1 + 4 on entry,
+ * so leave room for it!
+ */
+#define LINK_REGISTER_CALLEE_UPDATE_ROOM 4
+#define SRR0_FRAME_OFFSET 8
+#define SRR1_FRAME_OFFSET 12
+#define EXCEPTION_NUMBER_OFFSET 16
+#define EXC_CR_OFFSET 20
+#define EXC_CTR_OFFSET 24
+#define EXC_XER_OFFSET 28
+#define EXC_LR_OFFSET 32
+#define GPR0_OFFSET 36
+#define GPR1_OFFSET 40
+#define GPR2_OFFSET 44
+#define GPR3_OFFSET 48
+#define GPR4_OFFSET 52
+#define GPR5_OFFSET 56
+#define GPR6_OFFSET 60
+#define GPR7_OFFSET 64
+#define GPR8_OFFSET 68
+#define GPR9_OFFSET 72
+#define GPR10_OFFSET 76
+#define GPR11_OFFSET 80
+#define GPR12_OFFSET 84
+#define GPR13_OFFSET 88
+#define GPR14_OFFSET 92
+#define GPR15_OFFSET 96
+#define GPR16_OFFSET 100
+#define GPR17_OFFSET 104
+#define GPR18_OFFSET 108
+#define GPR19_OFFSET 112
+#define GPR20_OFFSET 116
+#define GPR21_OFFSET 120
+#define GPR22_OFFSET 124
+#define GPR23_OFFSET 128
+#define GPR24_OFFSET 132
+#define GPR25_OFFSET 136
+#define GPR26_OFFSET 140
+#define GPR27_OFFSET 144
+#define GPR28_OFFSET 148
+#define GPR29_OFFSET 152
+#define GPR30_OFFSET 156
+#define GPR31_OFFSET 160
+/*
+ * Maintain the 8-byte alignment requested by the EABI.
+ * Since the SVR4 ABI requires 16, make it 16 (some
+ * exceptions may need more registers to be processed...)
+ */
+#define EXCEPTION_FRAME_END 176
+
+#ifndef ASM
+
+#include <rtems.h>
+
+/*
+ * default raw exception handlers
+ */
+
+extern void default_exception_vector_code_prolog(void);
+extern int default_exception_vector_code_prolog_size;
+extern void initialize_exceptions(void);
+
+typedef void rtems_exception_handler_t (CPU_Exception_frame* excPtr);
+/*DEBUG typedef rtems_exception_handler_t cpuExcHandlerType; */
+
+/*
+ * Exception handler table.
+ *
+ * This table contains pointers to assembly-language exception handlers.
+ * The common exception prologue in vectors.S looks up an entry in this
+ * table and jumps to it. No return address is saved, so the handlers in
+ * this table must return directly to the interrupted code.
+ *
+ * On entry to an exception handler, R1 points to a new exception stack
+ * frame in which R3, R4, and LR have been saved. R4 holds the exception
+ * number.
+ */
+extern rtems_exception_handler_t* exception_handler_table[NUM_EXCEPTIONS];
+
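+/* Installation sketch (hypothetical handler): entries are plain
+ * function pointers, so a handler for, e.g., the alignment exception
+ * (vector 0x06) can be installed by assignment.  Note the constraint
+ * above: the handler must return directly to the interrupted code.
+ *
+ *   exception_handler_table[0x06] = my_alignment_handler;
+ */
+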
+/* for compatibility -- XXX remove */
+typedef rtems_exception_handler_t *cpuExcHandlerType;
+extern cpuExcHandlerType *globalExceptHdl;
+
+#endif /* ASM */
+
+#endif /* _LIBCPU_VECTORS_H */