summary refs log tree commit diff stats
path: root/cpukit/score/cpu/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'cpukit/score/cpu/powerpc')
-rw-r--r--  cpukit/score/cpu/powerpc/asm.h               |  10
-rw-r--r--  cpukit/score/cpu/powerpc/rtems/asm.h         |  10
-rw-r--r--  cpukit/score/cpu/powerpc/rtems/score/ppc.h   | 157
3 files changed, 175 insertions(+), 2 deletions(-)
diff --git a/cpukit/score/cpu/powerpc/asm.h b/cpukit/score/cpu/powerpc/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/cpukit/score/cpu/powerpc/asm.h
+++ b/cpukit/score/cpu/powerpc/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
* Following must be tailor for a particular flavor of the C compiler.
diff --git a/cpukit/score/cpu/powerpc/rtems/asm.h b/cpukit/score/cpu/powerpc/rtems/asm.h
index af14c95665..3c2e28ad5c 100644
--- a/cpukit/score/cpu/powerpc/rtems/asm.h
+++ b/cpukit/score/cpu/powerpc/rtems/asm.h
@@ -164,14 +164,20 @@
*/
#define srr0 0x01a
#define srr1 0x01b
+#ifdef ppc403
#define srr2 0x3de /* IBM 400 series only */
#define srr3 0x3df /* IBM 400 series only */
+#endif /* ppc403 */
+
#define sprg0 0x110
#define sprg1 0x111
#define sprg2 0x112
#define sprg3 0x113
+#define dar 0x013 /* Data Address Register */
+#define dec 0x016 /* Decrementer Register */
+#if defined(ppc403)
/* the following SPR/DCR registers exist only in IBM 400 series */
#define dear 0x3d5
#define evpr 0x3d6 /* SPR: exception vector prefix register */
@@ -190,9 +196,13 @@
#define br7 0x087 /* DCR: memory bank register 7 */
/* end of IBM400 series register definitions */
+#elif defined(mpc860) || defined(mpc821)
/* The following registers are for the MPC8x0 */
#define der 0x095 /* Debug Enable Register */
+#define ictrl 0x09E /* Instruction Support Control Register */
+#define immr 0x27E /* Internal Memory Map Register */
/* end of MPC8x0 registers */
+#endif
/*
* Following must be tailor for a particular flavor of the C compiler.
diff --git a/cpukit/score/cpu/powerpc/rtems/score/ppc.h b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
index 83a54bfa25..682675efde 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/ppc.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
@@ -44,6 +44,8 @@
extern "C" {
#endif
+#include <rtems/score/ppctypes.h>
+
/*
* Define the name of the CPU family.
*/
@@ -220,6 +222,7 @@ extern "C" {
#elif defined(mpc860)
/*
* Added by Jay Monkman (jmonkman@frasca.com) 6/28/98
+ * with some changes by Darlene Stewart (Darlene.Stewart@iit.nrc.ca)
*/
#define CPU_MODEL_NAME "PowerPC MPC860"
@@ -231,7 +234,6 @@ extern "C" {
#define PPC_HAS_FPU 0
#define PPC_HAS_DOUBLE 0
#define PPC_USE_MULTIPLE 1
-#define PPC_USE_SPRG 1
#define PPC_MSR_0 0x00009000
#define PPC_MSR_1 0x00001000
@@ -382,6 +384,157 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif
+#ifndef ASM
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#if defined(ppc603) /* And possibly others */
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+/* Helpful macros */
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ asm volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ asm volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00004000; /* set DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFFBFFF; /* clear DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value |= 0x00008000; /* Set ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFF7FFF; /* Clear ICE bit */
+ PPC_Set_HID0( value );
+}
+
+#elif ( defined(mpc860) || defined(mpc821) )
+
+#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+
+#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
+#define isync __asm__ volatile ("isync\n"::)
+
+static inline void _CPU_flush_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbf 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_1_data_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "dcbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_flush_entire_data_cache ( void ) {}
+static inline void _CPU_invalidate_entire_data_cache ( void ) {}
+static inline void _CPU_freeze_data_cache ( void ) {}
+static inline void _CPU_unfreeze_data_cache ( void ) {}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_invalidate_1_inst_cache_line(
+ const void * _address )
+{
+ register const void *__address = _address;
+ asm volatile ( "icbi 0,%0" :: "r" (__address) );
+}
+
+static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
+static inline void _CPU_freeze_inst_cache ( void ) {}
+static inline void _CPU_unfreeze_inst_cache ( void ) {}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x2<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ unsigned32 r1;
+ r1 = (0x4<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+#endif
+
+#endif /* !ASM */
+
/*
* Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix.
@@ -550,7 +703,7 @@ extern "C" {
#define PPC_IRQ_LVL6 (PPC_STD_IRQ_LAST + 23)
#define PPC_IRQ_IRQ7 (PPC_STD_IRQ_LAST + 24)
#define PPC_IRQ_LVL7 (PPC_STD_IRQ_LAST + 25)
-#define PPC_IRQ_CPM_RESERVED_0 (PPC_STD_IRQ_LAST + 26)
+#define PPC_IRQ_CPM_ERROR (PPC_STD_IRQ_LAST + 26)
#define PPC_IRQ_CPM_PC4 (PPC_STD_IRQ_LAST + 27)
#define PPC_IRQ_CPM_PC5 (PPC_STD_IRQ_LAST + 28)
#define PPC_IRQ_CPM_SMC2 (PPC_STD_IRQ_LAST + 29)