commit    61bd030179f252d14f9639cf4921eb4eee07b5ef
tree      eca78226b814ad9e50f0d73fbe00ffaf67ab61e0
parent    Must now pull in cache management code from libcpu.
author    Joel Sherrill <joel.sherrill@OARcorp.com>  2000-06-14 15:52:24 +0000
committer Joel Sherrill <joel.sherrill@OARcorp.com>  2000-06-14 15:52:24 +0000
Moved PowerPC cache management code to libcpu. Also compiled the
mpc8xx libcpu support for the first time and removed includes of bsp.h, references to BSP_Configuration, and references to Cpu_table. All of these can now be obtained directly from RTEMS.
Diffstat
-rw-r--r--  cpukit/score/cpu/powerpc/rtems/score/ppc.h | 151 ----------
1 file changed, 0 insertions(+), 151 deletions(-)
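For context, the _CPU_* primitives deleted below sit underneath the public RTEMS cache manager directives (rtems_cache_*); after this change the PowerPC implementations live in libcpu rather than in score, but the public layer is unchanged. A minimal usage sketch of that stable layer (the buffer name and size are illustrative, not from this commit):

    #include <rtems.h>

    static unsigned char dma_buffer[ 512 ];  /* illustrative DMA-bound region */

    void prepare_buffer_for_dma( void )
    {
      /* Write dirty data cache lines covering the buffer back to memory.
       * On the mpc8xx this bottoms out in a dcbf per cache line, exactly
       * what _CPU_flush_1_data_cache_line in the deleted code performed. */
      rtems_cache_flush_multiple_data_lines( dma_buffer, sizeof( dma_buffer ) );
    }
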
diff --git a/cpukit/score/cpu/powerpc/rtems/score/ppc.h b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
index 682675efde..cdff768d7d 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/ppc.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/ppc.h
@@ -384,157 +384,6 @@ extern "C" {
#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
#endif
-#ifndef ASM
-
-/*
- * CACHE MANAGER: The following functions are CPU-specific.
- * They provide the basic implementation for the rtems_* cache
- * management routines. If a given function has no meaning for the CPU,
- * it does nothing by default.
- *
- * FIXME: Some functions simply have not been implemented.
- */
-
-#if defined(ppc603) /* And possibly others */
-#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
-#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
-
-/* Helpful macros */
-#define PPC_Get_HID0( _value ) \
- do { \
- _value = 0; /* to avoid warnings */ \
- asm volatile( \
- "mfspr %0, 0x3f0;" /* get HID0 */ \
- "isync" \
- : "=r" (_value) \
- : "0" (_value) \
- ); \
- } while (0)
-
-#define PPC_Set_HID0( _value ) \
- do { \
- asm volatile( \
- "isync;" \
- "mtspr 0x3f0, %0;" /* load HID0 */ \
- "isync" \
- : "=r" (_value) \
- : "0" (_value) \
- ); \
- } while (0)
-
-static inline void _CPU_enable_data_cache (
- void )
-{
- unsigned32 value;
- PPC_Get_HID0( value );
- value |= 0x00004000; /* set DCE bit */
- PPC_Set_HID0( value );
-}
-
-static inline void _CPU_disable_data_cache (
- void )
-{
- unsigned32 value;
- PPC_Get_HID0( value );
- value &= 0xFFFFBFFF; /* clear DCE bit */
- PPC_Set_HID0( value );
-}
-
-static inline void _CPU_enable_inst_cache (
- void )
-{
- unsigned32 value;
- PPC_Get_HID0( value );
- value |= 0x00008000; /* Set ICE bit */
- PPC_Set_HID0( value );
-}
-
-static inline void _CPU_disable_inst_cache (
- void )
-{
- unsigned32 value;
- PPC_Get_HID0( value );
- value &= 0xFFFF7FFF; /* Clear ICE bit */
- PPC_Set_HID0( value );
-}
-
-#elif ( defined(mpc860) || defined(mpc821) )
-
-#define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
-#define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
-
-#define mtspr(_spr,_reg) __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
-#define isync __asm__ volatile ("isync\n"::)
-
-static inline void _CPU_flush_1_data_cache_line(
- const void * _address )
-{
- register const void *__address = _address;
- asm volatile ( "dcbf 0,%0" :: "r" (__address) );
-}
-
-static inline void _CPU_invalidate_1_data_cache_line(
- const void * _address )
-{
- register const void *__address = _address;
- asm volatile ( "dcbi 0,%0" :: "r" (__address) );
-}
-
-static inline void _CPU_flush_entire_data_cache ( void ) {}
-static inline void _CPU_invalidate_entire_data_cache ( void ) {}
-static inline void _CPU_freeze_data_cache ( void ) {}
-static inline void _CPU_unfreeze_data_cache ( void ) {}
-
-static inline void _CPU_enable_data_cache (
- void )
-{
- unsigned32 r1;
- r1 = (0x2<<24);
- mtspr( 568, r1 );
- isync;
-}
-
-static inline void _CPU_disable_data_cache (
- void )
-{
- unsigned32 r1;
- r1 = (0x4<<24);
- mtspr( 568, r1 );
- isync;
-}
-
-static inline void _CPU_invalidate_1_inst_cache_line(
- const void * _address )
-{
- register const void *__address = _address;
- asm volatile ( "icbi 0,%0" :: "r" (__address) );
-}
-
-static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
-static inline void _CPU_freeze_inst_cache ( void ) {}
-static inline void _CPU_unfreeze_inst_cache ( void ) {}
-
-static inline void _CPU_enable_inst_cache (
- void )
-{
- unsigned32 r1;
- r1 = (0x2<<24);
- mtspr( 560, r1 );
- isync;
-}
-
-static inline void _CPU_disable_inst_cache (
- void )
-{
- unsigned32 r1;
- r1 = (0x4<<24);
- mtspr( 560, r1 );
- isync;
-}
-#endif
-
-#endif /* !ASM */
-
/*
* Unless otherwise specified, assume the model has an IP/EP bit to
* set the exception address prefix.
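
A note on the deleted ppc603 branch: HID0 is SPR 0x3f0 (1008), and the masks used there are the ICE (0x00008000) and DCE (0x00004000) cache enable bits. The deleted PPC_Set_HID0 macro declares its operand as an output ("=r") tied to an input ("0"), which assembles but is conventionally written as a plain input. A standalone sketch of the same HID0 read-modify-write, for illustration only (this is not the code that landed in libcpu):

    #define HID0_ICE 0x00008000  /* instruction cache enable */
    #define HID0_DCE 0x00004000  /* data cache enable */

    static inline unsigned long ppc_get_hid0( void )
    {
      unsigned long value;
      __asm__ volatile ( "mfspr %0, 0x3f0; isync" : "=r" (value) );
      return value;
    }

    static inline void ppc_set_hid0( unsigned long value )
    {
      /* Plain input operand; no output needs to be declared. */
      __asm__ volatile ( "isync; mtspr 0x3f0, %0; isync" : : "r" (value) );
    }

    static inline void ppc_enable_data_cache( void )
    {
      ppc_set_hid0( ppc_get_hid0() | HID0_DCE );  /* set DCE, as above */
    }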
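
Similarly, on the mpc860/mpc821 branch the SPR numbers are the 8xx cache control/status registers: 560 is IC_CST and 568 is DC_CST, and the values written are commands in the top byte, 0x2<<24 to enable and 0x4<<24 to disable. Treat the named constants below as an assumption (I have not re-verified the command encodings against the MPC860 user's manual); the sketch simply restates the deleted sequence with names in place of magic numbers:

    #define MPC8XX_CACHE_CMD_ENABLE   ( 0x2UL << 24 )  /* assumed CMD encoding */
    #define MPC8XX_CACHE_CMD_DISABLE  ( 0x4UL << 24 )  /* assumed CMD encoding */

    static inline void mpc8xx_data_cache_cmd( unsigned long cmd )
    {
      /* DC_CST is SPR 568; the SPR number must be an immediate. */
      __asm__ volatile ( "mtspr 568, %0; isync" : : "r" (cmd) );
    }

    static inline void mpc8xx_inst_cache_cmd( unsigned long cmd )
    {
      /* IC_CST is SPR 560. */
      __asm__ volatile ( "mtspr 560, %0; isync" : : "r" (cmd) );
    }

For example, mpc8xx_data_cache_cmd( MPC8XX_CACHE_CMD_ENABLE ) mirrors the deleted _CPU_enable_data_cache.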