diff options
author | Joel Sherrill <joel.sherrill@OARcorp.com> | 2000-06-12 19:57:02 +0000 |
---|---|---|
committer | Joel Sherrill <joel.sherrill@OARcorp.com> | 2000-06-12 19:57:02 +0000 |
commit | 8ef38186faea3d9b5e6f0f1242f668cb7e7a3d52 (patch) | |
tree | 9253f150814c99167239f7c2cc850cdd9d92c003 /cpukit/score/cpu/i386/rtems/score/i386.h | |
parent | Ensure that when -msoft-float is specified for multilib builds, that (diff) | |
download | rtems-8ef38186faea3d9b5e6f0f1242f668cb7e7a3d52.tar.bz2 |
Patch from John Cotton <john.cotton@nrc.ca>, Charles-Antoine Gauthier
<charles.gauthier@iit.nrc.ca>, and Darlene A. Stewart
<Darlene.Stewart@nrc.ca> to add support for a number of very
significant things:
+ BSPs for many variations on the Motorola MBX8xx board series
+ Cache Manager including initial support for m68040
and PowerPC
+ Rework of mpc8xx libcpu code so all mpc8xx CPUs now use
same code base.
+ Rework of eth_comm BSP to utilize above.
John reports this works on the 821 and 860
Diffstat (limited to '')
-rw-r--r-- | cpukit/score/cpu/i386/rtems/score/i386.h | 136 |
1 files changed, 136 insertions, 0 deletions
diff --git a/cpukit/score/cpu/i386/rtems/score/i386.h b/cpukit/score/cpu/i386/rtems/score/i386.h index f113ebee4e..ca1af66fff 100644 --- a/cpukit/score/cpu/i386/rtems/score/i386.h +++ b/cpukit/score/cpu/i386/rtems/score/i386.h @@ -144,6 +144,142 @@ static inline unsigned int i386_swap_U16( } +/* + * Added for pagination management + */ + +static inline unsigned int i386_get_cr0() +{ + register unsigned int segment = 0; + + asm volatile ( "movl %%cr0,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline void i386_set_cr0(unsigned int segment) +{ + asm volatile ( "movl %0,%%cr0" : "=r" (segment) : "0" (segment) ); +} + +static inline unsigned int i386_get_cr2() +{ + register unsigned int segment = 0; + + asm volatile ( "movl %%cr2,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline unsigned int i386_get_cr3() +{ + register unsigned int segment = 0; + + asm volatile ( "movl %%cr3,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline void i386_set_cr3(unsigned int segment) +{ + asm volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) ); +} + +/* + * Disable the entire cache + */ +void _CPU_disable_cache() { + cr0 regCr0; + + regCr0.i = i386_get_cr0(); + regCr0.cr0.page_level_cache_disable = 1; + regCr0.cr0.no_write_through = 1; + i386_set_cr0( regCr0.i ); + rtems_flush_entire_data_cache(); +} + +/* + * Enable the entire cache + */ +static inline void _CPU_enable_cache() { + cr0 regCr0; + + regCr0.i = i386_get_cr0(); + regCr0.cr0.page_level_cache_disable = 0; + regCr0.cr0.no_write_through = 0; + i386_set_cr0( regCr0.i ); + /*rtems_flush_entire_data_cache();*/ +} + +/* + * CACHE MANAGER: The following functions are CPU-specific. + * They provide the basic implementation for the rtems_* cache + * management routines. If a given function has no meaning for the CPU, + * it does nothing by default. + * + * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for + * each CPU. 
The routines below should be implemented per CPU, + * to accomodate the capabilities of each. + */ + +/* FIXME: I don't belong here. */ +#define I386_CACHE_ALIGNMENT 16 + +#if defined(I386_CACHE_ALIGNMENT) +#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT +#define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT + +static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {} +static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {} +static inline void _CPU_freeze_data_cache (void) {} +static inline void _CPU_unfreeze_data_cache (void) {} +static inline void _CPU_invalidate_1_inst_cache_line const void * d_addr() {} +static inline void _CPU_freeze_inst_cache (void) {} +static inline void _CPU_unfreeze_inst_cache (void) {} + +static inline void _CPU_flush_entire_data_cache ( + const void * d_addr ) +{ + asm ("wbinvd"); +} +static inline void _CPU_invalidate_entire_data_cache ( + const void * d_addr ) +{ + asm ("invd"); +} + +static inline void _CPU_enable_data_cache ( + void ) +{ + _CPU_enable_cache(); +} + +static inline void _CPU_disable_data_cache ( + void ) +{ + _CPU_disable_cache(); +} + +static inline void _CPU_invalidate_entire_inst_cache ( + const void * i_addr ) +{ + asm ("invd"); +} + +static inline void _CPU_enable_inst_cache ( + void ) +{ + _CPU_enable_cache(); +} + +static inline void _CPU_disable_inst_cache ( + void ) +{ + _CPU_disable_cache(); +} +#endif + + /* routines */ /* |