Diffstat (limited to 'c/src/exec/score/cpu/i386/rtems')
-rw-r--r--  c/src/exec/score/cpu/i386/rtems/score/i386.h | 136
1 file changed, 136 insertions, 0 deletions
diff --git a/c/src/exec/score/cpu/i386/rtems/score/i386.h b/c/src/exec/score/cpu/i386/rtems/score/i386.h
index f113ebee4e..ca1af66fff 100644
--- a/c/src/exec/score/cpu/i386/rtems/score/i386.h
+++ b/c/src/exec/score/cpu/i386/rtems/score/i386.h
@@ -144,6 +144,142 @@ static inline unsigned int i386_swap_U16(
}
+/*
+ * Added for paging management
+ */
+
+static inline unsigned int i386_get_cr0(void)
+{
+ register unsigned int value;
+
+ asm volatile ( "movl %%cr0,%0" : "=r" (value) );
+
+ return value;
+}
+
+static inline void i386_set_cr0(unsigned int value)
+{
+ asm volatile ( "movl %0,%%cr0" : : "r" (value) : "memory" );
+}
+
+/* CR2 holds the linear address that caused the last page fault */
+static inline unsigned int i386_get_cr2(void)
+{
+ register unsigned int value;
+
+ asm volatile ( "movl %%cr2,%0" : "=r" (value) );
+
+ return value;
+}
+
+/* CR3 holds the physical base address of the page directory */
+static inline unsigned int i386_get_cr3(void)
+{
+ register unsigned int value;
+
+ asm volatile ( "movl %%cr3,%0" : "=r" (value) );
+
+ return value;
+}
+
+static inline void i386_set_cr3(unsigned int value)
+{
+ asm volatile ( "movl %0,%%cr3" : : "r" (value) : "memory" );
+}
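+
+/*
+ * The cache routines below read and write CR0 through a `cr0' union
+ * that overlays named bit fields on the raw register value.  Its
+ * definition is not part of this patch (it presumably comes from
+ * <rtems/score/registers.h>); the sketch below only illustrates the
+ * layout they assume, with the NW (not write-through, bit 29) and CD
+ * (cache disable, bit 30) flags of CR0 mapped to the two fields used
+ * here.  Exact field names and bit-field ordering (LSB-first is
+ * assumed, as GCC allocates on i386) are assumptions, not code from
+ * this patch:
+ *
+ *   typedef union {
+ *     struct {
+ *       unsigned int protection_enable        : 1;   PE, bit 0
+ *       unsigned int monitor_coprocessor      : 1;   MP, bit 1
+ *       unsigned int coprocessor_emulation    : 1;   EM, bit 2
+ *       unsigned int task_switched            : 1;   TS, bit 3
+ *       unsigned int extension_type           : 1;   ET, bit 4
+ *       unsigned int numeric_error            : 1;   NE, bit 5
+ *       unsigned int reserved1                : 10;  bits 6-15
+ *       unsigned int write_protect            : 1;   WP, bit 16
+ *       unsigned int reserved2                : 1;   bit 17
+ *       unsigned int alignment_mask           : 1;   AM, bit 18
+ *       unsigned int reserved3                : 10;  bits 19-28
+ *       unsigned int no_write_through         : 1;   NW, bit 29
+ *       unsigned int page_level_cache_disable : 1;   CD, bit 30
+ *       unsigned int paging                   : 1;   PG, bit 31
+ *     } cr0;
+ *     unsigned int i;
+ *   } cr0;
+ */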
+
+/*
+ * Disable the entire cache
+ */
+static inline void _CPU_disable_cache(void) {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 1;
+ regCr0.cr0.no_write_through = 1;
+ i386_set_cr0( regCr0.i );
+ rtems_flush_entire_data_cache();
+}
+
+/*
+ * Enable the entire cache
+ */
+static inline void _CPU_enable_cache(void) {
+ cr0 regCr0;
+
+ regCr0.i = i386_get_cr0();
+ regCr0.cr0.page_level_cache_disable = 0;
+ regCr0.cr0.no_write_through = 0;
+ i386_set_cr0( regCr0.i );
+ /*rtems_flush_entire_data_cache();*/
+}
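+
+/*
+ * Typical usage is to bracket a region that must execute with caching
+ * off (a sketch only, not part of this patch):
+ *
+ *   _CPU_disable_cache();
+ *   ...code that must run uncached...
+ *   _CPU_enable_cache();
+ */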
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
+ * each CPU. The routines below should be implemented per CPU,
+ * to accommodate the capabilities of each.
+ */
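+
+/*
+ * As a sketch of the intended layering: each CPU-independent cache
+ * manager routine is expected to call straight through to the hook of
+ * the same name here, e.g.
+ *
+ *   void rtems_flush_entire_data_cache( void )
+ *   {
+ *     _CPU_flush_entire_data_cache( 0 );
+ *   }
+ *
+ * (The wrapper shown is an assumption about the cache manager layer,
+ * not code from this patch.)
+ */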
+
+/* FIXME: I don't belong here. */
+#define I386_CACHE_ALIGNMENT 16
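+
+/*
+ * The alignment gives the cache line size in bytes; a per-line routine
+ * would, for example, round an address down to its enclosing line with
+ * (sketch only):
+ *
+ *   addr &= ~(unsigned int)(I386_CACHE_ALIGNMENT - 1);
+ */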
+
+#if defined(I386_CACHE_ALIGNMENT)
+#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+#define _CPU_INST_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+
+/*
+ * This port cannot flush or invalidate individual cache lines, nor
+ * freeze the caches, so these hooks are no-ops.
+ */
+static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
+static inline void _CPU_freeze_data_cache (void) {}
+static inline void _CPU_unfreeze_data_cache (void) {}
+static inline void _CPU_invalidate_1_inst_cache_line (const void * i_addr) {}
+static inline void _CPU_freeze_inst_cache (void) {}
+static inline void _CPU_unfreeze_inst_cache (void) {}
+
+static inline void _CPU_flush_entire_data_cache (
+ const void * d_addr )
+{
+ /* write back all modified lines, then invalidate the entire cache */
+ asm volatile ("wbinvd" : : : "memory");
+}
+static inline void _CPU_invalidate_entire_data_cache (
+ const void * d_addr )
+{
+ /* invalidate without write back; modified lines are discarded */
+ asm volatile ("invd" : : : "memory");
+}
+
+static inline void _CPU_enable_data_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_data_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+
+static inline void _CPU_invalidate_entire_inst_cache (
+ const void * i_addr )
+{
+ /* NOTE: invd also discards, rather than writes back, modified data lines */
+ asm volatile ("invd" : : : "memory");
+}
+
+static inline void _CPU_enable_inst_cache (
+ void )
+{
+ _CPU_enable_cache();
+}
+
+static inline void _CPU_disable_inst_cache (
+ void )
+{
+ _CPU_disable_cache();
+}
+#endif
+
+
/* routines */
/*