summaryrefslogtreecommitdiffstats
path: root/bsps/bfin/shared/cache/cache.c
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--bsps/bfin/shared/cache/cache.c (renamed from c/src/lib/libcpu/bfin/cache/cache.c)39
1 files changed, 24 insertions, 15 deletions
diff --git a/c/src/lib/libcpu/bfin/cache/cache.c b/bsps/bfin/shared/cache/cache.c
index cc60904f8c..ea5061bc9c 100644
--- a/c/src/lib/libcpu/bfin/cache/cache.c
+++ b/bsps/bfin/shared/cache/cache.c
@@ -12,8 +12,16 @@
#include <rtems.h>
#include <bsp.h>
#include <libcpu/memoryRegs.h>
-#include "cache_.h"
+#define CPU_DATA_CACHE_ALIGNMENT 32
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+
+#ifdef BSP_DATA_CACHE_CONFIG
+#define LIBCPU_DATA_CACHE_CONFIG BSP_DATA_CACHE_CONFIG
+#else
+/* use 16K of each SRAM bank */
+#define LIBCPU_DATA_CACHE_CONFIG (3 << DMEM_CONTROL_DMC_SHIFT)
+#endif
/* There are many syncs in the following code because they should be
harmless except for wasting time, and this is easier than figuring out
@@ -21,7 +29,7 @@
buffers and queued reads. Many of them are likely unnecessary. */
-void _CPU_cache_flush_1_data_line(const void *d_addr) {
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {
__asm__ __volatile__ ("ssync; flush [%0]; ssync" :: "a" (d_addr));
}
@@ -32,26 +40,26 @@ void _CPU_cache_flush_1_data_line(const void *d_addr) {
okay since with a pure invalidate method the caller would have no
way to ensure the dirty line hadn't been written out anyway prior
to the invalidate. */
-void _CPU_cache_invalidate_1_data_line(const void *d_addr) {
+static void _CPU_cache_invalidate_1_data_line(const void *d_addr) {
__asm__ __volatile__ ("ssync; flushinv [%0]; ssync" :: "a" (d_addr));
}
-void _CPU_cache_freeze_data(void) {
+static void _CPU_cache_freeze_data(void) {
}
-void _CPU_cache_unfreeze_data(void) {
+static void _CPU_cache_unfreeze_data(void) {
}
-void _CPU_cache_invalidate_1_instruction_line(const void *d_addr) {
+static void _CPU_cache_invalidate_1_instruction_line(const void *d_addr) {
__asm__ __volatile__ ("ssync; iflush [%0]; ssync" :: "a" (d_addr));
}
-void _CPU_cache_freeze_instruction(void) {
+static void _CPU_cache_freeze_instruction(void) {
}
-void _CPU_cache_unfreeze_instruction(void) {
+static void _CPU_cache_unfreeze_instruction(void) {
}
/* incredibly inefficient... It would be better to make use of the
@@ -59,7 +67,7 @@ void _CPU_cache_unfreeze_instruction(void) {
cache line and flush just those. However the documentation I've
seen on those is a bit sketchy, and I sure wouldn't want to get it
wrong. */
-void _CPU_cache_flush_entire_data(void) {
+static void _CPU_cache_flush_entire_data(void) {
uint32_t i;
i = 0;
@@ -71,7 +79,7 @@ void _CPU_cache_flush_entire_data(void) {
__asm__ __volatile__ ("ssync");
}
-void _CPU_cache_invalidate_entire_data(void) {
+static void _CPU_cache_invalidate_entire_data(void) {
uint32_t dmemControl;
__asm__ __volatile__ ("ssync");
@@ -83,21 +91,21 @@ void _CPU_cache_invalidate_entire_data(void) {
/* this does not actually enable data cache unless CPLBs are also enabled.
LIBCPU_DATA_CACHE_CONFIG contains the DMEM_CONTROL_DMC bits to set. */
-void _CPU_cache_enable_data(void) {
+static void _CPU_cache_enable_data(void) {
__asm__ __volatile__ ("ssync");
*(uint32_t volatile *) DMEM_CONTROL |= LIBCPU_DATA_CACHE_CONFIG;
__asm__ __volatile__ ("ssync");
}
-void _CPU_cache_disable_data(void) {
+static void _CPU_cache_disable_data(void) {
__asm__ __volatile__ ("ssync");
*(uint32_t volatile *) DMEM_CONTROL &= ~DMEM_CONTROL_DMC_MASK;
__asm__ __volatile__ ("ssync");
}
-void _CPU_cache_invalidate_entire_instruction(void) {
+static void _CPU_cache_invalidate_entire_instruction(void) {
uint32_t imemControl;
__asm__ __volatile__ ("ssync");
@@ -109,17 +117,18 @@ void _CPU_cache_invalidate_entire_instruction(void) {
/* this only actually enables the instruction cache if the CPLBs are also
enabled. */
-void _CPU_cache_enable_instruction(void) {
+static void _CPU_cache_enable_instruction(void) {
__asm__ __volatile__ ("ssync");
*(uint32_t volatile *) IMEM_CONTROL |= IMEM_CONTROL_IMC;
__asm__ __volatile__ ("ssync");
}
-void _CPU_cache_disable_instruction(void) {
+static void _CPU_cache_disable_instruction(void) {
__asm__ __volatile__ ("ssync");
*(uint32_t volatile *) IMEM_CONTROL &= ~IMEM_CONTROL_IMC;
__asm__ __volatile__ ("ssync");
}
+#include "../../../shared/cache/cacheimpl.h"