summaryrefslogtreecommitdiffstats
path: root/bsps/bfin
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-01-27 14:37:51 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2018-01-31 12:49:09 +0100
commit4cf93658eff5cf6b0c02e98a0d1ec33dea5ed85c (patch)
tree8ce105a37991b79f38da9da31c1cb6ce13ef6beb /bsps/bfin
parentbsps: Move network define to source files (diff)
downloadrtems-4cf93658eff5cf6b0c02e98a0d1ec33dea5ed85c.tar.bz2
bsps: Rework cache manager implementation
The previous cache manager support used a single source file (cache_manager.c) which included an implementation header (cache_.h). This required the use of specialized include paths to find the right header file. Change this to include a generic implementation header (cacheimpl.h) in specialized source files. Use the following directories and files: * bsps/shared/cache * bsps/@RTEMS_CPU@/shared/cache * bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/start/cache.c Update #3285.
Diffstat (limited to 'bsps/bfin')
-rw-r--r-- bsps/bfin/shared/cache/cache.c | 134
1 files changed, 134 insertions, 0 deletions
diff --git a/bsps/bfin/shared/cache/cache.c b/bsps/bfin/shared/cache/cache.c
new file mode 100644
index 0000000000..ea5061bc9c
--- /dev/null
+++ b/bsps/bfin/shared/cache/cache.c
@@ -0,0 +1,134 @@
+/* Blackfin Cache Support
+ *
+ * Copyright (c) 2008 Kallisti Labs, Los Gatos, CA, USA
+ * written by Allan Hessenflow <allanh@kallisti.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+
+#include <rtems.h>
+#include <bsp.h>
+#include <libcpu/memoryRegs.h>
+
+#define CPU_DATA_CACHE_ALIGNMENT 32
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+
+#ifdef BSP_DATA_CACHE_CONFIG
+#define LIBCPU_DATA_CACHE_CONFIG BSP_DATA_CACHE_CONFIG
+#else
+/* use 16K of each SRAM bank */
+#define LIBCPU_DATA_CACHE_CONFIG (3 << DMEM_CONTROL_DMC_SHIFT)
+#endif
+
+/* There are many syncs in the following code because they should be
+ harmless except for wasting time, and this is easier than figuring out
+ exactly where they're needed to protect from the effects of write
+ buffers and queued reads. Many of them are likely unnecessary. */
+
+
+/* Write back the single data cache line containing d_addr (Blackfin FLUSH
+   instruction).  The surrounding ssync barriers drain pending writes before
+   and after the flush. */
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {
+
+  __asm__ __volatile__ ("ssync; flush [%0]; ssync" :: "a" (d_addr));
+}
+
+/* Blackfins can't just invalidate cache; they can only do flush +
+   invalidate (the FLUSHINV instruction).  If the line isn't dirty then this
+   is equivalent to just an invalidate.  Even if it is dirty, this should
+   still be okay since with a pure invalidate method the caller would have no
+   way to insure the dirty line hadn't been written out anyway prior
+   to the invalidate. */
+static void _CPU_cache_invalidate_1_data_line(const void *d_addr) {
+
+  __asm__ __volatile__ ("ssync; flushinv [%0]; ssync" :: "a" (d_addr));
+}
+
+/* Intentional no-op: data cache freezing is not implemented here. */
+static void _CPU_cache_freeze_data(void) {
+}
+
+/* Intentional no-op: counterpart of _CPU_cache_freeze_data. */
+static void _CPU_cache_unfreeze_data(void) {
+}
+
+/* Invalidate the single instruction cache line containing the given address
+   (Blackfin IFLUSH instruction).  NOTE(review): the parameter is named
+   d_addr for symmetry with the data-cache routines, but it is an
+   instruction address here. */
+static void _CPU_cache_invalidate_1_instruction_line(const void *d_addr) {
+
+  __asm__ __volatile__ ("ssync; iflush [%0]; ssync" :: "a" (d_addr));
+}
+
+/* Intentional no-op: instruction cache freezing is not implemented here. */
+static void _CPU_cache_freeze_instruction(void) {
+}
+
+/* Intentional no-op: counterpart of _CPU_cache_freeze_instruction. */
+static void _CPU_cache_unfreeze_instruction(void) {
+}
+
+/* incredibly inefficient...  It would be better to make use of the
+   DTEST_COMMAND/DTEST_DATAx registers to find the addresses in each
+   cache line and flush just those.  However the documentation I've
+   seen on those is a bit sketchy, and I sure wouldn't want to get it
+   wrong. */
+static void _CPU_cache_flush_entire_data(void) {
+  uint32_t i;
+
+  i = 0;
+  __asm__ __volatile__ ("ssync");
+  /* Walk the entire 4 GiB address space one cache line at a time; the
+     32-bit counter wraps back to 0 after the last line, ending the loop. */
+  do {
+    __asm__ __volatile__ ("flush [%0]" :: "a" (i));
+    i += CPU_DATA_CACHE_ALIGNMENT;
+  } while (i);
+  __asm__ __volatile__ ("ssync");
+}
+
+/* Invalidate the whole data cache by momentarily clearing the DMC
+   (data memory configure) field of DMEM_CONTROL and then restoring the
+   previous value.  Note: any dirty lines are discarded, not written back. */
+static void _CPU_cache_invalidate_entire_data(void) {
+  uint32_t dmemControl;
+
+  __asm__ __volatile__ ("ssync");
+  dmemControl = *(uint32_t volatile *) DMEM_CONTROL;
+  *(uint32_t volatile *) DMEM_CONTROL = dmemControl & ~DMEM_CONTROL_DMC_MASK;
+  *(uint32_t volatile *) DMEM_CONTROL = dmemControl;
+  __asm__ __volatile__ ("ssync");
+}
+
+/* this does not actually enable data cache unless CPLBs are also enabled.
+   LIBCPU_DATA_CACHE_CONFIG contains the DMEM_CONTROL_DMC bits to set
+   (either the BSP-provided BSP_DATA_CACHE_CONFIG or the default defined
+   at the top of this file). */
+static void _CPU_cache_enable_data(void) {
+
+  __asm__ __volatile__ ("ssync");
+  *(uint32_t volatile *) DMEM_CONTROL |= LIBCPU_DATA_CACHE_CONFIG;
+  __asm__ __volatile__ ("ssync");
+}
+
+/* Disable the data cache by clearing the DMC field of DMEM_CONTROL.
+   NOTE(review): dirty lines are not flushed first; callers are presumably
+   expected to flush beforehand if needed. */
+static void _CPU_cache_disable_data(void) {
+
+  __asm__ __volatile__ ("ssync");
+  *(uint32_t volatile *) DMEM_CONTROL &= ~DMEM_CONTROL_DMC_MASK;
+  __asm__ __volatile__ ("ssync");
+}
+
+/* Invalidate the whole instruction cache by momentarily clearing the IMC
+   (instruction memory configure) bit of IMEM_CONTROL and restoring the
+   previous value. */
+static void _CPU_cache_invalidate_entire_instruction(void) {
+  uint32_t imemControl;
+
+  __asm__ __volatile__ ("ssync");
+  imemControl = *(uint32_t volatile *) IMEM_CONTROL;
+  *(uint32_t volatile *) IMEM_CONTROL = imemControl & ~IMEM_CONTROL_IMC;
+  *(uint32_t volatile *) IMEM_CONTROL = imemControl;
+  __asm__ __volatile__ ("ssync");
+}
+
+/* this only actually enables the instruction cache if the CPLBs are also
+   enabled.  Sets the IMC bit in IMEM_CONTROL. */
+static void _CPU_cache_enable_instruction(void) {
+
+  __asm__ __volatile__ ("ssync");
+  *(uint32_t volatile *) IMEM_CONTROL |= IMEM_CONTROL_IMC;
+  __asm__ __volatile__ ("ssync");
+}
+
+/* Disable the instruction cache by clearing the IMC bit of IMEM_CONTROL. */
+static void _CPU_cache_disable_instruction(void) {
+
+  __asm__ __volatile__ ("ssync");
+  *(uint32_t volatile *) IMEM_CONTROL &= ~IMEM_CONTROL_IMC;
+  __asm__ __volatile__ ("ssync");
+}
+
+#include "../../../shared/cache/cacheimpl.h"