author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-27 14:37:51 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-31 12:49:09 +0100
commit     4cf93658eff5cf6b0c02e98a0d1ec33dea5ed85c (patch)
tree       8ce105a37991b79f38da9da31c1cb6ce13ef6beb /bsps
parent     bsps: Move network define to source files (diff)
bsps: Rework cache manager implementation
The previous cache manager support used a single source file
(cache_manager.c) which included an implementation header (cache_.h).
This required the use of specialized include paths to find the right
header file.  Change this to include a generic implementation header
(cacheimpl.h) in specialized source files.

Use the following directories and files:

* bsps/shared/cache

* bsps/@RTEMS_CPU@/shared/cache

* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/start/cache.c

Update #3285.
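In the new layout each specialized source file defines the _CPU_cache_* hooks
it supports and pulls in the generic implementation header at the bottom. A
minimal sketch of the pattern, shown as a hypothetical BSP start/cache.c (the
hook names and the cacheimpl.h include come from this patch; the line sizes
and the stub body are illustrative):

  /* start/cache.c -- illustrative BSP cache support file */
  #include <rtems.h>

  /* Line sizes consumed by the generic implementation */
  #define CPU_DATA_CACHE_ALIGNMENT 32
  #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32

  static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
  {
    /* CPU-specific data cache line flush goes here */
    (void) d_addr;
  }

  /* ...the remaining _CPU_cache_* hooks for this CPU... */

  /* Pull in the generic cache manager implementation */
  #include "../../../shared/cache/cacheimpl.h"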
Diffstat (limited to 'bsps')
-rw-r--r--  bsps/arm/shared/cache/cache-cp15.c       |  184
-rw-r--r--  bsps/arm/shared/cache/cache-cp15.h       |  407
-rw-r--r--  bsps/arm/shared/cache/cache-l2c-310.c    | 1333
-rw-r--r--  bsps/arm/shared/cache/cache-v7m.c        |  141
-rw-r--r--  bsps/bfin/shared/cache/cache.c           |  134
-rw-r--r--  bsps/i386/shared/cache/cache.c           |   93
-rw-r--r--  bsps/m68k/genmcf548x/start/cache.c       |  113
-rw-r--r--  bsps/m68k/shared/cache/cache-mcf5223x.c  |   38
-rw-r--r--  bsps/m68k/shared/cache/cache-mcf5225x.c  |   34
-rw-r--r--  bsps/m68k/shared/cache/cache-mcf5235.c   |  101
-rw-r--r--  bsps/m68k/shared/cache/cache-mcf5282.c   |  124
-rw-r--r--  bsps/m68k/shared/cache/cache-mcf532x.c   |  143
-rw-r--r--  bsps/m68k/shared/cache/cache.c           |    3
-rw-r--r--  bsps/m68k/shared/cache/cache.h           |  215
-rw-r--r--  bsps/or1k/headers.am                     |    1
-rw-r--r--  bsps/or1k/include/bsp/cache_.h           |   43
-rw-r--r--  bsps/or1k/shared/cache/cache.c           |  388
-rw-r--r--  bsps/powerpc/shared/cache/cache.c        |  319
-rw-r--r--  bsps/shared/cache/cacheimpl.h            |  520
-rw-r--r--  bsps/shared/cache/nocache.c              |    1
-rw-r--r--  bsps/sparc/leon2/start/cache.c           |   51
-rw-r--r--  bsps/sparc/leon3/start/cache.c           |  191
22 files changed, 4533 insertions, 44 deletions
diff --git a/bsps/arm/shared/cache/cache-cp15.c b/bsps/arm/shared/cache/cache-cp15.c
new file mode 100644
index 0000000000..17de99eaec
--- /dev/null
+++ b/bsps/arm/shared/cache/cache-cp15.c
@@ -0,0 +1,184 @@
+/**
+ * @file
+ *
+ * @ingroup arm
+ *
+ * @brief ARM cache defines and implementation.
+ */
+
+/*
+ * Copyright (c) 2009-2011 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <libcpu/arm-cp15.h>
+#include "cache-cp15.h"
+
+#define CPU_DATA_CACHE_ALIGNMENT 32
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+#if defined(__ARM_ARCH_7A__)
+/* Some/many ARM Cortex-A cores have L1 data line length 64 bytes */
+#define CPU_MAXIMAL_CACHE_ALIGNMENT 64
+#endif
+
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS \
+ ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+static inline void _CPU_cache_flush_1_data_line(const void *d_addr)
+{
+ arm_cache_l1_flush_1_data_line(d_addr);
+}
+
+static inline void
+_CPU_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ _ARM_Data_synchronization_barrier();
+ arm_cache_l1_flush_data_range(
+ d_addr,
+ n_bytes
+ );
+ #if !defined(__ARM_ARCH_7A__)
+ arm_cp15_drain_write_buffer();
+ #endif
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void _CPU_cache_invalidate_1_data_line(const void *d_addr)
+{
+ arm_cache_l1_invalidate_1_data_line(d_addr);
+}
+
+static inline void
+_CPU_cache_invalidate_data_range(
+ const void *addr_first,
+ size_t n_bytes
+)
+{
+ arm_cache_l1_invalidate_data_range(
+ addr_first,
+ n_bytes
+ );
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_invalidate_1_instruction_line(const void *d_addr)
+{
+ arm_cache_l1_invalidate_1_instruction_line(d_addr);
+}
+
+static inline void
+_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
+{
+ arm_cache_l1_invalidate_instruction_range( i_addr, n_bytes );
+ _ARM_Instruction_synchronization_barrier();
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ _ARM_Data_synchronization_barrier();
+ #if defined(__ARM_ARCH_7A__)
+ arm_cp15_data_cache_clean_all_levels();
+ #else
+ arm_cp15_data_cache_clean_and_invalidate();
+ arm_cp15_drain_write_buffer();
+ #endif
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ #if defined(__ARM_ARCH_7A__)
+ arm_cp15_data_cache_invalidate_all_levels();
+ #else
+ arm_cp15_data_cache_invalidate();
+ #endif
+}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+ rtems_interrupt_local_disable(level);
+ ctrl = arm_cp15_get_control();
+ ctrl |= ARM_CP15_CTRL_C;
+ arm_cp15_set_control(ctrl);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+ rtems_interrupt_local_disable(level);
+ arm_cp15_data_cache_test_and_clean_and_invalidate();
+ ctrl = arm_cp15_get_control();
+ ctrl &= ~ARM_CP15_CTRL_C;
+ arm_cp15_set_control(ctrl);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ arm_cache_l1_invalidate_entire_instruction();
+ _ARM_Instruction_synchronization_barrier();
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+ rtems_interrupt_local_disable(level);
+ ctrl = arm_cp15_get_control();
+ ctrl |= ARM_CP15_CTRL_I;
+ arm_cp15_set_control(ctrl);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+ rtems_interrupt_local_disable(level);
+ ctrl = arm_cp15_get_control();
+ ctrl &= ~ARM_CP15_CTRL_I;
+ arm_cp15_set_control(ctrl);
+ rtems_interrupt_local_enable(level);
+}
+
+#include "../../shared/cache/cacheimpl.h"
diff --git a/bsps/arm/shared/cache/cache-cp15.h b/bsps/arm/shared/cache/cache-cp15.h
new file mode 100644
index 0000000000..ff01384f4b
--- /dev/null
+++ b/bsps/arm/shared/cache/cache-cp15.h
@@ -0,0 +1,407 @@
+/**
+ * @ingroup arm_shared
+ *
+ * @brief Level 1 Cache definitions and functions.
+ *
+ * This file implements handling for the ARM Level 1 cache controller.
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef LIBBSP_ARM_SHARED_CACHE_L1_H
+#define LIBBSP_ARM_SHARED_CACHE_L1_H
+
+#include <bsp.h>
+#include <libcpu/arm-cp15.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* These two defines also ensure that the rtems_cache_* functions have bodies */
+#define ARM_CACHE_L1_CPU_DATA_ALIGNMENT 32
+#define ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT 32
+#define ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+#define ARM_CACHE_L1_CSS_ID_DATA \
+ (ARM_CP15_CACHE_CSS_ID_DATA | ARM_CP15_CACHE_CSS_LEVEL(0))
+#define ARM_CACHE_L1_CSS_ID_INSTRUCTION \
+ (ARM_CP15_CACHE_CSS_ID_INSTRUCTION | ARM_CP15_CACHE_CSS_LEVEL(0))
+#define ARM_CACHE_L1_DATA_LINE_MASK ( ARM_CACHE_L1_CPU_DATA_ALIGNMENT - 1 )
+#define ARM_CACHE_L1_INSTRUCTION_LINE_MASK \
+  ( ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT - 1 )
+
+/* Errata Handlers */
+static void arm_cache_l1_errata_764369_handler( void )
+{
+#ifdef RTEMS_SMP
+ _ARM_Data_synchronization_barrier();
+#endif
+}
+
+/*
+ * @param l1LineSize Number of bytes in a cache line expressed as a power of
+ *   two value
+ * @param l1Associativity Associativity of the cache.  The associativity does
+ *   not have to be a power of two.
+ * @param l1NumSets Number of sets in the cache
+ */
+
+static inline void arm_cache_l1_properties_for_level(
+ uint32_t *l1LineSize,
+ uint32_t *l1Associativity,
+ uint32_t *l1NumSets,
+ uint32_t level_and_inst_dat
+)
+{
+ uint32_t ccsidr;
+
+ ccsidr = arm_cp15_get_cache_size_id_for_level(level_and_inst_dat);
+
+  /* Cache line size as a power of two in bytes */
+ *l1LineSize = arm_ccsidr_get_line_power(ccsidr);
+ /* Number of Ways */
+ *l1Associativity = arm_ccsidr_get_associativity(ccsidr);
+ /* Number of Sets */
+ *l1NumSets = arm_ccsidr_get_num_sets(ccsidr);
+}
+
+/*
+ * @param log_2_line_bytes The number of bytes per cache line expressed as
+ *   log2
+ * @param associativity The associativity of the cache being operated on
+ * @param cache_level_idx The level of the cache being operated on minus 1,
+ *   e.g. 0 for cache level 1
+ * @param set Number of the set to operate on
+ * @param way Number of the way to operate on
+ */
+
+static inline uint32_t arm_cache_l1_get_set_way_param(
+ const uint32_t log_2_line_bytes,
+ const uint32_t associativity,
+ const uint32_t cache_level_idx,
+ const uint32_t set,
+ const uint32_t way )
+{
+  uint32_t way_shift = __builtin_clz( associativity - 1 );
+
+  return ( way << way_shift )
+    | ( set << log_2_line_bytes )
+    | ( cache_level_idx << 1 );
+}
+
+static inline void arm_cache_l1_flush_1_data_line( const void *d_addr )
+{
+ /* Flush the Data cache */
+ arm_cp15_data_cache_clean_and_invalidate_line( d_addr );
+
+ /* Wait for L1 flush to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_flush_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties_for_level( &l1LineSize,
+ &l1Associativity, &l1NumSets,
+ ARM_CACHE_L1_CSS_ID_DATA);
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_clean_line_by_set_and_way( set_way_param );
+ }
+ }
+
+ /* Wait for L1 flush to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_invalidate_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties_for_level( &l1LineSize,
+ &l1Associativity, &l1NumSets,
+ ARM_CACHE_L1_CSS_ID_DATA);
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_invalidate_line_by_set_and_way( set_way_param );
+ }
+ }
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_clean_and_invalidate_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties_for_level( &l1LineSize,
+ &l1Associativity, &l1NumSets,
+ ARM_CACHE_L1_CSS_ID_DATA);
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_clean_and_invalidate_line_by_set_and_way(
+ set_way_param );
+ }
+ }
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) d_addr & ~ARM_CACHE_L1_DATA_LINE_MASK;
+    const uint32_t ADDR_LAST = (uint32_t) ( (size_t) d_addr + n_bytes - 1 );
+
+ arm_cache_l1_errata_764369_handler();
+
+ for (; adx <= ADDR_LAST; adx += ARM_CACHE_L1_CPU_DATA_ALIGNMENT ) {
+ /* Store and invalidate the Data cache line */
+ arm_cp15_data_cache_clean_and_invalidate_line( (void*)adx );
+ }
+ /* Wait for L1 store to complete */
+ _ARM_Data_synchronization_barrier();
+ }
+}
+
+
+static inline void arm_cache_l1_invalidate_1_data_line(
+ const void *d_addr )
+{
+ /* Invalidate the data cache line */
+ arm_cp15_data_cache_invalidate_line( d_addr );
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_freeze_data( void )
+{
+ /* To be implemented as needed, if supported by hardware at all */
+}
+
+static inline void arm_cache_l1_unfreeze_data( void )
+{
+ /* To be implemented as needed, if supported by hardware at all */
+}
+
+static inline void arm_cache_l1_invalidate_1_instruction_line(
+ const void *i_addr )
+{
+ /* Invalidate the Instruction cache line */
+ arm_cp15_instruction_cache_invalidate_line( i_addr );
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_invalidate_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) d_addr & ~ARM_CACHE_L1_DATA_LINE_MASK;
+    const uint32_t end = (uint32_t) ( (size_t) d_addr + n_bytes - 1 );
+
+ arm_cache_l1_errata_764369_handler();
+
+    /* Back the starting address up to the start of a line and invalidate
+       until end */
+    for (; adx <= end; adx += ARM_CACHE_L1_CPU_DATA_ALIGNMENT ) {
+      /* Invalidate the data cache line */
+      arm_cp15_data_cache_invalidate_line( (void*)adx );
+ }
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+ }
+}
+
+static inline void arm_cache_l1_invalidate_instruction_range(
+ const void *i_addr,
+ size_t n_bytes
+)
+{
+ if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) i_addr & ~ARM_CACHE_L1_INSTRUCTION_LINE_MASK;
+    const uint32_t end = (uint32_t) ( (size_t) i_addr + n_bytes - 1 );
+
+ arm_cache_l1_errata_764369_handler();
+
+ /* Back starting address up to start of a line and invalidate until end */
+ for (;
+ adx <= end;
+ adx += ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT ) {
+ /* Invalidate the Instruction cache line */
+ arm_cp15_instruction_cache_invalidate_line( (void*)adx );
+ }
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+ }
+}
+
+static inline void arm_cache_l1_invalidate_entire_instruction( void )
+{
+ uint32_t ctrl = arm_cp15_get_control();
+
+  #ifdef RTEMS_SMP
+  /* Invalidate I-cache inner shareable */
+  arm_cp15_instruction_cache_inner_shareable_invalidate_all();
+  #endif /* RTEMS_SMP */
+
+  /* I+BTB cache invalidate */
+  arm_cp15_instruction_cache_invalidate();
+
+ if ( ( ctrl & ARM_CP15_CTRL_Z ) != 0 ) {
+ #if defined(__ARM_ARCH_7A__)
+ arm_cp15_branch_predictor_inner_shareable_invalidate_all();
+ #endif
+ #if defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_7A__)
+ arm_cp15_branch_predictor_invalidate_all();
+ #endif
+ }
+}
+
+static inline void arm_cache_l1_freeze_instruction( void )
+{
+ /* To be implemented as needed, if supported by hardware at all */
+}
+
+static inline void arm_cache_l1_unfreeze_instruction( void )
+{
+ /* To be implemented as needed, if supported by hardware at all */
+}
+
+static inline void arm_cache_l1_disable_data( void )
+{
+ /* Clean and invalidate the Data cache */
+ arm_cache_l1_flush_entire_data();
+
+ /* Disable the Data cache */
+ arm_cp15_set_control( arm_cp15_get_control() & ~ARM_CP15_CTRL_C );
+}
+
+static inline void arm_cache_l1_disable_instruction( void )
+{
+ /* Synchronize the processor */
+ _ARM_Data_synchronization_barrier();
+
+ /* Invalidate the Instruction cache */
+ arm_cache_l1_invalidate_entire_instruction();
+
+ /* Disable the Instruction cache */
+ arm_cp15_set_control( arm_cp15_get_control() & ~ARM_CP15_CTRL_I );
+}
+
+static inline size_t arm_cache_l1_get_data_cache_size( void )
+{
+ size_t size;
+ uint32_t line_size = 0;
+ uint32_t associativity = 0;
+ uint32_t num_sets = 0;
+
+ arm_cache_l1_properties_for_level( &line_size,
+ &associativity, &num_sets,
+ ARM_CACHE_L1_CSS_ID_DATA);
+
+ size = (1 << line_size) * associativity * num_sets;
+
+ return size;
+}
+
+static inline size_t arm_cache_l1_get_instruction_cache_size( void )
+{
+ size_t size;
+ uint32_t line_size = 0;
+ uint32_t associativity = 0;
+ uint32_t num_sets = 0;
+
+ arm_cache_l1_properties_for_level( &line_size,
+ &associativity, &num_sets,
+ ARM_CACHE_L1_CSS_ID_INSTRUCTION);
+
+ size = (1 << line_size) * associativity * num_sets;
+
+ return size;
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* LIBBSP_ARM_SHARED_CACHE_L1_H */
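To make the set/way packing concrete, a worked example under stated
assumptions (a 4-way cache with 32-byte lines; the values are chosen purely
for illustration):

  /* arm_cache_l1_get_set_way_param() with log_2_line_bytes == 5 and
   * associativity == 4:
   *
   *   way_shift = __builtin_clz( 4 - 1 ) = 30
   *
   * For way 2, set 7, cache level 1 (cache_level_idx == 0):
   *
   *   ( 2 << 30 ) | ( 7 << 5 ) | ( 0 << 1 ) = 0x800000E0
   *
   * This is the operand handed to the CP15 clean/invalidate by set/way
   * operations in the loops above. */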
diff --git a/bsps/arm/shared/cache/cache-l2c-310.c b/bsps/arm/shared/cache/cache-l2c-310.c
new file mode 100644
index 0000000000..6869d205a8
--- /dev/null
+++ b/bsps/arm/shared/cache/cache-l2c-310.c
@@ -0,0 +1,1333 @@
+/**
+ * @ingroup L2C-310_cache
+ *
+ * @brief Cache definitions and functions.
+ *
+ * This file implements handling for the ARM L2C-310 cache controller
+ */
+
+/*
+ * Authorship
+ * ----------
+ * This software was created by
+ * R. Claus <claus@slac.stanford.edu>, 2013,
+ * Stanford Linear Accelerator Center, Stanford University.
+ *
+ * Acknowledgement of sponsorship
+ * ------------------------------
+ * This software was produced by
+ * the Stanford Linear Accelerator Center, Stanford University,
+ * under Contract DE-AC03-76SFO0515 with the Department of Energy.
+ *
+ * Government disclaimer of liability
+ * ----------------------------------
+ * Neither the United States nor the United States Department of Energy,
+ * nor any of their employees, makes any warranty, express or implied, or
+ * assumes any legal liability or responsibility for the accuracy,
+ * completeness, or usefulness of any data, apparatus, product, or process
+ * disclosed, or represents that its use would not infringe privately owned
+ * rights.
+ *
+ * Stanford disclaimer of liability
+ * --------------------------------
+ * Stanford University makes no representations or warranties, express or
+ * implied, nor assumes any liability for the use of this software.
+ *
+ * Stanford disclaimer of copyright
+ * --------------------------------
+ * Stanford University, owner of the copyright, hereby disclaims its
+ * copyright and all other rights in this software. Hence, anyone may
+ * freely use it for any purpose without restriction.
+ *
+ * Maintenance of notices
+ * ----------------------
+ * In the interest of clarity regarding the origin and status of this
+ * SLAC software, this and all the preceding Stanford University notices
+ * are to remain affixed to any copy or derivative of this software made
+ * or distributed by the recipient and are to be affixed to any copy of
+ * software made or distributed by the recipient that contains a copy or
+ * derivative of this software.
+ *
+ * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
+ */
+
+#include <assert.h>
+#include <bsp.h>
+#include <bsp/fatal.h>
+#include <libcpu/arm-cp15.h>
+#include <rtems/rtems/intr.h>
+#include <bsp/arm-release-id.h>
+#include <bsp/arm-errata.h>
+
+#include "cache-cp15.h"
+
+/* These two defines also ensure that the rtems_cache_* functions have bodies */
+#define CPU_DATA_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_DATA_ALIGNMENT
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT
+#if defined(__ARM_ARCH_7A__)
+/* Some/many ARM Cortex-A cores have L1 data line length 64 bytes */
+#define CPU_MAXIMAL_CACHE_ALIGNMENT 64
+#endif
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS \
+ ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
+
+#define L2C_310_DATA_LINE_MASK ( CPU_DATA_CACHE_ALIGNMENT - 1 )
+#define L2C_310_INSTRUCTION_LINE_MASK \
+ ( CPU_INSTRUCTION_CACHE_ALIGNMENT \
+ - 1 )
+#define L2C_310_NUM_WAYS 8
+#define L2C_310_WAY_MASK ( ( 1 << L2C_310_NUM_WAYS ) - 1 )
+
+#define L2C_310_MIN( a, b ) \
+  ( ( a ) < ( b ) ? ( a ) : ( b ) )
+
+#define L2C_310_MAX_LOCKING_BYTES (4 * 1024)
+
+
+/* RTL release number as can be read from cache_id register */
+#define L2C_310_RTL_RELEASE_R0_P0 0x0
+#define L2C_310_RTL_RELEASE_R1_P0 0x2
+#define L2C_310_RTL_RELEASE_R2_P0 0x4
+#define L2C_310_RTL_RELEASE_R3_P0 0x5
+#define L2C_310_RTL_RELEASE_R3_P1 0x6
+#define L2C_310_RTL_RELEASE_R3_P2 0x8
+#define L2C_310_RTL_RELEASE_R3_P3 0x9
+
+#define BSP_ARM_L2C_310_RTL_RELEASE (BSP_ARM_L2C_310_ID & L2C_310_ID_RTL_MASK)
+
+/**
+ * @defgroup L2C-310_cache Cache Support
+ * @ingroup arm_shared
+ * @brief Cache Functions and Definitions
+ * @{
+ */
+
+
+/**
+ * @brief L2CC Register Offsets
+ */
+typedef struct {
+ /** @brief Cache ID */
+ uint32_t cache_id;
+#define L2C_310_ID_RTL_MASK 0x3f
+#define L2C_310_ID_PART_MASK ( 0xf << 6 )
+#define L2C_310_ID_PART_L210 ( 1 << 6 )
+#define L2C_310_ID_PART_L310 ( 3 << 6 )
+#define L2C_310_ID_IMPL_MASK ( 0xff << 24 )
+ /** @brief Cache type */
+ uint32_t cache_type;
+/** @brief 1 if data banking implemented, 0 if not */
+#define L2C_310_TYPE_DATA_BANKING_MASK 0x80000000
+/** @brief Cache type field 11xy: x=1 if pl310_LOCKDOWN_BY_MASTER is
+ *  implemented, otherwise 0; y=1 if pl310_LOCKDOWN_BY_LINE is implemented,
+ *  otherwise 0 */
+#define L2C_310_TYPE_CTYPE_MASK 0x1E000000
+#define L2C_310_TYPE_CTYPE_SHIFT 25
+/** @brief 1 for Harvard architecture, 0 for unified architecture */
+#define L2C_310_TYPE_HARVARD_MASK 0x01000000
+/** @brief Data cache way size = 2 Exp(value + 2) KB */
+#define L2C_310_TYPE_SIZE_D_WAYS_MASK 0x00700000
+#define L2C_310_TYPE_SIZE_D_WAYS_SHIFT 20
+/** @brief Associativity, i.e. number of data ways = (value * 8) + 8 */
+#define L2C_310_TYPE_NUM_D_WAYS_MASK 0x00040000
+#define L2C_310_TYPE_NUM_D_WAYS_SHIFT 18
+/** @brief Data cache line length 00 - 32 */
+#define L2C_310_TYPE_LENGTH_D_LINE_MASK 0x00003000
+#define L2C_310_TYPE_LENGTH_D_LINE_SHIFT 12
+#define L2C_310_TYPE_LENGTH_D_LINE_VAL_32 0x0
+/** @brief Instruction cache way size = 2 Exp(value + 2) KB */
+#define L2C_310_TYPE_SIZE_I_WAYS_MASK 0x00000700
+#define L2C_310_TYPE_SIZE_I_WAYS_SHIFT 8
+/** @brief Associativity, i.e. number of instruction ways = (value * 8) + 8 */
+#define L2C_310_TYPE_NUM_I_WAYS_MASK 0x00000040
+#define L2C_310_TYPE_NUM_I_WAYS_SHIFT 6
+/** @brief Instruction cache line length 00 - 32 */
+#define L2C_310_TYPE_LENGTH_I_LINE_MASK 0x00000003
+#define L2C_310_TYPE_LENGTH_I_LINE_SHIFT 0
+#define L2C_310_TYPE_LENGTH_I_LINE_VAL_32 0x0
+
+ uint8_t reserved_8[0x100 - 8];
+ uint32_t ctrl; /* Control */
+/** @brief Enables the L2CC */
+#define L2C_310_CTRL_ENABLE 0x00000001
+
+#define L2C_310_CTRL_EXCL_CONFIG (1 << 12)
+
+ /** @brief Auxiliary control */
+ uint32_t aux_ctrl;
+
+/** @brief Early BRESP Enable */
+#define L2C_310_AUX_EBRESPE_MASK 0x40000000
+
+/** @brief Instruction Prefetch Enable */
+#define L2C_310_AUX_IPFE_MASK 0x20000000
+
+/** @brief Data Prefetch Enable */
+#define L2C_310_AUX_DPFE_MASK 0x10000000
+
+/** @brief Non-secure interrupt access control */
+#define L2C_310_AUX_NSIC_MASK 0x08000000
+
+/** @brief Non-secure lockdown enable */
+#define L2C_310_AUX_NSLE_MASK 0x04000000
+
+/** @brief Cache replacement policy */
+#define L2C_310_AUX_CRP_MASK 0x02000000
+
+/** @brief Force write allocate */
+#define L2C_310_AUX_FWE_MASK 0x01800000
+
+/** @brief Shared attribute override enable */
+#define L2C_310_AUX_SAOE_MASK 0x00400000
+
+/** @brief Parity enable */
+#define L2C_310_AUX_PE_MASK 0x00200000
+
+/** @brief Event monitor bus enable */
+#define L2C_310_AUX_EMBE_MASK 0x00100000
+
+/** @brief Way-size */
+#define L2C_310_AUX_WAY_SIZE_MASK 0x000E0000
+#define L2C_310_AUX_WAY_SIZE_SHIFT 17
+
+/** @brief Way-size */
+#define L2C_310_AUX_ASSOC_MASK 0x00010000
+
+/** @brief Shared attribute invalidate enable */
+#define L2C_310_AUX_SAIE_MASK 0x00002000
+
+/** @brief Exclusive cache configuration */
+#define L2C_310_AUX_EXCL_CACHE_MASK 0x00001000
+
+/** @brief Store buffer device limitation Enable */
+#define L2C_310_AUX_SBDLE_MASK 0x00000800
+
+/** @brief High Priority for SO and Dev Reads Enable */
+#define L2C_310_AUX_HPSODRE_MASK 0x00000400
+
+/** @brief Full line of zero enable */
+#define L2C_310_AUX_FLZE_MASK 0x00000001
+
+/** @brief Default value: enable all prefetching */
+#define L2C_310_AUX_REG_DEFAULT_MASK \
+ ( L2C_310_AUX_WAY_SIZE_MASK & ( 0x3 << L2C_310_AUX_WAY_SIZE_SHIFT ) ) \
+ | L2C_310_AUX_PE_MASK /* Prefetch enable */ \
+ | L2C_310_AUX_SAOE_MASK /* Shared attribute override enable */ \
+ | L2C_310_AUX_CRP_MASK /* Cache replacement policy */ \
+ | L2C_310_AUX_DPFE_MASK /* Data prefetch enable */ \
+ | L2C_310_AUX_IPFE_MASK /* Instruction prefetch enable */ \
+ | L2C_310_AUX_EBRESPE_MASK /* Early BRESP enable */
+
+#define L2C_310_AUX_REG_ZERO_MASK 0xFFF1FFFF
+
+/** @brief 1 cycle of latency; there is no additional latency for tag RAM */
+#define L2C_310_RAM_1_CYCLE_LAT_VAL 0x00000000
+/** @brief 2 cycles of latency for tag RAM */
+#define L2C_310_RAM_2_CYCLE_LAT_VAL 0x00000001
+/** @brief 3 cycles of latency for tag RAM */
+#define L2C_310_RAM_3_CYCLE_LAT_VAL 0x00000002
+/** @brief 4 cycles of latency for tag RAM */
+#define L2C_310_RAM_4_CYCLE_LAT_VAL 0x00000003
+/** @brief 5 cycles of latency for tag RAM */
+#define L2C_310_RAM_5_CYCLE_LAT_VAL 0x00000004
+/** @brief 6 cycles of latency for tag RAM */
+#define L2C_310_RAM_6_CYCLE_LAT_VAL 0x00000005
+/** @brief 7 cycles of latency for tag RAM */
+#define L2C_310_RAM_7_CYCLE_LAT_VAL 0x00000006
+/** @brief 8 cycles of latency for tag RAM */
+#define L2C_310_RAM_8_CYCLE_LAT_VAL 0x00000007
+/** @brief Shift left setup latency values by this value */
+#define L2C_310_RAM_SETUP_SHIFT 0x00000000
+/** @brief Shift left read latency values by this value */
+#define L2C_310_RAM_READ_SHIFT 0x00000004
+/** @brief Shift left write latency values by this value */
+#define L2C_310_RAM_WRITE_SHIFT 0x00000008
+/** @brief Mask for RAM setup latency */
+#define L2C_310_RAM_SETUP_LAT_MASK 0x00000007
+/** @brief Mask for RAM read latency */
+#define L2C_310_RAM_READ_LAT_MASK 0x00000070
+/** @brief Mask for RAM write latency */
+#define L2C_310_RAM_WRITE_LAT_MASK 0x00000700
+ /** @brief Latency for tag RAM */
+ uint32_t tag_ram_ctrl;
+/** @brief Default latency for tag RAM */
+#define L2C_310_TAG_RAM_DEFAULT_LAT \
+ ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
+ | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
+ | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )
+ /** @brief Latency for data RAM */
+ uint32_t data_ram_ctrl;
+/** @brief Default latency for data RAM */
+#define L2C_310_DATA_RAM_DEFAULT_MASK \
+ ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
+ | ( L2C_310_RAM_3_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
+ | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )
+
+ uint8_t reserved_110[0x200 - 0x110];
+
+ /** @brief Event counter control */
+ uint32_t ev_ctrl;
+
+ /** @brief Event counter 1 configuration */
+ uint32_t ev_cnt1_cfg;
+
+ /** @brief Event counter 0 configuration */
+ uint32_t ev_cnt0_cfg;
+
+ /** @brief Event counter 1 value */
+ uint32_t ev_cnt1;
+
+ /** @brief Event counter 0 value */
+ uint32_t ev_cnt0;
+
+ /** @brief Interrupt enable mask */
+ uint32_t int_mask;
+
+ /** @brief Masked interrupt status (read-only)*/
+ uint32_t int_mask_status;
+
+ /** @brief Unmasked interrupt status */
+ uint32_t int_raw_status;
+
+ /** @brief Interrupt clear */
+ uint32_t int_clr;
+
+/**
+ * @name Interrupt bit masks
+ *
+ * @{
+ */
+
+/** @brief DECERR from L3 */
+#define L2C_310_INT_DECERR_MASK 0x00000100
+
+/** @brief SLVERR from L3 */
+#define L2C_310_INT_SLVERR_MASK 0x00000080
+
+/** @brief Error on L2 data RAM (Read) */
+#define L2C_310_INT_ERRRD_MASK 0x00000040
+
+/** @brief Error on L2 tag RAM (Read) */
+#define L2C_310_INT_ERRRT_MASK 0x00000020
+
+/** @brief Error on L2 data RAM (Write) */
+#define L2C_310_INT_ERRWD_MASK 0x00000010
+
+/** @brief Error on L2 tag RAM (Write) */
+#define L2C_310_INT_ERRWT_MASK 0x00000008
+
+/** @brief Parity Error on L2 data RAM (Read) */
+#define L2C_310_INT_PARRD_MASK 0x00000004
+
+/** @brief Parity Error on L2 tag RAM (Read) */
+#define L2C_310_INT_PARRT_MASK 0x00000002
+
+/** @brief Event Counter1/0 Overflow Increment */
+#define L2C_310_INT_ECNTR_MASK 0x00000001
+
+/** @} */
+
+ uint8_t reserved_224[0x730 - 0x224];
+
+ /** @brief Drain the STB */
+ uint32_t cache_sync;
+ uint8_t reserved_734[0x740 - 0x734];
+ /** @brief ARM Errata 753970 for pl310-r3p0 */
+ uint32_t dummy_cache_sync_reg;
+ uint8_t reserved_744[0x770 - 0x744];
+
+ /** @brief Invalidate line by PA */
+ uint32_t inv_pa;
+ uint8_t reserved_774[0x77c - 0x774];
+
+ /** @brief Invalidate by Way */
+ uint32_t inv_way;
+ uint8_t reserved_780[0x7b0 - 0x780];
+
+ /** @brief Clean Line by PA */
+ uint32_t clean_pa;
+ uint8_t reserved_7b4[0x7b8 - 0x7b4];
+
+ /** @brief Clean Line by Set/Way */
+ uint32_t clean_index;
+
+ /** @brief Clean by Way */
+ uint32_t clean_way;
+ uint8_t reserved_7c0[0x7f0 - 0x7c0];
+
+ /** @brief Clean and Invalidate Line by PA */
+ uint32_t clean_inv_pa;
+ uint8_t reserved_7f4[0x7f8 - 0x7f4];
+
+ /** @brief Clean and Invalidate Line by Set/Way */
+ uint32_t clean_inv_indx;
+
+ /** @brief Clean and Invalidate by Way */
+ uint32_t clean_inv_way;
+
+ /** @brief Data lock down 0 */
+ uint32_t d_lockdown_0;
+
+ /** @brief Instruction lock down 0 */
+ uint32_t i_lockdown_0;
+
+ /** @brief Data lock down 1 */
+ uint32_t d_lockdown_1;
+
+ /** @brief Instruction lock down 1 */
+ uint32_t i_lockdown_1;
+
+ /** @brief Data lock down 2 */
+ uint32_t d_lockdown_2;
+
+ /** @brief Instruction lock down 2 */
+ uint32_t i_lockdown_2;
+
+ /** @brief Data lock down 3 */
+ uint32_t d_lockdown_3;
+
+ /** @brief Instruction lock down 3 */
+ uint32_t i_lockdown_3;
+
+ /** @brief Data lock down 4 */
+ uint32_t d_lockdown_4;
+
+ /** @brief Instruction lock down 4 */
+ uint32_t i_lockdown_4;
+
+ /** @brief Data lock down 5 */
+ uint32_t d_lockdown_5;
+
+ /** @brief Instruction lock down 5 */
+ uint32_t i_lockdown_5;
+
+ /** @brief Data lock down 6 */
+ uint32_t d_lockdown_6;
+
+ /** @brief Instruction lock down 6 */
+ uint32_t i_lockdown_6;
+
+ /** @brief Data lock down 7 */
+ uint32_t d_lockdown_7;
+
+ /** @brief Instruction lock down 7 */
+ uint32_t i_lockdown_7;
+
+ uint8_t reserved_940[0x950 - 0x940];
+
+ /** @brief Lockdown by Line Enable */
+ uint32_t lock_line_en;
+
+ /** @brief Cache lockdown by way */
+ uint32_t unlock_way;
+
+ uint8_t reserved_958[0xc00 - 0x958];
+
+ /** @brief Address range redirect, part 1 */
+ uint32_t addr_filtering_start;
+
+ /** @brief Address range redirect, part 2 */
+ uint32_t addr_filtering_end;
+
+/** @brief Address filtering valid bits*/
+#define L2C_310_ADDR_FILTER_VALID_MASK 0xFFF00000
+
+/** @brief Address filtering enable bit*/
+#define L2C_310_ADDR_FILTER_ENABLE_MASK 0x00000001
+
+ uint8_t reserved_c08[0xf40 - 0xc08];
+
+ /** @brief Debug control */
+ uint32_t debug_ctrl;
+
+/** @brief Debug SPIDEN bit */
+#define L2C_310_DEBUG_SPIDEN_MASK 0x00000004
+
+/** @brief Debug DWB bit, forces write through */
+#define L2C_310_DEBUG_DWB_MASK 0x00000002
+
+/** @brief Debug DCL bit, disables cache line fill */
+#define L2C_310_DEBUG_DCL_MASK 0x00000001
+
+ uint8_t reserved_f44[0xf60 - 0xf44];
+
+ /** @brief Purpose prefetch enables */
+ uint32_t prefetch_ctrl;
+/** @brief Prefetch offset */
+#define L2C_310_PREFETCH_OFFSET_MASK 0x0000001F
+ uint8_t reserved_f64[0xf80 - 0xf64];
+
+ /** @brief Purpose power controls */
+ uint32_t power_ctrl;
+} L2CC;
+
+rtems_interrupt_lock l2c_310_lock = RTEMS_INTERRUPT_LOCK_INITIALIZER(
+ "L2-310 cache controller"
+);
+
+/* Errata table for the L2C-310 Level 2 cache from ARM.
+ * Information taken from ARM's
+ * "CoreLink controllers and peripherals
+ * - System controllers
+ * - L2C-310 Level 2 Cache Controller
+ * - Revision r3p3
+ * - Software Developer Errata Notice
+ * - ARM CoreLink Level 2 Cache Controller (L2C-310 or PL310),
+ *   r3 releases Software Developers Errata Notice"
+ * Please see this document for more information on these errata. */
+#if BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R3_P0
+#define L2C_310_ERRATA_IS_APPLICABLE_753970
+#endif
+
+static bool l2c_310_errata_is_applicable_727913(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_727914(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_727915(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_729806(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_729815(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_742884(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P1:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_752271(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_765569(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_errata_is_applicable_769419(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ is_applicable = false;
+ break;
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+#if BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R0_P0 \
+ || BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R1_P0
+#define L2C_310_ERRATA_IS_APPLICABLE_588369
+#endif
+
+#ifdef CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS
+static bool l2c_310_errata_is_applicable_754670(
+ uint32_t rtl_release
+)
+{
+ bool is_applicable = false;
+
+ switch ( rtl_release ) {
+ case L2C_310_RTL_RELEASE_R3_P3:
+ case L2C_310_RTL_RELEASE_R3_P2:
+ case L2C_310_RTL_RELEASE_R3_P1:
+ case L2C_310_RTL_RELEASE_R3_P0:
+ case L2C_310_RTL_RELEASE_R2_P0:
+ case L2C_310_RTL_RELEASE_R1_P0:
+ case L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( 0 );
+ break;
+ }
+
+ return is_applicable;
+}
+#endif /* CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS */
+
+/* The common workaround for this erratum would be to add a
+ * data synchronization barrier to the beginning of the abort handler.
+ * But for RTEMS a call to the abort handler is a fatal condition anyway,
+ * so there is no need to handle this erratum */
+#define CACHE_ARM_ERRATA_775420_HANDLER() \
+ if( arm_errata_is_applicable_processor_errata_775420 ) { \
+ } \
+
+static void l2c_310_check_errata( uint32_t rtl_release )
+{
+  /* Erratum 588369 is handled within these sources: clean + invalidate
+   * may keep the cache line if it was clean.  See ARM's documentation
+   * on the erratum for the workaround */
+  /* assert( ! l2c_310_errata_is_applicable_588369( rtl_release ) ); */
+
+ /* Unhandled erratum present: 727913 Prefetch dropping feature can cause
+ * incorrect behavior when PL310 handles reads that cross cache line
+ * boundary */
+ assert( ! l2c_310_errata_is_applicable_727913( rtl_release ) );
+
+ /* Unhandled erratum present: 727914 Double linefill feature can cause
+ * deadlock */
+ assert( ! l2c_310_errata_is_applicable_727914( rtl_release ) );
+
+ /* Unhandled erratum present: 727915 Background Clean and Invalidate by Way
+ * operation can cause data corruption */
+ assert( ! l2c_310_errata_is_applicable_727915( rtl_release ) );
+
+ /* Unhandled erratum present: 729806 Speculative reads from the Cortex-A9
+ * MPCore processor can cause deadlock */
+ assert( ! l2c_310_errata_is_applicable_729806( rtl_release ) );
+
+ if( l2c_310_errata_is_applicable_729815( rtl_release ) )
+ {
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ assert( 0 == ( l2cc->aux_ctrl & L2C_310_AUX_HPSODRE_MASK ) );
+
+ /* Erratum: 729815 The “High Priority for SO and Dev reads” feature can
+ * cause Quality of Service issues to cacheable read transactions*/
+
+ /* Conditions
+ This problem occurs when the following conditions are met:
+ 1. Bit[10] “High Priority for SO and Dev reads enable” of the PL310
+ Auxiliary Control Register is set to 1.
+ 2. PL310 receives a cacheable read that misses in the L2 cache.
+ 3. PL310 receives a continuous flow of Strongly Ordered or Device
+ reads that take all address slots in the master interface.
+ Workaround
+ A workaround is only necessary in systems that are able to issue a
+ continuous flow of Strongly Ordered or Device reads. In such a case,
+ the workaround is to disable the “High Priority for SO and Dev reads”
+ feature. This is the default behavior.*/
+ }
+
+ /* Unhandled erratum present: 742884 Double linefill feature might introduce
+ * circular dependency and deadlock */
+ assert( ! l2c_310_errata_is_applicable_742884( rtl_release ) );
+
+ /* Unhandled erratum present: 752271 Double linefill feature can cause data
+ * corruption */
+ assert( ! l2c_310_errata_is_applicable_752271( rtl_release ) );
+
+  /* This erratum cannot be worked around: 754670 A continuous write flow
+   * can stall a read targeting the same memory area.
+   * However, this erratum does not lead to any data corruption */
+  /* assert( ! l2c_310_errata_is_applicable_754670( rtl_release ) ); */
+
+ if( l2c_310_errata_is_applicable_765569( rtl_release ) )
+ {
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ assert( !( ( l2cc->aux_ctrl & L2C_310_AUX_IPFE_MASK
+ || l2cc->aux_ctrl & L2C_310_AUX_DPFE_MASK )
+ && ( ( l2cc->prefetch_ctrl & L2C_310_PREFETCH_OFFSET_MASK )
+ == 23 ) ) );
+
+ /* Unhandled erratum present: 765569 Prefetcher can cross 4KB boundary if
+ * offset is programmed with value 23 */
+
+ /* Conditions
+ This problem occurs when the following conditions are met:
+ 1. One of the Prefetch Enable bits (bits [29:28] of the Auxiliary or
+ Prefetch Control Register) is set HIGH.
+ 2. The prefetch offset bits are programmed with value 23 (5'b10111).
+ Workaround
+ A workaround for this erratum is to program the prefetch offset with any
+ value except 23.*/
+ }
+
+  /* Unhandled erratum present: 769419 No automatic Store Buffer drain,
+   * visibility of written data requires an explicit Cache Sync operation */
+  assert( ! l2c_310_errata_is_applicable_769419( rtl_release ) );
+}
+
+static inline void
+l2c_310_sync( volatile L2CC *l2cc )
+{
+#ifdef L2C_310_ERRATA_IS_APPLICABLE_753970
+ l2cc->dummy_cache_sync_reg = 0;
+#else
+ l2cc->cache_sync = 0;
+#endif
+}
+
+static inline void
+l2c_310_flush_1_line( volatile L2CC *l2cc, uint32_t d_addr )
+{
+#ifdef L2C_310_ERRATA_IS_APPLICABLE_588369
+ /*
+ * Errata 588369 says that clean + inv may keep the
+ * cache line if it was clean, the recommended
+ * workaround is to clean then invalidate the cache
+ * line, with write-back and cache linefill disabled.
+ */
+ l2cc->clean_pa = d_addr;
+ l2c_310_sync( l2cc );
+ l2cc->inv_pa = d_addr;
+#else
+ l2cc->clean_inv_pa = d_addr;
+#endif
+}
+
+static inline void
+l2c_310_flush_range( const void* d_addr, const size_t n_bytes )
+{
+  /* Back the starting address up to the start of a line and flush until
+   * ADDR_LAST */
+  uint32_t adx = (uint32_t) d_addr & ~L2C_310_DATA_LINE_MASK;
+  const uint32_t ADDR_LAST = (uint32_t) ( (size_t) d_addr + n_bytes - 1 );
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ if ( n_bytes == 0 ) {
+ return;
+ }
+
+ for (; adx <= ADDR_LAST; adx += CPU_DATA_CACHE_ALIGNMENT ) {
+ l2c_310_flush_1_line( l2cc, adx );
+ }
+
+ l2c_310_sync( l2cc );
+}
+
+static inline void
+l2c_310_flush_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+ rtems_interrupt_lock_context lock_context;
+
+ /* Only flush if level 2 cache is active */
+ if( ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) != 0 ) {
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
+ l2cc->clean_inv_way = L2C_310_WAY_MASK;
+
+ while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) {};
+
+ /* Wait for the flush to complete */
+ l2c_310_sync( l2cc );
+
+ rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
+ }
+}
+
+static inline void
+l2c_310_invalidate_1_line( const void *d_addr )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ l2cc->inv_pa = (uint32_t) d_addr;
+ l2c_310_sync( l2cc );
+}
+
+static inline void
+l2c_310_invalidate_range( const void* d_addr, const size_t n_bytes )
+{
+  /* Back the starting address up to the start of a line and invalidate
+   * until ADDR_LAST */
+  uint32_t adx = (uint32_t) d_addr & ~L2C_310_DATA_LINE_MASK;
+  const uint32_t ADDR_LAST = (uint32_t) ( (size_t) d_addr + n_bytes - 1 );
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ if ( n_bytes == 0 ) {
+ return;
+ }
+
+ for (; adx <= ADDR_LAST; adx += CPU_DATA_CACHE_ALIGNMENT ) {
+ /* Invalidate L2 cache line */
+ l2cc->inv_pa = adx;
+ }
+
+ l2c_310_sync( l2cc );
+}
+
+
+static inline void
+l2c_310_invalidate_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+
+ /* Invalidate the caches */
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ l2cc->inv_way = L2C_310_WAY_MASK;
+
+ while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;
+
+ /* Wait for the invalidate to complete */
+ l2c_310_sync( l2cc );
+}
+
+static inline void
+l2c_310_clean_and_invalidate_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+ rtems_interrupt_lock_context lock_context;
+
+ if( ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) != 0 ) {
+ /* Invalidate the caches */
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
+ l2cc->clean_inv_way = L2C_310_WAY_MASK;
+
+ while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;
+
+ /* Wait for the invalidate to complete */
+ l2c_310_sync( l2cc );
+
+ rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
+ }
+}
+
+static inline void
+l2c_310_freeze( void )
+{
+ /* To be implemented as needed, if supported
+ by hardware at all */
+}
+
+static inline void
+l2c_310_unfreeze( void )
+{
+ /* To be implemented as needed, if supported
+ by hardware at all */
+}
+
+static inline size_t
+l2c_310_get_cache_size( void )
+{
+ size_t size = 0;
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+ uint32_t cache_type = l2cc->cache_type;
+ uint32_t way_size;
+ uint32_t num_ways;
+
+ way_size = (cache_type & L2C_310_TYPE_SIZE_D_WAYS_MASK)
+ >> L2C_310_TYPE_SIZE_D_WAYS_SHIFT;
+ num_ways = (cache_type & L2C_310_TYPE_NUM_D_WAYS_MASK)
+ >> L2C_310_TYPE_NUM_D_WAYS_SHIFT;
+
+ assert( way_size <= 0x07 );
+ assert( num_ways <= 0x01 );
+ if( way_size <= 0x07 && num_ways <= 0x01 ) {
+ if( way_size == 0x00 ) {
+ way_size = 16 * 1024;
+ } else if( way_size == 0x07 ) {
+ way_size = 512 * 1024;
+ } else {
+ way_size = (1 << (way_size - 1)) * 16 * 1024;
+ }
+ switch( num_ways ) {
+ case 0:
+ num_ways = 8;
+ break;
+ case 1:
+ num_ways = 16;
+ break;
+ default:
+ num_ways = 0;
+ break;
+ }
+ size = way_size * num_ways;
+ }
+ return size;
+}
+
+static void l2c_310_unlock( volatile L2CC *l2cc )
+{
+ l2cc->d_lockdown_0 = 0;
+ l2cc->i_lockdown_0 = 0;
+ l2cc->d_lockdown_1 = 0;
+ l2cc->i_lockdown_1 = 0;
+ l2cc->d_lockdown_2 = 0;
+ l2cc->i_lockdown_2 = 0;
+ l2cc->d_lockdown_3 = 0;
+ l2cc->i_lockdown_3 = 0;
+ l2cc->d_lockdown_4 = 0;
+ l2cc->i_lockdown_4 = 0;
+ l2cc->d_lockdown_5 = 0;
+ l2cc->i_lockdown_5 = 0;
+ l2cc->d_lockdown_6 = 0;
+ l2cc->i_lockdown_6 = 0;
+ l2cc->d_lockdown_7 = 0;
+ l2cc->i_lockdown_7 = 0;
+}
+
+static void l2c_310_wait_for_background_ops( volatile L2CC *l2cc )
+{
+ while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;
+
+ while ( l2cc->clean_way & L2C_310_WAY_MASK ) ;
+
+ while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;
+}
+
+/* We support only the L2C-310 cache controller revisions r3p2 and r3p3 */
+
+#if (BSP_ARM_L2C_310_ID & L2C_310_ID_PART_MASK) \
+ != L2C_310_ID_PART_L310
+#error "invalid L2-310 cache controller part number"
+#endif
+
+#if (BSP_ARM_L2C_310_RTL_RELEASE != L2C_310_RTL_RELEASE_R3_P2) \
+ && (BSP_ARM_L2C_310_RTL_RELEASE != L2C_310_RTL_RELEASE_R3_P3)
+#error "invalid L2-310 cache controller RTL revision"
+#endif
+
+static inline void
+l2c_310_enable( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+ uint32_t cache_id = l2cc->cache_id;
+ uint32_t rtl_release = cache_id & L2C_310_ID_RTL_MASK;
+ uint32_t id_mask = L2C_310_ID_IMPL_MASK | L2C_310_ID_PART_MASK;
+ uint32_t ctrl;
+
+ /*
+ * Do we actually have an L2C-310 cache controller? Has BSP_ARM_L2C_310_BASE
+ * been configured correctly?
+ */
+ if (
+ (BSP_ARM_L2C_310_ID & id_mask) != (cache_id & id_mask)
+ || rtl_release < BSP_ARM_L2C_310_RTL_RELEASE
+ ) {
+ bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_ID );
+ }
+
+ l2c_310_check_errata( rtl_release );
+
+ ctrl = l2cc->ctrl;
+
+ if ( ( ctrl & L2C_310_CTRL_EXCL_CONFIG ) != 0 ) {
+ bsp_fatal( ARM_FATAL_L2C_310_EXCLUSIVE_CONFIG );
+ }
+
+ /* Only enable if L2CC is currently disabled */
+ if( ( ctrl & L2C_310_CTRL_ENABLE ) == 0 ) {
+ uint32_t aux_ctrl;
+ int ways;
+
+ /* Make sure that I&D is not locked down when starting */
+ l2c_310_unlock( l2cc );
+
+ l2c_310_wait_for_background_ops( l2cc );
+
+ aux_ctrl = l2cc->aux_ctrl;
+
+    if ( ( aux_ctrl & L2C_310_AUX_ASSOC_MASK ) != 0 ) {
+ ways = 16;
+ } else {
+ ways = 8;
+ }
+
+ if ( ways != L2C_310_NUM_WAYS ) {
+ bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_NUM_WAYS );
+ }
+
+ /* Set up the way size */
+ aux_ctrl &= L2C_310_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
+ aux_ctrl |= L2C_310_AUX_REG_DEFAULT_MASK;
+
+ l2cc->aux_ctrl = aux_ctrl;
+
+ /* Set up the latencies */
+ l2cc->tag_ram_ctrl = L2C_310_TAG_RAM_DEFAULT_LAT;
+ l2cc->data_ram_ctrl = L2C_310_DATA_RAM_DEFAULT_MASK;
+
+ l2c_310_invalidate_entire();
+
+ /* Clear the pending interrupts */
+ l2cc->int_clr = l2cc->int_raw_status;
+
+ /* Enable the L2CC */
+ l2cc->ctrl = ctrl | L2C_310_CTRL_ENABLE;
+ }
+}
+
+static inline void
+l2c_310_disable( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
+ rtems_interrupt_lock_context lock_context;
+
+ if ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) {
+ /* Clean and Invalidate L2 Cache */
+ l2c_310_flush_entire();
+ rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
+
+ l2c_310_wait_for_background_ops( l2cc );
+
+ /* Disable the L2 cache */
+ l2cc->ctrl &= ~L2C_310_CTRL_ENABLE;
+ rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
+ }
+}
+
+static inline void
+_CPU_cache_enable_data( void )
+{
+ l2c_310_enable();
+}
+
+static inline void
+_CPU_cache_disable_data( void )
+{
+ arm_cache_l1_disable_data();
+ l2c_310_disable();
+}
+
+static inline void
+_CPU_cache_enable_instruction( void )
+{
+ l2c_310_enable();
+}
+
+static inline void
+_CPU_cache_disable_instruction( void )
+{
+ arm_cache_l1_disable_instruction();
+ l2c_310_disable();
+}
+
+static inline void
+_CPU_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ arm_cache_l1_flush_data_range(
+ d_addr,
+ n_bytes
+ );
+ l2c_310_flush_range(
+ d_addr,
+ n_bytes
+ );
+}
+
+static inline void
+_CPU_cache_flush_entire_data( void )
+{
+ arm_cache_l1_flush_entire_data();
+ l2c_310_flush_entire();
+}
+
+static inline void
+_CPU_cache_invalidate_data_range(
+ const void *addr_first,
+ size_t n_bytes
+)
+{
+ l2c_310_invalidate_range(
+ addr_first,
+ n_bytes
+ );
+ arm_cache_l1_invalidate_data_range(
+ addr_first,
+ n_bytes
+ );
+}
+
+static inline void
+_CPU_cache_invalidate_entire_data( void )
+{
+ /* This is broadcast within the cluster */
+ arm_cache_l1_flush_entire_data();
+
+ /* forces the address out past level 2 */
+ l2c_310_clean_and_invalidate_entire();
+
+  /* This is broadcast within the cluster */
+ arm_cache_l1_clean_and_invalidate_entire_data();
+}
+
+static inline void
+_CPU_cache_freeze_data( void )
+{
+ arm_cache_l1_freeze_data();
+ l2c_310_freeze();
+}
+
+static inline void
+_CPU_cache_unfreeze_data( void )
+{
+ arm_cache_l1_unfreeze_data();
+ l2c_310_unfreeze();
+}
+
+static inline void
+_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
+{
+ arm_cache_l1_invalidate_instruction_range( i_addr, n_bytes );
+}
+
+static inline void
+_CPU_cache_invalidate_entire_instruction( void )
+{
+ arm_cache_l1_invalidate_entire_instruction();
+}
+
+static inline void
+_CPU_cache_freeze_instruction( void )
+{
+ arm_cache_l1_freeze_instruction();
+ l2c_310_freeze();
+}
+
+static inline void
+_CPU_cache_unfreeze_instruction( void )
+{
+ arm_cache_l1_unfreeze_instruction();
+ l2c_310_unfreeze();
+}
+
+static inline size_t
+_CPU_cache_get_data_cache_size( const uint32_t level )
+{
+ size_t size = 0;
+
+ switch( level )
+ {
+ case 1:
+ size = arm_cache_l1_get_data_cache_size();
+ break;
+ case 0:
+ case 2:
+ size = l2c_310_get_cache_size();
+ break;
+ default:
+ size = 0;
+ break;
+ }
+ return size;
+}
+
+static inline size_t
+_CPU_cache_get_instruction_cache_size( const uint32_t level )
+{
+ size_t size = 0;
+
+ switch( level )
+ {
+ case 1:
+ size = arm_cache_l1_get_instruction_cache_size();
+ break;
+ case 0:
+ case 2:
+ size = l2c_310_get_cache_size();
+ break;
+ default:
+ size = 0;
+ break;
+ }
+ return size;
+}
+
+#include "../../shared/cache/cacheimpl.h"
diff --git a/bsps/arm/shared/cache/cache-v7m.c b/bsps/arm/shared/cache/cache-v7m.c
new file mode 100644
index 0000000000..a73112f12a
--- /dev/null
+++ b/bsps/arm/shared/cache/cache-v7m.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <chip.h>
+
+#define CPU_DATA_CACHE_ALIGNMENT 32
+
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+static inline void _CPU_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ SCB_CleanInvalidateDCache_by_Addr(
+ RTEMS_DECONST(uint32_t *, (const uint32_t *) d_addr),
+ n_bytes
+ );
+}
+
+static inline void _CPU_cache_invalidate_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ SCB_InvalidateDCache_by_Addr(
+ RTEMS_DECONST(uint32_t *, (const uint32_t *) d_addr),
+ n_bytes
+ );
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_invalidate_instruction_range(
+ const void *i_addr,
+ size_t n_bytes
+)
+{
+  rtems_interrupt_level level;
+
+  (void) i_addr;
+  (void) n_bytes;
+
+  /* No instruction cache invalidate by address range is available here,
+     so the entire instruction cache is invalidated instead */
+  rtems_interrupt_disable(level);
+  SCB_InvalidateICache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_CleanDCache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_InvalidateDCache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_EnableDCache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_DisableDCache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_InvalidateICache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_EnableICache();
+ rtems_interrupt_enable(level);
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ SCB_DisableICache();
+ rtems_interrupt_enable(level);
+}
+
+#include "../../shared/cache/cacheimpl.h"
diff --git a/bsps/bfin/shared/cache/cache.c b/bsps/bfin/shared/cache/cache.c
new file mode 100644
index 0000000000..ea5061bc9c
--- /dev/null
+++ b/bsps/bfin/shared/cache/cache.c
@@ -0,0 +1,134 @@
+/* Blackfin Cache Support
+ *
+ * Copyright (c) 2008 Kallisti Labs, Los Gatos, CA, USA
+ * written by Allan Hessenflow <allanh@kallisti.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+
+#include <rtems.h>
+#include <bsp.h>
+#include <libcpu/memoryRegs.h>
+
+#define CPU_DATA_CACHE_ALIGNMENT 32
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+
+#ifdef BSP_DATA_CACHE_CONFIG
+#define LIBCPU_DATA_CACHE_CONFIG BSP_DATA_CACHE_CONFIG
+#else
+/* use 16K of each SRAM bank */
+#define LIBCPU_DATA_CACHE_CONFIG (3 << DMEM_CONTROL_DMC_SHIFT)
+#endif
+
+/* There are many syncs in the following code because they should be
+   harmless (except for wasting time), and this is easier than figuring out
+   exactly where they're needed to protect from the effects of write
+   buffers and queued reads.  Many of them are likely unnecessary. */
+
+
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {
+
+ __asm__ __volatile__ ("ssync; flush [%0]; ssync" :: "a" (d_addr));
+}
+
+/* Blackfins can't just invalidate cache; they can only do flush +
+   invalidate.  If the line isn't dirty then this is equivalent to
+   just an invalidate.  Even if it is dirty, this should still be
+   okay, since with a pure invalidate method the caller would have no
+   way to ensure the dirty line hadn't been written out anyway prior
+   to the invalidate. */
+static void _CPU_cache_invalidate_1_data_line(const void *d_addr) {
+
+ __asm__ __volatile__ ("ssync; flushinv [%0]; ssync" :: "a" (d_addr));
+}
+
+static void _CPU_cache_freeze_data(void) {
+}
+
+static void _CPU_cache_unfreeze_data(void) {
+}
+
+static void _CPU_cache_invalidate_1_instruction_line(const void *d_addr) {
+
+ __asm__ __volatile__ ("ssync; iflush [%0]; ssync" :: "a" (d_addr));
+}
+
+static void _CPU_cache_freeze_instruction(void) {
+}
+
+static void _CPU_cache_unfreeze_instruction(void) {
+}
+
+/* This is incredibly inefficient...  It would be better to make use of the
+   DTEST_COMMAND/DTEST_DATAx registers to find the addresses in each
+   cache line and flush just those.  However the documentation I've
+   seen on those is a bit sketchy, and I sure wouldn't want to get it
+   wrong. */
+static void _CPU_cache_flush_entire_data(void) {
+ uint32_t i;
+
+ i = 0;
+ __asm__ __volatile__ ("ssync");
+ do {
+ __asm__ __volatile__ ("flush [%0]" :: "a" (i));
+ i += CPU_DATA_CACHE_ALIGNMENT;
+ } while (i);
+ __asm__ __volatile__ ("ssync");
+}
+
+static void _CPU_cache_invalidate_entire_data(void) {
+ uint32_t dmemControl;
+
+ __asm__ __volatile__ ("ssync");
+ dmemControl = *(uint32_t volatile *) DMEM_CONTROL;
+ *(uint32_t volatile *) DMEM_CONTROL = dmemControl & ~DMEM_CONTROL_DMC_MASK;
+ *(uint32_t volatile *) DMEM_CONTROL = dmemControl;
+ __asm__ __volatile__ ("ssync");
+}
+
+/* this does not actually enable data cache unless CPLBs are also enabled.
+ LIBCPU_DATA_CACHE_CONFIG contains the DMEM_CONTROL_DMC bits to set. */
+static void _CPU_cache_enable_data(void) {
+
+ __asm__ __volatile__ ("ssync");
+ *(uint32_t volatile *) DMEM_CONTROL |= LIBCPU_DATA_CACHE_CONFIG;
+ __asm__ __volatile__ ("ssync");
+}
+
+static void _CPU_cache_disable_data(void) {
+
+ __asm__ __volatile__ ("ssync");
+ *(uint32_t volatile *) DMEM_CONTROL &= ~DMEM_CONTROL_DMC_MASK;
+ __asm__ __volatile__ ("ssync");
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void) {
+ uint32_t imemControl;
+
+ __asm__ __volatile__ ("ssync");
+ imemControl = *(uint32_t volatile *) IMEM_CONTROL;
+ *(uint32_t volatile *) IMEM_CONTROL = imemControl & ~IMEM_CONTROL_IMC;
+ *(uint32_t volatile *) IMEM_CONTROL = imemControl;
+ __asm__ __volatile__ ("ssync");
+}
+
+/* this only actually enables the instruction cache if the CPLBs are also
+ enabled. */
+static void _CPU_cache_enable_instruction(void) {
+
+ __asm__ __volatile__ ("ssync");
+ *(uint32_t volatile *) IMEM_CONTROL |= IMEM_CONTROL_IMC;
+ __asm__ __volatile__ ("ssync");
+}
+
+static void _CPU_cache_disable_instruction(void) {
+
+ __asm__ __volatile__ ("ssync");
+ *(uint32_t volatile *) IMEM_CONTROL &= ~IMEM_CONTROL_IMC;
+ __asm__ __volatile__ ("ssync");
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/i386/shared/cache/cache.c b/bsps/i386/shared/cache/cache.c
new file mode 100644
index 0000000000..df7909489d
--- /dev/null
+++ b/bsps/i386/shared/cache/cache.c
@@ -0,0 +1,93 @@
+/*
+ * Cache Management Support Routines for the i386
+ */
+
+#include <rtems.h>
+#include <rtems/score/cpu.h>
+#include <libcpu/page.h>
+
+#define I386_CACHE_ALIGNMENT 16
+#define CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
+
+void _CPU_disable_cache(void)
+{
+ unsigned int regCr0;
+
+ regCr0 = i386_get_cr0();
+ regCr0 |= CR0_PAGE_LEVEL_CACHE_DISABLE;
+ regCr0 |= CR0_NO_WRITE_THROUGH;
+ i386_set_cr0( regCr0 );
+ rtems_cache_flush_entire_data();
+}
+
+/*
+ * Enable the entire cache
+ */
+
+void _CPU_enable_cache(void)
+{
+ unsigned int regCr0;
+
+ regCr0 = i386_get_cr0();
+ regCr0 &= ~(CR0_PAGE_LEVEL_CACHE_DISABLE);
+ regCr0 &= ~(CR0_NO_WRITE_THROUGH);
+ i386_set_cr0( regCr0 );
+ /*rtems_cache_flush_entire_data();*/
+}
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: The routines below should be implemented per CPU,
+ * to accommodate the capabilities of each.
+ */
+
+#if defined(I386_CACHE_ALIGNMENT)
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {}
+static void _CPU_cache_invalidate_1_data_line(const void *d_addr) {}
+static void _CPU_cache_freeze_data(void) {}
+static void _CPU_cache_unfreeze_data(void) {}
+static void _CPU_cache_invalidate_1_instruction_line ( const void *d_addr ) {}
+static void _CPU_cache_freeze_instruction(void) {}
+static void _CPU_cache_unfreeze_instruction(void) {}
+
+static void _CPU_cache_flush_entire_data(void)
+{
+ __asm__ volatile ("wbinvd");
+}
+static void _CPU_cache_invalidate_entire_data(void)
+{
+ __asm__ volatile ("invd");
+}
+
+static void _CPU_cache_enable_data(void)
+{
+ _CPU_enable_cache();
+}
+
+static void _CPU_cache_disable_data(void)
+{
+ _CPU_disable_cache();
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void)
+{
+ __asm__ volatile ("invd");
+}
+
+static void _CPU_cache_enable_instruction(void)
+{
+ _CPU_enable_cache();
+}
+
+static void _CPU_cache_disable_instruction( void )
+{
+ _CPU_disable_cache();
+}
+#endif
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/genmcf548x/start/cache.c b/bsps/m68k/genmcf548x/start/cache.c
new file mode 100644
index 0000000000..18aa929b3d
--- /dev/null
+++ b/bsps/m68k/genmcf548x/start/cache.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2007-2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <bsp.h>
+
+#define CPU_DATA_CACHE_ALIGNMENT 16
+
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 16
+
+/*
+ * There is no complete cache lock (only 2 of the 4 ways can be locked)
+ */
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* Do nothing */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* Do nothing */
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* Do nothing */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* Do nothing */
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ bsp_cacr_clear_flags( MCF548X_CACR_IDCM);
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ bsp_cacr_set_flags( MCF548X_CACR_IDCM);
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ bsp_cacr_set_self_clear_flags( MCF548X_CACR_ICINVA);
+}
+
+static inline void _CPU_cache_invalidate_1_instruction_line(const void *addr)
+{
+ uint32_t a = (uint32_t) addr & ~0x3;
+
+ __asm__ volatile ("cpushl %%ic,(%0)" :: "a" (a | 0x0));
+ __asm__ volatile ("cpushl %%ic,(%0)" :: "a" (a | 0x1));
+ __asm__ volatile ("cpushl %%ic,(%0)" :: "a" (a | 0x2));
+ __asm__ volatile ("cpushl %%ic,(%0)" :: "a" (a | 0x3));
+}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ bsp_cacr_clear_flags( MCF548X_CACR_DDCM( DCACHE_OFF_IMPRECISE));
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ bsp_cacr_set_flags( MCF548X_CACR_DDCM( DCACHE_OFF_IMPRECISE));
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ bsp_cacr_set_self_clear_flags( MCF548X_CACR_DCINVA);
+}
+
+static inline void _CPU_cache_invalidate_1_data_line( const void *addr)
+{
+ uint32_t a = (uint32_t) addr & ~0x3;
+
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x0));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x1));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x2));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x3));
+}
+
+static inline void _CPU_cache_flush_1_data_line( const void *addr)
+{
+ uint32_t a = (uint32_t) addr & ~0x3;
+
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x0));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x1));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x2));
+ __asm__ volatile ("cpushl %%dc,(%0)" :: "a" (a | 0x3));
+}
+
+static inline void _CPU_cache_flush_entire_data( void)
+{
+ uint32_t line = 0;
+
+ for (line = 0; line < 512; ++line) {
+ _CPU_cache_flush_1_data_line( (const void *) (line * 16));
+ }
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache-mcf5223x.c b/bsps/m68k/shared/cache/cache-mcf5223x.c
new file mode 100644
index 0000000000..60b2f7f335
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache-mcf5223x.c
@@ -0,0 +1,38 @@
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <mcf5223x/mcf5223x.h>
+#include "cache.h"
+
+/*
+ * Cannot be frozen
+ */
+static void _CPU_cache_freeze_data(void) {}
+static void _CPU_cache_unfreeze_data(void) {}
+static void _CPU_cache_freeze_instruction(void) {}
+static void _CPU_cache_unfreeze_instruction(void) {}
+
+/*
+ * Write-through data cache -- flushes are unnecessary
+ */
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {}
+static void _CPU_cache_flush_entire_data(void) {}
+
+static void _CPU_cache_enable_instruction(void) {}
+static void _CPU_cache_disable_instruction(void) {}
+static void _CPU_cache_invalidate_entire_instruction(void) {}
+static void _CPU_cache_invalidate_1_instruction_line(const void *addr) {}
+
+static void _CPU_cache_enable_data(void) {}
+static void _CPU_cache_disable_data(void) {}
+static void _CPU_cache_invalidate_entire_data(void) {}
+static void _CPU_cache_invalidate_1_data_line(const void *addr) {}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache-mcf5225x.c b/bsps/m68k/shared/cache/cache-mcf5225x.c
new file mode 100644
index 0000000000..10c867ba7f
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache-mcf5225x.c
@@ -0,0 +1,34 @@
+/**
+ * @file
+ *
+ * Cache Management Support Routines for the MCF5225x
+ */
+
+#include <rtems.h>
+#include "cache.h"
+
+/*
+ * Cannot be frozen
+ */
+static void _CPU_cache_freeze_data(void) {}
+static void _CPU_cache_unfreeze_data(void) {}
+static void _CPU_cache_freeze_instruction(void) {}
+static void _CPU_cache_unfreeze_instruction(void) {}
+
+/*
+ * Write-through data cache -- flushes are unnecessary
+ */
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {}
+static void _CPU_cache_flush_entire_data(void) {}
+
+static void _CPU_cache_enable_instruction(void) {}
+static void _CPU_cache_disable_instruction(void) {}
+static void _CPU_cache_invalidate_entire_instruction(void) {}
+static void _CPU_cache_invalidate_1_instruction_line(const void *addr) {}
+
+static void _CPU_cache_enable_data(void) {}
+static void _CPU_cache_disable_data(void) {}
+static void _CPU_cache_invalidate_entire_data(void) {}
+static void _CPU_cache_invalidate_1_data_line(const void *addr) {}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache-mcf5235.c b/bsps/m68k/shared/cache/cache-mcf5235.c
new file mode 100644
index 0000000000..35390b02ef
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache-mcf5235.c
@@ -0,0 +1,101 @@
+/*
+ * COPYRIGHT (c) 1989-2008.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems.h>
+#include <mcf5235/mcf5235.h>
+#include "cache.h"
+
+/*
+ * Default value for the cacr is set by the BSP
+ */
+extern uint32_t cacr_mode;
+
+/*
+ * Cannot be frozen
+ */
+static void _CPU_cache_freeze_data(void) {}
+static void _CPU_cache_unfreeze_data(void) {}
+static void _CPU_cache_freeze_instruction(void) {}
+static void _CPU_cache_unfreeze_instruction(void) {}
+
+/*
+ * Write-through data cache -- flushes are unnecessary
+ */
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {}
+static void _CPU_cache_flush_entire_data(void) {}
+
+static void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode &= ~MCF5XXX_CACR_DIDI;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode |= MCF5XXX_CACR_DIDI;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void)
+{
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVI);
+}
+
+static void _CPU_cache_invalidate_1_instruction_line(const void *addr)
+{
+ /*
+ * Top half of cache is I-space
+ */
+ addr = (void *)((int)addr | 0x400);
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (addr));
+}
+
+static void _CPU_cache_enable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode &= ~MCF5XXX_CACR_DISD;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_disable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode |= MCF5XXX_CACR_DISD;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_invalidate_entire_data(void)
+{
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVD);
+}
+
+static void _CPU_cache_invalidate_1_data_line(const void *addr)
+{
+ /*
+ * Bottom half of cache is D-space
+ */
+ addr = (void *)((int)addr & ~0x400);
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (addr));
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache-mcf5282.c b/bsps/m68k/shared/cache/cache-mcf5282.c
new file mode 100644
index 0000000000..259572432a
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache-mcf5282.c
@@ -0,0 +1,124 @@
+/**
+ * @file
+ *
+ * Cache Management Support Routines for the MCF5282
+ */
+
+#include <rtems.h>
+#include <mcf5282/mcf5282.h> /* internal MCF5282 modules */
+#include "cache.h"
+
+/*
+ * CPU-space access
+ */
+#define m68k_set_acr0(_acr0) \
+ __asm__ volatile ("movec %0,%%acr0" : : "d" (_acr0))
+#define m68k_set_acr1(_acr1) \
+ __asm__ volatile ("movec %0,%%acr1" : : "d" (_acr1))
+
+#define NOP __asm__ volatile ("nop");
+
+/*
+ * cacr_mode is a read/write software copy of the common cache control
+ * register.  Default value when mcf5xxx_initialize_cacr() is not called:
+ * - split I/D cache
+ * - CPUSHL allowed to invalidate a cache line
+ * - buffered writes enabled
+ * - no burst transfers on non-cacheable accesses
+ * - default cache mode is *disabled* (cache only the ACRx areas)
+ */
+static uint32_t cacr_mode = MCF5XXX_CACR_CENB |
+ MCF5XXX_CACR_DBWE |
+ MCF5XXX_CACR_DCM;
+
+void mcf5xxx_initialize_cacr(uint32_t cacr)
+{
+ cacr_mode = cacr;
+ m68k_set_cacr( cacr_mode );
+}
+
+/*
+ * Cannot be frozen
+ */
+static void _CPU_cache_freeze_data(void) {}
+static void _CPU_cache_unfreeze_data(void) {}
+static void _CPU_cache_freeze_instruction(void) {}
+static void _CPU_cache_unfreeze_instruction(void) {}
+
+/*
+ * Write-through data cache -- flushes are unnecessary
+ */
+static void _CPU_cache_flush_1_data_line(const void *d_addr) {}
+static void _CPU_cache_flush_entire_data(void) {}
+
+static void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode &= ~MCF5XXX_CACR_DIDI;
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVI );
+ NOP;
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode |= MCF5XXX_CACR_DIDI;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void)
+{
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVI);
+ NOP;
+}
+
+static void _CPU_cache_invalidate_1_instruction_line(const void *addr)
+{
+ /*
+ * Top half of cache is I-space
+ */
+ addr = (void *)((int)addr | 0x400);
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (addr));
+}
+
+static void _CPU_cache_enable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode &= ~MCF5XXX_CACR_DISD;
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVD);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_disable_data(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ cacr_mode |= MCF5XXX_CACR_DISD;
+ m68k_set_cacr(cacr_mode);
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_invalidate_entire_data(void)
+{
+ m68k_set_cacr(cacr_mode | MCF5XXX_CACR_CINV | MCF5XXX_CACR_INVD);
+}
+
+static void _CPU_cache_invalidate_1_data_line(const void *addr)
+{
+ /*
+ * Bottom half of cache is D-space
+ */
+ addr = (void *)((int)addr & ~0x400);
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (addr));
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache-mcf532x.c b/bsps/m68k/shared/cache/cache-mcf532x.c
new file mode 100644
index 0000000000..0203afee77
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache-mcf532x.c
@@ -0,0 +1,143 @@
+/**
+ * @file
+ *
+ * Cache Management Support Routines for the MCF532x
+ */
+
+#include <rtems.h>
+#include <mcf532x/mcf532x.h>
+#include "cache.h"
+
+#define m68k_set_cacr(_cacr) \
+ __asm__ volatile ("movec %0,%%cacr" : : "d" (_cacr))
+
+/*
+ * cacr_mode is a read/write software copy of the common cache control
+ * register:
+ * - default cache mode is *disabled* (cache only the ACRx areas)
+ * - CPUSHL allowed to invalidate a cache line
+ * - store buffer enabled
+ */
+static uint32_t cacr_mode = MCF_CACR_ESB |
+ MCF_CACR_DCM(3);
+
+/*
+ * Cannot be frozen
+ */
+static void _CPU_cache_freeze_data(void)
+{
+}
+
+static void _CPU_cache_unfreeze_data(void)
+{
+}
+
+static void _CPU_cache_freeze_instruction(void)
+{
+}
+
+static void _CPU_cache_unfreeze_instruction(void)
+{
+}
+
+static void _CPU_cache_flush_1_data_line(const void *d_addr)
+{
+ register unsigned long adr = (((unsigned long) d_addr >> 4) & 0xff) << 4;
+
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+}
+
+static void _CPU_cache_flush_entire_data(void)
+{
+ register unsigned long set, adr;
+
+ for(set = 0; set < 256; ++set) {
+ adr = (set << 4);
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ }
+}
+
+static void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ if(!(cacr_mode & MCF_CACR_CENB))
+ {
+ cacr_mode |= MCF_CACR_CENB;
+ m68k_set_cacr(cacr_mode);
+ }
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ if((cacr_mode & MCF_CACR_CENB))
+ {
+ cacr_mode &= ~MCF_CACR_CENB;
+ m68k_set_cacr(cacr_mode);
+ }
+ rtems_interrupt_enable(level);
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void)
+{
+ m68k_set_cacr(cacr_mode | MCF_CACR_CINVA);
+}
+
+static void _CPU_cache_invalidate_1_instruction_line(const void *addr)
+{
+ register unsigned long adr = (((unsigned long) addr >> 4) & 0xff) << 4;
+
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+ adr += 1;
+ __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (adr));
+}
+
+static void _CPU_cache_enable_data(void)
+{
+  /*
+   * The 532x has a unified data and instruction cache, so we simply call
+   * through to the instruction cache enable routine.
+   */
+ _CPU_cache_enable_instruction();
+}
+
+static void _CPU_cache_disable_data(void)
+{
+  /*
+   * The 532x has a unified data and instruction cache, so we simply call
+   * through to the instruction cache disable routine.
+   */
+ _CPU_cache_disable_instruction();
+}
+
+static void _CPU_cache_invalidate_entire_data(void)
+{
+ _CPU_cache_invalidate_entire_instruction();
+}
+
+static void _CPU_cache_invalidate_1_data_line(const void *addr)
+{
+ _CPU_cache_invalidate_1_instruction_line(addr);
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache.c b/bsps/m68k/shared/cache/cache.c
new file mode 100644
index 0000000000..3b5e87ef56
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache.c
@@ -0,0 +1,3 @@
+#include <rtems.h>
+#include "cache.h"
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/m68k/shared/cache/cache.h b/bsps/m68k/shared/cache/cache.h
new file mode 100644
index 0000000000..2fa78b651d
--- /dev/null
+++ b/bsps/m68k/shared/cache/cache.h
@@ -0,0 +1,215 @@
+/*
+ * M68K Cache Manager Support
+ */
+
+#if (defined(__mc68020__) && !defined(__mcpu32__))
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+#elif defined(__mc68030__)
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+# define M68K_DATA_CACHE_ALIGNMENT 16
+#elif ( defined(__mc68040__) || defined (__mc68060__) )
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+# define M68K_DATA_CACHE_ALIGNMENT 16
+#elif ( defined(__mcf5200__) )
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+# if ( defined(__mcf528x__) )
+# define M68K_DATA_CACHE_ALIGNMENT 16
+# endif
+#elif ( defined(__mcf5300__) )
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+# define M68K_DATA_CACHE_ALIGNMENT 16
+#elif defined(__mcfv4e__)
+# define M68K_INSTRUCTION_CACHE_ALIGNMENT 16
+# define M68K_DATA_CACHE_ALIGNMENT 16
+#endif
+
+#if defined(M68K_DATA_CACHE_ALIGNMENT)
+#define CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
+#endif
+
+#if defined(M68K_INSTRUCTION_CACHE_ALIGNMENT)
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT M68K_INSTRUCTION_CACHE_ALIGNMENT
+#endif
+
+/*
+ * Since the cacr is common to all mc680x0, provide macros
+ * for masking values in that register.
+ */
+
+/*
+ * Used to clear bits in the cacr.
+ */
+#define _CPU_CACR_AND(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ __asm__ volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ andl %2, %0; /* and with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
+ }
+
+
+/*
+ * Used to set bits in the cacr.
+ */
+#define _CPU_CACR_OR(mask) \
+ { \
+ register unsigned long _value = mask; \
+ register unsigned long _ctl = 0; \
+ __asm__ volatile ( "movec %%cacr, %0; /* read the cacr */ \
+ orl %2, %0; /* or with _val */ \
+ movec %1, %%cacr" /* write the cacr */ \
+ : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" ); \
+ }
+
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ */
+#if ( (defined(__mc68020__) && !defined(__mcpu32__)) || defined(__mc68030__) )
+
+#if defined(__mc68030__)
+
+/* Only the mc68030 has a data cache; it is write-through only. */
+
+void _CPU_cache_flush_1_data_line ( const void * d_addr ) {}
+void _CPU_cache_flush_entire_data ( void ) {}
+
+void _CPU_cache_invalidate_1_data_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ __asm__ volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR(0x00000400);
+}
+
+void _CPU_cache_invalidate_entire_data ( void )
+{
+ _CPU_CACR_OR( 0x00000800 );
+}
+
+void _CPU_cache_freeze_data ( void )
+{
+ _CPU_CACR_OR( 0x00000200 );
+}
+
+void _CPU_cache_unfreeze_data ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFDFF );
+}
+
+void _CPU_cache_enable_data ( void )
+{
+ _CPU_CACR_OR( 0x00000100 );
+}
+void _CPU_cache_disable_data ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFEFF );
+}
+#endif
+
+
+/* Both the 68020 and 68030 have instruction caches */
+
+void _CPU_cache_invalidate_1_instruction_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ __asm__ volatile ( "movec %0, %%caar" :: "a" (p_address) ); /* write caar */
+ _CPU_CACR_OR( 0x00000004 );
+}
+
+void _CPU_cache_invalidate_entire_instruction ( void )
+{
+ _CPU_CACR_OR( 0x00000008 );
+}
+
+void _CPU_cache_freeze_instruction ( void )
+{
+ _CPU_CACR_OR( 0x00000002);
+}
+
+void _CPU_cache_unfreeze_instruction ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFD );
+}
+
+void _CPU_cache_enable_instruction ( void )
+{
+ _CPU_CACR_OR( 0x00000001 );
+}
+
+void _CPU_cache_disable_instruction ( void )
+{
+ _CPU_CACR_AND( 0xFFFFFFFE );
+}
+
+
+#elif ( defined(__mc68040__) || defined (__mc68060__) )
+
+/* Cannot be frozen */
+void _CPU_cache_freeze_data ( void ) {}
+void _CPU_cache_unfreeze_data ( void ) {}
+void _CPU_cache_freeze_instruction ( void ) {}
+void _CPU_cache_unfreeze_instruction ( void ) {}
+
+void _CPU_cache_flush_1_data_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ __asm__ volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
+}
+
+void _CPU_cache_invalidate_1_data_line (
+ const void * d_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
+ __asm__ volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
+}
+
+void _CPU_cache_flush_entire_data ( void )
+{
+  __asm__ volatile ( "cpusha %%dc" :: );
+}
+
+void _CPU_cache_invalidate_entire_data ( void )
+{
+  __asm__ volatile ( "cinva %%dc" :: );
+}
+
+void _CPU_cache_enable_data ( void )
+{
+ _CPU_CACR_OR( 0x80000000 );
+}
+
+void _CPU_cache_disable_data ( void )
+{
+ _CPU_CACR_AND( 0x7FFFFFFF );
+}
+
+void _CPU_cache_invalidate_1_instruction_line (
+ const void * i_addr )
+{
+ void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
+ __asm__ volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
+}
+
+void _CPU_cache_invalidate_entire_instruction ( void )
+{
+  __asm__ volatile ( "cinva %%ic" :: );
+}
+
+void _CPU_cache_enable_instruction ( void )
+{
+ _CPU_CACR_OR( 0x00008000 );
+}
+
+void _CPU_cache_disable_instruction ( void )
+{
+ _CPU_CACR_AND( 0xFFFF7FFF );
+}
+#endif
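The 68040/68060 branch above drives the CACR with magic masks; on those parts bit 31 enables the data cache and bit 15 the instruction cache. Named constants would make the intent clearer. A readability sketch only; the constant names are illustrative, not from the RTEMS headers:

    /* Hypothetical names for the 68040/68060 CACR bits used above */
    #define M68K_CACR_DE 0x80000000  /* data cache enable (bit 31) */
    #define M68K_CACR_IE 0x00008000  /* instruction cache enable (bit 15) */

    void _CPU_cache_enable_data ( void )
    {
      _CPU_CACR_OR( M68K_CACR_DE );
    }

    void _CPU_cache_disable_data ( void )
    {
      _CPU_CACR_AND( ~M68K_CACR_DE );
    }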
diff --git a/bsps/or1k/headers.am b/bsps/or1k/headers.am
index 7729a76231..670c0c40ab 100644
--- a/bsps/or1k/headers.am
+++ b/bsps/or1k/headers.am
@@ -2,5 +2,4 @@
include_bspdir = $(includedir)/bsp
include_bsp_HEADERS =
-include_bsp_HEADERS += ../../../../../bsps/or1k/include/bsp/cache_.h
include_bsp_HEADERS += ../../../../../bsps/or1k/include/bsp/linker-symbols.h
diff --git a/bsps/or1k/include/bsp/cache_.h b/bsps/or1k/include/bsp/cache_.h
deleted file mode 100644
index ed2053858e..0000000000
--- a/bsps/or1k/include/bsp/cache_.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifndef LIBBSP_OR1K_SHARED_CACHE_H
-#define LIBBSP_OR1K_SHARED_CACHE_H
-
-#include <assert.h>
-#include <bsp.h>
-#include <rtems/rtems/intr.h>
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* These two defines also ensure that the rtems_cache_* functions have bodies */
-#define CPU_DATA_CACHE_ALIGNMENT 32
-#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
-
-#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS 1
-
-static inline size_t
-_CPU_cache_get_data_cache_size( const uint32_t level )
-{
- return (level == 0 || level == 1)? 8192 : 0;
-}
-
-static inline size_t
-_CPU_cache_get_instruction_cache_size( const uint32_t level )
-{
- return (level == 0 || level == 1)? 8192 : 0;
-}
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* LIBBSP_OR1K_SHARED_CACHE_H */
diff --git a/bsps/or1k/shared/cache/cache.c b/bsps/or1k/shared/cache/cache.c
new file mode 100644
index 0000000000..55fa54e62f
--- /dev/null
+++ b/bsps/or1k/shared/cache/cache.c
@@ -0,0 +1,388 @@
+/*
+ * COPYRIGHT (c) 2014, 2016 ÅAC Microtec AB <www.aacmicrotec.com>
+ * Contributor(s):
+ * Karol Gugala <kgugala@antmicro.com>
+ * Martin Werner <martin.werner@aacmicrotec.com>
+ *
+ * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
+ *
+ * COPYRIGHT (c) 1989-2006
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems/score/cpu.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/or1k-utility.h>
+#include <rtems/score/percpu.h>
+
+#define CPU_DATA_CACHE_ALIGNMENT 32
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
+
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS 1
+#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS 1
+
+static inline size_t
+_CPU_cache_get_data_cache_size( const uint32_t level )
+{
+ return (level == 0 || level == 1)? 8192 : 0;
+}
+
+static inline size_t
+_CPU_cache_get_instruction_cache_size( const uint32_t level )
+{
+ return (level == 0 || level == 1)? 8192 : 0;
+}
+
+static inline void _CPU_OR1K_Cache_data_block_prefetch(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBPR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static inline void _CPU_OR1K_Cache_data_block_writeback(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBWR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static inline void _CPU_OR1K_Cache_data_block_lock(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBLR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static inline void _CPU_OR1K_Cache_instruction_block_prefetch
+(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_ICBPR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static inline void _CPU_OR1K_Cache_instruction_block_lock
+(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_ICBLR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+/* Implement RTEMS cache manager functions */
+
+static void _CPU_cache_flush_1_data_line(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBFR, (uintptr_t) d_addr);
+
+ //__asm__ volatile("l.csync");
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_invalidate_1_data_line(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBIR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_freeze_data(void)
+{
+ /* Do nothing */
+}
+
+static void _CPU_cache_unfreeze_data(void)
+{
+ /* Do nothing */
+}
+
+static void _CPU_cache_invalidate_1_instruction_line(const void *d_addr)
+{
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ _OR1K_mtspr(CPU_OR1K_SPR_ICBIR, (uintptr_t) d_addr);
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_freeze_instruction(void)
+{
+ /* Do nothing */
+}
+
+static void _CPU_cache_unfreeze_instruction(void)
+{
+ /* Do nothing */
+}
+
+static void _CPU_cache_flush_entire_data(void)
+{
+ size_t addr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+  /* Only a single cache level exists, so no other levels need to be flushed */
+ for (
+ addr = _CPU_cache_get_data_cache_size(0);
+ addr > 0;
+ addr -= CPU_DATA_CACHE_ALIGNMENT
+ ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBFR, (uintptr_t) addr);
+ }
+
+ _ISR_Local_enable (level);
+}
+
+static void _CPU_cache_invalidate_entire_data(void)
+{
+ size_t addr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+  /* Only a single cache level exists, so no other levels need to be invalidated */
+ for (
+ addr = _CPU_cache_get_data_cache_size(0);
+ addr > 0;
+ addr -= CPU_DATA_CACHE_ALIGNMENT
+ ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBIR, (uintptr_t) addr);
+ }
+
+ _ISR_Local_enable (level);
+}
+
+static void _CPU_cache_invalidate_entire_instruction(void)
+{
+ size_t addr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+  /* Only a single cache level exists, so no other levels need to be invalidated */
+ for (
+ addr = _CPU_cache_get_instruction_cache_size(0);
+ addr > 0;
+ addr -= CPU_INSTRUCTION_CACHE_ALIGNMENT
+ ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_ICBIR, (uintptr_t) addr);
+ }
+
+ /* Flush instructions out of instruction buffer */
+ __asm__ volatile("l.nop");
+ __asm__ volatile("l.nop");
+ __asm__ volatile("l.nop");
+ __asm__ volatile("l.nop");
+ __asm__ volatile("l.nop");
+
+ _ISR_Local_enable (level);
+}
+
+/*
+ * The range functions are copied almost verbatim from the generic
+ * implementations in c/src/lib/libcpu/shared/src/cache_manager.c.  The main
+ * modification here is avoiding repeated disabling and re-enabling of
+ * interrupts for each cache line operation.
+ */
+
+static void _CPU_cache_flush_data_range(const void *d_addr, size_t n_bytes)
+{
+ const void * final_address;
+ ISR_Level level;
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment d_addr and push
+ * the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to flush is zero */
+ return;
+
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
+
+ if( final_address - d_addr > _CPU_cache_get_data_cache_size(0) ) {
+ /*
+ * Avoid iterating over the whole cache multiple times if the range is
+ * larger than the cache size.
+ */
+ _CPU_cache_flush_entire_data();
+ return;
+ }
+
+ _ISR_Local_disable (level);
+
+ while( d_addr <= final_address ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBFR, (uintptr_t) d_addr);
+ d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
+ }
+
+ _ISR_Local_enable (level);
+}
+
+static void _CPU_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
+{
+ const void * final_address;
+ ISR_Level level;
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment d_addr and push
+ * the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to flush is zero */
+ return;
+
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
+
+ if( final_address - d_addr > _CPU_cache_get_data_cache_size(0) ) {
+ /*
+ * Avoid iterating over the whole cache multiple times if the range is
+ * larger than the cache size.
+ */
+ _CPU_cache_invalidate_entire_data();
+ return;
+ }
+
+ _ISR_Local_disable (level);
+
+ while( d_addr <= final_address ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_DCBIR, (uintptr_t) d_addr);
+ d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
+ }
+
+ _ISR_Local_enable (level);
+}
+
+static void _CPU_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
+{
+ const void * final_address;
+ ISR_Level level;
+
+ /*
+ * Set i_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment i_addr and push
+ * the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to flush is zero */
+ return;
+
+ final_address = (void *)((size_t)i_addr + n_bytes - 1);
+ i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
+
+  if( final_address - i_addr > _CPU_cache_get_instruction_cache_size(0) ) {
+ /*
+ * Avoid iterating over the whole cache multiple times if the range is
+ * larger than the cache size.
+ */
+ _CPU_cache_invalidate_entire_instruction();
+ return;
+ }
+
+ _ISR_Local_disable (level);
+
+ while( i_addr <= final_address ) {
+ _OR1K_mtspr(CPU_OR1K_SPR_ICBIR, (uintptr_t) i_addr);
+    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
+ }
+
+ _ISR_Local_enable (level);
+}
+
+static void _CPU_cache_enable_data(void)
+{
+ uint32_t sr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
+ _OR1K_mtspr(CPU_OR1K_SPR_SR, sr | CPU_OR1K_SPR_SR_DCE);
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_disable_data(void)
+{
+ uint32_t sr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
+ _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_DCE));
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_enable_instruction(void)
+{
+ uint32_t sr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
+ _OR1K_mtspr(CPU_OR1K_SPR_SR, sr | CPU_OR1K_SPR_SR_ICE);
+
+ _ISR_Local_enable(level);
+}
+
+static void _CPU_cache_disable_instruction(void)
+{
+ uint32_t sr;
+ ISR_Level level;
+
+ _ISR_Local_disable (level);
+
+ sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
+ _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_ICE));
+
+ _ISR_Local_enable(level);
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/powerpc/shared/cache/cache.c b/bsps/powerpc/shared/cache/cache.c
new file mode 100644
index 0000000000..902893b883
--- /dev/null
+++ b/bsps/powerpc/shared/cache/cache.c
@@ -0,0 +1,319 @@
+/**
+ * @file
+ *
+ * @ingroup powerpc_shared
+ *
+ * @brief Cache Manager support routines for PowerPC.
+ */
+
+/*
+ * Originally the Cache Management Support Routines for the MC68040.
+ * Modified for the MPC8260 by Andy Dachs <a.dachs@sstl.co.uk>,
+ * Surrey Satellite Technology Limited (SSTL), 2001.
+ */
+
+#include <rtems.h>
+#include <rtems/powerpc/powerpc.h>
+#include <rtems/powerpc/registers.h>
+
+/* Provide the CPU defines only if we have a cache */
+#if PPC_CACHE_ALIGNMENT != PPC_NO_CACHE_ALIGNMENT
+ #define CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+ #define CPU_INSTRUCTION_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
+#endif
+
+#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
+
+static inline size_t _CPU_cache_get_data_cache_size(uint32_t level)
+{
+ switch (level) {
+ case 0:
+ /* Fall through */
+#ifdef PPC_CACHE_DATA_L3_SIZE
+ case 3:
+ return PPC_CACHE_DATA_L3_SIZE;
+#endif
+#ifdef PPC_CACHE_DATA_L2_SIZE
+ case 2:
+ return PPC_CACHE_DATA_L2_SIZE;
+#endif
+#ifdef PPC_CACHE_DATA_L1_SIZE
+ case 1:
+ return PPC_CACHE_DATA_L1_SIZE;
+#endif
+ default:
+ return 0;
+ }
+}
+
+static inline size_t _CPU_cache_get_instruction_cache_size(uint32_t level)
+{
+ switch (level) {
+ case 0:
+ /* Fall through */
+#ifdef PPC_CACHE_INSTRUCTION_L3_SIZE
+ case 3:
+ return PPC_CACHE_INSTRUCTION_L3_SIZE;
+#endif
+#ifdef PPC_CACHE_INSTRUCTION_L2_SIZE
+ case 2:
+ return PPC_CACHE_INSTRUCTION_L2_SIZE;
+#endif
+#ifdef PPC_CACHE_INSTRUCTION_L1_SIZE
+ case 1:
+ return PPC_CACHE_INSTRUCTION_L1_SIZE;
+#endif
+ default:
+ return 0;
+ }
+}
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#if defined(ppc603) || defined(ppc603e) || defined(mpc8260) /* And possibly others */
+
+/* Helpful macros */
+#define PPC_Get_HID0( _value ) \
+ do { \
+ _value = 0; /* to avoid warnings */ \
+ __asm__ volatile( \
+ "mfspr %0, 0x3f0;" /* get HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+#define PPC_Set_HID0( _value ) \
+ do { \
+ __asm__ volatile( \
+ "isync;" \
+ "mtspr 0x3f0, %0;" /* load HID0 */ \
+ "isync" \
+ : "=r" (_value) \
+ : "0" (_value) \
+ ); \
+ } while (0)
+
+static inline void _CPU_cache_enable_data(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value |= HID0_DCE; /* set DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value &= ~HID0_DCE; /* clear DCE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+  uint32_t value;
+  PPC_Get_HID0( value );
+  value |= HID0_DCI;            /* set data cache invalidate bit */
+  PPC_Set_HID0( value );
+  value &= ~HID0_DCI;           /* clear data cache invalidate bit */
+  PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value |= HID0_DLOCK; /* set data cache lock bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+  uint32_t value;
+  PPC_Get_HID0( value );
+  value &= ~HID0_DLOCK;         /* clear data cache lock bit */
+  PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ /*
+ * FIXME: how can we do this?
+ */
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value |= 0x00008000; /* Set ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value &= 0xFFFF7FFF; /* Clear ICE bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+  uint32_t value;
+  PPC_Get_HID0( value );
+  value |= HID0_ICFI;           /* set instruction cache flash invalidate bit */
+  PPC_Set_HID0( value );
+  value &= ~HID0_ICFI;          /* clear instruction cache flash invalidate bit */
+  PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ uint32_t value;
+ PPC_Get_HID0( value );
+ value |= HID0_ILOCK; /* set instruction cache lock bit */
+ PPC_Set_HID0( value );
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+  uint32_t value;
+  PPC_Get_HID0( value );
+  value &= ~HID0_ILOCK;         /* clear instruction cache lock bit */
+  PPC_Set_HID0( value );
+}
+
+#elif ( defined(mpx8xx) || defined(mpc860) || defined(mpc821) )
+
+#define mtspr(_spr,_reg) \
+ __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
+#define isync \
+ __asm__ volatile ("isync\n"::)
+
+static inline void _CPU_cache_flush_entire_data(void) {}
+static inline void _CPU_cache_invalidate_entire_data(void) {}
+static inline void _CPU_cache_freeze_data(void) {}
+static inline void _CPU_cache_unfreeze_data(void) {}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ uint32_t r1;
+ r1 = (0x2<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ uint32_t r1;
+ r1 = (0x4<<24);
+ mtspr( 568, r1 );
+ isync;
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void) {}
+static inline void _CPU_cache_freeze_instruction(void) {}
+static inline void _CPU_cache_unfreeze_instruction(void) {}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ uint32_t r1;
+ r1 = (0x2<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ uint32_t r1;
+ r1 = (0x4<<24);
+ mtspr( 560, r1 );
+ isync;
+}
+
+#else
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ /* Void */
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ /* Void */
+}
+
+#endif
+
+static inline void _CPU_cache_invalidate_1_data_line(const void *addr)
+{
+ __asm__ volatile ( "dcbi 0,%0" :: "r" (addr) : "memory" );
+}
+
+static inline void _CPU_cache_flush_1_data_line(const void *addr)
+{
+ __asm__ volatile ( "dcbf 0,%0" :: "r" (addr) : "memory" );
+}
+
+
+static inline void _CPU_cache_invalidate_1_instruction_line(const void *addr)
+{
+ __asm__ volatile ( "icbi 0,%0" :: "r" (addr) : "memory");
+}
+
+#include "../../../bsps/shared/cache/cacheimpl.h"
diff --git a/bsps/shared/cache/cacheimpl.h b/bsps/shared/cache/cacheimpl.h
new file mode 100644
index 0000000000..7e9f863337
--- /dev/null
+++ b/bsps/shared/cache/cacheimpl.h
@@ -0,0 +1,520 @@
+/*
+ * Cache Manager
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ *
+ * The functions in this file implement the API to the RTEMS Cache Manager and
+ * are divided into data cache and instruction cache functions.  Data cache
+ * functions only have bodies if a data cache is supported.  Instruction
+ * cache functions only have bodies if an instruction cache is supported.
+ * Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
+ * defined, where x is DATA or INSTRUCTION.  These definitions are provided
+ * by the CPU-specific cache support source files which include this header.
+ *
+ * The cache implementation source file can define
+ *
+ * #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+ *
+ * before including this header if it provides cache maintenance functions
+ * which operate on multiple lines.  Otherwise a generic loop with single
+ * line operations will be used.  It is strongly recommended to provide the
+ * implementation in terms of static inline functions for performance
+ * reasons.
+ *
+ * The functions below are implemented with CPU-dependent inline routines
+ * found in the cache.c files for each CPU.  In the event that a CPU does
+ * not support a specific function for a cache it has, the CPU-dependent
+ * routine does nothing (but does exist).
+ *
+ * At this point, the Cache Manager makes no provision for BSP-specific
+ * issues such as a secondary cache.  In such a system, the CPU-dependent
+ * routines would have to be modified, or a BSP layer added to this Manager.
+ */
+
+#include <rtems.h>
+
+#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
+#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
+#endif
+
+#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
+#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
+#endif
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/smpimpl.h>
+
+typedef struct {
+ const void *addr;
+ size_t size;
+} smp_cache_area;
+
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+
+static void smp_cache_data_flush(void *arg)
+{
+ smp_cache_area *area = arg;
+
+ rtems_cache_flush_multiple_data_lines(area->addr, area->size);
+}
+
+static void smp_cache_data_inv(void *arg)
+{
+ smp_cache_area *area = arg;
+
+ rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
+}
+
+static void smp_cache_data_flush_all(void *arg)
+{
+ rtems_cache_flush_entire_data();
+}
+
+static void smp_cache_data_inv_all(void *arg)
+{
+ rtems_cache_invalidate_entire_data();
+}
+
+#endif /* defined(CPU_DATA_CACHE_ALIGNMENT) */
+
+void
+rtems_cache_flush_multiple_data_lines_processor_set(
+ const void *addr,
+ size_t size,
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ smp_cache_area area = { addr, size };
+
+ _SMP_Multicast_action( setsize, set, smp_cache_data_flush, &area );
+#endif
+}
+
+void
+rtems_cache_invalidate_multiple_data_lines_processor_set(
+ const void *addr,
+ size_t size,
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ smp_cache_area area = { addr, size };
+
+ _SMP_Multicast_action( setsize, set, smp_cache_data_inv, &area );
+#endif
+}
+
+void
+rtems_cache_flush_entire_data_processor_set(
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _SMP_Multicast_action( setsize, set, smp_cache_data_flush_all, NULL );
+#endif
+}
+
+void
+rtems_cache_invalidate_entire_data_processor_set(
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _SMP_Multicast_action( setsize, set, smp_cache_data_inv_all, NULL );
+#endif
+}
+
+#endif /* defined(RTEMS_SMP) */
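With RTEMS_SMP, the processor-set variants above broadcast the operation via _SMP_Multicast_action; a caller builds a cpu_set_t and passes its size and address. A minimal sketch flushing a buffer on processors 0 and 1 (assuming both exist in the configuration):

    #include <rtems.h>

    void flush_on_first_two_cpus(const void *buf, size_t len)
    {
      cpu_set_t set;

      CPU_ZERO(&set);
      CPU_SET(0, &set);
      CPU_SET(1, &set);
      rtems_cache_flush_multiple_data_lines_processor_set(
        buf,
        len,
        sizeof(set),
        &set
      );
    }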
+
+/*
+ * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
+ */
+
+/*
+ * This function is called to flush the data cache by performing cache
+ * copybacks. It must determine how many cache lines need to be copied
+ * back and then perform the copybacks.
+ */
+void
+rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+ _CPU_cache_flush_data_range( d_addr, n_bytes );
+#else
+ const void * final_address;
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment d_addr and push
+ * the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to flush is zero */
+ return;
+
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
+ while( d_addr <= final_address ) {
+ _CPU_cache_flush_1_data_line( d_addr );
+ d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
+ }
+#endif
+#endif
+}
+
+
+/*
+ * This function is responsible for performing a data cache invalidate.
+ * It must determine how many cache lines need to be invalidated and then
+ * perform the invalidations.
+ */
+
+void
+rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+ _CPU_cache_invalidate_data_range( d_addr, n_bytes );
+#else
+ const void * final_address;
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment d_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to invalidate is zero */
+ return;
+
+ final_address = (void *)((size_t)d_addr + n_bytes - 1);
+ d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
+ while( final_address >= d_addr ) {
+ _CPU_cache_invalidate_1_data_line( d_addr );
+ d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
+ }
+#endif
+#endif
+}
+
+
+/*
+ * This function is responsible for performing a data cache flush.
+ * It flushes the entire cache.
+ */
+void
+rtems_cache_flush_entire_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ /*
+ * Call the CPU-specific routine
+ */
+ _CPU_cache_flush_entire_data();
+#endif
+}
+
+
+/*
+ * This function is responsible for performing a data cache
+ * invalidate. It invalidates the entire cache.
+ */
+void
+rtems_cache_invalidate_entire_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ /*
+ * Call the CPU-specific routine
+ */
+
+ _CPU_cache_invalidate_entire_data();
+#endif
+}
+
+
+/*
+ * This function returns the data cache granularity.
+ */
+size_t
+rtems_cache_get_data_line_size( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ return CPU_DATA_CACHE_ALIGNMENT;
+#else
+ return 0;
+#endif
+}
+
+
+size_t
+rtems_cache_get_data_cache_size( uint32_t level )
+{
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
+ return _CPU_cache_get_data_cache_size( level );
+#else
+ return 0;
+#endif
+}
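Note that rtems_cache_get_data_line_size() returns 0 when no data cache support is configured, so callers rounding buffer sizes up to whole cache lines (for example, to keep a DMA buffer from sharing a line with unrelated data) must handle that case. A minimal sketch, assuming line sizes are powers of two (true for every implementation in this patch):

    #include <rtems.h>

    /* Round a buffer size up to a multiple of the data cache line size,
       treating "no data cache" (line size 0) as no rounding at all. */
    size_t round_to_cache_lines(size_t n_bytes)
    {
      size_t line = rtems_cache_get_data_line_size();

      if (line == 0)
        return n_bytes;

      /* Valid because cache line sizes are powers of two */
      return (n_bytes + line - 1) & ~(line - 1);
    }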
+
+/*
+ * This function freezes the data cache; cache lines
+ * are not replaced.
+ */
+void
+rtems_cache_freeze_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _CPU_cache_freeze_data();
+#endif
+}
+
+
+/*
+ * This function unfreezes the data cache.
+ */
+void rtems_cache_unfreeze_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _CPU_cache_unfreeze_data();
+#endif
+}
+
+
+/* Turn on the data cache. */
+void
+rtems_cache_enable_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _CPU_cache_enable_data();
+#endif
+}
+
+
+/* Turn off the data cache. */
+void
+rtems_cache_disable_data( void )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _CPU_cache_disable_data();
+#endif
+}
+
+
+
+/*
+ * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
+ */
+
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
+ && defined(RTEMS_SMP) \
+ && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+
+static void smp_cache_inst_inv(void *arg)
+{
+ smp_cache_area *area = arg;
+
+ _CPU_cache_invalidate_instruction_range(area->addr, area->size);
+}
+
+static void smp_cache_inst_inv_all(void *arg)
+{
+ _CPU_cache_invalidate_entire_instruction();
+}
+
+#endif
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It must determine how many cache lines need to be invalidated
+ * and then perform the invalidations.
+ */
+
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
+ && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+static void
+_CPU_cache_invalidate_instruction_range(
+ const void * i_addr,
+ size_t n_bytes
+)
+{
+ const void * final_address;
+
+ /*
+ * Set i_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment i_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+
+ if( n_bytes == 0 )
+ /* Do nothing if number of bytes to invalidate is zero */
+ return;
+
+ final_address = (void *)((size_t)i_addr + n_bytes - 1);
+ i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
+ while( final_address >= i_addr ) {
+ _CPU_cache_invalidate_1_instruction_line( i_addr );
+ i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
+ }
+}
+#endif
+
+void
+rtems_cache_invalidate_multiple_instruction_lines(
+ const void * i_addr,
+ size_t n_bytes
+)
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+ smp_cache_area area = { i_addr, n_bytes };
+
+ _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
+#else
+ _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
+#endif
+#endif
+}
+
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It invalidates the entire cache.
+ */
+void
+rtems_cache_invalidate_entire_instruction( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+ _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
+#else
+ _CPU_cache_invalidate_entire_instruction();
+#endif
+#endif
+}
+
+
+/*
+ * This function returns the instruction cache granularity.
+ */
+size_t
+rtems_cache_get_instruction_line_size( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ return CPU_INSTRUCTION_CACHE_ALIGNMENT;
+#else
+ return 0;
+#endif
+}
+
+
+size_t
+rtems_cache_get_instruction_cache_size( uint32_t level )
+{
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
+ return _CPU_cache_get_instruction_cache_size( level );
+#else
+ return 0;
+#endif
+}
+
+
+/*
+ * This function freezes the instruction cache; cache lines
+ * are not replaced.
+ */
+void
+rtems_cache_freeze_instruction( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ _CPU_cache_freeze_instruction();
+#endif
+}
+
+
+/*
+ * This function unfreezes the instruction cache.
+ */
+void rtems_cache_unfreeze_instruction( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ _CPU_cache_unfreeze_instruction();
+#endif
+}
+
+
+/* Turn on the instruction cache. */
+void
+rtems_cache_enable_instruction( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ _CPU_cache_enable_instruction();
+#endif
+}
+
+
+/* Turn off the instruction cache. */
+void
+rtems_cache_disable_instruction( void )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ _CPU_cache_disable_instruction();
+#endif
+}
+
+/* Returns the maximal cache line size of all cache kinds in bytes. */
+size_t rtems_cache_get_maximal_line_size( void )
+{
+#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
+ return CPU_MAXIMAL_CACHE_ALIGNMENT;
+#endif
+ size_t max_line_size = 0;
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ {
+ size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
+ if ( max_line_size < data_line_size )
+ max_line_size = data_line_size;
+ }
+#endif
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+ {
+ size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
+ if ( max_line_size < instruction_line_size )
+ max_line_size = instruction_line_size;
+ }
+#endif
+ return max_line_size;
+}
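
[Editor's note: one typical use of the maximal line size is to pad and align buffers that are the target of cache maintenance, so flush or invalidate operations cannot touch neighbouring data. A minimal sketch, assuming posix_memalign() is available and the reported line size is a power of two; the helper name is made up.]

    #include <stdlib.h>
    #include <rtems.h>

    static void *alloc_cache_aligned( size_t n_bytes )
    {
      size_t line = rtems_cache_get_maximal_line_size();
      void *p = NULL;

      /* No cache at all: any fundamental alignment will do */
      if ( line < sizeof( void * ) )
        line = sizeof( void * );

      /* Round the size up to a multiple of the line size */
      n_bytes = ( n_bytes + line - 1 ) & ~( line - 1 );

      if ( posix_memalign( &p, line, n_bytes ) != 0 )
        return NULL;

      return p;
    }
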
+
+/*
+ * The purpose of this function is to synchronize caches after code has
+ * been loaded or self-modified. The current implementation is simple,
+ * but it can and should be replaced by an optimized version which does
+ * not need to flush and invalidate all cache levels when code is
+ * changed.
+ */
+void
+rtems_cache_instruction_sync_after_code_change( const void * code_addr, size_t n_bytes )
+{
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
+ _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
+#else
+ rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
+ rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
+#endif
+}
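
[Editor's note: a typical caller is a loader or a self-modifying program. A sketch under the assumption that dest points to executable memory and that the RTEMS cache manager declarations are visible; all names are hypothetical.]

    #include <stdint.h>
    #include <string.h>
    #include <rtems.h>

    static void load_and_run( void *dest, const void *image, size_t n_bytes )
    {
      void ( *entry )( void );

      /* Copy the code image into place through the data cache */
      memcpy( dest, image, n_bytes );

      /* Make sure instruction fetches see the new code */
      rtems_cache_instruction_sync_after_code_change( dest, n_bytes );

      /* Converting a data pointer to a function pointer is
         implementation-defined, but customary in loaders */
      entry = ( void ( * )( void ) ) (uintptr_t) dest;
      ( *entry )();
    }
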
diff --git a/bsps/shared/cache/nocache.c b/bsps/shared/cache/nocache.c
new file mode 100644
index 0000000000..55aea24762
--- /dev/null
+++ b/bsps/shared/cache/nocache.c
@@ -0,0 +1 @@
+#include "cacheimpl.h"
diff --git a/bsps/sparc/leon2/start/cache.c b/bsps/sparc/leon2/start/cache.c
new file mode 100644
index 0000000000..5597bd81cf
--- /dev/null
+++ b/bsps/sparc/leon2/start/cache.c
@@ -0,0 +1,51 @@
+/*
+ * SPARC Cache Manager Support
+ */
+
+/*
+ * CACHE MANAGER: The following functions are CPU-specific.
+ * They provide the basic implementation for the rtems_* cache
+ * management routines. If a given function has no meaning for the CPU,
+ * it does nothing by default.
+ *
+ * FIXME: Some functions simply have not been implemented.
+ */
+
+#include <stddef.h>
+
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 0
+
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+static inline void _CPU_cache_invalidate_entire_instruction ( void )
+{
+ __asm__ volatile ("flush");
+}
+
+static inline void _CPU_cache_invalidate_instruction_range(
+ const void *i_addr,
+ size_t n_bytes
+)
+{
+ __asm__ volatile ("flush");
+}
+
+/* XXX these need to be addressed */
+
+static inline void _CPU_cache_freeze_instruction ( void )
+{
+}
+
+static inline void _CPU_cache_unfreeze_instruction ( void )
+{
+}
+
+static inline void _CPU_cache_enable_instruction ( void )
+{
+}
+
+static inline void _CPU_cache_disable_instruction ( void )
+{
+}
+
+#include "../../../shared/cache/cacheimpl.h"
diff --git a/bsps/sparc/leon3/start/cache.c b/bsps/sparc/leon3/start/cache.c
new file mode 100644
index 0000000000..fb210a1f70
--- /dev/null
+++ b/bsps/sparc/leon3/start/cache.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <amba.h>
+#include <leon.h>
+
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
+
+#define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
+
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT 64
+
+#define CPU_DATA_CACHE_ALIGNMENT 64
+
+static inline volatile struct l2c_regs *get_l2c_regs(void)
+{
+ volatile struct l2c_regs *l2c = NULL;
+ struct ambapp_dev *adev;
+
+ adev = (void *) ambapp_for_each(
+ &ambapp_plb,
+ OPTIONS_ALL | OPTIONS_AHB_SLVS,
+ VENDOR_GAISLER,
+ GAISLER_L2CACHE,
+ ambapp_find_by_idx,
+ NULL
+ );
+ if (adev != NULL) {
+ l2c = (volatile struct l2c_regs *) DEV_TO_AHB(adev)->start[1];
+ }
+
+ return l2c;
+}
+
+static inline size_t get_l2_size(void)
+{
+ size_t size = 0;
+ volatile struct l2c_regs *l2c = get_l2c_regs();
+
+ if (l2c != NULL) {
+ unsigned status = l2c->status;
+ unsigned ways = (status & 0x3) + 1;
+ unsigned set_size = ((status & 0x7ff) >> 2) * 1024;
+
+ size = ways * set_size;
+ }
+
+ return size;
+}
+
+static inline size_t get_l1_size(uint32_t l1_cfg)
+{
+ uint32_t ways = ((l1_cfg >> 24) & 0x7) + 1;
+ uint32_t wsize = UINT32_C(1) << (((l1_cfg >> 20) & 0xf) + 10);
+
+ return ways * wsize;
+}
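
[Editor's note: to make the bit fields used by get_l1_size() and get_l2_size() concrete, a worked decode; both register values below are made up for illustration.]

    /* Hypothetical L1 cache configuration register value */
    uint32_t l1_cfg = 0x03200000;
    uint32_t l1_ways = ((l1_cfg >> 24) & 0x7) + 1;                    /* 4 ways */
    uint32_t l1_wsize = UINT32_C(1) << (((l1_cfg >> 20) & 0xf) + 10); /* 4 KiB */
    /* L1 size: 4 ways * 4 KiB = 16 KiB */

    /* Hypothetical L2C status register value */
    unsigned status = 0x103;
    unsigned l2_ways = (status & 0x3) + 1;                            /* 4 ways */
    unsigned l2_set_size = ((status & 0x7ff) >> 2) * 1024;            /* 64 KiB */
    /* L2 size: 4 ways * 64 KiB = 256 KiB */
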
+
+static inline size_t get_max_size(size_t a, size_t b)
+{
+ return a < b ? b : a;
+}
+
+static inline size_t get_cache_size(uint32_t level, uint32_t l1_cfg)
+{
+ size_t size;
+
+ switch (level) {
+ case 0:
+ size = get_max_size(get_l1_size(l1_cfg), get_l2_size());
+ break;
+ case 1:
+ size = get_l1_size(l1_cfg);
+ break;
+ case 2:
+ size = get_l2_size();
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ return size;
+}
+
+static inline size_t _CPU_cache_get_data_cache_size(uint32_t level)
+{
+ return get_cache_size(level, leon3_get_data_cache_config_register());
+}
+
+static inline void _CPU_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_invalidate_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ uint32_t cache_reg = leon3_get_cache_control_register();
+
+ cache_reg |= LEON3_REG_CACHE_CTRL_FI;
+ leon3_set_cache_control_register(cache_reg);
+}
+
+static inline void _CPU_cache_invalidate_instruction_range(
+ const void *i_addr,
+ size_t n_bytes
+)
+{
+ _CPU_cache_invalidate_entire_instruction();
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_enable_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ /* TODO */
+}
+
+static inline size_t _CPU_cache_get_instruction_cache_size( uint32_t level )
+{
+ return get_cache_size(level, leon3_get_inst_cache_config_register());
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ /* TODO */
+}
+
+#include "../../../shared/cache/cacheimpl.h"