summaryrefslogtreecommitdiffstats
path: root/bsps/shared/cache/cacheimpl.h
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-01-27 14:37:51 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2018-01-31 12:49:09 +0100
commit4cf93658eff5cf6b0c02e98a0d1ec33dea5ed85c (patch)
tree8ce105a37991b79f38da9da31c1cb6ce13ef6beb /bsps/shared/cache/cacheimpl.h
parentbsps: Move network define to source files (diff)
downloadrtems-4cf93658eff5cf6b0c02e98a0d1ec33dea5ed85c.tar.bz2
bsps: Rework cache manager implementation
The previous cache manager support used a single source file (cache_manager.c) which included an implementation header (cache_.h). This required the use of specialized include paths to find the right header file. Change this to include a generic implementation header (cacheimpl.h) in specialized source files. Use the following directories and files: * bsps/shared/cache * bsps/@RTEMS_CPU@/shared/cache * bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/start/cache.c Update #3285.
Diffstat (limited to 'bsps/shared/cache/cacheimpl.h')
-rw-r--r--bsps/shared/cache/cacheimpl.h520
1 files changed, 520 insertions, 0 deletions
diff --git a/bsps/shared/cache/cacheimpl.h b/bsps/shared/cache/cacheimpl.h
new file mode 100644
index 0000000000..7e9f863337
--- /dev/null
+++ b/bsps/shared/cache/cacheimpl.h
@@ -0,0 +1,520 @@
+/*
+ * Cache Manager
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ *
+ * The functions in this file implement the API to the RTEMS Cache Manager and
+ * are divided into data cache and instruction cache functions. Data cache
+ * functions only have bodies if a data cache is supported. Instruction
+ * cache functions only have bodies if an instruction cache is supported.
+ * Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
+ * defined, where x E {DATA, INSTRUCTION}. These definitions are found in
+ * the Cache Manager Wrapper header files, often
+ *
+ * rtems/c/src/lib/libcpu/CPU/cache_.h
+ *
+ * The cache implementation header file can define
+ *
+ * #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+ *
+ * if it provides cache maintenance functions which operate on multiple lines.
+ * Otherwise a generic loop with single line operations will be used. It is
+ * strongly recommended to provide the implementation in terms of static
+ * inline functions for performance reasons.
+ *
+ * The functions below are implemented with CPU dependent inline routines
+ * found in the cache.c files for each CPU. In the event that a CPU does
+ * not support a specific function for a cache it has, the CPU dependent
+ * routine does nothing (but does exist).
+ *
+ * At this point, the Cache Manager makes no considerations, and provides no
+ * support for BSP specific issues such as a secondary cache. In such a system,
+ * the CPU dependent routines would have to be modified, or a BSP layer added
+ * to this Manager.
+ */
+
+#include <rtems.h>
+
+#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
+#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
+#endif
+
+#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
+#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
+#endif
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/smpimpl.h>
+
/*
 * Describes an address range for a cache maintenance operation.  Used to
 * pass the range through the single void pointer argument accepted by
 * _SMP_Multicast_action() handlers.
 */
typedef struct {
  const void *addr; /* start address of the range */
  size_t size; /* size of the range in bytes */
} smp_cache_area;
+
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+
+static void smp_cache_data_flush(void *arg)
+{
+ smp_cache_area *area = arg;
+
+ rtems_cache_flush_multiple_data_lines(area->addr, area->size);
+}
+
+static void smp_cache_data_inv(void *arg)
+{
+ smp_cache_area *area = arg;
+
+ rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
+}
+
/*
 * SMP multicast handler: flush the entire data cache on the executing
 * processor.  The argument is unused.
 */
static void smp_cache_data_flush_all(void *arg)
{
  (void) arg; /* silence unused-parameter warnings */

  rtems_cache_flush_entire_data();
}
+
/*
 * SMP multicast handler: invalidate the entire data cache on the executing
 * processor.  The argument is unused.
 */
static void smp_cache_data_inv_all(void *arg)
{
  (void) arg; /* silence unused-parameter warnings */

  rtems_cache_invalidate_entire_data();
}
+
+#endif /* defined(CPU_DATA_CACHE_ALIGNMENT) */
+
+void
+rtems_cache_flush_multiple_data_lines_processor_set(
+ const void *addr,
+ size_t size,
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ smp_cache_area area = { addr, size };
+
+ _SMP_Multicast_action( setsize, set, smp_cache_data_flush, &area );
+#endif
+}
+
+void
+rtems_cache_invalidate_multiple_data_lines_processor_set(
+ const void *addr,
+ size_t size,
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ smp_cache_area area = { addr, size };
+
+ _SMP_Multicast_action( setsize, set, smp_cache_data_inv, &area );
+#endif
+}
+
+void
+rtems_cache_flush_entire_data_processor_set(
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _SMP_Multicast_action( setsize, set, smp_cache_data_flush_all, NULL );
+#endif
+}
+
+void
+rtems_cache_invalidate_entire_data_processor_set(
+ const size_t setsize,
+ const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+ _SMP_Multicast_action( setsize, set, smp_cache_data_inv_all, NULL );
+#endif
+}
+
+#endif /* defined(RTEMS_SMP) */
+
/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * Flushes (copies back) the data cache lines covering the area of n_bytes
 * starting at d_addr.  Uses the CPU-provided range function when available,
 * otherwise walks the area one cache line at a time.  No-op if the CPU has
 * no data cache.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * last_byte;

  /* An empty area needs no flushing */
  if( n_bytes == 0 )
    return;

  /* Address of the last byte that must be pushed */
  last_byte = (void *)((size_t)d_addr + n_bytes - 1);

  /* Round the start address down to its cache line boundary */
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));

  /* The aligned start never lies past last_byte, so at least one line is
   * flushed. */
  do {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  } while( d_addr <= last_byte );
#endif
#endif
}
+
+
/*
 * Invalidates the data cache lines covering the area of n_bytes starting at
 * d_addr.  Uses the CPU-provided range function when available, otherwise
 * walks the area one cache line at a time.  No-op if the CPU has no data
 * cache.
 */

void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * last_byte;

  /* An empty area needs no invalidation */
  if( n_bytes == 0 )
    return;

  /* Address of the last byte that must be invalidated */
  last_byte = (void *)((size_t)d_addr + n_bytes - 1);

  /* Round the start address down to its cache line boundary */
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));

  /* The aligned start never lies past last_byte, so at least one line is
   * invalidated. */
  do {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  } while( d_addr <= last_byte );
#endif
#endif
}
+
+
/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.  It is a no-op if the CPU has no data
 * cache (CPU_DATA_CACHE_ALIGNMENT undefined).
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  /*
   * Call the CPU-specific routine
   */
  _CPU_cache_flush_entire_data();
#endif
}
+
+
/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.  It is a no-op if the
 * CPU has no data cache (CPU_DATA_CACHE_ALIGNMENT undefined).
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  /*
   * Call the CPU-specific routine
   */

  _CPU_cache_invalidate_entire_data();
#endif
}
+
+
/*
 * This function returns the data cache granularity (line size in bytes).
 * Returns 0 if the CPU has no data cache.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}
+
+
/*
 * Returns the size in bytes of the data cache for the given level, or 0 if
 * the CPU support does not provide cache size functions.  The meaning of
 * the level argument is defined by the CPU-specific implementation.
 */
size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}
+
/*
 * This function freezes the data cache; cache lines
 * are not replaced.  It is a no-op if the CPU has no data cache.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}
+
+
/*
 * This function unfreezes the data cache; cache line replacement
 * resumes.  It is a no-op if the CPU has no data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}
+
+
/* Turn on the data cache.  No-op if the CPU has no data cache. */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}
+
+
/* Turn off the data cache.  No-op if the CPU has no data cache. */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
+
+
+
+/*
+ * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
+ */
+
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

/*
 * SMP multicast handler: invalidate the instruction cache lines covering
 * the area described by the argument on the executing processor.
 */
static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

/*
 * SMP multicast handler: invalidate the entire instruction cache on the
 * executing processor.  The argument is unused.
 */
static void smp_cache_inst_inv_all(void *arg)
{
  (void) arg; /* silence unused-parameter warnings */

  _CPU_cache_invalidate_entire_instruction();
}

#endif
+
+/*
+ * This function is responsible for performing an instruction cache
+ * invalidate. It must determine how many cache lines need to be invalidated
+ * and then perform the invalidations.
+ */
+
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
/*
 * Generic line-by-line fallback used when the CPU support does not provide
 * an instruction cache range invalidation function.  Invalidates the
 * instruction cache lines covering the area of n_bytes starting at i_addr.
 */
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * last_byte;

  /* An empty area needs no invalidation */
  if( n_bytes == 0 )
    return;

  /* Address of the last byte that must be invalidated */
  last_byte = (void *)((size_t)i_addr + n_bytes - 1);

  /* Round the start address down to its cache line boundary */
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));

  /* The aligned start never lies past last_byte, so at least one line is
   * invalidated. */
  do {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  } while( i_addr <= last_byte );
}
#endif
+
/*
 * Invalidates the instruction cache lines covering the area of n_bytes
 * starting at i_addr.  On SMP configurations without instruction cache
 * snooping the invalidation is broadcast to all processors.  No-op if the
 * CPU has no instruction cache.
 */
void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#else
  /* No instruction cache: nothing to do, but avoid unused-parameter
   * warnings */
  (void) i_addr;
  (void) n_bytes;
#endif
}
+
+
/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.  On SMP configurations
 * without instruction cache snooping the invalidation is broadcast to
 * all processors.  It is a no-op if the CPU has no instruction cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
  _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}
+
+
/*
 * This function returns the instruction cache granularity (line size in
 * bytes).  Returns 0 if the CPU has no instruction cache.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}
+
+
/*
 * Returns the size in bytes of the instruction cache for the given level,
 * or 0 if the CPU support does not provide cache size functions.  The
 * meaning of the level argument is defined by the CPU-specific
 * implementation.
 */
size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}
+
+
/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.  It is a no-op if the CPU has no instruction cache.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}
+
+
/*
 * This function unfreezes the instruction cache; cache line replacement
 * resumes.  It is a no-op if the CPU has no instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}
+
+
/* Turn on the instruction cache.  No-op if the CPU has no instruction
 * cache. */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}
+
+
/* Turn off the instruction cache.  No-op if the CPU has no instruction
 * cache. */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}
+
/* Returns the maximal cache line size of all cache kinds in bytes, or 0 if
 * the CPU has no cache at all. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#else
  /* Fall back to the maximum of the individual line sizes.  Placing this in
   * the #else branch avoids unreachable code after the return above when
   * CPU_MAXIMAL_CACHE_ALIGNMENT is defined. */
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
#endif
}
+
/*
 * Purpose is to synchronize caches after code has been loaded
 * or self modified. The actual implementation is simple,
 * but it can and should be replaced by an optimized version
 * which does not need to flush and invalidate all cache levels
 * when code is changed.
 *
 * code_addr/n_bytes describe the memory area holding the new code.
 */
void
rtems_cache_instruction_sync_after_code_change( const void * code_addr, size_t n_bytes )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  /* Generic fallback: push the new code from the data cache to memory,
   * then drop stale lines from the instruction cache. */
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}