Diffstat:
-rw-r--r--  bsps/aarch64/shared/cache/cache.c  |  64
1 file changed, 18 insertions(+), 46 deletions(-)
diff --git a/bsps/aarch64/shared/cache/cache.c b/bsps/aarch64/shared/cache/cache.c
index 9e7446a077..be459d5083 100644
--- a/bsps/aarch64/shared/cache/cache.c
+++ b/bsps/aarch64/shared/cache/cache.c
@@ -36,7 +36,6 @@
#include <rtems.h>
#include <bsp.h>
-#include <bsp/utility.h>
#include <rtems/score/aarch64-system-registers.h>
#define CPU_DATA_CACHE_ALIGNMENT 64
@@ -47,8 +46,6 @@
#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
-#define CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA
-
#define AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT ( (size_t) 64 )
#define AARCH64_CACHE_PREPARE_MVA(mva) (const void *) \
RTEMS_ALIGN_DOWN ( (size_t) mva, AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT )
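For context, AARCH64_CACHE_PREPARE_MVA rounds a modified virtual address down to the 64-byte L1 line boundary so that by-MVA maintenance always starts on a full cache line. A minimal sketch of how a range operation can use it (illustrative only: the function name example_flush_range and the loop shape are assumptions, not code from this patch; the per-line helper and the barrier are the ones visible in the hunks below):

static inline void example_flush_range( const void *d_addr, size_t n_bytes )
{
  if ( n_bytes != 0 ) {
    /* Align the start address down to a cache-line boundary */
    const char *line = (const char *) AARCH64_CACHE_PREPARE_MVA( d_addr );
    const char *end = (const char *) d_addr + n_bytes;

    while ( line < end ) {
      /* Clean and invalidate one 64-byte line by MVA */
      AArch64_data_cache_clean_and_invalidate_line( line );
      line += AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT;
    }

    /* Make the maintenance visible before returning */
    _AARCH64_Data_synchronization_barrier();
  }
}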
@@ -66,15 +63,6 @@ void AArch64_data_cache_clean_and_invalidate_line(const void *d_addr)
);
}
-static inline void _CPU_cache_flush_1_data_line(const void *d_addr)
-{
- /* Flush the Data cache */
- AArch64_data_cache_clean_and_invalidate_line( d_addr );
-
- /* Wait for L1 flush to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_flush_data_range(
const void *d_addr,
@@ -108,15 +96,6 @@ static inline void AArch64_data_cache_invalidate_line(const void *d_addr)
);
}
-static inline void _CPU_cache_invalidate_1_data_line(const void *d_addr)
-{
- /* Invalidate the data cache line */
- AArch64_data_cache_invalidate_line( d_addr );
-
- /* Wait for L1 invalidate to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_invalidate_data_range(
const void *d_addr,
@@ -155,15 +134,6 @@ static inline void AArch64_instruction_cache_invalidate_line(const void *i_addr)
__builtin___clear_cache((void *)i_addr, ((char *)i_addr) + sizeof(void*) - 1);
}
-static inline void _CPU_cache_invalidate_1_instruction_line(const void *d_addr)
-{
- /* Invalidate the Instruction cache line */
- AArch64_instruction_cache_invalidate_line( d_addr );
-
- /* Wait for L1 invalidate to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
{
@@ -183,9 +153,15 @@ static inline void _CPU_cache_unfreeze_instruction(void)
/* TODO */
}
-static inline uint64_t AArch64_get_ccsidr_for_level(uint64_t val)
+static inline uint64_t AArch64_get_ccsidr_for_level(
+ uint64_t level, bool instruction
+)
{
- _AArch64_Write_csselr_el1(val);
+ uint64_t csselr = AARCH64_CSSELR_EL1_LEVEL(level - 1);
+
+ csselr |= instruction ? AARCH64_CSSELR_EL1_IND : 0;
+
+ _AArch64_Write_csselr_el1(csselr);
return _AArch64_Read_ccsidr_el1();
}
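The refactored AArch64_get_ccsidr_for_level() now takes the architectural 1-based cache level plus an instruction/data selector instead of a raw CSSELR_EL1 value: the CSSELR_EL1 Level field is 0-based, hence the level - 1, and AARCH64_CSSELR_EL1_IND selects the instruction side of a split cache. A caller can therefore query a level directly, along these lines (illustrative use, not code from the patch):

static inline void example_query_l1_caches( void )
{
  /* Describe the L1 data and the L1 instruction cache of this core */
  uint64_t l1d_ccsidr = AArch64_get_ccsidr_for_level( 1, false );
  uint64_t l1i_ccsidr = AArch64_get_ccsidr_for_level( 1, true );

  (void) l1d_ccsidr;
  (void) l1i_ccsidr;
}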
@@ -216,7 +192,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
uint64_t way;
uint64_t way_shift;
- ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+ ccsidr = AArch64_get_ccsidr_for_level(level, false);
line_power = AArch64_ccsidr_get_line_power(ccsidr);
associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -229,7 +205,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
for (set = 0; set < num_sets; ++set) {
uint64_t set_and_way = (way << way_shift)
| (set << line_power)
- | (level << 1);
+ | ((level - 1) << 1);
__asm__ volatile (
"dc csw, %[set_and_way]"
@@ -276,7 +252,7 @@ static inline void AArch64_data_cache_clean_all_levels(void)
uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
uint64_t level = 0;
- for (level = 0; level < loc; ++level) {
+ for (level = 1; level <= loc; ++level) {
uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
/* Check if this level has a data cache or unified cache */
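The level loops now run over the architectural numbering L1 .. LoC instead of 0 .. LoC-1, which keeps the code's notion of a cache level 1-based and pushes the 0-based hardware encoding into the helpers. For reference, CLIDR_EL1 packs one 3-bit Ctype field per level starting at bit 0 for L1 and reports the Level of Coherency in bits [26:24]; a sketch of accessors consistent with the 1-based loop (illustrative, not necessarily the helpers used in this file):

static inline uint64_t example_clidr_get_cache_type( uint64_t clidr, uint64_t level )
{
  /* Ctype<n> for level n lives at bit offset 3 * (n - 1) */
  return ( clidr >> ( 3 * ( level - 1 ) ) ) & 0x7;
}

static inline uint64_t example_clidr_get_level_of_coherency( uint64_t clidr )
{
  /* CLIDR_EL1.LoC, bits [26:24] */
  return ( clidr >> 24 ) & 0x7;
}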
@@ -301,7 +277,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
uint64_t way;
uint64_t way_shift;
- ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+ ccsidr = AArch64_get_ccsidr_for_level(level, false);
line_power = AArch64_ccsidr_get_line_power(ccsidr);
associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -314,7 +290,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
for (set = 0; set < num_sets; ++set) {
uint64_t set_and_way = (way << way_shift)
| (set << line_power)
- | (level << 1);
+ | ((level - 1) << 1);
__asm__ volatile (
"dc isw, %[set_and_way]"
@@ -332,7 +308,7 @@ static inline void AArch64_data_cache_invalidate_all_levels(void)
uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
uint64_t level = 0;
- for (level = 0; level < loc; ++level) {
+ for (level = 1; level <= loc; ++level) {
uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
/* Check if this level has a data cache or unified cache */
@@ -373,6 +349,7 @@ static inline void _CPU_cache_disable_data(void)
rtems_interrupt_local_enable(level);
}
+#ifdef RTEMS_SMP
static inline
void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
{
@@ -383,6 +360,7 @@ void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
: "memory"
);
}
+#endif /* RTEMS_SMP */
static inline void AArch64_instruction_cache_invalidate(void)
{
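The inner-shareable invalidate guarded above (presumably an IC IALLUIS instruction) broadcasts the instruction-cache invalidation to every core in the Inner Shareable domain and is only referenced from SMP code paths, so it is now compiled only when RTEMS_SMP is defined. Uniprocessor builds keep a purely local form, roughly like the following sketch (AArch64_instruction_cache_invalidate below is expected to use this instruction, but that body is not shown in this diff):

static inline void example_instruction_cache_invalidate_local( void )
{
  /* Invalidate all instruction caches of the executing core only (to PoU) */
  __asm__ volatile (
    "ic iallu"
    :
    :
    : "memory"
  );
}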
@@ -446,17 +424,11 @@ static inline size_t AArch64_get_cache_size(
clidr = _AArch64_Read_clidr_el1();
loc = AArch64_clidr_get_level_of_coherency(clidr);
- if (level >= loc) {
+ if (level > loc) {
return 0;
}
- if (level == 0) {
- level = loc - 1;
- }
-
- ccsidr = AArch64_get_ccsidr_for_level(
- AARCH64_CSSELR_EL1_LEVEL(level) | (instruction ? AARCH64_CSSELR_EL1_IND : 0)
- );
+ ccsidr = AArch64_get_ccsidr_for_level(level, instruction);
return (1U << (AArch64_ccsidr_get_line_power(ccsidr)+4))
* AArch64_ccsidr_get_associativity(ccsidr)