summary | refs | log | tree | commit | diff | stats
path: root/bsps/aarch64/shared
diff options
context:
space:
mode:
Diffstat (limited to 'bsps/aarch64/shared')
-rw-r--r--  bsps/aarch64/shared/cache/cache.c        | 62
-rw-r--r--  bsps/aarch64/shared/mmu/vmsav8-64.c      | 18
-rw-r--r--  bsps/aarch64/shared/start/linkcmds.base  |  6
-rw-r--r--  bsps/aarch64/shared/start/start.S        |  6
4 files changed, 42 insertions, 50 deletions
diff --git a/bsps/aarch64/shared/cache/cache.c b/bsps/aarch64/shared/cache/cache.c
index fc1766c2b9..be459d5083 100644
--- a/bsps/aarch64/shared/cache/cache.c
+++ b/bsps/aarch64/shared/cache/cache.c
@@ -36,7 +36,6 @@
#include <rtems.h>
#include <bsp.h>
-#include <bsp/utility.h>
#include <rtems/score/aarch64-system-registers.h>
#define CPU_DATA_CACHE_ALIGNMENT 64
@@ -64,15 +63,6 @@ void AArch64_data_cache_clean_and_invalidate_line(const void *d_addr)
);
}
-static inline void _CPU_cache_flush_1_data_line(const void *d_addr)
-{
- /* Flush the Data cache */
- AArch64_data_cache_clean_and_invalidate_line( d_addr );
-
- /* Wait for L1 flush to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_flush_data_range(
const void *d_addr,
@@ -106,15 +96,6 @@ static inline void AArch64_data_cache_invalidate_line(const void *d_addr)
);
}
-static inline void _CPU_cache_invalidate_1_data_line(const void *d_addr)
-{
- /* Invalidate the data cache line */
- AArch64_data_cache_invalidate_line( d_addr );
-
- /* Wait for L1 invalidate to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_invalidate_data_range(
const void *d_addr,
@@ -153,15 +134,6 @@ static inline void AArch64_instruction_cache_invalidate_line(const void *i_addr)
__builtin___clear_cache((void *)i_addr, ((char *)i_addr) + sizeof(void*) - 1);
}
-static inline void _CPU_cache_invalidate_1_instruction_line(const void *d_addr)
-{
- /* Invalidate the Instruction cache line */
- AArch64_instruction_cache_invalidate_line( d_addr );
-
- /* Wait for L1 invalidate to complete */
- _AARCH64_Data_synchronization_barrier();
-}
-
static inline void
_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
{
@@ -181,9 +153,15 @@ static inline void _CPU_cache_unfreeze_instruction(void)
/* TODO */
}
-static inline uint64_t AArch64_get_ccsidr_for_level(uint64_t val)
+static inline uint64_t AArch64_get_ccsidr_for_level(
+ uint64_t level, bool instruction
+)
{
- _AArch64_Write_csselr_el1(val);
+ uint64_t csselr = AARCH64_CSSELR_EL1_LEVEL(level - 1);
+
+ csselr |= instruction ? AARCH64_CSSELR_EL1_IND : 0;
+
+ _AArch64_Write_csselr_el1(csselr);
return _AArch64_Read_ccsidr_el1();
}
@@ -214,7 +192,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
uint64_t way;
uint64_t way_shift;
- ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+ ccsidr = AArch64_get_ccsidr_for_level(level, false);
line_power = AArch64_ccsidr_get_line_power(ccsidr);
associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -227,7 +205,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
for (set = 0; set < num_sets; ++set) {
uint64_t set_and_way = (way << way_shift)
| (set << line_power)
- | (level << 1);
+ | ((level - 1) << 1);
__asm__ volatile (
"dc csw, %[set_and_way]"
@@ -274,7 +252,7 @@ static inline void AArch64_data_cache_clean_all_levels(void)
uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
uint64_t level = 0;
- for (level = 0; level < loc; ++level) {
+ for (level = 1; level <= loc; ++level) {
uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
/* Check if this level has a data cache or unified cache */
@@ -299,7 +277,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
uint64_t way;
uint64_t way_shift;
- ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+ ccsidr = AArch64_get_ccsidr_for_level(level, false);
line_power = AArch64_ccsidr_get_line_power(ccsidr);
associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -312,7 +290,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
for (set = 0; set < num_sets; ++set) {
uint64_t set_and_way = (way << way_shift)
| (set << line_power)
- | (level << 1);
+ | ((level - 1) << 1);
__asm__ volatile (
"dc isw, %[set_and_way]"
@@ -330,7 +308,7 @@ static inline void AArch64_data_cache_invalidate_all_levels(void)
uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
uint64_t level = 0;
- for (level = 0; level < loc; ++level) {
+ for (level = 1; level <= loc; ++level) {
uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
/* Check if this level has a data cache or unified cache */
@@ -371,6 +349,7 @@ static inline void _CPU_cache_disable_data(void)
rtems_interrupt_local_enable(level);
}
+#ifdef RTEMS_SMP
static inline
void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
{
@@ -381,6 +360,7 @@ void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
: "memory"
);
}
+#endif /* RTEMS_SMP */
static inline void AArch64_instruction_cache_invalidate(void)
{
@@ -444,17 +424,11 @@ static inline size_t AArch64_get_cache_size(
clidr = _AArch64_Read_clidr_el1();
loc = AArch64_clidr_get_level_of_coherency(clidr);
- if (level >= loc) {
+ if (level > loc) {
return 0;
}
- if (level == 0) {
- level = loc - 1;
- }
-
- ccsidr = AArch64_get_ccsidr_for_level(
- AARCH64_CSSELR_EL1_LEVEL(level) | (instruction ? AARCH64_CSSELR_EL1_IND : 0)
- );
+ ccsidr = AArch64_get_ccsidr_for_level(level, instruction);
return (1U << (AArch64_ccsidr_get_line_power(ccsidr)+4))
* AArch64_ccsidr_get_associativity(ccsidr)
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64.c b/bsps/aarch64/shared/mmu/vmsav8-64.c
index 9caa91c414..c426dec900 100644
--- a/bsps/aarch64/shared/mmu/vmsav8-64.c
+++ b/bsps/aarch64/shared/mmu/vmsav8-64.c
@@ -47,14 +47,25 @@ rtems_status_code aarch64_mmu_map(
)
{
rtems_status_code sc;
+ ISR_Level level;
+ uint64_t max_mappable = 1LLU << aarch64_mmu_get_cpu_pa_bits();
+
+ if ( addr >= max_mappable || (addr + size) > max_mappable ) {
+ return RTEMS_INVALID_ADDRESS;
+ }
+
+ /*
+ * Disable interrupts so they don't run while the MMU tables are being
+ * modified.
+ */
+ _ISR_Local_disable( level );
- aarch64_mmu_disable();
sc = aarch64_mmu_map_block(
(uint64_t *) bsp_translation_table_base,
0x0,
addr,
size,
- 0,
+ -1,
flags
);
_AARCH64_Data_synchronization_barrier();
@@ -63,7 +74,8 @@ rtems_status_code aarch64_mmu_map(
);
_AARCH64_Data_synchronization_barrier();
_AARCH64_Instruction_synchronization_barrier();
- aarch64_mmu_enable();
+
+ _ISR_Local_enable( level );
return sc;
}
diff --git a/bsps/aarch64/shared/start/linkcmds.base b/bsps/aarch64/shared/start/linkcmds.base
index d3b5485777..d442dbea28 100644
--- a/bsps/aarch64/shared/start/linkcmds.base
+++ b/bsps/aarch64/shared/start/linkcmds.base
@@ -56,7 +56,7 @@ bsp_stack_hyp_size = DEFINED (bsp_stack_hyp_size) ? bsp_stack_hyp_size : 0;
bsp_stack_hyp_size = ALIGN (bsp_stack_hyp_size, bsp_stack_align);
MEMORY {
- UNEXPECTED_SECTIONS : ORIGIN = 0xffffffff, LENGTH = 0
+ UNEXPECTED_SECTIONS : ORIGIN = 0xffffffffffffffff, LENGTH = 0
}
SECTIONS {
@@ -151,7 +151,7 @@ SECTIONS {
} > REGION_RODATA AT > REGION_RODATA_LOAD
.data.rel.ro : ALIGN_WITH_INPUT {
*(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*)
- *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*)
+ *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*)
} > REGION_RODATA AT > REGION_RODATA_LOAD
.jcr : ALIGN_WITH_INPUT {
KEEP (*(.jcr))
@@ -323,7 +323,7 @@ SECTIONS {
.noinit (NOLOAD) : ALIGN_WITH_INPUT {
bsp_section_noinit_begin = .;
- *(.noinit*)
+ *(SORT_BY_NAME (SORT_BY_ALIGNMENT (.noinit*)))
bsp_section_noinit_end = .;
} > REGION_WORK AT > REGION_WORK
bsp_section_noinit_size = bsp_section_noinit_end - bsp_section_noinit_begin;
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
index 8bd4f86f4e..0237583463 100644
--- a/bsps/aarch64/shared/start/start.S
+++ b/bsps/aarch64/shared/start/start.S
@@ -307,6 +307,12 @@ _el1_start:
/* FPU does not need to be enabled on AArch64 */
+ /* Ensure FPU traps are disabled by default */
+ mrs x0, FPCR
+ bic x0, x0, #((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11) | (1 << 12))
+ bic x0, x0, #(1 << 15)
+ msr FPCR, x0
+
#endif /* AARCH64_MULTILIB_VFP */
/* Branch to start hook 1 */