Diffstat (limited to 'bsps/aarch64/shared')
-rw-r--r--  bsps/aarch64/shared/cache/cache.c          |  64
-rw-r--r--  bsps/aarch64/shared/mmu/vmsav8-64-nommu.c  |  49
-rw-r--r--  bsps/aarch64/shared/mmu/vmsav8-64.c        |  81
-rw-r--r--  bsps/aarch64/shared/start/linkcmds.base    |  11
-rw-r--r--  bsps/aarch64/shared/start/start.S          | 135
5 files changed, 232 insertions(+), 108 deletions(-)
diff --git a/bsps/aarch64/shared/cache/cache.c b/bsps/aarch64/shared/cache/cache.c
index 9e7446a077..be459d5083 100644
--- a/bsps/aarch64/shared/cache/cache.c
+++ b/bsps/aarch64/shared/cache/cache.c
@@ -36,7 +36,6 @@
 
 #include <rtems.h>
 #include <bsp.h>
-#include <bsp/utility.h>
 #include <rtems/score/aarch64-system-registers.h>
 
 #define CPU_DATA_CACHE_ALIGNMENT 64
@@ -47,8 +46,6 @@
 
 #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
 
-#define CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA
-
 #define AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT ( (size_t) 64 )
 #define AARCH64_CACHE_PREPARE_MVA(mva) (const void *) \
   RTEMS_ALIGN_DOWN ( (size_t) mva, AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT )
@@ -66,15 +63,6 @@ void AArch64_data_cache_clean_and_invalidate_line(const void *d_addr)
   );
 }
 
-static inline void _CPU_cache_flush_1_data_line(const void *d_addr)
-{
-  /* Flush the Data cache */
-  AArch64_data_cache_clean_and_invalidate_line( d_addr );
-
-  /* Wait for L1 flush to complete */
-  _AARCH64_Data_synchronization_barrier();
-}
-
 static inline void
 _CPU_cache_flush_data_range(
   const void *d_addr,
@@ -108,15 +96,6 @@ static inline void AArch64_data_cache_invalidate_line(const void *d_addr)
   );
 }
 
-static inline void _CPU_cache_invalidate_1_data_line(const void *d_addr)
-{
-  /* Invalidate the data cache line */
-  AArch64_data_cache_invalidate_line( d_addr );
-
-  /* Wait for L1 invalidate to complete */
-  _AARCH64_Data_synchronization_barrier();
-}
-
 static inline void
 _CPU_cache_invalidate_data_range(
   const void *d_addr,
@@ -155,15 +134,6 @@ static inline void AArch64_instruction_cache_invalidate_line(const void *i_addr)
   __builtin___clear_cache((void *)i_addr, ((char *)i_addr) + sizeof(void*) - 1);
 }
 
-static inline void _CPU_cache_invalidate_1_instruction_line(const void *d_addr)
-{
-  /* Invalidate the Instruction cache line */
-  AArch64_instruction_cache_invalidate_line( d_addr );
-
-  /* Wait for L1 invalidate to complete */
-  _AARCH64_Data_synchronization_barrier();
-}
-
 static inline void _CPU_cache_invalidate_instruction_range(
   const void *i_addr,
   size_t n_bytes)
 {
@@ -183,9 +153,15 @@ static inline void _CPU_cache_unfreeze_instruction(void)
   /* TODO */
 }
 
-static inline uint64_t AArch64_get_ccsidr_for_level(uint64_t val)
+static inline uint64_t AArch64_get_ccsidr_for_level(
+  uint64_t level, bool instruction
+)
 {
-  _AArch64_Write_csselr_el1(val);
+  uint64_t csselr = AARCH64_CSSELR_EL1_LEVEL(level - 1);
+
+  csselr |= instruction ? AARCH64_CSSELR_EL1_IND : 0;
+
+  _AArch64_Write_csselr_el1(csselr);
   return _AArch64_Read_ccsidr_el1();
 }
 
@@ -216,7 +192,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
   uint64_t way;
   uint64_t way_shift;
 
-  ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+  ccsidr = AArch64_get_ccsidr_for_level(level, false);
 
   line_power = AArch64_ccsidr_get_line_power(ccsidr);
   associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -229,7 +205,7 @@ static inline void AArch64_data_cache_clean_level(uint64_t level)
     for (set = 0; set < num_sets; ++set) {
       uint64_t set_and_way = (way << way_shift)
         | (set << line_power)
-        | (level << 1);
+        | ((level - 1) << 1);
 
       __asm__ volatile (
         "dc csw, %[set_and_way]"
@@ -276,7 +252,7 @@ static inline void AArch64_data_cache_clean_all_levels(void)
   uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
   uint64_t level = 0;
 
-  for (level = 0; level < loc; ++level) {
+  for (level = 1; level <= loc; ++level) {
     uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
 
     /* Check if this level has a data cache or unified cache */
@@ -301,7 +277,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
   uint64_t way;
   uint64_t way_shift;
 
-  ccsidr = AArch64_get_ccsidr_for_level(AARCH64_CSSELR_EL1_LEVEL(level));
+  ccsidr = AArch64_get_ccsidr_for_level(level, false);
 
   line_power = AArch64_ccsidr_get_line_power(ccsidr);
   associativity = AArch64_ccsidr_get_associativity(ccsidr);
@@ -314,7 +290,7 @@ static inline void AArch64_cache_invalidate_level(uint64_t level)
     for (set = 0; set < num_sets; ++set) {
       uint64_t set_and_way = (way << way_shift)
         | (set << line_power)
-        | (level << 1);
+        | ((level - 1) << 1);
 
       __asm__ volatile (
         "dc isw, %[set_and_way]"
@@ -332,7 +308,7 @@ static inline void AArch64_data_cache_invalidate_all_levels(void)
   uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
   uint64_t level = 0;
 
-  for (level = 0; level < loc; ++level) {
+  for (level = 1; level <= loc; ++level) {
     uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level);
 
     /* Check if this level has a data cache or unified cache */
@@ -373,6 +349,7 @@ static inline void _CPU_cache_disable_data(void)
   rtems_interrupt_local_enable(level);
 }
 
+#ifdef RTEMS_SMP
 static inline void
 AArch64_instruction_cache_inner_shareable_invalidate_all(void)
 {
@@ -383,6 +360,7 @@ void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
     : "memory"
   );
 }
+#endif /* RTEMS_SMP */
 
 static inline void AArch64_instruction_cache_invalidate(void)
 {
@@ -446,17 +424,11 @@ static inline size_t AArch64_get_cache_size(
   clidr = _AArch64_Read_clidr_el1();
   loc = AArch64_clidr_get_level_of_coherency(clidr);
 
-  if (level >= loc) {
+  if (level > loc) {
     return 0;
   }
 
-  if (level == 0) {
-    level = loc - 1;
-  }
-
-  ccsidr = AArch64_get_ccsidr_for_level(
-    AARCH64_CSSELR_EL1_LEVEL(level) | (instruction ? AARCH64_CSSELR_EL1_IND : 0)
-  );
+  ccsidr = AArch64_get_ccsidr_for_level(level, instruction);
 
   return (1U << (AArch64_ccsidr_get_line_power(ccsidr)+4))
     * AArch64_ccsidr_get_associativity(ccsidr)
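Annotation (not part of the commit): the refactor above makes AArch64_get_ccsidr_for_level() take a 1-based cache level and convert it internally to the 0-based encodings that CSSELR_EL1 and the DC CSW/ISW set/way operand expect, which is why the set/way loops now shift (level - 1) and the per-level loops run from 1 to the level of coherency inclusive. A minimal caller sketch under these assumptions; AArch64_ccsidr_get_num_sets() is an assumed helper, the others appear in the diff:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Sketch: compute the L1 data-cache size from CCSIDR_EL1 fields. */
  static size_t l1_data_cache_size( void )
  {
    /* Level 1, data/unified side (instruction = false) */
    uint64_t ccsidr = AArch64_get_ccsidr_for_level( 1, false );

    /* The line size in bytes is 2^(LineSize + 4) */
    size_t line_size =
      (size_t) 1 << ( AArch64_ccsidr_get_line_power( ccsidr ) + 4 );

    /* Total size = line size * associativity * number of sets */
    return line_size * AArch64_ccsidr_get_associativity( ccsidr )
      * AArch64_ccsidr_get_num_sets( ccsidr ); /* assumed helper */
  }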
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c b/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c
new file mode 100644
index 0000000000..2c793fa239
--- /dev/null
+++ b/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 MMU dummy implementation.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <libcpu/mmu-vmsav8-64.h>
+
+/*
+ * This must have a non-header implementation because it is used by libdebugger.
+ */
+rtems_status_code aarch64_mmu_map(
+  uintptr_t addr,
+  uint64_t size,
+  uint64_t flags
+)
+{
+  return RTEMS_SUCCESSFUL;
+}
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64.c b/bsps/aarch64/shared/mmu/vmsav8-64.c
new file mode 100644
index 0000000000..c426dec900
--- /dev/null
+++ b/bsps/aarch64/shared/mmu/vmsav8-64.c
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 MMU implementation.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bsp/aarch64-mmu.h>
+#include <rtems/score/cpu.h>
+
+/*
+ * This must have a non-header implementation because it is used by libdebugger.
+ */
+rtems_status_code aarch64_mmu_map(
+  uintptr_t addr,
+  uint64_t size,
+  uint64_t flags
+)
+{
+  rtems_status_code sc;
+  ISR_Level level;
+  uint64_t max_mappable = 1LLU << aarch64_mmu_get_cpu_pa_bits();
+
+  if ( addr >= max_mappable || (addr + size) > max_mappable ) {
+    return RTEMS_INVALID_ADDRESS;
+  }
+
+  /*
+   * Disable interrupts so they don't run while the MMU tables are being
+   * modified.
+   */
+  _ISR_Local_disable( level );
+
+  sc = aarch64_mmu_map_block(
+    (uint64_t *) bsp_translation_table_base,
+    0x0,
+    addr,
+    size,
+    -1,
+    flags
+  );
+  _AARCH64_Data_synchronization_barrier();
+  __asm__ volatile(
+    "tlbi vmalle1\n"
+  );
+  _AARCH64_Data_synchronization_barrier();
+  _AARCH64_Instruction_synchronization_barrier();
+
+  _ISR_Local_enable( level );
+
+  return sc;
+}
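Annotation (not part of the commit): both files implement the same aarch64_mmu_map() entry point. The no-MMU build accepts every request as a successful no-op, while the MMU build bounds-checks the request against the CPU's physical address range, rewrites the translation tables with interrupts disabled, and invalidates the TLB before returning. A hedged usage sketch; the AARCH64_MMU_DEVICE flag name is assumed from bsp/aarch64-mmu.h and should be checked against the actual header:

  #include <bsp/aarch64-mmu.h>

  /* Sketch: map one 4 KiB page of a memory-mapped peripheral. */
  static rtems_status_code map_device_page( uintptr_t base )
  {
    /* Returns RTEMS_INVALID_ADDRESS if base lies beyond the PA range */
    return aarch64_mmu_map( base, 0x1000, AARCH64_MMU_DEVICE );
  }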
diff --git a/bsps/aarch64/shared/start/linkcmds.base b/bsps/aarch64/shared/start/linkcmds.base
index a560b1016e..d442dbea28 100644
--- a/bsps/aarch64/shared/start/linkcmds.base
+++ b/bsps/aarch64/shared/start/linkcmds.base
@@ -56,7 +56,7 @@ bsp_stack_hyp_size = DEFINED (bsp_stack_hyp_size) ? bsp_stack_hyp_size : 0;
 bsp_stack_hyp_size = ALIGN (bsp_stack_hyp_size, bsp_stack_align);
 
 MEMORY {
-	UNEXPECTED_SECTIONS : ORIGIN = 0xffffffff, LENGTH = 0
+	UNEXPECTED_SECTIONS : ORIGIN = 0xffffffffffffffff, LENGTH = 0
 }
 
 SECTIONS {
@@ -151,7 +151,7 @@ SECTIONS {
 	} > REGION_RODATA AT > REGION_RODATA_LOAD
 	.data.rel.ro : ALIGN_WITH_INPUT {
 		*(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*)
-		*(.data.rel.ro* .gnu.linkonce.d.rel.ro.*)
+		*(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*)
 	} > REGION_RODATA AT > REGION_RODATA_LOAD
 	.jcr : ALIGN_WITH_INPUT {
 		KEEP (*(.jcr))
@@ -321,6 +321,13 @@ SECTIONS {
 	} > REGION_WORK AT > REGION_WORK
 	bsp_section_rtemsstack_size = bsp_section_rtemsstack_end - bsp_section_rtemsstack_begin;
 
+	.noinit (NOLOAD) : ALIGN_WITH_INPUT {
+		bsp_section_noinit_begin = .;
+		*(SORT_BY_NAME (SORT_BY_ALIGNMENT (.noinit*)))
+		bsp_section_noinit_end = .;
+	} > REGION_WORK AT > REGION_WORK
+	bsp_section_noinit_size = bsp_section_noinit_end - bsp_section_noinit_begin;
+
 	.work : ALIGN_WITH_INPUT {
 		/*
 		 * The work section will occupy the remaining REGION_WORK region and
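Annotation (not part of the commit): the new .noinit output section is marked NOLOAD and collects all .noinit* input sections, so its contents are neither loaded from the image nor zeroed like .bss; this is typically used for data that must survive a warm reset. A sketch of placing a buffer there via a GCC section attribute; the buffer name is illustrative:

  #include <stdint.h>

  /* Placed in the .noinit output section: never loaded or zeroed at startup. */
  static uint8_t persistent_trace[ 4096 ]
    __attribute__(( section( ".noinit.persistent_trace" ) ));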
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
index bc6a855217..0237583463 100644
--- a/bsps/aarch64/shared/start/start.S
+++ b/bsps/aarch64/shared/start/start.S
@@ -55,6 +55,11 @@ _start:
   mov x5, x1		/* machine type number or ~0 for DT boot */
   mov x6, x2		/* physical address of ATAGs or DTB */
 #else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+  /*
+   * This block is dead code. No aarch64 targets require this. It might be
+   * needed for hardware simulations or in future processor variants with
+   * lock-step cores.
+   */
   mov x0, XZR
   mov x1, XZR
   mov x2, XZR
@@ -87,8 +92,42 @@ _start:
   mov x29, XZR
   mov x30, XZR
 #ifdef AARCH64_MULTILIB_VFP
-#endif
-#endif
+  mov CPTR_EL3, XZR
+  mov CPTR_EL2, XZR
+  mov d0, XZR
+  mov d1, XZR
+  mov d2, XZR
+  mov d3, XZR
+  mov d4, XZR
+  mov d5, XZR
+  mov d6, XZR
+  mov d7, XZR
+  mov d8, XZR
+  mov d9, XZR
+  mov d10, XZR
+  mov d11, XZR
+  mov d12, XZR
+  mov d13, XZR
+  mov d14, XZR
+  mov d15, XZR
+  mov d16, XZR
+  mov d17, XZR
+  mov d18, XZR
+  mov d19, XZR
+  mov d20, XZR
+  mov d21, XZR
+  mov d22, XZR
+  mov d23, XZR
+  mov d24, XZR
+  mov d25, XZR
+  mov d26, XZR
+  mov d27, XZR
+  mov d28, XZR
+  mov d29, XZR
+  mov d30, XZR
+  mov d31, XZR
+#endif /* AARCH64_MULTILIB_VFP */
+#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
 
   /* Initialize SCTLR_EL1 */
   mov x0, XZR
@@ -105,6 +144,33 @@ _start:
   b.eq _el2_start
 
 _el3_start:
+  /*
+   * Before leaving the Secure World, we need to initialize the GIC. We
+   * do that here in an early stack context in EL3. This will NOT work
+   * on secondary core boot! We assume only the primary boot core will
+   * start in EL3 if any. Usually on real hardware, we should be running
+   * on top of trusted firmware and will not boot in EL3. Qemu fakes it
+   * for us and will start the primary core in EL3 and secondary cores
+   * will be brought up in EL1NS as expected.
+   */
+  #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+  ldr w1, =_ISR_Stack_size
+  ldr w2, =_ISR_Stack_area_begin
+  #else
+  ldr x1, =_ISR_Stack_size
+  ldr x2, =_ISR_Stack_area_begin
+  #endif
+  add x3, x1, x2
+  /* using SP0 for the early init stack context at EL3 */
+  msr spsel, #0
+  mov sp, x3
+
+  /*
+   * Invoke the start hook 0.
+   * We don't set up exception handling, so this hook better behave.
+   */
+  bl bsp_start_hook_0
+
   /* Drop from EL3 to EL2 */
 
   /* Initialize HCR_EL2 and SCTLR_EL2 */
@@ -114,27 +180,16 @@ _el3_start:
   mrs x0, SCR_EL3
   /* Set EL2 to AArch64 */
   orr x0, x0, #(1<<10)
-#ifdef AARCH64_IS_NONSECURE
   /* Set EL1 to NS */
   orr x0, x0, #1
-#endif
   msr SCR_EL3, x0
 
   /* set EL2h mode for eret */
-#ifdef AARCH64_IS_NONSECURE
   mov x0, #0b01001
-#else
-  mov x0, #0b00101
-#endif
-
   msr SPSR_EL3, x0
 
   /* Set EL2 entry point */
-#ifdef AARCH64_IS_NONSECURE
   adr x0, _el2_start
-#else
-  adr x0, _el1_start
-#endif
   msr ELR_EL3, x0
   eret
 
@@ -201,8 +256,8 @@ _el1_start:
 #endif
   add x3, x1, x2
 
-  /* Disable interrupts */
-  msr DAIFSet, #0x2
+  /* Disable interrupts and debug */
+  msr DAIFSet, #0xa
 
 #ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
   mov x8, XZR
@@ -252,54 +307,14 @@ _el1_start:
 
   /* FPU does not need to be enabled on AArch64 */
 
-#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
-  mov x0, #0
-  mov CPTR_EL3, XZR
-  mov CPTR_EL2, XZR
-  mov d0, XZR
-  mov d1, XZR
-  mov d2, XZR
-  mov d3, XZR
-  mov d4, XZR
-  mov d5, XZR
-  mov d6, XZR
-  mov d7, XZR
-  mov d8, XZR
-  mov d9, XZR
-  mov d10, XZR
-  mov d11, XZR
-  mov d12, XZR
-  mov d13, XZR
-  mov d14, XZR
-  mov d15, XZR
-  mov d16, XZR
-  mov d17, XZR
-  mov d18, XZR
-  mov d19, XZR
-  mov d20, XZR
-  mov d21, XZR
-  mov d22, XZR
-  mov d23, XZR
-  mov d24, XZR
-  mov d25, XZR
-  mov d26, XZR
-  mov d27, XZR
-  mov d28, XZR
-  mov d29, XZR
-  mov d30, XZR
-  mov d31, XZR
-#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+  /* Ensure FPU traps are disabled by default */
+  mrs x0, FPCR
+  bic x0, x0, #((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11) | (1 << 12))
+  bic x0, x0, #(1 << 15)
+  msr FPCR, x0
 
 #endif /* AARCH64_MULTILIB_VFP */
 
-  /*
-   * Invoke the start hook 0.
-   *
-   */
-
-  mov x1, x5		/* machine type number or ~0 for DT boot */
-  bl bsp_start_hook_0
-
   /* Branch to start hook 1 */
   bl bsp_start_hook_1
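Annotation (not part of the commit): the FPCR sequence added at the end of the VFP block clears the floating-point exception trap-enable bits IOE (bit 8), DZE (9), OFE (10), UFE (11), IXE (12), and IDE (15), so floating-point exceptions set cumulative status flags in FPSR instead of trapping. A C-level equivalent of the assembly, as a sketch:

  #include <stdint.h>

  static inline void aarch64_disable_fp_traps( void )
  {
    uint64_t fpcr;

    __asm__ volatile ( "mrs %0, FPCR" : "=r" ( fpcr ) );
    /* Clear IOE/DZE/OFE/UFE/IXE (bits 8-12) and IDE (bit 15) */
    fpcr &= ~( ( UINT64_C( 0x1f ) << 8 ) | ( UINT64_C( 1 ) << 15 ) );
    __asm__ volatile ( "msr FPCR, %0" : : "r" ( fpcr ) );
  }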