Diffstat (limited to 'bsps/aarch64/shared/start')
-rw-r--r--   bsps/aarch64/shared/start/aarch64-smp.c    85
-rw-r--r--   bsps/aarch64/shared/start/linkcmds.base    11
-rw-r--r--   bsps/aarch64/shared/start/start.S          143
3 files changed, 173 insertions, 66 deletions
diff --git a/bsps/aarch64/shared/start/aarch64-smp.c b/bsps/aarch64/shared/start/aarch64-smp.c
new file mode 100644
index 0000000000..5ec7babce7
--- /dev/null
+++ b/bsps/aarch64/shared/start/aarch64-smp.c
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief SMP startup and interop code.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/score/smpimpl.h>
+
+#include <bsp/irq.h>
+
+static void bsp_inter_processor_interrupt( void *arg )
+{
+  _SMP_Inter_processor_interrupt_handler( _Per_CPU_Get() );
+}
+
+uint32_t _CPU_SMP_Initialize( void )
+{
+  return arm_gic_irq_processor_count();
+}
+
+void _CPU_SMP_Finalize_initialization( uint32_t cpu_count )
+{
+  if ( cpu_count > 0 ) {
+    rtems_status_code sc;
+
+    sc = rtems_interrupt_handler_install(
+      ARM_GIC_IRQ_SGI_0,
+      "IPI",
+      RTEMS_INTERRUPT_UNIQUE,
+      bsp_inter_processor_interrupt,
+      NULL
+    );
+    _Assert( sc == RTEMS_SUCCESSFUL );
+    (void) sc;
+
+#if defined( BSP_DATA_CACHE_ENABLED ) || \
+    defined( BSP_INSTRUCTION_CACHE_ENABLED )
+    /* Enable unified L2 cache */
+    rtems_cache_enable_data();
+#endif
+  }
+}
+
+void _CPU_SMP_Prepare_start_multitasking( void )
+{
+  /* Do nothing */
+}
+
+void _CPU_SMP_Send_interrupt( uint32_t target_processor_index )
+{
+  arm_gic_irq_generate_software_irq(
+    ARM_GIC_IRQ_SGI_0,
+    1U << target_processor_index
+  );
+}
diff --git a/bsps/aarch64/shared/start/linkcmds.base b/bsps/aarch64/shared/start/linkcmds.base
index a560b1016e..d442dbea28 100644
--- a/bsps/aarch64/shared/start/linkcmds.base
+++ b/bsps/aarch64/shared/start/linkcmds.base
@@ -56,7 +56,7 @@ bsp_stack_hyp_size = DEFINED (bsp_stack_hyp_size) ?
   bsp_stack_hyp_size : 0;
 bsp_stack_hyp_size = ALIGN (bsp_stack_hyp_size, bsp_stack_align);
 
 MEMORY {
-  UNEXPECTED_SECTIONS : ORIGIN = 0xffffffff, LENGTH = 0
+  UNEXPECTED_SECTIONS : ORIGIN = 0xffffffffffffffff, LENGTH = 0
 }
 
 SECTIONS {
@@ -151,7 +151,7 @@ SECTIONS {
   } > REGION_RODATA AT > REGION_RODATA_LOAD
   .data.rel.ro : ALIGN_WITH_INPUT {
     *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*)
-    *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*)
+    *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*)
   } > REGION_RODATA AT > REGION_RODATA_LOAD
   .jcr : ALIGN_WITH_INPUT {
     KEEP (*(.jcr))
@@ -321,6 +321,13 @@ SECTIONS {
   } > REGION_WORK AT > REGION_WORK
   bsp_section_rtemsstack_size = bsp_section_rtemsstack_end - bsp_section_rtemsstack_begin;
 
+  .noinit (NOLOAD) : ALIGN_WITH_INPUT {
+    bsp_section_noinit_begin = .;
+    *(SORT_BY_NAME (SORT_BY_ALIGNMENT (.noinit*)))
+    bsp_section_noinit_end = .;
+  } > REGION_WORK AT > REGION_WORK
+  bsp_section_noinit_size = bsp_section_noinit_end - bsp_section_noinit_begin;
+
   .work : ALIGN_WITH_INPUT {
     /*
      * The work section will occupy the remaining REGION_WORK region and
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
index f4c39dacdf..0237583463 100644
--- a/bsps/aarch64/shared/start/start.S
+++ b/bsps/aarch64/shared/start/start.S
@@ -55,6 +55,11 @@ _start:
   mov x5, x1    /* machine type number or ~0 for DT boot */
   mov x6, x2    /* physical address of ATAGs or DTB */
 #else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+  /*
+   * This block is dead code. No aarch64 targets require this. It might be
+   * needed for hardware simulations or in future processor variants with
+   * lock-step cores.
+   */
   mov x0, XZR
   mov x1, XZR
   mov x2, XZR
@@ -87,8 +92,42 @@ _start:
   mov x29, XZR
   mov x30, XZR
 #ifdef AARCH64_MULTILIB_VFP
-#endif
-#endif
+  mov CPTR_EL3, XZR
+  mov CPTR_EL2, XZR
+  mov d0, XZR
+  mov d1, XZR
+  mov d2, XZR
+  mov d3, XZR
+  mov d4, XZR
+  mov d5, XZR
+  mov d6, XZR
+  mov d7, XZR
+  mov d8, XZR
+  mov d9, XZR
+  mov d10, XZR
+  mov d11, XZR
+  mov d12, XZR
+  mov d13, XZR
+  mov d14, XZR
+  mov d15, XZR
+  mov d16, XZR
+  mov d17, XZR
+  mov d18, XZR
+  mov d19, XZR
+  mov d20, XZR
+  mov d21, XZR
+  mov d22, XZR
+  mov d23, XZR
+  mov d24, XZR
+  mov d25, XZR
+  mov d26, XZR
+  mov d27, XZR
+  mov d28, XZR
+  mov d29, XZR
+  mov d30, XZR
+  mov d31, XZR
+#endif /* AARCH64_MULTILIB_VFP */
+#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
 
   /* Initialize SCTLR_EL1 */
   mov x0, XZR
@@ -105,6 +144,33 @@ _start:
   b.eq _el2_start
 
 _el3_start:
+  /*
+   * Before leaving the Secure World, we need to initialize the GIC. We
+   * do that here in an early stack context in EL3. This will NOT work
+   * on secondary core boot! We assume only the primary boot core will
+   * start in EL3 if any. Usually on real hardware, we should be running
+   * on top of trusted firmware and will not boot in EL3. Qemu fakes it
+   * for us and will start the primary core in EL3 and secondary cores
+   * will be brought up in EL1NS as expected.
+   */
+  #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+  ldr w1, =_ISR_Stack_size
+  ldr w2, =_ISR_Stack_area_begin
+  #else
+  ldr x1, =_ISR_Stack_size
+  ldr x2, =_ISR_Stack_area_begin
+  #endif
+  add x3, x1, x2
+  /* using SP0 for the early init stack context at EL3 */
+  msr spsel, #0
+  mov sp, x3
+
+  /*
+   * Invoke the start hook 0.
+   * We don't set up exception handling, so this hook better behave.
+   */
+  bl bsp_start_hook_0
+
   /* Drop from EL3 to EL2 */
 
   /* Initialize HCR_EL2 and SCTLR_EL2 */
@@ -114,27 +180,16 @@ _el3_start:
   mrs x0, SCR_EL3
   /* Set EL2 to AArch64 */
   orr x0, x0, #(1<<10)
-#ifdef AARCH64_IS_NONSECURE
   /* Set EL1 to NS */
   orr x0, x0, #1
-#endif
   msr SCR_EL3, x0
 
   /* set EL2h mode for eret */
-#ifdef AARCH64_IS_NONSECURE
   mov x0, #0b01001
-#else
-  mov x0, #0b00101
-#endif
-
   msr SPSR_EL3, x0
 
   /* Set EL2 entry point */
-#ifdef AARCH64_IS_NONSECURE
   adr x0, _el2_start
-#else
-  adr x0, _el1_start
-#endif
   msr ELR_EL3, x0
 
   eret
@@ -166,21 +221,21 @@ _el1_start:
 
 #ifdef RTEMS_SMP
   /* Read MPIDR and get current processor index */
   mrs x7, mpidr_el1
-  and x7, #0xff
+  and x7, x7, #0xff
 #endif
 
 #ifdef RTEMS_SMP
   /*
    * Get current per-CPU control and store it in PL1 only Thread ID
-   * Register (TPIDRPRW).
+   * Register (TPIDR_EL1).
    */
 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
   ldr w1, =_Per_CPU_Information
 #else
   ldr x1, =_Per_CPU_Information
 #endif
-  add x1, x1, x7, asl #PER_CPU_CONTROL_SIZE_LOG2
-  mcr p15, 0, x1, c13, c0, 4
+  add x1, x1, x7, lsl #PER_CPU_CONTROL_SIZE_LOG2
+  msr TPIDR_EL1, x1
 #endif
 
@@ -201,8 +256,8 @@ _el1_start:
 #endif
   add x3, x1, x2
 
-  /* Save original DAIF value */
-  mrs x4, DAIF
+  /* Disable interrupts and debug */
+  msr DAIFSet, #0xa
 
 #ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
   mov x8, XZR
@@ -252,54 +307,14 @@ _el1_start:
 
   /* FPU does not need to be enabled on AArch64 */
 
-#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
-  mov x0, #0
-  mov CPTR_EL3, XZR
-  mov CPTR_EL2, XZR
-  mov d0, XZR
-  mov d1, XZR
-  mov d2, XZR
-  mov d3, XZR
-  mov d4, XZR
-  mov d5, XZR
-  mov d6, XZR
-  mov d7, XZR
-  mov d8, XZR
-  mov d9, XZR
-  mov d10, XZR
-  mov d11, XZR
-  mov d12, XZR
-  mov d13, XZR
-  mov d14, XZR
-  mov d15, XZR
-  mov d16, XZR
-  mov d17, XZR
-  mov d18, XZR
-  mov d19, XZR
-  mov d20, XZR
-  mov d21, XZR
-  mov d22, XZR
-  mov d23, XZR
-  mov d24, XZR
-  mov d25, XZR
-  mov d26, XZR
-  mov d27, XZR
-  mov d28, XZR
-  mov d29, XZR
-  mov d30, XZR
-  mov d31, XZR
-#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+  /* Ensure FPU traps are disabled by default */
+  mrs x0, FPCR
+  bic x0, x0, #((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11) | (1 << 12))
+  bic x0, x0, #(1 << 15)
+  msr FPCR, x0
 
 #endif /* AARCH64_MULTILIB_VFP */
 
-  /*
-   * Invoke the start hook 0.
-   *
-   */
-
-  mov x1, x5    /* machine type number or ~0 for DT boot */
-  bl bsp_start_hook_0
-
   /* Branch to start hook 1 */
   bl bsp_start_hook_1
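
A note on the .noinit output section added to linkcmds.base above: it is marked (NOLOAD) and collects all .noinit* input sections into REGION_WORK, separate from .bss, so its contents are neither loaded from the image nor cleared by the usual BSS initialization. The following is a minimal, hypothetical C sketch of how application code might place a variable into that section; the variable and function names are illustrative and not part of this patch, and the GCC/Clang section attribute is assumed to be available.

#include <stdint.h>

/*
 * Hypothetical example only: keep a counter in the .noinit section so it is
 * not loaded from the image and not zeroed during BSS initialization. Its
 * value is undefined on a cold boot, so validate it before trusting it.
 */
static uint32_t boot_count __attribute__(( section( ".noinit.boot_count" ) ));

void count_boot( void )
{
  boot_count++;
}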
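The start.S change also stores the per-CPU control pointer in TPIDR_EL1, replacing the AArch32-style TPIDRPRW access via mcr. A small sketch, assuming GCC-style inline assembly, of how C code could read that register back; the wrapper name is hypothetical and not taken from the RTEMS sources.

#include <stdint.h>

/*
 * Illustrative sketch: read back the value that start.S wrote to TPIDR_EL1.
 * In this design it holds the address of the current per-CPU control.
 */
static inline uintptr_t read_tpidr_el1( void )
{
  uint64_t value;

  __asm__ volatile ( "mrs %0, TPIDR_EL1" : "=r" ( value ) );
  return (uintptr_t) value;
}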