From db68ea1b9b3b2826cb720b9a4a3cbdbd3f45acf9 Mon Sep 17 00:00:00 2001
From: Kinsey Moore
Date: Tue, 22 Sep 2020 08:32:56 -0500
Subject: bsps: Add Cortex-A53 LP64 basic BSP

This adds an AArch64 basic BSP based on Qemu's Cortex-A53 emulation
with interrupt support using GICv3 and clock support using the ARM GPT.
---
 bsps/aarch64/shared/start/start.S | 219 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 219 insertions(+)
 create mode 100644 bsps/aarch64/shared/start/start.S

(limited to 'bsps/aarch64/shared/start/start.S')

diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
new file mode 100644
index 0000000000..f60e840137
--- /dev/null
+++ b/bsps/aarch64/shared/start/start.S
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief Boot and system start code.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NOTE(review): the original patch's #include targets were lost to markup
+ * stripping; these are reconstructed from the symbols the code uses
+ * (PER_CPU_CONTROL_SIZE_LOG2, BSP option macros) -- confirm against the
+ * upstream commit.
+ */
+#include <rtems/asm.h>
+#include <rtems/score/percpu.h>
+
+#include <bspopts.h>
+
+  /* Global symbols */
+  .globl _start
+  .section ".bsp_start_text", "ax"
+
+/* Start entry */
+
+_start:
+
+  /*
+   * We do not save the context since we do not return to the boot
+   * loader but preserve x1 and x2 to allow access to bootloader parameters
+   */
+#ifndef BSP_START_NEEDS_REGISTER_INITIALIZATION
+  mov x5, x1    /* machine type number or ~0 for DT boot */
+  mov x6, x2    /* physical address of ATAGs or DTB */
+#else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+  /* Clear every general-purpose register for a known-clean start state */
+  mov x0, XZR
+  mov x1, XZR
+  mov x2, XZR
+  mov x3, XZR
+  mov x4, XZR
+  mov x5, XZR
+  mov x6, XZR
+  mov x7, XZR
+  mov x8, XZR
+  mov x9, XZR
+  mov x10, XZR
+  mov x11, XZR
+  mov x12, XZR
+  mov x13, XZR
+  mov x14, XZR
+  mov x15, XZR
+  mov x16, XZR
+  mov x17, XZR
+  mov x18, XZR
+  mov x19, XZR
+  mov x20, XZR
+  mov x21, XZR
+  mov x22, XZR
+  mov x23, XZR
+  mov x24, XZR
+  mov x25, XZR
+  mov x26, XZR
+  mov x27, XZR
+  mov x28, XZR
+  mov x29, XZR
+  mov x30, XZR
+#ifdef AARCH64_MULTILIB_VFP
+#endif
+#endif
+
+#ifdef RTEMS_SMP
+  /* Read MPIDR and get current processor index (Aff0 byte) */
+  mrs x7, mpidr_el1
+  /*
+   * Fixed: AArch64 AND has no two-operand immediate form; the original
+   * "and x7, #0xff" does not assemble.
+   */
+  and x7, x7, #0xff
+#endif
+
+#ifdef RTEMS_SMP
+  /*
+   * Get current per-CPU control and store it in the EL1 software
+   * Thread ID Register (TPIDR_EL1).
+   */
+  ldr x1, =_Per_CPU_Information
+  /* Fixed: AArch64 spells the logical shift modifier "lsl", not "asl" */
+  add x1, x1, x7, lsl #PER_CPU_CONTROL_SIZE_LOG2
+  /*
+   * Fixed: "mcr p15, ..." is the ARM32 coprocessor write to TPIDRPRW and
+   * does not exist in AArch64; the equivalent is an MSR to TPIDR_EL1.
+   */
+  msr TPIDR_EL1, x1
+
+#endif
+
+  /* Calculate interrupt stack area end for current processor */
+  ldr x1, =_ISR_Stack_size
+#ifdef RTEMS_SMP
+  /* Stack area end for CPU n is begin + (n + 1) * stack size */
+  add x3, x7, #1
+  mul x1, x1, x3
+#endif
+  ldr x2, =_ISR_Stack_area_begin
+  add x3, x1, x2
+
+  /* Save original DAIF value */
+  mrs x4, DAIF
+
+#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
+  /* Re-clear scratch registers consumed by the stack calculation above */
+  mov x8, XZR
+  mov x9, XZR
+  mov x10, XZR
+  mov x11, XZR
+  mov x12, XZR
+  mov x13, XZR
+  mov x14, XZR
+  mov x15, XZR
+#endif
+
+  /*
+   * SPx: the stack pointer corresponding to the current exception level
+   * Normal operation for RTEMS on AArch64 uses SPx and runs on EL1
+   * Exception operation (synchronous errors, IRQ, FIQ, System Errors) uses SP0
+   */
+  ldr x1, =bsp_stack_exception_size
+  /* Switch to SP0 and set exception stack */
+  msr spsel, #0
+  mov sp, x3
+  /* Switch back to SPx for normal operation */
+  msr spsel, #1
+  sub x3, x3, x1
+
+  /* Set SP1 stack used for normal operation */
+  mov sp, x3
+
+  /* Stay in EL1 mode */
+
+#ifdef AARCH64_MULTILIB_VFP
+#ifdef AARCH64_MULTILIB_HAS_CPACR
+  /* Read CPACR */
+  mrs x0, CPACR_EL1
+
+  /* Enable EL1 access permissions for CP10 (FPEN, bits [21:20]) */
+  orr x0, x0, #(1 << 20)
+
+  /* Write CPACR */
+  msr CPACR_EL1, x0
+  isb
+#endif
+
+  /* FPU does not need to be enabled on AArch64 */
+
+#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
+  /*
+   * Fixed: system registers are written with MSR, not MOV; "mov CPTR_EL3,
+   * XZR" does not assemble.
+   * NOTE(review): CPTR_EL3/CPTR_EL2 are only accessible from EL3/EL2; these
+   * writes trap if the BSP is entered at EL1 -- confirm the entry EL for
+   * configurations that define BSP_START_NEEDS_REGISTER_INITIALIZATION.
+   */
+  msr CPTR_EL3, XZR
+  msr CPTR_EL2, XZR
+  /*
+   * Fixed: MOV cannot transfer an integer register into a SIMD&FP register;
+   * FMOV performs the GPR-to-FP move (zeroing each 64-bit dN clears the
+   * scalar view of the vector register).
+   */
+  fmov d0, xzr
+  fmov d1, xzr
+  fmov d2, xzr
+  fmov d3, xzr
+  fmov d4, xzr
+  fmov d5, xzr
+  fmov d6, xzr
+  fmov d7, xzr
+  fmov d8, xzr
+  fmov d9, xzr
+  fmov d10, xzr
+  fmov d11, xzr
+  fmov d12, xzr
+  fmov d13, xzr
+  fmov d14, xzr
+  fmov d15, xzr
+  fmov d16, xzr
+  fmov d17, xzr
+  fmov d18, xzr
+  fmov d19, xzr
+  fmov d20, xzr
+  fmov d21, xzr
+  fmov d22, xzr
+  fmov d23, xzr
+  fmov d24, xzr
+  fmov d25, xzr
+  fmov d26, xzr
+  fmov d27, xzr
+  fmov d28, xzr
+  fmov d29, xzr
+  fmov d30, xzr
+  fmov d31, xzr
+#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+
+#endif /* AARCH64_MULTILIB_VFP */
+
+  /*
+   * Invoke the start hook 0.
+   *
+   * NOTE(review): AAPCS64 passes the first argument in x0, not x1; this
+   * value is only visible to the hook if it reads x1 directly -- confirm
+   * bsp_start_hook_0's expectations before relying on it.
+   */
+
+  mov x1, x5 /* machine type number or ~0 for DT boot */
+  bl bsp_start_hook_0
+
+  /* Branch to start hook 1 */
+  bl bsp_start_hook_1
+
+  /* Branch to boot card with NULL command line */
+  mov x0, #0
+  bl boot_card
--
cgit v1.2.3