path: root/bsps/aarch64/shared
author    Kinsey Moore <kinsey.moore@oarcorp.com>    2020-09-22 08:32:56 -0500
committer Joel Sherrill <joel@rtems.org>             2020-10-05 16:11:40 -0500
commit    db68ea1b9b3b2826cb720b9a4a3cbdbd3f45acf9 (patch)
tree      809d5783fbc0a09691c1167b203232707ac6c219 /bsps/aarch64/shared
parent    score: Add AArch64 port (diff)
bsps: Add Cortex-A53 LP64 basic BSP
This adds a basic AArch64 BSP based on QEMU's Cortex-A53 emulation, with interrupt support using GICv3 and clock support using the ARM Generic Timer (GPT).
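
As a rough usage sketch (not part of this commit; the executable name is illustrative), such a BSP is typically exercised under QEMU's "virt" machine with a GICv3:

    qemu-system-aarch64 -machine virt,gic-version=3 -cpu cortex-a53 \
        -nographic -kernel ticker.exe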
Diffstat (limited to 'bsps/aarch64/shared')
-rw-r--r--  bsps/aarch64/shared/cache/cache.c                       616
-rw-r--r--  bsps/aarch64/shared/clock/arm-generic-timer-aarch64.c   108
-rw-r--r--  bsps/aarch64/shared/irq/irq-arm-gicv3-aarch64.c          64
-rw-r--r--  bsps/aarch64/shared/start/linkcmds.base                 425
-rw-r--r--  bsps/aarch64/shared/start/start.S                       219
5 files changed, 1432 insertions(+), 0 deletions(-)
diff --git a/bsps/aarch64/shared/cache/cache.c b/bsps/aarch64/shared/cache/cache.c
new file mode 100644
index 0000000000..d7ea33206f
--- /dev/null
+++ b/bsps/aarch64/shared/cache/cache.c
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 cache defines and implementation.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems.h>
+#include <bsp.h>
+#include <bsp/utility.h>
+
+#define AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT ((size_t)64)
+#define AARCH64_CACHE_L1_DATA_LINE_MASK \
+ ( AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT - 1 )
+/* Align the address down to the start of its cache line */
+#define AARCH64_CACHE_PREPARE_MVA(mva) \
+  ((const void *) (((size_t) (mva)) & ~AARCH64_CACHE_L1_DATA_LINE_MASK))
+
+static inline
+void AArch64_data_cache_clean_and_invalidate_line(const void *d_addr)
+{
+ d_addr = AARCH64_CACHE_PREPARE_MVA(d_addr);
+
+ __asm__ volatile (
+ "dc civac, %[d_addr]"
+ :
+ : [d_addr] "r" (d_addr)
+ : "memory"
+ );
+}
+
+static inline void _CPU_cache_flush_1_data_line(const void *d_addr)
+{
+ /* Flush the Data cache */
+ AArch64_data_cache_clean_and_invalidate_line( d_addr );
+
+ /* Wait for L1 flush to complete */
+ _AARCH64_Data_synchronization_barrier();
+}
+
+static inline void
+_CPU_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ _AARCH64_Data_synchronization_barrier();
+ if ( n_bytes != 0 ) {
+ size_t adx = (size_t) d_addr & ~AARCH64_CACHE_L1_DATA_LINE_MASK;
+ const size_t ADDR_LAST = (size_t) d_addr + n_bytes - 1;
+
+ for (; adx <= ADDR_LAST; adx += AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT ) {
+ /* Store and invalidate the Data cache line */
+ AArch64_data_cache_clean_and_invalidate_line( (void*)adx );
+ }
+    /* Wait for the clean and invalidate to complete */
+    _AARCH64_Data_synchronization_barrier();
+  }
+}
+
+static inline void AArch64_data_cache_invalidate_line(const void *d_addr)
+{
+ d_addr = AARCH64_CACHE_PREPARE_MVA(d_addr);
+
+ __asm__ volatile (
+ "dc ivac, %[d_addr]"
+ :
+ : [d_addr] "r" (d_addr)
+ : "memory"
+ );
+}
+
+static inline void _CPU_cache_invalidate_1_data_line(const void *d_addr)
+{
+ /* Invalidate the data cache line */
+ AArch64_data_cache_invalidate_line( d_addr );
+
+ /* Wait for L1 invalidate to complete */
+ _AARCH64_Data_synchronization_barrier();
+}
+
+static inline void
+_CPU_cache_invalidate_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ if ( n_bytes != 0 ) {
+    size_t adx = (size_t) d_addr & ~AARCH64_CACHE_L1_DATA_LINE_MASK;
+    const size_t end = (size_t) d_addr + n_bytes - 1;
+
+    /* Back the starting address up to the start of a line and invalidate until the end */
+    for (; adx <= end; adx += AARCH64_CACHE_L1_CPU_DATA_ALIGNMENT ) {
+      /* Invalidate the data cache line */
+      AArch64_data_cache_invalidate_line( (void*)adx );
+ }
+ /* Wait for L1 invalidate to complete */
+ _AARCH64_Data_synchronization_barrier();
+ }
+}
+
+static inline void _CPU_cache_freeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_data(void)
+{
+ /* TODO */
+}
+
+static inline void AArch64_instruction_cache_invalidate_line(const void *i_addr)
+{
+  /* __builtin___clear_cache is explicitly only for the instruction cache */
+  __builtin___clear_cache((void *)i_addr, ((char *)i_addr) + sizeof(void*) - 1);
+}
+
+static inline void _CPU_cache_invalidate_1_instruction_line(const void *i_addr)
+{
+  /* Invalidate the instruction cache line */
+  AArch64_instruction_cache_invalidate_line( i_addr );
+
+ /* Wait for L1 invalidate to complete */
+ _AARCH64_Data_synchronization_barrier();
+}
+
+static inline void
+_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
+{
+ if ( n_bytes != 0 ) {
+ __builtin___clear_cache((void *)i_addr, ((char *)i_addr) + n_bytes - 1);
+ }
+ _AARCH64_Instruction_synchronization_barrier();
+}
+
+static inline void _CPU_cache_freeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline void _CPU_cache_unfreeze_instruction(void)
+{
+ /* TODO */
+}
+
+static inline uint64_t
+AArch64_get_ccsidr(void)
+{
+ uint64_t val;
+
+ __asm__ volatile (
+ "mrs %[val], CCSIDR_EL1\n"
+ : [val] "=&r" (val)
+ );
+
+ return val;
+}
+
+#define CCSIDR_NUMSETS(val) BSP_FLD64(val, 13, 27)
+#define CCSIDR_NUMSETS_GET(reg) BSP_FLD64GET(reg, 13, 27)
+#define CCSIDR_NUMSETS_SET(reg, val) BSP_FLD64SET(reg, val, 13, 27)
+#define CCSIDR_ASSOCIATIVITY(val) BSP_FLD64(val, 3, 12)
+#define CCSIDR_ASSOCIATIVITY_GET(reg) BSP_FLD64GET(reg, 3, 12)
+#define CCSIDR_ASSOCIATIVITY_SET(reg, val) BSP_FLD64SET(reg, val, 3, 12)
+/* line size == 1 << (GET(reg)+4): 0 -> (1 << 4) == 16 */
+#define CCSIDR_LINE_SIZE(val) BSP_FLD64(val, 0, 2)
+#define CCSIDR_LINE_SIZE_GET(reg) BSP_FLD64GET(reg, 0, 2)
+#define CCSIDR_LINE_SIZE_SET(reg, val) BSP_FLD64SET(reg, val, 0, 2)
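+/*
+ * Worked example (values illustrative, not read from hardware): a CCSIDR of
+ * 0xFE01A decodes to LINE_SIZE = 2 (64-byte lines, since 1 << (2 + 4) == 64),
+ * an ASSOCIATIVITY field of 3 (4 ways) and a NUMSETS field of 127 (128 sets),
+ * i.e. a 32 KiB cache.
+ */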
+
+static inline uint64_t
+AArch64_ccsidr_get_line_power(uint64_t ccsidr)
+{
+ return CCSIDR_LINE_SIZE_GET(ccsidr) + 4;
+}
+
+static inline uint64_t
+AArch64_ccsidr_get_associativity(uint64_t ccsidr)
+{
+ return CCSIDR_ASSOCIATIVITY_GET(ccsidr) + 1;
+}
+
+static inline uint64_t
+AArch64_ccsidr_get_num_sets(uint64_t ccsidr)
+{
+ return CCSIDR_NUMSETS_GET(ccsidr) + 1;
+}
+
+static inline void
+AArch64_set_csselr(uint64_t val)
+{
+ __asm__ volatile (
+ "msr CSSELR_EL1, %[val]\n"
+ :
+ : [val] "r" (val)
+ );
+}
+#define CSSELR_TND BSP_BIT64(4)
+/* This field is the cache level minus 1: L1 is 0, L2 is 1, etc. */
+#define CSSELR_LEVEL(val) BSP_FLD64(val, 1, 3)
+#define CSSELR_LEVEL_GET(reg) BSP_FLD64GET(reg, 1, 3)
+#define CSSELR_LEVEL_SET(reg, val) BSP_FLD64SET(reg, val, 1, 3)
+#define CSSELR_IND BSP_BIT64(0)
+
+static inline uint64_t AArch64_get_ccsidr_for_level(uint64_t val)
+{
+ AArch64_set_csselr(val);
+ return AArch64_get_ccsidr();
+}
+
+static inline void AArch64_data_cache_clean_level(uint64_t level)
+{
+ uint64_t ccsidr;
+ uint64_t line_power;
+ uint64_t associativity;
+ uint64_t way;
+ uint64_t way_shift;
+
+ ccsidr = AArch64_get_ccsidr_for_level(CSSELR_LEVEL(level));
+
+ line_power = AArch64_ccsidr_get_line_power(ccsidr);
+ associativity = AArch64_ccsidr_get_associativity(ccsidr);
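+  /*
+   * The way index of the "dc csw" operand is left-justified at bit
+   * 32 - log2(ways); __builtin_clz(0) is undefined, so this assumes a
+   * cache with at least two ways.
+   */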
+ way_shift = __builtin_clz(associativity - 1);
+
+ for (way = 0; way < associativity; ++way) {
+ uint64_t num_sets = AArch64_ccsidr_get_num_sets(ccsidr);
+ uint64_t set;
+
+ for (set = 0; set < num_sets; ++set) {
+ uint64_t set_and_way = (way << way_shift)
+ | (set << line_power)
+ | (level << 1);
+
+ __asm__ volatile (
+ "dc csw, %[set_and_way]"
+ :
+ : [set_and_way] "r" (set_and_way)
+ : "memory"
+ );
+ }
+ }
+}
+
+static inline uint64_t
+AArch64_get_clidr(void)
+{
+ uint64_t val;
+
+ __asm__ volatile (
+ "mrs %[val], CLIDR_EL1\n"
+ : [val] "=&r" (val)
+ );
+
+ return val;
+}
+
+#define CLIDR_LOC(val) BSP_FLD64(val, 24, 26)
+#define CLIDR_LOC_GET(reg) BSP_FLD64GET(reg, 24, 26)
+#define CLIDR_LOC_SET(reg, val) BSP_FLD64SET(reg, val, 24, 26)
+#define CLIDR_CTYPE7(val) BSP_FLD64(val, 18, 20)
+#define CLIDR_CTYPE7_GET(reg) BSP_FLD64GET(reg, 18, 20)
+#define CLIDR_CTYPE7_SET(reg, val) BSP_FLD64SET(reg, val, 18, 20)
+#define CLIDR_CTYPE6(val) BSP_FLD64(val, 15, 17)
+#define CLIDR_CTYPE6_GET(reg) BSP_FLD64GET(reg, 15, 17)
+#define CLIDR_CTYPE6_SET(reg, val) BSP_FLD64SET(reg, val, 15, 17)
+#define CLIDR_CTYPE5(val) BSP_FLD64(val, 12, 14)
+#define CLIDR_CTYPE5_GET(reg) BSP_FLD64GET(reg, 12, 14)
+#define CLIDR_CTYPE5_SET(reg, val) BSP_FLD64SET(reg, val, 12, 14)
+#define CLIDR_CTYPE4(val) BSP_FLD64(val, 9, 11)
+#define CLIDR_CTYPE4_GET(reg) BSP_FLD64GET(reg, 9, 11)
+#define CLIDR_CTYPE4_SET(reg, val) BSP_FLD64SET(reg, val, 9, 11)
+#define CLIDR_CTYPE3(val) BSP_FLD64(val, 6, 8)
+#define CLIDR_CTYPE3_GET(reg) BSP_FLD64GET(reg, 6, 8)
+#define CLIDR_CTYPE3_SET(reg, val) BSP_FLD64SET(reg, val, 6, 8)
+#define CLIDR_CTYPE2(val) BSP_FLD64(val, 3, 5)
+#define CLIDR_CTYPE2_GET(reg) BSP_FLD64GET(reg, 3, 5)
+#define CLIDR_CTYPE2_SET(reg, val) BSP_FLD64SET(reg, val, 3, 5)
+#define CLIDR_CTYPE1(val) BSP_FLD64(val, 0, 2)
+#define CLIDR_CTYPE1_GET(reg) BSP_FLD64GET(reg, 0, 2)
+#define CLIDR_CTYPE1_SET(reg, val) BSP_FLD64SET(reg, val, 0, 2)
+
+static inline
+uint64_t AArch64_clidr_get_cache_type(uint64_t clidr, uint64_t level)
+{
+ switch (level)
+ {
+ case 1:
+ return CLIDR_CTYPE1_GET(clidr);
+ case 2:
+ return CLIDR_CTYPE2_GET(clidr);
+ case 3:
+ return CLIDR_CTYPE3_GET(clidr);
+ case 4:
+ return CLIDR_CTYPE4_GET(clidr);
+ case 5:
+ return CLIDR_CTYPE5_GET(clidr);
+ case 6:
+ return CLIDR_CTYPE6_GET(clidr);
+ case 7:
+ return CLIDR_CTYPE7_GET(clidr);
+ default:
+ return 0;
+ }
+}
+
+static inline uint64_t AArch64_clidr_get_level_of_coherency(uint64_t clidr)
+{
+ return CLIDR_LOC_GET(clidr);
+}
+
+static inline void AArch64_data_cache_clean_all_levels(void)
+{
+ uint64_t clidr = AArch64_get_clidr();
+ uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
+ uint64_t level = 0;
+
+  for (level = 0; level < loc; ++level) {
+    /* CLIDR CTYPE fields are 1-based; CSSELR and set/way levels are 0-based */
+    uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level + 1);
+
+ /* Check if this level has a data cache or unified cache */
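+    /* CTYPE encoding: 1 = I-only, 2 = D-only, 3 = split I+D, 4 = unified */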
+ if (((ctype & (0x6)) == 2) || (ctype == 4)) {
+ AArch64_data_cache_clean_level(level);
+ }
+ }
+}
+
+static inline void _CPU_cache_flush_entire_data(void)
+{
+ _AARCH64_Data_synchronization_barrier();
+ AArch64_data_cache_clean_all_levels();
+ _AARCH64_Data_synchronization_barrier();
+}
+
+static inline void AArch64_cache_invalidate_level(uint64_t level)
+{
+ uint64_t ccsidr;
+ uint64_t line_power;
+ uint64_t associativity;
+ uint64_t way;
+ uint64_t way_shift;
+
+ ccsidr = AArch64_get_ccsidr_for_level(CSSELR_LEVEL(level));
+
+ line_power = AArch64_ccsidr_get_line_power(ccsidr);
+ associativity = AArch64_ccsidr_get_associativity(ccsidr);
+ way_shift = __builtin_clz(associativity - 1);
+
+ for (way = 0; way < associativity; ++way) {
+ uint64_t num_sets = AArch64_ccsidr_get_num_sets(ccsidr);
+ uint64_t set;
+
+ for (set = 0; set < num_sets; ++set) {
+ uint64_t set_and_way = (way << way_shift)
+ | (set << line_power)
+ | (level << 1);
+
+ __asm__ volatile (
+ "dc isw, %[set_and_way]"
+ :
+ : [set_and_way] "r" (set_and_way)
+ : "memory"
+ );
+ }
+ }
+}
+
+static inline void AArch64_data_cache_invalidate_all_levels(void)
+{
+ uint64_t clidr = AArch64_get_clidr();
+ uint64_t loc = AArch64_clidr_get_level_of_coherency(clidr);
+ uint64_t level = 0;
+
+  for (level = 0; level < loc; ++level) {
+    /* CLIDR CTYPE fields are 1-based; CSSELR and set/way levels are 0-based */
+    uint64_t ctype = AArch64_clidr_get_cache_type(clidr, level + 1);
+
+    /* Check if this level has a data cache or unified cache */
+    if (((ctype & 0x6) == 2) || (ctype == 4)) {
+ AArch64_cache_invalidate_level(level);
+ }
+ }
+}
+
+static inline void _CPU_cache_invalidate_entire_data(void)
+{
+ AArch64_data_cache_invalidate_all_levels();
+}
+
+static inline uint64_t
+AArch64_get_sctlr(void)
+{
+ uint64_t val;
+
+ __asm__ volatile (
+ "mrs %[val], SCTLR_EL1\n"
+ : [val] "=&r" (val)
+ );
+
+ return val;
+}
+
+static inline void
+AArch64_set_sctlr(uint64_t val)
+{
+ __asm__ volatile (
+ "msr SCTLR_EL1, %[val]\n"
+ :
+ : [val] "r" (val)
+ );
+}
+
+#define SCTLR_TWEDEL(val) BSP_FLD64(val, 46, 49)
+#define SCTLR_TWEDEL_GET(reg) BSP_FLD64GET(reg, 46, 49)
+#define SCTLR_TWEDEL_SET(reg, val) BSP_FLD64SET(reg, val, 46, 49)
+#define SCTLR_TWEDEN BSP_BIT64(45)
+#define SCTLR_DSSBS BSP_BIT64(44)
+#define SCTLR_ATA BSP_BIT64(43)
+#define SCTLR_ATA0 BSP_BIT64(42)
+#define SCTLR_TCF(val) BSP_FLD64(val, 40, 41)
+#define SCTLR_TCF_GET(reg) BSP_FLD64GET(reg, 40, 41)
+#define SCTLR_TCF_SET(reg, val) BSP_FLD64SET(reg, val, 40, 41)
+#define SCTLR_TCF0(val) BSP_FLD64(val, 38, 39)
+#define SCTLR_TCF0_GET(reg) BSP_FLD64GET(reg, 38, 39)
+#define SCTLR_TCF0_SET(reg, val) BSP_FLD64SET(reg, val, 38, 39)
+#define SCTLR_ITFSB BSP_BIT64(37)
+#define SCTLR_BT1 BSP_BIT64(36)
+#define SCTLR_BT0 BSP_BIT64(35)
+#define SCTLR_ENIA BSP_BIT64(31)
+#define SCTLR_ENIB BSP_BIT64(30)
+#define SCTLR_LSMAOE BSP_BIT64(29)
+#define SCTLR_NTLSMD BSP_BIT64(28)
+#define SCTLR_ENDA BSP_BIT64(27)
+#define SCTLR_UCI BSP_BIT64(26)
+#define SCTLR_EE BSP_BIT64(25)
+#define SCTLR_E0E BSP_BIT64(24)
+#define SCTLR_SPAN BSP_BIT64(23)
+#define SCTLR_EIS BSP_BIT64(22)
+#define SCTLR_IESB BSP_BIT64(21)
+#define SCTLR_TSCXT BSP_BIT64(20)
+#define SCTLR_WXN BSP_BIT64(19)
+#define SCTLR_NTWE BSP_BIT64(18)
+#define SCTLR_NTWI BSP_BIT64(16)
+#define SCTLR_UCT BSP_BIT64(15)
+#define SCTLR_DZE BSP_BIT64(14)
+#define SCTLR_ENDB BSP_BIT64(13)
+#define SCTLR_I BSP_BIT64(12)
+#define SCTLR_EOS BSP_BIT64(11)
+#define SCTLR_ENRCTX BSP_BIT64(10)
+#define SCTLR_UMA BSP_BIT64(9)
+#define SCTLR_SED BSP_BIT64(8)
+#define SCTLR_ITD BSP_BIT64(7)
+#define SCTLR_NAA BSP_BIT64(6)
+#define SCTLR_CP15BEN BSP_BIT64(5)
+#define SCTLR_SA0 BSP_BIT64(4)
+#define SCTLR_SA BSP_BIT64(3)
+#define SCTLR_C BSP_BIT64(2)
+#define SCTLR_A BSP_BIT64(1)
+#define SCTLR_M BSP_BIT64(0)
+
+static inline void _CPU_cache_enable_data(void)
+{
+ rtems_interrupt_level level;
+ uint64_t sctlr;
+
+ rtems_interrupt_local_disable(level);
+ sctlr = AArch64_get_sctlr();
+ sctlr |= SCTLR_C;
+ AArch64_set_sctlr(sctlr);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline void _CPU_cache_disable_data(void)
+{
+ rtems_interrupt_level level;
+ uint64_t sctlr;
+
+ rtems_interrupt_local_disable(level);
+ AArch64_data_cache_clean_all_levels();
+ AArch64_data_cache_invalidate_all_levels();
+ sctlr = AArch64_get_sctlr();
+ sctlr &= ~SCTLR_C;
+ AArch64_set_sctlr(sctlr);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline
+void AArch64_instruction_cache_inner_shareable_invalidate_all(void)
+{
+ __asm__ volatile (
+ "ic ialluis\n"
+ :
+ :
+ : "memory"
+ );
+}
+
+static inline void AArch64_instruction_cache_invalidate(void)
+{
+ __asm__ volatile (
+ "ic iallu\n"
+ :
+ :
+ : "memory"
+ );
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction(void)
+{
+ /* There is no way to manage branch prediction in AArch64.
+ * See D4.4.12 in the ARMv8 technical manual. */
+
+ #ifdef RTEMS_SMP
+ /* invalidate I-cache inner shareable */
+ AArch64_instruction_cache_inner_shareable_invalidate_all();
+ #endif /* RTEMS_SMP */
+
+ /* I+BTB cache invalidate */
+ AArch64_instruction_cache_invalidate();
+
+ _AARCH64_Instruction_synchronization_barrier();
+}
+
+static inline void _CPU_cache_enable_instruction(void)
+{
+ rtems_interrupt_level level;
+ uint64_t sctlr;
+
+ rtems_interrupt_local_disable(level);
+ sctlr = AArch64_get_sctlr();
+ sctlr |= SCTLR_I;
+ AArch64_set_sctlr(sctlr);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline void _CPU_cache_disable_instruction(void)
+{
+ rtems_interrupt_level level;
+ uint64_t sctlr;
+
+ rtems_interrupt_local_disable(level);
+ sctlr = AArch64_get_sctlr();
+ sctlr &= ~SCTLR_I;
+ AArch64_set_sctlr(sctlr);
+ rtems_interrupt_local_enable(level);
+}
+
+static inline size_t AArch64_get_cache_size(
+ uint64_t level,
+ bool instruction
+)
+{
+ uint64_t clidr;
+ uint64_t loc;
+ uint64_t ccsidr;
+
+ clidr = AArch64_get_clidr();
+ loc = AArch64_clidr_get_level_of_coherency(clidr);
+
+  /* A level of 0 selects the outermost cache; other levels are 1-based */
+  if (level > loc) {
+    return 0;
+  }
+
+  if (level == 0) {
+    level = loc;
+  }
+
+  ccsidr = AArch64_get_ccsidr_for_level(
+    CSSELR_LEVEL(level - 1) | (instruction ? CSSELR_IND : 0)
+ );
+
+  return (1U << AArch64_ccsidr_get_line_power(ccsidr))
+ * AArch64_ccsidr_get_associativity(ccsidr)
+ * AArch64_ccsidr_get_num_sets(ccsidr);
+}
+
+static inline size_t _CPU_cache_get_data_cache_size(uint64_t level)
+{
+ return AArch64_get_cache_size(level, false);
+}
+
+static inline size_t _CPU_cache_get_instruction_cache_size(uint64_t level)
+{
+ return AArch64_get_cache_size(level, true);
+}
+
+#include "../../shared/cache/cacheimpl.h"
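
These _CPU_cache_* primitives are not called directly by applications; cacheimpl.h wraps them in the RTEMS cache manager API. A minimal usage sketch (the buffer and the DMA scenario are illustrative only):

    #include <rtems.h>

    /* Keep the buffer cache-line aligned so invalidation cannot clip neighbors */
    static uint8_t dma_buffer[256] __attribute__((aligned(64)));

    void dma_example(void)
    {
      /* The CPU wrote descriptors: push them out to memory for the device */
      rtems_cache_flush_multiple_data_lines(dma_buffer, sizeof(dma_buffer));

      /* ... device fills dma_buffer via DMA ... */

      /* Drop stale cached copies before the CPU reads the DMA results */
      rtems_cache_invalidate_multiple_data_lines(dma_buffer, sizeof(dma_buffer));
    }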
diff --git a/bsps/aarch64/shared/clock/arm-generic-timer-aarch64.c b/bsps/aarch64/shared/clock/arm-generic-timer-aarch64.c
new file mode 100644
index 0000000000..977910ff3a
--- /dev/null
+++ b/bsps/aarch64/shared/clock/arm-generic-timer-aarch64.c
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64-specific ARM GPT system register accessors.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dev/clock/arm-generic-timer.h>
+#include <bsp/irq.h>
+
+uint64_t arm_gt_clock_get_compare_value(void)
+{
+ uint64_t val;
+ __asm__ volatile (
+#ifdef AARCH64_GENERIC_TIMER_USE_VIRTUAL
+ "mrs %[val], cntv_cval_el0"
+#else
+ "mrs %[val], cntp_cval_el0"
+#endif
+ : [val] "=&r" (val)
+ );
+ return val;
+}
+
+void arm_gt_clock_set_compare_value(uint64_t cval)
+{
+ __asm__ volatile (
+#ifdef AARCH64_GENERIC_TIMER_USE_VIRTUAL
+ "msr cntv_cval_el0, %[cval]"
+#else
+ "msr cntp_cval_el0, %[cval]"
+#endif
+ :
+ : [cval] "r" (cval)
+ );
+}
+
+uint64_t arm_gt_clock_get_count(void)
+{
+ uint64_t val;
+ __asm__ volatile (
+#ifdef AARCH64_GENERIC_TIMER_USE_VIRTUAL
+ "mrs %[val], cntvct_el0"
+#else
+ "mrs %[val], cntpct_el0"
+#endif
+ : [val] "=&r" (val)
+ );
+ return val;
+}
+
+void arm_gt_clock_set_control(uint32_t ctl)
+{
+ __asm__ volatile (
+#ifdef AARCH64_GENERIC_TIMER_USE_VIRTUAL
+ "msr cntv_ctl_el0, %[ctl]"
+#else
+ "msr cntp_ctl_el0, %[ctl]"
+#endif
+ :
+ : [ctl] "r" (ctl)
+ );
+}
+
+void arm_generic_timer_get_config( uint32_t *frequency, uint32_t *irq )
+{
+ uint64_t val;
+ __asm__ volatile (
+ "mrs %[val], cntfrq_el0"
+ : [val] "=&r" (val)
+ );
+ *frequency = val;
+
+#ifdef AARCH64_GENERIC_TIMER_USE_VIRTUAL
+ *irq = BSP_TIMER_VIRT_PPI;
+#else
+ *irq = BSP_TIMER_PHYS_NS_PPI;
+#endif
+}
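
The accessors above give a clock driver everything it needs to program a tick. A sketch of the usual pattern (the GT_CTL_* names are local stand-ins for the architectural CNTx_CTL.ENABLE and CNTx_CTL.IMASK bits):

    #include <dev/clock/arm-generic-timer.h>
    #include <stdint.h>

    #define GT_CTL_ENABLE (UINT32_C(1) << 0) /* CNTx_CTL.ENABLE */
    #define GT_CTL_IMASK  (UINT32_C(1) << 1) /* CNTx_CTL.IMASK */

    static void gt_arm_next_tick(uint64_t interval)
    {
      /* Fire the compare interrupt "interval" counter ticks from now */
      arm_gt_clock_set_compare_value(arm_gt_clock_get_count() + interval);

      /* Timer enabled, interrupt not masked */
      arm_gt_clock_set_control(GT_CTL_ENABLE);
    }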
diff --git a/bsps/aarch64/shared/irq/irq-arm-gicv3-aarch64.c b/bsps/aarch64/shared/irq/irq-arm-gicv3-aarch64.c
new file mode 100644
index 0000000000..fe74bf46bd
--- /dev/null
+++ b/bsps/aarch64/shared/irq/irq-arm-gicv3-aarch64.c
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64-specific ARM GICv3 handlers.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dev/irq/arm-gic-irq.h>
+#include <bsp/irq-generic.h>
+#include <rtems/score/cpu_irq.h>
+
+void arm_interrupt_handler_dispatch(rtems_vector_number vector)
+{
+ uint32_t interrupt_level = _CPU_ISR_Get_level();
+ AArch64_interrupt_enable(1);
+ bsp_interrupt_handler_dispatch(vector);
+ _CPU_ISR_Set_level(interrupt_level);
+}
+
+void arm_interrupt_facility_set_exception_handler(void)
+{
+ AArch64_set_exception_handler(
+ AARCH64_EXCEPTION_SPx_IRQ,
+ _AArch64_Exception_interrupt_no_nest
+ );
+ AArch64_set_exception_handler(
+ AARCH64_EXCEPTION_SP0_IRQ,
+ _AArch64_Exception_interrupt_nest
+ );
+}
+
+void bsp_interrupt_dispatch(void)
+{
+ gicv3_interrupt_dispatch();
+}
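
With these hooks in place, drivers install handlers through the usual RTEMS interrupt extension API. A sketch (the vector number is a placeholder for a BSP-specific GIC interrupt ID):

    #include <rtems.h>
    #include <rtems/irq-extension.h>

    static void my_isr(void *arg)
    {
      (void) arg; /* acknowledge and service the device here */
    }

    static rtems_status_code install_my_isr(rtems_vector_number vector)
    {
      return rtems_interrupt_handler_install(
        vector,
        "my-dev",
        RTEMS_INTERRUPT_UNIQUE,
        my_isr,
        NULL
      );
    }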
diff --git a/bsps/aarch64/shared/start/linkcmds.base b/bsps/aarch64/shared/start/linkcmds.base
new file mode 100644
index 0000000000..450c3ba2a9
--- /dev/null
+++ b/bsps/aarch64/shared/start/linkcmds.base
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup bsp_linker
+ *
+ * @brief Linker command base file.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ENTRY (_start)
+STARTUP (start.o)
+
+/*
+ * Global symbols that may be defined externally
+ */
+
+bsp_stack_align = DEFINED (bsp_stack_align) ? bsp_stack_align : 16;
+
+bsp_stack_exception_size = DEFINED (bsp_stack_exception_size) ? bsp_stack_exception_size : 0;
+bsp_stack_exception_size = ALIGN (bsp_stack_exception_size, bsp_stack_align);
+
+bsp_vector_table_size = DEFINED (bsp_vector_table_size) ? bsp_vector_table_size : 64;
+
+bsp_section_xbarrier_align = DEFINED (bsp_section_xbarrier_align) ? bsp_section_xbarrier_align : 1;
+bsp_section_robarrier_align = DEFINED (bsp_section_robarrier_align) ? bsp_section_robarrier_align : 1;
+bsp_section_rwbarrier_align = DEFINED (bsp_section_rwbarrier_align) ? bsp_section_rwbarrier_align : 1;
+
+bsp_stack_hyp_size = DEFINED (bsp_stack_hyp_size) ? bsp_stack_hyp_size : 0;
+bsp_stack_hyp_size = ALIGN (bsp_stack_hyp_size, bsp_stack_align);
+
+MEMORY {
+ UNEXPECTED_SECTIONS : ORIGIN = 0xffffffff, LENGTH = 0
+}
+
+SECTIONS {
+ .start : ALIGN_WITH_INPUT {
+ bsp_section_start_begin = .;
+ KEEP (*(.bsp_start_text))
+ KEEP (*(.bsp_start_data))
+ bsp_section_start_end = .;
+ } > REGION_START AT > REGION_START
+ bsp_section_start_size = bsp_section_start_end - bsp_section_start_begin;
+
+ .xbarrier : ALIGN_WITH_INPUT {
+ . = ALIGN (bsp_section_xbarrier_align);
+ } > REGION_VECTOR AT > REGION_VECTOR
+
+ .text : ALIGN_WITH_INPUT {
+ bsp_section_text_begin = .;
+ *(.text.unlikely .text.*_unlikely)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ } > REGION_TEXT AT > REGION_TEXT_LOAD
+ .init : ALIGN_WITH_INPUT {
+ KEEP (*(.init))
+ } > REGION_TEXT AT > REGION_TEXT_LOAD
+ .fini : ALIGN_WITH_INPUT {
+ KEEP (*(.fini))
+ bsp_section_text_end = .;
+ } > REGION_TEXT AT > REGION_TEXT_LOAD
+ bsp_section_text_size = bsp_section_text_end - bsp_section_text_begin;
+ bsp_section_text_load_begin = LOADADDR (.text);
+ bsp_section_text_load_end = bsp_section_text_load_begin + bsp_section_text_size;
+
+ .robarrier : ALIGN_WITH_INPUT {
+ . = ALIGN (bsp_section_robarrier_align);
+ } > REGION_RODATA AT > REGION_RODATA
+
+ .rodata : ALIGN_WITH_INPUT {
+ bsp_section_rodata_begin = .;
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rodata1 : ALIGN_WITH_INPUT {
+ *(.rodata1)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .ARM.extab : ALIGN_WITH_INPUT {
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .ARM.exidx : ALIGN_WITH_INPUT {
+ __exidx_start = .;
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ __exidx_end = .;
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .eh_frame : ALIGN_WITH_INPUT {
+ KEEP (*(.eh_frame))
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .gcc_except_table : ALIGN_WITH_INPUT {
+ *(.gcc_except_table .gcc_except_table.*)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .tdata : ALIGN_WITH_INPUT {
+ _TLS_Data_begin = .;
+ *(.tdata .tdata.* .gnu.linkonce.td.*)
+ _TLS_Data_end = .;
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .tbss : ALIGN_WITH_INPUT {
+ _TLS_BSS_begin = .;
+ *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
+ _TLS_BSS_end = .;
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ _TLS_Data_size = _TLS_Data_end - _TLS_Data_begin;
+ _TLS_Data_begin = _TLS_Data_size != 0 ? _TLS_Data_begin : _TLS_BSS_begin;
+ _TLS_Data_end = _TLS_Data_size != 0 ? _TLS_Data_end : _TLS_BSS_begin;
+ _TLS_BSS_size = _TLS_BSS_end - _TLS_BSS_begin;
+ _TLS_Size = _TLS_BSS_end - _TLS_Data_begin;
+ _TLS_Alignment = MAX (ALIGNOF (.tdata), ALIGNOF (.tbss));
+ .preinit_array : ALIGN_WITH_INPUT {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .init_array : ALIGN_WITH_INPUT {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .fini_array : ALIGN_WITH_INPUT {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .data.rel.ro : ALIGN_WITH_INPUT {
+ *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*)
+ *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .jcr : ALIGN_WITH_INPUT {
+ KEEP (*(.jcr))
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .interp : ALIGN_WITH_INPUT {
+ *(.interp)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .note.gnu.build-id : ALIGN_WITH_INPUT {
+ *(.note.gnu.build-id)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .hash : ALIGN_WITH_INPUT {
+ *(.hash)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .gnu.hash : ALIGN_WITH_INPUT {
+ *(.gnu.hash)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .dynsym : ALIGN_WITH_INPUT {
+ *(.dynsym)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .dynstr : ALIGN_WITH_INPUT {
+ *(.dynstr)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .gnu.version : ALIGN_WITH_INPUT {
+ *(.gnu.version)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .gnu.version_d : ALIGN_WITH_INPUT {
+ *(.gnu.version_d)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .gnu.version_r : ALIGN_WITH_INPUT {
+ *(.gnu.version_r)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rel.dyn : ALIGN_WITH_INPUT {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rela.dyn : ALIGN_WITH_INPUT {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.rtemsroset*)
+ *(.rela.rtemsrwset*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rel.plt : ALIGN_WITH_INPUT {
+ *(.rel.plt)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rela.plt : ALIGN_WITH_INPUT {
+ *(.rela.plt)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .plt : ALIGN_WITH_INPUT {
+ *(.plt)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .iplt : ALIGN_WITH_INPUT {
+ *(.iplt)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .dynamic : ALIGN_WITH_INPUT {
+ *(.dynamic)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .tm_clone_table : ALIGN_WITH_INPUT {
+ *(.tm_clone_table)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .got : ALIGN_WITH_INPUT {
+ *(.got.plt) *(.igot.plt) *(.got) *(.igot)
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ .rtemsroset : ALIGN_WITH_INPUT SUBALIGN(4) {
+ /* Special FreeBSD linker set sections */
+ __start_set_sysctl_set = .;
+ *(set_sysctl_*);
+ __stop_set_sysctl_set = .;
+ *(set_domain_*);
+ *(set_pseudo_*);
+
+ KEEP (*(SORT(.rtemsroset.*)))
+ bsp_section_rodata_end = .;
+ } > REGION_RODATA AT > REGION_RODATA_LOAD
+ bsp_section_rodata_size = bsp_section_rodata_end - bsp_section_rodata_begin;
+ bsp_section_rodata_load_begin = LOADADDR (.rodata);
+ bsp_section_rodata_load_end = bsp_section_rodata_load_begin + bsp_section_rodata_size;
+
+ .rwbarrier : ALIGN_WITH_INPUT {
+ . = ALIGN (bsp_section_rwbarrier_align);
+ } > REGION_DATA AT > REGION_DATA
+
+ .vector : ALIGN_WITH_INPUT {
+ bsp_section_vector_begin = .;
+ . = . + DEFINED (bsp_vector_table_in_start_section) ? 0 : bsp_vector_table_size;
+ bsp_section_vector_end = .;
+ } > REGION_VECTOR AT > REGION_VECTOR
+ bsp_section_vector_size = bsp_section_vector_end - bsp_section_vector_begin;
+ bsp_vector_table_begin = DEFINED (bsp_vector_table_in_start_section) ? bsp_section_start_begin : bsp_section_vector_begin;
+ bsp_vector_table_end = bsp_vector_table_begin + bsp_vector_table_size;
+
+ .fast_text : ALIGN_WITH_INPUT {
+ bsp_section_fast_text_begin = .;
+ *(.bsp_fast_text)
+ bsp_section_fast_text_end = .;
+ } > REGION_FAST_TEXT AT > REGION_FAST_TEXT_LOAD
+ bsp_section_fast_text_size = bsp_section_fast_text_end - bsp_section_fast_text_begin;
+ bsp_section_fast_text_load_begin = LOADADDR (.fast_text);
+ bsp_section_fast_text_load_end = bsp_section_fast_text_load_begin + bsp_section_fast_text_size;
+
+ .fast_data : ALIGN_WITH_INPUT {
+ bsp_section_fast_data_begin = .;
+ *(.bsp_fast_data)
+ bsp_section_fast_data_end = .;
+ } > REGION_FAST_DATA AT > REGION_FAST_DATA_LOAD
+ bsp_section_fast_data_size = bsp_section_fast_data_end - bsp_section_fast_data_begin;
+ bsp_section_fast_data_load_begin = LOADADDR (.fast_data);
+ bsp_section_fast_data_load_end = bsp_section_fast_data_load_begin + bsp_section_fast_data_size;
+
+ .data : ALIGN_WITH_INPUT {
+ bsp_section_data_begin = .;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ } > REGION_DATA AT > REGION_DATA_LOAD
+ .data1 : ALIGN_WITH_INPUT {
+ *(.data1)
+ } > REGION_DATA AT > REGION_DATA_LOAD
+ .rtemsrwset : ALIGN_WITH_INPUT SUBALIGN(4) {
+ KEEP (*(SORT(.rtemsrwset.*)))
+ bsp_section_data_end = .;
+ } > REGION_DATA AT > REGION_DATA_LOAD
+ bsp_section_data_size = bsp_section_data_end - bsp_section_data_begin;
+ bsp_section_data_load_begin = LOADADDR (.data);
+ bsp_section_data_load_end = bsp_section_data_load_begin + bsp_section_data_size;
+
+ .bss : ALIGN_WITH_INPUT {
+ bsp_section_bss_begin = .;
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ bsp_section_bss_end = .;
+ } > REGION_BSS AT > REGION_BSS
+ bsp_section_bss_size = bsp_section_bss_end - bsp_section_bss_begin;
+
+ .rtemsstack (NOLOAD) : ALIGN_WITH_INPUT {
+ bsp_section_rtemsstack_begin = .;
+ *(SORT_BY_ALIGNMENT (SORT_BY_NAME (.rtemsstack*)))
+ bsp_section_rtemsstack_end = .;
+ } > REGION_WORK AT > REGION_WORK
+ bsp_section_rtemsstack_size = bsp_section_rtemsstack_end - bsp_section_rtemsstack_begin;
+
+ .work : ALIGN_WITH_INPUT {
+ /*
+ * The work section will occupy the remaining REGION_WORK region and
+ * contains the RTEMS work space and heap.
+ */
+ bsp_section_work_begin = .;
+ . += ORIGIN (REGION_WORK) + LENGTH (REGION_WORK) - ABSOLUTE (.);
+ bsp_section_work_end = .;
+ } > REGION_WORK AT > REGION_WORK
+ bsp_section_work_size = bsp_section_work_end - bsp_section_work_begin;
+
+ .stack : ALIGN_WITH_INPUT {
+ /*
+ * The stack section will occupy the remaining REGION_STACK region and may
+ * contain the task stacks. Depending on the region distribution this
+ * section may be of zero size.
+ */
+ bsp_section_stack_begin = .;
+ . += ORIGIN (REGION_STACK) + LENGTH (REGION_STACK) - ABSOLUTE (.);
+ bsp_section_stack_end = .;
+ } > REGION_STACK AT > REGION_STACK
+ bsp_section_stack_size = bsp_section_stack_end - bsp_section_stack_begin;
+
+ .nocache : ALIGN_WITH_INPUT {
+ bsp_section_nocache_begin = .;
+ *(SORT_BY_ALIGNMENT (SORT_BY_NAME (.bsp_nocache*)))
+ bsp_section_nocache_end = .;
+ } > REGION_NOCACHE AT > REGION_NOCACHE_LOAD
+ bsp_section_nocache_size = bsp_section_nocache_end - bsp_section_nocache_begin;
+ bsp_section_nocache_load_begin = LOADADDR (.nocache);
+ bsp_section_nocache_load_end = bsp_section_nocache_load_begin + bsp_section_nocache_size;
+
+ .nocachenoload (NOLOAD) : ALIGN_WITH_INPUT {
+ bsp_section_nocachenoload_begin = .;
+ *(SORT_BY_ALIGNMENT (SORT_BY_NAME (.bsp_noload_nocache*)))
+ bsp_section_nocacheheap_begin = .;
+ . += ORIGIN (REGION_NOCACHE) + LENGTH (REGION_NOCACHE) - ABSOLUTE (.);
+ bsp_section_nocacheheap_end = .;
+ bsp_section_nocachenoload_end = .;
+ } > REGION_NOCACHE AT > REGION_NOCACHE
+ bsp_section_nocacheheap_size = bsp_section_nocacheheap_end - bsp_section_nocacheheap_begin;
+ bsp_section_nocachenoload_size = bsp_section_nocachenoload_end - bsp_section_nocachenoload_begin;
+
+ /* FIXME */
+ RamBase = ORIGIN (REGION_WORK);
+ RamSize = LENGTH (REGION_WORK);
+ RamEnd = RamBase + RamSize;
+ WorkAreaBase = bsp_section_work_begin;
+ HeapSize = 0;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF extension */
+ .debug_macro 0 : { *(.debug_macro) }
+ .ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+
+ /*
+ * This is a RTEMS specific section to catch all unexpected input
+ * sections. In case you get an error like
+ * "section `.unexpected_sections' will not fit in region
+ * `UNEXPECTED_SECTIONS'"
+ * you have to figure out the offending input section and add it to the
+ * appropriate output section definition above.
+ */
+ .unexpected_sections : { *(*) } > UNEXPECTED_SECTIONS
+}
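
linkcmds.base does not define the REGION_* names it references; each BSP-specific linker script supplies them. A minimal sketch (memory name, origin, and length invented for illustration):

    MEMORY {
      RAM : ORIGIN = 0x40000000, LENGTH = 128M
    }

    REGION_ALIAS ("REGION_START", RAM);
    REGION_ALIAS ("REGION_VECTOR", RAM);
    REGION_ALIAS ("REGION_TEXT", RAM);
    REGION_ALIAS ("REGION_TEXT_LOAD", RAM);
    REGION_ALIAS ("REGION_RODATA", RAM);
    REGION_ALIAS ("REGION_RODATA_LOAD", RAM);
    REGION_ALIAS ("REGION_DATA", RAM);
    REGION_ALIAS ("REGION_DATA_LOAD", RAM);
    REGION_ALIAS ("REGION_BSS", RAM);
    REGION_ALIAS ("REGION_WORK", RAM);
    REGION_ALIAS ("REGION_STACK", RAM);
    /* ... likewise for REGION_FAST_* and REGION_NOCACHE* ... */

    INCLUDE linkcmds.base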
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
new file mode 100644
index 0000000000..f60e840137
--- /dev/null
+++ b/bsps/aarch64/shared/start/start.S
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief Boot and system start code.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/percpu.h>
+
+#include <bspopts.h>
+
+ /* Global symbols */
+ .globl _start
+ .section ".bsp_start_text", "ax"
+
+/* Start entry */
+
+_start:
+
+	/*
+	 * We do not save the context, since we do not return to the boot
+	 * loader; however, x1 and x2 are preserved to keep the bootloader
+	 * parameters accessible.
+	 */
+#ifndef BSP_START_NEEDS_REGISTER_INITIALIZATION
+ mov x5, x1 /* machine type number or ~0 for DT boot */
+ mov x6, x2 /* physical address of ATAGs or DTB */
+#else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+ mov x0, XZR
+ mov x1, XZR
+ mov x2, XZR
+ mov x3, XZR
+ mov x4, XZR
+ mov x5, XZR
+ mov x6, XZR
+ mov x7, XZR
+ mov x8, XZR
+ mov x9, XZR
+ mov x10, XZR
+ mov x11, XZR
+ mov x12, XZR
+ mov x13, XZR
+ mov x14, XZR
+ mov x15, XZR
+ mov x16, XZR
+ mov x17, XZR
+ mov x18, XZR
+ mov x19, XZR
+ mov x20, XZR
+ mov x21, XZR
+ mov x22, XZR
+ mov x23, XZR
+ mov x24, XZR
+ mov x25, XZR
+ mov x26, XZR
+ mov x27, XZR
+ mov x28, XZR
+ mov x29, XZR
+ mov x30, XZR
+#ifdef AARCH64_MULTILIB_VFP
+#endif
+#endif
+
+#ifdef RTEMS_SMP
+ /* Read MPIDR and get current processor index */
+ mrs x7, mpidr_el1
+	and	x7, x7, #0xff
+#endif
+
+#ifdef RTEMS_SMP
+	/*
+	 * Get the current per-CPU control and store it in the EL1 Thread ID
+	 * Register (TPIDR_EL1).
+	 */
+	ldr	x1, =_Per_CPU_Information
+	add	x1, x1, x7, lsl #PER_CPU_CONTROL_SIZE_LOG2
+	msr	tpidr_el1, x1
+
+#endif
+
+ /* Calculate interrupt stack area end for current processor */
+ ldr x1, =_ISR_Stack_size
+#ifdef RTEMS_SMP
+ add x3, x7, #1
+ mul x1, x1, x3
+#endif
+ ldr x2, =_ISR_Stack_area_begin
+ add x3, x1, x2
+
+ /* Save original DAIF value */
+ mrs x4, DAIF
+
+#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
+ mov x8, XZR
+ mov x9, XZR
+ mov x10, XZR
+ mov x11, XZR
+ mov x12, XZR
+ mov x13, XZR
+ mov x14, XZR
+ mov x15, XZR
+#endif
+
+ /*
+ * SPx: the stack pointer corresponding to the current exception level
+ * Normal operation for RTEMS on AArch64 uses SPx and runs on EL1
+ * Exception operation (synchronous errors, IRQ, FIQ, System Errors) uses SP0
+ */
+ ldr x1, =bsp_stack_exception_size
+ /* Switch to SP0 and set exception stack */
+ msr spsel, #0
+ mov sp, x3
+ /* Switch back to SPx for normal operation */
+ msr spsel, #1
+ sub x3, x3, x1
+
+ /* Set SP1 stack used for normal operation */
+ mov sp, x3
+
+ /* Stay in EL1 mode */
+
+#ifdef AARCH64_MULTILIB_VFP
+#ifdef AARCH64_MULTILIB_HAS_CPACR
+ /* Read CPACR */
+ mrs x0, CPACR_EL1
+
+	/* Enable EL1 access to the FP and SIMD unit (CPACR_EL1.FPEN) */
+ orr x0, x0, #(1 << 20)
+
+ /* Write CPACR */
+ msr CPACR_EL1, x0
+ isb
+#endif
+
+ /* FPU does not need to be enabled on AArch64 */
+
+#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
+ mov x0, #0
+	msr	CPTR_EL3, xzr
+	msr	CPTR_EL2, xzr
+	fmov	d0, xzr
+	fmov	d1, xzr
+	fmov	d2, xzr
+	fmov	d3, xzr
+	fmov	d4, xzr
+	fmov	d5, xzr
+	fmov	d6, xzr
+	fmov	d7, xzr
+	fmov	d8, xzr
+	fmov	d9, xzr
+	fmov	d10, xzr
+	fmov	d11, xzr
+	fmov	d12, xzr
+	fmov	d13, xzr
+	fmov	d14, xzr
+	fmov	d15, xzr
+	fmov	d16, xzr
+	fmov	d17, xzr
+	fmov	d18, xzr
+	fmov	d19, xzr
+	fmov	d20, xzr
+	fmov	d21, xzr
+	fmov	d22, xzr
+	fmov	d23, xzr
+	fmov	d24, xzr
+	fmov	d25, xzr
+	fmov	d26, xzr
+	fmov	d27, xzr
+	fmov	d28, xzr
+	fmov	d29, xzr
+	fmov	d30, xzr
+	fmov	d31, xzr
+#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
+
+#endif /* AARCH64_MULTILIB_VFP */
+
+	/* Invoke the start hook 0 */
+
+ mov x1, x5 /* machine type number or ~0 for DT boot */
+ bl bsp_start_hook_0
+
+ /* Branch to start hook 1 */
+ bl bsp_start_hook_1
+
+ /* Branch to boot card */
+ mov x0, #0
+ bl boot_card
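
bsp_start_hook_0 and bsp_start_hook_1 are supplied by the individual BSP. A minimal sketch of what such hooks commonly do (using the standard RTEMS start helpers; exact duties vary per BSP):

    #include <bsp/start.h>

    BSP_START_TEXT_SECTION void bsp_start_hook_0(void)
    {
      /* Runs very early, before sections are copied; keep it minimal */
    }

    BSP_START_TEXT_SECTION void bsp_start_hook_1(void)
    {
      bsp_start_copy_sections();
      bsp_start_clear_bss();
    }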