Diffstat:
 bsps/aarch64/include/bsp/aarch64-mmu.h                   | 426
 bsps/aarch64/include/bsp/linker-symbols.h                |   7
 bsps/aarch64/xilinx-zynqmp/include/bsp.h                 |   7
 bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c         |   1
 bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c           |  77
 cpukit/score/cpu/aarch64/aarch64-exception-default.S     |  66
 spec/build/bsps/aarch64/optmmupages.yml                  |  19
 spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml            |   2
 spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_ilp32.yml |   8
 spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_lp64.yml  |   8
 spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml            |   2
 11 files changed, 603 insertions(+), 20 deletions(-)
diff --git a/bsps/aarch64/include/bsp/aarch64-mmu.h b/bsps/aarch64/include/bsp/aarch64-mmu.h
new file mode 100644
index 0000000000..e82012576f
--- /dev/null
+++ b/bsps/aarch64/include/bsp/aarch64-mmu.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup aarch64_start
+ *
+ * @brief AArch64 MMU configuration.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LIBBSP_AARCH64_SHARED_AARCH64_MMU_H
+#define LIBBSP_AARCH64_SHARED_AARCH64_MMU_H
+
+#include <bsp/start.h>
+#include <bsp/linker-symbols.h>
+#include <rtems/score/aarch64-system-registers.h>
+#include <bspopts.h>
+#include <bsp/utility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* VMSAv8 Long-descriptor fields */
+#define MMU_DESC_AF BSP_BIT64( 10 )
+#define MMU_DESC_SH_INNER ( BSP_BIT64( 9 ) | BSP_BIT64( 8 ) )
+#define MMU_DESC_WRITE_DISABLE BSP_BIT64( 7 )
+/* PAGE and TABLE flags are the same bit, but only apply on certain levels */
+#define MMU_DESC_TYPE_TABLE BSP_BIT64( 1 )
+#define MMU_DESC_TYPE_PAGE BSP_BIT64( 1 )
+#define MMU_DESC_VALID BSP_BIT64( 0 )
+#define MMU_DESC_MAIR_ATTR( val ) BSP_FLD64( val, 2, 3 )
+#define MMU_DESC_MAIR_ATTR_GET( reg ) BSP_FLD64GET( reg, 2, 3 )
+#define MMU_DESC_MAIR_ATTR_SET( reg, val ) BSP_FLD64SET( reg, val, 2, 3 )
+#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL
+
+/* Page table configuration */
+#define MMU_PAGE_BITS 12
+#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
+#define MMU_BITS_PER_LEVEL 9
+#define MMU_TOP_LEVEL_PAGE_BITS ( 2 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
+
+#define AARCH64_MMU_FLAGS_BASE \
+ ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
+
+#define AARCH64_MMU_DATA_RO_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED
+#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED
+
+#define AARCH64_MMU_DATA_RO \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO
+#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW
+
+/* RW implied by not ORing in RO */
+#define AARCH64_MMU_DATA_RW_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) )
+#define AARCH64_MMU_DATA_RW \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) )
+#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) )
+
+typedef struct {
+ uintptr_t begin;
+ uintptr_t end;
+ uint64_t flags;
+} aarch64_mmu_config_entry;
+
+#define AARCH64_MMU_DEFAULT_SECTIONS \
+ { \
+ .begin = (uintptr_t) bsp_section_fast_text_begin, \
+ .end = (uintptr_t) bsp_section_fast_text_end, \
+ .flags = AARCH64_MMU_CODE_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_fast_data_begin, \
+ .end = (uintptr_t) bsp_section_fast_data_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_start_begin, \
+ .end = (uintptr_t) bsp_section_start_end, \
+ .flags = AARCH64_MMU_CODE_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_vector_begin, \
+ .end = (uintptr_t) bsp_section_vector_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_text_begin, \
+ .end = (uintptr_t) bsp_section_text_end, \
+ .flags = AARCH64_MMU_CODE_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_rodata_begin, \
+ .end = (uintptr_t) bsp_section_rodata_end, \
+ .flags = AARCH64_MMU_DATA_RO_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_data_begin, \
+ .end = (uintptr_t) bsp_section_data_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_bss_begin, \
+ .end = (uintptr_t) bsp_section_bss_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_rtemsstack_begin, \
+ .end = (uintptr_t) bsp_section_rtemsstack_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_work_begin, \
+ .end = (uintptr_t) bsp_section_work_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_stack_begin, \
+ .end = (uintptr_t) bsp_section_stack_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+ .begin = (uintptr_t) bsp_section_nocache_begin, \
+ .end = (uintptr_t) bsp_section_nocache_end, \
+ .flags = AARCH64_MMU_DEVICE \
+ }, { \
+ .begin = (uintptr_t) bsp_section_nocachenoload_begin, \
+ .end = (uintptr_t) bsp_section_nocachenoload_end, \
+ .flags = AARCH64_MMU_DEVICE \
+ }, { \
+ .begin = (uintptr_t) bsp_translation_table_base, \
+ .end = (uintptr_t) bsp_translation_table_end, \
+ .flags = AARCH64_MMU_DATA_RW_CACHED \
+ }, { \
+/*
+ * The vector table must be in writable and executable memory as it stores both
+ * exception code and the mutable pointer to which it jumps
+ */ \
+ .begin = (uintptr_t) bsp_start_vector_table_begin, \
+ .end = (uintptr_t) bsp_start_vector_table_end, \
+ .flags = AARCH64_MMU_CODE_RW_CACHED \
+ }
+
+/* set up straight-mapped block entries */
+BSP_START_TEXT_SECTION static inline void aarch64_mmu_page_table_set_blocks(
+ uint64_t *page_table,
+ uint64_t base,
+ uint32_t bits_offset,
+ uint64_t default_attr
+)
+{
+ uint64_t page_flag = 0;
+
+ if ( bits_offset == MMU_PAGE_BITS ) {
+ page_flag = MMU_DESC_TYPE_PAGE;
+ }
+
+ for ( uint64_t i = 0; i < ( 1 << MMU_BITS_PER_LEVEL ); i++ ) {
+ page_table[i] = base | ( i << bits_offset );
+ page_table[i] |= default_attr | page_flag;
+ }
+}
+
+BSP_START_TEXT_SECTION static inline rtems_status_code
+aarch64_mmu_page_table_alloc( uint64_t **page_table )
+{
+ /* First page table is already in use as TTB0 */
+ static uintptr_t *current_page_table =
+ (uintptr_t *) bsp_translation_table_base;
+
+ current_page_table += MMU_PAGE_SIZE;
+ *page_table = (uint64_t *) current_page_table;
+
+ /* Out of linker-allocated page tables? */
+ uintptr_t consumed_pages = (uintptr_t) current_page_table;
+ consumed_pages -= (uintptr_t) bsp_translation_table_base;
+ consumed_pages /= MMU_PAGE_SIZE;
+
+ if ( consumed_pages > AARCH64_MMU_TRANSLATION_TABLE_PAGES ) {
+ *page_table = NULL;
+ return RTEMS_NO_MEMORY;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+BSP_START_TEXT_SECTION static inline uintptr_t aarch64_mmu_get_index(
+ uintptr_t root_address,
+ uintptr_t vaddr,
+ uint32_t shift
+)
+{
+ uintptr_t mask = ( 1 << ( MMU_BITS_PER_LEVEL + 1 ) ) - 1;
+
+ return ( ( vaddr - root_address ) >> shift ) & mask;
+}
+
+BSP_START_TEXT_SECTION static inline rtems_status_code
+aarch64_mmu_get_sub_table(
+ uint64_t *page_table_entry,
+ uint64_t **sub_table,
+ uintptr_t physical_root_address,
+ uint32_t shift
+)
+{
+ /* check if the index already has a page table */
+ if ( ( *page_table_entry & MMU_DESC_TYPE_TABLE ) == MMU_DESC_TYPE_TABLE ) {
+ /* extract page table address */
+ uint64_t table_pointer = *page_table_entry & MMU_DESC_PAGE_TABLE_MASK;
+ /* This cast should be safe since the address was inserted in this mode */
+ *sub_table = (uint64_t *) (uintptr_t) table_pointer;
+ } else {
+ /* allocate new page table and set block */
+ rtems_status_code sc = aarch64_mmu_page_table_alloc( sub_table );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ return sc;
+ }
+
+ aarch64_mmu_page_table_set_blocks(
+ *sub_table,
+ physical_root_address,
+ shift - MMU_BITS_PER_LEVEL,
+ *page_table_entry & ~MMU_DESC_PAGE_TABLE_MASK
+ );
+ *page_table_entry = (uintptr_t) *sub_table;
+ *page_table_entry |= MMU_DESC_TYPE_TABLE | MMU_DESC_VALID;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
+ uint64_t *page_table,
+ uintptr_t root_address,
+ uintptr_t addr,
+ uint64_t size,
+ uint32_t level,
+ uint64_t flags
+)
+{
+ uint32_t shift = ( 2 - level ) * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS;
+ uintptr_t granularity = 1 << shift;
+ uint64_t page_flag = 0;
+
+ if ( level == 2 ) {
+ page_flag = MMU_DESC_TYPE_PAGE;
+ }
+
+ while ( size > 0 ) {
+ uintptr_t index = aarch64_mmu_get_index( root_address, addr, shift );
+ uintptr_t block_bottom = RTEMS_ALIGN_DOWN( addr, granularity );
+ uint64_t chunk_size = granularity;
+
+ /* check for perfect block match */
+ if ( block_bottom == addr ) {
+ if ( size >= chunk_size ) {
+ /* when page_flag is set the last level must be a page descriptor */
+ if ( page_flag || ( page_table[index] & MMU_DESC_TYPE_TABLE ) != MMU_DESC_TYPE_TABLE ) {
+ /* no sub-table, apply block properties */
+ page_table[index] = addr | flags | page_flag;
+ size -= chunk_size;
+ addr += chunk_size;
+ continue;
+ }
+ } else {
+ /* block starts on a boundary, but is short */
+ chunk_size = size;
+ }
+ } else {
+ uintptr_t block_top = RTEMS_ALIGN_UP( addr, granularity );
+ chunk_size = block_top - addr;
+
+ if ( chunk_size > size ) {
+ chunk_size = size;
+ }
+ }
+
+ /* Deal with any subtable modification */
+ uintptr_t new_root_address = root_address + index * granularity;
+ uint64_t *sub_table = NULL;
+ rtems_status_code sc;
+
+ sc = aarch64_mmu_get_sub_table(
+ &page_table[index],
+ &sub_table,
+ new_root_address,
+ shift
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ return sc;
+ }
+
+ sc = aarch64_mmu_map_block(
+ sub_table,
+ new_root_address,
+ addr,
+ chunk_size,
+ level + 1,
+ flags
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ return sc;
+ }
+
+ size -= chunk_size;
+ addr += chunk_size;
+ }
+
+ return RTEMS_SUCCESSFUL;
+}
+
+BSP_START_DATA_SECTION extern const aarch64_mmu_config_entry
+ aarch64_mmu_config_table[];
+
+BSP_START_DATA_SECTION extern const size_t
+ aarch64_mmu_config_table_size;
+
+BSP_START_TEXT_SECTION static inline void
+aarch64_mmu_set_translation_table_entries(
+ uint64_t *ttb,
+ const aarch64_mmu_config_entry *config
+)
+{
+ /* Force alignment to 4k page size */
+ uintptr_t begin = RTEMS_ALIGN_DOWN( config->begin, MMU_PAGE_SIZE );
+ uintptr_t end = RTEMS_ALIGN_UP( config->end, MMU_PAGE_SIZE );
+ rtems_status_code sc;
+
+ sc = aarch64_mmu_map_block(
+ ttb,
+ 0x0,
+ begin,
+ end - begin,
+ 0,
+ config->flags
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ rtems_fatal_error_occurred( sc );
+ }
+}
+
+BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup_translation_table(
+ const aarch64_mmu_config_entry *config_table,
+ size_t config_count
+)
+{
+ size_t i;
+ uint64_t *ttb = (uint64_t *) bsp_translation_table_base;
+
+ aarch64_mmu_page_table_set_blocks(
+ ttb,
+ (uintptr_t) NULL,
+ MMU_TOP_LEVEL_PAGE_BITS,
+ 0
+ );
+
+ _AArch64_Write_ttbr0_el1( (uintptr_t) ttb );
+
+ /* Configure entries required for each memory section */
+ for ( i = 0; i < config_count; ++i ) {
+ aarch64_mmu_set_translation_table_entries( ttb, &config_table[i] );
+ }
+}
+
+BSP_START_TEXT_SECTION static inline void
+aarch64_mmu_setup_translation_table_and_enable(
+ const aarch64_mmu_config_entry *config_table,
+ size_t config_count
+)
+{
+ uint64_t sctlr;
+
+ aarch64_mmu_setup_translation_table(
+ config_table,
+ config_count
+ );
+
+ /* Enable MMU and cache */
+ sctlr = _AArch64_Read_sctlr_el1();
+ sctlr |= AARCH64_SCTLR_EL1_I | AARCH64_SCTLR_EL1_C | AARCH64_SCTLR_EL1_M;
+ _AArch64_Write_sctlr_el1( sctlr );
+}
+
+BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup( void )
+{
+ /* Set TCR */
+ /* 64GB/36 bits mappable (64-0x1c) */
+ _AArch64_Write_tcr_el1(
+ AARCH64_TCR_EL1_T0SZ( 0x1c ) | AARCH64_TCR_EL1_IRGN0( 0x1 ) |
+ AARCH64_TCR_EL1_ORGN0( 0x1 ) | AARCH64_TCR_EL1_SH0( 0x3 ) | AARCH64_TCR_EL1_TG0( 0x0 )
+ );
+
+ /* Set MAIR */
+ _AArch64_Write_mair_el1(
+ AARCH64_MAIR_EL1_ATTR0( 0x0 ) | AARCH64_MAIR_EL1_ATTR1( 0x4 ) |
+ AARCH64_MAIR_EL1_ATTR2( 0x44 ) | AARCH64_MAIR_EL1_ATTR3( 0xFF )
+ );
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* LIBBSP_AARCH64_SHARED_AARCH64_MMU_H */
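
A note on the descriptor encoding (not part of the change itself): with the MAIR_EL1
value programmed by aarch64_mmu_setup() above, attribute index 0 is Device-nGnRnE,
index 1 Device-nGnRE, index 2 Normal Non-cacheable and index 3 Normal Write-Back
cacheable (the standard VMSAv8-64 encodings 0x00, 0x04, 0x44 and 0xFF); the
AARCH64_MMU_* flag macros simply select one of those indices and OR in the access
bits. The expansion below is a worked cross-check derived from the definitions
above, not code that exists in the tree:

/*
 * AARCH64_MMU_FLAGS_BASE     = VALID | SH_INNER | AF
 *                            = 0x001 | 0x300    | 0x400        = 0x701
 * AARCH64_MMU_DEVICE         = 0x701 | AttrIndx 0              = 0x701
 * AARCH64_MMU_DATA_RW_CACHED = 0x701 | AttrIndx 3 ( 0x3 << 2 ) = 0x70d
 * AARCH64_MMU_DATA_RO_CACHED = 0x70d | WRITE_DISABLE ( bit 7 ) = 0x78d
 *
 * A level-2 (4 KiB page) descriptor for a cached read-write page at "addr"
 * therefore ends up as:
 *
 *   page_table[ index ] = addr | AARCH64_MMU_DATA_RW_CACHED | MMU_DESC_TYPE_PAGE;
 *   // == addr | 0x70f
 */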
diff --git a/bsps/aarch64/include/bsp/linker-symbols.h b/bsps/aarch64/include/bsp/linker-symbols.h
index c9a2bfd233..38c655ba97 100644
--- a/bsps/aarch64/include/bsp/linker-symbols.h
+++ b/bsps/aarch64/include/bsp/linker-symbols.h
@@ -104,6 +104,10 @@ LINKER_SYMBOL(bsp_section_bss_begin)
LINKER_SYMBOL(bsp_section_bss_end)
LINKER_SYMBOL(bsp_section_bss_size)
+LINKER_SYMBOL(bsp_section_rtemsstack_begin)
+LINKER_SYMBOL(bsp_section_rtemsstack_end)
+LINKER_SYMBOL(bsp_section_rtemsstack_size)
+
LINKER_SYMBOL(bsp_section_work_begin)
LINKER_SYMBOL(bsp_section_work_end)
LINKER_SYMBOL(bsp_section_work_size)
@@ -134,6 +138,9 @@ LINKER_SYMBOL(bsp_start_vector_table_begin)
LINKER_SYMBOL(bsp_start_vector_table_end)
LINKER_SYMBOL(bsp_start_vector_table_size)
+LINKER_SYMBOL(bsp_translation_table_base)
+LINKER_SYMBOL(bsp_translation_table_end)
+
#define BSP_FAST_TEXT_SECTION \
RTEMS_SECTION(".bsp_fast_text")
diff --git a/bsps/aarch64/xilinx-zynqmp/include/bsp.h b/bsps/aarch64/xilinx-zynqmp/include/bsp.h
index e405cc2ed7..83f2e2f4e4 100644
--- a/bsps/aarch64/xilinx-zynqmp/include/bsp.h
+++ b/bsps/aarch64/xilinx-zynqmp/include/bsp.h
@@ -61,6 +61,13 @@ extern "C" {
#define BSP_RESET_SMC
+/**
+ * @brief Zynq UltraScale+ MPSoC specific set up of the MMU.
+ *
+ * Provide in the application to override the defaults in the BSP.
+ */
+BSP_START_TEXT_SECTION void zynqmp_setup_mmu_and_cache(void);
+
void zynqmp_debug_console_flush(void);
#ifdef __cplusplus
diff --git a/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c b/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
index b165b42909..7bd787592c 100644
--- a/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
+++ b/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
@@ -47,5 +47,6 @@ BSP_START_TEXT_SECTION void bsp_start_hook_1(void)
{
AArch64_start_set_vector_base();
bsp_start_copy_sections();
+ zynqmp_setup_mmu_and_cache();
bsp_start_clear_bss();
}
diff --git a/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
new file mode 100644
index 0000000000..8d302e97b5
--- /dev/null
+++ b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64XilinxZynqMP
+ *
+ * @brief This source file contains the default MMU tables and setup.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bsp.h>
+#include <bsp/start.h>
+#include <bsp/aarch64-mmu.h>
+
+BSP_START_DATA_SECTION static const aarch64_mmu_config_entry
+zynqmp_mmu_config_table[] = {
+ AARCH64_MMU_DEFAULT_SECTIONS,
+#if defined( RTEMS_SMP )
+ {
+ .begin = 0xffff0000U,
+ .end = 0xffffffffU,
+ .flags = AARCH64_MMU_DEVICE
+ },
+#endif
+ {
+ .begin = 0xf9000000U,
+ .end = 0xf9100000U,
+ .flags = AARCH64_MMU_DEVICE
+ }, {
+ .begin = 0xfd000000U,
+ .end = 0xffc00000U,
+ .flags = AARCH64_MMU_DEVICE
+ }
+};
+
+/*
+ * Make weak and let the user override.
+ */
+BSP_START_TEXT_SECTION void
+zynqmp_setup_mmu_and_cache( void ) __attribute__ ((weak));
+
+BSP_START_TEXT_SECTION void
+zynqmp_setup_mmu_and_cache( void )
+{
+ aarch64_mmu_setup();
+
+ aarch64_mmu_setup_translation_table_and_enable(
+ &zynqmp_mmu_config_table[ 0 ],
+ RTEMS_ARRAY_SIZE( zynqmp_mmu_config_table )
+ );
+}
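
Because zynqmp_setup_mmu_and_cache() is declared weak, an application or a derived
BSP can swap in its own table without touching the BSP sources. A minimal sketch of
such an override follows; the 0xa0000000 PL window and its size are assumptions made
purely for illustration, everything else uses the helpers introduced by this patch:

#include <bsp.h>
#include <bsp/aarch64-mmu.h>

BSP_START_DATA_SECTION static const aarch64_mmu_config_entry
app_mmu_config_table[] = {
  AARCH64_MMU_DEFAULT_SECTIONS,
  {
    .begin = 0xf9000000U,      /* same device regions as the BSP default table */
    .end = 0xf9100000U,
    .flags = AARCH64_MMU_DEVICE
  }, {
    .begin = 0xfd000000U,
    .end = 0xffc00000U,
    .flags = AARCH64_MMU_DEVICE
  }, {
    .begin = 0xa0000000U,      /* hypothetical PL window mapped as device memory */
    .end = 0xa0100000U,
    .flags = AARCH64_MMU_DEVICE
  }
};

BSP_START_TEXT_SECTION void zynqmp_setup_mmu_and_cache( void )
{
  aarch64_mmu_setup();

  aarch64_mmu_setup_translation_table_and_enable(
    &app_mmu_config_table[ 0 ],
    RTEMS_ARRAY_SIZE( app_mmu_config_table )
  );
}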
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index d139fdc6a4..2a4ddbcc61 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -72,10 +72,6 @@
* * The exception returns to the previous execution state
*/
-/*
- * TODO(kmoore) The current implementation here assumes that SP is not
- * misaligned.
- */
.macro JUMP_HANDLER_SHORT
/* Mask to use in BIC, lower 7 bits */
mov x0, #0x7f
@@ -186,13 +182,50 @@ curr_el_sp0_serror_get_pc: /* The current PC is now in LR */
* the current SP.
*/
curr_el_spx_sync:
- msr SCTLR_EL1, XZR
- stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */
- bl curr_el_spx_sync_get_pc /* Get current execution address */
-curr_el_spx_sync_get_pc: /* The current PC is now in LR */
-/* Use short jump handler since this has an extra instruction to clear SCTLR */
- JUMP_HANDLER_SHORT
- JUMP_TARGET_SPx
+ msr spsel, #0 /* switch to exception stack */
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */
+ bl .push_exception_context_start /* bl to CEF store routine */
+/* Save original sp in x0 for .push_exception_context_finish */
+ msr spsel, #1
+ mov x0, sp
+ msr spsel, #0
+/* Push the remainder of the context */
+ bl .push_exception_context_finish
+/* get jump target and branch/link */
+ bl curr_el_spx_sync_get_pc /* Get current execution address */
+curr_el_spx_sync_get_pc: /* The current PC is now in LR */
+ mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */
+ bic x0, lr, x0 /* Mask LR to base of current vector */
+ ldr x1, [x0, #0x78] /* Load target from last word in vector */
+ and lr, lr, #0x780 /* Mask off bits for vector number */
+ lsr lr, lr, #7 /* Shift the vector bits down */
+/* Store the vector */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
+ mov x0, sp
+ blr x1
+/* bl to CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* get lr from CEF */
+/* drop space reserved for CEF and clear exclusive */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+ msr spsel, #1 /* switch to thread stack */
+ eret /* exception return */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+/* Takes up the space of 2 instructions */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ .word _AArch64_Exception_default
+ .word 0x0
+#else
+ .dword _AArch64_Exception_default
+#endif
.balign 0x80
/*
* The exception handler for IRQ exceptions from the current EL using the
@@ -446,7 +479,7 @@ twiddle:
/*
* Apply the exception frame to the current register status, SP points to the EF
*/
-.pop_exception_context_and_ret:
+.pop_exception_context:
/* Pop daif and spsr */
ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
/* Restore daif and spsr */
@@ -462,8 +495,6 @@ twiddle:
/* Restore fpcr and fpsr */
msr FPSR, x2
msr FPCR, x3
-/* Restore LR */
- ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
/* Pop VFP registers */
ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
@@ -496,12 +527,15 @@ twiddle:
ldp x24, x25, [sp, #0xc0]
ldp x26, x27, [sp, #0xd0]
ldp x28, x29, [sp, #0xe0]
-/* Pop sp (ignored since sp should be shortly restored anyway) and ELR */
+/* Pop sp and ELR */
ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
+/* Restore thread SP */
+ msr spsel, #1
+ mov sp, x0
+ msr spsel, #0
/* Restore exception LR */
msr ELR_EL1, x1
ldp x0, x1, [sp, #0x00]
- add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
/* We must clear reservations to ensure consistency with atomic operations */
clrex
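
The vector-slot arithmetic in the rewritten curr_el_spx_sync path is easier to read
in C. The helper below only paraphrases the mov/bic/ldr/and/lsr sequence above and
does not exist in the tree: bl leaves an address inside the 0x80-byte vector slot in
LR, the handler pointer sits in the slot's last eight bytes (offset 0x78), and bits
[10:7] of LR give the vector number that is stored into the exception frame.

#include <stdint.h>

/* C paraphrase of the jump-target computation in curr_el_spx_sync. */
static inline uint64_t spx_sync_jump_target(
  uintptr_t return_pc,        /* the value bl left in LR */
  uint64_t *vector_number
)
{
  uintptr_t slot_base = return_pc & ~(uintptr_t) 0x7f;  /* bic x0, lr, #0x7f   */
  *vector_number = ( return_pc & 0x780 ) >> 7;          /* and/lsr on lr       */
  return *(uint64_t *) ( slot_base + 0x78 );            /* ldr x1, [x0, #0x78] */
}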
diff --git a/spec/build/bsps/aarch64/optmmupages.yml b/spec/build/bsps/aarch64/optmmupages.yml
new file mode 100644
index 0000000000..d737002f87
--- /dev/null
+++ b/spec/build/bsps/aarch64/optmmupages.yml
@@ -0,0 +1,19 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+actions:
+- get-integer: null
+- assert-ge: 0x1
+- assert-le: 0x800
+- env-assign: null
+- format-and-define: null
+build-type: option
+copyrights:
+- Copyright (C) 2021 On-Line Applications Research (OAR)
+default: 0x40
+default-by-variant: []
+description: |
+ Defines the number of MMU translation table pages to provide.
+enabled-by: true
+format: '{:#010x}'
+links: []
+name: AARCH64_MMU_TRANSLATION_TABLE_PAGES
+type: build
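
The option lands in bspopts.h as AARCH64_MMU_TRANSLATION_TABLE_PAGES, clamped to the
range 0x1-0x800 with a default of 0x40 tables; it bounds the allocator in
aarch64-mmu.h and sizes the RAM_MMU region in the linker-command fragments below. It
should be overridable per BSP in the usual way through the waf config.ini, for
example (the variant name here is only illustrative):

[aarch64/xilinx_zynqmp_lp64_qemu]
AARCH64_MMU_TRANSLATION_TABLE_PAGES = 0x80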
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
index 38b9be59da..16e2b8a7e9 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
@@ -12,6 +12,8 @@ links:
- role: build-dependency
uid: ../start
- role: build-dependency
+ uid: ../optmmupages
+- role: build-dependency
uid: abi
- role: build-dependency
uid: obj
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_ilp32.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_ilp32.yml
index f0d9f71401..b530969de7 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_ilp32.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_ilp32.yml
@@ -30,8 +30,9 @@ content: |
*/
MEMORY {
- RAM : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_LOAD_OFFSET}, LENGTH = ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_LOAD_OFFSET} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
- NOCACHE : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}, LENGTH = ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
+ RAM : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_LOAD_OFFSET}, LENGTH = ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_LOAD_OFFSET} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES})
+ NOCACHE : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}) - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}, LENGTH = ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
+ RAM_MMU : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}), LENGTH = 0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}
}
REGION_ALIAS ("REGION_START", RAM);
@@ -58,6 +59,9 @@ content: |
bsp_vector_table_in_start_section = 1;
+ bsp_translation_table_base = ORIGIN (RAM_MMU);
+ bsp_translation_table_end = ORIGIN (RAM_MMU) + LENGTH (RAM_MMU);
+
OUTPUT_FORMAT ("elf32-littleaarch64")
OUTPUT_ARCH (aarch64:ilp32)
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_lp64.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_lp64.yml
index 045ab16d0a..d9696be5d4 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_lp64.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/linkcmds_lp64.yml
@@ -30,8 +30,9 @@ content: |
*/
MEMORY {
- RAM : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_LOAD_OFFSET}, LENGTH = ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_LOAD_OFFSET} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
- NOCACHE : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}, LENGTH = ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
+ RAM : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_LOAD_OFFSET}, LENGTH = ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - ${BSP_XILINX_ZYNQMP_LOAD_OFFSET} - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES})
+ NOCACHE : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}) - ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}, LENGTH = ${BSP_XILINX_ZYNQMP_NOCACHE_LENGTH}
+ RAM_MMU : ORIGIN = ${BSP_XILINX_ZYNQMP_RAM_BASE} + ${BSP_XILINX_ZYNQMP_RAM_LENGTH} - (0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}), LENGTH = 0x4000 * ${AARCH64_MMU_TRANSLATION_TABLE_PAGES}
}
REGION_ALIAS ("REGION_START", RAM);
@@ -58,6 +59,9 @@ content: |
bsp_vector_table_in_start_section = 1;
+ bsp_translation_table_base = ORIGIN (RAM_MMU);
+ bsp_translation_table_end = ORIGIN (RAM_MMU) + LENGTH (RAM_MMU);
+
OUTPUT_FORMAT ("elf64-littleaarch64")
OUTPUT_ARCH (aarch64)
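
Both linker-command fragments carve the translation-table storage out of the top of
RAM: the layout becomes RAM, then NOCACHE, then RAM_MMU at the highest addresses,
with 0x4000 bytes reserved per configured page. With the option's default of 0x40
that is 0x40 * 0x4000 = 0x100000 bytes (1 MiB), and bsp_translation_table_base /
bsp_translation_table_end bracket exactly that region for the page-table allocator
in aarch64-mmu.h.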
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml
index a4a4c74333..d8286bec35 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml
@@ -15,6 +15,7 @@ install:
- destination: ${BSP_INCLUDEDIR}/bsp
source:
- bsps/aarch64/xilinx-zynqmp/include/bsp/irq.h
+ - bsps/aarch64/include/bsp/aarch64-mmu.h
links: []
source:
- bsps/aarch64/shared/cache/cache.c
@@ -22,6 +23,7 @@ source:
- bsps/aarch64/xilinx-zynqmp/console/console.c
- bsps/aarch64/xilinx-zynqmp/start/bspstart.c
- bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
+- bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
- bsps/shared/dev/btimer/btimer-cpucounter.c
- bsps/shared/dev/clock/arm-generic-timer.c
- bsps/shared/dev/getentropy/getentropy-cpucounter.c