-rw-r--r--  bsps/aarch64/include/bsp/aarch64-mmu.h                 |  13
-rw-r--r--  bsps/aarch64/shared/start/aarch64-smp.c                |  85
-rw-r--r--  bsps/aarch64/shared/start/start.S                      |  12
-rw-r--r--  bsps/aarch64/xilinx-versal/start/bspstartmmu.c         |   4
-rw-r--r--  bsps/aarch64/xilinx-zynqmp/include/bsp.h               |   9
-rw-r--r--  bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c       |  40
-rw-r--r--  bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c         |  28
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S |   6
-rw-r--r--  cpukit/score/cpu/aarch64/cpu_asm.S                     |  78
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/asm.h           |   5
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpu.h     |  13
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h |  67
-rw-r--r--  spec/build/bsps/aarch64/xilinx-zynqmp/abi.yml          |   2
-rw-r--r--  spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml          |   2
-rw-r--r--  spec/build/bsps/aarch64/xilinx-zynqmp/objsmp.yml       |  16
-rw-r--r--  spec/build/cpukit/optsmp.yml                           |   4
-rw-r--r--  testsuites/smptests/smpfatal08/init.c                  |   2
17 files changed, 350 insertions, 36 deletions
diff --git a/bsps/aarch64/include/bsp/aarch64-mmu.h b/bsps/aarch64/include/bsp/aarch64-mmu.h
index e82012576f..a5f6e846f3 100644
--- a/bsps/aarch64/include/bsp/aarch64-mmu.h
+++ b/bsps/aarch64/include/bsp/aarch64-mmu.h
@@ -385,17 +385,14 @@ BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup_translation_table(
}
BSP_START_TEXT_SECTION static inline void
-aarch64_mmu_setup_translation_table_and_enable(
- const aarch64_mmu_config_entry *config_table,
- size_t config_count
-)
+aarch64_mmu_enable( void )
{
uint64_t sctlr;
- aarch64_mmu_setup_translation_table(
- config_table,
- config_count
- );
+ /* CPUECTLR_EL1.SMPEN is already set on ZynqMP and is not writable */
+
+ /* Invalidate cache */
+ rtems_cache_invalidate_entire_data();
/* Enable MMU and cache */
sctlr = _AArch64_Read_sctlr_el1();
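
The hunk above splits the old combined helper into a separate table-setup step and an enable step, so a BSP can install a translation table without immediately turning the MMU on. A minimal sketch of the new call sequence in a BSP start hook, mirroring the versal and zynqmp changes later in this diff (the table name is illustrative):

    /* Sketch of the split MMU bring-up sequence */
    aarch64_mmu_setup();                      /* basic MMU configuration */
    aarch64_mmu_setup_translation_table(
      &bsp_mmu_config_table[ 0 ],             /* illustrative table name */
      RTEMS_ARRAY_SIZE( bsp_mmu_config_table )
    );
    aarch64_mmu_enable();                     /* invalidate caches, set SCTLR_EL1 */
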
diff --git a/bsps/aarch64/shared/start/aarch64-smp.c b/bsps/aarch64/shared/start/aarch64-smp.c
new file mode 100644
index 0000000000..5ec7babce7
--- /dev/null
+++ b/bsps/aarch64/shared/start/aarch64-smp.c
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief SMP startup and interop code.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/score/smpimpl.h>
+
+#include <bsp/irq.h>
+
+static void bsp_inter_processor_interrupt( void *arg )
+{
+ _SMP_Inter_processor_interrupt_handler( _Per_CPU_Get() );
+}
+
+uint32_t _CPU_SMP_Initialize( void )
+{
+ return arm_gic_irq_processor_count();
+}
+
+void _CPU_SMP_Finalize_initialization( uint32_t cpu_count )
+{
+ if ( cpu_count > 0 ) {
+ rtems_status_code sc;
+
+ sc = rtems_interrupt_handler_install(
+ ARM_GIC_IRQ_SGI_0,
+ "IPI",
+ RTEMS_INTERRUPT_UNIQUE,
+ bsp_inter_processor_interrupt,
+ NULL
+ );
+ _Assert( sc == RTEMS_SUCCESSFUL );
+ (void) sc;
+
+#if defined( BSP_DATA_CACHE_ENABLED ) || \
+ defined( BSP_INSTRUCTION_CACHE_ENABLED )
+ /* Enable unified L2 cache */
+ rtems_cache_enable_data();
+#endif
+ }
+}
+
+void _CPU_SMP_Prepare_start_multitasking( void )
+{
+ /* Do nothing */
+}
+
+void _CPU_SMP_Send_interrupt( uint32_t target_processor_index )
+{
+ arm_gic_irq_generate_software_irq(
+ ARM_GIC_IRQ_SGI_0,
+ 1U << target_processor_index
+ );
+}
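
For context, the GIC software-generated interrupt used here addresses processors through a target bit mask, so the shift in _CPU_SMP_Send_interrupt() selects exactly one CPU. A hedged usage sketch (processor indices are illustrative, header is <bsp/irq.h> as included above):

    /* Raise the SGI 0 IPI on processor 2 (single-target mask) */
    _CPU_SMP_Send_interrupt( 2 );

    /* Equivalent direct call with an explicit target mask, e.g. CPUs 1 and 3 */
    arm_gic_irq_generate_software_irq(
      ARM_GIC_IRQ_SGI_0,
      ( 1U << 1 ) | ( 1U << 3 )
    );
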
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
index f4c39dacdf..bc6a855217 100644
--- a/bsps/aarch64/shared/start/start.S
+++ b/bsps/aarch64/shared/start/start.S
@@ -166,21 +166,21 @@ _el1_start:
#ifdef RTEMS_SMP
/* Read MPIDR and get current processor index */
mrs x7, mpidr_el1
- and x7, #0xff
+ and x7, x7, #0xff
#endif
#ifdef RTEMS_SMP
/*
* Get current per-CPU control and store it in PL1 only Thread ID
- * Register (TPIDRPRW).
+ * Register (TPIDR_EL1).
*/
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
ldr w1, =_Per_CPU_Information
#else
ldr x1, =_Per_CPU_Information
#endif
- add x1, x1, x7, asl #PER_CPU_CONTROL_SIZE_LOG2
- mcr p15, 0, x1, c13, c0, 4
+ add x1, x1, x7, lsl #PER_CPU_CONTROL_SIZE_LOG2
+ msr TPIDR_EL1, x1
#endif
@@ -201,8 +201,8 @@ _el1_start:
#endif
add x3, x1, x2
- /* Save original DAIF value */
- mrs x4, DAIF
+ /* Disable interrupts */
+ msr DAIFSet, #0x2
#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
mov x8, XZR
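
These fixes replace AArch32-isms: the and instruction needs two source operands, the shift mnemonic is lsl, the per-CPU pointer is written to TPIDR_EL1 with msr rather than a CP15 coprocessor access, and interrupts are now masked explicitly via DAIFSet instead of merely saving DAIF. For reference, a hedged C rendering of the processor-index calculation (Aff0 field of MPIDR_EL1):

    /* Illustrative only: derive the processor index from MPIDR_EL1 Aff0 */
    uint64_t mpidr;
    uint32_t cpu_index;

    __asm__ volatile ( "mrs %0, mpidr_el1" : "=r" ( mpidr ) );
    cpu_index = (uint32_t) ( mpidr & 0xffU );
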
diff --git a/bsps/aarch64/xilinx-versal/start/bspstartmmu.c b/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
index 6ab33cc4f1..8b622aec7d 100644
--- a/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
+++ b/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
@@ -70,8 +70,10 @@ versal_setup_mmu_and_cache( void )
{
aarch64_mmu_setup();
- aarch64_mmu_setup_translation_table_and_enable(
+ aarch64_mmu_setup_translation_table(
&versal_mmu_config_table[ 0 ],
RTEMS_ARRAY_SIZE( versal_mmu_config_table )
);
+
+ aarch64_mmu_enable();
}
diff --git a/bsps/aarch64/xilinx-zynqmp/include/bsp.h b/bsps/aarch64/xilinx-zynqmp/include/bsp.h
index 6d49b9ad2a..d937a313f2 100644
--- a/bsps/aarch64/xilinx-zynqmp/include/bsp.h
+++ b/bsps/aarch64/xilinx-zynqmp/include/bsp.h
@@ -60,6 +60,7 @@ extern "C" {
#define BSP_ARM_GIC_DIST_BASE 0xf9010000
#define BSP_RESET_SMC
+#define BSP_CPU_ON_USES_SMC
/**
* @brief Zynq UltraScale+ MPSoC specific set up of the MMU.
@@ -68,6 +69,14 @@ extern "C" {
*/
BSP_START_TEXT_SECTION void zynqmp_setup_mmu_and_cache(void);
+/**
+ * @brief Zynq UltraScale+ MPSoC specific set up of the MMU for non-primary
+ * cores.
+ *
+ * Provide in the application to override the defaults in the BSP.
+ */
+BSP_START_TEXT_SECTION void zynqmp_setup_secondary_cpu_mmu_and_cache( void );
+
void zynqmp_debug_console_flush(void);
uint32_t zynqmp_clock_i2c0(void);
diff --git a/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c b/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
index 7bd787592c..ad688088e5 100644
--- a/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
+++ b/bsps/aarch64/xilinx-zynqmp/start/bspstarthooks.c
@@ -38,12 +38,46 @@
#include <bsp.h>
#include <bsp/start.h>
-BSP_START_TEXT_SECTION void bsp_start_hook_0(void)
+#ifdef RTEMS_SMP
+#include <rtems/score/aarch64-system-registers.h>
+#include <rtems/score/smpimpl.h>
+
+#include <bsp/irq-generic.h>
+#endif
+
+BSP_START_TEXT_SECTION void bsp_start_hook_0( void )
{
- /* Do nothing */
+#ifdef RTEMS_SMP
+ uint32_t cpu_index_self;
+
+ cpu_index_self = _SMP_Get_current_processor();
+
+ if ( cpu_index_self != 0 ) {
+ if (
+ cpu_index_self >= rtems_configuration_get_maximum_processors()
+ || !_SMP_Should_start_processor( cpu_index_self )
+ ) {
+ while ( true ) {
+ _AARCH64_Wait_for_event();
+ }
+ }
+
+ /* Change the VBAR from the start to the normal vector table */
+ AArch64_start_set_vector_base();
+
+ zynqmp_setup_secondary_cpu_mmu_and_cache();
+ arm_gic_irq_initialize_secondary_cpu();
+
+ bsp_interrupt_vector_enable( ARM_GIC_IRQ_SGI_0 );
+ _SMP_Start_multitasking_on_secondary_processor(
+ _Per_CPU_Get_by_index( cpu_index_self )
+ );
+ }
+
+#endif
}
-BSP_START_TEXT_SECTION void bsp_start_hook_1(void)
+BSP_START_TEXT_SECTION void bsp_start_hook_1( void )
{
AArch64_start_set_vector_base();
bsp_start_copy_sections();
diff --git a/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
index 8d302e97b5..09012c9db5 100644
--- a/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
+++ b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
@@ -41,13 +41,6 @@
BSP_START_DATA_SECTION static const aarch64_mmu_config_entry
zynqmp_mmu_config_table[] = {
AARCH64_MMU_DEFAULT_SECTIONS,
-#if defined( RTEMS_SMP )
- {
- .begin = 0xffff0000U,
- .end = 0xffffffffU,
- .flags = AARCH64_MMU_DEVICE
- },
-#endif
{
.begin = 0xf9000000U,
.end = 0xf9100000U,
@@ -70,8 +63,27 @@ zynqmp_setup_mmu_and_cache( void )
{
aarch64_mmu_setup();
- aarch64_mmu_setup_translation_table_and_enable(
+ aarch64_mmu_setup_translation_table(
&zynqmp_mmu_config_table[ 0 ],
RTEMS_ARRAY_SIZE( zynqmp_mmu_config_table )
);
+
+ aarch64_mmu_enable();
+}
+
+/*
+ * Make weak and let the user override.
+ */
+BSP_START_TEXT_SECTION void zynqmp_setup_secondary_cpu_mmu_and_cache( void )
+__attribute__ ( ( weak ) );
+
+BSP_START_TEXT_SECTION void zynqmp_setup_secondary_cpu_mmu_and_cache( void )
+{
+ /* Perform basic MMU setup */
+ aarch64_mmu_setup();
+
+ /* Use the existing root page table already configured by CPU0 */
+ _AArch64_Write_ttbr0_el1( (uintptr_t) bsp_translation_table_base );
+
+ aarch64_mmu_enable();
}
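
Because the secondary-core hook is declared weak above, an application can override it, for example to add mappings or cache settings that only secondary cores need. A sketch of such an override, assuming the application pulls in the same headers the BSP file uses and keeps the root translation table already configured by CPU 0:

    #include <bsp.h>
    #include <bsp/aarch64-mmu.h>
    #include <rtems/score/aarch64-system-registers.h>

    /* Application override sketch: mirrors the weak BSP default */
    BSP_START_TEXT_SECTION void zynqmp_setup_secondary_cpu_mmu_and_cache( void )
    {
      aarch64_mmu_setup();

      /* Reuse the root translation table set up by CPU 0 */
      _AArch64_Write_ttbr0_el1( (uintptr_t) bsp_translation_table_base );

      /* Application-specific secondary-core adjustments could go here */

      aarch64_mmu_enable();
    }
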
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index cb0954a29b..b206f5764b 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -47,7 +47,11 @@
.globl _AArch64_Exception_interrupt_nest
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
- #define SELF_CPU_CONTROL_GET_REG w19
+ #ifdef RTEMS_SMP
+ #define SELF_CPU_CONTROL_GET_REG x19
+ #else
+ #define SELF_CPU_CONTROL_GET_REG w19
+ #endif
#else
#define SELF_CPU_CONTROL_GET_REG x19
#endif
diff --git a/cpukit/score/cpu/aarch64/cpu_asm.S b/cpukit/score/cpu/aarch64/cpu_asm.S
index 9e609e06bd..2379698336 100644
--- a/cpukit/score/cpu/aarch64/cpu_asm.S
+++ b/cpukit/score/cpu/aarch64/cpu_asm.S
@@ -55,13 +55,22 @@
*
*/
+DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
+ .globl _CPU_Context_switch_no_return
+ .set _CPU_Context_switch_no_return, _CPU_Context_switch
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
-#define reg_2 w2
+/* Sanitize inputs for ILP32 ABI */
+ mov w0, w0
+ mov w1, w1
+ #ifdef RTEMS_SMP
+ #define reg_2 x2
+ #else
+ #define reg_2 w2
+ #endif
#else
#define reg_2 x2
#endif
-DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
/* Start saving context */
GET_SELF_CPU_CONTROL reg_2
ldr w3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]
@@ -86,7 +95,30 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
str x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]
#ifdef RTEMS_SMP
-#error SMP not yet supported
+ /*
+ * The executing thread no longer executes on this processor. Switch
+ * the stack to the temporary interrupt stack of this processor. Mark
+ * the context of the executing thread as not executing.
+ */
+ dmb SY
+ add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
+ mov x3, #0
+ strb w3, [x0, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+
+.L_check_is_executing:
+
+ /* Check the is executing indicator of the heir context */
+ add x3, x1, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
+ ldaxrb w4, [x3]
+ cmp x4, #0
+ bne .L_get_potential_new_heir
+
+ /* Try to update the is executing indicator of the heir context */
+ mov x4, #1
+ stlxrb w5, w4, [x3]
+ cmp x5, #0
+ bne .L_get_potential_new_heir
+ dmb SY
#endif
/* Start restoring context */
@@ -129,6 +161,46 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
*
*/
DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+/* Sanitize input for ILP32 ABI */
+ mov w0, w0
+#endif
+
mov x1, x0
GET_SELF_CPU_CONTROL reg_2
b .L_restore
+
+#ifdef RTEMS_SMP
+.L_get_potential_new_heir:
+
+ /* We may have a new heir */
+
+ /* Read the executing and heir */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ ldr w4, [x2, #PER_CPU_OFFSET_EXECUTING]
+ ldr w5, [x2, #PER_CPU_OFFSET_HEIR]
+#else
+ ldr x4, [x2, #PER_CPU_OFFSET_EXECUTING]
+ ldr x5, [x2, #PER_CPU_OFFSET_HEIR]
+#endif
+
+ /*
+ * Update the executing only if necessary to avoid cache line
+ * monopolization.
+ */
+ cmp x4, x5
+ beq .L_check_is_executing
+
+ /* Calculate the heir context pointer */
+ sub x4, x1, x4
+ add x1, x5, x4
+
+ /* Update the executing */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ str w5, [x2, #PER_CPU_OFFSET_EXECUTING]
+#else
+ str x5, [x2, #PER_CPU_OFFSET_EXECUTING]
+#endif
+
+ b .L_check_is_executing
+#endif
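
The ldaxrb/stlxrb loop added above is the usual SMP hand-off: the heir's is-executing flag is acquired before its registers are restored, and a failed attempt re-reads the per-CPU heir in case the scheduler picked a new one in the meantime. A conceptual C11 sketch of the same handshake (the type and field names are simplified stand-ins, not the real score structures):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
      atomic_bool is_executing;   /* stand-in for the context flag */
      /* ... saved registers ... */
    } context_sketch;

    static void acquire_heir( context_sketch *executing, context_sketch **heir )
    {
      /* Release our own context first */
      atomic_store_explicit( &executing->is_executing, false, memory_order_release );

      for ( ;; ) {
        bool expected = false;

        /* Try to mark the heir as executing (acquire on success) */
        if (
          atomic_compare_exchange_weak_explicit(
            &( *heir )->is_executing,
            &expected,
            true,
            memory_order_acquire,
            memory_order_relaxed
          )
        ) {
          return;   /* safe to restore the heir's registers now */
        }

        /* Heir still runs elsewhere: re-read the per-CPU heir pointer here */
      }
    }
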
diff --git a/cpukit/score/cpu/aarch64/include/rtems/asm.h b/cpukit/score/cpu/aarch64/include/rtems/asm.h
index 35bf533c8a..fa53e08291 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/asm.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/asm.h
@@ -81,7 +81,12 @@
.align 8 ; .globl name ; name: ; .globl name ## _aarch64 ; name ## _aarch64:
.macro GET_SELF_CPU_CONTROL REG
+#ifdef RTEMS_SMP
+ /* Use Thread ID Register (TPIDR_EL1) */
+ mrs \REG, TPIDR_EL1
+#else
ldr \REG, =_Per_CPU_Information
+#endif
.endm
/** @} */
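
With this change the per-CPU control is fetched from TPIDR_EL1 in the SMP case, which start.S now initializes on every core. An illustrative C-level equivalent of the macro, purely as a sketch and not the helper the score actually uses:

    #include <stdint.h>
    #include <rtems/score/percpu.h>

    /* Illustrative equivalent of GET_SELF_CPU_CONTROL with RTEMS_SMP */
    static inline Per_CPU_Control *get_self_cpu_control_sketch( void )
    {
      uint64_t value;

      __asm__ volatile ( "mrs %0, TPIDR_EL1" : "=r" ( value ) );
      return (Per_CPU_Control *) (uintptr_t) value;
    }
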
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index dacc18638e..82f74193a2 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -134,9 +134,9 @@
#ifdef RTEMS_SMP
#if defined(AARCH64_MULTILIB_VFP)
- #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x70
+ #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0xb8
#else
- #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x30
+ #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x78
#endif
#endif
@@ -191,12 +191,12 @@ typedef struct {
static inline void _AARCH64_Data_memory_barrier( void )
{
- __asm__ volatile ( "dmb LD" : : : "memory" );
+ __asm__ volatile ( "dmb SY" : : : "memory" );
}
static inline void _AARCH64_Data_synchronization_barrier( void )
{
- __asm__ volatile ( "dsb LD" : : : "memory" );
+ __asm__ volatile ( "dsb SY" : : : "memory" );
}
static inline void _AARCH64_Instruction_synchronization_barrier( void )
@@ -313,6 +313,11 @@ void _CPU_ISR_install_vector(
*/
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
#ifdef RTEMS_SMP
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index 0536ecd860..90fd48ad4e 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -50,7 +50,7 @@
*/
#define CPU_PER_CPU_CONTROL_SIZE 0
-#define CPU_INTERRUPT_FRAME_SIZE 240
+#define CPU_INTERRUPT_FRAME_SIZE 0x2E0
#ifndef ASM
@@ -60,6 +60,71 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
+typedef struct {
+ uint64_t x0;
+ uint64_t register_lr_original;
+ uint64_t register_lr;
+ uint64_t x1;
+ uint64_t x2;
+ uint64_t x3;
+ uint64_t x4;
+ uint64_t x5;
+ uint64_t x6;
+ uint64_t x7;
+ uint64_t x8;
+ uint64_t x9;
+ uint64_t x10;
+ uint64_t x11;
+ uint64_t x12;
+ uint64_t x13;
+ uint64_t x14;
+ uint64_t x15;
+ uint64_t x16;
+ uint64_t x17;
+ uint64_t x18;
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+#ifdef AARCH64_MULTILIB_VFP
+ uint128_t q0;
+ uint128_t q1;
+ uint128_t q2;
+ uint128_t q3;
+ uint128_t q4;
+ uint128_t q5;
+ uint128_t q6;
+ uint128_t q7;
+ uint128_t q8;
+ uint128_t q9;
+ uint128_t q10;
+ uint128_t q11;
+ uint128_t q12;
+ uint128_t q13;
+ uint128_t q14;
+ uint128_t q15;
+ uint128_t q16;
+ uint128_t q17;
+ uint128_t q18;
+ uint128_t q19;
+ uint128_t q20;
+ uint128_t q21;
+ uint128_t q22;
+ uint128_t q23;
+ uint128_t q24;
+ uint128_t q25;
+ uint128_t q26;
+ uint128_t q27;
+ uint128_t q28;
+ uint128_t q29;
+ uint128_t q30;
+ uint128_t q31;
+#endif /* AARCH64_MULTILIB_VFP */
+ uint64_t register_elr;
+ uint64_t register_spsr;
+ uint64_t register_fpsr;
+ uint64_t register_fpcr;
+} CPU_Interrupt_frame;
+
void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
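
The C-level frame layout above has to fit inside the enlarged CPU_INTERRUPT_FRAME_SIZE (0x2E0) reserved per processor. A hedged compile-time check one could add next to the structure, using the existing RTEMS_STATIC_ASSERT helper from <rtems/score/basedefs.h>:

    /* Sketch: the C-level frame must not exceed the reserved area */
    RTEMS_STATIC_ASSERT(
      sizeof( CPU_Interrupt_frame ) <= CPU_INTERRUPT_FRAME_SIZE,
      CPU_Interrupt_frame_size
    );
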
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/abi.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/abi.yml
index 56c7694f35..59b5f922d4 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/abi.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/abi.yml
@@ -8,9 +8,11 @@ copyrights:
- Copyright (C) 2020 On-Line Applications Research (OAR)
default:
- -mcpu=cortex-a53
+- -mno-outline-atomics
default-by-variant:
- value:
- -mcpu=cortex-a53
+ - -mno-outline-atomics
- -mabi=ilp32
variants:
- aarch64/xilinx_zynqmp_ilp32_qemu
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
index 1a356903cd..9d4fe505c3 100644
--- a/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml
@@ -20,6 +20,8 @@ links:
- role: build-dependency
uid: obj
- role: build-dependency
+ uid: objsmp
+- role: build-dependency
uid: optloadoff
- role: build-dependency
uid: optnocachelen
diff --git a/spec/build/bsps/aarch64/xilinx-zynqmp/objsmp.yml b/spec/build/bsps/aarch64/xilinx-zynqmp/objsmp.yml
new file mode 100644
index 0000000000..752e6622c6
--- /dev/null
+++ b/spec/build/bsps/aarch64/xilinx-zynqmp/objsmp.yml
@@ -0,0 +1,16 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2021 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by:
+- RTEMS_SMP
+includes: []
+install: []
+links: []
+source:
+- bsps/aarch64/shared/start/aarch64-smp.c
+- bsps/shared/start/bspsmp-arm-psci.c
+type: build
diff --git a/spec/build/cpukit/optsmp.yml b/spec/build/cpukit/optsmp.yml
index eefeef44aa..b4a2d2c771 100644
--- a/spec/build/cpukit/optsmp.yml
+++ b/spec/build/cpukit/optsmp.yml
@@ -11,6 +11,10 @@ default-by-variant: []
description: |
Enable the Symmetric Multiprocessing (SMP) support
enabled-by:
+- aarch64/xilinx_zynqmp_ilp32_qemu
+- aarch64/xilinx_zynqmp_ilp32_zu3eg
+- aarch64/xilinx_zynqmp_lp64_qemu
+- aarch64/xilinx_zynqmp_lp64_zu3eg
- arm/altcycv_devkit
- arm/fvp_cortex_r52
- arm/imx7
diff --git a/testsuites/smptests/smpfatal08/init.c b/testsuites/smptests/smpfatal08/init.c
index bcfb3b72be..0964a6047c 100644
--- a/testsuites/smptests/smpfatal08/init.c
+++ b/testsuites/smptests/smpfatal08/init.c
@@ -76,7 +76,7 @@ void _CPU_SMP_Prepare_start_multitasking(void)
#if defined(RTEMS_PARAVIRT) \
|| (!defined(__leon__) && !defined(__PPC__) \
- && !defined(__arm__) && !defined(__riscv))
+ && !defined(__arm__) && !defined(__riscv) && !defined(__aarch64__))
uint32_t _CPU_SMP_Get_current_processor(void)
{
return 0;