author:    Kinsey Moore <kinsey.moore@oarcorp.com>  2021-07-26 15:43:00 -0500
committer: Joel Sherrill <joel@rtems.org>           2021-09-21 08:58:32 -0500
commit:    5f652cb27e0134362e0160135124352539315845
tree:      a370df31b31472331566975319b8fc9fa950d4d4 /cpukit/score/cpu
parent:    bsps/gicv2: Allow BSPs to define IRQ attributes
cpukit: Add AArch64 SMP Support
This adds SMP support for AArch64 in cpukit and for the ZynqMP BSPs.
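As a usage note (not part of this patch): with this change an SMP-capable
application can be configured for multiple processors on ZynqMP. A minimal
sketch using only standard RTEMS configuration options and the
rtems_scheduler_get_processor_maximum() directive:

#include <rtems.h>
#include <inttypes.h>
#include <stdio.h>

/* Minimal sketch, not from this patch: report how many processors the
 * SMP scheduler brought online. */
static rtems_task Init( rtems_task_argument arg )
{
  (void) arg;
  printf(
    "online processors: %" PRIu32 "\n",
    rtems_scheduler_get_processor_maximum()
  );
  rtems_task_exit();
}

#define CONFIGURE_MAXIMUM_PROCESSORS 4
#define CONFIGURE_MAXIMUM_TASKS 1
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>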
Diffstat (limited to 'cpukit/score/cpu')
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S  |  6
-rw-r--r--  cpukit/score/cpu/aarch64/cpu_asm.S                      | 78
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/asm.h            |  5
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpu.h      | 13
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h  | 67
5 files changed, 160 insertions(+), 9 deletions(-)
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index cb0954a29b..b206f5764b 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -47,7 +47,11 @@
.globl _AArch64_Exception_interrupt_nest
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
- #define SELF_CPU_CONTROL_GET_REG w19
+ #ifdef RTEMS_SMP
+ #define SELF_CPU_CONTROL_GET_REG x19
+ #else
+ #define SELF_CPU_CONTROL_GET_REG w19
+ #endif
#else
#define SELF_CPU_CONTROL_GET_REG x19
#endif
diff --git a/cpukit/score/cpu/aarch64/cpu_asm.S b/cpukit/score/cpu/aarch64/cpu_asm.S
index 9e609e06bd..2379698336 100644
--- a/cpukit/score/cpu/aarch64/cpu_asm.S
+++ b/cpukit/score/cpu/aarch64/cpu_asm.S
@@ -55,13 +55,22 @@
*
*/
+DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
+ .globl _CPU_Context_switch_no_return
+ .set _CPU_Context_switch_no_return, _CPU_Context_switch
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
-#define reg_2 w2
+/* Sanitize inputs for ILP32 ABI */
+ mov w0, w0
+ mov w1, w1
+ #ifdef RTEMS_SMP
+ #define reg_2 x2
+ #else
+ #define reg_2 w2
+ #endif
#else
#define reg_2 x2
#endif
-DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
/* Start saving context */
GET_SELF_CPU_CONTROL reg_2
ldr w3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]
@@ -86,7 +95,30 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
str x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]
#ifdef RTEMS_SMP
-#error SMP not yet supported
+ /*
+ * The executing thread no longer executes on this processor. Switch
+ * the stack to the temporary interrupt stack of this processor. Mark
+ * the context of the executing thread as not executing.
+ */
+ dmb SY
+ add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
+ mov x3, #0
+ strb w3, [x0, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+
+.L_check_is_executing:
+
+ /* Check the is executing indicator of the heir context */
+ add x3, x1, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
+ ldaxrb w4, [x3]
+ cmp x4, #0
+ bne .L_get_potential_new_heir
+
+ /* Try to update the is executing indicator of the heir context */
+ mov x4, #1
+ stlxrb w5, w4, [x3]
+ cmp x5, #0
+ bne .L_get_potential_new_heir
+ dmb SY
#endif
/* Start restoring context */
@@ -129,6 +161,46 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
*
*/
DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+/* Sanitize input for ILP32 ABI */
+ mov w0, w0
+#endif
+
mov x1, x0
GET_SELF_CPU_CONTROL reg_2
b .L_restore
+
+#ifdef RTEMS_SMP
+.L_get_potential_new_heir:
+
+ /* We may have a new heir */
+
+ /* Read the executing and heir */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ ldr w4, [x2, #PER_CPU_OFFSET_EXECUTING]
+ ldr w5, [x2, #PER_CPU_OFFSET_HEIR]
+#else
+ ldr x4, [x2, #PER_CPU_OFFSET_EXECUTING]
+ ldr x5, [x2, #PER_CPU_OFFSET_HEIR]
+#endif
+
+ /*
+ * Update the executing only if necessary to avoid cache line
+ * monopolization.
+ */
+ cmp x4, x5
+ beq .L_check_is_executing
+
+ /* Calculate the heir context pointer */
+ sub x4, x1, x4
+ add x1, x5, x4
+
+ /* Update the executing */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ str w5, [x2, #PER_CPU_OFFSET_EXECUTING]
+#else
+ str x5, [x2, #PER_CPU_OFFSET_EXECUTING]
+#endif
+
+ b .L_check_is_executing
+#endif
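The ldaxrb/stlxrb handshake above can be read as the following C11 atomics
sketch (illustrative only; the names context_sketch and claim_heir_context
are hypothetical, and the authoritative version is the assembly above):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the is-executing flag in Context_Control. */
typedef struct {
  atomic_bool is_executing;
} context_sketch;

static void claim_heir_context( context_sketch *heir )
{
  bool expected = false;

  /* Spin until the previous processor releases the heir context, then
   * mark it executing: acquire on the load, release on the store, just
   * like the ldaxrb/stlxrb pair. */
  while ( !atomic_compare_exchange_weak_explicit(
             &heir->is_executing,
             &expected,
             true,
             memory_order_acq_rel,
             memory_order_acquire
           ) ) {
    expected = false;
    /* The assembly re-reads the per-CPU executing/heir pointers here
     * (.L_get_potential_new_heir), since the scheduler may have picked
     * a new heir while this processor was waiting. */
  }
}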
diff --git a/cpukit/score/cpu/aarch64/include/rtems/asm.h b/cpukit/score/cpu/aarch64/include/rtems/asm.h
index 35bf533c8a..fa53e08291 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/asm.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/asm.h
@@ -81,7 +81,12 @@
.align 8 ; .globl name ; name: ; .globl name ## _aarch64 ; name ## _aarch64:
.macro GET_SELF_CPU_CONTROL REG
+#ifdef RTEMS_SMP
+ /* Use Thread ID Register (TPIDR_EL1) */
+ mrs \REG, TPIDR_EL1
+#else
ldr \REG, =_Per_CPU_Information
+#endif
.endm
/** @} */
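In SMP builds GET_SELF_CPU_CONTROL no longer loads the address of a single
_Per_CPU_Information instance; each processor finds its own Per_CPU_Control
through the thread ID register. A C-level sketch of the same idea (the
function name is hypothetical):

/* Sketch: SMP startup stores each processor's Per_CPU_Control address
 * in TPIDR_EL1, so reading the register yields the current processor's
 * structure. TPIDR_EL1 is a 64-bit system register, which is also why
 * the ILP32 SMP paths above must use x rather than w registers. */
static inline void *aarch64_get_self_cpu_control( void )
{
  void *cpu_self;

  __asm__ volatile ( "mrs %0, TPIDR_EL1" : "=r" ( cpu_self ) );
  return cpu_self;
}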
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index dacc18638e..82f74193a2 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -134,9 +134,9 @@
#ifdef RTEMS_SMP
#if defined(AARCH64_MULTILIB_VFP)
- #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x70
+ #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0xb8
#else
- #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x30
+ #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x78
#endif
#endif
@@ -191,12 +191,12 @@ typedef struct {
static inline void _AARCH64_Data_memory_barrier( void )
{
- __asm__ volatile ( "dmb LD" : : : "memory" );
+ __asm__ volatile ( "dmb SY" : : : "memory" );
}
static inline void _AARCH64_Data_synchronization_barrier( void )
{
- __asm__ volatile ( "dsb LD" : : : "memory" );
+ __asm__ volatile ( "dsb SY" : : : "memory" );
}
static inline void _AARCH64_Instruction_synchronization_barrier( void )
@@ -313,6 +313,11 @@ void _CPU_ISR_install_vector(
*/
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
#ifdef RTEMS_SMP
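Offsets like AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET are maintained by
hand for the assembly code and are normally cross-checked against the C
layout. A hedged sketch of such a check (it assumes the Context_Control
member is named is_executing, as in the 32-bit ARM port):

#include <stddef.h>
#include <rtems/score/cpu.h>

/* Illustrative consistency check, assuming the member name matches the
 * 32-bit ARM port. */
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, is_executing )
    == AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
  AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
);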
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index 0536ecd860..90fd48ad4e 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -50,7 +50,7 @@
*/
#define CPU_PER_CPU_CONTROL_SIZE 0
-#define CPU_INTERRUPT_FRAME_SIZE 240
+#define CPU_INTERRUPT_FRAME_SIZE 0x2E0
#ifndef ASM
@@ -60,6 +60,71 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
+typedef struct {
+ uint64_t x0;
+ uint64_t register_lr_original;
+ uint64_t register_lr;
+ uint64_t x1;
+ uint64_t x2;
+ uint64_t x3;
+ uint64_t x4;
+ uint64_t x5;
+ uint64_t x6;
+ uint64_t x7;
+ uint64_t x8;
+ uint64_t x9;
+ uint64_t x10;
+ uint64_t x11;
+ uint64_t x12;
+ uint64_t x13;
+ uint64_t x14;
+ uint64_t x15;
+ uint64_t x16;
+ uint64_t x17;
+ uint64_t x18;
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+#ifdef AARCH64_MULTILIB_VFP
+ uint128_t q0;
+ uint128_t q1;
+ uint128_t q2;
+ uint128_t q3;
+ uint128_t q4;
+ uint128_t q5;
+ uint128_t q6;
+ uint128_t q7;
+ uint128_t q8;
+ uint128_t q9;
+ uint128_t q10;
+ uint128_t q11;
+ uint128_t q12;
+ uint128_t q13;
+ uint128_t q14;
+ uint128_t q15;
+ uint128_t q16;
+ uint128_t q17;
+ uint128_t q18;
+ uint128_t q19;
+ uint128_t q20;
+ uint128_t q21;
+ uint128_t q22;
+ uint128_t q23;
+ uint128_t q24;
+ uint128_t q25;
+ uint128_t q26;
+ uint128_t q27;
+ uint128_t q28;
+ uint128_t q29;
+ uint128_t q30;
+ uint128_t q31;
+#endif /* AARCH64_MULTILIB_VFP */
+ uint64_t register_elr;
+ uint64_t register_spsr;
+ uint64_t register_fpsr;
+ uint64_t register_fpcr;
+} CPU_Interrupt_frame;
+
void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
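The new CPU_INTERRUPT_FRAME_SIZE of 0x2E0 (736 bytes) matches this layout in
the VFP case: 24 eight-byte integer slots plus 32 sixteen-byte Q registers
plus 4 eight-byte status registers gives 192 + 512 + 32 = 736. A sketch of
the usual consistency check:

#include <rtems/score/cpuimpl.h>

/* Illustrative check that the hand-written constant covers the C
 * layout; with AARCH64_MULTILIB_VFP the two are equal at 736 bytes. */
RTEMS_STATIC_ASSERT(
  sizeof( CPU_Interrupt_frame ) <= CPU_INTERRUPT_FRAME_SIZE,
  CPU_Interrupt_frame_size
);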