Diffstat (limited to 'cpukit/score/cpu/aarch64')
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-default.S                        131
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-default.c                         79
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c                      4
-rw-r--r--  cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S                       165
-rw-r--r--  cpukit/score/cpu/aarch64/cpu.c                                                40
-rw-r--r--  cpukit/score/cpu/aarch64/cpu_asm.S                                            17
-rw-r--r--  cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h                       96
-rw-r--r--  cpukit/score/cpu/aarch64/include/machine/elf_machdep.h                       256
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h     1114
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpu.h                            63
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h                      42
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h                        46
12 files changed, 1860 insertions, 193 deletions
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index 2a4ddbcc61..c7c9d03465 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -72,7 +72,7 @@
* * The exception returns to the previous execution state
*/
- .macro JUMP_HANDLER_SHORT
+ .macro JUMP_HANDLER
/* Mask to use in BIC, lower 7 bits */
mov x0, #0x7f
/* LR contains PC, mask off to the base of the current vector */
@@ -109,10 +109,6 @@
nop
nop
nop
- .endm
-
- .macro JUMP_HANDLER
- JUMP_HANDLER_SHORT
nop
.endm
@@ -144,11 +140,48 @@ Vector_table_el3:
* using SP0.
*/
curr_el_sp0_sync:
- stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */
- bl curr_el_sp0_sync_get_pc /* Get current execution address */
-curr_el_sp0_sync_get_pc: /* The current PC is now in LR */
- JUMP_HANDLER
- JUMP_TARGET_SP0
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */
+ bl .push_exception_context_start /* bl to CEF store routine */
+/* Save original sp in x0 for .push_exception_context_finish */
+ add x0, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* save original sp */
+/* Push the remainder of the context */
+ bl .push_exception_context_finish
+/* get jump target and branch/link */
+ bl curr_el_sp0_sync_get_pc /* Get current execution address */
+curr_el_sp0_sync_get_pc: /* The current PC is now in LR */
+ mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */
+ bic x0, lr, x0 /* Mask LR to base of current vector */
+ ldr x1, [x0, #0x78] /* Load target from last word in vector */
+ and lr, lr, #0x780 /* Mask off bits for vector number */
+ lsr lr, lr, #7 /* Shift the vector bits down */
+/* Store the vector */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
+ mov x0, sp
+ blr x1
+ b twiddle
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+/* Takes up the space of 2 instructions */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ .word _AArch64_Exception_default
+ .word 0x0
+#else
+ .dword _AArch64_Exception_default
+#endif
.balign 0x80
/* The exception handler for IRQ exceptions from the current EL using SP0. */
curr_el_sp0_irq:
@@ -204,13 +237,11 @@ curr_el_spx_sync_get_pc: /* The current PC is now in LR */
str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
mov x0, sp
blr x1
-/* bl to CEF restore routine (doesn't restore lr) */
- bl .pop_exception_context
- ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* get lr from CEF */
-/* drop space reserved for CEF and clear exclusive */
- add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
- msr spsel, #1 /* switch to thread stack */
- eret /* exception return */
+ b twiddle
+ nop
+ nop
+ nop
+ nop
nop
nop
nop
@@ -475,69 +506,3 @@ twiddle:
stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
/* Done, return to exception handler */
ret
-
-/*
- * Apply the exception frame to the current register status, SP points to the EF
- */
-.pop_exception_context:
-/* Pop daif and spsr */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
-/* Restore daif and spsr */
- msr DAIF, x2
- msr SPSR_EL1, x3
-/* Pop FAR and ESR */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
-/* Restore ESR and FAR */
- msr ESR_EL1, x2
- msr FAR_EL1, x3
-/* Pop fpcr and fpsr */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
-/* Restore fpcr and fpsr */
- msr FPSR, x2
- msr FPCR, x3
-/* Pop VFP registers */
- ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
- ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
- ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
- ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
- ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
- ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
- ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
- ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
- ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
- ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
- ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
- ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
- ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
- ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
- ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
- ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
-/* Pop x0-x29(fp) */
- ldp x2, x3, [sp, #0x10]
- ldp x4, x5, [sp, #0x20]
- ldp x6, x7, [sp, #0x30]
- ldp x8, x9, [sp, #0x40]
- ldp x10, x11, [sp, #0x50]
- ldp x12, x13, [sp, #0x60]
- ldp x14, x15, [sp, #0x70]
- ldp x16, x17, [sp, #0x80]
- ldp x18, x19, [sp, #0x90]
- ldp x20, x21, [sp, #0xa0]
- ldp x22, x23, [sp, #0xb0]
- ldp x24, x25, [sp, #0xc0]
- ldp x26, x27, [sp, #0xd0]
- ldp x28, x29, [sp, #0xe0]
-/* Pop sp and ELR */
- ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore thread SP */
- msr spsel, #1
- mov sp, x0
- msr spsel, #0
-/* Restore exception LR */
- msr ELR_EL1, x1
- ldp x0, x1, [sp, #0x00]
-
-/* We must clear reservations to ensure consistency with atomic operations */
- clrex
-
- ret
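
The new curr_el_sp0_sync entry above finds its handler through the doubleword stored at offset 0x78 of each 0x80-byte vector slot and recovers the vector number from bits [10:7] of the return address. The following standalone C model of that address arithmetic is illustrative only (the function names are made up) and is not part of the patch.

#include <stdint.h>

typedef void ( *vector_handler )( void *frame );

/* Model of: bic x0, lr, #0x7f; ldr x1, [x0, #0x78] */
static vector_handler handler_from_return_address( uintptr_t lr )
{
  uintptr_t slot_base = lr & ~(uintptr_t) 0x7f;    /* base of the 0x80-byte slot */
  return *(vector_handler *) ( slot_base + 0x78 ); /* handler stored in the last 8 bytes */
}

/* Model of: and lr, lr, #0x780; lsr lr, lr, #7 */
static uint64_t vector_number_from_return_address( uintptr_t lr )
{
  return ( lr & 0x780 ) >> 7; /* slot index within the vector table */
}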
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
index 2ebb3dee9f..f1591cbd5d 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
@@ -41,10 +41,87 @@
#include "config.h"
#endif
-#include <rtems/score/cpu.h>
#include <rtems/fatal.h>
+#include <rtems/score/aarch64-system-registers.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/percpu.h>
void _AArch64_Exception_default( CPU_Exception_frame *frame )
{
+ uint64_t EC = AARCH64_ESR_EL1_EC_GET( frame->register_syndrome );
+
+ /* Emulate FPSR flags for FENV if a FPU exception occurred */
+ if ( EC == 0x2c ) {
+ /*
+ * This must be done because FENV depends on FPSR values, but trapped FPU
+ * exceptions don't set FPSR bits. In the case where a signal is mapped, the
+ * signal code executes after the exception frame is restored and FENV
+ * functions executed in that context will need this information to be
+ * accurate.
+ */
+ uint64_t ISS = AARCH64_ESR_EL1_ISS_GET( frame->register_syndrome );
+
+ /* If the exception bits are valid, use them */
+ if ( ( ISS & ( 1 << 23 ) ) != 0 ) {
+ /* The bits of the lower byte match the FPSR exception bits */
+ frame->register_fpsr |= ( ISS & 0xff );
+ }
+ }
+
rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
}
+
+void _CPU_Exception_disable_thread_dispatch( void )
+{
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+ /* Increment interrupt nest and thread dispatch disable level */
+ ++cpu_self->thread_dispatch_disable_level;
+ ++cpu_self->isr_nest_level;
+}
+
+void _AArch64_Exception_frame_copy(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+)
+{
+ *new_ef = *old_ef;
+}
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *ef )
+{
+ uint64_t EC = AARCH64_ESR_EL1_EC_GET( ef->register_syndrome );
+
+ switch ( EC ) {
+ case 0x1: /* WFI */
+ case 0x7: /* SVE/SIMD/FP */
+ case 0xa: /* LD64B/ST64B* */
+ case 0x18: /* MSR/MRS/system instruction */
+ case 0x19: /* SVE */
+ case 0x15: /* Supervisor call */
+ case 0x26: /* SP Alignment */
+ case 0x31: /* Breakpoint */
+ case 0x33: /* Step */
+ case 0x35: /* Watchpoint */
+ case 0x3c: /* Break Instruction */
+ return -1;
+ case 0x2c: /* FPU */
+ return SIGFPE;
+ case 0x21: /* Instruction Abort */
+ case 0x25: /* Data Abort */
+ return SIGSEGV;
+ default:
+ return SIGILL;
+ }
+}
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *ef, void *address )
+{
+ ef->register_pc = address;
+}
+
+#define AARCH64_INSTRUCTION_SIZE 4
+void _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *ef )
+{
+ ef->register_pc += AARCH64_INSTRUCTION_SIZE;
+}
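
Because the handler above ORs the low ISS byte into register_fpsr and _CPU_Exception_frame_get_signal() maps trapped FPU exceptions to SIGFPE, code running after the frame is restored can observe the emulated flags through <fenv.h>. Below is a hedged sketch of how that byte lines up with the FPSR cumulative exception flags; the macro names and helper are hypothetical, only the bit positions follow the ARMv8 FPSR layout.

#include <stdbool.h>
#include <stdint.h>

#define FPSR_IOC ( UINT64_C( 1 ) << 0 ) /* invalid operation */
#define FPSR_DZC ( UINT64_C( 1 ) << 1 ) /* divide by zero */
#define FPSR_OFC ( UINT64_C( 1 ) << 2 ) /* overflow */
#define FPSR_UFC ( UINT64_C( 1 ) << 3 ) /* underflow */
#define FPSR_IXC ( UINT64_C( 1 ) << 4 ) /* inexact */
#define FPSR_IDC ( UINT64_C( 1 ) << 7 ) /* input denormal */

/* Same masking as _AArch64_Exception_default: keep only the flag byte */
static bool iss_reports_divide_by_zero( uint64_t iss )
{
  bool flags_valid = ( iss & ( UINT64_C( 1 ) << 23 ) ) != 0;

  return flags_valid && ( ( iss & 0xff ) & FPSR_DZC ) != 0;
}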
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c b/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c
index e207a5a81d..04a3dfdc0d 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c
@@ -45,7 +45,7 @@
#include <inttypes.h>
#include <rtems/score/cpu.h>
-#include <rtems/score/io.h>
+#include <rtems/dev/io.h>
#include <rtems/bspIo.h>
typedef struct {
@@ -216,7 +216,7 @@ void _CPU_Exception_frame_print( const CPU_Exception_frame *frame )
for ( i = 0 ; i < 32 ; ++i ) {
uint64_t low = (uint64_t) qx[i];
- uint64_t high = (uint64_t) (qx[i] >> 32);
+ uint64_t high = (uint64_t) (qx[i] >> 64);
printk( "Q%02i = 0x%016" PRIx64 "%016" PRIx64 "\n", i, high, low );
}
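
The corrected shift above extracts the upper half of the 128-bit Q register value; a 32-bit shift would mix the two halves in the printed output. A minimal standalone check, assuming a compiler with __int128 support:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main( void )
{
  /* 128-bit value with distinct upper and lower halves */
  unsigned __int128 q = ( (unsigned __int128) UINT64_C( 0x1122334455667788 ) << 64 )
    | UINT64_C( 0x99aabbccddeeff00 );
  uint64_t low = (uint64_t) q;
  uint64_t high = (uint64_t) ( q >> 64 );

  /* Prints Q = 0x112233445566778899aabbccddeeff00 as two 16-digit halves */
  printf( "Q = 0x%016" PRIx64 "%016" PRIx64 "\n", high, low );
  return 0;
}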
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index b206f5764b..6344dce63a 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,8 @@
.globl _AArch64_Exception_interrupt_no_nest
.globl _AArch64_Exception_interrupt_nest
+.globl _CPU_Exception_dispatch_and_resume
+.globl _CPU_Exception_resume
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
#ifdef RTEMS_SMP
@@ -324,3 +326,166 @@ Return to embedded exception vector code
pop_interrupt_context
/* Return to vector for final cleanup */
ret
+
+/*
+ * This function is expected to resume execution using the CPU_Exception_frame
+ * provided in x0. This function does not adhere to the AAPCS64 calling
+ * convention because all necessary state is contained within the exception
+ * frame.
+ */
+_CPU_Exception_resume:
+/* Reset stack pointer */
+ mov sp, x0
+
+/* call CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+
+/* get lr from CEF */
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+
+/* switch to thread stack */
+ msr spsel, #1
+ eret
+
+/*
+ * This function is expected to undo dispatch disabling, perform dispatch, and
+ * resume execution using the CPU_Exception_frame provided in x0. This function
+ * does not adhere to the AAPCS64 calling convention because all necessary
+ * state is contained within the exception frame.
+ */
+_CPU_Exception_dispatch_and_resume:
+/* Get per-CPU control of current processor */
+ GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG
+
+/* Reset stack pointer */
+ mov sp, x0
+
+/* Check dispatch disable and perform dispatch if necessary */
+/* Load some per-CPU variables */
+ ldr w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+ ldrb w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
+ ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
+ ldr w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* Decrement levels and determine thread dispatch state */
+ eor w1, w1, w0
+ sub w0, w0, #1
+ orr w1, w1, w0
+ orr w1, w1, w2
+ sub w3, w3, #1
+
+/* Store thread dispatch disable and ISR nest levels */
+ str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+ str w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* store should_skip_thread_dispatch in x22 */
+ mov x22, x1
+
+/*
+ * It is now safe to assume that the source of the exception has been resolved.
+ * Copy the exception frame to the thread stack to be compatible with thread
+ * dispatch. This may arbitrarily clobber corruptible registers since all
+ * important state is contained in the exception frame.
+ *
+ * No need to save current LR since this will never return to the caller.
+ */
+ bl .move_exception_frame_and_switch_to_thread_stack
+
+/*
+ * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
+ * disable level.
+ */
+ cmp x22, #0
+ bne .Lno_need_thread_dispatch_resume
+ bl .AArch64_Perform_Thread_Dispatch
+.Lno_need_thread_dispatch_resume:
+/* call CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+
+/* get lr from CEF */
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+ eret
+
+/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */
+.move_exception_frame_and_switch_to_thread_stack:
+ mov x1, sp /* Set x1 to the current exception frame */
+ msr spsel, #1 /* switch to thread stack */
+ ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET] /* Get thread SP from exception frame since it may have been updated */
+ mov sp, x0
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ mov x0, sp /* Set x0 to the new exception frame */
+ mov x20, lr /* Save LR */
+ bl _AArch64_Exception_frame_copy /* Copy exception frame to reserved thread stack space */
+ mov lr, x20 /* Restore LR */
+ msr spsel, #0 /* switch to exception stack */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* release space for CEF on exception stack */
+ msr spsel, #1 /* switch to thread stack */
+ ret
+
+/*
+ * Apply the exception frame to the current register status, SP points to the EF
+ */
+.pop_exception_context:
+/* Pop daif and spsr */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
+/* Restore daif and spsr */
+ msr DAIF, x2
+ msr SPSR_EL1, x3
+/* Pop FAR and ESR */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
+/* Restore ESR and FAR */
+ msr ESR_EL1, x2
+ msr FAR_EL1, x3
+/* Pop fpcr and fpsr */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
+/* Restore fpcr and fpsr */
+ msr FPSR, x2
+ msr FPCR, x3
+/* Pop VFP registers */
+ ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
+ ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
+ ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
+ ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
+ ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
+ ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
+ ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
+ ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
+ ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
+ ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
+ ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
+ ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
+ ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
+ ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
+ ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
+ ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
+/* Pop x0-x29(fp) */
+ ldp x2, x3, [sp, #0x10]
+ ldp x4, x5, [sp, #0x20]
+ ldp x6, x7, [sp, #0x30]
+ ldp x8, x9, [sp, #0x40]
+ ldp x10, x11, [sp, #0x50]
+ ldp x12, x13, [sp, #0x60]
+ ldp x14, x15, [sp, #0x70]
+ ldp x16, x17, [sp, #0x80]
+ ldp x18, x19, [sp, #0x90]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x26, x27, [sp, #0xd0]
+ ldp x28, x29, [sp, #0xe0]
+/* Pop ELR, SP already popped */
+ ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
+/* Restore exception LR */
+ msr ELR_EL1, x1
+ ldp x0, x1, [sp, #0x00]
+
+/* We must clear reservations to ensure consistency with atomic operations */
+ clrex
+
+ ret
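
The eor/sub/orr sequence in _CPU_Exception_dispatch_and_resume collapses the dispatch decision into the single flag kept in x22: dispatch happens only when the dispatch-needed flag is set, the thread dispatch disable level is about to reach zero, and ISR dispatch is not disabled. A C model of that arithmetic follows; it is illustrative only, and the assembly above additionally writes the decremented levels back to the per-CPU control.

#include <stdbool.h>
#include <stdint.h>

static bool should_skip_thread_dispatch(
  uint32_t thread_dispatch_disable_level, /* w0 */
  uint32_t dispatch_needed,               /* w1, 0 or 1 */
  uint32_t isr_dispatch_disable           /* w2 */
)
{
  uint32_t skip;

  skip  = dispatch_needed ^ thread_dispatch_disable_level; /* eor w1, w1, w0 */
  skip |= thread_dispatch_disable_level - 1;               /* sub w0, w0, #1; orr w1, w1, w0 */
  skip |= isr_dispatch_disable;                            /* orr w1, w1, w2 */

  return skip != 0; /* value held in x22 by the assembly */
}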
diff --git a/cpukit/score/cpu/aarch64/cpu.c b/cpukit/score/cpu/aarch64/cpu.c
index b36f55ae17..207508302b 100644
--- a/cpukit/score/cpu/aarch64/cpu.c
+++ b/cpukit/score/cpu/aarch64/cpu.c
@@ -142,18 +142,24 @@ void _CPU_Context_Initialize(
the_context->thread_id = (uintptr_t) tls_area;
if ( tls_area != NULL ) {
- _TLS_TCB_at_area_begin_initialize( tls_area );
+ the_context->thread_id = (uintptr_t) _TLS_Initialize_area( tls_area );
}
}
void _CPU_ISR_Set_level( uint32_t level )
{
/* Set the mask bit if interrupts are disabled */
- level = level ? AARCH64_PSTATE_I : 0;
- __asm__ volatile (
- "msr DAIF, %[level]\n"
- : : [level] "r" (level)
- );
+ if ( level ) {
+ __asm__ volatile (
+ "msr DAIFSet, #0x2\n"
+ : : [level] "r" (level)
+ );
+ } else {
+ __asm__ volatile (
+ "msr DAIFClr, #0x2\n"
+ : : [level] "r" (level)
+ );
+ }
}
uint32_t _CPU_ISR_Get_level( void )
@@ -168,28 +174,6 @@ uint32_t _CPU_ISR_Get_level( void )
return ( level & AARCH64_PSTATE_I ) != 0;
}
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-)
-{
- /* Redirection table starts at the end of the vector table */
- CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
-
- CPU_ISR_handler current_handler = table [vector];
-
- /* The current handler is now the old one */
- if (old_handler != NULL) {
- *old_handler = current_handler;
- }
-
- /* Write only if necessary to avoid writes to a maybe read-only memory */
- if (current_handler != new_handler) {
- table [vector] = new_handler;
- }
-}
-
void _CPU_Initialize( void )
{
/* Do nothing */
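
The DAIFSet/DAIFClr immediate #0x2 used above selects only the IRQ mask: the 4-bit immediate is a D-A-I-F selector that maps onto DAIF[9:6], so 0x2 corresponds to the I bit read back at bit 7 by _CPU_ISR_Get_level(). A small standalone illustration of that mapping; the macro names are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define DAIFSET_I 0x2u        /* immediate used by msr DAIFSet/DAIFClr above */
#define PSTATE_I ( 1u << 7 )  /* I bit position in the DAIF register */

int main( void )
{
  uint64_t daif = 0;

  daif |= (uint64_t) DAIFSET_I << 6; /* effect of "msr DAIFSet, #0x2" */
  printf( "ISR level after set:   %d\n", ( daif & PSTATE_I ) != 0 );

  daif &= ~( (uint64_t) DAIFSET_I << 6 ); /* effect of "msr DAIFClr, #0x2" */
  printf( "ISR level after clear: %d\n", ( daif & PSTATE_I ) != 0 );
  return 0;
}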
diff --git a/cpukit/score/cpu/aarch64/cpu_asm.S b/cpukit/score/cpu/aarch64/cpu_asm.S
index 2379698336..6321acde90 100644
--- a/cpukit/score/cpu/aarch64/cpu_asm.S
+++ b/cpukit/score/cpu/aarch64/cpu_asm.S
@@ -203,4 +203,21 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
#endif
b .L_check_is_executing
+
+DEFINE_FUNCTION_AARCH64(_AArch64_Start_multitasking)
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ /* Sanitize input for ILP32 ABI */
+ mov w0, w0
+#endif
+
+ mov x1, x0
+ GET_SELF_CPU_CONTROL reg_2
+
+ /* Switch the stack to the temporary interrupt stack of this processor */
+ add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
+
+ /* Enable interrupts */
+ msr DAIFClr, #0x2
+
+ b .L_check_is_executing
#endif
diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
new file mode 100644
index 0000000000..ca9b60e6d1
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUAArch64
+ *
+ * @brief Definitions used in MMU setup.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LIBCPU_AARCH64_MMU_VMSAV8_64_H
+#define LIBCPU_AARCH64_MMU_VMSAV8_64_H
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <rtems.h>
+
+/* VMSAv8 Long-descriptor fields */
+#define MMU_DESC_AF ( 1 << 10 )
+#define MMU_DESC_SH_INNER ( ( 1 << 9 ) | ( 1 << 8 ) )
+#define MMU_DESC_WRITE_DISABLE ( 1 << 7 )
+/* PAGE and TABLE flags are the same bit, but only apply on certain levels */
+#define MMU_DESC_TYPE_TABLE ( 1 << 1 )
+#define MMU_DESC_TYPE_PAGE ( 1 << 1 )
+#define MMU_DESC_VALID ( 1 << 0 )
+#define MMU_DESC_MAIR_ATTR( val ) ( ( val & 0x3 ) << 2 )
+#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL
+
+/* Page table configuration */
+#define MMU_PAGE_BITS 12
+#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
+#define MMU_BITS_PER_LEVEL 9
+
+#define AARCH64_MMU_FLAGS_BASE \
+ ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
+
+#define AARCH64_MMU_DATA_RO_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED
+#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED
+
+#define AARCH64_MMU_DATA_RO \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 2 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO
+#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW
+
+/* RW implied by not ORing in RO */
+#define AARCH64_MMU_DATA_RW_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) )
+#define AARCH64_MMU_DATA_RW \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 2 ) )
+#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) )
+
+rtems_status_code aarch64_mmu_map(
+ uintptr_t addr,
+ uint64_t size,
+ uint64_t flags
+);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* ASM */
+
+#endif /* LIBCPU_AARCH64_MMU_VMSAV8_64_H */
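
A hedged usage sketch for the new header: the region addresses and sizes below are made up for illustration, and only aarch64_mmu_map() and the attribute macros come from the header above.

#include <libcpu/mmu-vmsav8-64.h>

static void map_example_regions( void )
{
  rtems_status_code sc;

  /* Device memory for a hypothetical memory-mapped peripheral */
  sc = aarch64_mmu_map( 0xf9000000U, 0x10000U, AARCH64_MMU_DEVICE );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* handle the mapping failure */
  }

  /* Cached, read-only data, e.g. a lookup table */
  sc = aarch64_mmu_map( 0x20000000U, 0x1000U, AARCH64_MMU_DATA_RO_CACHED );
  (void) sc;
}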
diff --git a/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
new file mode 100644
index 0000000000..c1d219d715
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
@@ -0,0 +1,256 @@
+/* $NetBSD: elf_machdep.h,v 1.4 2018/10/12 01:28:58 ryo Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ELF_MACHDEP_H_
+#define _AARCH64_ELF_MACHDEP_H_
+
+#ifdef __aarch64__
+
+#if defined(__AARCH64EB__)
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2MSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2MSB
+#else
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB
+#endif
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x00000001
+#define EF_ARM_HASENTRY 0x00000002
+#define EF_ARM_INTERWORK 0x00000004 /* GNU binutils 000413 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ARM ELF A08 */
+#define EF_ARM_APCS_26 0x00000008 /* GNU binutils 000413 */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ARM ELF B01 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* GNU binutils 000413 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ARM ELF B01 */
+#define EF_ARM_PIC 0x00000020
+#define EF_ARM_ALIGN8 0x00000040 /* 8-bit structure alignment. */
+#define EF_ARM_NEW_ABI 0x00000080
+#define EF_ARM_OLD_ABI 0x00000100
+#define EF_ARM_SOFT_FLOAT 0x00000200
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_EABIMASK 0xff000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define ELF32_MACHDEP_ID_CASES \
+ case EM_ARM: \
+ break;
+
+#define ELF64_MACHDEP_ID_CASES \
+ case EM_AARCH64: \
+ break;
+
+#define ELF64_MACHDEP_ID EM_AARCH64
+#define ELF32_MACHDEP_ID EM_ARM
+
+#define KERN_ELFSIZE 64
+#define ARCH_ELFSIZE 64 /* MD native binary size */
+
+/* Processor specific relocation types */
+
+#define R_AARCH64_NONE 0
+#define R_AARCH64_NONE2 256
+
+#define R_AARCH64_ABS64 257 /* S + A */
+#define R_AARCH64_ABS32 258 /* S + A */
+#define R_AARCH64_ABS16 259 /* S + A */
+#define R_AARCH64_PREL64 260 /* S + A - P */
+#define R_AARCH64_PREL32 261 /* S + A - P */
+#define R_AARCH64_PREL16 262 /* S + A - P */
+#define R_AARCH64_MOVW_UABS_G0 263 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G0_NC 264 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G1 265 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G1_NC 266 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G2 267 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G2_NC 268 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G3 269 /* S + A [bits 48..63] */
+#define R_AARCH64_MOVW_SABS_G0 270 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_SABS_G1 271 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_SABS_G2 272 /* S + A [bits 32..47] */
+#define R_AARCH64_LD_PREL_LO19 273 /* S + A - P */
+#define R_AARCH64_ADR_PREL_LO21 274 /* S + A - P */
+#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADD_ABS_LO12_NC 277 /* S + A */
+#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* S + A */
+#define R_AARCH_TSTBR14 279 /* S + A - P */
+#define R_AARCH_CONDBR19 281 /* S + A - P */
+#define R_AARCH_JUMP26 282 /* S + A - P */
+#define R_AARCH_CALL26 283 /* S + A - P */
+#define R_AARCH_LDST16_ABS_LO12_NC 284 /* S + A */
+#define R_AARCH_LDST32_ABS_LO12_NC 285 /* S + A */
+#define R_AARCH_LDST64_ABS_LO12_NC 286 /* S + A */
+#define R_AARCH64_MOVW_PREL_G0 287 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G0_NC 288 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1 289 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1_NC 290 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2 291 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2_NC 292 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G3 293 /* S + A - P */
+
+#define R_AARCH64_LDST128_ABS_LO12_NC 299 /* S + A */
+#define R_AARCH64_MOVW_GOTOFF_G0 300 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1 302 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2 304 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G3 306 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_GOTREL64 307 /* S + A - GOT */
+#define R_AARCH64_GOTREL32 308 /* S + A - GOT */
+#define R_AARCH64_GOT_LD_PREL19 309 /* G(GDAT(S + A)) - P */
+#define R_AARCH64_LD64_GOTOFF_LO15 310 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_ADR_GOT_PAGE 311 /* Page(G(GDAT(S + A))) - Page(GOT) */
+#define R_AARCH64_LD64_GOT_LO12_NC 312 /* G(GDAT(S + A)) */
+#define R_AARCH64_LD64_GOTPAGE_LO15 313 /* G(GDAT(S + A)) - Page(GOT) */
+
+#define R_AARCH64_TLSGD_ADR_PREL21 512 /* G(GTLSIDX(S,A)) - P */
+#define R_AARCH64_TLSGD_ADR_PAGE21 513 /* Page(G(GTLSIDX(S,A))) - Page(P) */
+#define R_AARCH64_TLSGD_ADD_LO12_NC 514 /* G(GTLSIDX(S,A)) */
+#define R_AARCH64_TLSGD_MOVW_G1 515 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSGD_MOVW_G0_NV 516 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSLD_ADR_PREL21 517 /* G(GLDM(S,A)) - P */
+#define R_AARCH64_TLSLD_ADR_PAGE21 518 /* Page(G(GLDM(S))) - Page(P) */
+#define R_AARCH64_TLSLD_ADD_LO12_NC 519 /* G(GLDM(S)) */
+#define R_AARCH64_TLSLD_MOVW_G1 520 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_MOVW_G0_NC 521 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_LD_PREL21 522 /* G(GLDM(S)) - P */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* DTPREL(S+A) */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 /* Page(G(GTPREL(S+A))) - Page(P) */
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 /* G(GTPREL(S+A)) */
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* G(GTPREL(S+A)) - P */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1 545 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1_NC 546 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0 547 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0_NC 548 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_HI12 549 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12 550 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12_NC 551 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12 552 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12_NC 553 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12 554 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12_NC 555 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12 556 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12_NC 557 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12 558 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12_NC 559 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC_LD_PREL19 560 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PREL21 561 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PAGE21 562 /* Page(G(GTLSDESC(S+A))) - Page(P) */
+#define R_AARCH64_TLSDESC_LD64_LO12 563 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_ADD_LO12 564 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_OFF_G1 565 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_OFF_G0_NC 566 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_LDR 567 /* */
+#define R_AARCH64_TLSDESC_ADD 568 /* */
+#define R_AARCH64_TLSDESC_CALL 569 /* */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12 570 /* TPREL(S+A) */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC 571 /* TPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12 572 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 572 /* DTPREL(S+A) */
+
+/* Dynamic Relocations */
+#define R_AARCH64_P32_COPY 180
+#define R_AARCH64_P32_GLOB_DAT 181 /* S + A */
+#define R_AARCH64_P32_JUMP_SLOT 182 /* S + A */
+#define R_AARCH64_P32_RELATIVE 183 /* Delta(S) + A */
+#define R_AARCH64_P32_TLS_DTPREL 184 /* DTPREL(S+A) */
+#define R_AARCH64_P32_TLS_DTPMOD 185 /* LBM(S) */
+#define R_AARCH64_P32_TLS_TPREL 186 /* TPREL(S+A) */
+#define R_AARCH64_P32_TLSDESC 187 /* TLSDESC(S+A) */
+#define R_AARCH64_P32_IRELATIVE 188 /* Indirect(Delta(S) + A) */
+
+#define R_AARCH64_COPY 1024
+#define R_AARCH64_GLOB_DAT 1025 /* S + A */
+#define R_AARCH64_JUMP_SLOT 1026 /* S + A */
+#define R_AARCH64_RELATIVE 1027 /* Delta(S) + A */
+#define R_AARCH64_TLS_DTPREL64 1028 /* DTPREL(S+A) */
+#define R_AARCH64_TLS_DTPMOD64 1029 /* LBM(S) */
+#define R_AARCH64_TLS_TPREL64 1030 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC 1031 /* TLSDESC(S+A) */
+#define R_AARCH64_IRELATIVE 1032 /* Indirect(Delta(S) + A) */
+
+#define R_TYPE(name) R_AARCH64_ ## name
+#define R_TLS_TYPE(name) R_AARCH64_ ## name ## 64
+
+/* Processor specific program header types */
+#define PT_AARCH64_ARCHEXT (PT_LOPROC + 0)
+#define PT_AARCH64_UNWIND (PT_LOPROC + 1)
+
+/* Processor specific section header flags */
+#define SHF_ENTRYSECT 0x10000000
+#define SHF_COMDEF 0x80000000
+
+#define SHT_AARCH64_ATTRIBUTES (SHT_LOPROC + 3)
+
+#ifdef _KERNEL
+#ifdef ELFSIZE
+#define ELF_MD_PROBE_FUNC ELFNAME2(aarch64_netbsd,probe)
+#endif
+
+struct exec_package;
+
+int aarch64_netbsd_elf64_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+int aarch64_netbsd_elf32_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+#endif
+
+#elif defined(__arm__)
+
+#include <arm/elf_machdep.h>
+
+#endif
+
+#endif /* _AARCH64_ELF_MACHDEP_H_ */
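
The annotations next to the relocation numbers above describe the value each one stores (S = symbol value, A = addend, P = place). A minimal sketch of how a hypothetical loader would apply the two simplest kinds follows; none of this code is part of the header.

#include <stdint.h>

/* R_AARCH64_ABS64: store S + A at the relocated location */
static void apply_abs64( uint64_t *where, uint64_t symbol_value, int64_t addend )
{
  *where = symbol_value + (uint64_t) addend;
}

/* R_AARCH64_PREL32: store S + A - P truncated to 32 bits */
static void apply_prel32( uint32_t *where, uint64_t symbol_value, int64_t addend )
{
  uint64_t place = (uint64_t) (uintptr_t) where;

  *where = (uint32_t) ( symbol_value + (uint64_t) addend - place );
}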
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
index dc2afdeca8..8ddad5becf 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
@@ -8,7 +8,7 @@
*/
/*
- * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -6719,21 +6719,291 @@ static inline uint64_t _AArch64_Read_dbgauthstatus_el1( void )
#define AARCH64_DBGBCR_N_EL1_BT_GET( _reg ) \
( ( ( _reg ) >> 20 ) & 0xfU )
-static inline uint64_t _AArch64_Read_dbgbcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -6781,21 +7051,291 @@ static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgbvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7027,21 +7567,291 @@ static inline void _AArch64_Write_dbgvcr32_el2( uint64_t value )
#define AARCH64_DBGWCR_N_EL1_MASK_GET( _reg ) \
( ( ( _reg ) >> 24 ) & 0x1fU )
-static inline uint64_t _AArch64_Read_dbgwcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7065,21 +7875,291 @@ static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgwvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
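
Each of the accessors above follows the same pattern: an MRS read of the named system register through an earlyclobber output, or an MSR write from the argument, both marked volatile with a "memory" clobber so the compiler neither caches nor reorders the access. The sketch below shows how the DBGWVR0_EL1 pair might be used; it is a minimal example that relies only on the accessors defined in this hunk, and the helper name and address handling are illustrative assumptions (arming the watchpoint would also require the matching control register, which is not shown here).

/* Minimal sketch: program debug watchpoint 0 with a virtual address.
 * Only _AArch64_Read_dbgwvr0_el1()/_AArch64_Write_dbgwvr0_el1() from the
 * header above are used; the helper itself is hypothetical.
 */
static void example_set_watchpoint0_address( uint64_t watched_va )
{
  uint64_t previous;

  /* Read the current watchpoint value, e.g. to save it for later restore */
  previous = _AArch64_Read_dbgwvr0_el1();
  (void) previous;

  /* DBGWVR0_EL1 holds the watched virtual address; bits [1:0] are RES0 */
  _AArch64_Write_dbgwvr0_el1( watched_va & ~(uint64_t) 0x3 );
}
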
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index ae7e2bdcba..aa4f90f1a8 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -43,6 +43,7 @@
#endif
#include <rtems/score/aarch64.h>
#include <libcpu/vectors.h>
+#include <limits.h>
/**
* @addtogroup RTEMSScoreCPUAArch64
@@ -101,7 +102,7 @@
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
-#define CPU_STACK_MINIMUM_SIZE (1024 * 10)
+#define CPU_STACK_MINIMUM_SIZE (1024 * 8)
/* This could be either 4 or 8, depending on the ABI in use.
* Could also use __LP64__ or __ILP32__ */
@@ -156,7 +157,14 @@
extern "C" {
#endif
+/*
+ * This is to fix the following warning:
+ * ISO C does not support '__int128' types
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef unsigned __int128 uint128_t;
+#pragma GCC diagnostic pop
typedef struct {
uint64_t register_x19;
@@ -252,7 +260,7 @@ static inline void AArch64_interrupt_flash( uint64_t isr_cookie )
#define _CPU_ISR_Flash( _isr_cookie ) \
AArch64_interrupt_flash( _isr_cookie )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
+static inline bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
{
return ( isr_cookie & AARCH64_PSTATE_I ) == 0;
}
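
The predicate above only inspects the saved PSTATE image: interrupts were enabled when the cookie was taken if the IRQ mask bit (AARCH64_PSTATE_I) is clear. A minimal usage sketch follows; _CPU_ISR_Disable() and _CPU_ISR_Enable() are the standard RTEMS port macros and are assumed to be defined elsewhere in this header rather than in this hunk.

/* Sketch: remember whether interrupts were enabled around a short
 * critical section.  Only _CPU_ISR_Is_enabled() comes from the hunk above;
 * the disable/enable macros are the usual port-provided ones.
 */
static void example_critical_section( void )
{
  uint64_t level;

  _CPU_ISR_Disable( level );

  if ( _CPU_ISR_Is_enabled( level ) ) {
    /* Interrupts were on before this section and will be on again after */
  }

  _CPU_ISR_Enable( level );
}
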
@@ -285,6 +293,10 @@ void _CPU_Context_Initialize(
{
context->is_executing = is_executing;
}
+
+ RTEMS_NO_RETURN void _AArch64_Start_multitasking( Context_Control *heir );
+
+ #define _CPU_Start_multitasking( _heir ) _AArch64_Start_multitasking( _heir )
#endif
#define _CPU_Context_Restart_self( _the_context ) \
@@ -302,12 +314,6 @@ void _CPU_Initialize( void );
typedef void ( *CPU_ISR_handler )( void );
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-);
-
/**
* @brief CPU switch context.
*/
@@ -378,14 +384,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
void *_CPU_Thread_Idle_body( uintptr_t ignored );
typedef enum {
@@ -406,7 +404,7 @@ typedef enum {
AARCH64_EXCEPTION_LEL32_FIQ = 14,
AARCH64_EXCEPTION_LEL32_SERROR = 15,
MAX_EXCEPTIONS = 16,
- AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
+ AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = INT_MAX
} AArch64_symbolic_exception_name;
#define VECTOR_POINTER_OFFSET 0x78
@@ -434,7 +432,15 @@ static inline void* AArch64_set_exception_handler(
*vector_address = handler;
/* return now-previous vector pointer */
- return (void*)current_vector_pointer;
+
+/*
+ * This was put in to fix the following warning:
+ * warning: ISO C forbids conversion of function pointer to object pointer type.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ return (void*)current_vector_pointer;
+#pragma GCC diagnostic pop
}
typedef struct {
@@ -524,6 +530,27 @@ typedef struct {
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame );
+
+RTEMS_NO_RETURN void
+_CPU_Exception_dispatch_and_resume( CPU_Exception_frame *frame );
+
+void _CPU_Exception_disable_thread_dispatch( void );
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame );
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame,
+ void *address );
+
+void _CPU_Exception_frame_make_resume_next_instruction(
+ CPU_Exception_frame *frame
+);
+
+void _AArch64_Exception_frame_copy(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+);
+
void _AArch64_Exception_default( CPU_Exception_frame *frame );
/** Type that can store a 32-bit integer or a pointer. */
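
The declarations above extend the port with an exception-to-signal path: a handler can query a signal number for the frame, adjust the resume address, and hand the frame back so the interrupted context continues. The sketch below is a hypothetical handler body under those assumptions; only the _CPU_Exception_* and _AArch64_Exception_default names come from this hunk, and the treatment of the return value of _CPU_Exception_frame_get_signal() is an assumption.

/* Hypothetical handler sketch: map the exception to a signal and resume
 * after the faulting instruction.  Assumes a negative return value means
 * "no signal available".
 */
static void example_handle_exception( CPU_Exception_frame *frame )
{
  int signal = _CPU_Exception_frame_get_signal( frame );

  if ( signal >= 0 ) {
    /* Skip the faulting instruction and restart the interrupted thread */
    _CPU_Exception_frame_make_resume_next_instruction( frame );
    _CPU_Exception_dispatch_and_resume( frame ); /* declared RTEMS_NO_RETURN */
  }

  /* Otherwise fall back to the default fatal error handling */
  _AArch64_Exception_default( frame );
}
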
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
deleted file mode 100644
index ed8091d73c..0000000000
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-
-/**
- * @file
- *
- * @ingroup RTEMSScoreCPU
- *
- * @brief AArch64 Atomics support
- */
-
-/*
- * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
- * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
-#define _RTEMS_SCORE_ATOMIC_CPU_H
-
-#include <rtems/score/cpustdatomic.h>
-
-#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index 90fd48ad4e..8a0e476899 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -50,8 +50,11 @@
*/
#define CPU_PER_CPU_CONTROL_SIZE 0
+
#define CPU_INTERRUPT_FRAME_SIZE 0x2E0
+#define CPU_THREAD_LOCAL_STORAGE_VARIANT 11
+
#ifndef ASM
#ifdef __cplusplus
@@ -125,20 +128,59 @@ typedef struct {
uint64_t register_fpcr;
} CPU_Interrupt_frame;
+#ifdef RTEMS_SMP
+
+static inline
+struct Per_CPU_Control *_AARCH64_Get_current_per_CPU_control( void )
+{
+ struct Per_CPU_Control *cpu_self;
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ /* Use EL1 Thread ID Register (TPIDR_EL1) */
+ cpu_self = (struct Per_CPU_Control *)(uintptr_t)value;
+
+ return cpu_self;
+}
+
+#define _CPU_Get_current_per_CPU_control() \
+ _AARCH64_Get_current_per_CPU_control()
+
+#endif /* RTEMS_SMP */
+
void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".inst 0x0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ __asm__ volatile (
+ "msr TPIDR_EL0, %0" : : "r" ( context->thread_id ) : "memory"
+ );
+}
+
+static inline void *_CPU_Get_TLS_thread_pointer(
+ const Context_Control *context
+)
+{
+ return (void *)(uintptr_t) context->thread_id;
+}
+
#ifdef __cplusplus
}
#endif
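
Two separate thread-pointer registers are in play here: TPIDR_EL1 carries the Per_CPU_Control pointer of the executing processor (SMP builds only), while TPIDR_EL0 receives the thread's TLS pointer in _CPU_Use_thread_local_storage(). The sketch below is the read-side counterpart of that TLS store; it mirrors the MRS pattern used above, and the helper itself is not part of this diff.

/* Hypothetical helper: read back the TLS pointer that
 * _CPU_Use_thread_local_storage() installed in TPIDR_EL0.
 */
static inline void *example_get_active_tls_pointer( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL0" : "=&r" ( value ) : : "memory"
  );

  return (void *)(uintptr_t) value;
}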