diff options
Diffstat (limited to 'cpukit/score/cpu')
257 files changed, 20906 insertions, 3045 deletions
diff --git a/cpukit/score/cpu/aarch64/aarch64-context-validate.S b/cpukit/score/cpu/aarch64/aarch64-context-validate.S index 57f634934b..1daa0d6bf2 100644 --- a/cpukit/score/cpu/aarch64/aarch64-context-validate.S +++ b/cpukit/score/cpu/aarch64/aarch64-context-validate.S @@ -42,30 +42,49 @@ #include <rtems/asm.h> #include <rtems/score/cpu.h> +#include <rtems/score/basedefs.h> -#define FRAME_OFFSET_X4 0x00 -#define FRAME_OFFSET_X5 0x08 -#define FRAME_OFFSET_X6 0x10 -#define FRAME_OFFSET_X7 0x18 -#define FRAME_OFFSET_X8 0x20 -#define FRAME_OFFSET_X9 0x28 -#define FRAME_OFFSET_X10 0x30 -#define FRAME_OFFSET_X11 0x38 -#define FRAME_OFFSET_LR 0x40 +/* + * This register size applies to X (integer) registers as well as the D (lower + * half floating point) registers. It does not apply to V (full size floating + * point) registers or W (lower half integer) registers. + */ +#define AARCH64_REGISTER_SIZE 8 + +/* According to the AAPCS64, X19-X28 are callee-saved registers */ +#define FRAME_OFFSET_X19 0x00 +#define FRAME_OFFSET_X20 0x08 +#define FRAME_OFFSET_X21 0x10 +#define FRAME_OFFSET_X22 0x18 +#define FRAME_OFFSET_X23 0x20 +#define FRAME_OFFSET_X24 0x28 +#define FRAME_OFFSET_X25 0x30 +#define FRAME_OFFSET_X26 0x38 +#define FRAME_OFFSET_X27 0x40 +#define FRAME_OFFSET_X28 0x48 +#define FRAME_OFFSET_LR 0x50 #ifdef AARCH64_MULTILIB_VFP - #define FRAME_OFFSET_V8 0x48 - #define FRAME_OFFSET_V9 0x58 - #define FRAME_OFFSET_V10 0x68 - #define FRAME_OFFSET_V11 0x78 - #define FRAME_OFFSET_V12 0x88 - #define FRAME_OFFSET_V13 0x98 - #define FRAME_OFFSET_V14 0xA8 - #define FRAME_OFFSET_V15 0xB8 - - #define FRAME_SIZE (FRAME_OFFSET_V15 + 0x10) + /* + * According to the AAPCS64, V8-V15 are callee-saved registers, but only the + * bottom 8 bytes are required to be saved which correspond to D8-D15. 
+ */ + #define FRAME_OFFSET_D8 0x58 + #define FRAME_OFFSET_D9 0x60 + #define FRAME_OFFSET_D10 0x68 + #define FRAME_OFFSET_D11 0x70 + #define FRAME_OFFSET_D12 0x78 + #define FRAME_OFFSET_D13 0x80 + #define FRAME_OFFSET_D14 0x88 + #define FRAME_OFFSET_D15 0x90 + + /* + * Force 16 byte alignment of the frame size to avoid stack pointer alignment + * exceptions. + */ + #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_D15 + AARCH64_REGISTER_SIZE, 16 ) #else - #define FRAME_SIZE (FRAME_OFFSET_LR + 0x08) + #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_LR + AARCH64_REGISTER_SIZE, 16 ) #endif .section .text @@ -76,25 +95,27 @@ FUNCTION_ENTRY(_CPU_Context_validate) sub sp, sp, #FRAME_SIZE - str x4, [sp, #FRAME_OFFSET_X4] - str x5, [sp, #FRAME_OFFSET_X5] - str x6, [sp, #FRAME_OFFSET_X6] - str x7, [sp, #FRAME_OFFSET_X7] - str x8, [sp, #FRAME_OFFSET_X8] - str x9, [sp, #FRAME_OFFSET_X9] - str x10, [sp, #FRAME_OFFSET_X10] - str x11, [sp, #FRAME_OFFSET_X11] + str x19, [sp, #FRAME_OFFSET_X19] + str x20, [sp, #FRAME_OFFSET_X20] + str x21, [sp, #FRAME_OFFSET_X21] + str x22, [sp, #FRAME_OFFSET_X22] + str x23, [sp, #FRAME_OFFSET_X23] + str x24, [sp, #FRAME_OFFSET_X24] + str x25, [sp, #FRAME_OFFSET_X25] + str x26, [sp, #FRAME_OFFSET_X26] + str x27, [sp, #FRAME_OFFSET_X27] + str x28, [sp, #FRAME_OFFSET_X28] str lr, [sp, #FRAME_OFFSET_LR] #ifdef AARCH64_MULTILIB_VFP - str d8, [sp, #FRAME_OFFSET_V8] - str d9, [sp, #FRAME_OFFSET_V9] - str d10, [sp, #FRAME_OFFSET_V10] - str d11, [sp, #FRAME_OFFSET_V11] - str d12, [sp, #FRAME_OFFSET_V12] - str d13, [sp, #FRAME_OFFSET_V13] - str d14, [sp, #FRAME_OFFSET_V14] - str d15, [sp, #FRAME_OFFSET_V15] + str d8, [sp, #FRAME_OFFSET_D8] + str d9, [sp, #FRAME_OFFSET_D9] + str d10, [sp, #FRAME_OFFSET_D10] + str d11, [sp, #FRAME_OFFSET_D11] + str d12, [sp, #FRAME_OFFSET_D12] + str d13, [sp, #FRAME_OFFSET_D13] + str d14, [sp, #FRAME_OFFSET_D14] + str d15, [sp, #FRAME_OFFSET_D15] #endif /* Fill */ @@ -112,7 +133,7 @@ FUNCTION_ENTRY(_CPU_Context_validate) 
#ifdef AARCH64_MULTILIB_VFP - /* X3 contains the FPSCR */ + /* X3 contains the FPSR */ mrs x3, FPSR ldr x4, =0xf000001f bic x3, x3, x4 @@ -132,6 +153,23 @@ FUNCTION_ENTRY(_CPU_Context_validate) fill_register x10 fill_register x11 fill_register x12 + fill_register x13 + fill_register x14 + fill_register x15 + fill_register x16 + fill_register x17 + fill_register x18 + fill_register x19 + fill_register x20 + fill_register x21 + fill_register x22 + fill_register x23 + fill_register x24 + fill_register x25 + fill_register x26 + fill_register x27 + fill_register x28 + fill_register x29 fill_register lr #ifdef AARCH64_MULTILIB_VFP @@ -184,7 +222,6 @@ check: bne restore .endm - /* A compare involving the stack pointer is deprecated */ mov x1, sp cmp x2, x1 bne restore @@ -204,6 +241,23 @@ check: check_register x10 check_register x11 check_register x12 + check_register x13 + check_register x14 + check_register x15 + check_register x16 + check_register x17 + check_register x18 + check_register x19 + check_register x20 + check_register x21 + check_register x22 + check_register x23 + check_register x24 + check_register x25 + check_register x26 + check_register x27 + check_register x28 + check_register x29 check_register lr #ifdef AARCH64_MULTILIB_VFP @@ -215,25 +269,27 @@ check: /* Restore */ restore: - ldr x4, [sp, #FRAME_OFFSET_X4] - ldr x5, [sp, #FRAME_OFFSET_X5] - ldr x6, [sp, #FRAME_OFFSET_X6] - ldr x7, [sp, #FRAME_OFFSET_X7] - ldr x8, [sp, #FRAME_OFFSET_X8] - ldr x9, [sp, #FRAME_OFFSET_X9] - ldr x10, [sp, #FRAME_OFFSET_X10] - ldr x11, [sp, #FRAME_OFFSET_X11] + ldr x19, [sp, #FRAME_OFFSET_X19] + ldr x20, [sp, #FRAME_OFFSET_X20] + ldr x21, [sp, #FRAME_OFFSET_X21] + ldr x22, [sp, #FRAME_OFFSET_X22] + ldr x23, [sp, #FRAME_OFFSET_X23] + ldr x24, [sp, #FRAME_OFFSET_X24] + ldr x25, [sp, #FRAME_OFFSET_X25] + ldr x26, [sp, #FRAME_OFFSET_X26] + ldr x27, [sp, #FRAME_OFFSET_X27] + ldr x28, [sp, #FRAME_OFFSET_X28] ldr lr, [sp, #FRAME_OFFSET_LR] #ifdef AARCH64_MULTILIB_VFP - ldr d8, 
[sp, #FRAME_OFFSET_V8] - ldr d9, [sp, #FRAME_OFFSET_V9] - ldr d10, [sp, #FRAME_OFFSET_V10] - ldr d11, [sp, #FRAME_OFFSET_V11] - ldr d12, [sp, #FRAME_OFFSET_V12] - ldr d13, [sp, #FRAME_OFFSET_V13] - ldr d14, [sp, #FRAME_OFFSET_V14] - ldr d15, [sp, #FRAME_OFFSET_V15] + ldr d8, [sp, #FRAME_OFFSET_D8] + ldr d9, [sp, #FRAME_OFFSET_D9] + ldr d10, [sp, #FRAME_OFFSET_D10] + ldr d11, [sp, #FRAME_OFFSET_D11] + ldr d12, [sp, #FRAME_OFFSET_D12] + ldr d13, [sp, #FRAME_OFFSET_D13] + ldr d14, [sp, #FRAME_OFFSET_D14] + ldr d15, [sp, #FRAME_OFFSET_D15] #endif add sp, sp, #FRAME_SIZE diff --git a/cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S b/cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S index 2be5ce69ff..73472b81ac 100644 --- a/cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S +++ b/cpukit/score/cpu/aarch64/aarch64-context-volatile-clobber.S @@ -90,10 +90,29 @@ FUNCTION_ENTRY(_CPU_Context_volatile_clobber) clobber_vfp_register d31 #endif /* AARCH64_MULTILIB_VFP */ +/* + * According to the AAPCS64, X0-X18 and X29 are caller-saved registers. X0 is + * already being clobbered. + */ clobber_register x1 clobber_register x2 clobber_register x3 + clobber_register x4 + clobber_register x5 + clobber_register x6 + clobber_register x7 + clobber_register x8 + clobber_register x9 + clobber_register x10 + clobber_register x11 clobber_register x12 + clobber_register x13 + clobber_register x14 + clobber_register x15 + clobber_register x16 + clobber_register x17 + clobber_register x18 + clobber_register x29 ret diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S index d139fdc6a4..c7c9d03465 100644 --- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S +++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S @@ -72,11 +72,7 @@ * * The exception returns to the previous execution state */ -/* - * TODO(kmoore) The current implementation here assumes that SP is not - * misaligned. 
- */ - .macro JUMP_HANDLER_SHORT + .macro JUMP_HANDLER /* Mask to use in BIC, lower 7 bits */ mov x0, #0x7f /* LR contains PC, mask off to the base of the current vector */ @@ -113,10 +109,6 @@ nop nop nop - .endm - - .macro JUMP_HANDLER - JUMP_HANDLER_SHORT nop .endm @@ -148,11 +140,48 @@ Vector_table_el3: * using SP0. */ curr_el_sp0_sync: - stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */ - bl curr_el_sp0_sync_get_pc /* Get current execution address */ -curr_el_sp0_sync_get_pc: /* The current PC is now in LR */ - JUMP_HANDLER - JUMP_TARGET_SP0 + sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */ + str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */ + bl .push_exception_context_start /* bl to CEF store routine */ +/* Save original sp in x0 for .push_exception_context_finish */ + add x0, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* save original sp */ +/* Push the remainder of the context */ + bl .push_exception_context_finish +/* get jump target and branch/link */ + bl curr_el_sp0_sync_get_pc /* Get current execution address */ +curr_el_sp0_sync_get_pc: /* The current PC is now in LR */ + mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */ + bic x0, lr, x0 /* Mask LR to base of current vector */ + ldr x1, [x0, #0x78] /* Load target from last word in vector */ + and lr, lr, #0x780 /* Mask off bits for vector number */ + lsr lr, lr, #7 /* Shift the vector bits down */ +/* Store the vector */ + str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET] + mov x0, sp + blr x1 + b twiddle + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop +/* Takes up the space of 2 instructions */ +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 + .word _AArch64_Exception_default + .word 0x0 +#else + .dword _AArch64_Exception_default +#endif .balign 0x80 /* The exception handler for IRQ exceptions from the current EL using SP0. 
*/ curr_el_sp0_irq: @@ -186,13 +215,48 @@ curr_el_sp0_serror_get_pc: /* The current PC is now in LR */ * the current SP. */ curr_el_spx_sync: - msr SCTLR_EL1, XZR - stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */ - bl curr_el_spx_sync_get_pc /* Get current execution address */ -curr_el_spx_sync_get_pc: /* The current PC is now in LR */ -/* Use short jump handler since this has an extra instruction to clear SCTLR */ - JUMP_HANDLER_SHORT - JUMP_TARGET_SPx + msr spsel, #0 /* switch to exception stack */ + sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */ + str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */ + bl .push_exception_context_start /* bl to CEF store routine */ +/* Save original sp in x0 for .push_exception_context_finish */ + msr spsel, #1 + mov x0, sp + msr spsel, #0 +/* Push the remainder of the context */ + bl .push_exception_context_finish +/* get jump target and branch/link */ + bl curr_el_spx_sync_get_pc /* Get current execution address */ +curr_el_spx_sync_get_pc: /* The current PC is now in LR */ + mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */ + bic x0, lr, x0 /* Mask LR to base of current vector */ + ldr x1, [x0, #0x78] /* Load target from last word in vector */ + and lr, lr, #0x780 /* Mask off bits for vector number */ + lsr lr, lr, #7 /* Shift the vector bits down */ +/* Store the vector */ + str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET] + mov x0, sp + blr x1 + b twiddle + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop +/* Takes up the space of 2 instructions */ +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 + .word _AArch64_Exception_default + .word 0x0 +#else + .dword _AArch64_Exception_default +#endif .balign 0x80 /* * The exception handler for IRQ exceptions from the current EL using the @@ -442,68 +506,3 @@ twiddle: stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)] /* Done, return to exception handler */ ret - -/* - * 
Apply the exception frame to the current register status, SP points to the EF - */ -.pop_exception_context_and_ret: -/* Pop daif and spsr */ - ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET] -/* Restore daif and spsr */ - msr DAIF, x2 - msr SPSR_EL1, x3 -/* Pop FAR and ESR */ - ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET] -/* Restore ESR and FAR */ - msr ESR_EL1, x2 - msr FAR_EL1, x3 -/* Pop fpcr and fpsr */ - ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET] -/* Restore fpcr and fpsr */ - msr FPSR, x2 - msr FPCR, x3 -/* Restore LR */ - ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] -/* Pop VFP registers */ - ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)] - ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)] - ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)] - ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)] - ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)] - ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)] - ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)] - ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)] - ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)] - ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)] - ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)] - ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)] - ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)] - ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)] - ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)] - ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)] -/* Pop x0-x29(fp) */ - ldp x2, x3, [sp, #0x10] - ldp x4, x5, [sp, #0x20] - ldp x6, x7, [sp, #0x30] - ldp x8, x9, 
[sp, #0x40] - ldp x10, x11, [sp, #0x50] - ldp x12, x13, [sp, #0x60] - ldp x14, x15, [sp, #0x70] - ldp x16, x17, [sp, #0x80] - ldp x18, x19, [sp, #0x90] - ldp x20, x21, [sp, #0xa0] - ldp x22, x23, [sp, #0xb0] - ldp x24, x25, [sp, #0xc0] - ldp x26, x27, [sp, #0xd0] - ldp x28, x29, [sp, #0xe0] -/* Pop sp (ignored since sp should be shortly restored anyway) and ELR */ - ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET] -/* Restore exception LR */ - msr ELR_EL1, x1 - ldp x0, x1, [sp, #0x00] - add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE - -/* We must clear reservations to ensure consistency with atomic operations */ - clrex - - ret diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c index 2ebb3dee9f..f1591cbd5d 100644 --- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c +++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c @@ -41,10 +41,87 @@ #include "config.h" #endif -#include <rtems/score/cpu.h> #include <rtems/fatal.h> +#include <rtems/score/aarch64-system-registers.h> +#include <rtems/score/cpu.h> +#include <rtems/score/percpu.h> void _AArch64_Exception_default( CPU_Exception_frame *frame ) { + uint64_t EC = AARCH64_ESR_EL1_EC_GET( frame->register_syndrome ); + + /* Emulate FPSR flags for FENV if a FPU exception occurred */ + if ( EC == 0x2c ) { + /* + * This must be done because FENV depends on FPSR values, but trapped FPU + * exceptions don't set FPSR bits. In the case where a signal is mapped, the + * signal code executes after the exception frame is restored and FENV + * functions executed in that context will need this information to be + * accurate. 
+ */ + uint64_t ISS = AARCH64_ESR_EL1_EC_GET( frame->register_syndrome ); + + /* If the exception bits are valid, use them */ + if ( ( ISS & ( 1 << 23 ) ) != 0 ) { + /* The bits of the lower byte match the FPSR exception bits */ + frame->register_fpsr |= ( ISS & 0xff ); + } + } + rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame ); } + +void _CPU_Exception_disable_thread_dispatch( void ) +{ + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + /* Increment interrupt nest and thread dispatch disable level */ + ++cpu_self->thread_dispatch_disable_level; + ++cpu_self->isr_nest_level; +} + +void _AArch64_Exception_frame_copy( + CPU_Exception_frame *new_ef, + CPU_Exception_frame *old_ef +) +{ + *new_ef = *old_ef; +} + +int _CPU_Exception_frame_get_signal( CPU_Exception_frame *ef ) +{ + uint64_t EC = AARCH64_ESR_EL1_EC_GET( ef->register_syndrome ); + + switch ( EC ) { + case 0x1: /* WFI */ + case 0x7: /* SVE/SIMD/FP */ + case 0xa: /* LD64B/ST64B* */ + case 0x18: /* MSR/MRS/system instruction */ + case 0x19: /* SVE */ + case 0x15: /* Supervisor call */ + case 0x26: /* SP Alignment */ + case 0x31: /* Breakpoint */ + case 0x33: /* Step */ + case 0x35: /* Watchpoint */ + case 0x3c: /* Break Instruction */ + return -1; + case 0x2c: /* FPU */ + return SIGFPE; + case 0x21: /* Instruction Abort */ + case 0x25: /* Data Abort */ + return SIGSEGV; + default: + return SIGILL; + } +} + +void _CPU_Exception_frame_set_resume( CPU_Exception_frame *ef, void *address ) +{ + ef->register_pc = address; +} + +#define AARCH64_INSTRUCTION_SIZE 4 +void _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *ef ) +{ + ef->register_pc += AARCH64_INSTRUCTION_SIZE; +} diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c b/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c index e207a5a81d..04a3dfdc0d 100644 --- a/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c +++ b/cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c @@ -45,7 +45,7 
@@ #include <inttypes.h> #include <rtems/score/cpu.h> -#include <rtems/score/io.h> +#include <rtems/dev/io.h> #include <rtems/bspIo.h> typedef struct { @@ -216,7 +216,7 @@ void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ) for ( i = 0 ; i < 32 ; ++i ) { uint64_t low = (uint64_t) qx[i]; - uint64_t high = (uint64_t) (qx[i] >> 32); + uint64_t high = (uint64_t) (qx[i] >> 64); printk( "Q%02i = 0x%016" PRIx64 "%016" PRIx64 "\n", i, high, low ); } diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S index cb0954a29b..6344dce63a 100644 --- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S +++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S @@ -45,9 +45,15 @@ .globl _AArch64_Exception_interrupt_no_nest .globl _AArch64_Exception_interrupt_nest +.globl _CPU_Exception_dispatch_and_resume +.globl _CPU_Exception_resume #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 - #define SELF_CPU_CONTROL_GET_REG w19 + #ifdef RTEMS_SMP + #define SELF_CPU_CONTROL_GET_REG x19 + #else + #define SELF_CPU_CONTROL_GET_REG w19 + #endif #else #define SELF_CPU_CONTROL_GET_REG x19 #endif @@ -320,3 +326,166 @@ Return to embedded exception vector code pop_interrupt_context /* Return to vector for final cleanup */ ret + +/* + * This function is expected to resume execution using the CPU_Exception_frame + * provided in x0. This function does not adhere to the AAPCS64 calling + * convention because all necessary state is contained within the exception + * frame. 
+ */ +_CPU_Exception_resume: +/* Reset stack pointer */ + mov sp, x0 + +/* call CEF restore routine (doesn't restore lr) */ + bl .pop_exception_context + +/* get lr from CEF */ + ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] + +/* drop space reserved for CEF */ + add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE + +/* switch to thread stack */ + msr spsel, #1 + eret + +/* + * This function is expected to undo dispatch disabling, perform dispatch, and + * resume execution using the CPU_Exception_frame provided in x0. This function + * does not adhere to the AAPCS64 calling convention because all necessary + * state is contained within the exception frame. + */ +_CPU_Exception_dispatch_and_resume: +/* Get per-CPU control of current processor */ + GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG + +/* Reset stack pointer */ + mov sp, x0 + +/* Check dispatch disable and perform dispatch if necessary */ +/* Load some per-CPU variables */ + ldr w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + ldrb w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED] + ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE] + ldr w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] + +/* Decrement levels and determine thread dispatch state */ + eor w1, w1, w0 + sub w0, w0, #1 + orr w1, w1, w0 + orr w1, w1, w2 + sub w3, w3, #1 + +/* Store thread dispatch disable and ISR nest levels */ + str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + str w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] + +/* store should_skip_thread_dispatch in x22 */ + mov x22, x1 + +/* + * It is now safe to assume that the source of the exception has been resolved. + * Copy the exception frame to the thread stack to be compatible with thread + * dispatch. This may arbitrarily clobber corruptible registers since all + * important state is contained in the exception frame. + * + * No need to save current LR since this will never return to the caller. 
+ */ + bl .move_exception_frame_and_switch_to_thread_stack + +/* + * Check thread dispatch necessary, ISR dispatch disable and thread dispatch + * disable level. + */ + cmp x22, #0 + bne .Lno_need_thread_dispatch_resume + bl .AArch64_Perform_Thread_Dispatch +.Lno_need_thread_dispatch_resume: +/* call CEF restore routine (doesn't restore lr) */ + bl .pop_exception_context + +/* get lr from CEF */ + ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] + +/* drop space reserved for CEF */ + add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE + eret + +/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */ +.move_exception_frame_and_switch_to_thread_stack: + mov x1, sp /* Set x1 to the current exception frame */ + msr spsel, #1 /* switch to thread stack */ + ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET] /* Get thread SP from exception frame since it may have been updated */ + mov sp, x0 + sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */ + mov x0, sp /* Set x0 to the new exception frame */ + mov x20, lr /* Save LR */ + bl _AArch64_Exception_frame_copy /* Copy exception frame to reserved thread stack space */ + mov lr, x20 /* Restore LR */ + msr spsel, #0 /* switch to exception stack */ + add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* release space for CEF on exception stack */ + msr spsel, #1 /* switch to thread stack */ + ret + +/* + * Apply the exception frame to the current register status, SP points to the EF + */ +.pop_exception_context: +/* Pop daif and spsr */ + ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET] +/* Restore daif and spsr */ + msr DAIF, x2 + msr SPSR_EL1, x3 +/* Pop FAR and ESR */ + ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET] +/* Restore ESR and FAR */ + msr ESR_EL1, x2 + msr FAR_EL1, x3 +/* Pop fpcr and fpsr */ + ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET] +/* Restore fpcr and fpsr */ + msr FPSR, x2 + msr FPCR, x3 +/* Pop VFP registers */ 
+ ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)] + ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)] + ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)] + ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)] + ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)] + ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)] + ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)] + ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)] + ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)] + ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)] + ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)] + ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)] + ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)] + ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)] + ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)] + ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)] +/* Pop x0-x29(fp) */ + ldp x2, x3, [sp, #0x10] + ldp x4, x5, [sp, #0x20] + ldp x6, x7, [sp, #0x30] + ldp x8, x9, [sp, #0x40] + ldp x10, x11, [sp, #0x50] + ldp x12, x13, [sp, #0x60] + ldp x14, x15, [sp, #0x70] + ldp x16, x17, [sp, #0x80] + ldp x18, x19, [sp, #0x90] + ldp x20, x21, [sp, #0xa0] + ldp x22, x23, [sp, #0xb0] + ldp x24, x25, [sp, #0xc0] + ldp x26, x27, [sp, #0xd0] + ldp x28, x29, [sp, #0xe0] +/* Pop ELR, SP already popped */ + ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)] +/* Restore exception LR */ + msr ELR_EL1, x1 + ldp x0, x1, [sp, #0x00] + +/* We must clear reservations to ensure consistency with atomic operations */ + clrex + + ret diff --git a/cpukit/score/cpu/aarch64/cpu.c b/cpukit/score/cpu/aarch64/cpu.c index 75b1125cf7..207508302b 100644 --- 
a/cpukit/score/cpu/aarch64/cpu.c +++ b/cpukit/score/cpu/aarch64/cpu.c @@ -38,8 +38,7 @@ #include "config.h" #endif -#include <rtems/score/assert.h> -#include <rtems/score/cpu.h> +#include <rtems/score/cpuimpl.h> #include <rtems/score/thread.h> #include <rtems/score/tls.h> @@ -143,21 +142,27 @@ void _CPU_Context_Initialize( the_context->thread_id = (uintptr_t) tls_area; if ( tls_area != NULL ) { - _TLS_TCB_at_area_begin_initialize( tls_area ); + the_context->thread_id = (uintptr_t) _TLS_Initialize_area( tls_area ); } } -void _CPU_ISR_Set_level( uint64_t level ) +void _CPU_ISR_Set_level( uint32_t level ) { /* Set the mask bit if interrupts are disabled */ - level = level ? AARCH64_PSTATE_I : 0; - __asm__ volatile ( - "msr DAIF, %[level]\n" - : : [level] "r" (level) - ); + if ( level ) { + __asm__ volatile ( + "msr DAIFSet, #0x2\n" + : : [level] "r" (level) + ); + } else { + __asm__ volatile ( + "msr DAIFClr, #0x2\n" + : : [level] "r" (level) + ); + } } -uint64_t _CPU_ISR_Get_level( void ) +uint32_t _CPU_ISR_Get_level( void ) { uint64_t level; @@ -169,29 +174,20 @@ uint64_t _CPU_ISR_Get_level( void ) return ( level & AARCH64_PSTATE_I ) != 0; } -void _CPU_ISR_install_vector( - uint32_t vector, - CPU_ISR_handler new_handler, - CPU_ISR_handler *old_handler -) +void _CPU_Initialize( void ) { - /* Redirection table starts at the end of the vector table */ - CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4); - - CPU_ISR_handler current_handler = table [vector]; - - /* The current handler is now the old one */ - if (old_handler != NULL) { - *old_handler = current_handler; - } - - /* Write only if necessary to avoid writes to a maybe read-only memory */ - if (current_handler != new_handler) { - table [vector] = new_handler; - } + /* Do nothing */ } -void _CPU_Initialize( void ) +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) { - /* Do nothing */ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + __asm__ volatile ("mov x0, %0\n" + 
: "=r" (error) + : "0" (error) + : "x0" ); + while (1); } diff --git a/cpukit/score/cpu/aarch64/cpu_asm.S b/cpukit/score/cpu/aarch64/cpu_asm.S index 9e609e06bd..6321acde90 100644 --- a/cpukit/score/cpu/aarch64/cpu_asm.S +++ b/cpukit/score/cpu/aarch64/cpu_asm.S @@ -55,13 +55,22 @@ * */ +DEFINE_FUNCTION_AARCH64(_CPU_Context_switch) + .globl _CPU_Context_switch_no_return + .set _CPU_Context_switch_no_return, _CPU_Context_switch #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 -#define reg_2 w2 +/* Sanitize inputs for ILP32 ABI */ + mov w0, w0 + mov w1, w1 + #ifdef RTEMS_SMP + #define reg_2 x2 + #else + #define reg_2 w2 + #endif #else #define reg_2 x2 #endif -DEFINE_FUNCTION_AARCH64(_CPU_Context_switch) /* Start saving context */ GET_SELF_CPU_CONTROL reg_2 ldr w3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE] @@ -86,7 +95,30 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch) str x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE] #ifdef RTEMS_SMP -#error SMP not yet supported + /* + * The executing thread no longer executes on this processor. Switch + * the stack to the temporary interrupt stack of this processor. Mark + * the context of the executing thread as not executing. 
+ */ + dmb SY + add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE) + mov x3, #0 + strb w3, [x0, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET] + +.L_check_is_executing: + + /* Check the is executing indicator of the heir context */ + add x3, x1, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET + ldaxrb w4, [x3] + cmp x4, #0 + bne .L_get_potential_new_heir + + /* Try to update the is executing indicator of the heir context */ + mov x4, #1 + stlxrb w5, w4, [x3] + cmp x5, #0 + bne .L_get_potential_new_heir + dmb SY #endif /* Start restoring context */ @@ -129,6 +161,63 @@ DEFINE_FUNCTION_AARCH64(_CPU_Context_switch) * */ DEFINE_FUNCTION_AARCH64(_CPU_Context_restore) +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 +/* Sanitize input for ILP32 ABI */ + mov w0, w0 +#endif + mov x1, x0 GET_SELF_CPU_CONTROL reg_2 b .L_restore + +#ifdef RTEMS_SMP +.L_get_potential_new_heir: + + /* We may have a new heir */ + + /* Read the executing and heir */ +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 + ldr w4, [x2, #PER_CPU_OFFSET_EXECUTING] + ldr w5, [x2, #PER_CPU_OFFSET_HEIR] +#else + ldr x4, [x2, #PER_CPU_OFFSET_EXECUTING] + ldr x5, [x2, #PER_CPU_OFFSET_HEIR] +#endif + + /* + * Update the executing only if necessary to avoid cache line + * monopolization. 
+ */ + cmp x4, x5 + beq .L_check_is_executing + + /* Calculate the heir context pointer */ + sub x4, x1, x4 + add x1, x5, x4 + + /* Update the executing */ +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 + str w5, [x2, #PER_CPU_OFFSET_EXECUTING] +#else + str x5, [x2, #PER_CPU_OFFSET_EXECUTING] +#endif + + b .L_check_is_executing + +DEFINE_FUNCTION_AARCH64(_AArch64_Start_multitasking) +#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32 + /* Sanitize input for ILP32 ABI */ + mov w0, w0 +#endif + + mov x1, x0 + GET_SELF_CPU_CONTROL reg_2 + + /* Switch the stack to the temporary interrupt stack of this processor */ + add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE) + + /* Enable interrupts */ + msr DAIFClr, #0x2 + + b .L_check_is_executing +#endif diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h new file mode 100644 index 0000000000..ca9b60e6d1 --- /dev/null +++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUAArch64 + * + * @brief Definitions used in MMU setup. + */ + +/* + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * Written by Kinsey Moore <kinsey.moore@oarcorp.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LIBCPU_AARCH64_MMU_VMSAV8_64_H +#define LIBCPU_AARCH64_MMU_VMSAV8_64_H + +#ifndef ASM + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include <rtems.h> + +/* VMSAv8 Long-descriptor fields */ +#define MMU_DESC_AF ( 1 << 10 ) +#define MMU_DESC_SH_INNER ( ( 1 << 9 ) | ( 1 << 8 ) ) +#define MMU_DESC_WRITE_DISABLE ( 1 << 7 ) +/* PAGE and TABLE flags are the same bit, but only apply on certain levels */ +#define MMU_DESC_TYPE_TABLE ( 1 << 1 ) +#define MMU_DESC_TYPE_PAGE ( 1 << 1 ) +#define MMU_DESC_VALID ( 1 << 0 ) +#define MMU_DESC_MAIR_ATTR( val ) ( ( val & 0x3 ) << 2 ) +#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL + +/* Page table configuration */ +#define MMU_PAGE_BITS 12 +#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS ) +#define MMU_BITS_PER_LEVEL 9 + +#define AARCH64_MMU_FLAGS_BASE \ + ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF ) + +#define AARCH64_MMU_DATA_RO_CACHED \ + ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE ) +#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED +#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED + +#define AARCH64_MMU_DATA_RO \ + ( AARCH64_MMU_FLAGS_BASE | 
MMU_DESC_MAIR_ATTR( 2 ) | MMU_DESC_WRITE_DISABLE ) +#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO +#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW + +/* RW implied by not ORing in RO */ +#define AARCH64_MMU_DATA_RW_CACHED \ + ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) ) +#define AARCH64_MMU_DATA_RW \ + ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 2 ) ) +#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) ) + +rtems_status_code aarch64_mmu_map( + uintptr_t addr, + uint64_t size, + uint64_t flags +); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* ASM */ + +#endif /* LIBCPU_AARCH64_MMU_VMSAV8_64_H */ diff --git a/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h new file mode 100644 index 0000000000..c1d219d715 --- /dev/null +++ b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h @@ -0,0 +1,256 @@ +/* $NetBSD: elf_machdep.h,v 1.4 2018/10/12 01:28:58 ryo Exp $ */ + +/*- + * Copyright (c) 2014 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Matt Thomas of 3am Software Foundry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _AARCH64_ELF_MACHDEP_H_ +#define _AARCH64_ELF_MACHDEP_H_ + +#ifdef __aarch64__ + +#if defined(__AARCH64EB__) +#define ELF64_MACHDEP_ENDIANNESS ELFDATA2MSB +#define ELF32_MACHDEP_ENDIANNESS ELFDATA2MSB +#else +#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB +#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB +#endif + +/* Processor specific flags for the ELF header e_flags field. */ +#define EF_ARM_RELEXEC 0x00000001 +#define EF_ARM_HASENTRY 0x00000002 +#define EF_ARM_INTERWORK 0x00000004 /* GNU binutils 000413 */ +#define EF_ARM_SYMSARESORTED 0x00000004 /* ARM ELF A08 */ +#define EF_ARM_APCS_26 0x00000008 /* GNU binutils 000413 */ +#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ARM ELF B01 */ +#define EF_ARM_APCS_FLOAT 0x00000010 /* GNU binutils 000413 */ +#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ARM ELF B01 */ +#define EF_ARM_PIC 0x00000020 +#define EF_ARM_ALIGN8 0x00000040 /* 8-bit structure alignment. 
*/ +#define EF_ARM_NEW_ABI 0x00000080 +#define EF_ARM_OLD_ABI 0x00000100 +#define EF_ARM_SOFT_FLOAT 0x00000200 +#define EF_ARM_BE8 0x00800000 +#define EF_ARM_EABIMASK 0xff000000 +#define EF_ARM_EABI_VER1 0x01000000 +#define EF_ARM_EABI_VER2 0x02000000 +#define EF_ARM_EABI_VER3 0x03000000 +#define EF_ARM_EABI_VER4 0x04000000 +#define EF_ARM_EABI_VER5 0x05000000 + +#define ELF32_MACHDEP_ID_CASES \ + case EM_ARM: \ + break; + +#define ELF64_MACHDEP_ID_CASES \ + case EM_AARCH64: \ + break; + +#define ELF64_MACHDEP_ID EM_AARCH64 +#define ELF32_MACHDEP_ID EM_ARM + +#define KERN_ELFSIZE 64 +#define ARCH_ELFSIZE 64 /* MD native binary size */ + +/* Processor specific relocation types */ + +#define R_AARCH64_NONE 0 +#define R_AARCH64_NONE2 256 + +#define R_AARCH64_ABS64 257 /* S + A */ +#define R_AARCH64_ABS32 258 /* S + A */ +#define R_AARCH64_ABS16 259 /* S + A */ +#define R_AARCH64_PREL64 260 /* S + A - P */ +#define R_AARCH64_PREL32 261 /* S + A - P */ +#define R_AARCH64_PREL16 262 /* S + A - P */ +#define R_AARCH64_MOVW_UABS_G0 263 /* S + A [bits 0..15] */ +#define R_AARCH64_MOVW_UABS_G0_NC 264 /* S + A [bits 0..15] */ +#define R_AARCH64_MOVW_UABS_G1 265 /* S + A [bits 16..31] */ +#define R_AARCH64_MOVW_UABS_G1_NC 266 /* S + A [bits 16..31] */ +#define R_AARCH64_MOVW_UABS_G2 267 /* S + A [bits 32..47] */ +#define R_AARCH64_MOVW_UABS_G2_NC 268 /* S + A [bits 32..47] */ +#define R_AARCH64_MOVW_UABS_G3 269 /* S + A [bits 48..63] */ +#define R_AARCH64_MOVW_SABS_G0 270 /* S + A [bits 0..15] */ +#define R_AARCH64_MOVW_SABS_G1 271 /* S + A [bits 16..31] */ +#define R_AARCH64_MOVW_SABS_G2 272 /* S + A [bits 32..47] */ +#define R_AARCH64_LD_PREL_LO19 273 /* S + A - P */ +#define R_AARCH64_ADR_PREL_LO21 274 /* S + A - P */ +#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page(S + A) - Page(P) */ +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 /* Page(S + A) - Page(P) */ +#define R_AARCH64_ADD_ABS_LO12_NC 277 /* S + A */ +#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* S + A */ +#define 
R_AARCH_TSTBR14 279 /* S + A - P */ +#define R_AARCH_CONDBR19 281 /* S + A - P */ +#define R_AARCH_JUMP26 282 /* S + A - P */ +#define R_AARCH_CALL26 283 /* S + A - P */ +#define R_AARCH_LDST16_ABS_LO12_NC 284 /* S + A */ +#define R_AARCH_LDST32_ABS_LO12_NC 285 /* S + A */ +#define R_AARCH_LDST64_ABS_LO12_NC 286 /* S + A */ +#define R_AARCH64_MOVW_PREL_G0 287 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G0_NC 288 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G1 289 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G1_NC 290 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G2 291 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G2_NC 292 /* S + A - P */ +#define R_AARCH64_MOVW_PREL_G3 293 /* S + A - P */ + +#define R_AARCH64_LDST128_ABS_LO12_NC 299 /* S + A */ +#define R_AARCH64_MOVW_GOTOFF_G0 300 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G1 302 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G2 304 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_MOVW_GOTOFF_G3 306 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_GOTREL64 307 /* S + A - GOT */ +#define R_AARCH64_GOTREL32 308 /* S + A - GOT */ +#define R_AARCH64_GOT_LD_PREL19 309 /* G(GDAT(S + A)) - P */ +#define R_AARCH64_LD64_GOTOFF_LO15 310 /* G(GDAT(S + A)) - GOT */ +#define R_AARCH64_ADR_GOT_PAGE 311 /* Page(G(GDAT(S + A))) - Page(GOT) */ +#define R_AARCH64_LD64_GOT_LO12_NC 312 /* G(GDAT(S + A)) */ +#define R_AARCH64_LD64_GOTPAGE_LO15 313 /* G(GDAT(S + A)) - Page(GOT) */ + +#define R_AARCH64_TLSGD_ADR_PREL21 512 /* G(GTLSIDX(S,A)) - P */ +#define R_AARCH64_TLSGD_ADR_PAGE21 513 /* Page(G(GTLSIDX(S,A))) - Page(P) */ +#define R_AARCH64_TLSGD_ADD_LO12_NC 514 /* G(GTLSIDX(S,A)) */ +#define R_AARCH64_TLSGD_MOVW_G1 515 /* G(GTLSIDX(S,A)) - GOT */ +#define R_AARCH64_TLSGD_MOVW_G0_NV 516 /* G(GTLSIDX(S,A)) - GOT 
*/ +#define R_AARCH64_TLSLD_ADR_PREL21 517 /* G(GLDM(S,A)) - P */ +#define R_AARCH64_TLSLD_ADR_PAGE21 518 /* Page(G(GLDM(S))) - Page(P) */ +#define R_AARCH64_TLSLD_ADD_LO12_NC 519 /* G(GLDM(S)) */ +#define R_AARCH64_TLSLD_MOVW_G1 520 /* G(GLDM(S)) - GOT */ +#define R_AARCH64_TLSLD_MOVW_G0_NC 521 /* G(GLDM(S)) - GOT */ +#define R_AARCH64_TLSLD_LD_PREL21 522 /* G(GLDM(S)) - P */ +#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 528 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* DTPREL(S+A) */ +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 /* G(GTPREL(S+A)) - GOT */ +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 /* G(GTPREL(S+A)) - GOT */ +#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 /* Page(G(GTPREL(S+A))) - Page(P) */ +#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 /* G(GTPREL(S+A)) */ +#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* G(GTPREL(S+A)) - P */ +#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 /* TPREL(S+A) */ +#define R_AARCH64_MOVW_TPREL_G1 545 /* TPREL(S+A) */ +#define 
R_AARCH64_MOVW_TPREL_G1_NC 546 /* TPREL(S+A) */ +#define R_AARCH64_MOVW_TPREL_G0 547 /* TPREL(S+A) */ +#define R_AARCH64_MOVW_TPREL_G0_NC 548 /* TPREL(S+A) */ +#define R_AARCH64_ADD_TPREL_HI12 549 /* TPREL(S+A) */ +#define R_AARCH64_ADD_TPREL_LO12 550 /* TPREL(S+A) */ +#define R_AARCH64_ADD_TPREL_LO12_NC 551 /* TPREL(S+A) */ +#define R_AARCH64_LDST8_TPREL_LO12 552 /* TPREL(S+A) */ +#define R_AARCH64_LDST8_TPREL_LO12_NC 553 /* TPREL(S+A) */ +#define R_AARCH64_LDST16_TPREL_LO12 554 /* TPREL(S+A) */ +#define R_AARCH64_LDST16_TPREL_LO12_NC 555 /* TPREL(S+A) */ +#define R_AARCH64_LDST32_TPREL_LO12 556 /* TPREL(S+A) */ +#define R_AARCH64_LDST32_TPREL_LO12_NC 557 /* TPREL(S+A) */ +#define R_AARCH64_LDST64_TPREL_LO12 558 /* TPREL(S+A) */ +#define R_AARCH64_LDST64_TPREL_LO12_NC 559 /* TPREL(S+A) */ +#define R_AARCH64_TLSDESC_LD_PREL19 560 /* G(GTLSDESC(S+A)) - P */ +#define R_AARCH64_TLSDESC_LD_PREL21 561 /* G(GTLSDESC(S+A)) - P */ +#define R_AARCH64_TLSDESC_LD_PAGE21 562 /* Page(G(GTLSDESC(S+A))) - Page(P) */ +#define R_AARCH64_TLSDESC_LD64_LO12 563 /* G(GTLSDESC(S+A)) */ +#define R_AARCH64_TLSDESC_ADD_LO12 564 /* G(GTLSDESC(S+A)) */ +#define R_AARCH64_TLSDESC_OFF_G1 565 /* G(GTLSDESC(S+A)) - GOT */ +#define R_AARCH64_TLSDESC_OFF_G0_NC 566 /* G(GTLSDESC(S+A)) - GOT */ +#define R_AARCH64_TLSDESC_LDR 567 /* */ +#define R_AARCH64_TLSDESC_ADD 568 /* */ +#define R_AARCH64_TLSDESC_CALL 569 /* */ +#define R_AARCH64_TLSLE_LDST128_TPREL_LO12 570 /* TPREL(S+A) */ +#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC 571 /* TPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12 572 /* DTPREL(S+A) */ +#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 572 /* DTPREL(S+A) */ + +/* Dynamic Relocations */ +#define R_AARCH64_P32_COPY 180 +#define R_AARCH64_P32_GLOB_DAT 181 /* S + A */ +#define R_AARCH64_P32_JUMP_SLOT 182 /* S + A */ +#define R_AARCH64_P32_RELATIVE 183 /* Delta(S) + A */ +#define R_AARCH64_P32_TLS_DTPREL 184 /* DTPREL(S+A) */ +#define R_AARCH64_P32_TLS_DTPMOD 185 /* LBM(S) */ 
+#define R_AARCH64_P32_TLS_TPREL 186 /* TPREL(S+A) */ +#define R_AARCH64_P32_TLSDESC 187 /* TLSDESC(S+A) */ +#define R_AARCH64_P32_IRELATIVE 188 /* Indirect(Delta(S) + A) */ + +#define R_AARCH64_COPY 1024 +#define R_AARCH64_GLOB_DAT 1025 /* S + A */ +#define R_AARCH64_JUMP_SLOT 1026 /* S + A */ +#define R_AARCH64_RELATIVE 1027 /* Delta(S) + A */ +#define R_AARCH64_TLS_DTPREL64 1028 /* DTPREL(S+A) */ +#define R_AARCH64_TLS_DTPMOD64 1029 /* LBM(S) */ +#define R_AARCH64_TLS_TPREL64 1030 /* TPREL(S+A) */ +#define R_AARCH64_TLSDESC 1031 /* TLSDESC(S+A) */ +#define R_AARCH64_IRELATIVE 1032 /* Indirect(Delta(S) + A) */ + +#define R_TYPE(name) R_AARCH64_ ## name +#define R_TLS_TYPE(name) R_AARCH64_ ## name ## 64 + +/* Processor specific program header types */ +#define PT_AARCH64_ARCHEXT (PT_LOPROC + 0) +#define PT_AARCH64_UNWIND (PT_LOPROC + 1) + +/* Processor specific section header flags */ +#define SHF_ENTRYSECT 0x10000000 +#define SHF_COMDEF 0x80000000 + +#define SHT_AARCH64_ATTRIBUTES (SHT_LOPROC + 3) + +#ifdef _KERNEL +#ifdef ELFSIZE +#define ELF_MD_PROBE_FUNC ELFNAME2(aarch64_netbsd,probe) +#endif + +struct exec_package; + +int aarch64_netbsd_elf64_probe(struct lwp *, struct exec_package *, void *, + char *, vaddr_t *); +int aarch64_netbsd_elf32_probe(struct lwp *, struct exec_package *, void *, + char *, vaddr_t *); +#endif + +#elif defined(__arm__) + +#include <arm/elf_machdep.h> + +#endif + +#endif /* _AARCH64_ELF_MACHDEP_H_ */ diff --git a/cpukit/score/cpu/aarch64/include/rtems/asm.h b/cpukit/score/cpu/aarch64/include/rtems/asm.h index 35bf533c8a..fa53e08291 100644 --- a/cpukit/score/cpu/aarch64/include/rtems/asm.h +++ b/cpukit/score/cpu/aarch64/include/rtems/asm.h @@ -81,7 +81,12 @@ .align 8 ; .globl name ; name: ; .globl name ## _aarch64 ; name ## _aarch64: .macro GET_SELF_CPU_CONTROL REG +#ifdef RTEMS_SMP + /* Use Thread ID Register (TPIDR_EL1) */ + mrs \REG, TPIDR_EL1 +#else ldr \REG, =_Per_CPU_Information +#endif .endm /** @} */ diff --git 
a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h new file mode 100644 index 0000000000..8ddad5becf --- /dev/null +++ b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h @@ -0,0 +1,11065 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @brief This header file provides the API to read and write the AArch64 + * system registers. + */ + +/* + * Copyright (C) 2020 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H +#define _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* ACTLR_EL1, Auxiliary Control Register (EL1) */ + +static inline uint64_t _AArch64_Read_actlr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ACTLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_actlr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ACTLR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ACTLR_EL2, Auxiliary Control Register (EL2) */ + +static inline uint64_t _AArch64_Read_actlr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ACTLR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_actlr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr ACTLR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* ACTLR_EL3, Auxiliary Control Register (EL3) */ + +static inline uint64_t _AArch64_Read_actlr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ACTLR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_actlr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr ACTLR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR0_EL1, Auxiliary Fault Status Register 0 (EL1) */ + +static inline uint64_t _AArch64_Read_afsr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR0_EL2, Auxiliary Fault Status Register 0 (EL2) */ + +static inline uint64_t _AArch64_Read_afsr0_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR0_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr0_el2( 
uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR0_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR0_EL3, Auxiliary Fault Status Register 0 (EL3) */ + +static inline uint64_t _AArch64_Read_afsr0_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR0_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr0_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR0_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR1_EL1, Auxiliary Fault Status Register 1 (EL1) */ + +static inline uint64_t _AArch64_Read_afsr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR1_EL2, Auxiliary Fault Status Register 1 (EL2) */ + +static inline uint64_t _AArch64_Read_afsr1_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR1_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr1_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR1_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* AFSR1_EL3, Auxiliary Fault Status Register 1 (EL3) */ + +static inline uint64_t _AArch64_Read_afsr1_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AFSR1_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_afsr1_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr AFSR1_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* AIDR_EL1, Auxiliary ID Register */ + +static inline uint64_t _AArch64_Read_aidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* AMAIR_EL1, Auxiliary Memory Attribute Indirection Register (EL1) */ + +static inline uint64_t 
_AArch64_Read_amair_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMAIR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amair_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr AMAIR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMAIR_EL2, Auxiliary Memory Attribute Indirection Register (EL2) */ + +static inline uint64_t _AArch64_Read_amair_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMAIR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amair_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr AMAIR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMAIR_EL3, Auxiliary Memory Attribute Indirection Register (EL3) */ + +static inline uint64_t _AArch64_Read_amair_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMAIR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amair_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr AMAIR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* APDAKEYHI_EL1, Pointer Authentication Key A for Data (bits[127:64]) */ + +static inline uint64_t _AArch64_Read_apdakeyhi_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APDAKEYHI_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apdakeyhi_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APDAKEYHI_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APDAKEYLO_EL1, Pointer Authentication Key A for Data (bits[63:0]) */ + +static inline uint64_t _AArch64_Read_apdakeylo_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APDAKEYLO_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apdakeylo_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APDAKEYLO_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APDBKEYHI_EL1, Pointer Authentication Key 
B for Data (bits[127:64]) */ + +static inline uint64_t _AArch64_Read_apdbkeyhi_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APDBKEYHI_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apdbkeyhi_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APDBKEYHI_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APDBKEYLO_EL1, Pointer Authentication Key B for Data (bits[63:0]) */ + +static inline uint64_t _AArch64_Read_apdbkeylo_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APDBKEYLO_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apdbkeylo_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APDBKEYLO_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APGAKEYHI_EL1, Pointer Authentication Key A for Code (bits[127:64]) */ + +static inline uint64_t _AArch64_Read_apgakeyhi_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APGAKEYHI_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apgakeyhi_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APGAKEYHI_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APGAKEYLO_EL1, Pointer Authentication Key A for Code (bits[63:0]) */ + +static inline uint64_t _AArch64_Read_apgakeylo_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APGAKEYLO_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apgakeylo_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APGAKEYLO_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APIAKEYHI_EL1, Pointer Authentication Key A for Instruction (bits[127:64]) */ + +static inline uint64_t _AArch64_Read_apiakeyhi_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APIAKEYHI_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apiakeyhi_el1( uint64_t value ) +{ + __asm__ volatile 
( + "msr APIAKEYHI_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APIAKEYLO_EL1, Pointer Authentication Key A for Instruction (bits[63:0]) */ + +static inline uint64_t _AArch64_Read_apiakeylo_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APIAKEYLO_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apiakeylo_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APIAKEYLO_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APIBKEYHI_EL1, Pointer Authentication Key B for Instruction (bits[127:64]) */ + +static inline uint64_t _AArch64_Read_apibkeyhi_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APIBKEYHI_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apibkeyhi_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APIBKEYHI_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* APIBKEYLO_EL1, Pointer Authentication Key B for Instruction (bits[63:0]) */ + +static inline uint64_t _AArch64_Read_apibkeylo_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, APIBKEYLO_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_apibkeylo_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr APIBKEYLO_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CCSIDR2_EL1, Current Cache Size ID Register 2 */ + +#define AARCH64_CCSIDR2_EL1_NUMSETS( _val ) ( ( _val ) << 0 ) +#define AARCH64_CCSIDR2_EL1_NUMSETS_SHIFT 0 +#define AARCH64_CCSIDR2_EL1_NUMSETS_MASK 0xffffffU +#define AARCH64_CCSIDR2_EL1_NUMSETS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffU ) + +static inline uint64_t _AArch64_Read_ccsidr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CCSIDR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CCSIDR_EL1, Current Cache Size ID Register */ + +#define AARCH64_CCSIDR_EL1_LINESIZE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CCSIDR_EL1_LINESIZE_SHIFT 0 
+#define AARCH64_CCSIDR_EL1_LINESIZE_MASK 0x7U +#define AARCH64_CCSIDR_EL1_LINESIZE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x7U ) + +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_0( _val ) ( ( _val ) << 3 ) +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_SHIFT_0 3 +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_MASK_0 0x1ff8U +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_GET_0( _reg ) \ + ( ( ( _reg ) >> 3 ) & 0x3ffU ) + +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_1( _val ) ( ( _val ) << 3 ) +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_SHIFT_1 3 +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_MASK_1 0xfffff8U +#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_GET_1( _reg ) \ + ( ( ( _reg ) >> 3 ) & 0x1fffffU ) + +#define AARCH64_CCSIDR_EL1_NUMSETS_0( _val ) ( ( _val ) << 13 ) +#define AARCH64_CCSIDR_EL1_NUMSETS_SHIFT_0 13 +#define AARCH64_CCSIDR_EL1_NUMSETS_MASK_0 0xfffe000U +#define AARCH64_CCSIDR_EL1_NUMSETS_GET_0( _reg ) \ + ( ( ( _reg ) >> 13 ) & 0x7fffU ) + +#define AARCH64_CCSIDR_EL1_NUMSETS_1( _val ) ( ( _val ) << 32 ) +#define AARCH64_CCSIDR_EL1_NUMSETS_SHIFT_1 32 +#define AARCH64_CCSIDR_EL1_NUMSETS_MASK_1 0xffffff00000000ULL +#define AARCH64_CCSIDR_EL1_NUMSETS_GET_1( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xffffffULL ) + +static inline uint64_t _AArch64_Read_ccsidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CCSIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CLIDR_EL1, Cache Level ID Register */ + +#define AARCH64_CLIDR_EL1_CTYPE1( _val ) ( ( _val ) << 0 ) +#define AARCH64_CLIDR_EL1_CTYPE1_SHIFT 0 +#define AARCH64_CLIDR_EL1_CTYPE1_MASK ( 0x7U << 0 ) +#define AARCH64_CLIDR_EL1_CTYPE1_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE2( _val ) ( ( _val ) << 3 ) +#define AARCH64_CLIDR_EL1_CTYPE2_SHIFT 3 +#define AARCH64_CLIDR_EL1_CTYPE2_MASK ( 0x7U << 3 ) +#define AARCH64_CLIDR_EL1_CTYPE2_GET( _reg ) \ + ( ( ( _reg ) >> 3 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE3( _val ) ( ( _val ) << 6 ) +#define 
AARCH64_CLIDR_EL1_CTYPE3_SHIFT 6 +#define AARCH64_CLIDR_EL1_CTYPE3_MASK ( 0x7U << 6 ) +#define AARCH64_CLIDR_EL1_CTYPE3_GET( _reg ) \ + ( ( ( _reg ) >> 6 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE4( _val ) ( ( _val ) << 9 ) +#define AARCH64_CLIDR_EL1_CTYPE4_SHIFT 9 +#define AARCH64_CLIDR_EL1_CTYPE4_MASK ( 0x7U << 9 ) +#define AARCH64_CLIDR_EL1_CTYPE4_GET( _reg ) \ + ( ( ( _reg ) >> 9 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE5( _val ) ( ( _val ) << 12 ) +#define AARCH64_CLIDR_EL1_CTYPE5_SHIFT 12 +#define AARCH64_CLIDR_EL1_CTYPE5_MASK ( 0x7U << 12 ) +#define AARCH64_CLIDR_EL1_CTYPE5_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE6( _val ) ( ( _val ) << 15 ) +#define AARCH64_CLIDR_EL1_CTYPE6_SHIFT 15 +#define AARCH64_CLIDR_EL1_CTYPE6_MASK ( 0x7U << 15 ) +#define AARCH64_CLIDR_EL1_CTYPE6_GET( _reg ) \ + ( ( ( _reg ) >> 15 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_CTYPE7( _val ) ( ( _val ) << 18 ) +#define AARCH64_CLIDR_EL1_CTYPE7_SHIFT 18 +#define AARCH64_CLIDR_EL1_CTYPE7_MASK ( 0x7U << 18 ) +#define AARCH64_CLIDR_EL1_CTYPE7_GET( _reg ) \ + ( ( ( _reg ) >> 18 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_LOUIS( _val ) ( ( _val ) << 21 ) +#define AARCH64_CLIDR_EL1_LOUIS_SHIFT 21 +#define AARCH64_CLIDR_EL1_LOUIS_MASK 0xe00000U +#define AARCH64_CLIDR_EL1_LOUIS_GET( _reg ) \ + ( ( ( _reg ) >> 21 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_LOC( _val ) ( ( _val ) << 24 ) +#define AARCH64_CLIDR_EL1_LOC_SHIFT 24 +#define AARCH64_CLIDR_EL1_LOC_MASK 0x7000000U +#define AARCH64_CLIDR_EL1_LOC_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_LOUU( _val ) ( ( _val ) << 27 ) +#define AARCH64_CLIDR_EL1_LOUU_SHIFT 27 +#define AARCH64_CLIDR_EL1_LOUU_MASK 0x38000000U +#define AARCH64_CLIDR_EL1_LOUU_GET( _reg ) \ + ( ( ( _reg ) >> 27 ) & 0x7U ) + +#define AARCH64_CLIDR_EL1_ICB( _val ) ( ( _val ) << 30 ) +#define AARCH64_CLIDR_EL1_ICB_SHIFT 30 +#define AARCH64_CLIDR_EL1_ICB_MASK 0x1c0000000ULL +#define AARCH64_CLIDR_EL1_ICB_GET( _reg ) 
\ + ( ( ( _reg ) >> 30 ) & 0x7ULL ) + +static inline uint64_t _AArch64_Read_clidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CLIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CONTEXTIDR_EL1, Context ID Register (EL1) */ + +#define AARCH64_CONTEXTIDR_EL1_PROCID( _val ) ( ( _val ) << 0 ) +#define AARCH64_CONTEXTIDR_EL1_PROCID_SHIFT 0 +#define AARCH64_CONTEXTIDR_EL1_PROCID_MASK 0xffffffffU +#define AARCH64_CONTEXTIDR_EL1_PROCID_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_contextidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CONTEXTIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_contextidr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CONTEXTIDR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CONTEXTIDR_EL2, Context ID Register (EL2) */ + +#define AARCH64_CONTEXTIDR_EL2_PROCID( _val ) ( ( _val ) << 0 ) +#define AARCH64_CONTEXTIDR_EL2_PROCID_SHIFT 0 +#define AARCH64_CONTEXTIDR_EL2_PROCID_MASK 0xffffffffU +#define AARCH64_CONTEXTIDR_EL2_PROCID_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_contextidr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CONTEXTIDR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_contextidr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CONTEXTIDR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CPACR_EL1, Architectural Feature Access Control Register */ + +#define AARCH64_CPACR_EL1_ZEN( _val ) ( ( _val ) << 16 ) +#define AARCH64_CPACR_EL1_ZEN_SHIFT 16 +#define AARCH64_CPACR_EL1_ZEN_MASK 0x30000U +#define AARCH64_CPACR_EL1_ZEN_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0x3U ) + +#define AARCH64_CPACR_EL1_FPEN( _val ) ( ( _val ) << 20 ) +#define AARCH64_CPACR_EL1_FPEN_SHIFT 20 +#define AARCH64_CPACR_EL1_FPEN_MASK 0x300000U +#define 
AARCH64_CPACR_EL1_FPEN_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0x3U ) + +#define AARCH64_CPACR_EL1_TTA 0x10000000U + +static inline uint64_t _AArch64_Read_cpacr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CPACR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cpacr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CPACR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CPTR_EL2, Architectural Feature Trap Register (EL2) */ + +#define AARCH64_CPTR_EL2_TZ 0x100U + +#define AARCH64_CPTR_EL2_TFP 0x400U + +#define AARCH64_CPTR_EL2_ZEN( _val ) ( ( _val ) << 16 ) +#define AARCH64_CPTR_EL2_ZEN_SHIFT 16 +#define AARCH64_CPTR_EL2_ZEN_MASK 0x30000U +#define AARCH64_CPTR_EL2_ZEN_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0x3U ) + +#define AARCH64_CPTR_EL2_TTA_0 0x100000U + +#define AARCH64_CPTR_EL2_FPEN( _val ) ( ( _val ) << 20 ) +#define AARCH64_CPTR_EL2_FPEN_SHIFT 20 +#define AARCH64_CPTR_EL2_FPEN_MASK 0x300000U +#define AARCH64_CPTR_EL2_FPEN_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0x3U ) + +#define AARCH64_CPTR_EL2_TTA_1 0x10000000U + +#define AARCH64_CPTR_EL2_TAM 0x40000000U + +#define AARCH64_CPTR_EL2_TCPAC 0x80000000U + +static inline uint64_t _AArch64_Read_cptr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CPTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cptr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CPTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CPTR_EL3, Architectural Feature Trap Register (EL3) */ + +#define AARCH64_CPTR_EL3_EZ 0x100U + +#define AARCH64_CPTR_EL3_TFP 0x400U + +#define AARCH64_CPTR_EL3_TTA 0x100000U + +#define AARCH64_CPTR_EL3_TAM 0x40000000U + +#define AARCH64_CPTR_EL3_TCPAC 0x80000000U + +static inline uint64_t _AArch64_Read_cptr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CPTR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_cptr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr CPTR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* CSSELR_EL1, Cache Size Selection Register */ + +#define AARCH64_CSSELR_EL1_IND 0x1U + +#define AARCH64_CSSELR_EL1_LEVEL( _val ) ( ( _val ) << 1 ) +#define AARCH64_CSSELR_EL1_LEVEL_SHIFT 1 +#define AARCH64_CSSELR_EL1_LEVEL_MASK 0xeU +#define AARCH64_CSSELR_EL1_LEVEL_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x7U ) + +#define AARCH64_CSSELR_EL1_TND 0x10U + +static inline uint64_t _AArch64_Read_csselr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CSSELR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_csselr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CSSELR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CTR_EL0, Cache Type Register */ + +#define AARCH64_CTR_EL0_IMINLINE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CTR_EL0_IMINLINE_SHIFT 0 +#define AARCH64_CTR_EL0_IMINLINE_MASK 0xfU +#define AARCH64_CTR_EL0_IMINLINE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_CTR_EL0_L1IP( _val ) ( ( _val ) << 14 ) +#define AARCH64_CTR_EL0_L1IP_SHIFT 14 +#define AARCH64_CTR_EL0_L1IP_MASK 0xc000U +#define AARCH64_CTR_EL0_L1IP_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_CTR_EL0_DMINLINE( _val ) ( ( _val ) << 16 ) +#define AARCH64_CTR_EL0_DMINLINE_SHIFT 16 +#define AARCH64_CTR_EL0_DMINLINE_MASK 0xf0000U +#define AARCH64_CTR_EL0_DMINLINE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_CTR_EL0_ERG( _val ) ( ( _val ) << 20 ) +#define AARCH64_CTR_EL0_ERG_SHIFT 20 +#define AARCH64_CTR_EL0_ERG_MASK 0xf00000U +#define AARCH64_CTR_EL0_ERG_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_CTR_EL0_CWG( _val ) ( ( _val ) << 24 ) +#define AARCH64_CTR_EL0_CWG_SHIFT 24 +#define AARCH64_CTR_EL0_CWG_MASK 0xf000000U +#define AARCH64_CTR_EL0_CWG_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_CTR_EL0_IDC 
0x10000000U + +#define AARCH64_CTR_EL0_DIC 0x20000000U + +#define AARCH64_CTR_EL0_TMINLINE( _val ) ( ( _val ) << 32 ) +#define AARCH64_CTR_EL0_TMINLINE_SHIFT 32 +#define AARCH64_CTR_EL0_TMINLINE_MASK 0x3f00000000ULL +#define AARCH64_CTR_EL0_TMINLINE_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0x3fULL ) + +static inline uint64_t _AArch64_Read_ctr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CTR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* DACR32_EL2, Domain Access Control Register */ + +static inline uint64_t _AArch64_Read_dacr32_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DACR32_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dacr32_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr DACR32_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* DCZID_EL0, Data Cache Zero ID Register */ + +#define AARCH64_DCZID_EL0_BS( _val ) ( ( _val ) << 0 ) +#define AARCH64_DCZID_EL0_BS_SHIFT 0 +#define AARCH64_DCZID_EL0_BS_MASK 0xfU +#define AARCH64_DCZID_EL0_BS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_DCZID_EL0_DZP 0x10U + +static inline uint64_t _AArch64_Read_dczid_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DCZID_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ESR_EL1, Exception Syndrome Register (EL1) */ + +#define AARCH64_ESR_EL1_DIRECTION 0x1U + +#define AARCH64_ESR_EL1_ERETA 0x1U + +#define AARCH64_ESR_EL1_IOF 0x1U + +#define AARCH64_ESR_EL1_TI 0x1U + +#define AARCH64_ESR_EL1_BTYPE( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_BTYPE_SHIFT 0 +#define AARCH64_ESR_EL1_BTYPE_MASK 0x3U +#define AARCH64_ESR_EL1_BTYPE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3U ) + +#define AARCH64_ESR_EL1_DFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_DFSC_SHIFT 0 +#define AARCH64_ESR_EL1_DFSC_MASK 0x3fU +#define AARCH64_ESR_EL1_DFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define 
AARCH64_ESR_EL1_IFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_IFSC_SHIFT 0 +#define AARCH64_ESR_EL1_IFSC_MASK 0x3fU +#define AARCH64_ESR_EL1_IFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_ESR_EL1_COMMENT( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_COMMENT_SHIFT 0 +#define AARCH64_ESR_EL1_COMMENT_MASK 0xffffU +#define AARCH64_ESR_EL1_COMMENT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL1_IMM16( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_IMM16_SHIFT 0 +#define AARCH64_ESR_EL1_IMM16_MASK 0xffffU +#define AARCH64_ESR_EL1_IMM16_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL1_ISS( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL1_ISS_SHIFT 0 +#define AARCH64_ESR_EL1_ISS_MASK 0x1ffffffU +#define AARCH64_ESR_EL1_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x1ffffffU ) + +#define AARCH64_ESR_EL1_DZF 0x2U + +#define AARCH64_ESR_EL1_ERET 0x2U + +#define AARCH64_ESR_EL1_AM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL1_AM_SHIFT 1 +#define AARCH64_ESR_EL1_AM_MASK 0xeU +#define AARCH64_ESR_EL1_AM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x7U ) + +#define AARCH64_ESR_EL1_CRM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL1_CRM_SHIFT 1 +#define AARCH64_ESR_EL1_CRM_MASK 0x1eU +#define AARCH64_ESR_EL1_CRM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0xfU ) + +#define AARCH64_ESR_EL1_OFF 0x4U + +#define AARCH64_ESR_EL1_UFF 0x8U + +#define AARCH64_ESR_EL1_IXF 0x10U + +#define AARCH64_ESR_EL1_OFFSET 0x10U + +#define AARCH64_ESR_EL1_RN( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL1_RN_SHIFT 5 +#define AARCH64_ESR_EL1_RN_MASK 0x3e0U +#define AARCH64_ESR_EL1_RN_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL1_RT( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL1_RT_SHIFT 5 +#define AARCH64_ESR_EL1_RT_MASK 0x3e0U +#define AARCH64_ESR_EL1_RT_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL1_EX 0x40U + +#define AARCH64_ESR_EL1_WNR 0x40U + +#define 
AARCH64_ESR_EL1_IDF 0x80U + +#define AARCH64_ESR_EL1_S1PTW 0x80U + +#define AARCH64_ESR_EL1_CM 0x100U + +#define AARCH64_ESR_EL1_VECITR( _val ) ( ( _val ) << 8 ) +#define AARCH64_ESR_EL1_VECITR_SHIFT 8 +#define AARCH64_ESR_EL1_VECITR_MASK 0x700U +#define AARCH64_ESR_EL1_VECITR_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0x7U ) + +#define AARCH64_ESR_EL1_EA 0x200U + +#define AARCH64_ESR_EL1_FNV 0x400U + +#define AARCH64_ESR_EL1_AET( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL1_AET_SHIFT 10 +#define AARCH64_ESR_EL1_AET_MASK 0x1c00U +#define AARCH64_ESR_EL1_AET_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x7U ) + +#define AARCH64_ESR_EL1_CRN( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL1_CRN_SHIFT 10 +#define AARCH64_ESR_EL1_CRN_MASK 0x3c00U +#define AARCH64_ESR_EL1_CRN_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0xfU ) + +#define AARCH64_ESR_EL1_RT2( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL1_RT2_SHIFT 10 +#define AARCH64_ESR_EL1_RT2_MASK 0x7c00U +#define AARCH64_ESR_EL1_RT2_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x1fU ) + +#define AARCH64_ESR_EL1_SET( _val ) ( ( _val ) << 11 ) +#define AARCH64_ESR_EL1_SET_SHIFT 11 +#define AARCH64_ESR_EL1_SET_MASK 0x1800U +#define AARCH64_ESR_EL1_SET_GET( _reg ) \ + ( ( ( _reg ) >> 11 ) & 0x3U ) + +#define AARCH64_ESR_EL1_IMM8( _val ) ( ( _val ) << 12 ) +#define AARCH64_ESR_EL1_IMM8_SHIFT 12 +#define AARCH64_ESR_EL1_IMM8_MASK 0xff000U +#define AARCH64_ESR_EL1_IMM8_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xffU ) + +#define AARCH64_ESR_EL1_IESB 0x2000U + +#define AARCH64_ESR_EL1_VNCR 0x2000U + +#define AARCH64_ESR_EL1_AR 0x4000U + +#define AARCH64_ESR_EL1_OP1( _val ) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL1_OP1_SHIFT 14 +#define AARCH64_ESR_EL1_OP1_MASK 0x1c000U +#define AARCH64_ESR_EL1_OP1_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL1_OPC1_0( _val ) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL1_OPC1_SHIFT_0 14 +#define AARCH64_ESR_EL1_OPC1_MASK_0 0x1c000U +#define AARCH64_ESR_EL1_OPC1_GET_0( _reg ) \ + ( 
( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL1_SF 0x8000U + +#define AARCH64_ESR_EL1_OPC1_1( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL1_OPC1_SHIFT_1 16 +#define AARCH64_ESR_EL1_OPC1_MASK_1 0xf0000U +#define AARCH64_ESR_EL1_OPC1_GET_1( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ESR_EL1_SRT( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL1_SRT_SHIFT 16 +#define AARCH64_ESR_EL1_SRT_MASK 0x1f0000U +#define AARCH64_ESR_EL1_SRT_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0x1fU ) + +#define AARCH64_ESR_EL1_OP2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL1_OP2_SHIFT 17 +#define AARCH64_ESR_EL1_OP2_MASK 0xe0000U +#define AARCH64_ESR_EL1_OP2_GET( _reg ) \ + ( ( ( _reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL1_OPC2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL1_OPC2_SHIFT 17 +#define AARCH64_ESR_EL1_OPC2_MASK 0xe0000U +#define AARCH64_ESR_EL1_OPC2_GET( _reg ) \ + ( ( ( _reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL1_CCKNOWNPASS 0x80000U + +#define AARCH64_ESR_EL1_OP0( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL1_OP0_SHIFT 20 +#define AARCH64_ESR_EL1_OP0_MASK 0x300000U +#define AARCH64_ESR_EL1_OP0_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0x3U ) + +#define AARCH64_ESR_EL1_COND( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL1_COND_SHIFT 20 +#define AARCH64_ESR_EL1_COND_MASK 0xf00000U +#define AARCH64_ESR_EL1_COND_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ESR_EL1_SSE 0x200000U + +#define AARCH64_ESR_EL1_SAS( _val ) ( ( _val ) << 22 ) +#define AARCH64_ESR_EL1_SAS_SHIFT 22 +#define AARCH64_ESR_EL1_SAS_MASK 0xc00000U +#define AARCH64_ESR_EL1_SAS_GET( _reg ) \ + ( ( ( _reg ) >> 22 ) & 0x3U ) + +#define AARCH64_ESR_EL1_TFV 0x800000U + +#define AARCH64_ESR_EL1_CV 0x1000000U + +#define AARCH64_ESR_EL1_IDS 0x1000000U + +#define AARCH64_ESR_EL1_ISV 0x1000000U + +#define AARCH64_ESR_EL1_IL 0x2000000U + +#define AARCH64_ESR_EL1_EC( _val ) ( ( _val ) << 26 ) +#define AARCH64_ESR_EL1_EC_SHIFT 26 +#define 
AARCH64_ESR_EL1_EC_MASK 0xfc000000U +#define AARCH64_ESR_EL1_EC_GET( _reg ) \ + ( ( ( _reg ) >> 26 ) & 0x3fU ) + +static inline uint64_t _AArch64_Read_esr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ESR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_esr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ESR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ESR_EL2, Exception Syndrome Register (EL2) */ + +#define AARCH64_ESR_EL2_DIRECTION 0x1U + +#define AARCH64_ESR_EL2_ERETA 0x1U + +#define AARCH64_ESR_EL2_IOF 0x1U + +#define AARCH64_ESR_EL2_TI 0x1U + +#define AARCH64_ESR_EL2_BTYPE( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_BTYPE_SHIFT 0 +#define AARCH64_ESR_EL2_BTYPE_MASK 0x3U +#define AARCH64_ESR_EL2_BTYPE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3U ) + +#define AARCH64_ESR_EL2_DFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_DFSC_SHIFT 0 +#define AARCH64_ESR_EL2_DFSC_MASK 0x3fU +#define AARCH64_ESR_EL2_DFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_ESR_EL2_IFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_IFSC_SHIFT 0 +#define AARCH64_ESR_EL2_IFSC_MASK 0x3fU +#define AARCH64_ESR_EL2_IFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_ESR_EL2_COMMENT( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_COMMENT_SHIFT 0 +#define AARCH64_ESR_EL2_COMMENT_MASK 0xffffU +#define AARCH64_ESR_EL2_COMMENT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL2_IMM16( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_IMM16_SHIFT 0 +#define AARCH64_ESR_EL2_IMM16_MASK 0xffffU +#define AARCH64_ESR_EL2_IMM16_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL2_ISS( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL2_ISS_SHIFT 0 +#define AARCH64_ESR_EL2_ISS_MASK 0x1ffffffU +#define AARCH64_ESR_EL2_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x1ffffffU ) + +#define AARCH64_ESR_EL2_DZF 0x2U + +#define 
AARCH64_ESR_EL2_ERET 0x2U + +#define AARCH64_ESR_EL2_AM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL2_AM_SHIFT 1 +#define AARCH64_ESR_EL2_AM_MASK 0xeU +#define AARCH64_ESR_EL2_AM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x7U ) + +#define AARCH64_ESR_EL2_CRM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL2_CRM_SHIFT 1 +#define AARCH64_ESR_EL2_CRM_MASK 0x1eU +#define AARCH64_ESR_EL2_CRM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0xfU ) + +#define AARCH64_ESR_EL2_OFF 0x4U + +#define AARCH64_ESR_EL2_UFF 0x8U + +#define AARCH64_ESR_EL2_IXF 0x10U + +#define AARCH64_ESR_EL2_OFFSET 0x10U + +#define AARCH64_ESR_EL2_RN( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL2_RN_SHIFT 5 +#define AARCH64_ESR_EL2_RN_MASK 0x3e0U +#define AARCH64_ESR_EL2_RN_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL2_RT( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL2_RT_SHIFT 5 +#define AARCH64_ESR_EL2_RT_MASK 0x3e0U +#define AARCH64_ESR_EL2_RT_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL2_EX 0x40U + +#define AARCH64_ESR_EL2_WNR 0x40U + +#define AARCH64_ESR_EL2_IDF 0x80U + +#define AARCH64_ESR_EL2_S1PTW 0x80U + +#define AARCH64_ESR_EL2_CM 0x100U + +#define AARCH64_ESR_EL2_VECITR( _val ) ( ( _val ) << 8 ) +#define AARCH64_ESR_EL2_VECITR_SHIFT 8 +#define AARCH64_ESR_EL2_VECITR_MASK 0x700U +#define AARCH64_ESR_EL2_VECITR_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0x7U ) + +#define AARCH64_ESR_EL2_EA 0x200U + +#define AARCH64_ESR_EL2_FNV 0x400U + +#define AARCH64_ESR_EL2_AET( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL2_AET_SHIFT 10 +#define AARCH64_ESR_EL2_AET_MASK 0x1c00U +#define AARCH64_ESR_EL2_AET_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x7U ) + +#define AARCH64_ESR_EL2_CRN( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL2_CRN_SHIFT 10 +#define AARCH64_ESR_EL2_CRN_MASK 0x3c00U +#define AARCH64_ESR_EL2_CRN_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0xfU ) + +#define AARCH64_ESR_EL2_RT2( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL2_RT2_SHIFT 
10 +#define AARCH64_ESR_EL2_RT2_MASK 0x7c00U +#define AARCH64_ESR_EL2_RT2_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x1fU ) + +#define AARCH64_ESR_EL2_SET( _val ) ( ( _val ) << 11 ) +#define AARCH64_ESR_EL2_SET_SHIFT 11 +#define AARCH64_ESR_EL2_SET_MASK 0x1800U +#define AARCH64_ESR_EL2_SET_GET( _reg ) \ + ( ( ( _reg ) >> 11 ) & 0x3U ) + +#define AARCH64_ESR_EL2_IMM8( _val ) ( ( _val ) << 12 ) +#define AARCH64_ESR_EL2_IMM8_SHIFT 12 +#define AARCH64_ESR_EL2_IMM8_MASK 0xff000U +#define AARCH64_ESR_EL2_IMM8_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xffU ) + +#define AARCH64_ESR_EL2_IESB 0x2000U + +#define AARCH64_ESR_EL2_VNCR 0x2000U + +#define AARCH64_ESR_EL2_AR 0x4000U + +#define AARCH64_ESR_EL2_OP1( _val ) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL2_OP1_SHIFT 14 +#define AARCH64_ESR_EL2_OP1_MASK 0x1c000U +#define AARCH64_ESR_EL2_OP1_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL2_OPC1_0( _val ) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL2_OPC1_SHIFT_0 14 +#define AARCH64_ESR_EL2_OPC1_MASK_0 0x1c000U +#define AARCH64_ESR_EL2_OPC1_GET_0( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL2_SF 0x8000U + +#define AARCH64_ESR_EL2_OPC1_1( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL2_OPC1_SHIFT_1 16 +#define AARCH64_ESR_EL2_OPC1_MASK_1 0xf0000U +#define AARCH64_ESR_EL2_OPC1_GET_1( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ESR_EL2_SRT( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL2_SRT_SHIFT 16 +#define AARCH64_ESR_EL2_SRT_MASK 0x1f0000U +#define AARCH64_ESR_EL2_SRT_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0x1fU ) + +#define AARCH64_ESR_EL2_OP2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL2_OP2_SHIFT 17 +#define AARCH64_ESR_EL2_OP2_MASK 0xe0000U +#define AARCH64_ESR_EL2_OP2_GET( _reg ) \ + ( ( ( _reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL2_OPC2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL2_OPC2_SHIFT 17 +#define AARCH64_ESR_EL2_OPC2_MASK 0xe0000U +#define AARCH64_ESR_EL2_OPC2_GET( _reg ) \ + ( ( ( 
_reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL2_CCKNOWNPASS 0x80000U + +#define AARCH64_ESR_EL2_OP0( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL2_OP0_SHIFT 20 +#define AARCH64_ESR_EL2_OP0_MASK 0x300000U +#define AARCH64_ESR_EL2_OP0_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0x3U ) + +#define AARCH64_ESR_EL2_COND( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL2_COND_SHIFT 20 +#define AARCH64_ESR_EL2_COND_MASK 0xf00000U +#define AARCH64_ESR_EL2_COND_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ESR_EL2_SSE 0x200000U + +#define AARCH64_ESR_EL2_SAS( _val ) ( ( _val ) << 22 ) +#define AARCH64_ESR_EL2_SAS_SHIFT 22 +#define AARCH64_ESR_EL2_SAS_MASK 0xc00000U +#define AARCH64_ESR_EL2_SAS_GET( _reg ) \ + ( ( ( _reg ) >> 22 ) & 0x3U ) + +#define AARCH64_ESR_EL2_TFV 0x800000U + +#define AARCH64_ESR_EL2_CV 0x1000000U + +#define AARCH64_ESR_EL2_IDS 0x1000000U + +#define AARCH64_ESR_EL2_ISV 0x1000000U + +#define AARCH64_ESR_EL2_IL 0x2000000U + +#define AARCH64_ESR_EL2_EC( _val ) ( ( _val ) << 26 ) +#define AARCH64_ESR_EL2_EC_SHIFT 26 +#define AARCH64_ESR_EL2_EC_MASK 0xfc000000U +#define AARCH64_ESR_EL2_EC_GET( _reg ) \ + ( ( ( _reg ) >> 26 ) & 0x3fU ) + +static inline uint64_t _AArch64_Read_esr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ESR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_esr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr ESR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* ESR_EL3, Exception Syndrome Register (EL3) */ + +#define AARCH64_ESR_EL3_DIRECTION 0x1U + +#define AARCH64_ESR_EL3_ERETA 0x1U + +#define AARCH64_ESR_EL3_IOF 0x1U + +#define AARCH64_ESR_EL3_TI 0x1U + +#define AARCH64_ESR_EL3_BTYPE( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL3_BTYPE_SHIFT 0 +#define AARCH64_ESR_EL3_BTYPE_MASK 0x3U +#define AARCH64_ESR_EL3_BTYPE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3U ) + +#define AARCH64_ESR_EL3_DFSC( _val ) ( ( _val ) << 0 ) +#define 
AARCH64_ESR_EL3_DFSC_SHIFT 0 +#define AARCH64_ESR_EL3_DFSC_MASK 0x3fU +#define AARCH64_ESR_EL3_DFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_ESR_EL3_IFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL3_IFSC_SHIFT 0 +#define AARCH64_ESR_EL3_IFSC_MASK 0x3fU +#define AARCH64_ESR_EL3_IFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_ESR_EL3_COMMENT( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL3_COMMENT_SHIFT 0 +#define AARCH64_ESR_EL3_COMMENT_MASK 0xffffU +#define AARCH64_ESR_EL3_COMMENT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL3_IMM16( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL3_IMM16_SHIFT 0 +#define AARCH64_ESR_EL3_IMM16_MASK 0xffffU +#define AARCH64_ESR_EL3_IMM16_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_ESR_EL3_ISS( _val ) ( ( _val ) << 0 ) +#define AARCH64_ESR_EL3_ISS_SHIFT 0 +#define AARCH64_ESR_EL3_ISS_MASK 0x1ffffffU +#define AARCH64_ESR_EL3_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x1ffffffU ) + +#define AARCH64_ESR_EL3_DZF 0x2U + +#define AARCH64_ESR_EL3_ERET 0x2U + +#define AARCH64_ESR_EL3_AM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL3_AM_SHIFT 1 +#define AARCH64_ESR_EL3_AM_MASK 0xeU +#define AARCH64_ESR_EL3_AM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x7U ) + +#define AARCH64_ESR_EL3_CRM( _val ) ( ( _val ) << 1 ) +#define AARCH64_ESR_EL3_CRM_SHIFT 1 +#define AARCH64_ESR_EL3_CRM_MASK 0x1eU +#define AARCH64_ESR_EL3_CRM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0xfU ) + +#define AARCH64_ESR_EL3_OFF 0x4U + +#define AARCH64_ESR_EL3_UFF 0x8U + +#define AARCH64_ESR_EL3_IXF 0x10U + +#define AARCH64_ESR_EL3_OFFSET 0x10U + +#define AARCH64_ESR_EL3_RN( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL3_RN_SHIFT 5 +#define AARCH64_ESR_EL3_RN_MASK 0x3e0U +#define AARCH64_ESR_EL3_RN_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL3_RT( _val ) ( ( _val ) << 5 ) +#define AARCH64_ESR_EL3_RT_SHIFT 5 +#define AARCH64_ESR_EL3_RT_MASK 0x3e0U 
+#define AARCH64_ESR_EL3_RT_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x1fU ) + +#define AARCH64_ESR_EL3_EX 0x40U + +#define AARCH64_ESR_EL3_WNR 0x40U + +#define AARCH64_ESR_EL3_IDF 0x80U + +#define AARCH64_ESR_EL3_S1PTW 0x80U + +#define AARCH64_ESR_EL3_CM 0x100U + +#define AARCH64_ESR_EL3_VECITR( _val ) ( ( _val ) << 8 ) +#define AARCH64_ESR_EL3_VECITR_SHIFT 8 +#define AARCH64_ESR_EL3_VECITR_MASK 0x700U +#define AARCH64_ESR_EL3_VECITR_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0x7U ) + +#define AARCH64_ESR_EL3_EA 0x200U + +#define AARCH64_ESR_EL3_FNV 0x400U + +#define AARCH64_ESR_EL3_AET( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL3_AET_SHIFT 10 +#define AARCH64_ESR_EL3_AET_MASK 0x1c00U +#define AARCH64_ESR_EL3_AET_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x7U ) + +#define AARCH64_ESR_EL3_CRN( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL3_CRN_SHIFT 10 +#define AARCH64_ESR_EL3_CRN_MASK 0x3c00U +#define AARCH64_ESR_EL3_CRN_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0xfU ) + +#define AARCH64_ESR_EL3_RT2( _val ) ( ( _val ) << 10 ) +#define AARCH64_ESR_EL3_RT2_SHIFT 10 +#define AARCH64_ESR_EL3_RT2_MASK 0x7c00U +#define AARCH64_ESR_EL3_RT2_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x1fU ) + +#define AARCH64_ESR_EL3_SET( _val ) ( ( _val ) << 11 ) +#define AARCH64_ESR_EL3_SET_SHIFT 11 +#define AARCH64_ESR_EL3_SET_MASK 0x1800U +#define AARCH64_ESR_EL3_SET_GET( _reg ) \ + ( ( ( _reg ) >> 11 ) & 0x3U ) + +#define AARCH64_ESR_EL3_IMM8( _val ) ( ( _val ) << 12 ) +#define AARCH64_ESR_EL3_IMM8_SHIFT 12 +#define AARCH64_ESR_EL3_IMM8_MASK 0xff000U +#define AARCH64_ESR_EL3_IMM8_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xffU ) + +#define AARCH64_ESR_EL3_IESB 0x2000U + +#define AARCH64_ESR_EL3_VNCR 0x2000U + +#define AARCH64_ESR_EL3_AR 0x4000U + +#define AARCH64_ESR_EL3_OP1( _val ) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL3_OP1_SHIFT 14 +#define AARCH64_ESR_EL3_OP1_MASK 0x1c000U +#define AARCH64_ESR_EL3_OP1_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL3_OPC1_0( _val 
) ( ( _val ) << 14 ) +#define AARCH64_ESR_EL3_OPC1_SHIFT_0 14 +#define AARCH64_ESR_EL3_OPC1_MASK_0 0x1c000U +#define AARCH64_ESR_EL3_OPC1_GET_0( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x7U ) + +#define AARCH64_ESR_EL3_SF 0x8000U + +#define AARCH64_ESR_EL3_OPC1_1( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL3_OPC1_SHIFT_1 16 +#define AARCH64_ESR_EL3_OPC1_MASK_1 0xf0000U +#define AARCH64_ESR_EL3_OPC1_GET_1( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ESR_EL3_SRT( _val ) ( ( _val ) << 16 ) +#define AARCH64_ESR_EL3_SRT_SHIFT 16 +#define AARCH64_ESR_EL3_SRT_MASK 0x1f0000U +#define AARCH64_ESR_EL3_SRT_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0x1fU ) + +#define AARCH64_ESR_EL3_OP2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL3_OP2_SHIFT 17 +#define AARCH64_ESR_EL3_OP2_MASK 0xe0000U +#define AARCH64_ESR_EL3_OP2_GET( _reg ) \ + ( ( ( _reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL3_OPC2( _val ) ( ( _val ) << 17 ) +#define AARCH64_ESR_EL3_OPC2_SHIFT 17 +#define AARCH64_ESR_EL3_OPC2_MASK 0xe0000U +#define AARCH64_ESR_EL3_OPC2_GET( _reg ) \ + ( ( ( _reg ) >> 17 ) & 0x7U ) + +#define AARCH64_ESR_EL3_CCKNOWNPASS 0x80000U + +#define AARCH64_ESR_EL3_OP0( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL3_OP0_SHIFT 20 +#define AARCH64_ESR_EL3_OP0_MASK 0x300000U +#define AARCH64_ESR_EL3_OP0_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0x3U ) + +#define AARCH64_ESR_EL3_COND( _val ) ( ( _val ) << 20 ) +#define AARCH64_ESR_EL3_COND_SHIFT 20 +#define AARCH64_ESR_EL3_COND_MASK 0xf00000U +#define AARCH64_ESR_EL3_COND_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ESR_EL3_SSE 0x200000U + +#define AARCH64_ESR_EL3_SAS( _val ) ( ( _val ) << 22 ) +#define AARCH64_ESR_EL3_SAS_SHIFT 22 +#define AARCH64_ESR_EL3_SAS_MASK 0xc00000U +#define AARCH64_ESR_EL3_SAS_GET( _reg ) \ + ( ( ( _reg ) >> 22 ) & 0x3U ) + +#define AARCH64_ESR_EL3_TFV 0x800000U + +#define AARCH64_ESR_EL3_CV 0x1000000U + +#define AARCH64_ESR_EL3_IDS 0x1000000U + +#define AARCH64_ESR_EL3_ISV 
0x1000000U + +#define AARCH64_ESR_EL3_IL 0x2000000U + +#define AARCH64_ESR_EL3_EC( _val ) ( ( _val ) << 26 ) +#define AARCH64_ESR_EL3_EC_SHIFT 26 +#define AARCH64_ESR_EL3_EC_MASK 0xfc000000U +#define AARCH64_ESR_EL3_EC_GET( _reg ) \ + ( ( ( _reg ) >> 26 ) & 0x3fU ) + +static inline uint64_t _AArch64_Read_esr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ESR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_esr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr ESR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* FAR_EL1, Fault Address Register (EL1) */ + +static inline uint64_t _AArch64_Read_far_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, FAR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_far_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr FAR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* FAR_EL2, Fault Address Register (EL2) */ + +static inline uint64_t _AArch64_Read_far_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, FAR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_far_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr FAR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* FAR_EL3, Fault Address Register (EL3) */ + +static inline uint64_t _AArch64_Read_far_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, FAR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_far_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr FAR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* FPEXC32_EL2, Floating-Point Exception Control Register */ + +#define AARCH64_FPEXC32_EL2_IOF 0x1U + +#define AARCH64_FPEXC32_EL2_DZF 0x2U + +#define AARCH64_FPEXC32_EL2_OFF 0x4U + +#define AARCH64_FPEXC32_EL2_UFF 0x8U + +#define AARCH64_FPEXC32_EL2_IXF 0x10U + +#define AARCH64_FPEXC32_EL2_IDF 0x80U + 
+#define AARCH64_FPEXC32_EL2_VECITR( _val ) ( ( _val ) << 8 ) +#define AARCH64_FPEXC32_EL2_VECITR_SHIFT 8 +#define AARCH64_FPEXC32_EL2_VECITR_MASK 0x700U +#define AARCH64_FPEXC32_EL2_VECITR_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0x7U ) + +#define AARCH64_FPEXC32_EL2_TFV 0x4000000U + +#define AARCH64_FPEXC32_EL2_VV 0x8000000U + +#define AARCH64_FPEXC32_EL2_FP2V 0x10000000U + +#define AARCH64_FPEXC32_EL2_DEX 0x20000000U + +#define AARCH64_FPEXC32_EL2_EN 0x40000000U + +#define AARCH64_FPEXC32_EL2_EX 0x80000000U + +static inline uint64_t _AArch64_Read_fpexc32_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, FPEXC32_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_fpexc32_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr FPEXC32_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* GCR_EL1, Tag Control Register. */ + +#define AARCH64_GCR_EL1_EXCLUDE( _val ) ( ( _val ) << 0 ) +#define AARCH64_GCR_EL1_EXCLUDE_SHIFT 0 +#define AARCH64_GCR_EL1_EXCLUDE_MASK 0xffffU +#define AARCH64_GCR_EL1_EXCLUDE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_GCR_EL1_RRND 0x10000U + +static inline uint64_t _AArch64_Read_gcr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, GCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_gcr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr GCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* GMID_EL1, Multiple tag transfer ID Register */ + +#define AARCH64_GMID_EL1_BS( _val ) ( ( _val ) << 0 ) +#define AARCH64_GMID_EL1_BS_SHIFT 0 +#define AARCH64_GMID_EL1_BS_MASK 0xfU +#define AARCH64_GMID_EL1_BS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_gmid_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, GMID_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* HACR_EL2, Hypervisor Auxiliary Control Register */ + +static inline 
uint64_t _AArch64_Read_hacr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HACR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hacr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HACR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HAFGRTR_EL2, Hypervisor Activity Monitors Fine-Grained Read Trap Register */ + +#define AARCH64_HAFGRTR_EL2_AMCNTEN0 0x1U + +#define AARCH64_HAFGRTR_EL2_AMCNTEN1 0x20000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR10_EL0 0x40000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER10_EL0 0x80000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR11_EL0 0x100000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER11_EL0 0x200000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR12_EL0 0x400000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER12_EL0 0x800000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR13_EL0 0x1000000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER13_EL0 0x2000000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR14_EL0 0x4000000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER14_EL0 0x8000000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR15_EL0 0x10000000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER15_EL0 0x20000000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR16_EL0 0x40000000U + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER16_EL0 0x80000000U + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR17_EL0 0x100000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER17_EL0 0x200000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR18_EL0 0x400000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER18_EL0 0x800000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR19_EL0 0x1000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER19_EL0 0x2000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR110_EL0 0x4000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER110_EL0 0x8000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR111_EL0 0x10000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER111_EL0 0x20000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR112_EL0 0x40000000000ULL + +#define 
AARCH64_HAFGRTR_EL2_AMEVTYPER112_EL0 0x80000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR113_EL0 0x100000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER113_EL0 0x200000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR114_EL0 0x400000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER114_EL0 0x800000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVCNTR115_EL0 0x1000000000000ULL + +#define AARCH64_HAFGRTR_EL2_AMEVTYPER115_EL0 0x2000000000000ULL + +static inline uint64_t _AArch64_Read_hafgrtr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HAFGRTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hafgrtr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HAFGRTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HCR_EL2, Hypervisor Configuration Register */ + +#define AARCH64_HCR_EL2_VM 0x1U + +#define AARCH64_HCR_EL2_SWIO 0x2U + +#define AARCH64_HCR_EL2_PTW 0x4U + +#define AARCH64_HCR_EL2_FMO 0x8U + +#define AARCH64_HCR_EL2_IMO 0x10U + +#define AARCH64_HCR_EL2_AMO 0x20U + +#define AARCH64_HCR_EL2_VF 0x40U + +#define AARCH64_HCR_EL2_VI 0x80U + +#define AARCH64_HCR_EL2_VSE 0x100U + +#define AARCH64_HCR_EL2_FB 0x200U + +#define AARCH64_HCR_EL2_BSU( _val ) ( ( _val ) << 10 ) +#define AARCH64_HCR_EL2_BSU_SHIFT 10 +#define AARCH64_HCR_EL2_BSU_MASK 0xc00U +#define AARCH64_HCR_EL2_BSU_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x3U ) + +#define AARCH64_HCR_EL2_DC 0x1000U + +#define AARCH64_HCR_EL2_TWI 0x2000U + +#define AARCH64_HCR_EL2_TWE 0x4000U + +#define AARCH64_HCR_EL2_TID0 0x8000U + +#define AARCH64_HCR_EL2_TID1 0x10000U + +#define AARCH64_HCR_EL2_TID2 0x20000U + +#define AARCH64_HCR_EL2_TID3 0x40000U + +#define AARCH64_HCR_EL2_TSC 0x80000U + +#define AARCH64_HCR_EL2_TIDCP 0x100000U + +#define AARCH64_HCR_EL2_TACR 0x200000U + +#define AARCH64_HCR_EL2_TSW 0x400000U + +#define AARCH64_HCR_EL2_TPCP 0x800000U + +#define AARCH64_HCR_EL2_TPU 0x1000000U + +#define AARCH64_HCR_EL2_TTLB 0x2000000U + +#define 
AARCH64_HCR_EL2_TVM 0x4000000U + +#define AARCH64_HCR_EL2_TGE 0x8000000U + +#define AARCH64_HCR_EL2_TDZ 0x10000000U + +#define AARCH64_HCR_EL2_HCD 0x20000000U + +#define AARCH64_HCR_EL2_TRVM 0x40000000U + +#define AARCH64_HCR_EL2_RW 0x80000000U + +#define AARCH64_HCR_EL2_CD 0x100000000ULL + +#define AARCH64_HCR_EL2_ID 0x200000000ULL + +#define AARCH64_HCR_EL2_E2H 0x400000000ULL + +#define AARCH64_HCR_EL2_TLOR 0x800000000ULL + +#define AARCH64_HCR_EL2_TERR 0x1000000000ULL + +#define AARCH64_HCR_EL2_TEA 0x2000000000ULL + +#define AARCH64_HCR_EL2_MIOCNCE 0x4000000000ULL + +#define AARCH64_HCR_EL2_APK 0x10000000000ULL + +#define AARCH64_HCR_EL2_API 0x20000000000ULL + +#define AARCH64_HCR_EL2_NV 0x40000000000ULL + +#define AARCH64_HCR_EL2_NV1 0x80000000000ULL + +#define AARCH64_HCR_EL2_AT 0x100000000000ULL + +#define AARCH64_HCR_EL2_NV2 0x200000000000ULL + +#define AARCH64_HCR_EL2_FWB 0x400000000000ULL + +#define AARCH64_HCR_EL2_FIEN 0x800000000000ULL + +#define AARCH64_HCR_EL2_TID4 0x2000000000000ULL + +#define AARCH64_HCR_EL2_TICAB 0x4000000000000ULL + +#define AARCH64_HCR_EL2_AMVOFFEN 0x8000000000000ULL + +#define AARCH64_HCR_EL2_TOCU 0x10000000000000ULL + +#define AARCH64_HCR_EL2_ENSCXT 0x20000000000000ULL + +#define AARCH64_HCR_EL2_TTLBIS 0x40000000000000ULL + +#define AARCH64_HCR_EL2_TTLBOS 0x80000000000000ULL + +#define AARCH64_HCR_EL2_ATA 0x100000000000000ULL + +#define AARCH64_HCR_EL2_DCT 0x200000000000000ULL + +#define AARCH64_HCR_EL2_TID5 0x400000000000000ULL + +#define AARCH64_HCR_EL2_TWEDEN 0x800000000000000ULL + +#define AARCH64_HCR_EL2_TWEDEL( _val ) ( ( _val ) << 60 ) +#define AARCH64_HCR_EL2_TWEDEL_SHIFT 60 +#define AARCH64_HCR_EL2_TWEDEL_MASK 0xf000000000000000ULL +#define AARCH64_HCR_EL2_TWEDEL_GET( _reg ) \ + ( ( ( _reg ) >> 60 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_hcr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HCR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_hcr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HCR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HDFGRTR_EL2, Hypervisor Debug Fine-Grained Read Trap Register */ + +#define AARCH64_HDFGRTR_EL2_DBGBCRN_EL1 0x1U + +#define AARCH64_HDFGRTR_EL2_DBGBVRN_EL1 0x2U + +#define AARCH64_HDFGRTR_EL2_DBGWCRN_EL1 0x4U + +#define AARCH64_HDFGRTR_EL2_DBGWVRN_EL1 0x8U + +#define AARCH64_HDFGRTR_EL2_MDSCR_EL1 0x10U + +#define AARCH64_HDFGRTR_EL2_DBGCLAIM 0x20U + +#define AARCH64_HDFGRTR_EL2_DBGAUTHSTATUS_EL1 0x40U + +#define AARCH64_HDFGRTR_EL2_DBGPRCR_EL1 0x80U + +#define AARCH64_HDFGRTR_EL2_OSLSR_EL1 0x200U + +#define AARCH64_HDFGRTR_EL2_OSECCR_EL1 0x400U + +#define AARCH64_HDFGRTR_EL2_OSDLR_EL1 0x800U + +#define AARCH64_HDFGRTR_EL2_PMEVCNTRN_EL0 0x1000U + +#define AARCH64_HDFGRTR_EL2_PMEVTYPERN_EL0 0x2000U + +#define AARCH64_HDFGRTR_EL2_PMCCFILTR_EL0 0x4000U + +#define AARCH64_HDFGRTR_EL2_PMCCNTR_EL0 0x8000U + +#define AARCH64_HDFGRTR_EL2_PMCNTEN 0x10000U + +#define AARCH64_HDFGRTR_EL2_PMINTEN 0x20000U + +#define AARCH64_HDFGRTR_EL2_PMOVS 0x40000U + +#define AARCH64_HDFGRTR_EL2_PMSELR_EL0 0x80000U + +#define AARCH64_HDFGRTR_EL2_PMMIR_EL1 0x400000U + +#define AARCH64_HDFGRTR_EL2_PMBLIMITR_EL1 0x800000U + +#define AARCH64_HDFGRTR_EL2_PMBPTR_EL1 0x1000000U + +#define AARCH64_HDFGRTR_EL2_PMBSR_EL1 0x2000000U + +#define AARCH64_HDFGRTR_EL2_PMSCR_EL1 0x4000000U + +#define AARCH64_HDFGRTR_EL2_PMSEVFR_EL1 0x8000000U + +#define AARCH64_HDFGRTR_EL2_PMSFCR_EL1 0x10000000U + +#define AARCH64_HDFGRTR_EL2_PMSICR_EL1 0x20000000U + +#define AARCH64_HDFGRTR_EL2_PMSIDR_EL1 0x40000000U + +#define AARCH64_HDFGRTR_EL2_PMSIRR_EL1 0x80000000U + +#define AARCH64_HDFGRTR_EL2_PMSLATFR_EL1 0x100000000ULL + +#define AARCH64_HDFGRTR_EL2_TRC 0x200000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCAUTHSTATUS 0x400000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCAUXCTLR 0x800000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCCLAIM 0x1000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCCNTVRN 
0x2000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCID 0x10000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCIMSPECN 0x20000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCOSLSR 0x80000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCPRGCTLR 0x100000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCSEQSTR 0x200000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCSSCSRN 0x400000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCSTATR 0x800000000000ULL + +#define AARCH64_HDFGRTR_EL2_TRCVICTLR 0x1000000000000ULL + +#define AARCH64_HDFGRTR_EL2_PMUSERENR_EL0 0x200000000000000ULL + +#define AARCH64_HDFGRTR_EL2_PMCEIDN_EL0 0x400000000000000ULL + +static inline uint64_t _AArch64_Read_hdfgrtr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HDFGRTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hdfgrtr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HDFGRTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HDFGWTR_EL2, Hypervisor Debug Fine-Grained Write Trap Register */ + +#define AARCH64_HDFGWTR_EL2_DBGBCRN_EL1 0x1U + +#define AARCH64_HDFGWTR_EL2_DBGBVRN_EL1 0x2U + +#define AARCH64_HDFGWTR_EL2_DBGWCRN_EL1 0x4U + +#define AARCH64_HDFGWTR_EL2_DBGWVRN_EL1 0x8U + +#define AARCH64_HDFGWTR_EL2_MDSCR_EL1 0x10U + +#define AARCH64_HDFGWTR_EL2_DBGCLAIM 0x20U + +#define AARCH64_HDFGWTR_EL2_DBGPRCR_EL1 0x80U + +#define AARCH64_HDFGWTR_EL2_OSLAR_EL1 0x100U + +#define AARCH64_HDFGWTR_EL2_OSECCR_EL1 0x400U + +#define AARCH64_HDFGWTR_EL2_OSDLR_EL1 0x800U + +#define AARCH64_HDFGWTR_EL2_PMEVCNTRN_EL0 0x1000U + +#define AARCH64_HDFGWTR_EL2_PMEVTYPERN_EL0 0x2000U + +#define AARCH64_HDFGWTR_EL2_PMCCFILTR_EL0 0x4000U + +#define AARCH64_HDFGWTR_EL2_PMCCNTR_EL0 0x8000U + +#define AARCH64_HDFGWTR_EL2_PMCNTEN 0x10000U + +#define AARCH64_HDFGWTR_EL2_PMINTEN 0x20000U + +#define AARCH64_HDFGWTR_EL2_PMOVS 0x40000U + +#define AARCH64_HDFGWTR_EL2_PMSELR_EL0 0x80000U + +#define AARCH64_HDFGWTR_EL2_PMSWINC_EL0 0x100000U + +#define AARCH64_HDFGWTR_EL2_PMCR_EL0 
0x200000U + +#define AARCH64_HDFGWTR_EL2_PMBLIMITR_EL1 0x800000U + +#define AARCH64_HDFGWTR_EL2_PMBPTR_EL1 0x1000000U + +#define AARCH64_HDFGWTR_EL2_PMBSR_EL1 0x2000000U + +#define AARCH64_HDFGWTR_EL2_PMSCR_EL1 0x4000000U + +#define AARCH64_HDFGWTR_EL2_PMSEVFR_EL1 0x8000000U + +#define AARCH64_HDFGWTR_EL2_PMSFCR_EL1 0x10000000U + +#define AARCH64_HDFGWTR_EL2_PMSICR_EL1 0x20000000U + +#define AARCH64_HDFGWTR_EL2_PMSIRR_EL1 0x80000000U + +#define AARCH64_HDFGWTR_EL2_PMSLATFR_EL1 0x100000000ULL + +#define AARCH64_HDFGWTR_EL2_TRC 0x200000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCAUXCTLR 0x800000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCCLAIM 0x1000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCCNTVRN 0x2000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCIMSPECN 0x20000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCOSLAR 0x40000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCPRGCTLR 0x100000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCSEQSTR 0x200000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCSSCSRN 0x400000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRCVICTLR 0x1000000000000ULL + +#define AARCH64_HDFGWTR_EL2_TRFCR_EL1 0x2000000000000ULL + +#define AARCH64_HDFGWTR_EL2_PMUSERENR_EL0 0x200000000000000ULL + +static inline uint64_t _AArch64_Read_hdfgwtr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HDFGWTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hdfgwtr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HDFGWTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HFGITR_EL2, Hypervisor Fine-Grained Instruction Trap Register */ + +#define AARCH64_HFGITR_EL2_ICIALLUIS 0x1U + +#define AARCH64_HFGITR_EL2_ICIALLU 0x2U + +#define AARCH64_HFGITR_EL2_ICIVAU 0x4U + +#define AARCH64_HFGITR_EL2_DCIVAC 0x8U + +#define AARCH64_HFGITR_EL2_DCISW 0x10U + +#define AARCH64_HFGITR_EL2_DCCSW 0x20U + +#define AARCH64_HFGITR_EL2_DCCISW 0x40U + +#define AARCH64_HFGITR_EL2_DCCVAU 0x80U + +#define AARCH64_HFGITR_EL2_DCCVAP 0x100U + 
+#define AARCH64_HFGITR_EL2_DCCVADP 0x200U + +#define AARCH64_HFGITR_EL2_DCCIVAC 0x400U + +#define AARCH64_HFGITR_EL2_DCZVA 0x800U + +#define AARCH64_HFGITR_EL2_ATS1E1R 0x1000U + +#define AARCH64_HFGITR_EL2_ATS1E1W 0x2000U + +#define AARCH64_HFGITR_EL2_ATS1E0R 0x4000U + +#define AARCH64_HFGITR_EL2_ATS1E0W 0x8000U + +#define AARCH64_HFGITR_EL2_ATS1E1RP 0x10000U + +#define AARCH64_HFGITR_EL2_ATS1E1WP 0x20000U + +#define AARCH64_HFGITR_EL2_TLBIVMALLE1OS 0x40000U + +#define AARCH64_HFGITR_EL2_TLBIVAE1OS 0x80000U + +#define AARCH64_HFGITR_EL2_TLBIASIDE1OS 0x100000U + +#define AARCH64_HFGITR_EL2_TLBIVAAE1OS 0x200000U + +#define AARCH64_HFGITR_EL2_TLBIVALE1OS 0x400000U + +#define AARCH64_HFGITR_EL2_TLBIVAALE1OS 0x800000U + +#define AARCH64_HFGITR_EL2_TLBIRVAE1OS 0x1000000U + +#define AARCH64_HFGITR_EL2_TLBIRVAAE1OS 0x2000000U + +#define AARCH64_HFGITR_EL2_TLBIRVALE1OS 0x4000000U + +#define AARCH64_HFGITR_EL2_TLBIRVAALE1OS 0x8000000U + +#define AARCH64_HFGITR_EL2_TLBIVMALLE1IS 0x10000000U + +#define AARCH64_HFGITR_EL2_TLBIVAE1IS 0x20000000U + +#define AARCH64_HFGITR_EL2_TLBIASIDE1IS 0x40000000U + +#define AARCH64_HFGITR_EL2_TLBIVAAE1IS 0x80000000U + +#define AARCH64_HFGITR_EL2_TLBIVALE1IS 0x100000000ULL + +#define AARCH64_HFGITR_EL2_TLBIVAALE1IS 0x200000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAE1IS 0x400000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAAE1IS 0x800000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVALE1IS 0x1000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAALE1IS 0x2000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAE1 0x4000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAAE1 0x8000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVALE1 0x10000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIRVAALE1 0x20000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIVMALLE1 0x40000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIVAE1 0x80000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIASIDE1 0x100000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIVAAE1 0x200000000000ULL + +#define 
AARCH64_HFGITR_EL2_TLBIVALE1 0x400000000000ULL + +#define AARCH64_HFGITR_EL2_TLBIVAALE1 0x800000000000ULL + +#define AARCH64_HFGITR_EL2_CFPRCTX 0x1000000000000ULL + +#define AARCH64_HFGITR_EL2_DVPRCTX 0x2000000000000ULL + +#define AARCH64_HFGITR_EL2_CPPRCTX 0x4000000000000ULL + +#define AARCH64_HFGITR_EL2_ERET 0x8000000000000ULL + +#define AARCH64_HFGITR_EL2_SVC_EL0 0x10000000000000ULL + +#define AARCH64_HFGITR_EL2_SVC_EL1 0x20000000000000ULL + +#define AARCH64_HFGITR_EL2_DCCVAC 0x40000000000000ULL + +static inline uint64_t _AArch64_Read_hfgitr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HFGITR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hfgitr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HFGITR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HFGRTR_EL2, Hypervisor Fine-Grained Read Trap Register */ + +#define AARCH64_HFGRTR_EL2_AFSR0_EL1 0x1U + +#define AARCH64_HFGRTR_EL2_AFSR1_EL1 0x2U + +#define AARCH64_HFGRTR_EL2_AIDR_EL1 0x4U + +#define AARCH64_HFGRTR_EL2_AMAIR_EL1 0x8U + +#define AARCH64_HFGRTR_EL2_APDAKEY 0x10U + +#define AARCH64_HFGRTR_EL2_APDBKEY 0x20U + +#define AARCH64_HFGRTR_EL2_APGAKEY 0x40U + +#define AARCH64_HFGRTR_EL2_APIAKEY 0x80U + +#define AARCH64_HFGRTR_EL2_APIBKEY 0x100U + +#define AARCH64_HFGRTR_EL2_CCSIDR_EL1 0x200U + +#define AARCH64_HFGRTR_EL2_CLIDR_EL1 0x400U + +#define AARCH64_HFGRTR_EL2_CONTEXTIDR_EL1 0x800U + +#define AARCH64_HFGRTR_EL2_CPACR_EL1 0x1000U + +#define AARCH64_HFGRTR_EL2_CSSELR_EL1 0x2000U + +#define AARCH64_HFGRTR_EL2_CTR_EL0 0x4000U + +#define AARCH64_HFGRTR_EL2_DCZID_EL0 0x8000U + +#define AARCH64_HFGRTR_EL2_ESR_EL1 0x10000U + +#define AARCH64_HFGRTR_EL2_FAR_EL1 0x20000U + +#define AARCH64_HFGRTR_EL2_ISR_EL1 0x40000U + +#define AARCH64_HFGRTR_EL2_LORC_EL1 0x80000U + +#define AARCH64_HFGRTR_EL2_LOREA_EL1 0x100000U + +#define AARCH64_HFGRTR_EL2_LORID_EL1 0x200000U + +#define AARCH64_HFGRTR_EL2_LORN_EL1 0x400000U + +#define 
AARCH64_HFGRTR_EL2_LORSA_EL1 0x800000U + +#define AARCH64_HFGRTR_EL2_MAIR_EL1 0x1000000U + +#define AARCH64_HFGRTR_EL2_MIDR_EL1 0x2000000U + +#define AARCH64_HFGRTR_EL2_MPIDR_EL1 0x4000000U + +#define AARCH64_HFGRTR_EL2_PAR_EL1 0x8000000U + +#define AARCH64_HFGRTR_EL2_REVIDR_EL1 0x10000000U + +#define AARCH64_HFGRTR_EL2_SCTLR_EL1 0x20000000U + +#define AARCH64_HFGRTR_EL2_SCXTNUM_EL1 0x40000000U + +#define AARCH64_HFGRTR_EL2_SCXTNUM_EL0 0x80000000U + +#define AARCH64_HFGRTR_EL2_TCR_EL1 0x100000000ULL + +#define AARCH64_HFGRTR_EL2_TPIDR_EL1 0x200000000ULL + +#define AARCH64_HFGRTR_EL2_TPIDRRO_EL0 0x400000000ULL + +#define AARCH64_HFGRTR_EL2_TPIDR_EL0 0x800000000ULL + +#define AARCH64_HFGRTR_EL2_TTBR0_EL1 0x1000000000ULL + +#define AARCH64_HFGRTR_EL2_TTBR1_EL1 0x2000000000ULL + +#define AARCH64_HFGRTR_EL2_VBAR_EL1 0x4000000000ULL + +#define AARCH64_HFGRTR_EL2_ICC_IGRPENN_EL1 0x8000000000ULL + +#define AARCH64_HFGRTR_EL2_ERRIDR_EL1 0x10000000000ULL + +#define AARCH64_HFGRTR_EL2_ERRSELR_EL1 0x20000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXFR_EL1 0x40000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXCTLR_EL1 0x80000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXSTATUS_EL1 0x100000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXMISCN_EL1 0x200000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXPFGF_EL1 0x400000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXPFGCTL_EL1 0x800000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXPFGCDN_EL1 0x1000000000000ULL + +#define AARCH64_HFGRTR_EL2_ERXADDR_EL1 0x2000000000000ULL + +static inline uint64_t _AArch64_Read_hfgrtr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HFGRTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hfgrtr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HFGRTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HFGWTR_EL2, Hypervisor Fine-Grained Write Trap Register */ + +#define AARCH64_HFGWTR_EL2_AFSR0_EL1 0x1U + +#define AARCH64_HFGWTR_EL2_AFSR1_EL1 0x2U + 
+#define AARCH64_HFGWTR_EL2_AMAIR_EL1 0x8U + +#define AARCH64_HFGWTR_EL2_APDAKEY 0x10U + +#define AARCH64_HFGWTR_EL2_APDBKEY 0x20U + +#define AARCH64_HFGWTR_EL2_APGAKEY 0x40U + +#define AARCH64_HFGWTR_EL2_APIAKEY 0x80U + +#define AARCH64_HFGWTR_EL2_APIBKEY 0x100U + +#define AARCH64_HFGWTR_EL2_CONTEXTIDR_EL1 0x800U + +#define AARCH64_HFGWTR_EL2_CPACR_EL1 0x1000U + +#define AARCH64_HFGWTR_EL2_CSSELR_EL1 0x2000U + +#define AARCH64_HFGWTR_EL2_ESR_EL1 0x10000U + +#define AARCH64_HFGWTR_EL2_FAR_EL1 0x20000U + +#define AARCH64_HFGWTR_EL2_LORC_EL1 0x80000U + +#define AARCH64_HFGWTR_EL2_LOREA_EL1 0x100000U + +#define AARCH64_HFGWTR_EL2_LORN_EL1 0x400000U + +#define AARCH64_HFGWTR_EL2_LORSA_EL1 0x800000U + +#define AARCH64_HFGWTR_EL2_MAIR_EL1 0x1000000U + +#define AARCH64_HFGWTR_EL2_PAR_EL1 0x8000000U + +#define AARCH64_HFGWTR_EL2_SCTLR_EL1 0x20000000U + +#define AARCH64_HFGWTR_EL2_SCXTNUM_EL1 0x40000000U + +#define AARCH64_HFGWTR_EL2_SCXTNUM_EL0 0x80000000U + +#define AARCH64_HFGWTR_EL2_TCR_EL1 0x100000000ULL + +#define AARCH64_HFGWTR_EL2_TPIDR_EL1 0x200000000ULL + +#define AARCH64_HFGWTR_EL2_TPIDRRO_EL0 0x400000000ULL + +#define AARCH64_HFGWTR_EL2_TPIDR_EL0 0x800000000ULL + +#define AARCH64_HFGWTR_EL2_TTBR0_EL1 0x1000000000ULL + +#define AARCH64_HFGWTR_EL2_TTBR1_EL1 0x2000000000ULL + +#define AARCH64_HFGWTR_EL2_VBAR_EL1 0x4000000000ULL + +#define AARCH64_HFGWTR_EL2_ICC_IGRPENN_EL1 0x8000000000ULL + +#define AARCH64_HFGWTR_EL2_ERRSELR_EL1 0x20000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXCTLR_EL1 0x80000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXSTATUS_EL1 0x100000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXMISCN_EL1 0x200000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXPFGCTL_EL1 0x800000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXPFGCDN_EL1 0x1000000000000ULL + +#define AARCH64_HFGWTR_EL2_ERXADDR_EL1 0x2000000000000ULL + +static inline uint64_t _AArch64_Read_hfgwtr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HFGWTR_EL2" : "=&r" ( value ) : : "memory" + 
); + + return value; +} + +static inline void _AArch64_Write_hfgwtr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HFGWTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HPFAR_EL2, Hypervisor IPA Fault Address Register */ + +#define AARCH64_HPFAR_EL2_FIPA_47_12( _val ) ( ( _val ) << 4 ) +#define AARCH64_HPFAR_EL2_FIPA_47_12_SHIFT 4 +#define AARCH64_HPFAR_EL2_FIPA_47_12_MASK 0xfffffffff0ULL +#define AARCH64_HPFAR_EL2_FIPA_47_12_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfffffffffULL ) + +#define AARCH64_HPFAR_EL2_FIPA_51_48( _val ) ( ( _val ) << 40 ) +#define AARCH64_HPFAR_EL2_FIPA_51_48_SHIFT 40 +#define AARCH64_HPFAR_EL2_FIPA_51_48_MASK 0xf0000000000ULL +#define AARCH64_HPFAR_EL2_FIPA_51_48_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0xfULL ) + +#define AARCH64_HPFAR_EL2_NS 0x8000000000000000ULL + +static inline uint64_t _AArch64_Read_hpfar_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HPFAR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hpfar_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HPFAR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* HSTR_EL2, Hypervisor System Trap Register */ + +static inline uint64_t _AArch64_Read_hstr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, HSTR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_hstr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr HSTR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* ID_AA64AFR0_EL1, AArch64 Auxiliary Feature Register 0 */ + +static inline uint64_t _AArch64_Read_id_aa64afr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64AFR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_AA64AFR1_EL1, AArch64 Auxiliary Feature Register 1 */ + +static inline uint64_t _AArch64_Read_id_aa64afr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64AFR1_EL1" : "=&r" ( value ) : : 
"memory" + ); + + return value; +} + +/* ID_AA64DFR0_EL1, AArch64 Debug Feature Register 0 */ + +#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_SHIFT 0 +#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_MASK 0xfU +#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_TRACEVER( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_SHIFT 4 +#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_MASK 0xf0U +#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_PMUVER( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_AA64DFR0_EL1_PMUVER_SHIFT 8 +#define AARCH64_ID_AA64DFR0_EL1_PMUVER_MASK 0xf00U +#define AARCH64_ID_AA64DFR0_EL1_PMUVER_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_BRPS( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_AA64DFR0_EL1_BRPS_SHIFT 12 +#define AARCH64_ID_AA64DFR0_EL1_BRPS_MASK 0xf000U +#define AARCH64_ID_AA64DFR0_EL1_BRPS_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_WRPS( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_AA64DFR0_EL1_WRPS_SHIFT 20 +#define AARCH64_ID_AA64DFR0_EL1_WRPS_MASK 0xf00000U +#define AARCH64_ID_AA64DFR0_EL1_WRPS_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_SHIFT 28 +#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_MASK 0xf0000000U +#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +#define AARCH64_ID_AA64DFR0_EL1_PMSVER( _val ) ( ( _val ) << 32 ) +#define AARCH64_ID_AA64DFR0_EL1_PMSVER_SHIFT 32 +#define AARCH64_ID_AA64DFR0_EL1_PMSVER_MASK 0xf00000000ULL +#define AARCH64_ID_AA64DFR0_EL1_PMSVER_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xfULL ) + +#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK( _val ) ( ( _val ) << 36 ) +#define 
AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_SHIFT 36 +#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_MASK 0xf000000000ULL +#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_GET( _reg ) \ + ( ( ( _reg ) >> 36 ) & 0xfULL ) + +#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT( _val ) ( ( _val ) << 40 ) +#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_SHIFT 40 +#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_MASK 0xf0000000000ULL +#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0xfULL ) + +#define AARCH64_ID_AA64DFR0_EL1_MTPMU( _val ) ( ( _val ) << 48 ) +#define AARCH64_ID_AA64DFR0_EL1_MTPMU_SHIFT 48 +#define AARCH64_ID_AA64DFR0_EL1_MTPMU_MASK 0xf000000000000ULL +#define AARCH64_ID_AA64DFR0_EL1_MTPMU_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_id_aa64dfr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64DFR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_AA64DFR1_EL1, AArch64 Debug Feature Register 1 */ + +static inline uint64_t _AArch64_Read_id_aa64dfr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64DFR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_AA64ISAR0_EL1, AArch64 Instruction Set Attribute Register 0 */ + +#define AARCH64_ID_AA64ISAR0_EL1_AES( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_AA64ISAR0_EL1_AES_SHIFT 4 +#define AARCH64_ID_AA64ISAR0_EL1_AES_MASK 0xf0U +#define AARCH64_ID_AA64ISAR0_EL1_AES_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_SHA1( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_AA64ISAR0_EL1_SHA1_SHIFT 8 +#define AARCH64_ID_AA64ISAR0_EL1_SHA1_MASK 0xf00U +#define AARCH64_ID_AA64ISAR0_EL1_SHA1_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_SHA2( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_AA64ISAR0_EL1_SHA2_SHIFT 12 +#define AARCH64_ID_AA64ISAR0_EL1_SHA2_MASK 0xf000U +#define AARCH64_ID_AA64ISAR0_EL1_SHA2_GET( _reg ) \ + ( ( ( _reg ) >> 12 
) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_CRC32( _val ) ( ( _val ) << 16 ) +#define AARCH64_ID_AA64ISAR0_EL1_CRC32_SHIFT 16 +#define AARCH64_ID_AA64ISAR0_EL1_CRC32_MASK 0xf0000U +#define AARCH64_ID_AA64ISAR0_EL1_CRC32_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_SHIFT 20 +#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_MASK 0xf00000U +#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_RDM( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_AA64ISAR0_EL1_RDM_SHIFT 28 +#define AARCH64_ID_AA64ISAR0_EL1_RDM_MASK 0xf0000000U +#define AARCH64_ID_AA64ISAR0_EL1_RDM_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR0_EL1_SHA3( _val ) ( ( _val ) << 32 ) +#define AARCH64_ID_AA64ISAR0_EL1_SHA3_SHIFT 32 +#define AARCH64_ID_AA64ISAR0_EL1_SHA3_MASK 0xf00000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_SHA3_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_SM3( _val ) ( ( _val ) << 36 ) +#define AARCH64_ID_AA64ISAR0_EL1_SM3_SHIFT 36 +#define AARCH64_ID_AA64ISAR0_EL1_SM3_MASK 0xf000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_SM3_GET( _reg ) \ + ( ( ( _reg ) >> 36 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_SM4( _val ) ( ( _val ) << 40 ) +#define AARCH64_ID_AA64ISAR0_EL1_SM4_SHIFT 40 +#define AARCH64_ID_AA64ISAR0_EL1_SM4_MASK 0xf0000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_SM4_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_DP( _val ) ( ( _val ) << 44 ) +#define AARCH64_ID_AA64ISAR0_EL1_DP_SHIFT 44 +#define AARCH64_ID_AA64ISAR0_EL1_DP_MASK 0xf00000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_DP_GET( _reg ) \ + ( ( ( _reg ) >> 44 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_FHM( _val ) ( ( _val ) << 48 ) +#define AARCH64_ID_AA64ISAR0_EL1_FHM_SHIFT 48 +#define AARCH64_ID_AA64ISAR0_EL1_FHM_MASK 0xf000000000000ULL 
+#define AARCH64_ID_AA64ISAR0_EL1_FHM_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_TS( _val ) ( ( _val ) << 52 ) +#define AARCH64_ID_AA64ISAR0_EL1_TS_SHIFT 52 +#define AARCH64_ID_AA64ISAR0_EL1_TS_MASK 0xf0000000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_TS_GET( _reg ) \ + ( ( ( _reg ) >> 52 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_TLB( _val ) ( ( _val ) << 56 ) +#define AARCH64_ID_AA64ISAR0_EL1_TLB_SHIFT 56 +#define AARCH64_ID_AA64ISAR0_EL1_TLB_MASK 0xf00000000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_TLB_GET( _reg ) \ + ( ( ( _reg ) >> 56 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR0_EL1_RNDR( _val ) ( ( _val ) << 60 ) +#define AARCH64_ID_AA64ISAR0_EL1_RNDR_SHIFT 60 +#define AARCH64_ID_AA64ISAR0_EL1_RNDR_MASK 0xf000000000000000ULL +#define AARCH64_ID_AA64ISAR0_EL1_RNDR_GET( _reg ) \ + ( ( ( _reg ) >> 60 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_id_aa64isar0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64ISAR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_AA64ISAR1_EL1, AArch64 Instruction Set Attribute Register 1 */ + +#define AARCH64_ID_AA64ISAR1_EL1_DPB( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_AA64ISAR1_EL1_DPB_SHIFT 0 +#define AARCH64_ID_AA64ISAR1_EL1_DPB_MASK 0xfU +#define AARCH64_ID_AA64ISAR1_EL1_DPB_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_APA( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_AA64ISAR1_EL1_APA_SHIFT 4 +#define AARCH64_ID_AA64ISAR1_EL1_APA_MASK 0xf0U +#define AARCH64_ID_AA64ISAR1_EL1_APA_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_API( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_AA64ISAR1_EL1_API_SHIFT 8 +#define AARCH64_ID_AA64ISAR1_EL1_API_MASK 0xf00U +#define AARCH64_ID_AA64ISAR1_EL1_API_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_JSCVT( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_SHIFT 12 
+#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_MASK 0xf000U +#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_FCMA( _val ) ( ( _val ) << 16 ) +#define AARCH64_ID_AA64ISAR1_EL1_FCMA_SHIFT 16 +#define AARCH64_ID_AA64ISAR1_EL1_FCMA_MASK 0xf0000U +#define AARCH64_ID_AA64ISAR1_EL1_FCMA_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_LRCPC( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_SHIFT 20 +#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_MASK 0xf00000U +#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_GPA( _val ) ( ( _val ) << 24 ) +#define AARCH64_ID_AA64ISAR1_EL1_GPA_SHIFT 24 +#define AARCH64_ID_AA64ISAR1_EL1_GPA_MASK 0xf000000U +#define AARCH64_ID_AA64ISAR1_EL1_GPA_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_GPI( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_AA64ISAR1_EL1_GPI_SHIFT 28 +#define AARCH64_ID_AA64ISAR1_EL1_GPI_MASK 0xf0000000U +#define AARCH64_ID_AA64ISAR1_EL1_GPI_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS( _val ) ( ( _val ) << 32 ) +#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_SHIFT 32 +#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_MASK 0xf00000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR1_EL1_SB( _val ) ( ( _val ) << 36 ) +#define AARCH64_ID_AA64ISAR1_EL1_SB_SHIFT 36 +#define AARCH64_ID_AA64ISAR1_EL1_SB_MASK 0xf000000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_SB_GET( _reg ) \ + ( ( ( _reg ) >> 36 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR1_EL1_SPECRES( _val ) ( ( _val ) << 40 ) +#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_SHIFT 40 +#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_MASK 0xf0000000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0xfULL ) + +#define 
AARCH64_ID_AA64ISAR1_EL1_BF16( _val ) ( ( _val ) << 44 ) +#define AARCH64_ID_AA64ISAR1_EL1_BF16_SHIFT 44 +#define AARCH64_ID_AA64ISAR1_EL1_BF16_MASK 0xf00000000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_BF16_GET( _reg ) \ + ( ( ( _reg ) >> 44 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR1_EL1_DGH( _val ) ( ( _val ) << 48 ) +#define AARCH64_ID_AA64ISAR1_EL1_DGH_SHIFT 48 +#define AARCH64_ID_AA64ISAR1_EL1_DGH_MASK 0xf000000000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_DGH_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +#define AARCH64_ID_AA64ISAR1_EL1_I8MM( _val ) ( ( _val ) << 52 ) +#define AARCH64_ID_AA64ISAR1_EL1_I8MM_SHIFT 52 +#define AARCH64_ID_AA64ISAR1_EL1_I8MM_MASK 0xf0000000000000ULL +#define AARCH64_ID_AA64ISAR1_EL1_I8MM_GET( _reg ) \ + ( ( ( _reg ) >> 52 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_id_aa64isar1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_AA64ISAR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_AA64MMFR0_EL1, AArch64 Memory Model Feature Register 0 */ + +#define AARCH64_ID_AA64MMFR0_EL1_PARANGE( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 +#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_MASK 0xfU +#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_SHIFT 4 +#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_MASK 0xf0U +#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_BIGEND( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 +#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_MASK 0xf00U +#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 +#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_MASK 0xf000U 
+#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0( _val ) ( ( _val ) << 16 ) +#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 +#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_MASK 0xf0000U +#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_MASK 0xf00000U +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64( _val ) ( ( _val ) << 24 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_SHIFT 24 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_MASK 0xf000000U +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_SHIFT 28 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_MASK 0xf0000000U +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2( _val ) ( ( _val ) << 32 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT 32 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_MASK 0xf00000000ULL +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xfULL ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2( _val ) ( ( _val ) << 36 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT 36 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_MASK 0xf000000000ULL +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_GET( _reg ) \ + ( ( ( _reg ) >> 36 ) & 0xfULL ) + +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2( _val ) ( ( _val ) << 40 ) +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT 40 +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_MASK 0xf0000000000ULL +#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 
/*
 * Field accessors for ID_AA64MMFR0_EL1, ID_AA64MMFR1_EL1 and
 * ID_AA64MMFR2_EL1.  Each field is four bits wide: _FIELD( _val ) places a
 * field value at its bit position, _FIELD_GET( _reg ) extracts it.
 *
 * The casts in the insert macros avoid undefined behavior from left-shifting
 * a signed int (shift counts of 28 and above overflow a 32-bit int; counts of
 * 32 and above exceed its width).  The cast type matches the mask suffix.
 */

#define AARCH64_ID_AA64MMFR0_EL1_EXS( _val ) ( ( uint64_t ) ( _val ) << 44 )
#define AARCH64_ID_AA64MMFR0_EL1_EXS_SHIFT 44
#define AARCH64_ID_AA64MMFR0_EL1_EXS_MASK 0xf00000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_EXS_GET( _reg ) \
  ( ( ( _reg ) >> 44 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_FGT( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64MMFR0_EL1_FGT_SHIFT 56
#define AARCH64_ID_AA64MMFR0_EL1_FGT_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_FGT_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_ECV( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64MMFR0_EL1_ECV_SHIFT 60
#define AARCH64_ID_AA64MMFR0_EL1_ECV_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_ECV_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )

/* Read ID_AA64MMFR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_aa64mmfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_AA64MMFR1_EL1, AArch64 Memory Model Feature Register 1 */

#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_SHIFT 0
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_MASK 0xfU
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_SHIFT 4
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_MASK 0xf0U
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_VH( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64MMFR1_EL1_VH_SHIFT 8
#define AARCH64_ID_AA64MMFR1_EL1_VH_MASK 0xf00U
#define AARCH64_ID_AA64MMFR1_EL1_VH_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_HPDS( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_SHIFT 12
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_MASK 0xf000U
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_LO( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64MMFR1_EL1_LO_SHIFT 16
#define AARCH64_ID_AA64MMFR1_EL1_LO_MASK 0xf0000U
#define AARCH64_ID_AA64MMFR1_EL1_LO_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_PAN( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64MMFR1_EL1_PAN_SHIFT 20
#define AARCH64_ID_AA64MMFR1_EL1_PAN_MASK 0xf00000U
#define AARCH64_ID_AA64MMFR1_EL1_PAN_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_SHIFT 24
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_MASK 0xf000000U
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_XNX( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64MMFR1_EL1_XNX_SHIFT 28
#define AARCH64_ID_AA64MMFR1_EL1_XNX_MASK 0xf0000000U
#define AARCH64_ID_AA64MMFR1_EL1_XNX_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_TWED( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64MMFR1_EL1_TWED_SHIFT 32
#define AARCH64_ID_AA64MMFR1_EL1_TWED_MASK 0xf00000000ULL
#define AARCH64_ID_AA64MMFR1_EL1_TWED_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR1_EL1_ETS( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64MMFR1_EL1_ETS_SHIFT 36
#define AARCH64_ID_AA64MMFR1_EL1_ETS_MASK 0xf000000000ULL
#define AARCH64_ID_AA64MMFR1_EL1_ETS_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

/* Read ID_AA64MMFR1_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_aa64mmfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_AA64MMFR2_EL1, AArch64 Memory Model Feature Register 2 */

#define AARCH64_ID_AA64MMFR2_EL1_CNP( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64MMFR2_EL1_CNP_SHIFT 0
#define AARCH64_ID_AA64MMFR2_EL1_CNP_MASK 0xfU
#define AARCH64_ID_AA64MMFR2_EL1_CNP_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_UAO( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64MMFR2_EL1_UAO_SHIFT 4
#define AARCH64_ID_AA64MMFR2_EL1_UAO_MASK 0xf0U
#define AARCH64_ID_AA64MMFR2_EL1_UAO_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_LSM( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64MMFR2_EL1_LSM_SHIFT 8
#define AARCH64_ID_AA64MMFR2_EL1_LSM_MASK 0xf00U
#define AARCH64_ID_AA64MMFR2_EL1_LSM_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_IESB( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64MMFR2_EL1_IESB_SHIFT 12
#define AARCH64_ID_AA64MMFR2_EL1_IESB_MASK 0xf000U
#define AARCH64_ID_AA64MMFR2_EL1_IESB_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_VARANGE( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_SHIFT 16
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_MASK 0xf0000U
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_CCIDX( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_SHIFT 20
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_MASK 0xf00000U
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_NV( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64MMFR2_EL1_NV_SHIFT 24
#define AARCH64_ID_AA64MMFR2_EL1_NV_MASK 0xf000000U
#define AARCH64_ID_AA64MMFR2_EL1_NV_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_ST( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64MMFR2_EL1_ST_SHIFT 28
#define AARCH64_ID_AA64MMFR2_EL1_ST_MASK 0xf0000000U
#define AARCH64_ID_AA64MMFR2_EL1_ST_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_AT( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64MMFR2_EL1_AT_SHIFT 32
/*
 * Field accessors for the remainder of ID_AA64MMFR2_EL1, for
 * ID_AA64PFR0_EL1, ID_AA64PFR1_EL1, ID_AFR0_EL1, ID_DFR0_EL1, ID_DFR1_EL1
 * and ID_ISAR0_EL1 through ID_ISAR4_EL1.  The casts in the insert macros
 * avoid undefined behavior from left-shifting a signed int; the cast type
 * matches the mask suffix of the field.
 */

#define AARCH64_ID_AA64MMFR2_EL1_AT_MASK 0xf00000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_AT_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_IDS( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64MMFR2_EL1_IDS_SHIFT 36
#define AARCH64_ID_AA64MMFR2_EL1_IDS_MASK 0xf000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_IDS_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_FWB( _val ) ( ( uint64_t ) ( _val ) << 40 )
#define AARCH64_ID_AA64MMFR2_EL1_FWB_SHIFT 40
#define AARCH64_ID_AA64MMFR2_EL1_FWB_MASK 0xf0000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_FWB_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_TTL( _val ) ( ( uint64_t ) ( _val ) << 48 )
#define AARCH64_ID_AA64MMFR2_EL1_TTL_SHIFT 48
#define AARCH64_ID_AA64MMFR2_EL1_TTL_MASK 0xf000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_TTL_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_BBM( _val ) ( ( uint64_t ) ( _val ) << 52 )
#define AARCH64_ID_AA64MMFR2_EL1_BBM_SHIFT 52
#define AARCH64_ID_AA64MMFR2_EL1_BBM_MASK 0xf0000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_BBM_GET( _reg ) \
  ( ( ( _reg ) >> 52 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_EVT( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64MMFR2_EL1_EVT_SHIFT 56
#define AARCH64_ID_AA64MMFR2_EL1_EVT_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_EVT_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_E0PD( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_SHIFT 60
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )

/* Read ID_AA64MMFR2_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_aa64mmfr2_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR2_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_AA64PFR0_EL1, AArch64 Processor Feature Register 0 */

#define AARCH64_ID_AA64PFR0_EL1_EL0( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64PFR0_EL1_EL0_SHIFT 0
#define AARCH64_ID_AA64PFR0_EL1_EL0_MASK 0xfU
#define AARCH64_ID_AA64PFR0_EL1_EL0_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL1( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64PFR0_EL1_EL1_SHIFT 4
#define AARCH64_ID_AA64PFR0_EL1_EL1_MASK 0xf0U
#define AARCH64_ID_AA64PFR0_EL1_EL1_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL2( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64PFR0_EL1_EL2_SHIFT 8
#define AARCH64_ID_AA64PFR0_EL1_EL2_MASK 0xf00U
#define AARCH64_ID_AA64PFR0_EL1_EL2_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL3( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64PFR0_EL1_EL3_SHIFT 12
#define AARCH64_ID_AA64PFR0_EL1_EL3_MASK 0xf000U
#define AARCH64_ID_AA64PFR0_EL1_EL3_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_FP( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64PFR0_EL1_FP_SHIFT 16
#define AARCH64_ID_AA64PFR0_EL1_FP_MASK 0xf0000U
#define AARCH64_ID_AA64PFR0_EL1_FP_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_SHIFT 20
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_MASK 0xf00000U
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_GIC( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64PFR0_EL1_GIC_SHIFT 24
#define AARCH64_ID_AA64PFR0_EL1_GIC_MASK 0xf000000U
#define AARCH64_ID_AA64PFR0_EL1_GIC_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_RAS( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64PFR0_EL1_RAS_SHIFT 28
#define AARCH64_ID_AA64PFR0_EL1_RAS_MASK 0xf0000000U
#define AARCH64_ID_AA64PFR0_EL1_RAS_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_SVE( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64PFR0_EL1_SVE_SHIFT 32
#define AARCH64_ID_AA64PFR0_EL1_SVE_MASK 0xf00000000ULL
#define AARCH64_ID_AA64PFR0_EL1_SVE_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_SEL2( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64PFR0_EL1_SEL2_SHIFT 36
#define AARCH64_ID_AA64PFR0_EL1_SEL2_MASK 0xf000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_SEL2_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_MPAM( _val ) ( ( uint64_t ) ( _val ) << 40 )
#define AARCH64_ID_AA64PFR0_EL1_MPAM_SHIFT 40
#define AARCH64_ID_AA64PFR0_EL1_MPAM_MASK 0xf0000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_MPAM_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_AMU( _val ) ( ( uint64_t ) ( _val ) << 44 )
#define AARCH64_ID_AA64PFR0_EL1_AMU_SHIFT 44
#define AARCH64_ID_AA64PFR0_EL1_AMU_MASK 0xf00000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_AMU_GET( _reg ) \
  ( ( ( _reg ) >> 44 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_DIT( _val ) ( ( uint64_t ) ( _val ) << 48 )
#define AARCH64_ID_AA64PFR0_EL1_DIT_SHIFT 48
#define AARCH64_ID_AA64PFR0_EL1_DIT_MASK 0xf000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_DIT_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_CSV2( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64PFR0_EL1_CSV2_SHIFT 56
#define AARCH64_ID_AA64PFR0_EL1_CSV2_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_CSV2_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_CSV3( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64PFR0_EL1_CSV3_SHIFT 60
#define AARCH64_ID_AA64PFR0_EL1_CSV3_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_CSV3_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )

/* Read ID_AA64PFR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_aa64pfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64PFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_AA64PFR1_EL1, AArch64 Processor Feature Register 1 */

#define AARCH64_ID_AA64PFR1_EL1_BT( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64PFR1_EL1_BT_SHIFT 0
#define AARCH64_ID_AA64PFR1_EL1_BT_MASK 0xfU
#define AARCH64_ID_AA64PFR1_EL1_BT_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_SSBS( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64PFR1_EL1_SSBS_SHIFT 4
#define AARCH64_ID_AA64PFR1_EL1_SSBS_MASK 0xf0U
#define AARCH64_ID_AA64PFR1_EL1_SSBS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_MTE( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64PFR1_EL1_MTE_SHIFT 8
#define AARCH64_ID_AA64PFR1_EL1_MTE_MASK 0xf00U
#define AARCH64_ID_AA64PFR1_EL1_MTE_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_SHIFT 12
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_MASK 0xf000U
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_SHIFT 16
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_MASK 0xf0000U
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

/* Read ID_AA64PFR1_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_aa64pfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64PFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_AFR0_EL1, AArch64 Auxiliary Feature Register 0 */

/* Read ID_AFR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_afr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_DFR0_EL1, AArch64 Debug Feature Register 0 */

#define AARCH64_ID_DFR0_EL1_COPDBG( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_DFR0_EL1_COPDBG_SHIFT 0
#define AARCH64_ID_DFR0_EL1_COPDBG_MASK 0xfU
#define AARCH64_ID_DFR0_EL1_COPDBG_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_COPSDBG( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_DFR0_EL1_COPSDBG_SHIFT 4
#define AARCH64_ID_DFR0_EL1_COPSDBG_MASK 0xf0U
#define AARCH64_ID_DFR0_EL1_COPSDBG_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MMAPDBG( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_DFR0_EL1_MMAPDBG_SHIFT 8
#define AARCH64_ID_DFR0_EL1_MMAPDBG_MASK 0xf00U
#define AARCH64_ID_DFR0_EL1_MMAPDBG_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_COPTRC( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_DFR0_EL1_COPTRC_SHIFT 12
#define AARCH64_ID_DFR0_EL1_COPTRC_MASK 0xf000U
#define AARCH64_ID_DFR0_EL1_COPTRC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MMAPTRC( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_DFR0_EL1_MMAPTRC_SHIFT 16
#define AARCH64_ID_DFR0_EL1_MMAPTRC_MASK 0xf0000U
#define AARCH64_ID_DFR0_EL1_MMAPTRC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MPROFDBG( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_DFR0_EL1_MPROFDBG_SHIFT 20
#define AARCH64_ID_DFR0_EL1_MPROFDBG_MASK 0xf00000U
#define AARCH64_ID_DFR0_EL1_MPROFDBG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_PERFMON( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_DFR0_EL1_PERFMON_SHIFT 24
#define AARCH64_ID_DFR0_EL1_PERFMON_MASK 0xf000000U
#define AARCH64_ID_DFR0_EL1_PERFMON_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_TRACEFILT( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_DFR0_EL1_TRACEFILT_SHIFT 28
#define AARCH64_ID_DFR0_EL1_TRACEFILT_MASK 0xf0000000U
#define AARCH64_ID_DFR0_EL1_TRACEFILT_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_DFR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_dfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_DFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_DFR1_EL1, Debug Feature Register 1 */

#define AARCH64_ID_DFR1_EL1_MTPMU( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_DFR1_EL1_MTPMU_SHIFT 0
#define AARCH64_ID_DFR1_EL1_MTPMU_MASK 0xfU
#define AARCH64_ID_DFR1_EL1_MTPMU_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

/* Read ID_DFR1_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_dfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_DFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR0_EL1, AArch64 Instruction Set Attribute Register 0 */

#define AARCH64_ID_ISAR0_EL1_SWAP( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR0_EL1_SWAP_SHIFT 0
#define AARCH64_ID_ISAR0_EL1_SWAP_MASK 0xfU
#define AARCH64_ID_ISAR0_EL1_SWAP_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_BITCOUNT( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_SHIFT 4
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_MASK 0xf0U
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_BITFIELD( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR0_EL1_BITFIELD_SHIFT 8
#define AARCH64_ID_ISAR0_EL1_BITFIELD_MASK 0xf00U
#define AARCH64_ID_ISAR0_EL1_BITFIELD_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_CMPBRANCH( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_SHIFT 12
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_MASK 0xf000U
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_COPROC( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR0_EL1_COPROC_SHIFT 16
#define AARCH64_ID_ISAR0_EL1_COPROC_MASK 0xf0000U
#define AARCH64_ID_ISAR0_EL1_COPROC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_DEBUG( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR0_EL1_DEBUG_SHIFT 20
#define AARCH64_ID_ISAR0_EL1_DEBUG_MASK 0xf00000U
#define AARCH64_ID_ISAR0_EL1_DEBUG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_DIVIDE( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR0_EL1_DIVIDE_SHIFT 24
#define AARCH64_ID_ISAR0_EL1_DIVIDE_MASK 0xf000000U
#define AARCH64_ID_ISAR0_EL1_DIVIDE_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

/* Read ID_ISAR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR1_EL1, AArch64 Instruction Set Attribute Register 1 */

#define AARCH64_ID_ISAR1_EL1_ENDIAN( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR1_EL1_ENDIAN_SHIFT 0
#define AARCH64_ID_ISAR1_EL1_ENDIAN_MASK 0xfU
#define AARCH64_ID_ISAR1_EL1_ENDIAN_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXCEPT( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR1_EL1_EXCEPT_SHIFT 4
#define AARCH64_ID_ISAR1_EL1_EXCEPT_MASK 0xf0U
#define AARCH64_ID_ISAR1_EL1_EXCEPT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_SHIFT 8
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_MASK 0xf00U
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXTEND( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR1_EL1_EXTEND_SHIFT 12
#define AARCH64_ID_ISAR1_EL1_EXTEND_MASK 0xf000U
#define AARCH64_ID_ISAR1_EL1_EXTEND_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_IFTHEN( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR1_EL1_IFTHEN_SHIFT 16
#define AARCH64_ID_ISAR1_EL1_IFTHEN_MASK 0xf0000U
#define AARCH64_ID_ISAR1_EL1_IFTHEN_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_IMMEDIATE( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_SHIFT 20
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_MASK 0xf00000U
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_INTERWORK( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR1_EL1_INTERWORK_SHIFT 24
#define AARCH64_ID_ISAR1_EL1_INTERWORK_MASK 0xf000000U
#define AARCH64_ID_ISAR1_EL1_INTERWORK_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_JAZELLE( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR1_EL1_JAZELLE_SHIFT 28
#define AARCH64_ID_ISAR1_EL1_JAZELLE_MASK 0xf0000000U
#define AARCH64_ID_ISAR1_EL1_JAZELLE_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_ISAR1_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR2_EL1, AArch64 Instruction Set Attribute Register 2 */

#define AARCH64_ID_ISAR2_EL1_LOADSTORE( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_SHIFT 0
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_MASK 0xfU
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MEMHINT( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR2_EL1_MEMHINT_SHIFT 4
#define AARCH64_ID_ISAR2_EL1_MEMHINT_MASK 0xf0U
#define AARCH64_ID_ISAR2_EL1_MEMHINT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_SHIFT 8
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_MASK 0xf00U
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULT( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR2_EL1_MULT_SHIFT 12
#define AARCH64_ID_ISAR2_EL1_MULT_MASK 0xf000U
#define AARCH64_ID_ISAR2_EL1_MULT_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTS( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR2_EL1_MULTS_SHIFT 16
#define AARCH64_ID_ISAR2_EL1_MULTS_MASK 0xf0000U
#define AARCH64_ID_ISAR2_EL1_MULTS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTU( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR2_EL1_MULTU_SHIFT 20
#define AARCH64_ID_ISAR2_EL1_MULTU_MASK 0xf00000U
#define AARCH64_ID_ISAR2_EL1_MULTU_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_PSR_AR( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR2_EL1_PSR_AR_SHIFT 24
#define AARCH64_ID_ISAR2_EL1_PSR_AR_MASK 0xf000000U
#define AARCH64_ID_ISAR2_EL1_PSR_AR_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_REVERSAL( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR2_EL1_REVERSAL_SHIFT 28
#define AARCH64_ID_ISAR2_EL1_REVERSAL_MASK 0xf0000000U
#define AARCH64_ID_ISAR2_EL1_REVERSAL_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_ISAR2_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar2_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR2_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR3_EL1, AArch64 Instruction Set Attribute Register 3 */

#define AARCH64_ID_ISAR3_EL1_SATURATE( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR3_EL1_SATURATE_SHIFT 0
#define AARCH64_ID_ISAR3_EL1_SATURATE_MASK 0xfU
#define AARCH64_ID_ISAR3_EL1_SATURATE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SIMD( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR3_EL1_SIMD_SHIFT 4
#define AARCH64_ID_ISAR3_EL1_SIMD_MASK 0xf0U
#define AARCH64_ID_ISAR3_EL1_SIMD_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SVC( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR3_EL1_SVC_SHIFT 8
#define AARCH64_ID_ISAR3_EL1_SVC_MASK 0xf00U
#define AARCH64_ID_ISAR3_EL1_SVC_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_SHIFT 12
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_MASK 0xf000U
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_TABBRANCH( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_SHIFT 16
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_MASK 0xf0000U
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_T32COPY( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR3_EL1_T32COPY_SHIFT 20
#define AARCH64_ID_ISAR3_EL1_T32COPY_MASK 0xf00000U
#define AARCH64_ID_ISAR3_EL1_T32COPY_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_TRUENOP( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR3_EL1_TRUENOP_SHIFT 24
#define AARCH64_ID_ISAR3_EL1_TRUENOP_MASK 0xf000000U
#define AARCH64_ID_ISAR3_EL1_TRUENOP_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_T32EE( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR3_EL1_T32EE_SHIFT 28
#define AARCH64_ID_ISAR3_EL1_T32EE_MASK 0xf0000000U
#define AARCH64_ID_ISAR3_EL1_T32EE_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_ISAR3_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar3_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR3_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR4_EL1, AArch64 Instruction Set Attribute Register 4 */

#define AARCH64_ID_ISAR4_EL1_UNPRIV( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR4_EL1_UNPRIV_SHIFT 0
#define AARCH64_ID_ISAR4_EL1_UNPRIV_MASK 0xfU
#define AARCH64_ID_ISAR4_EL1_UNPRIV_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_SHIFT 4
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_MASK 0xf0U
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_WRITEBACK( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_SHIFT 8
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_MASK 0xf00U
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SMC( _val ) ( ( uint32_t ) ( _val ) << 12 )
/*
 * Field accessors for the remainder of ID_ISAR4_EL1, for ID_ISAR5_EL1,
 * ID_ISAR6_EL1 and the low fields of ID_MMFR0_EL1.  The casts in the insert
 * macros avoid undefined behavior from left-shifting a signed int (a shift
 * count of 28 overflows a 32-bit int for nonzero field values).
 */

#define AARCH64_ID_ISAR4_EL1_SMC_SHIFT 12
#define AARCH64_ID_ISAR4_EL1_SMC_MASK 0xf000U
#define AARCH64_ID_ISAR4_EL1_SMC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_BARRIER( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR4_EL1_BARRIER_SHIFT 16
#define AARCH64_ID_ISAR4_EL1_BARRIER_MASK 0xf0000U
#define AARCH64_ID_ISAR4_EL1_BARRIER_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_SHIFT 20
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_MASK 0xf00000U
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_PSR_M( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR4_EL1_PSR_M_SHIFT 24
#define AARCH64_ID_ISAR4_EL1_PSR_M_MASK 0xf000000U
#define AARCH64_ID_ISAR4_EL1_PSR_M_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SWP_FRAC( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_SHIFT 28
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_MASK 0xf0000000U
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_ISAR4_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar4_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR4_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR5_EL1, AArch64 Instruction Set Attribute Register 5 */

#define AARCH64_ID_ISAR5_EL1_SEVL( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR5_EL1_SEVL_SHIFT 0
#define AARCH64_ID_ISAR5_EL1_SEVL_MASK 0xfU
#define AARCH64_ID_ISAR5_EL1_SEVL_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_AES( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR5_EL1_AES_SHIFT 4
#define AARCH64_ID_ISAR5_EL1_AES_MASK 0xf0U
#define AARCH64_ID_ISAR5_EL1_AES_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_SHA1( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR5_EL1_SHA1_SHIFT 8
#define AARCH64_ID_ISAR5_EL1_SHA1_MASK 0xf00U
#define AARCH64_ID_ISAR5_EL1_SHA1_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_SHA2( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR5_EL1_SHA2_SHIFT 12
#define AARCH64_ID_ISAR5_EL1_SHA2_MASK 0xf000U
#define AARCH64_ID_ISAR5_EL1_SHA2_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_CRC32( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR5_EL1_CRC32_SHIFT 16
#define AARCH64_ID_ISAR5_EL1_CRC32_MASK 0xf0000U
#define AARCH64_ID_ISAR5_EL1_CRC32_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_RDM( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR5_EL1_RDM_SHIFT 24
#define AARCH64_ID_ISAR5_EL1_RDM_MASK 0xf000000U
#define AARCH64_ID_ISAR5_EL1_RDM_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR5_EL1_VCMA( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR5_EL1_VCMA_SHIFT 28
#define AARCH64_ID_ISAR5_EL1_VCMA_MASK 0xf0000000U
#define AARCH64_ID_ISAR5_EL1_VCMA_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_ISAR5_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar5_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR5_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_ISAR6_EL1, AArch64 Instruction Set Attribute Register 6 */

#define AARCH64_ID_ISAR6_EL1_JSCVT( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR6_EL1_JSCVT_SHIFT 0
#define AARCH64_ID_ISAR6_EL1_JSCVT_MASK 0xfU
#define AARCH64_ID_ISAR6_EL1_JSCVT_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_DP( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR6_EL1_DP_SHIFT 4
#define AARCH64_ID_ISAR6_EL1_DP_MASK 0xf0U
#define AARCH64_ID_ISAR6_EL1_DP_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_FHM( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR6_EL1_FHM_SHIFT 8
#define AARCH64_ID_ISAR6_EL1_FHM_MASK 0xf00U
#define AARCH64_ID_ISAR6_EL1_FHM_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_SB( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR6_EL1_SB_SHIFT 12
#define AARCH64_ID_ISAR6_EL1_SB_MASK 0xf000U
#define AARCH64_ID_ISAR6_EL1_SB_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_SPECRES( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR6_EL1_SPECRES_SHIFT 16
#define AARCH64_ID_ISAR6_EL1_SPECRES_MASK 0xf0000U
#define AARCH64_ID_ISAR6_EL1_SPECRES_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_BF16( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR6_EL1_BF16_SHIFT 20
#define AARCH64_ID_ISAR6_EL1_BF16_MASK 0xf00000U
#define AARCH64_ID_ISAR6_EL1_BF16_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR6_EL1_I8MM( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR6_EL1_I8MM_SHIFT 24
#define AARCH64_ID_ISAR6_EL1_I8MM_MASK 0xf000000U
#define AARCH64_ID_ISAR6_EL1_I8MM_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

/* Read ID_ISAR6_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_isar6_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR6_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_MMFR0_EL1, AArch64 Memory Model Feature Register 0 */

#define AARCH64_ID_MMFR0_EL1_VMSA( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_MMFR0_EL1_VMSA_SHIFT 0
#define AARCH64_ID_MMFR0_EL1_VMSA_MASK 0xfU
#define AARCH64_ID_MMFR0_EL1_VMSA_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_PMSA( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_MMFR0_EL1_PMSA_SHIFT 4
#define AARCH64_ID_MMFR0_EL1_PMSA_MASK 0xf0U
#define AARCH64_ID_MMFR0_EL1_PMSA_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_OUTERSHR( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_MMFR0_EL1_OUTERSHR_SHIFT 8
#define AARCH64_ID_MMFR0_EL1_OUTERSHR_MASK 0xf00U
#define AARCH64_ID_MMFR0_EL1_OUTERSHR_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )
/*
 * Field accessors for the remainder of ID_MMFR0_EL1, for ID_MMFR1_EL1,
 * ID_MMFR2_EL1, ID_MMFR3_EL1 and the low fields of ID_MMFR4_EL1.  The casts
 * in the insert macros avoid undefined behavior from left-shifting a signed
 * int (a shift count of 28 overflows a 32-bit int for nonzero field values).
 */

#define AARCH64_ID_MMFR0_EL1_SHARELVL( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_MMFR0_EL1_SHARELVL_SHIFT 12
#define AARCH64_ID_MMFR0_EL1_SHARELVL_MASK 0xf000U
#define AARCH64_ID_MMFR0_EL1_SHARELVL_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_TCM( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_MMFR0_EL1_TCM_SHIFT 16
#define AARCH64_ID_MMFR0_EL1_TCM_MASK 0xf0000U
#define AARCH64_ID_MMFR0_EL1_TCM_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_AUXREG( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_MMFR0_EL1_AUXREG_SHIFT 20
#define AARCH64_ID_MMFR0_EL1_AUXREG_MASK 0xf00000U
#define AARCH64_ID_MMFR0_EL1_AUXREG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_FCSE( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_MMFR0_EL1_FCSE_SHIFT 24
#define AARCH64_ID_MMFR0_EL1_FCSE_MASK 0xf000000U
#define AARCH64_ID_MMFR0_EL1_FCSE_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_MMFR0_EL1_INNERSHR( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_MMFR0_EL1_INNERSHR_SHIFT 28
#define AARCH64_ID_MMFR0_EL1_INNERSHR_MASK 0xf0000000U
#define AARCH64_ID_MMFR0_EL1_INNERSHR_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_MMFR0_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_mmfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_MMFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_MMFR1_EL1, AArch64 Memory Model Feature Register 1 */

#define AARCH64_ID_MMFR1_EL1_L1HVDVA( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_MMFR1_EL1_L1HVDVA_SHIFT 0
#define AARCH64_ID_MMFR1_EL1_L1HVDVA_MASK 0xfU
#define AARCH64_ID_MMFR1_EL1_L1HVDVA_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1UNIVA( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_MMFR1_EL1_L1UNIVA_SHIFT 4
#define AARCH64_ID_MMFR1_EL1_L1UNIVA_MASK 0xf0U
#define AARCH64_ID_MMFR1_EL1_L1UNIVA_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1HVDSW( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_MMFR1_EL1_L1HVDSW_SHIFT 8
#define AARCH64_ID_MMFR1_EL1_L1HVDSW_MASK 0xf00U
#define AARCH64_ID_MMFR1_EL1_L1HVDSW_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1UNISW( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_MMFR1_EL1_L1UNISW_SHIFT 12
#define AARCH64_ID_MMFR1_EL1_L1UNISW_MASK 0xf000U
#define AARCH64_ID_MMFR1_EL1_L1UNISW_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1HVD( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_MMFR1_EL1_L1HVD_SHIFT 16
#define AARCH64_ID_MMFR1_EL1_L1HVD_MASK 0xf0000U
#define AARCH64_ID_MMFR1_EL1_L1HVD_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1UNI( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_MMFR1_EL1_L1UNI_SHIFT 20
#define AARCH64_ID_MMFR1_EL1_L1UNI_MASK 0xf00000U
#define AARCH64_ID_MMFR1_EL1_L1UNI_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_L1TSTCLN( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_SHIFT 24
#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_MASK 0xf000000U
#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_MMFR1_EL1_BPRED( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_MMFR1_EL1_BPRED_SHIFT 28
#define AARCH64_ID_MMFR1_EL1_BPRED_MASK 0xf0000000U
#define AARCH64_ID_MMFR1_EL1_BPRED_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_MMFR1_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_mmfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_MMFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_MMFR2_EL1, AArch64 Memory Model Feature Register 2 */

#define AARCH64_ID_MMFR2_EL1_L1HVDFG( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_MMFR2_EL1_L1HVDFG_SHIFT 0
#define AARCH64_ID_MMFR2_EL1_L1HVDFG_MASK 0xfU
#define AARCH64_ID_MMFR2_EL1_L1HVDFG_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_L1HVDBG( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_MMFR2_EL1_L1HVDBG_SHIFT 4
#define AARCH64_ID_MMFR2_EL1_L1HVDBG_MASK 0xf0U
#define AARCH64_ID_MMFR2_EL1_L1HVDBG_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_L1HVDRNG( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_SHIFT 8
#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_MASK 0xf00U
#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_HVDTLB( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_MMFR2_EL1_HVDTLB_SHIFT 12
#define AARCH64_ID_MMFR2_EL1_HVDTLB_MASK 0xf000U
#define AARCH64_ID_MMFR2_EL1_HVDTLB_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_UNITLB( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_MMFR2_EL1_UNITLB_SHIFT 16
#define AARCH64_ID_MMFR2_EL1_UNITLB_MASK 0xf0000U
#define AARCH64_ID_MMFR2_EL1_UNITLB_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_MEMBARR( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_MMFR2_EL1_MEMBARR_SHIFT 20
#define AARCH64_ID_MMFR2_EL1_MEMBARR_MASK 0xf00000U
#define AARCH64_ID_MMFR2_EL1_MEMBARR_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_WFISTALL( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_MMFR2_EL1_WFISTALL_SHIFT 24
#define AARCH64_ID_MMFR2_EL1_WFISTALL_MASK 0xf000000U
#define AARCH64_ID_MMFR2_EL1_WFISTALL_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_MMFR2_EL1_HWACCFLG( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_MMFR2_EL1_HWACCFLG_SHIFT 28
#define AARCH64_ID_MMFR2_EL1_HWACCFLG_MASK 0xf0000000U
#define AARCH64_ID_MMFR2_EL1_HWACCFLG_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_MMFR2_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_mmfr2_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_MMFR2_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_MMFR3_EL1, AArch64 Memory Model Feature Register 3 */

#define AARCH64_ID_MMFR3_EL1_CMAINTVA( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_MMFR3_EL1_CMAINTVA_SHIFT 0
#define AARCH64_ID_MMFR3_EL1_CMAINTVA_MASK 0xfU
#define AARCH64_ID_MMFR3_EL1_CMAINTVA_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_CMAINTSW( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_MMFR3_EL1_CMAINTSW_SHIFT 4
#define AARCH64_ID_MMFR3_EL1_CMAINTSW_MASK 0xf0U
#define AARCH64_ID_MMFR3_EL1_CMAINTSW_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_BPMAINT( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_MMFR3_EL1_BPMAINT_SHIFT 8
#define AARCH64_ID_MMFR3_EL1_BPMAINT_MASK 0xf00U
#define AARCH64_ID_MMFR3_EL1_BPMAINT_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_MAINTBCST( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_MMFR3_EL1_MAINTBCST_SHIFT 12
#define AARCH64_ID_MMFR3_EL1_MAINTBCST_MASK 0xf000U
#define AARCH64_ID_MMFR3_EL1_MAINTBCST_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_PAN( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_MMFR3_EL1_PAN_SHIFT 16
#define AARCH64_ID_MMFR3_EL1_PAN_MASK 0xf0000U
#define AARCH64_ID_MMFR3_EL1_PAN_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_COHWALK( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_MMFR3_EL1_COHWALK_SHIFT 20
#define AARCH64_ID_MMFR3_EL1_COHWALK_MASK 0xf00000U
#define AARCH64_ID_MMFR3_EL1_COHWALK_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_CMEMSZ( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_MMFR3_EL1_CMEMSZ_SHIFT 24
#define AARCH64_ID_MMFR3_EL1_CMEMSZ_MASK 0xf000000U
#define AARCH64_ID_MMFR3_EL1_CMEMSZ_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_MMFR3_EL1_SUPERSEC( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_MMFR3_EL1_SUPERSEC_SHIFT 28
#define AARCH64_ID_MMFR3_EL1_SUPERSEC_MASK 0xf0000000U
#define AARCH64_ID_MMFR3_EL1_SUPERSEC_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

/* Read ID_MMFR3_EL1 via MRS. */
static inline uint64_t _AArch64_Read_id_mmfr3_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_MMFR3_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* ID_MMFR4_EL1, AArch64 Memory Model Feature Register 4 */

#define AARCH64_ID_MMFR4_EL1_SPECSEI( _val ) ( ( uint32_t ) ( _val ) << 0 )
#define AARCH64_ID_MMFR4_EL1_SPECSEI_SHIFT 0
#define AARCH64_ID_MMFR4_EL1_SPECSEI_MASK 0xfU
#define AARCH64_ID_MMFR4_EL1_SPECSEI_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_AC2( _val ) ( ( uint32_t ) ( _val ) << 4 )
#define AARCH64_ID_MMFR4_EL1_AC2_SHIFT 4
#define AARCH64_ID_MMFR4_EL1_AC2_MASK 0xf0U
#define AARCH64_ID_MMFR4_EL1_AC2_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_XNX( _val ) ( ( uint32_t ) ( _val ) << 8 )
#define AARCH64_ID_MMFR4_EL1_XNX_SHIFT 8
#define AARCH64_ID_MMFR4_EL1_XNX_MASK 0xf00U
#define AARCH64_ID_MMFR4_EL1_XNX_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_CNP( _val ) ( ( uint32_t ) ( _val ) << 12 )
#define AARCH64_ID_MMFR4_EL1_CNP_SHIFT 12
#define AARCH64_ID_MMFR4_EL1_CNP_MASK 0xf000U
#define AARCH64_ID_MMFR4_EL1_CNP_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_HPDS( _val ) ( ( uint32_t ) ( _val ) << 16 )
#define AARCH64_ID_MMFR4_EL1_HPDS_SHIFT 16
#define AARCH64_ID_MMFR4_EL1_HPDS_MASK 0xf0000U
#define AARCH64_ID_MMFR4_EL1_HPDS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_LSM( _val ) ( ( uint32_t ) ( _val ) << 20 )
#define AARCH64_ID_MMFR4_EL1_LSM_SHIFT 20
#define AARCH64_ID_MMFR4_EL1_LSM_MASK 0xf00000U
#define AARCH64_ID_MMFR4_EL1_LSM_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_CCIDX( _val ) ( ( uint32_t ) ( _val ) << 24 )
#define AARCH64_ID_MMFR4_EL1_CCIDX_SHIFT 24
#define AARCH64_ID_MMFR4_EL1_CCIDX_MASK 0xf000000U
#define AARCH64_ID_MMFR4_EL1_CCIDX_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_MMFR4_EL1_EVT( _val ) ( ( uint32_t ) ( _val ) << 28 )
#define AARCH64_ID_MMFR4_EL1_EVT_SHIFT 28
AARCH64_ID_MMFR4_EL1_EVT_MASK 0xf0000000U +#define AARCH64_ID_MMFR4_EL1_EVT_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_id_mmfr4_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_MMFR4_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_MMFR5_EL1, AArch64 Memory Model Feature Register 5 */ + +#define AARCH64_ID_MMFR5_EL1_ETS( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_MMFR5_EL1_ETS_SHIFT 0 +#define AARCH64_ID_MMFR5_EL1_ETS_MASK 0xfU +#define AARCH64_ID_MMFR5_EL1_ETS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_id_mmfr5_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_MMFR5_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_PFR0_EL1, AArch64 Processor Feature Register 0 */ + +#define AARCH64_ID_PFR0_EL1_STATE0( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_PFR0_EL1_STATE0_SHIFT 0 +#define AARCH64_ID_PFR0_EL1_STATE0_MASK 0xfU +#define AARCH64_ID_PFR0_EL1_STATE0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_STATE1( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_PFR0_EL1_STATE1_SHIFT 4 +#define AARCH64_ID_PFR0_EL1_STATE1_MASK 0xf0U +#define AARCH64_ID_PFR0_EL1_STATE1_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_STATE2( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_PFR0_EL1_STATE2_SHIFT 8 +#define AARCH64_ID_PFR0_EL1_STATE2_MASK 0xf00U +#define AARCH64_ID_PFR0_EL1_STATE2_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_STATE3( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_PFR0_EL1_STATE3_SHIFT 12 +#define AARCH64_ID_PFR0_EL1_STATE3_MASK 0xf000U +#define AARCH64_ID_PFR0_EL1_STATE3_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_CSV2( _val ) ( ( _val ) << 16 ) +#define AARCH64_ID_PFR0_EL1_CSV2_SHIFT 16 +#define AARCH64_ID_PFR0_EL1_CSV2_MASK 0xf0000U +#define AARCH64_ID_PFR0_EL1_CSV2_GET( _reg ) \ + ( 
( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_AMU( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_PFR0_EL1_AMU_SHIFT 20 +#define AARCH64_ID_PFR0_EL1_AMU_MASK 0xf00000U +#define AARCH64_ID_PFR0_EL1_AMU_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_DIT( _val ) ( ( _val ) << 24 ) +#define AARCH64_ID_PFR0_EL1_DIT_SHIFT 24 +#define AARCH64_ID_PFR0_EL1_DIT_MASK 0xf000000U +#define AARCH64_ID_PFR0_EL1_DIT_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_ID_PFR0_EL1_RAS( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_PFR0_EL1_RAS_SHIFT 28 +#define AARCH64_ID_PFR0_EL1_RAS_MASK 0xf0000000U +#define AARCH64_ID_PFR0_EL1_RAS_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_id_pfr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_PFR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_PFR1_EL1, AArch64 Processor Feature Register 1 */ + +#define AARCH64_ID_PFR1_EL1_PROGMOD( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_PFR1_EL1_PROGMOD_SHIFT 0 +#define AARCH64_ID_PFR1_EL1_PROGMOD_MASK 0xfU +#define AARCH64_ID_PFR1_EL1_PROGMOD_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_SECURITY( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_PFR1_EL1_SECURITY_SHIFT 4 +#define AARCH64_ID_PFR1_EL1_SECURITY_MASK 0xf0U +#define AARCH64_ID_PFR1_EL1_SECURITY_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_MPROGMOD( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_PFR1_EL1_MPROGMOD_SHIFT 8 +#define AARCH64_ID_PFR1_EL1_MPROGMOD_MASK 0xf00U +#define AARCH64_ID_PFR1_EL1_MPROGMOD_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION( _val ) ( ( _val ) << 12 ) +#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_SHIFT 12 +#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_MASK 0xf000U +#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define 
AARCH64_ID_PFR1_EL1_GENTIMER( _val ) ( ( _val ) << 16 ) +#define AARCH64_ID_PFR1_EL1_GENTIMER_SHIFT 16 +#define AARCH64_ID_PFR1_EL1_GENTIMER_MASK 0xf0000U +#define AARCH64_ID_PFR1_EL1_GENTIMER_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_SEC_FRAC( _val ) ( ( _val ) << 20 ) +#define AARCH64_ID_PFR1_EL1_SEC_FRAC_SHIFT 20 +#define AARCH64_ID_PFR1_EL1_SEC_FRAC_MASK 0xf00000U +#define AARCH64_ID_PFR1_EL1_SEC_FRAC_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_VIRT_FRAC( _val ) ( ( _val ) << 24 ) +#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_SHIFT 24 +#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_MASK 0xf000000U +#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_ID_PFR1_EL1_GIC( _val ) ( ( _val ) << 28 ) +#define AARCH64_ID_PFR1_EL1_GIC_SHIFT 28 +#define AARCH64_ID_PFR1_EL1_GIC_MASK 0xf0000000U +#define AARCH64_ID_PFR1_EL1_GIC_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_id_pfr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ID_PFR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ID_PFR2_EL1, AArch64 Processor Feature Register 2 */ + +#define AARCH64_ID_PFR2_EL1_CSV3( _val ) ( ( _val ) << 0 ) +#define AARCH64_ID_PFR2_EL1_CSV3_SHIFT 0 +#define AARCH64_ID_PFR2_EL1_CSV3_MASK 0xfU +#define AARCH64_ID_PFR2_EL1_CSV3_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_ID_PFR2_EL1_SSBS( _val ) ( ( _val ) << 4 ) +#define AARCH64_ID_PFR2_EL1_SSBS_SHIFT 4 +#define AARCH64_ID_PFR2_EL1_SSBS_MASK 0xf0U +#define AARCH64_ID_PFR2_EL1_SSBS_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_ID_PFR2_EL1_RAS_FRAC( _val ) ( ( _val ) << 8 ) +#define AARCH64_ID_PFR2_EL1_RAS_FRAC_SHIFT 8 +#define AARCH64_ID_PFR2_EL1_RAS_FRAC_MASK 0xf00U +#define AARCH64_ID_PFR2_EL1_RAS_FRAC_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_id_pfr2_el1( void ) +{ + uint64_t 
value; + + __asm__ volatile ( + "mrs %0, ID_PFR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* IFSR32_EL2, Instruction Fault Status Register (EL2) */ + +#define AARCH64_IFSR32_EL2_FS_3_0( _val ) ( ( _val ) << 0 ) +#define AARCH64_IFSR32_EL2_FS_3_0_SHIFT 0 +#define AARCH64_IFSR32_EL2_FS_3_0_MASK 0xfU +#define AARCH64_IFSR32_EL2_FS_3_0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_IFSR32_EL2_STATUS( _val ) ( ( _val ) << 0 ) +#define AARCH64_IFSR32_EL2_STATUS_SHIFT 0 +#define AARCH64_IFSR32_EL2_STATUS_MASK 0x3fU +#define AARCH64_IFSR32_EL2_STATUS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_IFSR32_EL2_LPAE 0x200U + +#define AARCH64_IFSR32_EL2_FS_4 0x400U + +#define AARCH64_IFSR32_EL2_EXT 0x1000U + +#define AARCH64_IFSR32_EL2_FNV 0x10000U + +static inline uint64_t _AArch64_Read_ifsr32_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, IFSR32_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_ifsr32_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr IFSR32_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* ISR_EL1, Interrupt Status Register */ + +#define AARCH64_ISR_EL1_F 0x40U + +#define AARCH64_ISR_EL1_I 0x80U + +#define AARCH64_ISR_EL1_A 0x100U + +static inline uint64_t _AArch64_Read_isr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ISR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* LORC_EL1, LORegion Control (EL1) */ + +#define AARCH64_LORC_EL1_EN 0x1U + +#define AARCH64_LORC_EL1_DS( _val ) ( ( _val ) << 2 ) +#define AARCH64_LORC_EL1_DS_SHIFT 2 +#define AARCH64_LORC_EL1_DS_MASK 0x3fcU +#define AARCH64_LORC_EL1_DS_GET( _reg ) \ + ( ( ( _reg ) >> 2 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_lorc_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, LORC_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_lorc_el1( uint64_t value ) +{ 
+ __asm__ volatile ( + "msr LORC_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* LOREA_EL1, LORegion End Address (EL1) */ + +#define AARCH64_LOREA_EL1_EA_47_16( _val ) ( ( _val ) << 16 ) +#define AARCH64_LOREA_EL1_EA_47_16_SHIFT 16 +#define AARCH64_LOREA_EL1_EA_47_16_MASK 0xffffffff0000ULL +#define AARCH64_LOREA_EL1_EA_47_16_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xffffffffULL ) + +#define AARCH64_LOREA_EL1_EA_51_48( _val ) ( ( _val ) << 48 ) +#define AARCH64_LOREA_EL1_EA_51_48_SHIFT 48 +#define AARCH64_LOREA_EL1_EA_51_48_MASK 0xf000000000000ULL +#define AARCH64_LOREA_EL1_EA_51_48_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_lorea_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, LOREA_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_lorea_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr LOREA_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* LORID_EL1, LORegionID (EL1) */ + +#define AARCH64_LORID_EL1_LR( _val ) ( ( _val ) << 0 ) +#define AARCH64_LORID_EL1_LR_SHIFT 0 +#define AARCH64_LORID_EL1_LR_MASK 0xffU +#define AARCH64_LORID_EL1_LR_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +#define AARCH64_LORID_EL1_LD( _val ) ( ( _val ) << 16 ) +#define AARCH64_LORID_EL1_LD_SHIFT 16 +#define AARCH64_LORID_EL1_LD_MASK 0xff0000U +#define AARCH64_LORID_EL1_LD_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_lorid_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, LORID_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* LORN_EL1, LORegion Number (EL1) */ + +#define AARCH64_LORN_EL1_NUM( _val ) ( ( _val ) << 0 ) +#define AARCH64_LORN_EL1_NUM_SHIFT 0 +#define AARCH64_LORN_EL1_NUM_MASK 0xffU +#define AARCH64_LORN_EL1_NUM_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_lorn_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, LORN_EL1" : 
"=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_lorn_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr LORN_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* LORSA_EL1, LORegion Start Address (EL1) */ + +#define AARCH64_LORSA_EL1_VALID 0x1U + +#define AARCH64_LORSA_EL1_SA_47_16( _val ) ( ( _val ) << 16 ) +#define AARCH64_LORSA_EL1_SA_47_16_SHIFT 16 +#define AARCH64_LORSA_EL1_SA_47_16_MASK 0xffffffff0000ULL +#define AARCH64_LORSA_EL1_SA_47_16_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xffffffffULL ) + +#define AARCH64_LORSA_EL1_SA_51_48( _val ) ( ( _val ) << 48 ) +#define AARCH64_LORSA_EL1_SA_51_48_SHIFT 48 +#define AARCH64_LORSA_EL1_SA_51_48_MASK 0xf000000000000ULL +#define AARCH64_LORSA_EL1_SA_51_48_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_lorsa_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, LORSA_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_lorsa_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr LORSA_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* MAIR_EL1, Memory Attribute Indirection Register (EL1) */ + +#define AARCH64_MAIR_EL1_ATTR0( _val ) ( ( _val ) << 0 ) +#define AARCH64_MAIR_EL1_ATTR1( _val ) ( ( _val ) << 8 ) +#define AARCH64_MAIR_EL1_ATTR2( _val ) ( ( _val ) << 16 ) +#define AARCH64_MAIR_EL1_ATTR3( _val ) ( ( _val ) << 24 ) +#define AARCH64_MAIR_EL1_ATTR4( _val ) ( ( _val ) << 32 ) +#define AARCH64_MAIR_EL1_ATTR5( _val ) ( ( _val ) << 40 ) +#define AARCH64_MAIR_EL1_ATTR6( _val ) ( ( _val ) << 48 ) +#define AARCH64_MAIR_EL1_ATTR7( _val ) ( ( _val ) << 56 ) + +static inline uint64_t _AArch64_Read_mair_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MAIR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mair_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr MAIR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* 
MAIR_EL2, Memory Attribute Indirection Register (EL2) */ + +static inline uint64_t _AArch64_Read_mair_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MAIR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mair_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr MAIR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* MAIR_EL3, Memory Attribute Indirection Register (EL3) */ + +static inline uint64_t _AArch64_Read_mair_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MAIR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mair_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr MAIR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* MIDR_EL1, Main ID Register */ + +#define AARCH64_MIDR_EL1_REVISION( _val ) ( ( _val ) << 0 ) +#define AARCH64_MIDR_EL1_REVISION_SHIFT 0 +#define AARCH64_MIDR_EL1_REVISION_MASK 0xfU +#define AARCH64_MIDR_EL1_REVISION_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_MIDR_EL1_PARTNUM( _val ) ( ( _val ) << 4 ) +#define AARCH64_MIDR_EL1_PARTNUM_SHIFT 4 +#define AARCH64_MIDR_EL1_PARTNUM_MASK 0xfff0U +#define AARCH64_MIDR_EL1_PARTNUM_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfffU ) + +#define AARCH64_MIDR_EL1_ARCHITECTURE( _val ) ( ( _val ) << 16 ) +#define AARCH64_MIDR_EL1_ARCHITECTURE_SHIFT 16 +#define AARCH64_MIDR_EL1_ARCHITECTURE_MASK 0xf0000U +#define AARCH64_MIDR_EL1_ARCHITECTURE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_MIDR_EL1_VARIANT( _val ) ( ( _val ) << 20 ) +#define AARCH64_MIDR_EL1_VARIANT_SHIFT 20 +#define AARCH64_MIDR_EL1_VARIANT_MASK 0xf00000U +#define AARCH64_MIDR_EL1_VARIANT_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_MIDR_EL1_IMPLEMENTER( _val ) ( ( _val ) << 24 ) +#define AARCH64_MIDR_EL1_IMPLEMENTER_SHIFT 24 +#define AARCH64_MIDR_EL1_IMPLEMENTER_MASK 0xff000000U +#define AARCH64_MIDR_EL1_IMPLEMENTER_GET( _reg ) \ + ( ( ( _reg ) >> 24 
) & 0xffU ) + +static inline uint64_t _AArch64_Read_midr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MPIDR_EL1, Multiprocessor Affinity Register */ + +#define AARCH64_MPIDR_EL1_AFF0( _val ) ( ( _val ) << 0 ) +#define AARCH64_MPIDR_EL1_AFF0_SHIFT 0 +#define AARCH64_MPIDR_EL1_AFF0_MASK 0xffU +#define AARCH64_MPIDR_EL1_AFF0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +#define AARCH64_MPIDR_EL1_AFF1( _val ) ( ( _val ) << 8 ) +#define AARCH64_MPIDR_EL1_AFF1_SHIFT 8 +#define AARCH64_MPIDR_EL1_AFF1_MASK 0xff00U +#define AARCH64_MPIDR_EL1_AFF1_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xffU ) + +#define AARCH64_MPIDR_EL1_AFF2( _val ) ( ( _val ) << 16 ) +#define AARCH64_MPIDR_EL1_AFF2_SHIFT 16 +#define AARCH64_MPIDR_EL1_AFF2_MASK 0xff0000U +#define AARCH64_MPIDR_EL1_AFF2_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xffU ) + +#define AARCH64_MPIDR_EL1_MT 0x1000000U + +#define AARCH64_MPIDR_EL1_U 0x40000000U + +#define AARCH64_MPIDR_EL1_AFF3( _val ) ( ( _val ) << 32 ) +#define AARCH64_MPIDR_EL1_AFF3_SHIFT 32 +#define AARCH64_MPIDR_EL1_AFF3_MASK 0xff00000000ULL +#define AARCH64_MPIDR_EL1_AFF3_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xffULL ) + +static inline uint64_t _AArch64_Read_mpidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MPIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MVFR0_EL1, AArch64 Media and VFP Feature Register 0 */ + +#define AARCH64_MVFR0_EL1_SIMDREG( _val ) ( ( _val ) << 0 ) +#define AARCH64_MVFR0_EL1_SIMDREG_SHIFT 0 +#define AARCH64_MVFR0_EL1_SIMDREG_MASK 0xfU +#define AARCH64_MVFR0_EL1_SIMDREG_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPSP( _val ) ( ( _val ) << 4 ) +#define AARCH64_MVFR0_EL1_FPSP_SHIFT 4 +#define AARCH64_MVFR0_EL1_FPSP_MASK 0xf0U +#define AARCH64_MVFR0_EL1_FPSP_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPDP( _val ) ( ( _val ) << 8 ) +#define 
AARCH64_MVFR0_EL1_FPDP_SHIFT 8 +#define AARCH64_MVFR0_EL1_FPDP_MASK 0xf00U +#define AARCH64_MVFR0_EL1_FPDP_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPTRAP( _val ) ( ( _val ) << 12 ) +#define AARCH64_MVFR0_EL1_FPTRAP_SHIFT 12 +#define AARCH64_MVFR0_EL1_FPTRAP_MASK 0xf000U +#define AARCH64_MVFR0_EL1_FPTRAP_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPDIVIDE( _val ) ( ( _val ) << 16 ) +#define AARCH64_MVFR0_EL1_FPDIVIDE_SHIFT 16 +#define AARCH64_MVFR0_EL1_FPDIVIDE_MASK 0xf0000U +#define AARCH64_MVFR0_EL1_FPDIVIDE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPSQRT( _val ) ( ( _val ) << 20 ) +#define AARCH64_MVFR0_EL1_FPSQRT_SHIFT 20 +#define AARCH64_MVFR0_EL1_FPSQRT_MASK 0xf00000U +#define AARCH64_MVFR0_EL1_FPSQRT_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPSHVEC( _val ) ( ( _val ) << 24 ) +#define AARCH64_MVFR0_EL1_FPSHVEC_SHIFT 24 +#define AARCH64_MVFR0_EL1_FPSHVEC_MASK 0xf000000U +#define AARCH64_MVFR0_EL1_FPSHVEC_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_MVFR0_EL1_FPROUND( _val ) ( ( _val ) << 28 ) +#define AARCH64_MVFR0_EL1_FPROUND_SHIFT 28 +#define AARCH64_MVFR0_EL1_FPROUND_MASK 0xf0000000U +#define AARCH64_MVFR0_EL1_FPROUND_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_mvfr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MVFR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MVFR1_EL1, AArch64 Media and VFP Feature Register 1 */ + +#define AARCH64_MVFR1_EL1_FPFTZ( _val ) ( ( _val ) << 0 ) +#define AARCH64_MVFR1_EL1_FPFTZ_SHIFT 0 +#define AARCH64_MVFR1_EL1_FPFTZ_MASK 0xfU +#define AARCH64_MVFR1_EL1_FPFTZ_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_FPDNAN( _val ) ( ( _val ) << 4 ) +#define AARCH64_MVFR1_EL1_FPDNAN_SHIFT 4 +#define AARCH64_MVFR1_EL1_FPDNAN_MASK 0xf0U +#define AARCH64_MVFR1_EL1_FPDNAN_GET( 
_reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_SIMDLS( _val ) ( ( _val ) << 8 ) +#define AARCH64_MVFR1_EL1_SIMDLS_SHIFT 8 +#define AARCH64_MVFR1_EL1_SIMDLS_MASK 0xf00U +#define AARCH64_MVFR1_EL1_SIMDLS_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_SIMDINT( _val ) ( ( _val ) << 12 ) +#define AARCH64_MVFR1_EL1_SIMDINT_SHIFT 12 +#define AARCH64_MVFR1_EL1_SIMDINT_MASK 0xf000U +#define AARCH64_MVFR1_EL1_SIMDINT_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_SIMDSP( _val ) ( ( _val ) << 16 ) +#define AARCH64_MVFR1_EL1_SIMDSP_SHIFT 16 +#define AARCH64_MVFR1_EL1_SIMDSP_MASK 0xf0000U +#define AARCH64_MVFR1_EL1_SIMDSP_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_SIMDHP( _val ) ( ( _val ) << 20 ) +#define AARCH64_MVFR1_EL1_SIMDHP_SHIFT 20 +#define AARCH64_MVFR1_EL1_SIMDHP_MASK 0xf00000U +#define AARCH64_MVFR1_EL1_SIMDHP_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_FPHP( _val ) ( ( _val ) << 24 ) +#define AARCH64_MVFR1_EL1_FPHP_SHIFT 24 +#define AARCH64_MVFR1_EL1_FPHP_MASK 0xf000000U +#define AARCH64_MVFR1_EL1_FPHP_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xfU ) + +#define AARCH64_MVFR1_EL1_SIMDFMAC( _val ) ( ( _val ) << 28 ) +#define AARCH64_MVFR1_EL1_SIMDFMAC_SHIFT 28 +#define AARCH64_MVFR1_EL1_SIMDFMAC_MASK 0xf0000000U +#define AARCH64_MVFR1_EL1_SIMDFMAC_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_mvfr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MVFR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MVFR2_EL1, AArch64 Media and VFP Feature Register 2 */ + +#define AARCH64_MVFR2_EL1_SIMDMISC( _val ) ( ( _val ) << 0 ) +#define AARCH64_MVFR2_EL1_SIMDMISC_SHIFT 0 +#define AARCH64_MVFR2_EL1_SIMDMISC_MASK 0xfU +#define AARCH64_MVFR2_EL1_SIMDMISC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_MVFR2_EL1_FPMISC( _val ) ( ( _val ) << 4 ) +#define 
AARCH64_MVFR2_EL1_FPMISC_SHIFT 4 +#define AARCH64_MVFR2_EL1_FPMISC_MASK 0xf0U +#define AARCH64_MVFR2_EL1_FPMISC_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_mvfr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MVFR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PAR_EL1, Physical Address Register */ + +#define AARCH64_PAR_EL1_F 0x1U + +#define AARCH64_PAR_EL1_FST( _val ) ( ( _val ) << 1 ) +#define AARCH64_PAR_EL1_FST_SHIFT 1 +#define AARCH64_PAR_EL1_FST_MASK 0x7eU +#define AARCH64_PAR_EL1_FST_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x3fU ) + +#define AARCH64_PAR_EL1_SH( _val ) ( ( _val ) << 7 ) +#define AARCH64_PAR_EL1_SH_SHIFT 7 +#define AARCH64_PAR_EL1_SH_MASK 0x180U +#define AARCH64_PAR_EL1_SH_GET( _reg ) \ + ( ( ( _reg ) >> 7 ) & 0x3U ) + +#define AARCH64_PAR_EL1_PTW 0x100U + +#define AARCH64_PAR_EL1_NS 0x200U + +#define AARCH64_PAR_EL1_S 0x200U + +#define AARCH64_PAR_EL1_PA_47_12( _val ) ( ( _val ) << 12 ) +#define AARCH64_PAR_EL1_PA_47_12_SHIFT 12 +#define AARCH64_PAR_EL1_PA_47_12_MASK 0xfffffffff000ULL +#define AARCH64_PAR_EL1_PA_47_12_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfffffffffULL ) + +#define AARCH64_PAR_EL1_PA_51_48( _val ) ( ( _val ) << 48 ) +#define AARCH64_PAR_EL1_PA_51_48_SHIFT 48 +#define AARCH64_PAR_EL1_PA_51_48_MASK 0xf000000000000ULL +#define AARCH64_PAR_EL1_PA_51_48_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +#define AARCH64_PAR_EL1_ATTR( _val ) ( ( _val ) << 56 ) +#define AARCH64_PAR_EL1_ATTR_SHIFT 56 +#define AARCH64_PAR_EL1_ATTR_MASK 0xff00000000000000ULL +#define AARCH64_PAR_EL1_ATTR_GET( _reg ) \ + ( ( ( _reg ) >> 56 ) & 0xffULL ) + +static inline uint64_t _AArch64_Read_par_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PAR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_par_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PAR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + 
+/* REVIDR_EL1, Revision ID Register */ + +static inline uint64_t _AArch64_Read_revidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, REVIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* RGSR_EL1, Random Allocation Tag Seed Register. */ + +#define AARCH64_RGSR_EL1_TAG( _val ) ( ( _val ) << 0 ) +#define AARCH64_RGSR_EL1_TAG_SHIFT 0 +#define AARCH64_RGSR_EL1_TAG_MASK 0xfU +#define AARCH64_RGSR_EL1_TAG_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_RGSR_EL1_SEED( _val ) ( ( _val ) << 8 ) +#define AARCH64_RGSR_EL1_SEED_SHIFT 8 +#define AARCH64_RGSR_EL1_SEED_MASK 0xffff00U +#define AARCH64_RGSR_EL1_SEED_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xffffU ) + +static inline uint64_t _AArch64_Read_rgsr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RGSR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_rgsr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr RGSR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* RMR_EL1, Reset Management Register (EL1) */ + +#define AARCH64_RMR_EL1_AA64 0x1U + +#define AARCH64_RMR_EL1_RR 0x2U + +static inline uint64_t _AArch64_Read_rmr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RMR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_rmr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr RMR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* RMR_EL2, Reset Management Register (EL2) */ + +#define AARCH64_RMR_EL2_AA64 0x1U + +#define AARCH64_RMR_EL2_RR 0x2U + +static inline uint64_t _AArch64_Read_rmr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RMR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_rmr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr RMR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* RMR_EL3, Reset Management Register (EL3) */ + +#define 
AARCH64_RMR_EL3_AA64 0x1U + +#define AARCH64_RMR_EL3_RR 0x2U + +static inline uint64_t _AArch64_Read_rmr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RMR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_rmr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr RMR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* RNDR, Random Number */ + +static inline uint64_t _AArch64_Read_rndr( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RNDR" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* RNDRRS, Reseeded Random Number */ + +static inline uint64_t _AArch64_Read_rndrrs( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RNDRRS" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* RVBAR_EL1, Reset Vector Base Address Register (if EL2 and EL3 not implemented) */ + +static inline uint64_t _AArch64_Read_rvbar_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RVBAR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* RVBAR_EL2, Reset Vector Base Address Register (if EL3 not implemented) */ + +static inline uint64_t _AArch64_Read_rvbar_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RVBAR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* RVBAR_EL3, Reset Vector Base Address Register (if EL3 implemented) */ + +static inline uint64_t _AArch64_Read_rvbar_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, RVBAR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* SCR_EL3, Secure Configuration Register */ + +#define AARCH64_SCR_EL3_NS 0x1U + +#define AARCH64_SCR_EL3_IRQ 0x2U + +#define AARCH64_SCR_EL3_FIQ 0x4U + +#define AARCH64_SCR_EL3_EA 0x8U + +#define AARCH64_SCR_EL3_SMD 0x80U + +#define AARCH64_SCR_EL3_HCE 0x100U + +#define AARCH64_SCR_EL3_SIF 0x200U + +#define AARCH64_SCR_EL3_RW 0x400U + +#define AARCH64_SCR_EL3_ST 0x800U + +#define AARCH64_SCR_EL3_TWI 0x1000U 
+ +#define AARCH64_SCR_EL3_TWE 0x2000U + +#define AARCH64_SCR_EL3_TLOR 0x4000U + +#define AARCH64_SCR_EL3_TERR 0x8000U + +#define AARCH64_SCR_EL3_APK 0x10000U + +#define AARCH64_SCR_EL3_API 0x20000U + +#define AARCH64_SCR_EL3_EEL2 0x40000U + +#define AARCH64_SCR_EL3_EASE 0x80000U + +#define AARCH64_SCR_EL3_NMEA 0x100000U + +#define AARCH64_SCR_EL3_FIEN 0x200000U + +#define AARCH64_SCR_EL3_ENSCXT 0x2000000U + +#define AARCH64_SCR_EL3_ATA 0x4000000U + +#define AARCH64_SCR_EL3_FGTEN 0x8000000U + +#define AARCH64_SCR_EL3_ECVEN 0x10000000U + +#define AARCH64_SCR_EL3_TWEDEN 0x20000000U + +#define AARCH64_SCR_EL3_TWEDEL( _val ) ( ( _val ) << 30 ) +#define AARCH64_SCR_EL3_TWEDEL_SHIFT 30 +#define AARCH64_SCR_EL3_TWEDEL_MASK 0x3c0000000ULL +#define AARCH64_SCR_EL3_TWEDEL_GET( _reg ) \ + ( ( ( _reg ) >> 30 ) & 0xfULL ) + +#define AARCH64_SCR_EL3_AMVOFFEN 0x800000000ULL + +static inline uint64_t _AArch64_Read_scr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, SCR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_scr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr SCR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* SCTLR_EL1, System Control Register (EL1) */ + +#define AARCH64_SCTLR_EL1_M 0x1U + +#define AARCH64_SCTLR_EL1_A 0x2U + +#define AARCH64_SCTLR_EL1_C 0x4U + +#define AARCH64_SCTLR_EL1_SA 0x8U + +#define AARCH64_SCTLR_EL1_SA0 0x10U + +#define AARCH64_SCTLR_EL1_CP15BEN 0x20U + +#define AARCH64_SCTLR_EL1_NAA 0x40U + +#define AARCH64_SCTLR_EL1_ITD 0x80U + +#define AARCH64_SCTLR_EL1_SED 0x100U + +#define AARCH64_SCTLR_EL1_UMA 0x200U + +#define AARCH64_SCTLR_EL1_ENRCTX 0x400U + +#define AARCH64_SCTLR_EL1_EOS 0x800U + +#define AARCH64_SCTLR_EL1_I 0x1000U + +#define AARCH64_SCTLR_EL1_ENDB 0x2000U + +#define AARCH64_SCTLR_EL1_DZE 0x4000U + +#define AARCH64_SCTLR_EL1_UCT 0x8000U + +#define AARCH64_SCTLR_EL1_NTWI 0x10000U + +#define AARCH64_SCTLR_EL1_NTWE 0x40000U + +#define AARCH64_SCTLR_EL1_WXN 
0x80000U + +#define AARCH64_SCTLR_EL1_TSCXT 0x100000U + +#define AARCH64_SCTLR_EL1_IESB 0x200000U + +#define AARCH64_SCTLR_EL1_EIS 0x400000U + +#define AARCH64_SCTLR_EL1_SPAN 0x800000U + +#define AARCH64_SCTLR_EL1_E0E 0x1000000U + +#define AARCH64_SCTLR_EL1_EE 0x2000000U + +#define AARCH64_SCTLR_EL1_UCI 0x4000000U + +#define AARCH64_SCTLR_EL1_ENDA 0x8000000U + +#define AARCH64_SCTLR_EL1_NTLSMD 0x10000000U + +#define AARCH64_SCTLR_EL1_LSMAOE 0x20000000U + +#define AARCH64_SCTLR_EL1_ENIB 0x40000000U + +#define AARCH64_SCTLR_EL1_ENIA 0x80000000U + +#define AARCH64_SCTLR_EL1_BT0 0x800000000ULL + +#define AARCH64_SCTLR_EL1_BT1 0x1000000000ULL + +#define AARCH64_SCTLR_EL1_ITFSB 0x2000000000ULL + +#define AARCH64_SCTLR_EL1_TCF0( _val ) ( ( _val ) << 38 ) +#define AARCH64_SCTLR_EL1_TCF0_SHIFT 38 +#define AARCH64_SCTLR_EL1_TCF0_MASK 0xc000000000ULL +#define AARCH64_SCTLR_EL1_TCF0_GET( _reg ) \ + ( ( ( _reg ) >> 38 ) & 0x3ULL ) + +#define AARCH64_SCTLR_EL1_TCF( _val ) ( ( _val ) << 40 ) +#define AARCH64_SCTLR_EL1_TCF_SHIFT 40 +#define AARCH64_SCTLR_EL1_TCF_MASK 0x30000000000ULL +#define AARCH64_SCTLR_EL1_TCF_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0x3ULL ) + +#define AARCH64_SCTLR_EL1_ATA0 0x40000000000ULL + +#define AARCH64_SCTLR_EL1_ATA 0x80000000000ULL + +#define AARCH64_SCTLR_EL1_DSSBS 0x100000000000ULL + +#define AARCH64_SCTLR_EL1_TWEDEN 0x200000000000ULL + +#define AARCH64_SCTLR_EL1_TWEDEL( _val ) ( ( _val ) << 46 ) +#define AARCH64_SCTLR_EL1_TWEDEL_SHIFT 46 +#define AARCH64_SCTLR_EL1_TWEDEL_MASK 0x3c00000000000ULL +#define AARCH64_SCTLR_EL1_TWEDEL_GET( _reg ) \ + ( ( ( _reg ) >> 46 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_sctlr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, SCTLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_sctlr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr SCTLR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* SCTLR_EL2, System Control Register (EL2) */ + 
/* Bit fields of SCTLR_EL2 (values per Arm ARM, one macro per control bit) */
#define AARCH64_SCTLR_EL2_M 0x1U
#define AARCH64_SCTLR_EL2_A 0x2U
#define AARCH64_SCTLR_EL2_C 0x4U
#define AARCH64_SCTLR_EL2_SA 0x8U
#define AARCH64_SCTLR_EL2_SA0 0x10U
#define AARCH64_SCTLR_EL2_CP15BEN 0x20U
#define AARCH64_SCTLR_EL2_NAA 0x40U
#define AARCH64_SCTLR_EL2_ITD 0x80U
#define AARCH64_SCTLR_EL2_SED 0x100U
#define AARCH64_SCTLR_EL2_ENRCTX 0x400U
#define AARCH64_SCTLR_EL2_EOS 0x800U
#define AARCH64_SCTLR_EL2_I 0x1000U
#define AARCH64_SCTLR_EL2_ENDB 0x2000U
#define AARCH64_SCTLR_EL2_DZE 0x4000U
#define AARCH64_SCTLR_EL2_UCT 0x8000U
#define AARCH64_SCTLR_EL2_NTWI 0x10000U
#define AARCH64_SCTLR_EL2_NTWE 0x40000U
#define AARCH64_SCTLR_EL2_WXN 0x80000U
#define AARCH64_SCTLR_EL2_TSCXT 0x100000U
#define AARCH64_SCTLR_EL2_IESB 0x200000U
#define AARCH64_SCTLR_EL2_EIS 0x400000U
#define AARCH64_SCTLR_EL2_SPAN 0x800000U
#define AARCH64_SCTLR_EL2_E0E 0x1000000U
#define AARCH64_SCTLR_EL2_EE 0x2000000U
#define AARCH64_SCTLR_EL2_UCI 0x4000000U
#define AARCH64_SCTLR_EL2_ENDA 0x8000000U
#define AARCH64_SCTLR_EL2_NTLSMD 0x10000000U
#define AARCH64_SCTLR_EL2_LSMAOE 0x20000000U
#define AARCH64_SCTLR_EL2_ENIB 0x40000000U
#define AARCH64_SCTLR_EL2_ENIA 0x80000000U
#define AARCH64_SCTLR_EL2_BT0 0x800000000ULL

/* BT and BT1 name the same bit; which applies depends on HCR_EL2.E2H */
#define AARCH64_SCTLR_EL2_BT 0x1000000000ULL
#define AARCH64_SCTLR_EL2_BT1 0x1000000000ULL

#define AARCH64_SCTLR_EL2_ITFSB 0x2000000000ULL

/* Multi-bit fields: _val inserts, _GET extracts, _SHIFT/_MASK describe */
#define AARCH64_SCTLR_EL2_TCF0( _val ) ( ( _val ) << 38 )
#define AARCH64_SCTLR_EL2_TCF0_SHIFT 38
#define AARCH64_SCTLR_EL2_TCF0_MASK 0xc000000000ULL
#define AARCH64_SCTLR_EL2_TCF0_GET( _reg ) \
  ( ( ( _reg ) >> 38 ) & 0x3ULL )

#define AARCH64_SCTLR_EL2_TCF( _val ) ( ( _val ) << 40 )
#define AARCH64_SCTLR_EL2_TCF_SHIFT 40
#define AARCH64_SCTLR_EL2_TCF_MASK 0x30000000000ULL
#define AARCH64_SCTLR_EL2_TCF_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0x3ULL )

#define AARCH64_SCTLR_EL2_ATA0 0x40000000000ULL
#define AARCH64_SCTLR_EL2_ATA 0x80000000000ULL
#define AARCH64_SCTLR_EL2_DSSBS 0x100000000000ULL
#define AARCH64_SCTLR_EL2_TWEDEN 0x200000000000ULL

#define AARCH64_SCTLR_EL2_TWEDEL( _val ) ( ( _val ) << 46 )
#define AARCH64_SCTLR_EL2_TWEDEL_SHIFT 46
#define AARCH64_SCTLR_EL2_TWEDEL_MASK 0x3c00000000000ULL
#define AARCH64_SCTLR_EL2_TWEDEL_GET( _reg ) \
  ( ( ( _reg ) >> 46 ) & 0xfULL )

/* Read the raw SCTLR_EL2 value */
static inline uint64_t _AArch64_Read_sctlr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCTLR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write the raw SCTLR_EL2 value */
static inline void _AArch64_Write_sctlr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr SCTLR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* SCTLR_EL3, System Control Register (EL3) */

#define AARCH64_SCTLR_EL3_M 0x1U
#define AARCH64_SCTLR_EL3_A 0x2U
#define AARCH64_SCTLR_EL3_C 0x4U
#define AARCH64_SCTLR_EL3_SA 0x8U
#define AARCH64_SCTLR_EL3_NAA 0x40U
#define AARCH64_SCTLR_EL3_EOS 0x800U
#define AARCH64_SCTLR_EL3_I 0x1000U
#define AARCH64_SCTLR_EL3_ENDB 0x2000U
#define AARCH64_SCTLR_EL3_WXN 0x80000U
#define AARCH64_SCTLR_EL3_IESB 0x200000U
#define AARCH64_SCTLR_EL3_EIS 0x400000U
#define AARCH64_SCTLR_EL3_EE 0x2000000U
#define AARCH64_SCTLR_EL3_ENDA 0x8000000U
#define AARCH64_SCTLR_EL3_ENIB 0x40000000U
#define AARCH64_SCTLR_EL3_ENIA 0x80000000U
#define AARCH64_SCTLR_EL3_BT 0x1000000000ULL
#define AARCH64_SCTLR_EL3_ITFSB 0x2000000000ULL

#define AARCH64_SCTLR_EL3_TCF( _val ) ( ( _val ) << 40 )
#define AARCH64_SCTLR_EL3_TCF_SHIFT 40
#define AARCH64_SCTLR_EL3_TCF_MASK 0x30000000000ULL
#define AARCH64_SCTLR_EL3_TCF_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0x3ULL )

#define AARCH64_SCTLR_EL3_ATA 0x80000000000ULL
#define AARCH64_SCTLR_EL3_DSSBS 0x100000000000ULL

/* Read the raw SCTLR_EL3 value */
static inline uint64_t _AArch64_Read_sctlr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCTLR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}
/* Write the raw SCTLR_EL3 value */
static inline void _AArch64_Write_sctlr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr SCTLR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* SCXTNUM_EL0, EL0 Read/Write Software Context Number */

static inline uint64_t _AArch64_Read_scxtnum_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCXTNUM_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_scxtnum_el0( uint64_t value )
{
  __asm__ volatile (
    "msr SCXTNUM_EL0, %0" : : "r" ( value ) : "memory"
  );
}

/* SCXTNUM_EL1, EL1 Read/Write Software Context Number */

static inline uint64_t _AArch64_Read_scxtnum_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCXTNUM_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_scxtnum_el1( uint64_t value )
{
  __asm__ volatile (
    "msr SCXTNUM_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* SCXTNUM_EL2, EL2 Read/Write Software Context Number */

static inline uint64_t _AArch64_Read_scxtnum_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCXTNUM_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_scxtnum_el2( uint64_t value )
{
  __asm__ volatile (
    "msr SCXTNUM_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* SCXTNUM_EL3, EL3 Read/Write Software Context Number */

static inline uint64_t _AArch64_Read_scxtnum_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, SCXTNUM_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_scxtnum_el3( uint64_t value )
{
  __asm__ volatile (
    "msr SCXTNUM_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TCR_EL1, Translation Control Register (EL1) */

#define AARCH64_TCR_EL1_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_TCR_EL1_T0SZ_SHIFT 0
#define AARCH64_TCR_EL1_T0SZ_MASK 0x3fU
#define AARCH64_TCR_EL1_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_TCR_EL1_EPD0 0x80U

#define AARCH64_TCR_EL1_IRGN0( _val ) ( ( _val ) << 8 )
#define AARCH64_TCR_EL1_IRGN0_SHIFT 8
#define AARCH64_TCR_EL1_IRGN0_MASK 0x300U
#define AARCH64_TCR_EL1_IRGN0_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0x3U )

#define AARCH64_TCR_EL1_ORGN0( _val ) ( ( _val ) << 10 )
#define AARCH64_TCR_EL1_ORGN0_SHIFT 10
#define AARCH64_TCR_EL1_ORGN0_MASK 0xc00U
#define AARCH64_TCR_EL1_ORGN0_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_TCR_EL1_SH0( _val ) ( ( _val ) << 12 )
#define AARCH64_TCR_EL1_SH0_SHIFT 12
#define AARCH64_TCR_EL1_SH0_MASK 0x3000U
#define AARCH64_TCR_EL1_SH0_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_TCR_EL1_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_TCR_EL1_TG0_SHIFT 14
#define AARCH64_TCR_EL1_TG0_MASK 0xc000U
#define AARCH64_TCR_EL1_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_TCR_EL1_T1SZ( _val ) ( ( _val ) << 16 )
#define AARCH64_TCR_EL1_T1SZ_SHIFT 16
#define AARCH64_TCR_EL1_T1SZ_MASK 0x3f0000U
#define AARCH64_TCR_EL1_T1SZ_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x3fU )

#define AARCH64_TCR_EL1_A1 0x400000U
#define AARCH64_TCR_EL1_EPD1 0x800000U

#define AARCH64_TCR_EL1_IRGN1( _val ) ( ( _val ) << 24 )
#define AARCH64_TCR_EL1_IRGN1_SHIFT 24
#define AARCH64_TCR_EL1_IRGN1_MASK 0x3000000U
#define AARCH64_TCR_EL1_IRGN1_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0x3U )

#define AARCH64_TCR_EL1_ORGN1( _val ) ( ( _val ) << 26 )
#define AARCH64_TCR_EL1_ORGN1_SHIFT 26
#define AARCH64_TCR_EL1_ORGN1_MASK 0xc000000U
#define AARCH64_TCR_EL1_ORGN1_GET( _reg ) \
  ( ( ( _reg ) >> 26 ) & 0x3U )

#define AARCH64_TCR_EL1_SH1( _val ) ( ( _val ) << 28 )
#define AARCH64_TCR_EL1_SH1_SHIFT 28
#define AARCH64_TCR_EL1_SH1_MASK 0x30000000U
#define AARCH64_TCR_EL1_SH1_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0x3U )

#define AARCH64_TCR_EL1_TG1( _val ) ( ( _val ) << 30 )
#define AARCH64_TCR_EL1_TG1_SHIFT 30
#define AARCH64_TCR_EL1_TG1_MASK 0xc0000000U
#define AARCH64_TCR_EL1_TG1_GET( _reg ) \
  ( ( ( _reg ) >> 30 ) & 0x3U )

#define AARCH64_TCR_EL1_IPS( _val ) ( ( _val ) << 32 )
#define AARCH64_TCR_EL1_IPS_SHIFT 32
#define AARCH64_TCR_EL1_IPS_MASK 0x700000000ULL
#define AARCH64_TCR_EL1_IPS_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0x7ULL )

#define AARCH64_TCR_EL1_AS 0x1000000000ULL
#define AARCH64_TCR_EL1_TBI0 0x2000000000ULL
#define AARCH64_TCR_EL1_TBI1 0x4000000000ULL
#define AARCH64_TCR_EL1_HA 0x8000000000ULL
#define AARCH64_TCR_EL1_HD 0x10000000000ULL
#define AARCH64_TCR_EL1_HPD0 0x20000000000ULL
#define AARCH64_TCR_EL1_HPD1 0x40000000000ULL
#define AARCH64_TCR_EL1_HWU059 0x80000000000ULL
#define AARCH64_TCR_EL1_HWU060 0x100000000000ULL
#define AARCH64_TCR_EL1_HWU061 0x200000000000ULL
#define AARCH64_TCR_EL1_HWU062 0x400000000000ULL
#define AARCH64_TCR_EL1_HWU159 0x800000000000ULL
#define AARCH64_TCR_EL1_HWU160 0x1000000000000ULL
#define AARCH64_TCR_EL1_HWU161 0x2000000000000ULL
#define AARCH64_TCR_EL1_HWU162 0x4000000000000ULL
#define AARCH64_TCR_EL1_TBID0 0x8000000000000ULL
#define AARCH64_TCR_EL1_TBID1 0x10000000000000ULL
#define AARCH64_TCR_EL1_NFD0 0x20000000000000ULL
#define AARCH64_TCR_EL1_NFD1 0x40000000000000ULL
#define AARCH64_TCR_EL1_E0PD0 0x80000000000000ULL
#define AARCH64_TCR_EL1_E0PD1 0x100000000000000ULL
#define AARCH64_TCR_EL1_TCMA0 0x200000000000000ULL
#define AARCH64_TCR_EL1_TCMA1 0x400000000000000ULL

static inline uint64_t _AArch64_Read_tcr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TCR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tcr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TCR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TCR_EL2, Translation Control Register (EL2) */

#define AARCH64_TCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_TCR_EL2_T0SZ_SHIFT 0
#define AARCH64_TCR_EL2_T0SZ_MASK 0x3fU
#define AARCH64_TCR_EL2_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_TCR_EL2_EPD0 0x80U

#define AARCH64_TCR_EL2_IRGN0( _val ) ( ( _val ) << 8 )
#define AARCH64_TCR_EL2_IRGN0_SHIFT 8
#define AARCH64_TCR_EL2_IRGN0_MASK 0x300U
#define AARCH64_TCR_EL2_IRGN0_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0x3U )

#define AARCH64_TCR_EL2_ORGN0( _val ) ( ( _val ) << 10 )
#define AARCH64_TCR_EL2_ORGN0_SHIFT 10
#define AARCH64_TCR_EL2_ORGN0_MASK 0xc00U
#define AARCH64_TCR_EL2_ORGN0_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_TCR_EL2_SH0( _val ) ( ( _val ) << 12 )
#define AARCH64_TCR_EL2_SH0_SHIFT 12
#define AARCH64_TCR_EL2_SH0_MASK 0x3000U
#define AARCH64_TCR_EL2_SH0_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_TCR_EL2_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_TCR_EL2_TG0_SHIFT 14
#define AARCH64_TCR_EL2_TG0_MASK 0xc000U
#define AARCH64_TCR_EL2_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

/*
 * PS/T1SZ and the _0/_1 suffixed names below overlap because the TCR_EL2
 * layout differs depending on HCR_EL2.E2H.
 */
#define AARCH64_TCR_EL2_PS( _val ) ( ( _val ) << 16 )
#define AARCH64_TCR_EL2_PS_SHIFT 16
#define AARCH64_TCR_EL2_PS_MASK 0x70000U
#define AARCH64_TCR_EL2_PS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x7U )

#define AARCH64_TCR_EL2_T1SZ( _val ) ( ( _val ) << 16 )
#define AARCH64_TCR_EL2_T1SZ_SHIFT 16
#define AARCH64_TCR_EL2_T1SZ_MASK 0x3f0000U
#define AARCH64_TCR_EL2_T1SZ_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x3fU )

#define AARCH64_TCR_EL2_TBI 0x100000U
#define AARCH64_TCR_EL2_HA_0 0x200000U
#define AARCH64_TCR_EL2_A1 0x400000U
#define AARCH64_TCR_EL2_HD_0 0x400000U
#define AARCH64_TCR_EL2_EPD1 0x800000U
#define AARCH64_TCR_EL2_HPD 0x1000000U

#define AARCH64_TCR_EL2_IRGN1( _val ) ( ( _val ) << 24 )
#define AARCH64_TCR_EL2_IRGN1_SHIFT 24
#define AARCH64_TCR_EL2_IRGN1_MASK 0x3000000U
#define AARCH64_TCR_EL2_IRGN1_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0x3U )

#define AARCH64_TCR_EL2_HWU59 0x2000000U
#define AARCH64_TCR_EL2_HWU60 0x4000000U
#define AARCH64_TCR_EL2_ORGN1( _val ) ( ( _val ) << 26 )
#define AARCH64_TCR_EL2_ORGN1_SHIFT 26
#define AARCH64_TCR_EL2_ORGN1_MASK 0xc000000U
#define AARCH64_TCR_EL2_ORGN1_GET( _reg ) \
  ( ( ( _reg ) >> 26 ) & 0x3U )

#define AARCH64_TCR_EL2_HWU61 0x8000000U
#define AARCH64_TCR_EL2_HWU62 0x10000000U

#define AARCH64_TCR_EL2_SH1( _val ) ( ( _val ) << 28 )
#define AARCH64_TCR_EL2_SH1_SHIFT 28
#define AARCH64_TCR_EL2_SH1_MASK 0x30000000U
#define AARCH64_TCR_EL2_SH1_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0x3U )

#define AARCH64_TCR_EL2_TBID 0x20000000U
#define AARCH64_TCR_EL2_TCMA 0x40000000U

#define AARCH64_TCR_EL2_TG1( _val ) ( ( _val ) << 30 )
#define AARCH64_TCR_EL2_TG1_SHIFT 30
#define AARCH64_TCR_EL2_TG1_MASK 0xc0000000U
#define AARCH64_TCR_EL2_TG1_GET( _reg ) \
  ( ( ( _reg ) >> 30 ) & 0x3U )

#define AARCH64_TCR_EL2_IPS( _val ) ( ( _val ) << 32 )
#define AARCH64_TCR_EL2_IPS_SHIFT 32
#define AARCH64_TCR_EL2_IPS_MASK 0x700000000ULL
#define AARCH64_TCR_EL2_IPS_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0x7ULL )

#define AARCH64_TCR_EL2_AS 0x1000000000ULL
#define AARCH64_TCR_EL2_TBI0 0x2000000000ULL
#define AARCH64_TCR_EL2_TBI1 0x4000000000ULL

/* _1 suffixed bits apply to the HCR_EL2.E2H == 1 register layout */
#define AARCH64_TCR_EL2_HA_1 0x8000000000ULL
#define AARCH64_TCR_EL2_HD_1 0x10000000000ULL

#define AARCH64_TCR_EL2_HPD0 0x20000000000ULL
#define AARCH64_TCR_EL2_HPD1 0x40000000000ULL
#define AARCH64_TCR_EL2_HWU059 0x80000000000ULL
#define AARCH64_TCR_EL2_HWU060 0x100000000000ULL
#define AARCH64_TCR_EL2_HWU061 0x200000000000ULL
#define AARCH64_TCR_EL2_HWU062 0x400000000000ULL
#define AARCH64_TCR_EL2_HWU159 0x800000000000ULL
#define AARCH64_TCR_EL2_HWU160 0x1000000000000ULL
#define AARCH64_TCR_EL2_HWU161 0x2000000000000ULL
#define AARCH64_TCR_EL2_HWU162 0x4000000000000ULL
#define AARCH64_TCR_EL2_TBID0 0x8000000000000ULL
#define AARCH64_TCR_EL2_TBID1 0x10000000000000ULL
#define AARCH64_TCR_EL2_NFD0 0x20000000000000ULL
#define AARCH64_TCR_EL2_NFD1 0x40000000000000ULL
#define AARCH64_TCR_EL2_E0PD0 0x80000000000000ULL
#define AARCH64_TCR_EL2_E0PD1 0x100000000000000ULL
#define AARCH64_TCR_EL2_TCMA0 0x200000000000000ULL
#define AARCH64_TCR_EL2_TCMA1 0x400000000000000ULL

static inline uint64_t _AArch64_Read_tcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TCR_EL3, Translation Control Register (EL3) */

#define AARCH64_TCR_EL3_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_TCR_EL3_T0SZ_SHIFT 0
#define AARCH64_TCR_EL3_T0SZ_MASK 0x3fU
#define AARCH64_TCR_EL3_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_TCR_EL3_IRGN0( _val ) ( ( _val ) << 8 )
#define AARCH64_TCR_EL3_IRGN0_SHIFT 8
#define AARCH64_TCR_EL3_IRGN0_MASK 0x300U
#define AARCH64_TCR_EL3_IRGN0_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0x3U )

#define AARCH64_TCR_EL3_ORGN0( _val ) ( ( _val ) << 10 )
#define AARCH64_TCR_EL3_ORGN0_SHIFT 10
#define AARCH64_TCR_EL3_ORGN0_MASK 0xc00U
#define AARCH64_TCR_EL3_ORGN0_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_TCR_EL3_SH0( _val ) ( ( _val ) << 12 )
#define AARCH64_TCR_EL3_SH0_SHIFT 12
#define AARCH64_TCR_EL3_SH0_MASK 0x3000U
#define AARCH64_TCR_EL3_SH0_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_TCR_EL3_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_TCR_EL3_TG0_SHIFT 14
#define AARCH64_TCR_EL3_TG0_MASK 0xc000U
#define AARCH64_TCR_EL3_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_TCR_EL3_PS( _val ) ( ( _val ) << 16 )
#define AARCH64_TCR_EL3_PS_SHIFT 16
#define AARCH64_TCR_EL3_PS_MASK 0x70000U
#define AARCH64_TCR_EL3_PS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x7U )

#define AARCH64_TCR_EL3_TBI 0x100000U
#define AARCH64_TCR_EL3_HA 0x200000U
#define AARCH64_TCR_EL3_HD 0x400000U
#define AARCH64_TCR_EL3_HPD 0x1000000U
#define AARCH64_TCR_EL3_HWU59 0x2000000U
#define AARCH64_TCR_EL3_HWU60 0x4000000U
#define AARCH64_TCR_EL3_HWU61 0x8000000U
#define AARCH64_TCR_EL3_HWU62 0x10000000U
#define AARCH64_TCR_EL3_TBID 0x20000000U
#define AARCH64_TCR_EL3_TCMA 0x40000000U

static inline uint64_t _AArch64_Read_tcr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TCR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tcr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TCR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TFSRE0_EL1, Tag Fault Status Register (EL0). */

#define AARCH64_TFSRE0_EL1_TF0 0x1U
#define AARCH64_TFSRE0_EL1_TF1 0x2U

static inline uint64_t _AArch64_Read_tfsre0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TFSRE0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tfsre0_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TFSRE0_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TFSR_EL1, Tag Fault Status Register (EL1) */

#define AARCH64_TFSR_EL1_TF0 0x1U
#define AARCH64_TFSR_EL1_TF1 0x2U

static inline uint64_t _AArch64_Read_tfsr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TFSR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tfsr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TFSR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TFSR_EL2, Tag Fault Status Register (EL2) */

#define AARCH64_TFSR_EL2_TF0 0x1U
#define AARCH64_TFSR_EL2_TF1 0x2U

static inline uint64_t _AArch64_Read_tfsr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TFSR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tfsr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TFSR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TFSR_EL3, Tag Fault Status Register (EL3) */

#define AARCH64_TFSR_EL3_TF0 0x1U

static inline uint64_t _AArch64_Read_tfsr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TFSR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tfsr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TFSR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL0, EL0 Read/Write Software Thread ID Register */

static inline uint64_t _AArch64_Read_tpidr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tpidr_el0( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL0, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL1, EL1 Software Thread ID Register */

static inline uint64_t _AArch64_Read_tpidr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tpidr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL2, EL2 Software Thread ID Register */

static inline uint64_t _AArch64_Read_tpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL3, EL3 Software Thread ID Register */

static inline uint64_t _AArch64_Read_tpidr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tpidr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDRRO_EL0, EL0 Read-Only Software Thread ID Register */

static inline uint64_t _AArch64_Read_tpidrro_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDRRO_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_tpidrro_el0( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDRRO_EL0, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR0_EL1, Translation Table Base Register 0 (EL1) */

#define AARCH64_TTBR0_EL1_CNP 0x1U

#define AARCH64_TTBR0_EL1_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL1_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL1_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL1_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR0_EL1_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR0_EL1_ASID_SHIFT 48
#define AARCH64_TTBR0_EL1_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR0_EL1_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

static inline uint64_t _AArch64_Read_ttbr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_ttbr0_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR0_EL2, Translation Table Base Register 0 (EL2) */

#define AARCH64_TTBR0_EL2_CNP 0x1U

#define AARCH64_TTBR0_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL2_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR0_EL2_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR0_EL2_ASID_SHIFT 48
#define AARCH64_TTBR0_EL2_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR0_EL2_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

static inline uint64_t _AArch64_Read_ttbr0_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_ttbr0_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR0_EL3, Translation Table Base Register 0 (EL3) */

#define AARCH64_TTBR0_EL3_CNP 0x1U

#define AARCH64_TTBR0_EL3_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL3_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL3_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL3_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

static inline uint64_t _AArch64_Read_ttbr0_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_ttbr0_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR1_EL1, Translation Table Base Register 1 (EL1) */

#define AARCH64_TTBR1_EL1_CNP 0x1U

#define AARCH64_TTBR1_EL1_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR1_EL1_BADDR_SHIFT 1
#define AARCH64_TTBR1_EL1_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR1_EL1_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR1_EL1_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR1_EL1_ASID_SHIFT 48
#define AARCH64_TTBR1_EL1_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR1_EL1_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

static inline uint64_t _AArch64_Read_ttbr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_ttbr1_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR1_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR1_EL2, Translation Table Base Register 1 (EL2) */

#define AARCH64_TTBR1_EL2_CNP 0x1U

#define AARCH64_TTBR1_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR1_EL2_BADDR_SHIFT 1
#define AARCH64_TTBR1_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR1_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR1_EL2_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR1_EL2_ASID_SHIFT 48
#define AARCH64_TTBR1_EL2_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR1_EL2_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

static inline uint64_t _AArch64_Read_ttbr1_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR1_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_ttbr1_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR1_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VBAR_EL1, Vector Base Address Register (EL1) */

static inline uint64_t _AArch64_Read_vbar_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vbar_el1( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* VBAR_EL2, Vector Base Address Register (EL2) */

static inline uint64_t _AArch64_Read_vbar_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vbar_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VBAR_EL3, Vector Base Address Register (EL3) */

static inline uint64_t _AArch64_Read_vbar_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vbar_el3( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* VMPIDR_EL2, Virtualization Multiprocessor ID Register */

#define AARCH64_VMPIDR_EL2_AFF0( _val ) ( ( _val ) << 0 )
#define AARCH64_VMPIDR_EL2_AFF0_SHIFT 0
#define AARCH64_VMPIDR_EL2_AFF0_MASK 0xffU
#define AARCH64_VMPIDR_EL2_AFF0_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_AFF1( _val ) ( ( _val ) << 8 )
#define AARCH64_VMPIDR_EL2_AFF1_SHIFT 8
#define AARCH64_VMPIDR_EL2_AFF1_MASK 0xff00U
#define AARCH64_VMPIDR_EL2_AFF1_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_AFF2( _val ) ( ( _val ) << 16 )
#define AARCH64_VMPIDR_EL2_AFF2_SHIFT 16
#define AARCH64_VMPIDR_EL2_AFF2_MASK 0xff0000U
#define AARCH64_VMPIDR_EL2_AFF2_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_MT 0x1000000U
#define AARCH64_VMPIDR_EL2_U 0x40000000U

#define AARCH64_VMPIDR_EL2_AFF3( _val ) ( ( _val ) << 32 )
#define AARCH64_VMPIDR_EL2_AFF3_SHIFT 32
#define AARCH64_VMPIDR_EL2_AFF3_MASK 0xff00000000ULL
#define AARCH64_VMPIDR_EL2_AFF3_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xffULL )

static inline uint64_t _AArch64_Read_vmpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VMPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vmpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VMPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VNCR_EL2, Virtual Nested Control Register */

#define AARCH64_VNCR_EL2_BADDR( _val ) ( ( _val ) << 12 )
#define AARCH64_VNCR_EL2_BADDR_SHIFT 12
#define AARCH64_VNCR_EL2_BADDR_MASK 0x1ffffffffff000ULL
#define AARCH64_VNCR_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x1ffffffffffULL )

#define AARCH64_VNCR_EL2_RESS( _val ) ( ( _val ) << 53 )
#define AARCH64_VNCR_EL2_RESS_SHIFT 53
#define AARCH64_VNCR_EL2_RESS_MASK 0xffe0000000000000ULL
#define AARCH64_VNCR_EL2_RESS_GET( _reg ) \
  ( ( ( _reg ) >> 53 ) & 0x7ffULL )

static inline uint64_t _AArch64_Read_vncr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VNCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vncr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VNCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VPIDR_EL2, Virtualization Processor ID Register */

#define AARCH64_VPIDR_EL2_REVISION( _val ) ( ( _val ) << 0 )
#define AARCH64_VPIDR_EL2_REVISION_SHIFT 0
#define AARCH64_VPIDR_EL2_REVISION_MASK 0xfU
#define AARCH64_VPIDR_EL2_REVISION_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_VPIDR_EL2_PARTNUM( _val ) ( ( _val ) << 4 )
#define AARCH64_VPIDR_EL2_PARTNUM_SHIFT 4
#define AARCH64_VPIDR_EL2_PARTNUM_MASK 0xfff0U
#define AARCH64_VPIDR_EL2_PARTNUM_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfffU )

#define AARCH64_VPIDR_EL2_ARCHITECTURE( _val ) ( ( _val ) << 16 )
#define AARCH64_VPIDR_EL2_ARCHITECTURE_SHIFT 16
#define AARCH64_VPIDR_EL2_ARCHITECTURE_MASK 0xf0000U
#define AARCH64_VPIDR_EL2_ARCHITECTURE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_VPIDR_EL2_VARIANT( _val ) ( ( _val ) << 20 )
#define AARCH64_VPIDR_EL2_VARIANT_SHIFT 20
#define AARCH64_VPIDR_EL2_VARIANT_MASK 0xf00000U
#define AARCH64_VPIDR_EL2_VARIANT_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_VPIDR_EL2_IMPLEMENTER( _val ) ( ( _val ) << 24 )
#define AARCH64_VPIDR_EL2_IMPLEMENTER_SHIFT 24
#define AARCH64_VPIDR_EL2_IMPLEMENTER_MASK 0xff000000U
#define AARCH64_VPIDR_EL2_IMPLEMENTER_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xffU )

static inline uint64_t _AArch64_Read_vpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VSTCR_EL2, Virtualization Secure Translation Control Register */

#define AARCH64_VSTCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_VSTCR_EL2_T0SZ_SHIFT 0
#define AARCH64_VSTCR_EL2_T0SZ_MASK 0x3fU
#define AARCH64_VSTCR_EL2_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_VSTCR_EL2_SL0( _val ) ( ( _val ) << 6 )
#define AARCH64_VSTCR_EL2_SL0_SHIFT 6
#define AARCH64_VSTCR_EL2_SL0_MASK 0xc0U
#define AARCH64_VSTCR_EL2_SL0_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x3U )

#define AARCH64_VSTCR_EL2_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_VSTCR_EL2_TG0_SHIFT 14
#define AARCH64_VSTCR_EL2_TG0_MASK 0xc000U
#define AARCH64_VSTCR_EL2_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_VSTCR_EL2_SW 0x20000000U
#define AARCH64_VSTCR_EL2_SA 0x40000000U

static inline uint64_t _AArch64_Read_vstcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VSTCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vstcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VSTCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VSTTBR_EL2, Virtualization Secure Translation Table Base Register */

#define AARCH64_VSTTBR_EL2_CNP 0x1U

#define AARCH64_VSTTBR_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_VSTTBR_EL2_BADDR_SHIFT 1
#define AARCH64_VSTTBR_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_VSTTBR_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

static inline uint64_t _AArch64_Read_vsttbr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VSTTBR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vsttbr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VSTTBR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VTCR_EL2, Virtualization Translation Control Register */

#define AARCH64_VTCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_VTCR_EL2_T0SZ_SHIFT 0
#define AARCH64_VTCR_EL2_T0SZ_MASK 0x3fU
#define AARCH64_VTCR_EL2_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_VTCR_EL2_SL0( _val ) ( ( _val ) << 6 )
#define AARCH64_VTCR_EL2_SL0_SHIFT 6
#define AARCH64_VTCR_EL2_SL0_MASK 0xc0U
#define AARCH64_VTCR_EL2_SL0_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x3U )

#define AARCH64_VTCR_EL2_IRGN0( _val ) ( ( _val ) << 8 )
#define AARCH64_VTCR_EL2_IRGN0_SHIFT 8
#define AARCH64_VTCR_EL2_IRGN0_MASK 0x300U
#define AARCH64_VTCR_EL2_IRGN0_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0x3U )

#define AARCH64_VTCR_EL2_ORGN0( _val ) ( ( _val ) << 10 )
#define AARCH64_VTCR_EL2_ORGN0_SHIFT 10
#define AARCH64_VTCR_EL2_ORGN0_MASK 0xc00U
#define AARCH64_VTCR_EL2_ORGN0_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_VTCR_EL2_SH0( _val ) ( ( _val ) << 12 )
#define AARCH64_VTCR_EL2_SH0_SHIFT 12
#define AARCH64_VTCR_EL2_SH0_MASK 0x3000U
#define AARCH64_VTCR_EL2_SH0_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_VTCR_EL2_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_VTCR_EL2_TG0_SHIFT 14
#define AARCH64_VTCR_EL2_TG0_MASK 0xc000U
#define AARCH64_VTCR_EL2_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_VTCR_EL2_PS( _val ) ( ( _val ) << 16 )
#define AARCH64_VTCR_EL2_PS_SHIFT 16
#define AARCH64_VTCR_EL2_PS_MASK 0x70000U
#define AARCH64_VTCR_EL2_PS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x7U )

#define AARCH64_VTCR_EL2_VS 0x80000U
#define AARCH64_VTCR_EL2_HA 0x200000U
#define AARCH64_VTCR_EL2_HD 0x400000U
#define AARCH64_VTCR_EL2_HWU59 0x2000000U
#define AARCH64_VTCR_EL2_HWU60 0x4000000U
#define AARCH64_VTCR_EL2_HWU61 0x8000000U
#define AARCH64_VTCR_EL2_HWU62 0x10000000U
#define AARCH64_VTCR_EL2_NSW 0x20000000U
#define AARCH64_VTCR_EL2_NSA 0x40000000U

static inline uint64_t _AArch64_Read_vtcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VTCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

static inline void _AArch64_Write_vtcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VTCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VTTBR_EL2, Virtualization Translation Table Base Register */
+#define AARCH64_VTTBR_EL2_CNP 0x1U + +#define AARCH64_VTTBR_EL2_BADDR( _val ) ( ( _val ) << 1 ) +#define AARCH64_VTTBR_EL2_BADDR_SHIFT 1 +#define AARCH64_VTTBR_EL2_BADDR_MASK 0xfffffffffffeULL +#define AARCH64_VTTBR_EL2_BADDR_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL ) + +#define AARCH64_VTTBR_EL2_VMID_7_0( _val ) ( ( _val ) << 48 ) +#define AARCH64_VTTBR_EL2_VMID_7_0_SHIFT 48 +#define AARCH64_VTTBR_EL2_VMID_7_0_MASK 0xff000000000000ULL +#define AARCH64_VTTBR_EL2_VMID_7_0_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xffULL ) + +#define AARCH64_VTTBR_EL2_VMID_15_8( _val ) ( ( _val ) << 56 ) +#define AARCH64_VTTBR_EL2_VMID_15_8_SHIFT 56 +#define AARCH64_VTTBR_EL2_VMID_15_8_MASK 0xff00000000000000ULL +#define AARCH64_VTTBR_EL2_VMID_15_8_GET( _reg ) \ + ( ( ( _reg ) >> 56 ) & 0xffULL ) + +static inline uint64_t _AArch64_Read_vttbr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, VTTBR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_vttbr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr VTTBR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGAUTHSTATUS_EL1, Debug Authentication Status Register */ + +#define AARCH64_DBGAUTHSTATUS_EL1_NSID( _val ) ( ( _val ) << 0 ) +#define AARCH64_DBGAUTHSTATUS_EL1_NSID_SHIFT 0 +#define AARCH64_DBGAUTHSTATUS_EL1_NSID_MASK 0x3U +#define AARCH64_DBGAUTHSTATUS_EL1_NSID_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3U ) + +#define AARCH64_DBGAUTHSTATUS_EL1_NSNID( _val ) ( ( _val ) << 2 ) +#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_SHIFT 2 +#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_MASK 0xcU +#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_GET( _reg ) \ + ( ( ( _reg ) >> 2 ) & 0x3U ) + +#define AARCH64_DBGAUTHSTATUS_EL1_SID( _val ) ( ( _val ) << 4 ) +#define AARCH64_DBGAUTHSTATUS_EL1_SID_SHIFT 4 +#define AARCH64_DBGAUTHSTATUS_EL1_SID_MASK 0x30U +#define AARCH64_DBGAUTHSTATUS_EL1_SID_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0x3U ) + +#define AARCH64_DBGAUTHSTATUS_EL1_SNID( _val 
) ( ( _val ) << 6 ) +#define AARCH64_DBGAUTHSTATUS_EL1_SNID_SHIFT 6 +#define AARCH64_DBGAUTHSTATUS_EL1_SNID_MASK 0xc0U +#define AARCH64_DBGAUTHSTATUS_EL1_SNID_GET( _reg ) \ + ( ( ( _reg ) >> 6 ) & 0x3U ) + +static inline uint64_t _AArch64_Read_dbgauthstatus_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGAUTHSTATUS_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* DBGBCR_N_EL1, Debug Breakpoint Control Registers, n = 0 - 15 */ + +#define AARCH64_DBGBCR_N_EL1_E 0x1U + +#define AARCH64_DBGBCR_N_EL1_PMC( _val ) ( ( _val ) << 1 ) +#define AARCH64_DBGBCR_N_EL1_PMC_SHIFT 1 +#define AARCH64_DBGBCR_N_EL1_PMC_MASK 0x6U +#define AARCH64_DBGBCR_N_EL1_PMC_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x3U ) + +#define AARCH64_DBGBCR_N_EL1_BAS( _val ) ( ( _val ) << 5 ) +#define AARCH64_DBGBCR_N_EL1_BAS_SHIFT 5 +#define AARCH64_DBGBCR_N_EL1_BAS_MASK 0x1e0U +#define AARCH64_DBGBCR_N_EL1_BAS_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0xfU ) + +#define AARCH64_DBGBCR_N_EL1_HMC 0x2000U + +#define AARCH64_DBGBCR_N_EL1_SSC( _val ) ( ( _val ) << 14 ) +#define AARCH64_DBGBCR_N_EL1_SSC_SHIFT 14 +#define AARCH64_DBGBCR_N_EL1_SSC_MASK 0xc000U +#define AARCH64_DBGBCR_N_EL1_SSC_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_DBGBCR_N_EL1_LBN( _val ) ( ( _val ) << 16 ) +#define AARCH64_DBGBCR_N_EL1_LBN_SHIFT 16 +#define AARCH64_DBGBCR_N_EL1_LBN_MASK 0xf0000U +#define AARCH64_DBGBCR_N_EL1_LBN_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_DBGBCR_N_EL1_BT( _val ) ( ( _val ) << 20 ) +#define AARCH64_DBGBCR_N_EL1_BT_SHIFT 20 +#define AARCH64_DBGBCR_N_EL1_BT_MASK 0xf00000U +#define AARCH64_DBGBCR_N_EL1_BT_GET( _reg ) \ + ( ( ( _reg ) >> 20 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_dbgbcr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr 
DBGBCR0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr2_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR2_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr3_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR3_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr3_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR3_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr4_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR4_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr4_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR4_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr5_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR5_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr5_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR5_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr6_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR6_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr6_el1( uint64_t value ) +{ + 
__asm__ volatile ( + "msr DBGBCR6_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr7_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR7_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr7_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR7_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr8_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR8_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr8_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR8_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr9_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR9_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr9_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR9_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr10_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR10_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr10_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR10_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr11_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR11_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr11_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR11_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr12_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR12_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_dbgbcr12_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR12_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr13_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR13_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr13_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR13_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr14_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR14_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr14_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR14_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbcr15_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBCR15_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbcr15_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBCR15_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGBVR_N_EL1, Debug Breakpoint Value Registers, n = 0 - 15 */ + +#define AARCH64_DBGBVR_N_EL1_CONTEXTID( _val ) ( ( _val ) << 0 ) +#define AARCH64_DBGBVR_N_EL1_CONTEXTID_SHIFT 0 +#define AARCH64_DBGBVR_N_EL1_CONTEXTID_MASK 0xffffffffU +#define AARCH64_DBGBVR_N_EL1_CONTEXTID_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +#define AARCH64_DBGBVR_N_EL1_VA_48_2( _val ) ( ( _val ) << 2 ) +#define AARCH64_DBGBVR_N_EL1_VA_48_2_SHIFT 2 +#define AARCH64_DBGBVR_N_EL1_VA_48_2_MASK 0x1fffffffffffcULL +#define AARCH64_DBGBVR_N_EL1_VA_48_2_GET( _reg ) \ + ( ( ( _reg ) >> 2 ) & 0x7fffffffffffULL ) + +#define AARCH64_DBGBVR_N_EL1_VMID_7_0( _val ) ( ( _val ) << 32 ) +#define AARCH64_DBGBVR_N_EL1_VMID_7_0_SHIFT 32 +#define AARCH64_DBGBVR_N_EL1_VMID_7_0_MASK 0xff00000000ULL +#define AARCH64_DBGBVR_N_EL1_VMID_7_0_GET( _reg ) \ + ( ( ( _reg 
) >> 32 ) & 0xffULL ) + +#define AARCH64_DBGBVR_N_EL1_CONTEXTID2( _val ) ( ( _val ) << 32 ) +#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_SHIFT 32 +#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_MASK 0xffffffff00000000ULL +#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xffffffffULL ) + +#define AARCH64_DBGBVR_N_EL1_VMID_15_8( _val ) ( ( _val ) << 40 ) +#define AARCH64_DBGBVR_N_EL1_VMID_15_8_SHIFT 40 +#define AARCH64_DBGBVR_N_EL1_VMID_15_8_MASK 0xff0000000000ULL +#define AARCH64_DBGBVR_N_EL1_VMID_15_8_GET( _reg ) \ + ( ( ( _reg ) >> 40 ) & 0xffULL ) + +#define AARCH64_DBGBVR_N_EL1_VA_52_49( _val ) ( ( _val ) << 49 ) +#define AARCH64_DBGBVR_N_EL1_VA_52_49_SHIFT 49 +#define AARCH64_DBGBVR_N_EL1_VA_52_49_MASK 0x1e000000000000ULL +#define AARCH64_DBGBVR_N_EL1_VA_52_49_GET( _reg ) \ + ( ( ( _reg ) >> 49 ) & 0xfULL ) + +#define AARCH64_DBGBVR_N_EL1_RESS_14_4( _val ) ( ( _val ) << 53 ) +#define AARCH64_DBGBVR_N_EL1_RESS_14_4_SHIFT 53 +#define AARCH64_DBGBVR_N_EL1_RESS_14_4_MASK 0xffe0000000000000ULL +#define AARCH64_DBGBVR_N_EL1_RESS_14_4_GET( _reg ) \ + ( ( ( _reg ) >> 53 ) & 0x7ffULL ) + +static inline uint64_t _AArch64_Read_dbgbvr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_dbgbvr2_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR2_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr3_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR3_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr3_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR3_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr4_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR4_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr4_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR4_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr5_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR5_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr5_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR5_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr6_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR6_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr6_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR6_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr7_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR7_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr7_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR7_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr8_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR8_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} 
+ +static inline void _AArch64_Write_dbgbvr8_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR8_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr9_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR9_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr9_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR9_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr10_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR10_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr10_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR10_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr11_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR11_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr11_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR11_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr12_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR12_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr12_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR12_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr13_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR13_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr13_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR13_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr14_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR14_EL1" : "=&r" ( value 
) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr14_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR14_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgbvr15_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGBVR15_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgbvr15_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGBVR15_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGCLAIMCLR_EL1, Debug CLAIM Tag Clear Register */ + +#define AARCH64_DBGCLAIMCLR_EL1_CLAIM( _val ) ( ( _val ) << 0 ) +#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_SHIFT 0 +#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_MASK 0xffU +#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_dbgclaimclr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGCLAIMCLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgclaimclr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGCLAIMCLR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGCLAIMSET_EL1, Debug CLAIM Tag Set Register */ + +#define AARCH64_DBGCLAIMSET_EL1_CLAIM( _val ) ( ( _val ) << 0 ) +#define AARCH64_DBGCLAIMSET_EL1_CLAIM_SHIFT 0 +#define AARCH64_DBGCLAIMSET_EL1_CLAIM_MASK 0xffU +#define AARCH64_DBGCLAIMSET_EL1_CLAIM_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_dbgclaimset_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGCLAIMSET_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgclaimset_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGCLAIMSET_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGDTR_EL0, Debug Data Transfer Register, half-duplex */ + +#define AARCH64_DBGDTR_EL0_LOWWORD( _val ) ( ( _val ) << 0 ) +#define 
AARCH64_DBGDTR_EL0_LOWWORD_SHIFT 0 +#define AARCH64_DBGDTR_EL0_LOWWORD_MASK 0xffffffffU +#define AARCH64_DBGDTR_EL0_LOWWORD_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +#define AARCH64_DBGDTR_EL0_HIGHWORD( _val ) ( ( _val ) << 32 ) +#define AARCH64_DBGDTR_EL0_HIGHWORD_SHIFT 32 +#define AARCH64_DBGDTR_EL0_HIGHWORD_MASK 0xffffffff00000000ULL +#define AARCH64_DBGDTR_EL0_HIGHWORD_GET( _reg ) \ + ( ( ( _reg ) >> 32 ) & 0xffffffffULL ) + +static inline uint64_t _AArch64_Read_dbgdtr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGDTR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgdtr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGDTR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGDTRRX_EL0, Debug Data Transfer Register, Receive */ + +static inline uint64_t _AArch64_Read_dbgdtrrx_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGDTRRX_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* DBGDTRTX_EL0, Debug Data Transfer Register, Transmit */ + +static inline void _AArch64_Write_dbgdtrtx_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGDTRTX_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGPRCR_EL1, Debug Power Control Register */ + +#define AARCH64_DBGPRCR_EL1_CORENPDRQ 0x1U + +static inline uint64_t _AArch64_Read_dbgprcr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGPRCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgprcr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGPRCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGVCR32_EL2, Debug Vector Catch Register */ + +#define AARCH64_DBGVCR32_EL2_SU 0x2U + +#define AARCH64_DBGVCR32_EL2_U 0x2U + +#define AARCH64_DBGVCR32_EL2_S 0x4U + +#define AARCH64_DBGVCR32_EL2_SS 0x4U + +#define AARCH64_DBGVCR32_EL2_P 0x8U + +#define AARCH64_DBGVCR32_EL2_SP 0x8U + +#define AARCH64_DBGVCR32_EL2_D 
0x10U + +#define AARCH64_DBGVCR32_EL2_SD 0x10U + +#define AARCH64_DBGVCR32_EL2_I 0x40U + +#define AARCH64_DBGVCR32_EL2_SI 0x40U + +#define AARCH64_DBGVCR32_EL2_F 0x80U + +#define AARCH64_DBGVCR32_EL2_SF 0x80U + +#define AARCH64_DBGVCR32_EL2_NSU 0x2000000U + +#define AARCH64_DBGVCR32_EL2_NSS 0x4000000U + +#define AARCH64_DBGVCR32_EL2_NSP 0x8000000U + +#define AARCH64_DBGVCR32_EL2_NSD 0x10000000U + +#define AARCH64_DBGVCR32_EL2_NSI 0x40000000U + +#define AARCH64_DBGVCR32_EL2_NSF 0x80000000U + +static inline uint64_t _AArch64_Read_dbgvcr32_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGVCR32_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgvcr32_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGVCR32_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGWCR_N_EL1, Debug Watchpoint Control Registers, n = 0 - 15 */ + +#define AARCH64_DBGWCR_N_EL1_E 0x1U + +#define AARCH64_DBGWCR_N_EL1_PAC( _val ) ( ( _val ) << 1 ) +#define AARCH64_DBGWCR_N_EL1_PAC_SHIFT 1 +#define AARCH64_DBGWCR_N_EL1_PAC_MASK 0x6U +#define AARCH64_DBGWCR_N_EL1_PAC_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x3U ) + +#define AARCH64_DBGWCR_N_EL1_LSC( _val ) ( ( _val ) << 3 ) +#define AARCH64_DBGWCR_N_EL1_LSC_SHIFT 3 +#define AARCH64_DBGWCR_N_EL1_LSC_MASK 0x18U +#define AARCH64_DBGWCR_N_EL1_LSC_GET( _reg ) \ + ( ( ( _reg ) >> 3 ) & 0x3U ) + +#define AARCH64_DBGWCR_N_EL1_BAS( _val ) ( ( _val ) << 5 ) +#define AARCH64_DBGWCR_N_EL1_BAS_SHIFT 5 +#define AARCH64_DBGWCR_N_EL1_BAS_MASK 0x1fe0U +#define AARCH64_DBGWCR_N_EL1_BAS_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0xffU ) + +#define AARCH64_DBGWCR_N_EL1_HMC 0x2000U + +#define AARCH64_DBGWCR_N_EL1_SSC( _val ) ( ( _val ) << 14 ) +#define AARCH64_DBGWCR_N_EL1_SSC_SHIFT 14 +#define AARCH64_DBGWCR_N_EL1_SSC_MASK 0xc000U +#define AARCH64_DBGWCR_N_EL1_SSC_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_DBGWCR_N_EL1_LBN( _val ) ( ( _val ) << 16 ) +#define 
AARCH64_DBGWCR_N_EL1_LBN_SHIFT 16 +#define AARCH64_DBGWCR_N_EL1_LBN_MASK 0xf0000U +#define AARCH64_DBGWCR_N_EL1_LBN_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_DBGWCR_N_EL1_WT 0x100000U + +#define AARCH64_DBGWCR_N_EL1_MASK( _val ) ( ( _val ) << 24 ) +#define AARCH64_DBGWCR_N_EL1_MASK_SHIFT 24 +#define AARCH64_DBGWCR_N_EL1_MASK_MASK 0x1f000000U +#define AARCH64_DBGWCR_N_EL1_MASK_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0x1fU ) + +static inline uint64_t _AArch64_Read_dbgwcr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr2_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR2_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr3_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR3_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr3_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR3_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr4_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR4_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline 
void _AArch64_Write_dbgwcr4_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR4_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr5_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR5_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr5_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR5_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr6_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR6_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr6_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR6_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr7_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR7_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr7_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR7_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr8_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR8_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr8_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR8_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr9_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR9_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr9_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR9_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr10_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR10_EL1" : "=&r" ( value ) : : "memory" + ); + + return 
value; +} + +static inline void _AArch64_Write_dbgwcr10_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR10_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr11_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR11_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr11_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR11_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr12_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR12_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr12_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR12_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr13_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR13_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr13_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR13_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr14_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR14_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr14_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR14_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwcr15_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWCR15_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwcr15_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWCR15_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DBGWVR_N_EL1, Debug Watchpoint Value Registers, n = 0 - 15 */ + +#define AARCH64_DBGWVR_N_EL1_VA_48_2( _val ) ( ( _val ) << 
2 ) +#define AARCH64_DBGWVR_N_EL1_VA_48_2_SHIFT 2 +#define AARCH64_DBGWVR_N_EL1_VA_48_2_MASK 0x1fffffffffffcULL +#define AARCH64_DBGWVR_N_EL1_VA_48_2_GET( _reg ) \ + ( ( ( _reg ) >> 2 ) & 0x7fffffffffffULL ) + +#define AARCH64_DBGWVR_N_EL1_VA_52_49( _val ) ( ( _val ) << 49 ) +#define AARCH64_DBGWVR_N_EL1_VA_52_49_SHIFT 49 +#define AARCH64_DBGWVR_N_EL1_VA_52_49_MASK 0x1e000000000000ULL +#define AARCH64_DBGWVR_N_EL1_VA_52_49_GET( _reg ) \ + ( ( ( _reg ) >> 49 ) & 0xfULL ) + +#define AARCH64_DBGWVR_N_EL1_RESS_14_4( _val ) ( ( _val ) << 53 ) +#define AARCH64_DBGWVR_N_EL1_RESS_14_4_SHIFT 53 +#define AARCH64_DBGWVR_N_EL1_RESS_14_4_MASK 0xffe0000000000000ULL +#define AARCH64_DBGWVR_N_EL1_RESS_14_4_GET( _reg ) \ + ( ( ( _reg ) >> 53 ) & 0x7ffULL ) + +static inline uint64_t _AArch64_Read_dbgwvr0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr2_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR2_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr3_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR3_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr3_el1( 
uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR3_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr4_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR4_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr4_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR4_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr5_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR5_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr5_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR5_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr6_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR6_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr6_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR6_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr7_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR7_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr7_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR7_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr8_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR8_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr8_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR8_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr9_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR9_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_dbgwvr9_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR9_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr10_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR10_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr10_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR10_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr11_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR11_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr11_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR11_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr12_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR12_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr12_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR12_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr13_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR13_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr13_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR13_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr14_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR14_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dbgwvr14_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR14_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +static inline uint64_t _AArch64_Read_dbgwvr15_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DBGWVR15_EL1" : "=&r" ( value ) : : "memory" + 
); + + return value; +} + +static inline void _AArch64_Write_dbgwvr15_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DBGWVR15_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DLR_EL0, Debug Link Register */ + +static inline uint64_t _AArch64_Read_dlr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DLR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dlr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr DLR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* DSPSR_EL0, Debug Saved Program Status Register */ + +#define AARCH64_DSPSR_EL0_M_3_0( _val ) ( ( _val ) << 0 ) +#define AARCH64_DSPSR_EL0_M_3_0_SHIFT 0 +#define AARCH64_DSPSR_EL0_M_3_0_MASK 0xfU +#define AARCH64_DSPSR_EL0_M_3_0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_DSPSR_EL0_M_4 0x10U + +#define AARCH64_DSPSR_EL0_T 0x20U + +#define AARCH64_DSPSR_EL0_F 0x40U + +#define AARCH64_DSPSR_EL0_I 0x80U + +#define AARCH64_DSPSR_EL0_A 0x100U + +#define AARCH64_DSPSR_EL0_D 0x200U + +#define AARCH64_DSPSR_EL0_E 0x200U + +#define AARCH64_DSPSR_EL0_BTYPE( _val ) ( ( _val ) << 10 ) +#define AARCH64_DSPSR_EL0_BTYPE_SHIFT 10 +#define AARCH64_DSPSR_EL0_BTYPE_MASK 0xc00U +#define AARCH64_DSPSR_EL0_BTYPE_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x3U ) + +#define AARCH64_DSPSR_EL0_IT_7_2( _val ) ( ( _val ) << 10 ) +#define AARCH64_DSPSR_EL0_IT_7_2_SHIFT 10 +#define AARCH64_DSPSR_EL0_IT_7_2_MASK 0xfc00U +#define AARCH64_DSPSR_EL0_IT_7_2_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x3fU ) + +#define AARCH64_DSPSR_EL0_SSBS_0 0x1000U + +#define AARCH64_DSPSR_EL0_GE( _val ) ( ( _val ) << 16 ) +#define AARCH64_DSPSR_EL0_GE_SHIFT 16 +#define AARCH64_DSPSR_EL0_GE_MASK 0xf0000U +#define AARCH64_DSPSR_EL0_GE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +#define AARCH64_DSPSR_EL0_IL 0x100000U + +#define AARCH64_DSPSR_EL0_SS 0x200000U + +#define AARCH64_DSPSR_EL0_PAN 0x400000U + +#define AARCH64_DSPSR_EL0_SSBS_1 0x800000U + +#define 
AARCH64_DSPSR_EL0_UAO 0x800000U + +#define AARCH64_DSPSR_EL0_DIT 0x1000000U + +#define AARCH64_DSPSR_EL0_TCO 0x2000000U + +#define AARCH64_DSPSR_EL0_IT_1_0( _val ) ( ( _val ) << 25 ) +#define AARCH64_DSPSR_EL0_IT_1_0_SHIFT 25 +#define AARCH64_DSPSR_EL0_IT_1_0_MASK 0x6000000U +#define AARCH64_DSPSR_EL0_IT_1_0_GET( _reg ) \ + ( ( ( _reg ) >> 25 ) & 0x3U ) + +#define AARCH64_DSPSR_EL0_Q 0x8000000U + +#define AARCH64_DSPSR_EL0_V 0x10000000U + +#define AARCH64_DSPSR_EL0_C 0x20000000U + +#define AARCH64_DSPSR_EL0_Z 0x40000000U + +#define AARCH64_DSPSR_EL0_N 0x80000000U + +static inline uint64_t _AArch64_Read_dspsr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DSPSR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_dspsr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr DSPSR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* MDCCINT_EL1, Monitor DCC Interrupt Enable Register */ + +#define AARCH64_MDCCINT_EL1_TX 0x20000000U + +#define AARCH64_MDCCINT_EL1_RX 0x40000000U + +static inline uint64_t _AArch64_Read_mdccint_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDCCINT_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mdccint_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr MDCCINT_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* MDCCSR_EL0, Monitor DCC Status Register */ + +#define AARCH64_MDCCSR_EL0_TXFULL 0x20000000U + +#define AARCH64_MDCCSR_EL0_RXFULL 0x40000000U + +static inline uint64_t _AArch64_Read_mdccsr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDCCSR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MDCR_EL2, Monitor Debug Configuration Register (EL2) */ + +#define AARCH64_MDCR_EL2_HPMN( _val ) ( ( _val ) << 0 ) +#define AARCH64_MDCR_EL2_HPMN_SHIFT 0 +#define AARCH64_MDCR_EL2_HPMN_MASK 0x1fU +#define AARCH64_MDCR_EL2_HPMN_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x1fU ) + 
+#define AARCH64_MDCR_EL2_TPMCR 0x20U + +#define AARCH64_MDCR_EL2_TPM 0x40U + +#define AARCH64_MDCR_EL2_HPME 0x80U + +#define AARCH64_MDCR_EL2_TDE 0x100U + +#define AARCH64_MDCR_EL2_TDA 0x200U + +#define AARCH64_MDCR_EL2_TDOSA 0x400U + +#define AARCH64_MDCR_EL2_TDRA 0x800U + +#define AARCH64_MDCR_EL2_E2PB( _val ) ( ( _val ) << 12 ) +#define AARCH64_MDCR_EL2_E2PB_SHIFT 12 +#define AARCH64_MDCR_EL2_E2PB_MASK 0x3000U +#define AARCH64_MDCR_EL2_E2PB_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0x3U ) + +#define AARCH64_MDCR_EL2_TPMS 0x4000U + +#define AARCH64_MDCR_EL2_HPMD 0x20000U + +#define AARCH64_MDCR_EL2_TTRF 0x80000U + +#define AARCH64_MDCR_EL2_HCCD 0x800000U + +#define AARCH64_MDCR_EL2_HLP 0x4000000U + +#define AARCH64_MDCR_EL2_TDCC 0x8000000U + +#define AARCH64_MDCR_EL2_MTPME 0x10000000U + +static inline uint64_t _AArch64_Read_mdcr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDCR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mdcr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr MDCR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* MDCR_EL3, Monitor Debug Configuration Register (EL3) */ + +#define AARCH64_MDCR_EL3_TPM 0x40U + +#define AARCH64_MDCR_EL3_TDA 0x200U + +#define AARCH64_MDCR_EL3_TDOSA 0x400U + +#define AARCH64_MDCR_EL3_NSPB( _val ) ( ( _val ) << 12 ) +#define AARCH64_MDCR_EL3_NSPB_SHIFT 12 +#define AARCH64_MDCR_EL3_NSPB_MASK 0x3000U +#define AARCH64_MDCR_EL3_NSPB_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0x3U ) + +#define AARCH64_MDCR_EL3_SPD32( _val ) ( ( _val ) << 14 ) +#define AARCH64_MDCR_EL3_SPD32_SHIFT 14 +#define AARCH64_MDCR_EL3_SPD32_MASK 0xc000U +#define AARCH64_MDCR_EL3_SPD32_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_MDCR_EL3_SDD 0x10000U + +#define AARCH64_MDCR_EL3_SPME 0x20000U + +#define AARCH64_MDCR_EL3_STE 0x40000U + +#define AARCH64_MDCR_EL3_TTRF 0x80000U + +#define AARCH64_MDCR_EL3_EDAD 0x100000U + +#define AARCH64_MDCR_EL3_EPMAD 0x200000U 
+ +#define AARCH64_MDCR_EL3_SCCD 0x800000U + +#define AARCH64_MDCR_EL3_TDCC 0x8000000U + +#define AARCH64_MDCR_EL3_MTPME 0x10000000U + +static inline uint64_t _AArch64_Read_mdcr_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDCR_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mdcr_el3( uint64_t value ) +{ + __asm__ volatile ( + "msr MDCR_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* MDRAR_EL1, Monitor Debug ROM Address Register */ + +#define AARCH64_MDRAR_EL1_VALID( _val ) ( ( _val ) << 0 ) +#define AARCH64_MDRAR_EL1_VALID_SHIFT 0 +#define AARCH64_MDRAR_EL1_VALID_MASK 0x3U +#define AARCH64_MDRAR_EL1_VALID_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3U ) + +#define AARCH64_MDRAR_EL1_ROMADDR_47_12( _val ) ( ( _val ) << 12 ) +#define AARCH64_MDRAR_EL1_ROMADDR_47_12_SHIFT 12 +#define AARCH64_MDRAR_EL1_ROMADDR_47_12_MASK 0xfffffffff000ULL +#define AARCH64_MDRAR_EL1_ROMADDR_47_12_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfffffffffULL ) + +#define AARCH64_MDRAR_EL1_ROMADDR_51_48( _val ) ( ( _val ) << 48 ) +#define AARCH64_MDRAR_EL1_ROMADDR_51_48_SHIFT 48 +#define AARCH64_MDRAR_EL1_ROMADDR_51_48_MASK 0xf000000000000ULL +#define AARCH64_MDRAR_EL1_ROMADDR_51_48_GET( _reg ) \ + ( ( ( _reg ) >> 48 ) & 0xfULL ) + +static inline uint64_t _AArch64_Read_mdrar_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDRAR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* MDSCR_EL1, Monitor Debug System Control Register */ + +#define AARCH64_MDSCR_EL1_SS 0x1U + +#define AARCH64_MDSCR_EL1_ERR 0x40U + +#define AARCH64_MDSCR_EL1_TDCC 0x1000U + +#define AARCH64_MDSCR_EL1_KDE 0x2000U + +#define AARCH64_MDSCR_EL1_HDE 0x4000U + +#define AARCH64_MDSCR_EL1_MDE 0x8000U + +#define AARCH64_MDSCR_EL1_SC2 0x80000U + +#define AARCH64_MDSCR_EL1_TDA 0x200000U + +#define AARCH64_MDSCR_EL1_INTDIS( _val ) ( ( _val ) << 22 ) +#define AARCH64_MDSCR_EL1_INTDIS_SHIFT 22 +#define 
AARCH64_MDSCR_EL1_INTDIS_MASK 0xc00000U +#define AARCH64_MDSCR_EL1_INTDIS_GET( _reg ) \ + ( ( ( _reg ) >> 22 ) & 0x3U ) + +#define AARCH64_MDSCR_EL1_TXU 0x4000000U + +#define AARCH64_MDSCR_EL1_RXO 0x8000000U + +#define AARCH64_MDSCR_EL1_TXFULL 0x20000000U + +#define AARCH64_MDSCR_EL1_RXFULL 0x40000000U + +#define AARCH64_MDSCR_EL1_TFO 0x80000000U + +static inline uint64_t _AArch64_Read_mdscr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, MDSCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_mdscr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr MDSCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSDLR_EL1, OS Double Lock Register */ + +#define AARCH64_OSDLR_EL1_DLK 0x1U + +static inline uint64_t _AArch64_Read_osdlr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, OSDLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_osdlr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr OSDLR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSDTRRX_EL1, OS Lock Data Transfer Register, Receive */ + +static inline uint64_t _AArch64_Read_osdtrrx_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, OSDTRRX_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_osdtrrx_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr OSDTRRX_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSDTRTX_EL1, OS Lock Data Transfer Register, Transmit */ + +static inline uint64_t _AArch64_Read_osdtrtx_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, OSDTRTX_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_osdtrtx_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr OSDTRTX_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSECCR_EL1, OS Lock Exception Catch Control Register */ + +#define AARCH64_OSECCR_EL1_EDECCR( _val ) ( ( 
_val ) << 0 ) +#define AARCH64_OSECCR_EL1_EDECCR_SHIFT 0 +#define AARCH64_OSECCR_EL1_EDECCR_MASK 0xffffffffU +#define AARCH64_OSECCR_EL1_EDECCR_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_oseccr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, OSECCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_oseccr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr OSECCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSLAR_EL1, OS Lock Access Register */ + +#define AARCH64_OSLAR_EL1_OSLK 0x1U + +static inline void _AArch64_Write_oslar_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr OSLAR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* OSLSR_EL1, OS Lock Status Register */ + +#define AARCH64_OSLSR_EL1_OSLM_0 0x1U + +#define AARCH64_OSLSR_EL1_OSLK 0x2U + +#define AARCH64_OSLSR_EL1_NTT 0x4U + +#define AARCH64_OSLSR_EL1_OSLM_1 0x8U + +static inline uint64_t _AArch64_Read_oslsr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, OSLSR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* SDER32_EL2, AArch64 Secure Debug Enable Register */ + +#define AARCH64_SDER32_EL2_SUIDEN 0x1U + +#define AARCH64_SDER32_EL2_SUNIDEN 0x2U + +static inline uint64_t _AArch64_Read_sder32_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, SDER32_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_sder32_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr SDER32_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* SDER32_EL3, AArch64 Secure Debug Enable Register */ + +#define AARCH64_SDER32_EL3_SUIDEN 0x1U + +#define AARCH64_SDER32_EL3_SUNIDEN 0x2U + +static inline uint64_t _AArch64_Read_sder32_el3( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, SDER32_EL3" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_sder32_el3( 
uint64_t value ) +{ + __asm__ volatile ( + "msr SDER32_EL3, %0" : : "r" ( value ) : "memory" + ); +} + +/* TRFCR_EL1, Trace Filter Control Register (EL1) */ + +#define AARCH64_TRFCR_EL1_E0TRE 0x1U + +#define AARCH64_TRFCR_EL1_E1TRE 0x2U + +#define AARCH64_TRFCR_EL1_TS( _val ) ( ( _val ) << 5 ) +#define AARCH64_TRFCR_EL1_TS_SHIFT 5 +#define AARCH64_TRFCR_EL1_TS_MASK 0x60U +#define AARCH64_TRFCR_EL1_TS_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x3U ) + +static inline uint64_t _AArch64_Read_trfcr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, TRFCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_trfcr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr TRFCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* TRFCR_EL2, Trace Filter Control Register (EL2) */ + +#define AARCH64_TRFCR_EL2_E0HTRE 0x1U + +#define AARCH64_TRFCR_EL2_E2TRE 0x2U + +#define AARCH64_TRFCR_EL2_CX 0x8U + +#define AARCH64_TRFCR_EL2_TS( _val ) ( ( _val ) << 5 ) +#define AARCH64_TRFCR_EL2_TS_SHIFT 5 +#define AARCH64_TRFCR_EL2_TS_MASK 0x60U +#define AARCH64_TRFCR_EL2_TS_GET( _reg ) \ + ( ( ( _reg ) >> 5 ) & 0x3U ) + +static inline uint64_t _AArch64_Read_trfcr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, TRFCR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_trfcr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr TRFCR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMCCFILTR_EL0, Performance Monitors Cycle Count Filter Register */ + +#define AARCH64_PMCCFILTR_EL0_SH 0x1000000U + +#define AARCH64_PMCCFILTR_EL0_M 0x4000000U + +#define AARCH64_PMCCFILTR_EL0_NSH 0x8000000U + +#define AARCH64_PMCCFILTR_EL0_NSU 0x10000000U + +#define AARCH64_PMCCFILTR_EL0_NSK 0x20000000U + +#define AARCH64_PMCCFILTR_EL0_U 0x40000000U + +#define AARCH64_PMCCFILTR_EL0_P 0x80000000U + +static inline uint64_t _AArch64_Read_pmccfiltr_el0( void ) +{ + uint64_t value; + + __asm__ 
volatile ( + "mrs %0, PMCCFILTR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmccfiltr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMCCFILTR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMCCNTR_EL0, Performance Monitors Cycle Count Register */ + +static inline uint64_t _AArch64_Read_pmccntr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCCNTR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmccntr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMCCNTR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMCEID0_EL0, Performance Monitors Common Event Identification Register 0 */ + +static inline uint64_t _AArch64_Read_pmceid0_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCEID0_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PMCEID1_EL0, Performance Monitors Common Event Identification Register 1 */ + +static inline uint64_t _AArch64_Read_pmceid1_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCEID1_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PMCNTENCLR_EL0, Performance Monitors Count Enable Clear Register */ + +#define AARCH64_PMCNTENCLR_EL0_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmcntenclr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCNTENCLR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmcntenclr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMCNTENCLR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMCNTENSET_EL0, Performance Monitors Count Enable Set Register */ + +#define AARCH64_PMCNTENSET_EL0_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmcntenset_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCNTENSET_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void 
_AArch64_Write_pmcntenset_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMCNTENSET_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMCR_EL0, Performance Monitors Control Register */ + +#define AARCH64_PMCR_EL0_E 0x1U + +#define AARCH64_PMCR_EL0_P 0x2U + +#define AARCH64_PMCR_EL0_C 0x4U + +#define AARCH64_PMCR_EL0_D 0x8U + +#define AARCH64_PMCR_EL0_X 0x10U + +#define AARCH64_PMCR_EL0_DP 0x20U + +#define AARCH64_PMCR_EL0_LC 0x40U + +#define AARCH64_PMCR_EL0_LP 0x80U + +#define AARCH64_PMCR_EL0_N( _val ) ( ( _val ) << 11 ) +#define AARCH64_PMCR_EL0_N_SHIFT 11 +#define AARCH64_PMCR_EL0_N_MASK 0xf800U +#define AARCH64_PMCR_EL0_N_GET( _reg ) \ + ( ( ( _reg ) >> 11 ) & 0x1fU ) + +#define AARCH64_PMCR_EL0_IDCODE( _val ) ( ( _val ) << 16 ) +#define AARCH64_PMCR_EL0_IDCODE_SHIFT 16 +#define AARCH64_PMCR_EL0_IDCODE_MASK 0xff0000U +#define AARCH64_PMCR_EL0_IDCODE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xffU ) + +#define AARCH64_PMCR_EL0_IMP( _val ) ( ( _val ) << 24 ) +#define AARCH64_PMCR_EL0_IMP_SHIFT 24 +#define AARCH64_PMCR_EL0_IMP_MASK 0xff000000U +#define AARCH64_PMCR_EL0_IMP_GET( _reg ) \ + ( ( ( _reg ) >> 24 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_pmcr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMCR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmcr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMCR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMEVCNTR_N_EL0, Performance Monitors Event Count Registers, n = 0 - 30 */ + +static inline uint64_t _AArch64_Read_pmevcntr_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMEVCNTR_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmevcntr_n_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMEVCNTR_N_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMEVTYPER_N_EL0, Performance Monitors Event Type Registers, n = 0 - 30 */ + +#define 
AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_SHIFT 0 +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_MASK 0x3ffU +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3ffU ) + +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10( _val ) ( ( _val ) << 10 ) +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_SHIFT 10 +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_MASK 0xfc00U +#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x3fU ) + +#define AARCH64_PMEVTYPER_N_EL0_SH 0x1000000U + +#define AARCH64_PMEVTYPER_N_EL0_MT 0x2000000U + +#define AARCH64_PMEVTYPER_N_EL0_M 0x4000000U + +#define AARCH64_PMEVTYPER_N_EL0_NSH 0x8000000U + +#define AARCH64_PMEVTYPER_N_EL0_NSU 0x10000000U + +#define AARCH64_PMEVTYPER_N_EL0_NSK 0x20000000U + +#define AARCH64_PMEVTYPER_N_EL0_U 0x40000000U + +#define AARCH64_PMEVTYPER_N_EL0_P 0x80000000U + +static inline uint64_t _AArch64_Read_pmevtyper_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMEVTYPER_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmevtyper_n_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMEVTYPER_N_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMINTENCLR_EL1, Performance Monitors Interrupt Enable Clear Register */ + +#define AARCH64_PMINTENCLR_EL1_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmintenclr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMINTENCLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmintenclr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMINTENCLR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMINTENSET_EL1, Performance Monitors Interrupt Enable Set Register */ + +#define AARCH64_PMINTENSET_EL1_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmintenset_el1( void ) +{ + uint64_t value; + + 
__asm__ volatile ( + "mrs %0, PMINTENSET_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmintenset_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMINTENSET_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMMIR_EL1, Performance Monitors Machine Identification Register */ + +#define AARCH64_PMMIR_EL1_SLOTS( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMMIR_EL1_SLOTS_SHIFT 0 +#define AARCH64_PMMIR_EL1_SLOTS_MASK 0xffU +#define AARCH64_PMMIR_EL1_SLOTS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +static inline uint64_t _AArch64_Read_pmmir_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMMIR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PMOVSCLR_EL0, Performance Monitors Overflow Flag Status Clear Register */ + +#define AARCH64_PMOVSCLR_EL0_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmovsclr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMOVSCLR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmovsclr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMOVSCLR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMOVSSET_EL0, Performance Monitors Overflow Flag Status Set Register */ + +#define AARCH64_PMOVSSET_EL0_C 0x80000000U + +static inline uint64_t _AArch64_Read_pmovsset_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMOVSSET_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmovsset_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMOVSSET_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSELR_EL0, Performance Monitors Event Counter Selection Register */ + +#define AARCH64_PMSELR_EL0_SEL( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMSELR_EL0_SEL_SHIFT 0 +#define AARCH64_PMSELR_EL0_SEL_MASK 0x1fU +#define AARCH64_PMSELR_EL0_SEL_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x1fU ) + +static inline uint64_t 
_AArch64_Read_pmselr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSELR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmselr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSELR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSWINC_EL0, Performance Monitors Software Increment Register */ + +static inline void _AArch64_Write_pmswinc_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSWINC_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMUSERENR_EL0, Performance Monitors User Enable Register */ + +#define AARCH64_PMUSERENR_EL0_EN 0x1U + +#define AARCH64_PMUSERENR_EL0_SW 0x2U + +#define AARCH64_PMUSERENR_EL0_CR 0x4U + +#define AARCH64_PMUSERENR_EL0_ER 0x8U + +static inline uint64_t _AArch64_Read_pmuserenr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMUSERENR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmuserenr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMUSERENR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMXEVCNTR_EL0, Performance Monitors Selected Event Count Register */ + +#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_SHIFT 0 +#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_MASK 0xffffffffU +#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_pmxevcntr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMXEVCNTR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmxevcntr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMXEVCNTR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMXEVTYPER_EL0, Performance Monitors Selected Event Type Register */ + +static inline uint64_t _AArch64_Read_pmxevtyper_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, 
PMXEVTYPER_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmxevtyper_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr PMXEVTYPER_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMCFGR_EL0, Activity Monitors Configuration Register */ + +#define AARCH64_AMCFGR_EL0_N( _val ) ( ( _val ) << 0 ) +#define AARCH64_AMCFGR_EL0_N_SHIFT 0 +#define AARCH64_AMCFGR_EL0_N_MASK 0xffU +#define AARCH64_AMCFGR_EL0_N_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +#define AARCH64_AMCFGR_EL0_SIZE( _val ) ( ( _val ) << 8 ) +#define AARCH64_AMCFGR_EL0_SIZE_SHIFT 8 +#define AARCH64_AMCFGR_EL0_SIZE_MASK 0x3f00U +#define AARCH64_AMCFGR_EL0_SIZE_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0x3fU ) + +#define AARCH64_AMCFGR_EL0_HDBG 0x1000000U + +#define AARCH64_AMCFGR_EL0_NCG( _val ) ( ( _val ) << 28 ) +#define AARCH64_AMCFGR_EL0_NCG_SHIFT 28 +#define AARCH64_AMCFGR_EL0_NCG_MASK 0xf0000000U +#define AARCH64_AMCFGR_EL0_NCG_GET( _reg ) \ + ( ( ( _reg ) >> 28 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_amcfgr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCFGR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* AMCG1IDR_EL0, Activity Monitors Counter Group 1 Identification Register */ + +static inline uint64_t _AArch64_Read_amcg1idr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCG1IDR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* AMCGCR_EL0, Activity Monitors Counter Group Configuration Register */ + +#define AARCH64_AMCGCR_EL0_CG0NC( _val ) ( ( _val ) << 0 ) +#define AARCH64_AMCGCR_EL0_CG0NC_SHIFT 0 +#define AARCH64_AMCGCR_EL0_CG0NC_MASK 0xffU +#define AARCH64_AMCGCR_EL0_CG0NC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffU ) + +#define AARCH64_AMCGCR_EL0_CG1NC( _val ) ( ( _val ) << 8 ) +#define AARCH64_AMCGCR_EL0_CG1NC_SHIFT 8 +#define AARCH64_AMCGCR_EL0_CG1NC_MASK 0xff00U +#define AARCH64_AMCGCR_EL0_CG1NC_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xffU ) + 
+static inline uint64_t _AArch64_Read_amcgcr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCGCR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* AMCNTENCLR0_EL0, Activity Monitors Count Enable Clear Register 0 */ + +static inline uint64_t _AArch64_Read_amcntenclr0_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCNTENCLR0_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amcntenclr0_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMCNTENCLR0_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMCNTENCLR1_EL0, Activity Monitors Count Enable Clear Register 1 */ + +static inline uint64_t _AArch64_Read_amcntenclr1_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCNTENCLR1_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amcntenclr1_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMCNTENCLR1_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMCNTENSET0_EL0, Activity Monitors Count Enable Set Register 0 */ + +static inline uint64_t _AArch64_Read_amcntenset0_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCNTENSET0_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amcntenset0_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMCNTENSET0_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMCNTENSET1_EL0, Activity Monitors Count Enable Set Register 1 */ + +static inline uint64_t _AArch64_Read_amcntenset1_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCNTENSET1_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amcntenset1_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMCNTENSET1_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMCR_EL0, Activity Monitors Control Register */ + +#define AARCH64_AMCR_EL0_HDBG 0x400U + +#define 
AARCH64_AMCR_EL0_CG1RZ 0x20000U + +static inline uint64_t _AArch64_Read_amcr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMCR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amcr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMCR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMEVCNTR0_N_EL0, Activity Monitors Event Counter Registers 0, n = 0 - 15 */ + +static inline uint64_t _AArch64_Read_amevcntr0_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVCNTR0_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amevcntr0_n_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMEVCNTR0_N_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMEVCNTR1_N_EL0, Activity Monitors Event Counter Registers 1, n = 0 - 15 */ + +static inline uint64_t _AArch64_Read_amevcntr1_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVCNTR1_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amevcntr1_n_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMEVCNTR1_N_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMEVCNTVOFF0_N_EL2, Activity Monitors Event Counter Virtual Offset Registers 0, n = 0 - */ + +static inline uint64_t _AArch64_Read_amevcntvoff0_n_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVCNTVOFF0_N_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amevcntvoff0_n_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr AMEVCNTVOFF0_N_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMEVCNTVOFF1_N_EL2, Activity Monitors Event Counter Virtual Offset Registers 1, n = 0 - */ + +static inline uint64_t _AArch64_Read_amevcntvoff1_n_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVCNTVOFF1_N_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static 
inline void _AArch64_Write_amevcntvoff1_n_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr AMEVCNTVOFF1_N_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMEVTYPER0_N_EL0, Activity Monitors Event Type Registers 0, n = 0 - 15 */ + +#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT( _val ) ( ( _val ) << 0 ) +#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_SHIFT 0 +#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_MASK 0xffffU +#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +static inline uint64_t _AArch64_Read_amevtyper0_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVTYPER0_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* AMEVTYPER1_N_EL0, Activity Monitors Event Type Registers 1, n = 0 - 15 */ + +#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT( _val ) ( ( _val ) << 0 ) +#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_SHIFT 0 +#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_MASK 0xffffU +#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +static inline uint64_t _AArch64_Read_amevtyper1_n_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMEVTYPER1_N_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amevtyper1_n_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMEVTYPER1_N_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* AMUSERENR_EL0, Activity Monitors User Enable Register */ + +#define AARCH64_AMUSERENR_EL0_EN 0x1U + +static inline uint64_t _AArch64_Read_amuserenr_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, AMUSERENR_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_amuserenr_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr AMUSERENR_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMBIDR_EL1, Profiling Buffer ID Register */ + +#define AARCH64_PMBIDR_EL1_ALIGN( _val ) ( ( _val ) << 0 ) +#define 
AARCH64_PMBIDR_EL1_ALIGN_SHIFT 0 +#define AARCH64_PMBIDR_EL1_ALIGN_MASK 0xfU +#define AARCH64_PMBIDR_EL1_ALIGN_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_PMBIDR_EL1_P 0x10U + +#define AARCH64_PMBIDR_EL1_F 0x20U + +static inline uint64_t _AArch64_Read_pmbidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMBIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PMBLIMITR_EL1, Profiling Buffer Limit Address Register */ + +#define AARCH64_PMBLIMITR_EL1_E 0x1U + +#define AARCH64_PMBLIMITR_EL1_FM( _val ) ( ( _val ) << 1 ) +#define AARCH64_PMBLIMITR_EL1_FM_SHIFT 1 +#define AARCH64_PMBLIMITR_EL1_FM_MASK 0x6U +#define AARCH64_PMBLIMITR_EL1_FM_GET( _reg ) \ + ( ( ( _reg ) >> 1 ) & 0x3U ) + +#define AARCH64_PMBLIMITR_EL1_LIMIT( _val ) ( ( _val ) << 12 ) +#define AARCH64_PMBLIMITR_EL1_LIMIT_SHIFT 12 +#define AARCH64_PMBLIMITR_EL1_LIMIT_MASK 0xfffffffffffff000ULL +#define AARCH64_PMBLIMITR_EL1_LIMIT_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfffffffffffffULL ) + +static inline uint64_t _AArch64_Read_pmblimitr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMBLIMITR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmblimitr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMBLIMITR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMBPTR_EL1, Profiling Buffer Write Pointer Register */ + +static inline uint64_t _AArch64_Read_pmbptr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMBPTR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmbptr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMBPTR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMBSR_EL1, Profiling Buffer Status/syndrome Register */ + +#define AARCH64_PMBSR_EL1_BSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMBSR_EL1_BSC_SHIFT 0 +#define AARCH64_PMBSR_EL1_BSC_MASK 0x3fU +#define AARCH64_PMBSR_EL1_BSC_GET( 
_reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_PMBSR_EL1_FSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMBSR_EL1_FSC_SHIFT 0 +#define AARCH64_PMBSR_EL1_FSC_MASK 0x3fU +#define AARCH64_PMBSR_EL1_FSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_PMBSR_EL1_MSS( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMBSR_EL1_MSS_SHIFT 0 +#define AARCH64_PMBSR_EL1_MSS_MASK 0xffffU +#define AARCH64_PMBSR_EL1_MSS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +#define AARCH64_PMBSR_EL1_COLL 0x10000U + +#define AARCH64_PMBSR_EL1_S 0x20000U + +#define AARCH64_PMBSR_EL1_EA 0x40000U + +#define AARCH64_PMBSR_EL1_DL 0x80000U + +#define AARCH64_PMBSR_EL1_EC( _val ) ( ( _val ) << 26 ) +#define AARCH64_PMBSR_EL1_EC_SHIFT 26 +#define AARCH64_PMBSR_EL1_EC_MASK 0xfc000000U +#define AARCH64_PMBSR_EL1_EC_GET( _reg ) \ + ( ( ( _reg ) >> 26 ) & 0x3fU ) + +static inline uint64_t _AArch64_Read_pmbsr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMBSR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmbsr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMBSR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSCR_EL1, Statistical Profiling Control Register (EL1) */ + +#define AARCH64_PMSCR_EL1_E0SPE 0x1U + +#define AARCH64_PMSCR_EL1_E1SPE 0x2U + +#define AARCH64_PMSCR_EL1_CX 0x8U + +#define AARCH64_PMSCR_EL1_PA 0x10U + +#define AARCH64_PMSCR_EL1_TS 0x20U + +#define AARCH64_PMSCR_EL1_PCT( _val ) ( ( _val ) << 6 ) +#define AARCH64_PMSCR_EL1_PCT_SHIFT 6 +#define AARCH64_PMSCR_EL1_PCT_MASK 0xc0U +#define AARCH64_PMSCR_EL1_PCT_GET( _reg ) \ + ( ( ( _reg ) >> 6 ) & 0x3U ) + +static inline uint64_t _AArch64_Read_pmscr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmscr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + 
+/* PMSCR_EL2, Statistical Profiling Control Register (EL2) */ + +#define AARCH64_PMSCR_EL2_E0HSPE 0x1U + +#define AARCH64_PMSCR_EL2_E2SPE 0x2U + +#define AARCH64_PMSCR_EL2_CX 0x8U + +#define AARCH64_PMSCR_EL2_PA 0x10U + +#define AARCH64_PMSCR_EL2_TS 0x20U + +#define AARCH64_PMSCR_EL2_PCT( _val ) ( ( _val ) << 6 ) +#define AARCH64_PMSCR_EL2_PCT_SHIFT 6 +#define AARCH64_PMSCR_EL2_PCT_MASK 0xc0U +#define AARCH64_PMSCR_EL2_PCT_GET( _reg ) \ + ( ( ( _reg ) >> 6 ) & 0x3U ) + +static inline uint64_t _AArch64_Read_pmscr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSCR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmscr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSCR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSEVFR_EL1, Sampling Event Filter Register */ + +#define AARCH64_PMSEVFR_EL1_E_1 0x2U + +#define AARCH64_PMSEVFR_EL1_E_3 0x8U + +#define AARCH64_PMSEVFR_EL1_E_5 0x20U + +#define AARCH64_PMSEVFR_EL1_E_7 0x80U + +#define AARCH64_PMSEVFR_EL1_E_11 0x800U + +#define AARCH64_PMSEVFR_EL1_E_12 0x1000U + +#define AARCH64_PMSEVFR_EL1_E_13 0x2000U + +#define AARCH64_PMSEVFR_EL1_E_14 0x4000U + +#define AARCH64_PMSEVFR_EL1_E_15 0x8000U + +#define AARCH64_PMSEVFR_EL1_E_17 0x20000U + +#define AARCH64_PMSEVFR_EL1_E_18 0x40000U + +#define AARCH64_PMSEVFR_EL1_E_24 0x1000000U + +#define AARCH64_PMSEVFR_EL1_E_25 0x2000000U + +#define AARCH64_PMSEVFR_EL1_E_26 0x4000000U + +#define AARCH64_PMSEVFR_EL1_E_27 0x8000000U + +#define AARCH64_PMSEVFR_EL1_E_28 0x10000000U + +#define AARCH64_PMSEVFR_EL1_E_29 0x20000000U + +#define AARCH64_PMSEVFR_EL1_E_30 0x40000000U + +#define AARCH64_PMSEVFR_EL1_E_31 0x80000000U + +#define AARCH64_PMSEVFR_EL1_E_48 0x1000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_49 0x2000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_50 0x4000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_51 0x8000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_52 0x10000000000000ULL + +#define 
AARCH64_PMSEVFR_EL1_E_53 0x20000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_54 0x40000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_55 0x80000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_56 0x100000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_57 0x200000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_58 0x400000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_59 0x800000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_60 0x1000000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_61 0x2000000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_62 0x4000000000000000ULL + +#define AARCH64_PMSEVFR_EL1_E_63 0x8000000000000000ULL + +static inline uint64_t _AArch64_Read_pmsevfr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSEVFR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmsevfr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSEVFR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSFCR_EL1, Sampling Filter Control Register */ + +#define AARCH64_PMSFCR_EL1_FE 0x1U + +#define AARCH64_PMSFCR_EL1_FT 0x2U + +#define AARCH64_PMSFCR_EL1_FL 0x4U + +#define AARCH64_PMSFCR_EL1_B 0x10000U + +#define AARCH64_PMSFCR_EL1_LD 0x20000U + +#define AARCH64_PMSFCR_EL1_ST 0x40000U + +static inline uint64_t _AArch64_Read_pmsfcr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSFCR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmsfcr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSFCR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSICR_EL1, Sampling Interval Counter Register */ + +#define AARCH64_PMSICR_EL1_COUNT( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMSICR_EL1_COUNT_SHIFT 0 +#define AARCH64_PMSICR_EL1_COUNT_MASK 0xffffffffU +#define AARCH64_PMSICR_EL1_COUNT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +#define AARCH64_PMSICR_EL1_ECOUNT( _val ) ( ( _val ) << 56 ) +#define AARCH64_PMSICR_EL1_ECOUNT_SHIFT 56 +#define 
AARCH64_PMSICR_EL1_ECOUNT_MASK 0xff00000000000000ULL +#define AARCH64_PMSICR_EL1_ECOUNT_GET( _reg ) \ + ( ( ( _reg ) >> 56 ) & 0xffULL ) + +static inline uint64_t _AArch64_Read_pmsicr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSICR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmsicr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSICR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSIDR_EL1, Sampling Profiling ID Register */ + +#define AARCH64_PMSIDR_EL1_FE 0x1U + +#define AARCH64_PMSIDR_EL1_FT 0x2U + +#define AARCH64_PMSIDR_EL1_FL 0x4U + +#define AARCH64_PMSIDR_EL1_ARCHINST 0x8U + +#define AARCH64_PMSIDR_EL1_LDS 0x10U + +#define AARCH64_PMSIDR_EL1_ERND 0x20U + +#define AARCH64_PMSIDR_EL1_INTERVAL( _val ) ( ( _val ) << 8 ) +#define AARCH64_PMSIDR_EL1_INTERVAL_SHIFT 8 +#define AARCH64_PMSIDR_EL1_INTERVAL_MASK 0xf00U +#define AARCH64_PMSIDR_EL1_INTERVAL_GET( _reg ) \ + ( ( ( _reg ) >> 8 ) & 0xfU ) + +#define AARCH64_PMSIDR_EL1_MAXSIZE( _val ) ( ( _val ) << 12 ) +#define AARCH64_PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define AARCH64_PMSIDR_EL1_MAXSIZE_MASK 0xf000U +#define AARCH64_PMSIDR_EL1_MAXSIZE_GET( _reg ) \ + ( ( ( _reg ) >> 12 ) & 0xfU ) + +#define AARCH64_PMSIDR_EL1_COUNTSIZE( _val ) ( ( _val ) << 16 ) +#define AARCH64_PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define AARCH64_PMSIDR_EL1_COUNTSIZE_MASK 0xf0000U +#define AARCH64_PMSIDR_EL1_COUNTSIZE_GET( _reg ) \ + ( ( ( _reg ) >> 16 ) & 0xfU ) + +static inline uint64_t _AArch64_Read_pmsidr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* PMSIRR_EL1, Sampling Interval Reload Register */ + +#define AARCH64_PMSIRR_EL1_RND 0x1U + +#define AARCH64_PMSIRR_EL1_INTERVAL( _val ) ( ( _val ) << 8 ) +#define AARCH64_PMSIRR_EL1_INTERVAL_SHIFT 8 +#define AARCH64_PMSIRR_EL1_INTERVAL_MASK 0xffffff00U +#define AARCH64_PMSIRR_EL1_INTERVAL_GET( _reg ) \ + ( ( ( _reg ) 
>> 8 ) & 0xffffffU ) + +static inline uint64_t _AArch64_Read_pmsirr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSIRR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmsirr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSIRR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* PMSLATFR_EL1, Sampling Latency Filter Register */ + +#define AARCH64_PMSLATFR_EL1_MINLAT( _val ) ( ( _val ) << 0 ) +#define AARCH64_PMSLATFR_EL1_MINLAT_SHIFT 0 +#define AARCH64_PMSLATFR_EL1_MINLAT_MASK 0xfffU +#define AARCH64_PMSLATFR_EL1_MINLAT_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfffU ) + +static inline uint64_t _AArch64_Read_pmslatfr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, PMSLATFR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_pmslatfr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr PMSLATFR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* DISR_EL1, Deferred Interrupt Status Register */ + +#define AARCH64_DISR_EL1_DFSC( _val ) ( ( _val ) << 0 ) +#define AARCH64_DISR_EL1_DFSC_SHIFT 0 +#define AARCH64_DISR_EL1_DFSC_MASK 0x3fU +#define AARCH64_DISR_EL1_DFSC_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_DISR_EL1_ISS( _val ) ( ( _val ) << 0 ) +#define AARCH64_DISR_EL1_ISS_SHIFT 0 +#define AARCH64_DISR_EL1_ISS_MASK 0xffffffU +#define AARCH64_DISR_EL1_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffU ) + +#define AARCH64_DISR_EL1_EA 0x200U + +#define AARCH64_DISR_EL1_AET( _val ) ( ( _val ) << 10 ) +#define AARCH64_DISR_EL1_AET_SHIFT 10 +#define AARCH64_DISR_EL1_AET_MASK 0x1c00U +#define AARCH64_DISR_EL1_AET_GET( _reg ) \ + ( ( ( _reg ) >> 10 ) & 0x7U ) + +#define AARCH64_DISR_EL1_IDS 0x1000000U + +#define AARCH64_DISR_EL1_A 0x80000000U + +static inline uint64_t _AArch64_Read_disr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, DISR_EL1" : "=&r" ( value ) : : "memory" + ); + + return 
value; +} + +static inline void _AArch64_Write_disr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr DISR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERRIDR_EL1, Error Record ID Register */ + +#define AARCH64_ERRIDR_EL1_NUM( _val ) ( ( _val ) << 0 ) +#define AARCH64_ERRIDR_EL1_NUM_SHIFT 0 +#define AARCH64_ERRIDR_EL1_NUM_MASK 0xffffU +#define AARCH64_ERRIDR_EL1_NUM_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +static inline uint64_t _AArch64_Read_erridr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERRIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ERRSELR_EL1, Error Record Select Register */ + +#define AARCH64_ERRSELR_EL1_SEL( _val ) ( ( _val ) << 0 ) +#define AARCH64_ERRSELR_EL1_SEL_SHIFT 0 +#define AARCH64_ERRSELR_EL1_SEL_MASK 0xffffU +#define AARCH64_ERRSELR_EL1_SEL_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffU ) + +static inline uint64_t _AArch64_Read_errselr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERRSELR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_errselr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERRSELR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXADDR_EL1, Selected Error Record Address Register */ + +static inline uint64_t _AArch64_Read_erxaddr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXADDR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxaddr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXADDR_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXCTLR_EL1, Selected Error Record Control Register */ + +static inline uint64_t _AArch64_Read_erxctlr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXCTLR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxctlr_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXCTLR_EL1, %0" : : "r" ( 
value ) : "memory" + ); +} + +/* ERXFR_EL1, Selected Error Record Feature Register */ + +static inline uint64_t _AArch64_Read_erxfr_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXFR_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ERXMISC0_EL1, Selected Error Record Miscellaneous Register 0 */ + +static inline uint64_t _AArch64_Read_erxmisc0_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXMISC0_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxmisc0_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXMISC0_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXMISC1_EL1, Selected Error Record Miscellaneous Register 1 */ + +static inline uint64_t _AArch64_Read_erxmisc1_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXMISC1_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxmisc1_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXMISC1_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXMISC2_EL1, Selected Error Record Miscellaneous Register 2 */ + +static inline uint64_t _AArch64_Read_erxmisc2_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXMISC2_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxmisc2_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXMISC2_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXMISC3_EL1, Selected Error Record Miscellaneous Register 3 */ + +static inline uint64_t _AArch64_Read_erxmisc3_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXMISC3_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxmisc3_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXMISC3_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXPFGCDN_EL1, Selected Pseudo-fault Generation Countdown Register */ + +static 
inline uint64_t _AArch64_Read_erxpfgcdn_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXPFGCDN_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxpfgcdn_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXPFGCDN_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXPFGCTL_EL1, Selected Pseudo-fault Generation Control Register */ + +static inline uint64_t _AArch64_Read_erxpfgctl_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXPFGCTL_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxpfgctl_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXPFGCTL_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* ERXPFGF_EL1, Selected Pseudo-fault Generation Feature Register */ + +static inline uint64_t _AArch64_Read_erxpfgf_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXPFGF_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* ERXSTATUS_EL1, Selected Error Record Primary Status Register */ + +static inline uint64_t _AArch64_Read_erxstatus_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, ERXSTATUS_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_erxstatus_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr ERXSTATUS_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* VDISR_EL2, Virtual Deferred Interrupt Status Register */ + +#define AARCH64_VDISR_EL2_FS_3_0( _val ) ( ( _val ) << 0 ) +#define AARCH64_VDISR_EL2_FS_3_0_SHIFT 0 +#define AARCH64_VDISR_EL2_FS_3_0_MASK 0xfU +#define AARCH64_VDISR_EL2_FS_3_0_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xfU ) + +#define AARCH64_VDISR_EL2_STATUS( _val ) ( ( _val ) << 0 ) +#define AARCH64_VDISR_EL2_STATUS_SHIFT 0 +#define AARCH64_VDISR_EL2_STATUS_MASK 0x3fU +#define AARCH64_VDISR_EL2_STATUS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0x3fU ) + +#define AARCH64_VDISR_EL2_ISS( _val ) ( ( _val ) << 
0 ) +#define AARCH64_VDISR_EL2_ISS_SHIFT 0 +#define AARCH64_VDISR_EL2_ISS_MASK 0xffffffU +#define AARCH64_VDISR_EL2_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffU ) + +#define AARCH64_VDISR_EL2_LPAE 0x200U + +#define AARCH64_VDISR_EL2_FS_4 0x400U + +#define AARCH64_VDISR_EL2_EXT 0x1000U + +#define AARCH64_VDISR_EL2_AET( _val ) ( ( _val ) << 14 ) +#define AARCH64_VDISR_EL2_AET_SHIFT 14 +#define AARCH64_VDISR_EL2_AET_MASK 0xc000U +#define AARCH64_VDISR_EL2_AET_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_VDISR_EL2_IDS 0x1000000U + +#define AARCH64_VDISR_EL2_A 0x80000000U + +static inline uint64_t _AArch64_Read_vdisr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, VDISR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_vdisr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr VDISR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* VSESR_EL2, Virtual SError Exception Syndrome Register */ + +#define AARCH64_VSESR_EL2_ISS( _val ) ( ( _val ) << 0 ) +#define AARCH64_VSESR_EL2_ISS_SHIFT 0 +#define AARCH64_VSESR_EL2_ISS_MASK 0xffffffU +#define AARCH64_VSESR_EL2_ISS_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffU ) + +#define AARCH64_VSESR_EL2_EXT 0x1000U + +#define AARCH64_VSESR_EL2_AET( _val ) ( ( _val ) << 14 ) +#define AARCH64_VSESR_EL2_AET_SHIFT 14 +#define AARCH64_VSESR_EL2_AET_MASK 0xc000U +#define AARCH64_VSESR_EL2_AET_GET( _reg ) \ + ( ( ( _reg ) >> 14 ) & 0x3U ) + +#define AARCH64_VSESR_EL2_IDS 0x1000000U + +static inline uint64_t _AArch64_Read_vsesr_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, VSESR_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_vsesr_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr VSESR_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTFRQ_EL0, Counter-timer Frequency Register */ + +static inline uint64_t _AArch64_Read_cntfrq_el0( void ) +{ + uint64_t value; + + __asm__ 
volatile ( + "mrs %0, CNTFRQ_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntfrq_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTFRQ_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHCTL_EL2, Counter-timer Hypervisor Control Register */ + +#define AARCH64_CNTHCTL_EL2_EL0PCTEN 0x1U + +#define AARCH64_CNTHCTL_EL2_EL1PCTEN_0 0x1U + +#define AARCH64_CNTHCTL_EL2_EL0VCTEN 0x2U + +#define AARCH64_CNTHCTL_EL2_EL1PCEN 0x2U + +#define AARCH64_CNTHCTL_EL2_EVNTEN 0x4U + +#define AARCH64_CNTHCTL_EL2_EVNTDIR 0x8U + +#define AARCH64_CNTHCTL_EL2_EVNTI( _val ) ( ( _val ) << 4 ) +#define AARCH64_CNTHCTL_EL2_EVNTI_SHIFT 4 +#define AARCH64_CNTHCTL_EL2_EVNTI_MASK 0xf0U +#define AARCH64_CNTHCTL_EL2_EVNTI_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_CNTHCTL_EL2_EL0VTEN 0x100U + +#define AARCH64_CNTHCTL_EL2_EL0PTEN 0x200U + +#define AARCH64_CNTHCTL_EL2_EL1PCTEN_1 0x400U + +#define AARCH64_CNTHCTL_EL2_EL1PTEN 0x800U + +#define AARCH64_CNTHCTL_EL2_ECV 0x1000U + +#define AARCH64_CNTHCTL_EL2_EL1TVT 0x2000U + +#define AARCH64_CNTHCTL_EL2_EL1TVCT 0x4000U + +#define AARCH64_CNTHCTL_EL2_EL1NVPCT 0x8000U + +#define AARCH64_CNTHCTL_EL2_EL1NVVCT 0x10000U + +#define AARCH64_CNTHCTL_EL2_EVNTIS 0x20000U + +static inline uint64_t _AArch64_Read_cnthctl_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHCTL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthctl_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHCTL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHP_CTL_EL2, Counter-timer Hypervisor Physical Timer Control Register */ + +#define AARCH64_CNTHP_CTL_EL2_ENABLE 0x1U + +#define AARCH64_CNTHP_CTL_EL2_IMASK 0x2U + +#define AARCH64_CNTHP_CTL_EL2_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cnthp_ctl_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHP_CTL_EL2" : "=&r" ( value ) : : "memory" + ); + + 
return value; +} + +static inline void _AArch64_Write_cnthp_ctl_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHP_CTL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHP_CVAL_EL2, Counter-timer Physical Timer CompareValue Register (EL2) */ + +static inline uint64_t _AArch64_Read_cnthp_cval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHP_CVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthp_cval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHP_CVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHP_TVAL_EL2, Counter-timer Physical Timer TimerValue Register (EL2) */ + +#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cnthp_tval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHP_TVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthp_tval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHP_TVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHPS_CTL_EL2, Counter-timer Secure Physical Timer Control Register (EL2) */ + +#define AARCH64_CNTHPS_CTL_EL2_ENABLE 0x1U + +#define AARCH64_CNTHPS_CTL_EL2_IMASK 0x2U + +#define AARCH64_CNTHPS_CTL_EL2_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cnthps_ctl_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHPS_CTL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthps_ctl_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHPS_CTL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHPS_CVAL_EL2, Counter-timer Secure Physical Timer CompareValue Register (EL2) */ + +static inline uint64_t 
_AArch64_Read_cnthps_cval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHPS_CVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthps_cval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHPS_CVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHPS_TVAL_EL2, Counter-timer Secure Physical Timer TimerValue Register (EL2) */ + +#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cnthps_tval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHPS_TVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthps_tval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHPS_TVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHV_CTL_EL2, Counter-timer Virtual Timer Control Register (EL2) */ + +#define AARCH64_CNTHV_CTL_EL2_ENABLE 0x1U + +#define AARCH64_CNTHV_CTL_EL2_IMASK 0x2U + +#define AARCH64_CNTHV_CTL_EL2_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cnthv_ctl_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHV_CTL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthv_ctl_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHV_CTL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHV_CVAL_EL2, Counter-timer Virtual Timer CompareValue Register (EL2) */ + +static inline uint64_t _AArch64_Read_cnthv_cval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHV_CVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthv_cval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHV_CVAL_EL2, %0" : 
: "r" ( value ) : "memory" + ); +} + +/* CNTHV_TVAL_EL2, Counter-timer Virtual Timer TimerValue Register (EL2) */ + +#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cnthv_tval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHV_TVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthv_tval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHV_TVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHVS_CTL_EL2, Counter-timer Secure Virtual Timer Control Register (EL2) */ + +#define AARCH64_CNTHVS_CTL_EL2_ENABLE 0x1U + +#define AARCH64_CNTHVS_CTL_EL2_IMASK 0x2U + +#define AARCH64_CNTHVS_CTL_EL2_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cnthvs_ctl_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHVS_CTL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthvs_ctl_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHVS_CTL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHVS_CVAL_EL2, Counter-timer Secure Virtual Timer CompareValue Register (EL2) */ + +static inline uint64_t _AArch64_Read_cnthvs_cval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHVS_CVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthvs_cval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHVS_CVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTHVS_TVAL_EL2, Counter-timer Secure Virtual Timer TimerValue Register (EL2) */ + +#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_SHIFT 0 +#define 
AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cnthvs_tval_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTHVS_TVAL_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cnthvs_tval_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTHVS_TVAL_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTKCTL_EL1, Counter-timer Kernel Control Register */ + +#define AARCH64_CNTKCTL_EL1_EL0PCTEN 0x1U + +#define AARCH64_CNTKCTL_EL1_EL0VCTEN 0x2U + +#define AARCH64_CNTKCTL_EL1_EVNTEN 0x4U + +#define AARCH64_CNTKCTL_EL1_EVNTDIR 0x8U + +#define AARCH64_CNTKCTL_EL1_EVNTI( _val ) ( ( _val ) << 4 ) +#define AARCH64_CNTKCTL_EL1_EVNTI_SHIFT 4 +#define AARCH64_CNTKCTL_EL1_EVNTI_MASK 0xf0U +#define AARCH64_CNTKCTL_EL1_EVNTI_GET( _reg ) \ + ( ( ( _reg ) >> 4 ) & 0xfU ) + +#define AARCH64_CNTKCTL_EL1_EL0VTEN 0x100U + +#define AARCH64_CNTKCTL_EL1_EL0PTEN 0x200U + +#define AARCH64_CNTKCTL_EL1_EVNTIS 0x20000U + +static inline uint64_t _AArch64_Read_cntkctl_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTKCTL_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntkctl_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTKCTL_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTP_CTL_EL0, Counter-timer Physical Timer Control Register */ + +#define AARCH64_CNTP_CTL_EL0_ENABLE 0x1U + +#define AARCH64_CNTP_CTL_EL0_IMASK 0x2U + +#define AARCH64_CNTP_CTL_EL0_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cntp_ctl_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTP_CTL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntp_ctl_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTP_CTL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* 
CNTP_CVAL_EL0, Counter-timer Physical Timer CompareValue Register */ + +static inline uint64_t _AArch64_Read_cntp_cval_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTP_CVAL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntp_cval_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTP_CVAL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTP_TVAL_EL0, Counter-timer Physical Timer TimerValue Register */ + +#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cntp_tval_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTP_TVAL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntp_tval_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTP_TVAL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTPCTSS_EL0, Counter-timer Self-Synchronized Physical Count Register */ + +static inline uint64_t _AArch64_Read_cntpctss_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPCTSS_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CNTPCT_EL0, Counter-timer Physical Count Register */ + +static inline uint64_t _AArch64_Read_cntpct_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPCT_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CNTPS_CTL_EL1, Counter-timer Physical Secure Timer Control Register */ + +#define AARCH64_CNTPS_CTL_EL1_ENABLE 0x1U + +#define AARCH64_CNTPS_CTL_EL1_IMASK 0x2U + +#define AARCH64_CNTPS_CTL_EL1_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cntps_ctl_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPS_CTL_EL1" : "=&r" ( value ) : : "memory" + ); + + 
return value; +} + +static inline void _AArch64_Write_cntps_ctl_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTPS_CTL_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTPOFF_EL2, Counter-timer Physical Offset Register */ + +static inline uint64_t _AArch64_Read_cntpoff_el2( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPOFF_EL2" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntpoff_el2( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTPOFF_EL2, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTPS_CVAL_EL1, Counter-timer Physical Secure Timer CompareValue Register */ + +static inline uint64_t _AArch64_Read_cntps_cval_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPS_CVAL_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntps_cval_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTPS_CVAL_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTPS_TVAL_EL1, Counter-timer Physical Secure Timer TimerValue Register */ + +#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cntps_tval_el1( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTPS_TVAL_EL1" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntps_tval_el1( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTPS_TVAL_EL1, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTV_CTL_EL0, Counter-timer Virtual Timer Control Register */ + +#define AARCH64_CNTV_CTL_EL0_ENABLE 0x1U + +#define AARCH64_CNTV_CTL_EL0_IMASK 0x2U + +#define AARCH64_CNTV_CTL_EL0_ISTATUS 0x4U + +static inline uint64_t _AArch64_Read_cntv_ctl_el0( void ) +{ + uint64_t value; + + 
__asm__ volatile ( + "mrs %0, CNTV_CTL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntv_ctl_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTV_CTL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTV_CVAL_EL0, Counter-timer Virtual Timer CompareValue Register */ + +static inline uint64_t _AArch64_Read_cntv_cval_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTV_CVAL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntv_cval_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTV_CVAL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTV_TVAL_EL0, Counter-timer Virtual Timer TimerValue Register */ + +#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE( _val ) ( ( _val ) << 0 ) +#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_SHIFT 0 +#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_MASK 0xffffffffU +#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_GET( _reg ) \ + ( ( ( _reg ) >> 0 ) & 0xffffffffU ) + +static inline uint64_t _AArch64_Read_cntv_tval_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTV_TVAL_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +static inline void _AArch64_Write_cntv_tval_el0( uint64_t value ) +{ + __asm__ volatile ( + "msr CNTV_TVAL_EL0, %0" : : "r" ( value ) : "memory" + ); +} + +/* CNTVCTSS_EL0, Counter-timer Self-Synchronized Virtual Count Register */ + +static inline uint64_t _AArch64_Read_cntvctss_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTVCTSS_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +/* CNTVCT_EL0, Counter-timer Virtual Count Register */ + +static inline uint64_t _AArch64_Read_cntvct_el0( void ) +{ + uint64_t value; + + __asm__ volatile ( + "mrs %0, CNTVCT_EL0" : "=&r" ( value ) : : "memory" + ); + + return value; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H */ diff --git 
a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h index 316079a6cd..aa4f90f1a8 100644 --- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h @@ -43,6 +43,7 @@ #endif #include <rtems/score/aarch64.h> #include <libcpu/vectors.h> +#include <limits.h> /** * @addtogroup RTEMSScoreCPUAArch64 @@ -101,7 +102,7 @@ #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE -#define CPU_STACK_MINIMUM_SIZE (1024 * 10) +#define CPU_STACK_MINIMUM_SIZE (1024 * 8) /* This could be either 4 or 8, depending on the ABI in use. * Could also use __LP64__ or __ILP32__ */ @@ -134,9 +135,9 @@ #ifdef RTEMS_SMP #if defined(AARCH64_MULTILIB_VFP) - #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x70 + #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0xb8 #else - #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x30 + #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x78 #endif #endif @@ -156,7 +157,14 @@ extern "C" { #endif +/* + This is to fix the following warning + ISO C does not support 'uint128_t' types +*/ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" typedef unsigned __int128 uint128_t; +#pragma GCC diagnostic pop typedef struct { uint64_t register_x19; @@ -191,12 +199,12 @@ typedef struct { static inline void _AARCH64_Data_memory_barrier( void ) { - __asm__ volatile ( "dmb LD" : : : "memory" ); + __asm__ volatile ( "dmb SY" : : : "memory" ); } static inline void _AARCH64_Data_synchronization_barrier( void ) { - __asm__ volatile ( "dsb LD" : : : "memory" ); + __asm__ volatile ( "dsb SY" : : : "memory" ); } static inline void _AARCH64_Instruction_synchronization_barrier( void ) @@ -204,9 +212,9 @@ static inline void _AARCH64_Instruction_synchronization_barrier( void ) __asm__ volatile ( "isb" : : : "memory" ); } -void _CPU_ISR_Set_level( uint64_t level ); +void _CPU_ISR_Set_level( uint32_t level ); -uint64_t _CPU_ISR_Get_level( void ); +uint32_t 
_CPU_ISR_Get_level( void ); #if defined(AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE) uint64_t AArch64_interrupt_disable( void ); @@ -252,7 +260,7 @@ static inline void AArch64_interrupt_flash( uint64_t isr_cookie ) #define _CPU_ISR_Flash( _isr_cookie ) \ AArch64_interrupt_flash( _isr_cookie ) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t isr_cookie ) +static inline bool _CPU_ISR_Is_enabled( uint64_t isr_cookie ) { return ( isr_cookie & AARCH64_PSTATE_I ) == 0; } @@ -285,6 +293,10 @@ void _CPU_Context_Initialize( { context->is_executing = is_executing; } + + RTEMS_NO_RETURN void _AArch64_Start_multitasking( Context_Control *heir ); + + #define _CPU_Start_multitasking( _heir ) _AArch64_Start_multitasking( _heir ) #endif #define _CPU_Context_Restart_self( _the_context ) \ @@ -295,19 +307,6 @@ void _CPU_Context_Initialize( *(*(_destination)) = _CPU_Null_fp_context; \ } while (0) -#define _CPU_Fatal_halt( _source, _err ) \ - do { \ - uint64_t _level; \ - uint32_t _error = _err; \ - _CPU_ISR_Disable( _level ); \ - (void) _level; \ - __asm__ volatile ("mov x0, %0\n" \ - : "=r" (_error) \ - : "0" (_error) \ - : "x0" ); \ - while (1); \ - } while (0); - /** * @brief CPU initialization. */ @@ -315,17 +314,16 @@ void _CPU_Initialize( void ); typedef void ( *CPU_ISR_handler )( void ); -void _CPU_ISR_install_vector( - uint32_t vector, - CPU_ISR_handler new_handler, - CPU_ISR_handler *old_handler -); - /** * @brief CPU switch context. 
*/ void _CPU_Context_switch( Context_Control *run, Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); #ifdef RTEMS_SMP @@ -360,18 +358,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); { __asm__ volatile ( "wfe" : : : "memory" ); } - - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - _AARCH64_Data_synchronization_barrier(); - _AARCH64_Send_event(); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - _AARCH64_Wait_for_event(); - _AARCH64_Data_memory_barrier(); - } #endif @@ -398,14 +384,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - void *_CPU_Thread_Idle_body( uintptr_t ignored ); typedef enum { @@ -426,7 +404,7 @@ typedef enum { AARCH64_EXCEPTION_LEL32_FIQ = 14, AARCH64_EXCEPTION_LEL32_SERROR = 15, MAX_EXCEPTIONS = 16, - AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff + AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = INT_MAX } AArch64_symbolic_exception_name; #define VECTOR_POINTER_OFFSET 0x78 @@ -454,7 +432,15 @@ static inline void* AArch64_set_exception_handler( *vector_address = handler; /* return now-previous vector pointer */ - return (void*)current_vector_pointer; + +/* + * This was put in to fix the following warning: + * warning: ISO C forbids conversion of function pointer to object pointer type. 
+ */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" + return (void*)current_vector_pointer; +#pragma GCC diagnostic pop } typedef struct { @@ -544,6 +530,27 @@ typedef struct { void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ); +RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame ); + +RTEMS_NO_RETURN void +_CPU_Exception_dispatch_and_resume( CPU_Exception_frame *frame ); + +void _CPU_Exception_disable_thread_dispatch( void ); + +int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame ); + +void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame, + void *address ); + +void _CPU_Exception_frame_make_resume_next_instruction( + CPU_Exception_frame *frame +); + +void _AArch64_Exception_frame_copy( + CPU_Exception_frame *new_ef, + CPU_Exception_frame *old_ef +); + void _AArch64_Exception_default( CPU_Exception_frame *frame ); /** Type that can store a 32-bit integer or a pointer. */ diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h index 4c3da6794a..8a0e476899 100644 --- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h @@ -50,7 +50,10 @@ */ #define CPU_PER_CPU_CONTROL_SIZE 0 -#define CPU_INTERRUPT_FRAME_SIZE 240 + +#define CPU_INTERRUPT_FRAME_SIZE 0x2E0 + +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 11 #ifndef ASM @@ -58,20 +61,126 @@ extern "C" { #endif +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +typedef struct { + uint64_t x0; + uint64_t register_lr_original; + uint64_t register_lr; + uint64_t x1; + uint64_t x2; + uint64_t x3; + uint64_t x4; + uint64_t x5; + uint64_t x6; + uint64_t x7; + uint64_t x8; + uint64_t x9; + uint64_t x10; + uint64_t x11; + uint64_t x12; + uint64_t x13; + uint64_t x14; + uint64_t x15; + uint64_t x16; + uint64_t x17; + uint64_t x18; + uint64_t x19; + uint64_t x20; + uint64_t x21; +#ifdef 
AARCH64_MULTILIB_VFP + uint128_t q0; + uint128_t q1; + uint128_t q2; + uint128_t q3; + uint128_t q4; + uint128_t q5; + uint128_t q6; + uint128_t q7; + uint128_t q8; + uint128_t q9; + uint128_t q10; + uint128_t q11; + uint128_t q12; + uint128_t q13; + uint128_t q14; + uint128_t q15; + uint128_t q16; + uint128_t q17; + uint128_t q18; + uint128_t q19; + uint128_t q20; + uint128_t q21; + uint128_t q22; + uint128_t q23; + uint128_t q24; + uint128_t q25; + uint128_t q26; + uint128_t q27; + uint128_t q28; + uint128_t q29; + uint128_t q30; + uint128_t q31; +#endif /* AARCH64_MULTILIB_VFP */ + uint64_t register_elr; + uint64_t register_spsr; + uint64_t register_fpsr; + uint64_t register_fpcr; +} CPU_Interrupt_frame; + +#ifdef RTEMS_SMP + +static inline +struct Per_CPU_Control *_AARCH64_Get_current_per_CPU_control( void ) +{ + struct Per_CPU_Control *cpu_self; + uint64_t value; + + __asm__ volatile ( + "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory" + ); + + /* Use EL1 Thread ID Register (TPIDR_EL1) */ + cpu_self = (struct Per_CPU_Control *)(uintptr_t)value; + + return cpu_self; +} + +#define _CPU_Get_current_per_CPU_control() \ + _AARCH64_Get_current_per_CPU_control() + +#endif /* RTEMS_SMP */ + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".inst 0x0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + __asm__ volatile ( + "msr TPIDR_EL0, %0" : : "r" ( context->thread_id ) : "memory" + ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *)(uintptr_t) context->thread_id; +} + #ifdef __cplusplus } #endif diff --git 
a/cpukit/score/cpu/arm/__aeabi_read_tp.c b/cpukit/score/cpu/arm/__aeabi_read_tp.c index 22acc770e3..0f4eba8d9a 100644 --- a/cpukit/score/cpu/arm/__aeabi_read_tp.c +++ b/cpukit/score/cpu/arm/__aeabi_read_tp.c @@ -1,15 +1,36 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of __aeabi_read_tp(). + */ + /* - * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2014 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -29,15 +50,15 @@ void __attribute__((naked)) __aeabi_read_tp(void) "ldr r0, =_Per_CPU_Information\n" "ldr r0, [r0, %[executingoff]]\n" #if defined(__thumb__) && !defined(__thumb2__) - "add r0, %[tlsareaoff]\n" + "add r0, %[threadidoff]\n" "ldr r0, [r0]\n" #else - "ldr r0, [r0, %[tlsareaoff]]\n" + "ldr r0, [r0, %[threadidoff]]\n" #endif "bx lr\n" : : [executingoff] "I" (offsetof(Per_CPU_Control, executing)), - [tlsareaoff] "I" (offsetof(Thread_Control, Start.tls_area)) + [threadidoff] "I" (offsetof(Thread_Control, Registers.thread_id)) ); } diff --git a/cpukit/score/cpu/arm/__tls_get_addr.c b/cpukit/score/cpu/arm/__tls_get_addr.c index 837395bd8a..407bf3d639 100644 --- a/cpukit/score/cpu/arm/__tls_get_addr.c +++ b/cpukit/score/cpu/arm/__tls_get_addr.c @@ -1,15 +1,36 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of __tls_get_addr(). + */ + /* - * Copyright (c) 2014-2015 embedded brains GmbH. All rights reserved. + * Copyright (C) 2014, 2015 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H @@ -26,10 +47,10 @@ void *__tls_get_addr(const TLS_Index *ti); void *__tls_get_addr(const TLS_Index *ti) { const Thread_Control *executing = _Thread_Get_executing(); - void *tls_block = (char *) executing->Start.tls_area - + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment ); + void *tls_data = (char *) executing->Registers.thread_id + + _TLS_Get_thread_control_block_area_size( &_TLS_Configuration ); assert(ti->module == 1); - return (char *) tls_block + ti->offset; + return (char *) tls_data + ti->offset; } diff --git a/cpukit/score/cpu/arm/aarch32-psma-init-default.c b/cpukit/score/cpu/arm/aarch32-psma-init-default.c index da710d77fc..615e7a528a 100644 --- a/cpukit/score/cpu/arm/aarch32-psma-init-default.c +++ b/cpukit/score/cpu/arm/aarch32-psma-init-default.c @@ -10,7 +10,7 @@ */ /* - * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de) + * Copyright (C) 2020 embedded brains GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/cpukit/score/cpu/arm/aarch32-psma-init.c b/cpukit/score/cpu/arm/aarch32-psma-init.c index de75d2a733..93a3673a98 100644 --- a/cpukit/score/cpu/arm/aarch32-psma-init.c +++ b/cpukit/score/cpu/arm/aarch32-psma-init.c @@ -10,7 +10,7 @@ */ /* - * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de) + * Copyright (C) 2020 embedded brains GmbH & Co. 
KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -45,15 +45,9 @@ #include <rtems/score/aarch32-system-registers.h> #include <rtems/score/cpu.h> -#define AARCH32_PSMA_REGION_MAX \ +#define AARCH32_PMSA_REGION_MAX \ ( ( AARCH32_MPUIR_REGION_MASK >> AARCH32_MPUIR_REGION_SHIFT ) + 1 ) -typedef struct { - uint32_t base; - uint32_t limit; - uint32_t attributes; -} AArch32_PMSA_Region; - static void _AArch32_PMSA_Configure( const AArch32_PMSA_Region *regions, size_t region_used, @@ -91,46 +85,41 @@ static void _AArch32_PMSA_Configure( _ARM_Data_synchronization_barrier(); sctlr = _AArch32_Read_sctlr(); sctlr |= AARCH32_SCTLR_M | AARCH32_SCTLR_I | AARCH32_SCTLR_C; - sctlr &= ~AARCH32_SCTLR_BR; + sctlr &= ~( AARCH32_SCTLR_A | AARCH32_SCTLR_BR ); _AArch32_Write_sctlr( sctlr ); _ARM_Instruction_synchronization_barrier(); } -void _AArch32_PMSA_Initialize( - uint32_t memory_attributes_0, - uint32_t memory_attributes_1, +size_t _AArch32_PMSA_Map_sections_to_regions( const AArch32_PMSA_Section *sections, - size_t section_count + size_t section_count, + AArch32_PMSA_Region *regions, + size_t region_max ) { - AArch32_PMSA_Region regions[ AARCH32_PSMA_REGION_MAX ]; size_t ri; size_t si; size_t region_used; - size_t region_max; - _AArch32_Write_mair0( memory_attributes_0 ); - _AArch32_Write_mair1( memory_attributes_1 ); - - region_max = ( _AArch32_Read_mpuir() & AARCH32_MPUIR_REGION_MASK ) >> - AARCH32_MPUIR_REGION_SHIFT; region_used = 0; for ( si = 0; si < section_count; ++si ) { uint32_t base; - uint32_t limit; + uint32_t end; uint32_t attr; + uint32_t limit; base = sections[ si ].begin; - limit = sections[ si ].end; + end = sections[ si ].end; attr = sections[ si ].attributes; - if ( base == limit ) { + if ( base == end ) { continue; } base = RTEMS_ALIGN_DOWN( base, AARCH32_PMSA_MIN_REGION_ALIGN ); - limit = RTEMS_ALIGN_DOWN( limit - 1, AARCH32_PMSA_MIN_REGION_ALIGN ); + end = 
RTEMS_ALIGN_UP( end, AARCH32_PMSA_MIN_REGION_ALIGN ); + limit = end - AARCH32_PMSA_MIN_REGION_ALIGN; for ( ri = 0; ri < region_used; ++ri ) { uint32_t region_base; @@ -141,29 +130,38 @@ void _AArch32_PMSA_Initialize( region_limit = regions[ ri ].limit; region_attr = regions[ ri ].attributes; - if ( - limit + AARCH32_PMSA_MIN_REGION_ALIGN == region_base && - attr == region_attr - ) { - /* Merge section with existing region */ - regions[ ri ].base = base; - break; - } else if ( - base == region_limit + AARCH32_PMSA_MIN_REGION_ALIGN && - attr == region_attr - ) { - /* Merge section with existing region */ - regions[ ri ].limit = limit; - break; - } else if ( limit < region_base ) { + if ( attr == region_attr ) { + uint32_t region_end; + + if ( end - region_base <= AARCH32_PMSA_MIN_REGION_ALIGN ) { + /* Extend the region */ + regions[ ri ].base = base; + break; + } + + region_end = region_limit + AARCH32_PMSA_MIN_REGION_ALIGN; + + if ( region_end - base <= AARCH32_PMSA_MIN_REGION_ALIGN ) { + /* Extend the region */ + regions[ ri ].limit = limit; + break; + } + + if ( base >= region_base && end <= region_end ) { + /* The section is contained in the region */ + break; + } + } + + if ( base <= region_base ) { size_t i; if ( region_used >= region_max ) { - return; + return 0; } - for ( i = ri; i < region_used; ++i ) { - regions[ i + 1 ] = regions[ i ]; + for ( i = region_used; i > ri; --i ) { + regions[ i ] = regions[ i - 1 ]; } /* New first region */ @@ -177,18 +175,47 @@ void _AArch32_PMSA_Initialize( if ( ri == region_used ) { if ( region_used >= region_max ) { - return; + return 0; } /* New last region */ + ++region_used; regions[ ri ].base = base; regions[ ri ].limit = limit; regions[ ri ].attributes = attr; - ++region_used; } } - _AArch32_PMSA_Configure( regions, region_used, region_max ); + return region_used; +} + +void _AArch32_PMSA_Initialize( + uint32_t memory_attributes_0, + uint32_t memory_attributes_1, + const AArch32_PMSA_Section *sections, + size_t 
section_count +) +{ + AArch32_PMSA_Region regions[ AARCH32_PMSA_REGION_MAX ]; + size_t region_max; + size_t region_used; + + _AArch32_Write_mair0( memory_attributes_0 ); + _AArch32_Write_mair1( memory_attributes_1 ); + + region_max = ( _AArch32_Read_mpuir() & AARCH32_MPUIR_REGION_MASK ) >> + AARCH32_MPUIR_REGION_SHIFT; + + region_used = _AArch32_PMSA_Map_sections_to_regions( + sections, + section_count, + regions, + region_max + ); + + if ( region_used > 0 ) { + _AArch32_PMSA_Configure( regions, region_used, region_max ); + } } #endif diff --git a/cpukit/score/cpu/arm/arm-context-validate.S b/cpukit/score/cpu/arm/arm-context-validate.S index 257a9ff1cb..51157071dd 100644 --- a/cpukit/score/cpu/arm/arm-context-validate.S +++ b/cpukit/score/cpu/arm/arm-context-validate.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2013, 2017 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/arm-context-volatile-clobber.S b/cpukit/score/cpu/arm/arm-context-volatile-clobber.S index d94e107736..5cbbb27754 100644 --- a/cpukit/score/cpu/arm/arm-context-volatile-clobber.S +++ b/cpukit/score/cpu/arm/arm-context-volatile-clobber.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2013, 2017 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/arm-exception-default.c b/cpukit/score/cpu/arm/arm-exception-default.c index 6d4d88f3d0..02df769287 100644 --- a/cpukit/score/cpu/arm/arm-exception-default.c +++ b/cpukit/score/cpu/arm/arm-exception-default.c @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _ARM_Exception_default(). + */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/arm-exception-frame-print.c b/cpukit/score/cpu/arm/arm-exception-frame-print.c index 252d775de7..b089648184 100644 --- a/cpukit/score/cpu/arm/arm-exception-frame-print.c +++ b/cpukit/score/cpu/arm/arm-exception-frame-print.c @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _CPU_Exception_frame_print(). + */ + /* - * Copyright (c) 2012-2013 embedded brains GmbH. All rights reserved. + * Copyright (C) 2012, 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 
30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H @@ -19,11 +41,14 @@ #include <inttypes.h> #include <rtems/score/cpu.h> +#if defined(ARM_MULTILIB_ARCH_V7M) +#include <rtems/score/armv7m.h> +#endif #include <rtems/bspIo.h> static void _ARM_VFP_context_print( const ARM_VFP_context *vfp_context ) { -#ifdef ARM_MULTILIB_VFP_D32 +#ifdef ARM_MULTILIB_VFP if ( vfp_context != NULL ) { const uint64_t *dx = &vfp_context->register_d0; int i; @@ -34,7 +59,14 @@ static void _ARM_VFP_context_print( const ARM_VFP_context *vfp_context ) vfp_context->register_fpscr ); - for ( i = 0; i < 32; ++i ) { +#if defined(ARM_MULTILIB_VFP_D32) + int regcount = 32; +#elif defined(ARM_MULTILIB_VFP_D16) + int regcount = 16; +#else + int regcount = 0; +#endif + for ( i = 0; i < regcount; ++i ) { uint32_t low = (uint32_t) dx[i]; uint32_t high = (uint32_t) (dx[i] >> 32); @@ -44,6 +76,136 @@ static void _ARM_VFP_context_print( const ARM_VFP_context *vfp_context ) #endif } +static void _ARM_Cortex_M_fault_info_print( void ) +{ +#if defined(ARM_MULTILIB_ARCH_V7M) + /* + * prints content of additional debugging registers + * available on Cortex-Mx where x > 0 cores. 
+ */ + uint32_t cfsr = _ARMV7M_SCB->cfsr; + uint8_t mmfsr = ARMV7M_SCB_CFSR_MMFSR_GET( cfsr ); + uint8_t bfsr = ( ARMV7M_SCB_CFSR_BFSR_GET( cfsr ) >> 8 ); + uint16_t ufsr = ( ARMV7M_SCB_CFSR_UFSR_GET( cfsr ) >> 16 ); + uint32_t hfsr = _ARMV7M_SCB->hfsr; + if ( mmfsr > 0 ) { + printk( "MMFSR= 0x%08" PRIx32 " (memory fault)\n", mmfsr ); + if ( ( mmfsr & 0x1 ) != 0 ) { + printk( " IACCVIOL : 1 (instruction access violation)\n" ); + } + if ( ( mmfsr & 0x2 ) != 0 ) { + printk( " DACCVIOL : 1 (data access violation)\n" ); + } + if ( (mmfsr & 0x8 ) != 0 ) { + printk( + " MUNSTKERR : 1 (fault on unstacking on exception return)\n" + ); + } + if ( ( mmfsr & 0x10 ) != 0 ) { + printk( " MSTKERR : 1 (fault on stacking on exception entry)\n" ); + } + if ( (mmfsr & 0x20 ) != 0 ) { + printk( " MLSPERR : 1 (fault during lazy FP stack preservation)\n" ); + } + if ( (mmfsr & 0x80 ) != 0 ) { + printk( + " MMFARVALID : 1 -> 0x%08" PRIx32 " (error address)\n", + _ARMV7M_SCB->mmfar + ); + } + else { + printk( " MMFARVALID : 0 (undetermined error address)\n" ); + } + } + if ( bfsr > 0 ) { + printk( "BFSR = 0x%08" PRIx32 " (bus fault)\n", bfsr ); + if ( ( bfsr & 0x1 ) != 0 ) { + printk( " IBUSERR : 1 (instruction fetch error)\n" ); + } + if ( (bfsr & 0x2 ) != 0 ) { + printk( + " PRECISERR : 1 (data bus error with known exact location)\n" + ); + } + if ( ( bfsr & 0x4) != 0 ) { + printk( + " IMPRECISERR: 1 (data bus error without known exact location)\n" + ); + } + if ( (bfsr & 0x8 ) != 0 ) { + printk( + " UNSTKERR : 1 (fault on unstacking on exception return)\n" + ); + } + if ( ( bfsr & 0x10 ) != 0 ) { + printk( " STKERR : 1 (fault on stacking on exception entry)\n" ); + } + if ( ( bfsr & 0x20 ) != 0 ) { + printk( " LSPERR : 1 (fault during lazy FP stack preservation)\n" ); + } + if ( (bfsr & 0x80 ) != 0 ) { + printk( + " BFARVALID : 1 -> 0x%08" PRIx32 " (error address)\n", + _ARMV7M_SCB->bfar + ); + } + else { + printk( " BFARVALID : 0 (undetermined error address)\n" ); + } + } + if ( ufsr 
> 0 ) { + printk( "UFSR = 0x%08" PRIx32 " (usage fault)\n", ufsr); + if ( (ufsr & 0x1 ) != 0 ) { + printk( " UNDEFINSTR : 1 (undefined instruction issued)\n"); + } + if ( (ufsr & 0x2 ) != 0 ) { + printk( + " INVSTATE : 1" + " (invalid instruction state" + " (Thumb not set in EPSR or invalid IT state in EPSR))\n" + ); + } + if ( (ufsr & 0x4 ) != 0 ) { + printk( " INVPC : 1 (integrity check failure on EXC_RETURN)\n" ); + } + if ( (ufsr & 0x8 ) != 0 ) { + printk( + " NOCP : 1" + " (coprocessor instruction issued" + " but coprocessor disabled or non existent)\n" + ); + } + if ( ( ufsr & 0x100) != 0 ) { + printk( " UNALIGNED : 1 (unaligned access operation occurred)\n" ); + } + if ( ( ufsr & 0x200) != 0 ) { + printk( " DIVBYZERO : 1 (division by zero)" ); + } + } + if ( (hfsr & ( + ARMV7M_SCB_HFSR_VECTTBL_MASK + | ARMV7M_SCB_HFSR_DEBUGEVT_MASK + | ARMV7M_SCB_HFSR_FORCED_MASK + ) ) != 0 ) { + printk( "HFSR = 0x%08" PRIx32 " (hard fault)\n", hfsr ); + if ( (hfsr & ARMV7M_SCB_HFSR_VECTTBL_MASK ) != 0 ) { + printk( + " VECTTBL : 1 (error in address located in vector table)\n" + ); + } + if ( (hfsr & ARMV7M_SCB_HFSR_FORCED_MASK ) != 0 ) { + printk( + " FORCED : 1 (configurable fault escalated to hard fault)\n" + ); + } + if ( (hfsr & ARMV7M_SCB_HFSR_DEBUGEVT_MASK ) != 0 ) { + printk( + " DEBUGEVT : 1 (debug event occurred with debug system disabled)\n" + ); + } + } +#endif +} void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ) { printk( @@ -87,4 +249,5 @@ void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ) ); _ARM_VFP_context_print( frame->vfp_context ); + _ARM_Cortex_M_fault_info_print(); } diff --git a/cpukit/score/cpu/arm/arm_exc_abort.S b/cpukit/score/cpu/arm/arm_exc_abort.S index 03f7963eac..2721e3fed0 100644 --- a/cpukit/score/cpu/arm/arm_exc_abort.S +++ b/cpukit/score/cpu/arm/arm_exc_abort.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -7,16 +9,28 @@ */ /* - * Copyright (c) 2009 - * embedded brains 
GmbH - * Obere Lagerstr. 30 - * D-82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Copyright (c) 2009 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/arm_exc_interrupt.S b/cpukit/score/cpu/arm/arm_exc_interrupt.S index ddcaf945b5..5a7109da26 100644 --- a/cpukit/score/cpu/arm/arm_exc_interrupt.S +++ b/cpukit/score/cpu/arm/arm_exc_interrupt.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -7,17 +9,28 @@ */ /* - * Copyright (c) 2009, 2016 embedded brains GmbH. All rights reserved. + * Copyright (C) 2009, 2022 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ /* @@ -34,6 +47,11 @@ #ifdef ARM_MULTILIB_ARCH_V4 +#define STACK_POINTER_ADJUST r7 +#define NON_VOLATILE_SCRATCH r9 + +#ifndef ARM_MULTILIB_HAS_STORE_RETURN_STATE + #define EXCHANGE_LR r4 #define EXCHANGE_SPSR r5 #define EXCHANGE_CPSR r6 @@ -42,16 +60,31 @@ #define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP} #define EXCHANGE_SIZE 16 -#define SELF_CPU_CONTROL r7 -#define NON_VOLATILE_SCRATCH r9 - -#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, SELF_CPU_CONTROL, r12} +#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, NON_VOLATILE_SCRATCH, r12} #define CONTEXT_SIZE 32 +#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ + .arm .globl _ARMV4_Exception_interrupt _ARMV4_Exception_interrupt: +#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE + /* Prepare return from interrupt */ + sub lr, lr, #4 + + /* Save LR_irq and SPSR_irq to the SVC stack */ + srsfd sp!, #ARM_PSR_M_SVC + + /* Switch to SVC mode */ + cps #ARM_PSR_M_SVC + + /* + * Save the volatile registers, two non-volatile registers used for + * interrupt processing, and the link register. + */ + push {r0-r3, STACK_POINTER_ADJUST, NON_VOLATILE_SCRATCH, r12, lr} +#else /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ /* Save exchange registers to exchange area */ stmdb sp, EXCHANGE_LIST @@ -68,90 +101,103 @@ _ARMV4_Exception_interrupt: /* * Save context. We save the link register separately because it has * to be restored in SVC mode. 
The other registers can be restored in - * INT mode. Ensure that stack remains 8 byte aligned. Use register - * necessary for the stack alignment for the stack pointer of the - * interrupted context. + * INT mode. Ensure that the size of the saved registers is an + * integral multiple of 8 bytes. Provide a non-volatile scratch + * register which may be used accross function calls. + */ + push CONTEXT_LIST + push {STACK_POINTER_ADJUST, lr} +#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ + + /* + * On a public interface, the stack pointer must be aligned on an + * 8-byte boundary. However, it may temporarily be only aligned on a + * 4-byte boundary. Make sure the stack pointer is aligned on an + * 8-byte boundary. */ - stmdb sp!, CONTEXT_LIST - stmdb sp!, {NON_VOLATILE_SCRATCH, lr} + and STACK_POINTER_ADJUST, sp, #0x4 + sub sp, sp, STACK_POINTER_ADJUST + + /* Get per-CPU control of current processor */ + GET_SELF_CPU_CONTROL r0 #ifdef ARM_MULTILIB_VFP /* Save VFP context */ - vmrs r0, FPSCR - vstmdb sp!, {d0-d7} + vmrs r2, FPSCR + vpush {d0-d7} #ifdef ARM_MULTILIB_VFP_D32 - vstmdb sp!, {d16-d31} + vpush {d16-d31} #endif - stmdb sp!, {r0, r1} + push {r2, r3} #endif /* ARM_MULTILIB_VFP */ - /* Get per-CPU control of current processor */ - GET_SELF_CPU_CONTROL SELF_CPU_CONTROL - +#ifndef ARM_MULTILIB_HAS_STORE_RETURN_STATE /* Remember INT stack pointer */ mov r1, EXCHANGE_INT_SP /* Restore exchange registers from exchange area */ ldmia r1, EXCHANGE_LIST +#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ /* Get interrupt nest level */ - ldr r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] + ldr r2, [r0, #PER_CPU_ISR_NEST_LEVEL] /* Switch stack if necessary and save original stack pointer */ mov NON_VOLATILE_SCRATCH, sp cmp r2, #0 +#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE + ldreq sp, [r0, #PER_CPU_INTERRUPT_STACK_HIGH] +#else moveq sp, r1 - - /* Switch to Thumb-2 instructions if necessary */ - SWITCH_FROM_ARM_TO_THUMB_2 r1 +#endif /* Increment interrupt nest and thread 
dispatch disable level */ - ldr r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] - add r2, #1 - add r3, #1 - str r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] - str r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + ldr r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + add r2, r2, #1 + add r3, r3, #1 + str r2, [r0, #PER_CPU_ISR_NEST_LEVEL] + str r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] /* Call BSP dependent interrupt dispatcher */ #ifdef RTEMS_PROFILING cmp r2, #1 bne .Lskip_profiling BLX_TO_THUMB_1 _CPU_Counter_read - mov SELF_CPU_CONTROL, r0 + push {r0, r1} + GET_SELF_CPU_CONTROL r0 BLX_TO_THUMB_1 bsp_interrupt_dispatch BLX_TO_THUMB_1 _CPU_Counter_read + pop {r1, r3} mov r2, r0 - mov r1, SELF_CPU_CONTROL GET_SELF_CPU_CONTROL r0 - mov SELF_CPU_CONTROL, r0 BLX_TO_THUMB_1 _Profiling_Outer_most_interrupt_entry_and_exit .Lprofiling_done: #else BLX_TO_THUMB_1 bsp_interrupt_dispatch #endif + /* Get per-CPU control of current processor */ + GET_SELF_CPU_CONTROL r0 + /* Load some per-CPU variables */ - ldr r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] - ldrb r1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED] - ldr r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE] - ldr r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] + ldr r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + ldrb r1, [r0, #PER_CPU_DISPATCH_NEEDED] + ldr r2, [r0, #PER_CPU_ISR_DISPATCH_DISABLE] + ldr r3, [r0, #PER_CPU_ISR_NEST_LEVEL] /* Restore stack pointer */ mov sp, NON_VOLATILE_SCRATCH - /* Save CPSR in non-volatile register */ - mrs NON_VOLATILE_SCRATCH, CPSR - /* Decrement levels and determine thread dispatch state */ - eor r1, r0 - sub r0, #1 - orr r1, r0 - orr r1, r2 - sub r3, #1 + eor r1, r1, r12 + sub r12, r12, #1 + orr r1, r1, r12 + orr r1, r1, r2 + sub r3, r3, #1 /* Store thread dispatch disable and ISR nest levels */ - str r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] - str r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL] + 
str r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + str r3, [r0, #PER_CPU_ISR_NEST_LEVEL] /* * Check thread dispatch necessary, ISR dispatch disable and thread @@ -160,63 +206,71 @@ _ARMV4_Exception_interrupt: cmp r1, #0 bne .Lthread_dispatch_done - /* Thread dispatch */ + /* Save CPSR in non-volatile register */ mrs NON_VOLATILE_SCRATCH, CPSR + /* Thread dispatch */ + .Ldo_thread_dispatch: /* Set ISR dispatch disable and thread dispatch disable level to one */ - mov r0, #1 - str r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE] - str r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + mov r12, #1 + str r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE] + str r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] /* Call _Thread_Do_dispatch(), this function will enable interrupts */ - mov r0, SELF_CPU_CONTROL - mov r1, NON_VOLATILE_SCRATCH - mov r2, #0x80 - bic r1, r2 + bic r1, NON_VOLATILE_SCRATCH, #0x80 BLX_TO_THUMB_1 _Thread_Do_dispatch /* Disable interrupts */ msr CPSR, NON_VOLATILE_SCRATCH -#ifdef RTEMS_SMP - GET_SELF_CPU_CONTROL SELF_CPU_CONTROL -#endif + /* + * Get per-CPU control of current processor. In SMP configurations, we + * may run on another processor after the _Thread_Do_dispatch() call. 
+ */ + GET_SELF_CPU_CONTROL r0 /* Check if we have to do the thread dispatch again */ - ldrb r0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED] - cmp r0, #0 + ldrb r12, [r0, #PER_CPU_DISPATCH_NEEDED] + cmp r12, #0 bne .Ldo_thread_dispatch /* We are done with thread dispatching */ - mov r0, #0 - str r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE] + mov r12, #0 + str r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE] .Lthread_dispatch_done: - /* Switch to ARM instructions if necessary */ - SWITCH_FROM_THUMB_2_TO_ARM - #ifdef ARM_MULTILIB_VFP /* Restore VFP context */ - ldmia sp!, {r0, r1} + pop {r2, r3} #ifdef ARM_MULTILIB_VFP_D32 - vldmia sp!, {d16-d31} + vpop {d16-d31} #endif - vldmia sp!, {d0-d7} - vmsr FPSCR, r0 + vpop {d0-d7} + vmsr FPSCR, r2 #endif /* ARM_MULTILIB_VFP */ - /* Restore NON_VOLATILE_SCRATCH register and link register */ - ldmia sp!, {NON_VOLATILE_SCRATCH, lr} + /* Undo stack pointer adjustment */ + add sp, sp, STACK_POINTER_ADJUST + +#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE + /* + * Restore the volatile registers, two non-volatile registers used for + * interrupt processing, and the link register. + */ + pop {r0-r3, STACK_POINTER_ADJUST, NON_VOLATILE_SCRATCH, r12, lr} +#else /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ + /* Restore STACK_POINTER_ADJUST register and link register */ + pop {STACK_POINTER_ADJUST, lr} /* * XXX: Remember and restore stack pointer. The data on the stack is * still in use. So the stack is now in an inconsistent state. The * FIQ handler implementation must not use this area. 
*/ - mov r0, sp + mov r12, sp add sp, #CONTEXT_SIZE /* Get INT mode program status register */ @@ -227,17 +281,18 @@ _ARMV4_Exception_interrupt: msr CPSR_c, r1 /* Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area */ - stmdb sp!, {EXCHANGE_LR, EXCHANGE_SPSR} + push {EXCHANGE_LR, EXCHANGE_SPSR} /* Restore context */ - ldmia r0, CONTEXT_LIST + ldmia r12, CONTEXT_LIST /* Set return address and program status */ mov lr, EXCHANGE_LR msr SPSR_fsxc, EXCHANGE_SPSR /* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */ - ldmia sp!, {EXCHANGE_LR, EXCHANGE_SPSR} + pop {EXCHANGE_LR, EXCHANGE_SPSR} +#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ #ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE /* @@ -267,14 +322,13 @@ _ARMV4_Exception_interrupt: #endif /* Return from interrupt */ +#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE + rfefd sp! +#else subs pc, lr, #4 +#endif #ifdef RTEMS_PROFILING -#ifdef __thumb2__ -.thumb -#else -.arm -#endif .Lskip_profiling: BLX_TO_THUMB_1 bsp_interrupt_dispatch b .Lprofiling_done diff --git a/cpukit/score/cpu/arm/armv4-exception-default.S b/cpukit/score/cpu/arm/armv4-exception-default.S index 34cc5ade38..a23aef76a4 100644 --- a/cpukit/score/cpu/arm/armv4-exception-default.S +++ b/cpukit/score/cpu/arm/armv4-exception-default.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv4-isr-install-vector.c b/cpukit/score/cpu/arm/armv4-isr-install-vector.c new file mode 100644 index 0000000000..739b02f8bf --- /dev/null +++ b/cpukit/score/cpu/arm/armv4-isr-install-vector.c @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the ARM-specific _CPU_ISR_install_vector(). + */ + +/* + * COPYRIGHT (c) 2000 Canon Research Centre France SA. + * Emmanuel Raguet, mailto:raguet@crf.canon.fr + * + * Copyright (c) 2002 Advent Networks, Inc + * Jay Monkman <jmonkman@adventnetworks.com> + * + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. 
KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/score/cpu.h> + +#ifdef ARM_MULTILIB_ARCH_V4 + +void _CPU_ISR_install_vector( + uint32_t vector, + CPU_ISR_handler new_handler, + CPU_ISR_handler *old_handler +) +{ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" + /* Redirection table starts at the end of the vector table */ + CPU_ISR_handler volatile *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4); + + CPU_ISR_handler current_handler = table [vector]; + + /* The current handler is now the old one */ + if (old_handler != NULL) { + *old_handler = current_handler; + } + + /* Write only if necessary to avoid writes to a maybe read-only memory */ + if (current_handler != new_handler) { + table [vector] = new_handler; + } +#pragma GCC diagnostic pop +} + +#endif /* ARM_MULTILIB_ARCH_V4 */ diff --git a/cpukit/score/cpu/arm/armv4-sync-synchronize.c b/cpukit/score/cpu/arm/armv4-sync-synchronize.c index 7e4c91ee1c..2f454306d8 100644 --- a/cpukit/score/cpu/arm/armv4-sync-synchronize.c +++ b/cpukit/score/cpu/arm/armv4-sync-synchronize.c @@ -1,15 +1,36 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of __sync_synchronize(). + */ + /* - * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * Copyright (c) 2015 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/score/cpu.h> diff --git a/cpukit/score/cpu/arm/armv7-thread-idle.c b/cpukit/score/cpu/arm/armv7-thread-idle.c index 73346266c9..720cb2be83 100644 --- a/cpukit/score/cpu/arm/armv7-thread-idle.c +++ b/cpukit/score/cpu/arm/armv7-thread-idle.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU Thread Idle Body + * @brief This source file contains the implementation of + * _CPU_Thread_Idle_body(). */ /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-context-initialize.c b/cpukit/score/cpu/arm/armv7m-context-initialize.c index caaa3557b2..d67bcf93b5 100644 --- a/cpukit/score/cpu/arm/armv7m-context-initialize.c +++ b/cpukit/score/cpu/arm/armv7m-context-initialize.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU Initialize Context + * @brief This source file contains the implementation of + * _CPU_Context_Initialize(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -48,7 +64,7 @@ void _CPU_Context_Initialize( context->register_sp = stack_area_end; if ( tls_area != NULL ) { - _TLS_TCB_at_area_begin_initialize( tls_area ); + context->thread_id = (uint32_t) _TLS_Initialize_area( tls_area ); } } diff --git a/cpukit/score/cpu/arm/armv7m-context-restore.c b/cpukit/score/cpu/arm/armv7m-context-restore.c index adfc261177..b888abe29f 100644 --- a/cpukit/score/cpu/arm/armv7m-context-restore.c +++ b/cpukit/score/cpu/arm/armv7m-context-restore.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU Restore Context + * @brief This source file contains the implementation of + * _CPU_Context_restore(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-context-switch.c b/cpukit/score/cpu/arm/armv7m-context-switch.c index bfef67efbd..74d6b953ef 100644 --- a/cpukit/score/cpu/arm/armv7m-context-switch.c +++ b/cpukit/score/cpu/arm/armv7m-context-switch.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARM7M CPU Context Switch + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _CPU_Context_switch(). */ /* * Copyright (c) 2011-2014 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-default.c b/cpukit/score/cpu/arm/armv7m-exception-default.c index 39c5b01242..35dde50dc3 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-default.c +++ b/cpukit/score/cpu/arm/armv7m-exception-default.c @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _ARMV7M_Exception_default(). + */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 
30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-handler-get.c b/cpukit/score/cpu/arm/armv7m-exception-handler-get.c index 1e86dcfab8..41effd1e18 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-handler-get.c +++ b/cpukit/score/cpu/arm/armv7m-exception-handler-get.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief ARMV7M Get Exception Handler + * @brief This source file contains the implementation of + * _ARMV7M_Get_exception_handler(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-handler-set.c b/cpukit/score/cpu/arm/armv7m-exception-handler-set.c index 9df362817b..94c9dc5360 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-handler-set.c +++ b/cpukit/score/cpu/arm/armv7m-exception-handler-set.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief ARMV7M Set Exception Handler + * @brief This source file contains the implementation of + * _ARMV7M_Set_exception_handler(). */ /* * Copyright (c) 2011, 2016 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-priority-get.c b/cpukit/score/cpu/arm/armv7m-exception-priority-get.c index 4e150b3109..bb0892a32f 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-priority-get.c +++ b/cpukit/score/cpu/arm/armv7m-exception-priority-get.c @@ -1,20 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief ARMV7M Get Exception Priority + * @brief This source file contains the implementation of + * _ARMV7M_Get_exception_priority(). */ + /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-priority-handler.c b/cpukit/score/cpu/arm/armv7m-exception-priority-handler.c index 319e2f416a..b94366e07c 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-priority-handler.c +++ b/cpukit/score/cpu/arm/armv7m-exception-priority-handler.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARMV7M Set Exception Priority and Handler + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _ARMV7M_Set_exception_priority_and_handler(). */ /* * Copyright (c) 2012 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 
30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-exception-priority-set.c b/cpukit/score/cpu/arm/armv7m-exception-priority-set.c index 2d6c48408c..05136a47db 100644 --- a/cpukit/score/cpu/arm/armv7m-exception-priority-set.c +++ b/cpukit/score/cpu/arm/armv7m-exception-priority-set.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief ARMV7M Set Exception Priority + * @brief This source file contains the implementation of + * _ARMV7M_Set_exception_priority(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-initialize.c b/cpukit/score/cpu/arm/armv7m-initialize.c index 6ec863fa30..0f47b49af7 100644 --- a/cpukit/score/cpu/arm/armv7m-initialize.c +++ b/cpukit/score/cpu/arm/armv7m-initialize.c @@ -3,11 +3,13 @@ /** * @file * - * @brief ARM7M CPU Initialize + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of _CPU_Initialize(). */ /* - * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de) + * Copyright (C) 2020 embedded brains GmbH & Co. KG * Copyright (C) 2011 Sebastian Huber * * Redistribution and use in source and binary forms, with or without diff --git a/cpukit/score/cpu/arm/armv7m-isr-dispatch.c b/cpukit/score/cpu/arm/armv7m-isr-dispatch.c index 09bf602ece..ea168969ba 100644 --- a/cpukit/score/cpu/arm/armv7m-isr-dispatch.c +++ b/cpukit/score/cpu/arm/armv7m-isr-dispatch.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARMV7M ISR Dispatch + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _ARMV7M_Thread_dispatch(). */ /* * Copyright (c) 2011, 2017 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-isr-enter-leave.c b/cpukit/score/cpu/arm/armv7m-isr-enter-leave.c index 0b53873e4e..1490f6e219 100644 --- a/cpukit/score/cpu/arm/armv7m-isr-enter-leave.c +++ b/cpukit/score/cpu/arm/armv7m-isr-enter-leave.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief ARMV7M Interrupt Service Enter and Leave + * @brief This source file contains the implementation of + * _ARMV7M_Interrupt_service_enter() and _ARMV7M_Interrupt_service_leave(). */ /* * Copyright (c) 2011, 2017 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-isr-level-get.c b/cpukit/score/cpu/arm/armv7m-isr-level-get.c index b7432b5eab..cc2b674e13 100644 --- a/cpukit/score/cpu/arm/armv7m-isr-level-get.c +++ b/cpukit/score/cpu/arm/armv7m-isr-level-get.c @@ -1,21 +1,36 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU Get ISR Level + * @brief This source file contains the implementation of _CPU_ISR_Get_level(). */ /* * Copyright (c) 2011-2015 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-isr-level-set.c b/cpukit/score/cpu/arm/armv7m-isr-level-set.c index 987667d7cc..72c9684025 100644 --- a/cpukit/score/cpu/arm/armv7m-isr-level-set.c +++ b/cpukit/score/cpu/arm/armv7m-isr-level-set.c @@ -1,21 +1,36 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU Set ISR Level + * @brief This source file contains the implementation of _CPU_ISR_Set_level(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-isr-vector-install.c b/cpukit/score/cpu/arm/armv7m-isr-vector-install.c index 6339e00ff9..c1e9eb70cb 100644 --- a/cpukit/score/cpu/arm/armv7m-isr-vector-install.c +++ b/cpukit/score/cpu/arm/armv7m-isr-vector-install.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUARM * - * @brief CPU ISR Vector Install + * @brief This source file contains the implementation of + * _CPU_ISR_install_vector(). */ /* * Copyright (c) 2011 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/armv7m-multitasking-start-stop.c b/cpukit/score/cpu/arm/armv7m-multitasking-start-stop.c index ce96ce47a6..a9b59dec84 100644 --- a/cpukit/score/cpu/arm/armv7m-multitasking-start-stop.c +++ b/cpukit/score/cpu/arm/armv7m-multitasking-start-stop.c @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARMV7M Start Multitasking + * @ingroup RTEMSScoreCPUARM + * + * @brief This source file contains the implementation of + * _ARMV7M_Start_multitasking(). */ /* * Copyright (c) 2011-2014 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/arm/cpu.c b/cpukit/score/cpu/arm/cpu.c index 07b9588afd..c27f4de9f9 100644 --- a/cpukit/score/cpu/arm/cpu.c +++ b/cpukit/score/cpu/arm/cpu.c @@ -1,9 +1,14 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * * @ingroup RTEMSScoreCPUARM * - * @brief ARM architecture support implementation. 
+ * @brief This source file contains static assertions to ensure the consistency + * of interfaces used in C and assembler and it contains the ARM-specific + * implementation of _CPU_Initialize(), _CPU_ISR_Get_level(), + * _CPU_ISR_Set_level(), _CPU_Context_Initialize(), and _CPU_Fatal_halt(). */ /* @@ -15,19 +20,35 @@ * * Copyright (c) 2007 Ray xu <rayx.cn@gmail.com> * - * Copyright (c) 2009, 2017 embedded brains GmbH + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif -#include <rtems/score/assert.h> -#include <rtems/score/cpu.h> +#include <rtems/score/cpuimpl.h> #include <rtems/score/thread.h> #include <rtems/score/tls.h> @@ -38,13 +59,11 @@ ); #endif -#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER - RTEMS_STATIC_ASSERT( - offsetof( Context_Control, thread_id ) - == ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET, - ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET - ); -#endif +RTEMS_STATIC_ASSERT( + offsetof( Context_Control, thread_id ) + == ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET, + ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET +); #ifdef ARM_MULTILIB_ARCH_V4 RTEMS_STATIC_ASSERT( @@ -100,13 +119,10 @@ void _CPU_Context_Initialize( the_context->register_sp = (uint32_t) stack_area_begin + stack_area_size; the_context->register_lr = (uint32_t) entry_point; the_context->isr_dispatch_disable = 0; - -#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER the_context->thread_id = (uint32_t) tls_area; -#endif if ( tls_area != NULL ) { - _TLS_TCB_at_area_begin_initialize( tls_area ); + the_context->thread_id = (uint32_t) _TLS_Initialize_area( tls_area ); } } @@ -143,31 +159,26 @@ uint32_t _CPU_ISR_Get_level( void ) return ( level & ARM_PSR_I ) != 0; } -void _CPU_ISR_install_vector( - uint32_t vector, - CPU_ISR_handler new_handler, - CPU_ISR_handler *old_handler -) -{ - /* Redirection table starts at the end of the vector table */ - CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4); - - CPU_ISR_handler current_handler = table [vector]; - - /* The current handler is now the old one */ - if (old_handler != NULL) { - *old_handler = current_handler; - } - - /* Write only if necessary to avoid writes to a maybe read-only memory */ - if (current_handler != new_handler) { - table [vector] = new_handler; - } -} - void _CPU_Initialize( void ) { /* Do nothing */ } #endif /* ARM_MULTILIB_ARCH_V4 */ + +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; 
+ + __asm__ volatile ("mov r0, %0\n" + : "=r" (error) + : "0" (error) + : "r0" ); + + while ( true ) { + /* Do nothing */ + } +} diff --git a/cpukit/score/cpu/arm/cpu_asm.S b/cpukit/score/cpu/arm/cpu_asm.S index 66f8ba6032..87bbfc4f01 100644 --- a/cpukit/score/cpu/arm/cpu_asm.S +++ b/cpukit/score/cpu/arm/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -19,11 +21,28 @@ * COPYRIGHT (c) 2000 Canon Research Centre France SA. * Emmanuel Raguet, mailto:raguet@crf.canon.fr * - * Copyright (c) 2013, 2017 embedded brains GmbH + * Copyright (C) 2013, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ @@ -54,6 +73,9 @@ */ DEFINE_FUNCTION_ARM(_CPU_Context_switch) + .globl _CPU_Context_switch_no_return + .set _CPU_Context_switch_no_return, _CPU_Context_switch + /* Start saving context */ GET_SELF_CPU_CONTROL r2 ldr r3, [r2, #PER_CPU_ISR_DISPATCH_DISABLE] @@ -160,6 +182,18 @@ DEFINE_FUNCTION_ARM(_CPU_Context_restore) str r5, [r2, #PER_CPU_OFFSET_EXECUTING] b .L_check_is_executing + +DEFINE_FUNCTION_ARM(_ARM_Start_multitasking) + mov r1, r0 + GET_SELF_CPU_CONTROL r2 + + /* Switch the stack to the temporary interrupt stack of this processor */ + add sp, r2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE) + + /* Enable IRQ interrupts */ + cpsie i + + b .L_check_is_executing #endif #endif /* ARM_MULTILIB_ARCH_V4 */ diff --git a/cpukit/score/cpu/arm/headers.am b/cpukit/score/cpu/arm/headers.am deleted file mode 100644 index cb8976fdbc..0000000000 --- a/cpukit/score/cpu/arm/headers.am +++ /dev/null @@ -1,14 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_libcpu_HEADERS += score/cpu/arm/include/libcpu/arm-cp15.h -include_machine_HEADERS += score/cpu/arm/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/arm/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/aarch32-pmsa.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/aarch32-system-registers.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/arm.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/armv4.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/armv7m.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/arm/include/rtems/score/paravirt.h diff --git a/cpukit/score/cpu/arm/include/libcpu/arm-cp15.h b/cpukit/score/cpu/arm/include/libcpu/arm-cp15.h index 5bc01dcb32..c239eaccc8 100644 --- a/cpukit/score/cpu/arm/include/libcpu/arm-cp15.h +++ b/cpukit/score/cpu/arm/include/libcpu/arm-cp15.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -8,17 +10,28 @@ /* * Copyright (c) 2013 Hesham AL-Matary - * Copyright (c) 2009-2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <info@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef LIBCPU_SHARED_ARM_CP15_H @@ -1296,15 +1309,17 @@ arm_cp15_data_cache_test_and_clean(void) ); } -/* In DDI0301H_arm1176jzfs_r0p7_trm - * 'MCR p15, 0, <Rd>, c7, c14, 0' means - * Clean and Invalidate Entire Data Cache - */ ARM_CP15_TEXT_SECTION static inline void arm_cp15_data_cache_clean_and_invalidate(void) { ARM_SWITCH_REGISTERS; +#if __ARM_ARCH >= 6 + /* + * In DDI0301H_arm1176jzfs_r0p7_trm + * 'MCR p15, 0, <Rd>, c7, c14, 0' means + * Clean and Invalidate Entire Data Cache + */ uint32_t sbz = 0; __asm__ volatile ( @@ -1315,6 +1330,22 @@ arm_cp15_data_cache_clean_and_invalidate(void) : [sbz] "r" (sbz) : "memory" ); +#else + /* + * Assume this is an ARM926EJ-S. Use the test, clean, and invalidate DCache + * operation. 
+ */ + __asm__ volatile ( + ARM_SWITCH_TO_ARM + "1:\n" + "mrc p15, 0, r15, c7, c14, 3\n" + "bne 1b\n" + ARM_SWITCH_BACK + : ARM_SWITCH_OUTPUT + : + : "memory" + ); +#endif } ARM_CP15_TEXT_SECTION static inline void diff --git a/cpukit/score/cpu/arm/include/rtems/asm.h b/cpukit/score/cpu/arm/include/rtems/asm.h index 05e186f73c..9f676e40ab 100644 --- a/cpukit/score/cpu/arm/include/rtems/asm.h +++ b/cpukit/score/cpu/arm/include/rtems/asm.h @@ -1,7 +1,12 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARM Assembler Support API + * @ingroup RTEMSScoreCPUARMASM + * + * @brief This header file provides interfaces to address problems caused by + * incompatible flavor of assemblers and toolsets. * * This include file attempts to address the problems * caused by incompatible flavors of assemblers and @@ -25,9 +30,26 @@ * COPYRIGHT (c) 2000 Canon Research Centre France SA. * Emmanuel Raguet, mailto:raguet@crf.canon.fr * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ @@ -67,15 +89,13 @@ #define __REGISTER_PREFIX__ #endif -#include <rtems/concat.h> - /* Use the right prefix for global labels. */ -#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#define SYM(x) RTEMS_XCONCAT(__USER_LABEL_PREFIX__, x) /* Use the right prefix for registers. */ -#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) +#define REG(x) RTEMS_XCONCAT(__REGISTER_PREFIX__, x) /* * define macros for all of the registers on this CPU diff --git a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h index ea25828bb6..36541a97aa 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h +++ b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h @@ -10,7 +10,7 @@ */ /* - * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de) + * Copyright (C) 2020 embedded brains GmbH & Co. 
KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -37,8 +37,7 @@ #ifndef _RTEMS_SCORE_AARCH32_PMSA_H #define _RTEMS_SCORE_AARCH32_PMSA_H -#include <stddef.h> -#include <stdint.h> +#include <rtems/score/basedefs.h> #ifdef __cplusplus extern "C" { @@ -64,7 +63,7 @@ extern "C" { #define AARCH32_PMSA_ATTR_IDX( _idx ) \ ( ( _idx ) << AARCH32_PMSA_ATTR_IDX_SHIFT ) -#define AARCH32_PMSA_ATTR_XN 0x6U +#define AARCH32_PMSA_ATTR_XN 0x40U #define AARCH32_PMSA_ATTR_AP_SHIFT 7 #define AARCH32_PMSA_ATTR_AP_MASK 0x18U @@ -99,16 +98,16 @@ extern "C" { #define AARCH32_PMSA_MEM_OUTER_WA 0x10U #define AARCH32_PMSA_MEM_INNER_WTT 0x00U -#define AARCH32_PMSA_MEM_INNER_NC 0x40U -#define AARCH32_PMSA_MEM_INNER_WBT 0x40U -#define AARCH32_PMSA_MEM_INNER_WTNT 0x80U -#define AARCH32_PMSA_MEM_INNER_WBNT 0xc0U +#define AARCH32_PMSA_MEM_INNER_NC 0x04U +#define AARCH32_PMSA_MEM_INNER_WBT 0x04U +#define AARCH32_PMSA_MEM_INNER_WTNT 0x08U +#define AARCH32_PMSA_MEM_INNER_WBNT 0x0cU #define AARCH32_PMSA_MEM_INNER_RA 0x02U #define AARCH32_PMSA_MEM_INNER_WA 0x01U #define AARCH32_PMSA_MEM_ATTR( _ma0, _ma1, _ma2, _ma3 ) \ - ( ( _ma0 ) | ( ( _ma1 ) << 8 ) | ( ( _ma1 ) << 16 ) | ( ( _ma1 ) << 24 ) ) + ( ( _ma0 ) | ( ( _ma1 ) << 8 ) | ( ( _ma2 ) << 16 ) | ( ( _ma3 ) << 24 ) ) #define AARCH32_PMSA_MEM_ATTR_DEFAULT_CACHED \ ( AARCH32_PMSA_MEM_OUTER_WBNT | \ @@ -141,7 +140,7 @@ extern "C" { ( AARCH32_PMSA_ATTR_EN | \ AARCH32_PMSA_ATTR_XN | \ AARCH32_PMSA_ATTR_AP( AARCH32_PMSA_ATTR_AP_EL1_RO_EL0_NO ) | \ - AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_OUTER ) | \ + AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \ AARCH32_PMSA_ATTR_IDX( 0U ) ) #define AARCH32_PMSA_DATA_READ_ONLY_UNCACHED \ @@ -154,8 +153,8 @@ extern "C" { #define AARCH32_PMSA_DATA_READ_WRITE_CACHED \ ( AARCH32_PMSA_ATTR_EN | \ AARCH32_PMSA_ATTR_XN | \ - AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO | \ - AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_OUTER ) | \ + 
AARCH32_PMSA_ATTR_AP( AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO ) | \ + AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \ AARCH32_PMSA_ATTR_IDX( 0U ) ) #define AARCH32_PMSA_DATA_READ_WRITE_UNCACHED \ @@ -165,6 +164,13 @@ extern "C" { AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \ AARCH32_PMSA_ATTR_IDX( 1U ) ) +#define AARCH32_PMSA_DATA_READ_WRITE_SHARED \ + ( AARCH32_PMSA_ATTR_EN | \ + AARCH32_PMSA_ATTR_XN | \ + AARCH32_PMSA_ATTR_AP( AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO ) | \ + AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_OUTER ) | \ + AARCH32_PMSA_ATTR_IDX( 1U ) ) + #define AARCH32_PMSA_DEVICE \ ( AARCH32_PMSA_ATTR_EN | \ AARCH32_PMSA_ATTR_XN | \ @@ -172,6 +178,20 @@ extern "C" { AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \ AARCH32_PMSA_ATTR_IDX( 2U ) ) +/* + * The Cortex-R52 processor is not coherent and the inner shareability domain + * consists of an individual Cortex-R52 core. Thus for an SMP configuration, + * the read-write data must be configured as Non-cachable and Shareable. The + * outer shareability domain is external to the Cortex-R52 processor. + */ +#if defined(RTEMS_SMP) +#define AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ + AARCH32_PMSA_DATA_READ_WRITE_SHARED +#else +#define AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ + AARCH32_PMSA_DATA_READ_WRITE_CACHED +#endif + /** * @brief The default section definitions shall be used by the BSP to define * ::_AArch32_PMSA_Sections. 
@@ -187,7 +207,7 @@ extern "C" { }, { \ .begin = (uint32_t) bsp_section_fast_data_begin, \ .end = (uint32_t) bsp_section_fast_data_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_start_begin, \ .end = (uint32_t) bsp_section_start_end, \ @@ -207,23 +227,23 @@ extern "C" { }, { \ .begin = (uint32_t) bsp_section_data_begin, \ .end = (uint32_t) bsp_section_data_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_bss_begin, \ .end = (uint32_t) bsp_section_bss_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_rtemsstack_begin, \ .end = (uint32_t) bsp_section_rtemsstack_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_work_begin, \ .end = (uint32_t) bsp_section_work_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_stack_begin, \ .end = (uint32_t) bsp_section_stack_end, \ - .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \ + .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \ }, { \ .begin = (uint32_t) bsp_section_nocache_begin, \ .end = (uint32_t) bsp_section_nocache_end, \ @@ -258,6 +278,33 @@ typedef struct { } AArch32_PMSA_Section; /** + * @brief The region definition is used to configure the Memory Protection + * Unit (MPU). + * + * A region cannot be empty. + */ +typedef struct { + /** + * @brief This member defines the base address of the region. + * + * The limit address is this the address of the first byte of the region. + */ + uint32_t base; + + /** + * @brief This member defines the limit address of the region. 
+ * + * The limit address is this the address of the last byte of the region. + */ + uint32_t limit; + + /** + * @brief This member defines the attributes of the region. + */ + uint32_t attributes; +} AArch32_PMSA_Region; + +/** * @brief Initializes the Memory Protection Unit (MPU). * * The section definitions are used to define the regions of the MPU. Sections @@ -265,6 +312,8 @@ typedef struct { * regions are used, then the MPU is not enabled. Overlapping section * definitions result in undefined system behaviour. * + * The function shall be called while the MPU is disabled. + * * @param memory_attributes_0 are the memory attributes for MAIR0. * * @param memory_attributes_1 are the memory attributes for MAIR1. @@ -281,6 +330,31 @@ void _AArch32_PMSA_Initialize( ); /** + * @brief Maps the section definitions to region definitions. + * + * The section definitions are used to define the regions of the MPU. Sections + * are merged if possible to reduce the count of used regions. If too many + * regions are used, then zero is returned. Overlapping section definitions + * result in undefined system behaviour. + * + * @param sections is the array with section definitions to map to regions. + * + * @param section_count is the count of section definitions. + * + * @param regions is the array with usable region definitions. + * + * @param region_max is the count of usable region definitions. + * + * @return Returns the count of actually used regions. + */ +size_t _AArch32_PMSA_Map_sections_to_regions( + const AArch32_PMSA_Section *sections, + size_t section_count, + AArch32_PMSA_Region *regions, + size_t region_max +); + +/** * @brief This array provides section definitions to initialize the memory * protection unit (MPU). 
* diff --git a/cpukit/score/cpu/arm/include/rtems/score/aarch32-system-registers.h b/cpukit/score/cpu/arm/include/rtems/score/aarch32-system-registers.h index 2c532ca669..5af0921547 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/aarch32-system-registers.h +++ b/cpukit/score/cpu/arm/include/rtems/score/aarch32-system-registers.h @@ -10,7 +10,7 @@ */ /* - * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de) + * Copyright (C) 2020 embedded brains GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/cpukit/score/cpu/arm/include/rtems/score/arm.h b/cpukit/score/cpu/arm/include/rtems/score/arm.h index b1e4b07a37..650c48d55f 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/arm.h +++ b/cpukit/score/cpu/arm/include/rtems/score/arm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -13,9 +15,26 @@ * Copyright (c) 2002 Advent Networks, Inc. * Jay Monkman <jmonkman@adventnetworks.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ @@ -47,12 +66,14 @@ extern "C" { #define ARM_MULTILIB_HAS_WFI #define ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE #define ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS + #define ARM_MULTILIB_HAS_STORE_RETURN_STATE #endif #ifndef ARM_DISABLE_THREAD_ID_REGISTER_USE #if defined(__ARM_ARCH_7A__) \ || defined(__ARM_ARCH_7R__) \ - || __ARM_ARCH >= 8 + || __ARM_ARCH >= 8 \ + || __ARM_ARCH == 6 #define ARM_MULTILIB_HAS_THREAD_ID_REGISTER #endif #endif diff --git a/cpukit/score/cpu/arm/include/rtems/score/armv4.h b/cpukit/score/cpu/arm/include/rtems/score/armv4.h index caeaa3e553..1d3a6de5ff 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/armv4.h +++ b/cpukit/score/cpu/arm/include/rtems/score/armv4.h @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUARM + * + * @brief This header file provides interfaces of the ARMv4 architecture + * support. + */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef RTEMS_SCORE_ARMV4_H @@ -21,6 +43,12 @@ extern "C" { #endif /* __cplusplus */ +/** + * @addtogroup RTEMSScoreCPUARM + * + * @{ + */ + #ifdef ARM_MULTILIB_ARCH_V4 void bsp_interrupt_dispatch( void ); @@ -91,6 +119,8 @@ static inline void _ARMV4_Status_restore( uint32_t psr ) #endif /* ARM_MULTILIB_ARCH_V4 */ +/** @} */ + #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/cpukit/score/cpu/arm/include/rtems/score/armv7m.h b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h index 8f926e826a..7fa48b3aa5 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/armv7m.h +++ b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h @@ -1,21 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARMV7M Architecture Support + * @ingroup RTEMSScoreCPUARM + * + * @brief This header file provides interfaces of the ARMv7-M architecture + * support. */ /* * Copyright (c) 2011-2014 Sebastian Huber. All rights reserved. * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef RTEMS_SCORE_ARMV7M_H @@ -143,8 +159,19 @@ typedef struct { #define ARMV7M_SCB_SHCSR_MEMFAULTENA (1U << 16) uint32_t shcsr; +#define ARMV7M_SCB_CFSR_MMFSR_MASK 0xff +#define ARMV7M_SCB_CFSR_MMFSR_GET(n) (n & ARMV7M_SCB_CFSR_MMFSR_MASK) +#define ARMV7M_SCB_CFSR_BFSR_MASK 0xff00 +#define ARMV7M_SCB_CFSR_BFSR_GET(n) (n & ARMV7M_SCB_CFSR_BFSR_MASK) +#define ARMV7M_SCB_CFSR_UFSR_MASK 0xffff0000 +#define ARMV7M_SCB_CFSR_UFSR_GET(n) (n & ARMV7M_SCB_CFSR_UFSR_MASK) uint32_t cfsr; + +#define ARMV7M_SCB_HFSR_VECTTBL_MASK 0x2 +#define ARMV7M_SCB_HFSR_FORCED_MASK (1U << 30) +#define ARMV7M_SCB_HFSR_DEBUGEVT_MASK (1U << 31) uint32_t hfsr; + uint32_t dfsr; uint32_t mmfar; uint32_t bfar; @@ -656,11 +683,11 @@ static inline void _ARMV7M_MPU_Set_region( RTEMS_OBFUSCATE_VARIABLE(end); size = (uintptr_t) end - (uintptr_t) begin; - if ( size > 0 ) { + if ( (uintptr_t) end > (uintptr_t) begin ) { rbar = (uintptr_t) begin | region | ARMV7M_MPU_RBAR_VALID; rasr |= _ARMV7M_MPU_Get_region_size(size); } else { - rbar = region; + rbar = ARMV7M_MPU_RBAR_VALID | region; rasr = 0; } @@ -678,6 +705,7 @@ static inline void _ARMV7M_MPU_Disable_region( } static inline void _ARMV7M_MPU_Setup( + uint32_t ctrl, const ARMV7M_MPU_Region_config *cfg, size_t cfg_count ) @@ -713,7 +741,7 @@ static inline void _ARMV7M_MPU_Setup( _ARMV7M_MPU_Disable_region(mpu, region); } - mpu->ctrl = ARMV7M_MPU_CTRL_ENABLE | ARMV7M_MPU_CTRL_PRIVDEFENA; + mpu->ctrl = ctrl; 
scb->shcsr |= ARMV7M_SCB_SHCSR_MEMFAULTENA; _ARM_Data_synchronization_barrier(); diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h index e5b23e7100..a462b48cf1 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h @@ -1,18 +1,23 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARM Architecture Support API + * @ingroup RTEMSScoreCPUARM + * + * @brief This header file defines implementation interfaces pertaining to the + * port of the executive to the ARM architecture. */ /* * This include file contains information pertaining to the ARM * processor. * - * Copyright (c) 2009, 2017 embedded brains GmbH + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG * * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com> * - * Copyright (c) 2006 OAR Corporation + * Copyright (c) 2006 On-Line Applications Research Corporation (OAR) * * Copyright (c) 2002 Advent Networks, Inc. * Jay Monkman <jmonkman@adventnetworks.com> @@ -20,9 +25,26 @@ * COPYRIGHT (c) 2000 Canon Research Centre France SA. * Emmanuel Raguet, mailto:raguet@crf.canon.fr * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ @@ -157,9 +179,7 @@ #define CPU_MAXIMUM_PROCESSORS 32 -#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER - #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44 -#endif +#define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44 #ifdef ARM_MULTILIB_VFP #define ARM_CONTEXT_CONTROL_D8_OFFSET 48 @@ -172,10 +192,8 @@ #ifdef RTEMS_SMP #if defined(ARM_MULTILIB_VFP) #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112 - #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER) - #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48 #else - #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44 + #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48 #endif #endif @@ -221,9 +239,7 @@ typedef struct { #else void *register_sp; #endif -#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER uint32_t thread_id; -#endif #ifdef ARM_MULTILIB_VFP uint64_t register_d8; uint64_t register_d9; @@ -383,7 +399,7 @@ static inline void arm_interrupt_flash( uint32_t level ) #define _CPU_ISR_Flash( _isr_cookie ) \ arm_interrupt_flash( _isr_cookie ) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { #if defined(ARM_MULTILIB_ARCH_V4) return ( level & 0x80 ) == 0; @@ -424,6 +440,10 
@@ void _CPU_Context_Initialize( { context->is_executing = is_executing; } + + RTEMS_NO_RETURN void _ARM_Start_multitasking( Context_Control *heir ); + + #define _CPU_Start_multitasking( _heir ) _ARM_Start_multitasking( _heir ) #endif #define _CPU_Context_Restart_self( _the_context ) \ @@ -434,19 +454,6 @@ void _CPU_Context_Initialize( *(*(_destination)) = _CPU_Null_fp_context; \ } while (0) -#define _CPU_Fatal_halt( _source, _err ) \ - do { \ - uint32_t _level; \ - uint32_t _error = _err; \ - _CPU_ISR_Disable( _level ); \ - (void) _level; \ - __asm__ volatile ("mov r0, %0\n" \ - : "=r" (_error) \ - : "0" (_error) \ - : "r0" ); \ - while (1); \ - } while (0); - /** * @brief CPU initialization. */ @@ -465,6 +472,11 @@ void _CPU_ISR_install_vector( */ void _CPU_Context_switch( Context_Control *run, Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); #if defined(ARM_MULTILIB_ARCH_V7M) @@ -505,18 +517,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); { __asm__ volatile ( "wfe" : : : "memory" ); } - - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - _ARM_Data_synchronization_barrier(); - _ARM_Send_event(); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - _ARM_Wait_for_event(); - _ARM_Data_memory_barrier(); - } #endif @@ -571,14 +571,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - void *_CPU_Thread_Idle_body( uintptr_t ignored ); #if defined(ARM_MULTILIB_ARCH_V4) @@ -593,7 +585,7 @@ typedef enum { ARM_EXCEPTION_IRQ = 6, ARM_EXCEPTION_FIQ = 7, MAX_EXCEPTIONS = 8, - ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff + ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0x7fffffff 
} Arm_symbolic_exception_name; #endif /* defined(ARM_MULTILIB_ARCH_V4) */ diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h index e25dd25f99..30ef04f6a4 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,9 +12,26 @@ * COPYRIGHT (c) 2002 by Advent Networks, Inc. * Jay Monkman <jmonkman@adventnetworks.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
* * This file is the include file for cpu_asm.S */ diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h index 2c7078b790..04d23f0ea7 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief CPU Port Implementation API + * @ingroup RTEMSScoreCPUARM + * + * @brief This header file defines implementation interfaces pertaining to the + * port of the executive to the ARM architecture. */ /* - * Copyright (c) 2013, 2016 embedded brains GmbH + * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -41,6 +63,8 @@ #endif /* ARM_MULTILIB_ARCH_V4 */ +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 11 + #ifndef ASM #ifdef __cplusplus @@ -79,6 +103,18 @@ typedef struct { double d6; double d7; #endif /* ARM_MULTILIB_VFP */ +#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE + uint32_t r0; + uint32_t r1; + uint32_t r2; + uint32_t r3; + uint32_t r7; + uint32_t r9; + uint32_t r12; + uint32_t lr; + uint32_t return_pc; + uint32_t return_cpsr; +#else /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ uint32_t r9; uint32_t lr; uint32_t r0; @@ -89,6 +125,7 @@ typedef struct { uint32_t return_cpsr; uint32_t r7; uint32_t r12; +#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */ } CPU_Interrupt_frame; #ifdef RTEMS_SMP @@ -112,20 +149,42 @@ static inline struct Per_CPU_Control *_ARM_Get_current_per_CPU_control( void ) #endif /* ARM_MULTILIB_ARCH_V4 */ +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( "udf" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ +#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER + __asm__ volatile ( + "mcr p15, 0, %0, c13, c0, 3" : : "r" ( context->thread_id ) : "memory" + ); +#else + (void) context; +#endif +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->thread_id; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/arm/include/rtems/score/paravirt.h b/cpukit/score/cpu/arm/include/rtems/score/paravirt.h index d0dc4024e2..4aa98499ad 100644 --- a/cpukit/score/cpu/arm/include/rtems/score/paravirt.h +++ 
b/cpukit/score/cpu/arm/include/rtems/score/paravirt.h @@ -1,22 +1,40 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief ARM Paravirtualization Definitions + * @ingroup RTEMSScoreCPUARMParavirt * - * This include file contains definitions pertaining to paravirtualization - * of the ARM port. + * @brief This header file provides definitions pertaining to + * paravirtualization of the ARM port. */ /* * COPYRIGHT (c) 2018. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ - #ifndef RTEMS_PARAVIRT #error "This file should only be included with paravirtualization is enabled." #endif diff --git a/cpukit/score/cpu/bfin/bfin-exception-frame-print.c b/cpukit/score/cpu/bfin/bfin-exception-frame-print.c index e54eb60e9f..d6c49f5569 100644 --- a/cpukit/score/cpu/bfin/bfin-exception-frame-print.c +++ b/cpukit/score/cpu/bfin/bfin-exception-frame-print.c @@ -1,11 +1,5 @@ /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Copyright (c) 2012 embedded brains GmbH & Co. KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at diff --git a/cpukit/score/cpu/bfin/cpu.c b/cpukit/score/cpu/bfin/cpu.c index 5a528894d8..962e84d1bc 100644 --- a/cpukit/score/cpu/bfin/cpu.c +++ b/cpukit/score/cpu/bfin/cpu.c @@ -18,6 +18,7 @@ #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #include <rtems/score/bfin.h> #include <rtems/bfin/bfin.h> @@ -73,6 +74,14 @@ void _CPU_Initialize(void) __asm__ __volatile__ ("syscfg = %0" : : "d" (0x00000004)); } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + __asm__ volatile ( "cli R1; R1 = %0; _halt: idle; jump _halt;" + : : "r" (error) ); +} + +/* end of Fatal Error manager macros */ + diff --git a/cpukit/score/cpu/bfin/headers.am b/cpukit/score/cpu/bfin/headers.am deleted file mode 100644 index e0f6cfca74..0000000000 --- a/cpukit/score/cpu/bfin/headers.am +++ /dev/null @@ -1,11 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_machine_HEADERS += score/cpu/bfin/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/bfin/include/rtems/asm.h -include_rtems_bfin_HEADERS += score/cpu/bfin/include/rtems/bfin/bf52x.h -include_rtems_bfin_HEADERS += score/cpu/bfin/include/rtems/bfin/bf533.h -include_rtems_bfin_HEADERS += score/cpu/bfin/include/rtems/bfin/bfin.h -include_rtems_score_HEADERS += score/cpu/bfin/include/rtems/score/bfin.h -include_rtems_score_HEADERS += score/cpu/bfin/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/bfin/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/bfin/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/bfin/include/rtems/score/cpuimpl.h diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h index d17dfbd30a..72e6d14433 100644 --- a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h @@ -308,15 +308,6 @@ typedef struct { /**@{**/ /** - * Support routine to initialize the RTEMS vector table after it is allocated. - * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Initialize_vectors() - -/** * Disable all interrupts for an RTEMS critical section. The previous * level is returned in @a _isr_cookie. * @@ -364,7 +355,7 @@ typedef struct { : : "d"(_level) : "R0" ); \ } -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return level != 0; } @@ -469,29 +460,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/** - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. 
- * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Fatal_halt( _source, _error ) \ - { \ - __asm__ volatile ( "cli R1; \ - R1 = %0; \ - _halt: \ - idle; \ - jump _halt;"\ - : : "r" (_error) ); \ - } - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE @@ -637,14 +605,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/bfin/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/bfin/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h index 2140487fc3..5a445d9420 100644 --- a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h @@ -5,7 +5,7 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -29,34 +29,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/i386/cpu.c b/cpukit/score/cpu/i386/cpu.c index 77b7a7161c..9a3b188d10 100644 --- a/cpukit/score/cpu/i386/cpu.c +++ b/cpukit/score/cpu/i386/cpu.c @@ -8,9 +8,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H @@ -176,7 +193,7 @@ void _CPU_Context_Initialize( the_context->esp = (void *) _stack; if ( tls_area != NULL ) { - tcb = (uint32_t) _TLS_TCB_after_TLS_block_initialize( tls_area ); + tcb = (uint32_t) _TLS_Initialize_area( tls_area ); } else { tcb = 0; } @@ -215,16 +232,16 @@ void _CPU_Exception_frame_print (const CPU_Exception_frame *ctx) { unsigned int faultAddr = 0; printk("----------------------------------------------------------\n"); - printk("Exception %" PRIu32 " caught at PC %" PRIx32 " by thread %" PRId32 "\n", + printk("Exception %" PRIu32 " caught at PC %" PRIx32 " by thread %" PRIx32 "\n", ctx->idtIndex, ctx->eip, _Thread_Executing->Object.id); printk("----------------------------------------------------------\n"); printk("Processor execution context at time of the fault was :\n"); printk("----------------------------------------------------------\n"); - printk(" EAX = %" PRIx32 " EBX = %" PRIx32 " ECX = %" PRIx32 " EDX = %" PRIx32 "\n", + printk(" EAX = 0x%08" PRIx32 " EBX = 0x%08" PRIx32 " ECX = 0x%08" PRIx32 " EDX = 0x%08" PRIx32 "\n", ctx->eax, ctx->ebx, ctx->ecx, ctx->edx); - printk(" ESI = %" PRIx32 " EDI = %" PRIx32 " EBP = %" PRIx32 " ESP = %" PRIx32 "\n", + printk(" ESI = 0x%08" PRIx32 " EDI = 0x%08" PRIx32 " EBP = 0x%08" PRIx32 " ESP = 0x%08" PRIx32 "\n", ctx->esi, ctx->edi, ctx->ebp, ctx->esp0); printk("----------------------------------------------------------\n"); printk("Error code pushed by processor itself (if not 0) = %" PRIx32 "\n", @@ -250,7 +267,7 @@ void _CPU_Exception_frame_print (const CPU_Exception_frame *ctx) printk("Call Stack Trace of EIP:\n"); if ( fp ) { for ( i=1; fp->up; fp=fp->up, i++ ) { - printk("0x%08" PRIx32 " ",fp->pc); + printk("0x%08" PRIxPTR " ",fp->pc); if ( ! 
(i&3) ) printk("\n"); } diff --git a/cpukit/score/cpu/i386/cpu_asm.S b/cpukit/score/cpu/i386/cpu_asm.S index 23360959f5..bb5c096e72 100644 --- a/cpukit/score/cpu/i386/cpu_asm.S +++ b/cpukit/score/cpu/i386/cpu_asm.S @@ -1,14 +1,36 @@ -/* cpu_asm.s +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file * * This file contains all assembly code for the Intel i386 implementation * of RTEMS. - * + */ + +/* * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H @@ -45,11 +67,13 @@ .p2align 1 PUBLIC (_CPU_Context_switch) + PUBLIC (_CPU_Context_switch_no_return) .set RUNCONTEXT_ARG, 4 /* save context argument */ .set HEIRCONTEXT_ARG, 8 /* restore context argument */ SYM (_CPU_Context_switch): +SYM (_CPU_Context_switch_no_return): movl RUNCONTEXT_ARG(esp),eax /* eax = running threads context */ GET_SELF_CPU_CONTROL edx /* edx has address for per_CPU information */ movl PER_CPU_ISR_DISPATCH_DISABLE(edx),ecx diff --git a/cpukit/score/cpu/i386/headers.am b/cpukit/score/cpu/i386/headers.am deleted file mode 100644 index e984232d9b..0000000000 --- a/cpukit/score/cpu/i386/headers.am +++ /dev/null @@ -1,11 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/i386/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/i386/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/i386.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/idtr.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/interrupts.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/paravirt.h -include_rtems_score_HEADERS += score/cpu/i386/include/rtems/score/registers.h diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpu.h b/cpukit/score/cpu/i386/include/rtems/score/cpu.h index 0cb4590087..a12b0f2b92 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/i386/include/rtems/score/cpu.h @@ -1,8 +1,10 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file - * + * * @brief Intel I386 CPU Dependent Source - * + * * This include file contains information pertaining to the Intel * i386 processor. */ @@ -11,9 +13,26 @@ * COPYRIGHT (c) 1989-2011. 
* On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPU_H @@ -32,7 +51,7 @@ extern "C" { #include <rtems/score/paravirt.h> #endif #include <rtems/score/i386.h> - + /** * @defgroup RTEMSScoreCPUi386 i386 Specific Support * @@ -144,7 +163,7 @@ typedef struct { } Context_Control; #define _CPU_Context_Get_SP( _context ) \ - (_context)->esp + (uintptr_t) (_context)->esp #ifdef RTEMS_SMP static inline bool _CPU_Context_Get_is_executing( @@ -409,7 +428,7 @@ extern Context_Control_fp _CPU_Null_fp_context; #define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level) #endif -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & EFLAGS_INTR_ENABLE ) != 0; } @@ -470,16 +489,6 @@ void _CPU_Context_Initialize( uint32_t _CPU_SMP_Get_current_processor( void ); void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); - - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } #endif #define _CPU_Context_Initialize_fp( _fp_area ) \ @@ -489,19 +498,8 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* - * Fatal Error manager macros - * - * These macros perform the following functions: - * + disable interrupts and halt the CPU - */ - -RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, uint32_t error ); - #endif /* ASM */ -/* end of Fatal Error manager macros */ - #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE /* @@ -574,6 +572,11 @@ void _CPU_Context_switch( Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + /* * _CPU_Context_restore * @@ -653,14 +656,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - 
CPU_Counter_ticks first -) -{ - return second - first; -} - /**@}**/ /** Type that can store a 32-bit integer or a pointer. */ diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/i386/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/i386/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h index 570b5cc167..da38ecacf7 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -31,34 +50,75 @@ #define CPU_INTERRUPT_FRAME_SIZE 52 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 20 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + uint32_t tmp; + uint32_t cpu_index; + +#ifdef RTEMS_SMP + cpu_index = _CPU_SMP_Get_current_processor(); +#else + cpu_index = 0; +#endif + + __asm__ volatile ( + "movl " 
RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_0_OFFSET ) "(%2), %0\n" + "movl %0, _Global_descriptor_table+24(,%1,8)\n" + "movl " RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_1_OFFSET ) "(%2), %0\n" + "movl %0, _Global_descriptor_table+28(,%1,8)\n" + "leal 24(,%1,8), %0\n" + "movl %0, %%gs\n" + : "=&r" ( tmp ) + : "r" ( cpu_index ), "r" ( context ) + : "memory" + ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) &context->gs; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/i386/include/rtems/score/i386.h b/cpukit/score/cpu/i386/include/rtems/score/i386.h index 549011055e..7598204a64 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/i386.h +++ b/cpukit/score/cpu/i386/include/rtems/score/i386.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -16,9 +18,26 @@ * Copyright (C) 1998 Eric Valette (valette@crf.canon.fr) * Canon Centre Recherche France. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_I386_H @@ -204,7 +223,7 @@ void *i386_Physical_to_logical( * @param[in] offset used with \p segment to compute physical address * @retval physical address */ -RTEMS_INLINE_ROUTINE void *i386_Real_to_physical( +static inline void *i386_Real_to_physical( uint16_t segment, uint16_t offset) { @@ -558,7 +577,7 @@ extern segment_descriptors* i386_get_gdt_entry (uint16_t sgmnt_selector); * @param[in] gdt_entry pointer to entry from which base should be retrieved * @retval base address from GDT entry */ -RTEMS_INLINE_ROUTINE void* i386_base_gdt_entry (segment_descriptors* gdt_entry) +static inline void* i386_base_gdt_entry (segment_descriptors* gdt_entry) { return (void*)(gdt_entry->base_address_15_0 | (gdt_entry->base_address_23_16<<16) | diff --git a/cpukit/score/cpu/i386/include/rtems/score/idtr.h b/cpukit/score/cpu/i386/include/rtems/score/idtr.h index c40fce1fba..f19aaac377 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/idtr.h +++ b/cpukit/score/cpu/i386/include/rtems/score/idtr.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -15,17 +17,34 @@ * Volume 3 : Architecture and Programming Manual * * Formerly contained in and extracted from libcpu/i386/cpu.h. + * + * Applications must not include this file directly. */ /* * COPYRIGHT (C) 1998 Eric Valette (valette@crf.canon.fr) * Canon Centre Recherche France. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * Applications must not include this file directly. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_IDTR_H diff --git a/cpukit/score/cpu/i386/include/rtems/score/interrupts.h b/cpukit/score/cpu/i386/include/rtems/score/interrupts.h index cac9ba9b42..cb86dd1629 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/interrupts.h +++ b/cpukit/score/cpu/i386/include/rtems/score/interrupts.h @@ -1,19 +1,38 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * * @brief Intel I386 Interrupt Macros * * Formerly contained in and extracted from libcpu/i386/cpu.h + * + * Applications must not include this file directly. */ /* * COPYRIGHT (c) 1998 valette@crf.canon.fr * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * Applications must not include this file directly. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ /** diff --git a/cpukit/score/cpu/i386/include/rtems/score/paravirt.h b/cpukit/score/cpu/i386/include/rtems/score/paravirt.h index 1b26226ad0..d3d399b92b 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/paravirt.h +++ b/cpukit/score/cpu/i386/include/rtems/score/paravirt.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,9 +13,26 @@ * COPYRIGHT (c) 2018. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ diff --git a/cpukit/score/cpu/i386/include/rtems/score/registers.h b/cpukit/score/cpu/i386/include/rtems/score/registers.h index 79c0ec3e81..eda545cb58 100644 --- a/cpukit/score/cpu/i386/include/rtems/score/registers.h +++ b/cpukit/score/cpu/i386/include/rtems/score/registers.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,9 +13,26 @@ /* * COPYRIGHT (c) 1998 valette@crf.canon.fr * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_REGISTERS_H diff --git a/cpukit/score/cpu/lm32/cpu.c b/cpukit/score/cpu/lm32/cpu.c index cd2c4b055f..3301a3099b 100644 --- a/cpukit/score/cpu/lm32/cpu.c +++ b/cpukit/score/cpu/lm32/cpu.c @@ -20,6 +20,7 @@ #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> /* _CPU_Initialize @@ -45,6 +46,18 @@ void _CPU_Initialize(void) /* FP context initialization support goes here */ } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + + while ( true ) { + /* Do nothing */ + } +} + uint32_t _CPU_ISR_Get_level( void ) { /* diff --git a/cpukit/score/cpu/lm32/headers.am b/cpukit/score/cpu/lm32/headers.am deleted file mode 100644 index b77dfb5ab3..0000000000 --- a/cpukit/score/cpu/lm32/headers.am +++ /dev/null @@ -1,8 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_machine_HEADERS += score/cpu/lm32/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/lm32/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/lm32/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/lm32/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/lm32/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/lm32/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/lm32/include/rtems/score/lm32.h diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h index 5c890de53e..335d3407fe 100644 --- a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h @@ -415,15 +415,6 @@ extern Context_Control_fp _CPU_Null_fp_context; /**@{**/ /** - * Support routine to initialize the RTEMS vector table after it is allocated. - * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Initialize_vectors() - -/** * Disable all interrupts for an RTEMS critical section. The previous * level is returned in @a _isr_cookie. * @@ -465,7 +456,7 @@ extern Context_Control_fp _CPU_Null_fp_context; #define _CPU_ISR_Flash( _isr_cookie ) \ lm32_flash_interrupts( _isr_cookie ); -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & 0x0001 ) != 0; } @@ -599,23 +590,6 @@ extern char _gp[]; /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/** - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. 
- * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Fatal_halt( _source, _error ) \ - { \ - } - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE @@ -638,7 +612,7 @@ void _CPU_Initialize(void); typedef void ( *CPU_ISR_raw_handler )( void ); -RTEMS_INLINE_ROUTINE void _CPU_ISR_install_raw_handler( +static inline void _CPU_ISR_install_raw_handler( uint32_t vector, CPU_ISR_raw_handler new_handler, CPU_ISR_raw_handler *old_handler @@ -771,14 +745,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/lm32/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/lm32/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h index ab43ddbf7d..70a1db1d4d 100644 --- a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h @@ -5,7 +5,7 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -28,34 +28,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/lm32/lm32-exception-frame-print.c b/cpukit/score/cpu/lm32/lm32-exception-frame-print.c index e54eb60e9f..d6c49f5569 100644 --- a/cpukit/score/cpu/lm32/lm32-exception-frame-print.c +++ b/cpukit/score/cpu/lm32/lm32-exception-frame-print.c @@ -1,11 +1,5 @@ /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Copyright (c) 2012 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at diff --git a/cpukit/score/cpu/m68k/__m68k_read_tp.c b/cpukit/score/cpu/m68k/__m68k_read_tp.c index 466f979e57..8c73c96339 100644 --- a/cpukit/score/cpu/m68k/__m68k_read_tp.c +++ b/cpukit/score/cpu/m68k/__m68k_read_tp.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2014-2015 embedded brains GmbH. All rights reserved. + * Copyright (C) 2014, 2015 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -24,9 +37,7 @@ void __m68k_read_tp(void); void __m68k_read_tp(void) { const Thread_Control *executing = _Thread_Get_executing(); - void *tp = (char *) executing->Start.tls_area + - _TLS_Get_thread_control_block_area_size((uintptr_t) _TLS_Alignment) - + 0x7000; + void *tp = executing->Registers.thread_pointer; __asm__ volatile ( "move.l %0, %%a0" diff --git a/cpukit/score/cpu/m68k/cpu.c b/cpukit/score/cpu/m68k/cpu.c index be63351e9c..44dc06cc91 100644 --- a/cpukit/score/cpu/m68k/cpu.c +++ b/cpukit/score/cpu/m68k/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -8,15 +10,33 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #include <rtems/score/percpu.h> #include <rtems/score/tls.h> @@ -88,6 +108,29 @@ void _CPU_Initialize(void) m68k_install_interrupt_stack(); } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ +#if ( defined(__mcoldfire__) ) + __asm__ volatile( "move.w %%sr,%%d0\n\t" + "or.l %2,%%d0\n\t" + "move.w %%d0,%%sr\n\t" + "move.l %1,%%d0\n\t" + "move.l #0xDEADBEEF,%%d1\n\t" + "halt" + : "=g" (error) + : "0" (error), "d"(0x0700) + : "d0", "d1" ); + +#else + __asm__ volatile( "movl %0,%%d0; " + "orw #0x0700,%%sr; " + "stop #0x2700" : "=d" ((error)) : "0" ((error)) ); + +#endif + +/* end of Fatal Error manager macros */ +} + uint32_t _CPU_ISR_Get_level( void ) { uint32_t level; @@ -236,6 +279,9 @@ void _CPU_Context_Initialize( #endif if ( tls_area != NULL ) { - _TLS_TCB_before_TLS_block_initialize( tls_area ); + the_context->thread_pointer = + (char *) _TLS_Initialize_area( tls_area ) + 0x7000; + } else { + the_context->thread_pointer = NULL; } } diff --git a/cpukit/score/cpu/m68k/cpu_asm.S b/cpukit/score/cpu/m68k/cpu_asm.S index 3c16e62fbc..28c0cb51eb 100644 --- 
a/cpukit/score/cpu/m68k/cpu_asm.S +++ b/cpukit/score/cpu/m68k/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* cpu_asm.s * * This file contains all assembly code for the MC68020 implementation @@ -6,9 +8,26 @@ * COPYRIGHT (c) 1989-2008. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ diff --git a/cpukit/score/cpu/m68k/headers.am b/cpukit/score/cpu/m68k/headers.am deleted file mode 100644 index 3fdc6fe2bd..0000000000 --- a/cpukit/score/cpu/m68k/headers.am +++ /dev/null @@ -1,11 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/m68k/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/m68k/include/rtems/asm.h -include_rtems_m68k_HEADERS += score/cpu/m68k/include/rtems/m68k/m68302.h -include_rtems_m68k_HEADERS += score/cpu/m68k/include/rtems/m68k/m68360.h -include_rtems_m68k_HEADERS += score/cpu/m68k/include/rtems/m68k/qsm.h -include_rtems_m68k_HEADERS += score/cpu/m68k/include/rtems/m68k/sim.h -include_rtems_score_HEADERS += score/cpu/m68k/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/m68k/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/m68k/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/m68k/include/rtems/score/m68k.h diff --git a/cpukit/score/cpu/m68k/include/rtems/m68k/m68360.h b/cpukit/score/cpu/m68k/include/rtems/m68k/m68360.h index dc181d0cdf..4af1096c12 100644 --- a/cpukit/score/cpu/m68k/include/rtems/m68k/m68360.h +++ b/cpukit/score/cpu/m68k/include/rtems/m68k/m68360.h @@ -2,29 +2,33 @@ * @file * * @brief Definitions for Motorola MC68360 Processor + * + * Motorola MC68360 Quad Integrated Communications Controller (QUICC) */ + /* - ************************************************************************** - ************************************************************************** - ** ** - ** MOTOROLA MC68360 QUAD INTEGRATED COMMUNICATIONS CONTROLLER (QUICC) ** - ** ** - ** HARDWARE DECLARATIONS ** - ** ** - ** ** - ** Submitted By: ** - ** ** - ** W. 
Eric Norum ** - ** Saskatchewan Accelerator Laboratory ** - ** University of Saskatchewan ** - ** 107 North Road ** - ** Saskatoon, Saskatchewan, CANADA ** - ** S7N 5C6 ** - ** ** - ** eric@skatter.usask.ca ** - ** ** - ************************************************************************** - ************************************************************************** + * Copyright (c) 1996 Eric Norum <eric@norum.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_M68K_M68360_H diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h index de7f8215bb..ae3194b9bb 100644 --- a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -13,9 +15,26 @@ * COPYRIGHT (c) 1989-2011. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPU_H @@ -126,6 +145,7 @@ typedef struct { #if defined( __mcoldfire__ ) && ( M68K_HAS_FPU == 1 ) uint8_t fpu_dis; #endif + void *thread_pointer; } Context_Control; #define _CPU_Context_Get_SP( _context ) \ @@ -350,15 +370,12 @@ extern void* _VBR; * ISR handler macros * * These macros perform the following functions: - * + initialize the RTEMS vector table * + disable all maskable CPU interrupts * + restore previous interrupt level (enable) * + temporarily restore interrupts (flash) * + set a particular level */ -#define _CPU_Initialize_vectors() - #define _CPU_ISR_Disable( _level ) \ m68k_disable_interrupts( _level ) @@ -368,7 +385,7 @@ extern void* _VBR; #define _CPU_ISR_Flash( _level ) \ m68k_flash_interrupts( _level ) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & 0x0700 ) == 0; } @@ -404,35 +421,6 @@ void _CPU_Context_Initialize( void *_CPU_Thread_Idle_body( uintptr_t ignored ); -/* - * Fatal Error manager macros - * - * These macros perform the following functions: - * + disable interrupts and halt the CPU - */ - -#if ( defined(__mcoldfire__) ) -#define _CPU_Fatal_halt( _source, _error ) \ - { __asm__ volatile( "move.w %%sr,%%d0\n\t" \ - "or.l %2,%%d0\n\t" \ - "move.w %%d0,%%sr\n\t" \ - "move.l %1,%%d0\n\t" \ - "move.l #0xDEADBEEF,%%d1\n\t" \ - "halt" \ - : "=g" (_error) \ - : "0" (_error), "d"(0x0700) \ - : "d0", "d1" ); \ - } -#else -#define _CPU_Fatal_halt( _source, _error ) \ - { __asm__ volatile( "movl %0,%%d0; " \ - "orw #0x0700,%%sr; " \ - "stop #0x2700" : "=d" ((_error)) : "0" ((_error)) ); \ - } -#endif - -/* end of Fatal Error manager macros */ - #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE /* @@ -634,14 +622,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - 
first; -} - #if (M68K_HAS_FPSP_PACKAGE == 1) /* * Hooks for the Floating Point Support Package (FPSP) provided by Motorola diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/m68k/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/m68k/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h index dda5727142..521e9fc4c2 100644 --- a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,34 +48,60 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( "illegal" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + /* + * There is nothing to do since the thread-local storage area is obtained by + * calling __m68k_read_tp(). 
+ */ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->thread_pointer; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/m68k/include/rtems/score/m68k.h b/cpukit/score/cpu/m68k/include/rtems/score/m68k.h index eb262d30ab..399fb6518c 100644 --- a/cpukit/score/cpu/m68k/include/rtems/score/m68k.h +++ b/cpukit/score/cpu/m68k/include/rtems/score/m68k.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -13,9 +15,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_M68K_H diff --git a/cpukit/score/cpu/m68k/m68k-exception-frame-print.c b/cpukit/score/cpu/m68k/m68k-exception-frame-print.c index e54eb60e9f..ba629fd073 100644 --- a/cpukit/score/cpu/m68k/m68k-exception-frame-print.c +++ b/cpukit/score/cpu/m68k/m68k-exception-frame-print.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/microblaze/__tls_get_addr.c b/cpukit/score/cpu/microblaze/__tls_get_addr.c new file mode 100644 index 0000000000..b7e7890013 --- /dev/null +++ b/cpukit/score/cpu/microblaze/__tls_get_addr.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze thread-local storage implementation + */ + +/* + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/score/threadimpl.h> +#include <rtems/score/tls.h> + +#include <assert.h> + +void *__tls_get_addr( const TLS_Index *ti ); + +void *__tls_get_addr( const TLS_Index *ti ) +{ + const Thread_Control *executing; + + (void) ti; + + executing = _Thread_Get_executing(); + + return executing->Registers.thread_pointer; +} diff --git a/cpukit/score/cpu/microblaze/cpu.c b/cpukit/score/cpu/microblaze/cpu.c new file mode 100644 index 0000000000..823825d2b1 --- /dev/null +++ b/cpukit/score/cpu/microblaze/cpu.c @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze architecture support implementation + */ + +/* + * Copyright (c) 2015, Hesham Almatary + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <inttypes.h> + +#include <rtems/bspIo.h> +#include <rtems/fatal.h> +#include <rtems/score/isr.h> +#include <rtems/score/tls.h> +#include <rtems/score/wkspace.h> + +void _CPU_Initialize( void ) +{ +} + +void _CPU_Context_Initialize( + Context_Control *context, + void *stack_area_begin, + size_t stack_area_size, + uint32_t new_level, + void (*entry_point)( void ), + bool is_fp, + void *tls_area +) +{ + uint32_t stack = (uint32_t) stack_area_begin; + uint32_t stack_high = stack + stack_area_size; + + memset( context, 0, sizeof(*context) ) ; + + context->r1 = stack_high - 64; + context->r15 = (uint32_t) entry_point; + + uint32_t msr; + _CPU_MSR_GET( msr ); + context->rmsr = msr; + + if ( tls_area != NULL ) { + context->thread_pointer = _TLS_Initialize_area( tls_area ); + } +} + +void _CPU_Exception_frame_print( const CPU_Exception_frame *ctx ) +{ + printk( + "\n" + "R0 = 0x%08" PRIx32 " R17 = %p\n" + "R1 = 0x%08" PRIx32 " R18 = 0x%08" PRIx32 "\n" + "R2 = 0x%08" PRIx32 " R19 = 0x%08" PRIx32 "\n" + "R3 = 0x%08" PRIx32 " R20 = 0x%08" PRIx32 "\n" + "R4 = 0x%08" PRIx32 " R21 = 0x%08" PRIx32 "\n" + "R5 = 0x%08" PRIx32 " R22 = 0x%08" PRIx32 "\n" + "R6 = 0x%08" PRIx32 " R23 = 0x%08" PRIx32 "\n" + "R7 = 0x%08" PRIx32 " R24 = 0x%08" PRIx32 "\n" + "R8 = 0x%08" PRIx32 " R25 = 0x%08" PRIx32 "\n" + "R9 = 0x%08" PRIx32 " R26 = 0x%08" PRIx32 "\n" + "R10 = 0x%08" PRIx32 " R27 = 0x%08" PRIx32 "\n" + "R11 = 0x%08" PRIx32 " R28 = 0x%08" PRIx32 "\n" + "R12 = 0x%08" PRIx32 " R29 = 0x%08" PRIx32 "\n" + "R13 = 0x%08" PRIx32 " R30 = 0x%08" PRIx32 "\n" + "R14 = %p" " R31 = 0x%08" PRIx32 "\n" + "R15 = %p" " ESR = 0x%08" PRIx32 "\n" + "R16 = %p" " EAR = %p\n", + 0, ctx->r17, + ctx->r1, ctx->r18, + ctx->r2, ctx->r19, + ctx->r3, ctx->r20, + ctx->r4, ctx->r21, + ctx->r5, ctx->r22, + ctx->r6, ctx->r23, + ctx->r7, ctx->r24, + ctx->r8, ctx->r25, + ctx->r9, ctx->r26, + ctx->r10, ctx->r27, + ctx->r11, ctx->r28, + ctx->r12, ctx->r29, + 
ctx->r13, ctx->r30, + ctx->r14, ctx->r31, + ctx->r15, ctx->esr, + ctx->r16, ctx->ear + ); + + printk( + "MSR = 0x%08" PRIx32 " %s%s%s%s%s%s%s%s%s%s%s%s\n", + ctx->msr, + ( ctx->msr & MICROBLAZE_MSR_VM ) ? "VM " : "", + ( ctx->msr & MICROBLAZE_MSR_UM ) ? "UM " : "", + ( ctx->msr & MICROBLAZE_MSR_PVR ) ? "PVR " : "", + ( ctx->msr & MICROBLAZE_MSR_EIP ) ? "EiP " : "", + ( ctx->msr & MICROBLAZE_MSR_EE ) ? "EE " : "", + ( ctx->msr & MICROBLAZE_MSR_DCE ) ? "DCE " : "", + ( ctx->msr & MICROBLAZE_MSR_DZO ) ? "DZO " : "", + ( ctx->msr & MICROBLAZE_MSR_ICE ) ? "ICE " : "", + ( ctx->msr & MICROBLAZE_MSR_FSL ) ? "FSL " : "", + ( ctx->msr & MICROBLAZE_MSR_BIP ) ? "BiP " : "", + ( ctx->msr & MICROBLAZE_MSR_C ) ? "C " : "", + ( ctx->msr & MICROBLAZE_MSR_IE ) ? "IE " : "" + ); +} + +void _CPU_ISR_Set_level( uint32_t level ) +{ + uint32_t microblaze_switch_reg; + + _CPU_MSR_GET( microblaze_switch_reg ); + + if ( level == 0 ) { + microblaze_switch_reg |= MICROBLAZE_MSR_IE; + } else { + microblaze_switch_reg &= ~(MICROBLAZE_MSR_IE); + } + + _CPU_MSR_SET( microblaze_switch_reg ); +} + +uint32_t _CPU_ISR_Get_level( void ) +{ + uint32_t level; + + _CPU_MSR_GET( level ); + + /* This is unique. The MSR register contains an interrupt enable flag where + * most other architectures have an interrupt disable flag. 
*/ + return ( level & MICROBLAZE_MSR_IE ) == 0; +} + +void _CPU_ISR_install_vector( + uint32_t vector, + CPU_ISR_handler new_handler, + CPU_ISR_handler *old_handler +) +{ + *old_handler = _ISR_Vector_table[ vector ]; + _ISR_Vector_table[ vector ] = new_handler; +} + +void *_CPU_Thread_Idle_body( uintptr_t ignored ) +{ + while ( true ) { + } +} + +MicroBlaze_Exception_handler installed_exception_handler = NULL; + +void _MicroBlaze_Exception_install_handler( + MicroBlaze_Exception_handler new_handler, + MicroBlaze_Exception_handler *old_handler +) +{ + if ( old_handler != NULL ) { + *old_handler = installed_exception_handler; + } + + installed_exception_handler = new_handler; +} + +void _MicroBlaze_Exception_handle( CPU_Exception_frame *ef ) +{ + /* EiP is not set for user exceptions which are unused and not hooked */ + if ( + ( ef->msr & MICROBLAZE_MSR_EIP ) != 0 + && installed_exception_handler != NULL + ) { + installed_exception_handler( ef ); + } + + rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) ef ); +} + +MicroBlaze_Exception_handler installed_debug_handler = NULL; + +void _MicroBlaze_Debug_install_handler( + MicroBlaze_Exception_handler new_handler, + MicroBlaze_Exception_handler *old_handler +) +{ + if ( old_handler != NULL ) { + *old_handler = installed_debug_handler; + } + + installed_debug_handler = new_handler; +} + +void _MicroBlaze_Debug_handle( CPU_Exception_frame *ef ) +{ + /* BiP is not set for software debug events, set it here */ + ef->msr |= MICROBLAZE_MSR_BIP; + + if ( installed_debug_handler != NULL ) { + installed_debug_handler( ef ); + } + + rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) ef ); +} diff --git a/cpukit/score/cpu/microblaze/cpu_asm.S b/cpukit/score/cpu/microblaze/cpu_asm.S new file mode 100644 index 0000000000..0a2c5d8fff --- /dev/null +++ b/cpukit/score/cpu/microblaze/cpu_asm.S @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + 
* + * @brief MicroBlaze interrupt handler implementation + */ + +/* + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <rtems/asm.h> +#include <rtems/score/percpu.h> + + .text + .globl _ISR_Handler + .align 2 + +_ISR_Handler: + /* Save stack frame */ + swi r3, r1, MICROBLAZE_INTERRUPT_FRAME_R3 + swi r4, r1, MICROBLAZE_INTERRUPT_FRAME_R4 + swi r6, r1, MICROBLAZE_INTERRUPT_FRAME_R6 + swi r7, r1, MICROBLAZE_INTERRUPT_FRAME_R7 + swi r8, r1, MICROBLAZE_INTERRUPT_FRAME_R8 + swi r9, r1, MICROBLAZE_INTERRUPT_FRAME_R9 + swi r10, r1, MICROBLAZE_INTERRUPT_FRAME_R10 + swi r11, r1, MICROBLAZE_INTERRUPT_FRAME_R11 + swi r12, r1, MICROBLAZE_INTERRUPT_FRAME_R12 + swi r15, r1, MICROBLAZE_INTERRUPT_FRAME_R15 + swi r18, r1, MICROBLAZE_INTERRUPT_FRAME_R18 + mfs r3, rmsr + swi r3, r1, MICROBLAZE_INTERRUPT_FRAME_MSR + + /* Disable dispatching */ + lwi r3, r0, _Per_CPU_Information + 16 + addik r3, r3, 1 + swi r3, r0, _Per_CPU_Information + 16 + + swi r14, r1, MICROBLAZE_INTERRUPT_FRAME_R14 + + /* Is SP < INTERRUPT_STACK_LOW? */ + lwi r4, r0, _Per_CPU_Information + rsubk r3, r4, r1 + blei r3, switch_to_interrupt_stack + + /* Is SP > INTERRUPT_STACK_HIGH? */ + lwi r4, r0, _Per_CPU_Information + 4 + rsubk r3, r4, r1 + bgei r3, switch_to_interrupt_stack + + bri on_interrupt_stack + +switch_to_interrupt_stack: + add r4, r0, r1 + lwi r1, r0, _Per_CPU_Information + 4 + addik r1, r1, -(CPU_INTERRUPT_FRAME_SIZE) + swi r4, r1, 0 + +on_interrupt_stack: + /* Add 1 to ISR_NEST_LEVEL */ + lwi r3, r0, _Per_CPU_Information + 8 + addik r3, r3, 1 + swi r3, r0, _Per_CPU_Information + 8 + + bralid r15, bsp_interrupt_dispatch + nop + + /* Subtract 1 from ISR_NEST_LEVEL */ + lwi r3, r0, _Per_CPU_Information + 8 + addik r3, r3, -1 + swi r3, r0, _Per_CPU_Information + 8 + + /* Is ISR_NEST_LEVEL > 0? 
*/ + bgti r3, after_stack_switch + + /* Switch back to interrupted thread stack */ + lwi r1, r1, 0 + +after_stack_switch: + /* Subtract 1 from THREAD_DISPATCH_DISABLE_LEVEL */ + lwi r3, r0, _Per_CPU_Information + 16 + addik r3, r3, -1 + swi r3, r0, _Per_CPU_Information + 16 + + /* Is THREAD_DISPATCH_DISABLE_LEVEL != 0? */ + bnei r3, quick_exit + + /* Is DISPATCH_NEEDED == 0? */ + lwi r3, r0, _Per_CPU_Information + 20 + beqi r3, quick_exit + + /* Return to interrupted thread and make it do a dispatch */ + bralid r15, _Thread_Dispatch + nop + /* Fall through to quick exit */ + +quick_exit: + /* Simple return from nested interrupt */ + /* Restore registers */ + lwi r3, r1, MICROBLAZE_INTERRUPT_FRAME_MSR + mts rmsr, r3 + lwi r3, r1, MICROBLAZE_INTERRUPT_FRAME_R3 + lwi r4, r1, MICROBLAZE_INTERRUPT_FRAME_R4 + lwi r5, r1, MICROBLAZE_INTERRUPT_FRAME_R5 + lwi r6, r1, MICROBLAZE_INTERRUPT_FRAME_R6 + lwi r7, r1, MICROBLAZE_INTERRUPT_FRAME_R7 + lwi r8, r1, MICROBLAZE_INTERRUPT_FRAME_R8 + lwi r9, r1, MICROBLAZE_INTERRUPT_FRAME_R9 + lwi r10, r1, MICROBLAZE_INTERRUPT_FRAME_R10 + lwi r11, r1, MICROBLAZE_INTERRUPT_FRAME_R11 + lwi r12, r1, MICROBLAZE_INTERRUPT_FRAME_R12 + lwi r14, r1, MICROBLAZE_INTERRUPT_FRAME_R14 + lwi r15, r1, MICROBLAZE_INTERRUPT_FRAME_R15 + lwi r18, r1, MICROBLAZE_INTERRUPT_FRAME_R18 + + /* Remove stack frame */ + addik r1, r1, CPU_INTERRUPT_FRAME_SIZE + + rtid r14, 0 + nop diff --git a/cpukit/score/cpu/microblaze/include/machine/elf_machdep.h b/cpukit/score/cpu/microblaze/include/machine/elf_machdep.h new file mode 100644 index 0000000000..e6d661c596 --- /dev/null +++ b/cpukit/score/cpu/microblaze/include/machine/elf_machdep.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/* + * Copyright (C) 2023 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MICROBLAZE_ELF_MACHDEP_H_ +#define _MICROBLAZE_ELF_MACHDEP_H_ + +#define ELF64_MACHDEP_ID EM_MICROBLAZE +#define ELF32_MACHDEP_ID EM_MICROBLAZE + +#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB +#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB + +#define ELF32_MACHDEP_ID_CASES \ + case EM_MICROBLAZE: \ + break; + +#define ELF64_MACHDEP_ID_CASES \ + case EM_MICROBLAZE: \ + break; + +#define KERN_ELFSIZE 32 +#define ARCH_ELFSIZE 32 /* MD native binary size */ + +/* Processor specific relocation types */ + +#define R_MICROBLAZE_NONE 0 +#define R_MICROBLAZE_32 1 +#define R_MICROBLAZE_32_PCREL 2 +#define R_MICROBLAZE_64_PCREL 3 +#define R_MICROBLAZE_32_PCREL_LO 4 +#define R_MICROBLAZE_64 5 +#define R_MICROBLAZE_32_LO 6 +#define R_MICROBLAZE_SRO32 7 +#define R_MICROBLAZE_SRW32 8 +#define R_MICROBLAZE_64_NONE 9 +#define R_MICROBLAZE_32_SYM_OP_SYM 10 +#define R_MICROBLAZE_GNU_VTINHERIT 11 +#define R_MICROBLAZE_GNU_VTENTRY 12 +#define R_MICROBLAZE_GOTPC_64 13 +#define R_MICROBLAZE_GOT_64 14 +#define R_MICROBLAZE_PLT_64 15 +#define R_MICROBLAZE_REL 16 +#define R_MICROBLAZE_JUMP_SLOT 17 +#define R_MICROBLAZE_GLOB_DAT 18 +#define R_MICROBLAZE_GOTOFF_64 19 +#define R_MICROBLAZE_GOTOFF_32 20 +#define R_MICROBLAZE_COPY 21 +#define R_MICROBLAZE_TLS 22 +#define R_MICROBLAZE_TLSGD 23 +#define R_MICROBLAZE_TLSLD 24 +#define R_MICROBLAZE_TLSDTPMOD32 25 +#define R_MICROBLAZE_TLSDTPREL32 26 +#define R_MICROBLAZE_TLSDTPREL64 27 +#define R_MICROBLAZE_TLSGOTTPREL32 28 +#define R_MICROBLAZE_TLSTPREL32 29 + +#define R_TYPE( name ) R_MICROBLAZE_##name + +#endif /* _MICROBLAZE_ELF_MACHDEP_H_ */ diff --git a/cpukit/score/cpu/microblaze/include/rtems/asm.h b/cpukit/score/cpu/microblaze/include/rtems/asm.h new file mode 100644 index 0000000000..2e35a66294 --- /dev/null +++ b/cpukit/score/cpu/microblaze/include/rtems/asm.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @brief MicroBlaze assembler support + * + * This include file attempts to 
address the problems + * caused by incompatible flavors of assemblers and + * toolsets. It primarily addresses variations in the + * use of leading underscores on symbols and the requirement + * that register names be preceded by a %. + */ + +/* + * Copyright (c) 2015, Hesham Almatary + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTEMS_ASM_H +#define _RTEMS_ASM_H + +/* + * Indicate we are in an assembly file and get the basic CPU definitions. 
+ */ + +#ifndef ASM +#define ASM +#endif + +#include <rtems/score/cpuopts.h> + +#ifndef __USER_LABEL_PREFIX__ +/** + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + * + * This symbol is prefixed to all C program symbols. + */ +#define __USER_LABEL_PREFIX__ _ +#endif + +#ifndef __REGISTER_PREFIX__ +/** + * Recent versions of GNU cpp define variables which indicate the + * need for underscores and percents. If not using GNU cpp or + * the version does not support this, then you will obviously + * have to define these as appropriate. + * + * This symbol is prefixed to all register names. + */ +#define __REGISTER_PREFIX__ +#endif + +#include <rtems/concat.h> + +/** Use the right prefix for global labels. */ +#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) + +/** Use the right prefix for registers. */ +#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) + +/* + * define macros for all of the registers on this CPU + * + * EXAMPLE: #define d0 REG (d0) + */ + +/* + * Define macros to handle section beginning and ends. + */ + + +/** This macro is used to denote the beginning of a code declaration. */ +#define BEGIN_CODE_DCL .text +/** This macro is used to denote the end of a code declaration. */ +#define END_CODE_DCL +/** This macro is used to denote the beginning of a data declaration section. */ +#define BEGIN_DATA_DCL .data +/** This macro is used to denote the end of a data declaration section. */ +#define END_DATA_DCL +/** This macro is used to denote the beginning of a code section. */ +#define BEGIN_CODE .text +/** This macro is used to denote the end of a code section. */ +#define END_CODE +/** This macro is used to denote the beginning of a data section. */ +#define BEGIN_DATA +/** This macro is used to denote the end of a data section. 
*/ +#define END_DATA +/** This macro is used to denote the beginning of the + * unitialized data section. + */ +#define BEGIN_BSS +/** This macro is used to denote the end of the unitialized data section. */ +#define END_BSS +/** This macro is used to denote the end of the assembly file. */ +#define END + +/** + * This macro is used to declare a public global symbol. + * + * @note This must be tailored for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ +#define PUBLIC(sym) .globl SYM (sym) + +/** + * This macro is used to prototype a public global symbol. + * + * @note This must be tailored for a particular flavor of the C compiler. + * They may need to put underscores in front of the symbols. + */ +#define EXTERN(sym) .globl SYM (sym) + +#endif diff --git a/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h b/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h new file mode 100644 index 0000000000..3d865732d6 --- /dev/null +++ b/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h @@ -0,0 +1,467 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPU + * + * @brief MicroBlaze architecture support + */ + +/* + * Copyright (c) 2015, Hesham Almatary + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTEMS_SCORE_CPU_H +#define _RTEMS_SCORE_CPU_H + +#include <rtems/score/basedefs.h> +#include <rtems/score/microblaze.h> + +#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE + +#define CPU_ISR_PASSES_FRAME_POINTER FALSE + +#define CPU_HARDWARE_FP FALSE + +#define CPU_SOFTWARE_FP FALSE + +#define CPU_ALL_TASKS_ARE_FP FALSE + +#define CPU_IDLE_TASK_IS_FP FALSE + +#define CPU_USE_DEFERRED_FP_SWITCH FALSE + +#define CPU_STACK_GROWS_UP FALSE + +/** + * The maximum cache-line size is 16 words. 
+ */ +#define CPU_CACHE_LINE_BYTES 64 + +#define CPU_STRUCTURE_ALIGNMENT + +#define CPU_MODES_INTERRUPT_MASK 0x00000001 + +#define MICROBLAZE_EXCEPTION_FRAME_R1 0 +#define MICROBLAZE_EXCEPTION_FRAME_R2 4 +#define MICROBLAZE_EXCEPTION_FRAME_R3 8 +#define MICROBLAZE_EXCEPTION_FRAME_R4 12 +#define MICROBLAZE_EXCEPTION_FRAME_R5 16 +#define MICROBLAZE_EXCEPTION_FRAME_R6 20 +#define MICROBLAZE_EXCEPTION_FRAME_R7 24 +#define MICROBLAZE_EXCEPTION_FRAME_R8 28 +#define MICROBLAZE_EXCEPTION_FRAME_R9 32 +#define MICROBLAZE_EXCEPTION_FRAME_R10 36 +#define MICROBLAZE_EXCEPTION_FRAME_R11 40 +#define MICROBLAZE_EXCEPTION_FRAME_R12 44 +#define MICROBLAZE_EXCEPTION_FRAME_R13 48 +#define MICROBLAZE_EXCEPTION_FRAME_R14 52 +#define MICROBLAZE_EXCEPTION_FRAME_R15 56 +#define MICROBLAZE_EXCEPTION_FRAME_R16 60 +#define MICROBLAZE_EXCEPTION_FRAME_R17 64 +#define MICROBLAZE_EXCEPTION_FRAME_R18 68 +#define MICROBLAZE_EXCEPTION_FRAME_R19 72 +#define MICROBLAZE_EXCEPTION_FRAME_R20 76 +#define MICROBLAZE_EXCEPTION_FRAME_R21 80 +#define MICROBLAZE_EXCEPTION_FRAME_R22 84 +#define MICROBLAZE_EXCEPTION_FRAME_R23 88 +#define MICROBLAZE_EXCEPTION_FRAME_R24 92 +#define MICROBLAZE_EXCEPTION_FRAME_R25 96 +#define MICROBLAZE_EXCEPTION_FRAME_R26 100 +#define MICROBLAZE_EXCEPTION_FRAME_R27 104 +#define MICROBLAZE_EXCEPTION_FRAME_R28 108 +#define MICROBLAZE_EXCEPTION_FRAME_R29 112 +#define MICROBLAZE_EXCEPTION_FRAME_R30 116 +#define MICROBLAZE_EXCEPTION_FRAME_R31 120 +#define MICROBLAZE_EXCEPTION_FRAME_MSR 124 +#define MICROBLAZE_EXCEPTION_FRAME_EAR 128 +#define MICROBLAZE_EXCEPTION_FRAME_ESR 132 +#define MICROBLAZE_EXCEPTION_FRAME_BTR 136 + +#define CPU_EXCEPTION_FRAME_SIZE 140 + +#ifndef ASM + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup CPUContext Management + * This defines the minimal set of integer and processor state registers + * that must be saved during a voluntary context switch from one thread + * to another. 
+ */ +typedef struct { + uint32_t r1; + uint32_t r13; + uint32_t r14; + uint32_t r15; + uint32_t r16; + uint32_t r17; + uint32_t r18; + uint32_t r19; + uint32_t r20; + uint32_t r21; + uint32_t r22; + uint32_t r23; + uint32_t r24; + uint32_t r25; + uint32_t r26; + uint32_t r27; + uint32_t r28; + uint32_t r29; + uint32_t r30; + uint32_t r31; + uint32_t rmsr; + void *thread_pointer; +} Context_Control; + +/** + * @ingroup CPUContext Management + * + * This macro returns the stack pointer associated with @a _context. + * + * @param[in] _context is the thread context area to access + * + * @return This method returns the stack pointer. + */ +#define _CPU_Context_Get_SP( _context ) \ + (_context)->r1 + +#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0 + +#define CPU_INTERRUPT_NUMBER_OF_VECTORS 32 + +#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE + +#define CPU_MAXIMUM_PROCESSORS 32 + +/** + * @ingroup CPUInterrupt + * This defines the highest interrupt vector number for this port. + */ +#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) + +#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE + +#define CPU_STACK_MINIMUM_SIZE (1024*4) + +#define CPU_ALIGNMENT 4 + +#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT + +#define CPU_STACK_ALIGNMENT CPU_ALIGNMENT + +#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES + +/* + * bit definitions in the documentation are reversed for all special registers + * such that bit 0 is the most significant bit + */ +#define MICROBLAZE_MSR_VM ( 1 << 13 ) +#define MICROBLAZE_MSR_UM ( 1 << 11 ) +#define MICROBLAZE_MSR_PVR ( 1 << 10 ) +#define MICROBLAZE_MSR_EIP ( 1 << 9 ) +#define MICROBLAZE_MSR_EE ( 1 << 8 ) +#define MICROBLAZE_MSR_DCE ( 1 << 7 ) +#define MICROBLAZE_MSR_DZO ( 1 << 6 ) +#define MICROBLAZE_MSR_ICE ( 1 << 5 ) +#define MICROBLAZE_MSR_FSL ( 1 << 4 ) +#define MICROBLAZE_MSR_BIP ( 1 << 3 ) +#define MICROBLAZE_MSR_C ( 1 << 2 ) +#define MICROBLAZE_MSR_IE ( 1 << 1 ) + +#define MICROBLAZE_ESR_DS ( 1 << 12 ) +#define 
MICROBLAZE_ESR_EC_MASK 0x1f +#define MICROBLAZE_ESR_ESS_MASK 0x7f +#define MICROBLAZE_ESR_ESS_SHIFT 5 + +#define _CPU_MSR_GET( _msr_value ) \ + do { \ + (_msr_value) = 0; \ + __asm__ volatile ("mfs %0, rmsr" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \ + } while (0) + +#define _CPU_MSR_SET( _msr_value ) \ +{ __asm__ volatile ("mts rmsr, %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); } + +#define MICROBLAZE_PVR0_VERSION_GET( _pvr0_value ) \ + ( ( _pvr0_value >> 8 ) & 0xff ) + +#define _CPU_PVR0_GET( _pvr0_value ) \ + do { \ + ( _pvr0_value ) = 0; \ + __asm__ volatile ( "mfs %0, rpvr0" : "=&r" ( ( _pvr0_value ) ) ); \ + } while ( 0 ) + +#define MICROBLAZE_PVR3_BP_GET( _pvr3_value ) \ + ( ( _pvr3_value >> 25 ) & 0xf ) + +#define MICROBLAZE_PVR3_RWP_GET( _pvr3_value ) \ + ( ( _pvr3_value >> 19 ) & 0x7 ) + +#define MICROBLAZE_PVR3_WWP_GET( _pvr3_value ) \ + ( ( _pvr3_value >> 13 ) & 0x7 ) + +#define _CPU_PVR3_GET( _pvr3_value ) \ + do { \ + ( _pvr3_value ) = 0; \ + __asm__ volatile ( "mfs %0, rpvr3" : "=&r" ( ( _pvr3_value ) ) ); \ + } while ( 0 ) + +#define _CPU_ISR_Disable( _isr_cookie ) \ + { \ + unsigned int _new_msr; \ + _CPU_MSR_GET(_isr_cookie); \ + _new_msr = (_isr_cookie) & ~(MICROBLAZE_MSR_IE); \ + _CPU_MSR_SET(_new_msr); \ + } + +#define _CPU_ISR_Enable( _isr_cookie ) \ + { \ + uint32_t _microblaze_interrupt_enable; \ + uint32_t _microblaze_switch_reg; \ + \ + _microblaze_interrupt_enable = (_isr_cookie) & (MICROBLAZE_MSR_IE); \ + _CPU_MSR_GET(_microblaze_switch_reg); \ + _microblaze_switch_reg &= ~(MICROBLAZE_MSR_IE); \ + _microblaze_switch_reg |= _microblaze_interrupt_enable; \ + _CPU_MSR_SET(_microblaze_switch_reg); \ + } + +#define _CPU_ISR_Flash( _isr_cookie ) \ + { \ + unsigned int _new_msr; \ + _CPU_MSR_SET(_isr_cookie); \ + _new_msr = (_isr_cookie) & ~(MICROBLAZE_MSR_IE); \ + _CPU_MSR_SET(_new_msr); \ + } + +void _CPU_ISR_Set_level( uint32_t level ); + +uint32_t _CPU_ISR_Get_level( void ); + +static inline bool _CPU_ISR_Is_enabled( uint32_t 
level ) +{ + return ( level & MICROBLAZE_MSR_IE ) != 0; +} + +void _CPU_Context_Initialize( + Context_Control *context, + void *stack_area_begin, + size_t stack_area_size, + uint32_t new_level, + void (*entry_point)( void ), + bool is_fp, + void *tls_area +); + +#define _CPU_Context_Restart_self( _the_context ) \ + _CPU_Context_restore( (_the_context) ); + +#define _CPU_Context_Initialize_fp( _destination ) \ + { \ + *(*(_destination)) = _CPU_Null_fp_context; \ + } + +/* end of Context handler macros */ + +/* Fatal Error manager macros */ + +/* TODO */ +#define _CPU_Fatal_halt(_source, _error ) \ + do { \ + __asm__ volatile ( "sleep" ); \ + for(;;) {} \ + } while (0) + +/* end of Fatal Error manager macros */ + +/* Bitfield handler macros */ + +#define CPU_USE_GENERIC_BITFIELD_CODE TRUE + +#define CPU_SIZEOF_POINTER 4 + +#define CPU_PER_CPU_CONTROL_SIZE 0 + +typedef struct { + /* r0 is unnecessary since it is always 0 */ + uint32_t r1; + uint32_t r2; + uint32_t r3; /* return 1/scratch */ + uint32_t r4; /* return 2/scratch */ + uint32_t r5; /* param 1/scratch */ + uint32_t r6; /* param 2/scratch */ + uint32_t r7; /* param 3/scratch */ + uint32_t r8; /* param 4/scratch */ + uint32_t r9; /* param 5/scratch */ + uint32_t r10; /* param 6/scratch */ + uint32_t r11; /* scratch */ + uint32_t r12; /* scratch */ + uint32_t r13; + uint32_t *r14; /* Interrupt Link Register */ + uint32_t *r15; /* Link Register */ + uint32_t *r16; /* Trap/Debug Link Register */ + uint32_t *r17; /* Exception Link Register */ + uint32_t r18; + uint32_t r19; + uint32_t r20; + uint32_t r21; + uint32_t r22; + uint32_t r23; + uint32_t r24; + uint32_t r25; + uint32_t r26; + uint32_t r27; + uint32_t r28; + uint32_t r29; + uint32_t r30; + uint32_t r31; + uint32_t msr; /* Machine Status Register */ + uint32_t *ear; /* Exception Address Register */ + uint32_t esr; /* Exception Status Register */ + uint32_t *btr; /* Branch Target Register */ +} CPU_Exception_frame; + +/* end of Priority handler macros */ + 
+/* functions */ + +void _CPU_Initialize( void ); + +typedef void ( *CPU_ISR_handler )( uint32_t ); + +void _CPU_ISR_install_vector( + uint32_t vector, + CPU_ISR_handler new_handler, + CPU_ISR_handler *old_handler +); + +typedef void ( *MicroBlaze_Exception_handler )( CPU_Exception_frame *ef ); + +void _MicroBlaze_Exception_install_handler( + MicroBlaze_Exception_handler new_handler, + MicroBlaze_Exception_handler *old_handler +); + +void _MicroBlaze_Exception_handle( + CPU_Exception_frame *ef +); + +void _MicroBlaze_Debug_install_handler( + MicroBlaze_Exception_handler new_handler, + MicroBlaze_Exception_handler *old_handler +); + +void _MicroBlaze_Debug_handle( + CPU_Exception_frame *ef +); + +void _CPU_Context_switch( + Context_Control *run, + Context_Control *heir +); + +/* Selects the appropriate resume function based on CEF state */ +RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame ); + +RTEMS_NO_RETURN void _MicroBlaze_Exception_resume_from_exception( + CPU_Exception_frame *frame +); + +RTEMS_NO_RETURN void _MicroBlaze_Exception_resume_from_break( + CPU_Exception_frame *frame +); + +/* + * Only functions for exception cases since debug exception frames will never + * need dispatch + */ +RTEMS_NO_RETURN void _CPU_Exception_dispatch_and_resume( + CPU_Exception_frame *frame +); + +void _CPU_Exception_disable_thread_dispatch( void ); + +int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame ); + +void _CPU_Exception_frame_set_resume( + CPU_Exception_frame *frame, + void *address +); + +void _CPU_Exception_frame_make_resume_next_instruction( + CPU_Exception_frame *frame +); + +uint32_t *_MicroBlaze_Get_return_address( CPU_Exception_frame *ef ); + +RTEMS_NO_RETURN void _CPU_Context_restore( + Context_Control *new_context +); + +static inline uint32_t CPU_swap_u32( + uint32_t value +) +{ + uint32_t byte1, byte2, byte3, byte4, swapped; + + byte4 = (value >> 24) & 0xff; + byte3 = (value >> 16) & 0xff; + byte2 = (value >> 8) & 0xff; + 
byte1 = value & 0xff; + + swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4; + return swapped; +} + +#define CPU_swap_u16( value ) \ + (((value&0xff) << 8) | ((value >> 8)&0xff)) + +void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ); + +typedef uint32_t CPU_Counter_ticks; + +uint32_t _CPU_Counter_frequency( void ); + +CPU_Counter_ticks _CPU_Counter_read( void ); + +void *_CPU_Thread_Idle_body( uintptr_t ignored ); + +void bsp_interrupt_dispatch( uint32_t source ); + +/** Type that can store a 32-bit integer or a pointer. */ +typedef uintptr_t CPU_Uint32ptr; + +#ifdef __cplusplus +} +#endif + +#endif /* ASM */ + +#endif /* _RTEMS_SCORE_CPU_H */ diff --git a/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h new file mode 100644 index 0000000000..760ebbfbbb --- /dev/null +++ b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPU + * + * @brief CPU Port Implementation API + */ + +/* + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTEMS_SCORE_CPUIMPL_H +#define _RTEMS_SCORE_CPUIMPL_H + +#include <rtems/score/cpu.h> + +/** + * @defgroup RTEMSScoreCPUMicroBlaze MicroBlaze + * + * @ingroup RTEMSScoreCPU + * + * @brief MicroBlaze Architecture Support + * + * @{ + */ + +#define CPU_PER_CPU_CONTROL_SIZE 0 + +#define CPU_INTERRUPT_FRAME_SIZE 56 + +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + +#define MICROBLAZE_INTERRUPT_FRAME_R3 0 +#define MICROBLAZE_INTERRUPT_FRAME_R4 4 +#define MICROBLAZE_INTERRUPT_FRAME_R5 8 +#define MICROBLAZE_INTERRUPT_FRAME_R6 12 +#define MICROBLAZE_INTERRUPT_FRAME_R7 16 +#define MICROBLAZE_INTERRUPT_FRAME_R8 20 +#define MICROBLAZE_INTERRUPT_FRAME_R9 24 +#define MICROBLAZE_INTERRUPT_FRAME_R10 28 +#define MICROBLAZE_INTERRUPT_FRAME_R11 32 +#define MICROBLAZE_INTERRUPT_FRAME_R12 36 +#define MICROBLAZE_INTERRUPT_FRAME_R14 40 +#define MICROBLAZE_INTERRUPT_FRAME_R15 44 +#define MICROBLAZE_INTERRUPT_FRAME_R18 48 +#define MICROBLAZE_INTERRUPT_FRAME_MSR 52 + +#ifndef ASM + +#ifdef __cplusplus +extern "C" { +#endif + +void _CPU_Context_volatile_clobber( uintptr_t pattern ); + +void _CPU_Context_validate( uintptr_t pattern ); + +static inline void _CPU_Instruction_illegal( void ) +{ + __asm__ volatile ( ".word 0x0" ); +} + +static inline void _CPU_Instruction_no_operation( void ) +{ + __asm__ volatile ( "nop" ); +} + +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + /* + * There is nothing to 
do since the thread-local storage area is obtained by + * calling __tls_get_addr(). + */ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->thread_pointer; +} + +#ifdef __cplusplus +} +#endif + +#endif /* ASM */ + +/** @} */ + +#endif /* _RTEMS_SCORE_CPUIMPL_H */ diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/microblaze/include/rtems/score/microblaze.h index ed8091d73c..6310b4b17d 100644 --- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h +++ b/cpukit/score/cpu/microblaze/include/rtems/score/microblaze.h @@ -5,12 +5,12 @@ * * @ingroup RTEMSScoreCPU * - * @brief AArch64 Atomics support + * @brief MicroBlaze architecture support */ /* - * Copyright (C) 2020 On-Line Applications Research Corporation (OAR) - * Written by Kinsey Moore <kinsey.moore@oarcorp.com> + * Copyright (c) 2015, Hesham Almatary + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,9 +34,24 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H +#ifndef _RTEMS_SCORE_MICROBLAZE_H +#define _RTEMS_SCORE_MICROBLAZE_H -#include <rtems/score/cpustdatomic.h> +#ifdef __cplusplus +extern "C" { +#endif -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ +#define CPU_MODEL_NAME "MicroBlaze" +#define NOCPU_HAS_FPU 1 + +/* + * Define the name of the CPU family. 
+ */ + +#define CPU_NAME "MicroBlaze CPU" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_MICROBLAZE_H */ diff --git a/cpukit/score/cpu/microblaze/microblaze-context-switch.S b/cpukit/score/cpu/microblaze/microblaze-context-switch.S new file mode 100644 index 0000000000..523e836398 --- /dev/null +++ b/cpukit/score/cpu/microblaze/microblaze-context-switch.S @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze context switch implementation + */ + +/* + * Copyright (c) 2015, Hesham Almatary + * Copyright (C) 2021 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> + +.text +.align 4 + +PUBLIC(_CPU_Context_switch) +PUBLIC(_CPU_Context_restore) +PUBLIC(_CPU_Context_restore_fp) +PUBLIC(_CPU_Context_save_fp) + +SYM(_CPU_Context_switch): + swi r1, r5, 0 + swi r13, r5, 4 + swi r14, r5, 8 + swi r15, r5, 12 + swi r16, r5, 16 + swi r17, r5, 20 + swi r18, r5, 24 + swi r19, r5, 28 + swi r20, r5, 32 + swi r21, r5, 36 + swi r22, r5, 40 + swi r23, r5, 44 + swi r24, r5, 48 + swi r25, r5, 52 + swi r26, r5, 56 + swi r27, r5, 60 + swi r28, r5, 64 + swi r29, r5, 68 + swi r30, r5, 72 + swi r31, r5, 76 + + mfs r21, rmsr + swi r21, r5, 80 + + +SYM(restore): + lwi r1, r6, 0 + lwi r13, r6, 4 + lwi r14, r6, 8 + lwi r15, r6, 12 + lwi r16, r6, 16 + lwi r17, r6, 20 + lwi r18, r6, 24 + lwi r19, r6, 28 + lwi r20, r6, 32 + lwi r21, r6, 36 + lwi r22, r6, 40 + lwi r23, r6, 44 + lwi r24, r6, 48 + lwi r25, r6, 52 + lwi r26, r6, 56 + lwi r27, r6, 60 + lwi r28, r6, 64 + lwi r29, r6, 68 + lwi r30, r6, 72 + + lwi r31, r6, 80 + mts rmsr, r31 + + lwi r31, r6, 76 + + rtsd r15, 8 + +SYM(_CPU_Context_restore): + add r6, r5, r0 + brai restore diff --git a/cpukit/score/cpu/microblaze/microblaze-context-validate.S b/cpukit/score/cpu/microblaze/microblaze-context-validate.S new file mode 100644 index 0000000000..16564c9866 --- /dev/null +++ b/cpukit/score/cpu/microblaze/microblaze-context-validate.S @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze context validate implementation + */ + +/* + * COPYRIGHT (C) 2021 On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> + +#define FRAME_OFFSET_R19 0 +#define FRAME_OFFSET_R20 4 +#define FRAME_OFFSET_R21 8 +#define FRAME_OFFSET_R22 12 +#define FRAME_OFFSET_R23 16 +#define FRAME_OFFSET_R24 20 +#define FRAME_OFFSET_R25 24 +#define FRAME_OFFSET_R26 28 +#define FRAME_OFFSET_R27 32 +#define FRAME_OFFSET_R28 36 +#define FRAME_OFFSET_R29 40 +#define FRAME_OFFSET_R30 44 +#define FRAME_OFFSET_R31 48 + +#define FRAME_SIZE (FRAME_OFFSET_R31 + 4) + +.text +.align 4 + +PUBLIC(_CPU_Context_validate) + +SYM(_CPU_Context_validate): + + /* Save */ + addik r1, r1, -FRAME_SIZE + swi r19, r1, FRAME_OFFSET_R19 + swi r20, r1, FRAME_OFFSET_R20 + swi r21, r1, FRAME_OFFSET_R21 + swi r22, r1, FRAME_OFFSET_R22 + swi r23, r1, FRAME_OFFSET_R23 + swi r24, r1, FRAME_OFFSET_R24 + swi r25, r1, FRAME_OFFSET_R25 + swi r26, r1, FRAME_OFFSET_R26 + swi r27, r1, FRAME_OFFSET_R27 + swi r28, r1, FRAME_OFFSET_R28 + swi r29, r1, FRAME_OFFSET_R29 + swi r30, r1, FRAME_OFFSET_R30 + swi r31, r1, FRAME_OFFSET_R31 + + /* Fill */ + + add r4, r0, r3 + + /* r7 contains the stack pointer */ + add r7, r0, r1 + +.macro fill_register reg + addi r4, r4, 1 + add \reg, r0, r4 +.endm + + fill_register r21 + fill_register r22 + fill_register r23 + fill_register r24 + fill_register r25 + fill_register r26 + fill_register r27 + fill_register r28 + fill_register r29 + fill_register r30 + fill_register r31 + + /* Check */ +check: + +.macro check_register reg + addi r4, r4, 1 + cmp r6, \reg, r4 + bnei r6, restore +.endm + + cmp r6, r7, r1 + bnei r6, restore + + add r4, r0, r3 + + check_register r21 + check_register r22 + check_register r23 + check_register r24 + check_register r25 + check_register r26 + check_register r27 + check_register r28 + check_register r29 + check_register r30 + check_register r31 + + brai check + + /* Restore */ +restore: + + lwi r19, r1, FRAME_OFFSET_R19 + lwi r20, r1, FRAME_OFFSET_R20 + lwi r21, r1, FRAME_OFFSET_R21 + lwi r22, r1, 
FRAME_OFFSET_R22 + lwi r23, r1, FRAME_OFFSET_R23 + lwi r24, r1, FRAME_OFFSET_R24 + lwi r25, r1, FRAME_OFFSET_R25 + lwi r26, r1, FRAME_OFFSET_R26 + lwi r27, r1, FRAME_OFFSET_R27 + lwi r28, r1, FRAME_OFFSET_R28 + lwi r29, r1, FRAME_OFFSET_R29 + lwi r30, r1, FRAME_OFFSET_R30 + lwi r31, r1, FRAME_OFFSET_R31 + + addik r1, r1, FRAME_SIZE + + bra r15 diff --git a/cpukit/score/cpu/microblaze/microblaze-context-volatile-clobber.S b/cpukit/score/cpu/microblaze/microblaze-context-volatile-clobber.S new file mode 100644 index 0000000000..e87cc0453f --- /dev/null +++ b/cpukit/score/cpu/microblaze/microblaze-context-volatile-clobber.S @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze context volatile clobber implementation + */ + +/* + * COPYRIGHT (C) 2021 On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif + +#include <rtems/asm.h> + +.text +.align 4 + +PUBLIC(_CPU_Context_volatile_clobber) + +SYM(_CPU_Context_volatile_clobber): + +.macro clobber_register reg + addi r5, r5, -1 + add \reg, r0, r5 +.endm + + clobber_register r3 + clobber_register r4 + clobber_register r6 + clobber_register r7 + clobber_register r8 + clobber_register r9 + clobber_register r10 + + rtsd r15, 8 + nop diff --git a/cpukit/score/cpu/microblaze/microblaze-exception-extensions.S b/cpukit/score/cpu/microblaze/microblaze-exception-extensions.S new file mode 100644 index 0000000000..252e901393 --- /dev/null +++ b/cpukit/score/cpu/microblaze/microblaze-exception-extensions.S @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze exception extensions ASM implementation + */ + +/* + * Copyright (C) 2022 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rtems/asm.h> +#include <rtems/score/percpu.h> + + .text + .globl _CPU_Exception_dispatch_and_resume + .globl _MicroBlaze_Exception_resume_from_exception + .globl _MicroBlaze_Exception_resume_from_break + .align 2 + +_CPU_Exception_dispatch_and_resume: + /* Subtract 1 from ISR_NEST_LEVEL */ + lwi r3, r0, _Per_CPU_Information + 8 + addik r3, r3, -1 + swi r3, r0, _Per_CPU_Information + 8 + + /* Subtract 1 from THREAD_DISPATCH_DISABLE_LEVEL */ + lwi r3, r0, _Per_CPU_Information + 16 + addik r3, r3, -1 + swi r3, r0, _Per_CPU_Information + 16 + + /* Is THREAD_DISPATCH_DISABLE_LEVEL != 0? */ + bnei r3, _MicroBlaze_Exception_resume_from_exception + + /* Is DISPATCH_NEEDED == 0? 
*/ + lwi r3, r0, _Per_CPU_Information + 20 + beqi r3, _MicroBlaze_Exception_resume_from_exception + + bralid r15, _Thread_Dispatch + nop +/* Fall through to restore exception frame */ + +_MicroBlaze_Exception_resume_from_exception: + /* Move argument to stack pointer */ + addi r1, r5, 0 + + /* Retrieve and store MSR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_MSR + mts rmsr, r3 + + /* Retrieve and store EAR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_EAR + mts rear, r3 + + /* Retrieve and store ESR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_ESR + mts resr, r3 + + /* Restore program state */ + lwi r2, r1, MICROBLAZE_EXCEPTION_FRAME_R2 + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_R3 + lwi r4, r1, MICROBLAZE_EXCEPTION_FRAME_R4 + lwi r5, r1, MICROBLAZE_EXCEPTION_FRAME_R5 + lwi r6, r1, MICROBLAZE_EXCEPTION_FRAME_R6 + lwi r7, r1, MICROBLAZE_EXCEPTION_FRAME_R7 + lwi r8, r1, MICROBLAZE_EXCEPTION_FRAME_R8 + lwi r9, r1, MICROBLAZE_EXCEPTION_FRAME_R9 + lwi r10, r1, MICROBLAZE_EXCEPTION_FRAME_R10 + lwi r11, r1, MICROBLAZE_EXCEPTION_FRAME_R11 + lwi r12, r1, MICROBLAZE_EXCEPTION_FRAME_R12 + lwi r13, r1, MICROBLAZE_EXCEPTION_FRAME_R13 + lwi r14, r1, MICROBLAZE_EXCEPTION_FRAME_R14 + lwi r15, r1, MICROBLAZE_EXCEPTION_FRAME_R15 + lwi r16, r1, MICROBLAZE_EXCEPTION_FRAME_R16 + lwi r17, r1, MICROBLAZE_EXCEPTION_FRAME_R17 + lwi r18, r1, MICROBLAZE_EXCEPTION_FRAME_R18 + lwi r19, r1, MICROBLAZE_EXCEPTION_FRAME_R19 + lwi r20, r1, MICROBLAZE_EXCEPTION_FRAME_R20 + lwi r21, r1, MICROBLAZE_EXCEPTION_FRAME_R21 + lwi r22, r1, MICROBLAZE_EXCEPTION_FRAME_R22 + lwi r23, r1, MICROBLAZE_EXCEPTION_FRAME_R23 + lwi r24, r1, MICROBLAZE_EXCEPTION_FRAME_R24 + lwi r25, r1, MICROBLAZE_EXCEPTION_FRAME_R25 + lwi r26, r1, MICROBLAZE_EXCEPTION_FRAME_R26 + lwi r27, r1, MICROBLAZE_EXCEPTION_FRAME_R27 + lwi r28, r1, MICROBLAZE_EXCEPTION_FRAME_R28 + lwi r29, r1, MICROBLAZE_EXCEPTION_FRAME_R29 + lwi r30, r1, MICROBLAZE_EXCEPTION_FRAME_R30 + lwi r31, r1, MICROBLAZE_EXCEPTION_FRAME_R31 + + /* Free stack space */ + addik 
r1, r1, CPU_EXCEPTION_FRAME_SIZE + + /* Return from exception mode */ + /* Branch to BTR is handled by upper layers */ + rted r17, 0 + nop + +/* There is no dispatch version of resume from break */ +_MicroBlaze_Exception_resume_from_break: + /* Move argument to stack pointer */ + addi r1, r5, 0 + + /* Retrieve and store MSR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_MSR + mts rmsr, r3 + + /* Retrieve and store EAR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_EAR + mts rear, r3 + + /* Retrieve and store ESR */ + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_ESR + mts resr, r3 + + /* Restore program state */ + lwi r2, r1, MICROBLAZE_EXCEPTION_FRAME_R2 + lwi r3, r1, MICROBLAZE_EXCEPTION_FRAME_R3 + lwi r4, r1, MICROBLAZE_EXCEPTION_FRAME_R4 + lwi r5, r1, MICROBLAZE_EXCEPTION_FRAME_R5 + lwi r6, r1, MICROBLAZE_EXCEPTION_FRAME_R6 + lwi r7, r1, MICROBLAZE_EXCEPTION_FRAME_R7 + lwi r8, r1, MICROBLAZE_EXCEPTION_FRAME_R8 + lwi r9, r1, MICROBLAZE_EXCEPTION_FRAME_R9 + lwi r10, r1, MICROBLAZE_EXCEPTION_FRAME_R10 + lwi r11, r1, MICROBLAZE_EXCEPTION_FRAME_R11 + lwi r12, r1, MICROBLAZE_EXCEPTION_FRAME_R12 + lwi r13, r1, MICROBLAZE_EXCEPTION_FRAME_R13 + lwi r14, r1, MICROBLAZE_EXCEPTION_FRAME_R14 + lwi r15, r1, MICROBLAZE_EXCEPTION_FRAME_R15 + lwi r16, r1, MICROBLAZE_EXCEPTION_FRAME_R16 + lwi r17, r1, MICROBLAZE_EXCEPTION_FRAME_R17 + lwi r18, r1, MICROBLAZE_EXCEPTION_FRAME_R18 + lwi r19, r1, MICROBLAZE_EXCEPTION_FRAME_R19 + lwi r20, r1, MICROBLAZE_EXCEPTION_FRAME_R20 + lwi r21, r1, MICROBLAZE_EXCEPTION_FRAME_R21 + lwi r22, r1, MICROBLAZE_EXCEPTION_FRAME_R22 + lwi r23, r1, MICROBLAZE_EXCEPTION_FRAME_R23 + lwi r24, r1, MICROBLAZE_EXCEPTION_FRAME_R24 + lwi r25, r1, MICROBLAZE_EXCEPTION_FRAME_R25 + lwi r26, r1, MICROBLAZE_EXCEPTION_FRAME_R26 + lwi r27, r1, MICROBLAZE_EXCEPTION_FRAME_R27 + lwi r28, r1, MICROBLAZE_EXCEPTION_FRAME_R28 + lwi r29, r1, MICROBLAZE_EXCEPTION_FRAME_R29 + lwi r30, r1, MICROBLAZE_EXCEPTION_FRAME_R30 + lwi r31, r1, MICROBLAZE_EXCEPTION_FRAME_R31 + + /* Free stack space */ + 
addik r1, r1, CPU_EXCEPTION_FRAME_SIZE + + /* Return from debug mode */ + rtbd r16, 0 + nop diff --git a/cpukit/score/cpu/microblaze/microblaze-exception-extensions.c b/cpukit/score/cpu/microblaze/microblaze-exception-extensions.c new file mode 100644 index 0000000000..78d65106d3 --- /dev/null +++ b/cpukit/score/cpu/microblaze/microblaze-exception-extensions.c @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUMicroBlaze + * + * @brief MicroBlaze exception extensions implementation + */ + +/* + * Copyright (C) 2022 On-Line Applications Research Corporation (OAR) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/fatal.h> +#include <rtems/score/threadimpl.h> + +RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame ) +{ + /* Break in progress */ + if ( ( frame->msr & MICROBLAZE_MSR_BIP ) != 0 ) { + _MicroBlaze_Exception_resume_from_break( frame ); + } + + /* Exception in progress */ + if ( ( frame->msr & MICROBLAZE_MSR_EIP ) != 0 ) { + _MicroBlaze_Exception_resume_from_exception( frame ); + } + + /* Execution should never reach this point */ + rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame ); +} + +void _CPU_Exception_disable_thread_dispatch( void ) +{ + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + /* Increment interrupt nest and thread dispatch disable level */ + ++cpu_self->thread_dispatch_disable_level; + ++cpu_self->isr_nest_level; +} + +/* -1 means not mappable/recoverable */ +int _CPU_Exception_frame_get_signal( CPU_Exception_frame *ef ) +{ + uint32_t EC = ef->esr & MICROBLAZE_ESR_EC_MASK; + + /* Break in progress */ + if ( ( ef->msr & MICROBLAZE_MSR_BIP ) != 0 ) { + return -1; + } + + switch ( EC ) { + case 0x0: /* Stream */ + case 0x7: /* Privileged or Stack Protection */ + return -1; + + case 0x5: /* Divide */ + case 0x6: /* FPU */ + return SIGFPE; + + case 0x3: /* Instruction Abort */ + case 0x4: /* Data Abort */ + return SIGSEGV; + + case 0x1: /* Unaligned access */ + case 0x2: /* Illegal op-code */ + default: + return SIGILL; + } +} + +void _CPU_Exception_frame_set_resume( CPU_Exception_frame *ef, void *address ) +{ + /* Break in progress */ + if ( ( ef->msr & MICROBLAZE_MSR_BIP ) != 0 ) { + ef->r16 = address; + return; + } + + /* Exception in progress */ + if ( ( ef->msr & MICROBLAZE_MSR_EIP ) != 0 ) { + ef->r17 = address; + return; + } + + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + /* Interrupt in progress must be determined by stack pointer location */ + if ( + ef->r1 >= (uint32_t) cpu_self->interrupt_stack_low + && ef->r1 < (uint32_t) 
cpu_self->interrupt_stack_high + ) { + ef->r14 = address; + return; + } + + /* Default to normal link register */ + ef->r15 = address; +} + +/* + * This returns the target return address, not necessarily the address of the + * instruction that caused exception. These are the same if it's a MMU exception + * and the BTR overrides the return address if the exception occurred in a delay + * slot. */ +uint32_t *_MicroBlaze_Get_return_address( CPU_Exception_frame *ef ) +{ + /* Break in progress */ + if ( ( ef->msr & MICROBLAZE_MSR_BIP ) != 0 ) { + return ef->r16; + } + + /* Exception in progress */ + if ( ( ef->msr & MICROBLAZE_MSR_EIP ) != 0 ) { + if ( ( ef->esr & MICROBLAZE_ESR_DS ) != 0 ) { + return ef->btr; + } + + return ef->r17; + } + + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + /* Interrupt in progress must be determined by stack pointer location */ + if ( + ef->r1 >= (uint32_t) cpu_self->interrupt_stack_low + && ef->r1 < (uint32_t) cpu_self->interrupt_stack_high + ) { + return ef->r14; + } + + /* Default to normal link register */ + return ef->r15; +} + +/* + * This can only change the resume address in the case of an exception in a + * branch delay slot instruction. + */ +void _CPU_Exception_frame_make_resume_next_instruction( + CPU_Exception_frame *ef +) +{ + uintptr_t ret_addr = (uintptr_t) _MicroBlaze_Get_return_address( ef ); + + _CPU_Exception_frame_set_resume( ef, (uint32_t *) ret_addr ); +} diff --git a/cpukit/score/cpu/mips/cpu.c b/cpukit/score/cpu/mips/cpu.c index c8cf960db5..526f7db13f 100644 --- a/cpukit/score/cpu/mips/cpu.c +++ b/cpukit/score/cpu/mips/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -41,15 +43,33 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #if CPU_HARDWARE_FP @@ -93,6 +113,18 @@ void _CPU_Initialize(void) #endif } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + + while ( true ) { + /* Do nothing */ + } +} + uint32_t _CPU_ISR_Get_level( void ) { unsigned int sr; diff --git a/cpukit/score/cpu/mips/cpu_asm.S b/cpukit/score/cpu/mips/cpu_asm.S index 5692af22d7..b3aefebbe0 100644 --- a/cpukit/score/cpu/mips/cpu_asm.S +++ b/cpukit/score/cpu/mips/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * This file contains the basic algorithms for all assembly code used * in an specific CPU port of RTEMS. These algorithms must be implemented @@ -43,9 +45,26 @@ * COPYRIGHT (c) 1989-2002. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -750,7 +769,7 @@ _ISR_Handler_Exception: ** Note, if the exception vector returns, rely on it to have ** adjusted EPC so we will return to some correct address. If ** this is not done, we might get stuck in an infinite loop because - ** we'll return to the instruction where the exception occured and + ** we'll return to the instruction where the exception occurred and ** it could throw again. ** ** It is expected the only code using the exception processing is @@ -776,7 +795,7 @@ _ISR_Handler_Exception: beqz t4,excnodelay NOP - * it did, now see if the branch occured or not * + * it did, now see if the branch occurred or not * li t3,CAUSE_BT AND t4,t1,t3 beqz t4,excnobranch diff --git a/cpukit/score/cpu/mips/headers.am b/cpukit/score/cpu/mips/headers.am deleted file mode 100644 index 3084000f09..0000000000 --- a/cpukit/score/cpu/mips/headers.am +++ /dev/null @@ -1,9 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_machine_HEADERS += score/cpu/mips/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/mips/include/rtems/asm.h -include_rtems_mips_HEADERS += score/cpu/mips/include/rtems/mips/idtcpu.h -include_rtems_mips_HEADERS += score/cpu/mips/include/rtems/mips/iregdef.h -include_rtems_score_HEADERS += score/cpu/mips/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/mips/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/mips/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/mips/include/rtems/score/mips.h diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpu.h b/cpukit/score/cpu/mips/include/rtems/score/cpu.h index cdb0e64013..447a384c88 100644 --- a/cpukit/score/cpu/mips/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/mips/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -36,9 +38,26 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -583,7 +602,7 @@ uint32_t mips_interrupt_mask( void ); _xlevel = _scratch2; \ } while(0) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & SR_INTERRUPT_ENABLE_BITS ) != 0; } @@ -710,23 +729,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/* - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - */ - -#define _CPU_Fatal_halt( _source, _error ) \ - do { \ - unsigned int _level; \ - _CPU_ISR_Disable(_level); \ - (void)_level; \ - loop: goto loop; \ - } while (0) - - extern void mips_break( int error ); #define CPU_USE_GENERIC_BITFIELD_CODE TRUE @@ -833,14 +835,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. 
*/ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/mips/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/mips/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h index 932784fc8f..98ed1c492f 100644 --- a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,34 +48,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word -1" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } 
#endif diff --git a/cpukit/score/cpu/mips/include/rtems/score/mips.h b/cpukit/score/cpu/mips/include/rtems/score/mips.h index d3dccb1c54..db9f119178 100644 --- a/cpukit/score/cpu/mips/include/rtems/score/mips.h +++ b/cpukit/score/cpu/mips/include/rtems/score/mips.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -15,9 +17,26 @@ * COPYRIGHT (c) 1989-2001. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_MIPS_H diff --git a/cpukit/score/cpu/moxie/cpu.c b/cpukit/score/cpu/moxie/cpu.c index 5522890c59..a3d1005ad8 100644 --- a/cpukit/score/cpu/moxie/cpu.c +++ b/cpukit/score/cpu/moxie/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Moxie CPU Dependent Source * @@ -8,16 +10,35 @@ * COPYRIGHT (c) 1989-1999, 2010. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> +#include <rtems/bspIo.h> /* _CPU_Initialize * @@ -37,6 +58,18 @@ void _CPU_Initialize(void) /* FP context initialization support goes here */ } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + + while ( true ) { + /* Do nothing */ + } +} + /* * _CPU_ISR_Get_level * diff --git a/cpukit/score/cpu/moxie/cpu_asm.S b/cpukit/score/cpu/moxie/cpu_asm.S index 20842149ca..bcf0394475 100644 --- a/cpukit/score/cpu/moxie/cpu_asm.S +++ b/cpukit/score/cpu/moxie/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Moxie CPU functions * Copyright (C) 2011 Anthony Green @@ -7,9 +9,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/moxie/headers.am b/cpukit/score/cpu/moxie/headers.am deleted file mode 100644 index 6324166c86..0000000000 --- a/cpukit/score/cpu/moxie/headers.am +++ /dev/null @@ -1,7 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/moxie/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/moxie/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/moxie/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/moxie/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/moxie/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/moxie/include/rtems/score/moxie.h diff --git a/cpukit/score/cpu/moxie/include/rtems/asm.h b/cpukit/score/cpu/moxie/include/rtems/asm.h index f328c1ce33..79e07b19cf 100644 --- a/cpukit/score/cpu/moxie/include/rtems/asm.h +++ b/cpukit/score/cpu/moxie/include/rtems/asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -25,9 +27,26 @@ * COPYRIGHT (c) 1989-1999, 2010. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * * $Id: asm.h,v 1.9 2010/06/29 00:31:09 joel Exp $ */ diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h index c857734a2e..cc1900a852 100644 --- a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -14,9 +16,26 @@ * COPYRIGHT (c) 1989-2006, 2010. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -209,7 +228,7 @@ typedef struct { * * XXX */ -#define CPU_STACK_MINIMUM_SIZE (1536) +#define CPU_STACK_MINIMUM_SIZE (2048) /** * Size of a pointer. @@ -256,11 +275,6 @@ typedef struct { */ /* - * Support routine to initialize the RTEMS vector table after it is allocated. - */ -#define _CPU_Initialize_vectors() - -/* * Disable all interrupts for an RTEMS critical section. The previous * level is returned in _level. 
* @@ -303,7 +317,7 @@ typedef struct { _CPU_ISR_Disable( _isr_cookie ); \ } while (0) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return true; } @@ -396,23 +410,6 @@ uint32_t _CPU_ISR_Get_level( void ); /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/* - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - * - * MOXIE Specific Information: - * - * XXX - */ -#define _CPU_Fatal_halt( _source, _error ) \ - printk("Fatal Error %d.%lu Halted\n",_source,_error); \ - for(;;) - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE @@ -535,14 +532,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/moxie/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/moxie/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h index 9c81cb6193..44c70cc56d 100644 --- a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,34 +48,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/moxie/include/rtems/score/moxie.h b/cpukit/score/cpu/moxie/include/rtems/score/moxie.h index 992f599033..34a6187ed6 100644 --- a/cpukit/score/cpu/moxie/include/rtems/score/moxie.h +++ 
b/cpukit/score/cpu/moxie/include/rtems/score/moxie.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file */ @@ -12,9 +14,26 @@ * COPYRIGHT (c) 1989-1999, 2010. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_MOXIE_H diff --git a/cpukit/score/cpu/moxie/moxie-exception-frame-print.c b/cpukit/score/cpu/moxie/moxie-exception-frame-print.c index 4dac3f8c91..67a7496b72 100644 --- a/cpukit/score/cpu/moxie/moxie-exception-frame-print.c +++ b/cpukit/score/cpu/moxie/moxie-exception-frame-print.c @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Copyright (c) 2013 Anthony Green * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/headers.am b/cpukit/score/cpu/nios2/headers.am deleted file mode 100644 index 253f64d03a..0000000000 --- a/cpukit/score/cpu/nios2/headers.am +++ /dev/null @@ -1,10 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/nios2/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/nios2/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/nios2-count-zeros.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/nios2-utility.h -include_rtems_score_HEADERS += score/cpu/nios2/include/rtems/score/nios2.h diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h index 8caec19b5b..a58e7bca06 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -7,16 +9,33 @@ */ /* - * Copyright (c) 2011 embedded brains GmbH + * Copyright (c) 2011 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * COPYRIGHT (c) 1989-2004. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -172,8 +191,6 @@ typedef struct { uint32_t ipending; } CPU_Exception_frame; -#define _CPU_Initialize_vectors() - /** * @brief Macro to disable interrupts. * @@ -292,8 +309,6 @@ void _CPU_Context_Initialize( #define _CPU_Context_Restart_self( _the_context ) \ _CPU_Context_restore( (_the_context) ); -RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t _source, uint32_t _error ); - /** * @brief CPU initialization. */ @@ -338,14 +353,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. 
*/ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/nios2/include/rtems/score/cpu_asm.h index 81a19c8d69..fb80c7ed52 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/cpu_asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -12,9 +14,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
* */ diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/nios2/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/nios2/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h index e5f326eeb7..3c9c6734ec 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,26 +48,49 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + register uint32_t r23 __asm__( "r23" ); + + r23 = context->r23; + + /* Make sure that the register assignment is not optimized away */ + __asm__ volatile ( "" : : "r" ( r23 ) ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->r23; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/nios2/include/rtems/score/nios2-count-zeros.h 
b/cpukit/score/cpu/nios2/include/rtems/score/nios2-count-zeros.h index bf2390a51f..e0412412c6 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/nios2-count-zeros.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/nios2-count-zeros.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Author: Jeffrey O. Hill * @@ -11,9 +13,26 @@ * WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR * THE USE OF THIS SOFTWARE. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _NIOS2_COUNT_ZEROS_H diff --git a/cpukit/score/cpu/nios2/include/rtems/score/nios2-utility.h b/cpukit/score/cpu/nios2/include/rtems/score/nios2-utility.h index 9367f73315..811c1affe0 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/nios2-utility.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/nios2-utility.h @@ -1,20 +1,33 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * * @brief NIOS II Utility */ /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_NIOS2_UTILITY_H diff --git a/cpukit/score/cpu/nios2/include/rtems/score/nios2.h b/cpukit/score/cpu/nios2/include/rtems/score/nios2.h index 26d76bcbfa..620a3bf3ba 100644 --- a/cpukit/score/cpu/nios2/include/rtems/score/nios2.h +++ b/cpukit/score/cpu/nios2/include/rtems/score/nios2.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -14,9 +16,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/cpukit/score/cpu/nios2/nios2-context-initialize.c b/cpukit/score/cpu/nios2/nios2-context-initialize.c index 6950ce4bd6..dc96855fbf 100644 --- a/cpukit/score/cpu/nios2/nios2-context-initialize.c +++ b/cpukit/score/cpu/nios2/nios2-context-initialize.c @@ -1,14 +1,33 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011, 2021 embedded brains GmbH + * Copyright (C) 2011, 2021 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -71,9 +90,6 @@ void _CPU_Context_Initialize( } if ( tls_area != NULL ) { - context->r23 = (uintptr_t) tls_area + - _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment ) + - 0x7000; - _TLS_TCB_before_TLS_block_initialize( tls_area ); + context->r23 = (uintptr_t) _TLS_Initialize_area( tls_area ) + 0x7000; } } diff --git a/cpukit/score/cpu/nios2/nios2-context-switch.S b/cpukit/score/cpu/nios2/nios2-context-switch.S index 3792f693da..687d8301c6 100644 --- a/cpukit/score/cpu/nios2/nios2-context-switch.S +++ b/cpukit/score/cpu/nios2/nios2-context-switch.S @@ -1,14 +1,33 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH + * Copyright (c) 2011 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * Derived from no_cpu/cpu_asm.S, copyright (c) 1989-1999, * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-context-validate.S b/cpukit/score/cpu/nios2/nios2-context-validate.S index 00dcc6a2f3..a7fb6e646d 100644 --- a/cpukit/score/cpu/nios2/nios2-context-validate.S +++ b/cpukit/score/cpu/nios2/nios2-context-validate.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-context-volatile-clobber.S b/cpukit/score/cpu/nios2/nios2-context-volatile-clobber.S index e1c52d62b7..a5e181c3df 100644 --- a/cpukit/score/cpu/nios2/nios2-context-volatile-clobber.S +++ b/cpukit/score/cpu/nios2/nios2-context-volatile-clobber.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013 embedded brains GmbH. All rights reserved. 
+ * Copyright (c) 2013 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-eic-il-low-level.S b/cpukit/score/cpu/nios2/nios2-eic-il-low-level.S index fa4d1fb8f8..660c0add39 100644 --- a/cpukit/score/cpu/nios2/nios2-eic-il-low-level.S +++ b/cpukit/score/cpu/nios2/nios2-eic-il-low-level.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011-2015 embedded brains GmbH. All rights reserved. + * Copyright (C) 2011, 2015 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/score/percpu.h> diff --git a/cpukit/score/cpu/nios2/nios2-exception-frame-print.c b/cpukit/score/cpu/nios2/nios2-exception-frame-print.c index e54eb60e9f..ba629fd073 100644 --- a/cpukit/score/cpu/nios2/nios2-exception-frame-print.c +++ b/cpukit/score/cpu/nios2/nios2-exception-frame-print.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-fatal-halt.c b/cpukit/score/cpu/nios2/nios2-fatal-halt.c index 40cae874c0..3fa43d630d 100644 --- a/cpukit/score/cpu/nios2/nios2-fatal-halt.c +++ b/cpukit/score/cpu/nios2/nios2-fatal-halt.c @@ -1,20 +1,39 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH + * Copyright (c) 2011 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * COPYRIGHT (c) 1989-2004. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ -#include <rtems/score/cpu.h> +#include <rtems/score/cpuimpl.h> #include <rtems/score/nios2-utility.h> -void _CPU_Fatal_halt( uint32_t _source, uint32_t _error ) +void _CPU_Fatal_halt( uint32_t _source, CPU_Uint32ptr _error ) { /* write 0 to status register (disable interrupts) */ __builtin_wrctl( NIOS2_CTLREG_INDEX_STATUS, 0 ); diff --git a/cpukit/score/cpu/nios2/nios2-iic-irq.c b/cpukit/score/cpu/nios2/nios2-iic-irq.c index 665649f9d8..8215e16a4c 100644 --- a/cpukit/score/cpu/nios2/nios2-iic-irq.c +++ b/cpukit/score/cpu/nios2/nios2-iic-irq.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,9 +12,26 @@ * COPYRIGHT (c) 1989-2007. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -41,7 +60,7 @@ void __Exception_Handler(CPU_Exception_frame *efr); register unsigned long *stack_ptr __asm__ ("sp"); -RTEMS_INLINE_ROUTINE void __IIC_Handler(void) +static inline void __IIC_Handler(void) { uint32_t active; uint32_t mask; diff --git a/cpukit/score/cpu/nios2/nios2-iic-low-level.S b/cpukit/score/cpu/nios2/nios2-iic-low-level.S index a3d138883b..b0b914f5c8 100644 --- a/cpukit/score/cpu/nios2/nios2-iic-low-level.S +++ b/cpukit/score/cpu/nios2/nios2-iic-low-level.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * This file contains all assembly code for the * NIOS2 implementation of RTEMS. 
@@ -7,9 +9,26 @@ * Derived from no_cpu/cpu_asm.S, copyright (c) 1989-1999, * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ @@ -137,7 +156,7 @@ stuck_in_exception: /* * Restore the saved registers, so that all general purpose registers - * have been restored to their state at the time the interrupt occured. + * have been restored to their state at the time the interrupt occurred. 
*/ ldw r1, 0(sp) @@ -277,7 +296,7 @@ _ISR_Handler: /* * Restore the saved registers, so that all general purpose registers - * have been restored to their state at the time the interrupt occured. + * have been restored to their state at the time the interrupt occurred. */ ldw r1, 0(sp) diff --git a/cpukit/score/cpu/nios2/nios2-initialize.c b/cpukit/score/cpu/nios2/nios2-initialize.c index 4de6dab30b..1c8d97d565 100644 --- a/cpukit/score/cpu/nios2/nios2-initialize.c +++ b/cpukit/score/cpu/nios2/nios2-initialize.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,9 +12,26 @@ * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-isr-get-level.c b/cpukit/score/cpu/nios2/nios2-isr-get-level.c index cb8be662e9..7b15ac93f5 100644 --- a/cpukit/score/cpu/nios2/nios2-isr-get-level.c +++ b/cpukit/score/cpu/nios2/nios2-isr-get-level.c @@ -1,14 +1,33 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011, 2016 embedded brains GmbH + * Copyright (C) 2011, 2016 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-isr-install-vector.c b/cpukit/score/cpu/nios2/nios2-isr-install-vector.c index 7ffaf2ad4e..bb6fa67734 100644 --- a/cpukit/score/cpu/nios2/nios2-isr-install-vector.c +++ b/cpukit/score/cpu/nios2/nios2-isr-install-vector.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,9 +12,26 @@ * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-isr-is-in-progress.c b/cpukit/score/cpu/nios2/nios2-isr-is-in-progress.c index cb6ddb8c6a..5822ebf1f8 100644 --- a/cpukit/score/cpu/nios2/nios2-isr-is-in-progress.c +++ b/cpukit/score/cpu/nios2/nios2-isr-is-in-progress.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-isr-set-level.c b/cpukit/score/cpu/nios2/nios2-isr-set-level.c index 74f1ce0cf8..6211546585 100644 --- a/cpukit/score/cpu/nios2/nios2-isr-set-level.c +++ b/cpukit/score/cpu/nios2/nios2-isr-set-level.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,16 +7,33 @@ */ /* - * Copyright (c) 2011 embedded brains GmbH + * Copyright (c) 2011 embedded brains GmbH & Co. KG * * Copyright (c) 2006 Kolja Waschk (rtemsdev/ixo.de) * * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-mpu-add-region.c b/cpukit/score/cpu/nios2/nios2-mpu-add-region.c index db74cd6747..dd04aa0a97 100644 --- a/cpukit/score/cpu/nios2/nios2-mpu-add-region.c +++ b/cpukit/score/cpu/nios2/nios2-mpu-add-region.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-mpu-configuration.c b/cpukit/score/cpu/nios2/nios2-mpu-configuration.c index b77ee5fb0e..640bbb1223 100644 --- a/cpukit/score/cpu/nios2/nios2-mpu-configuration.c +++ b/cpukit/score/cpu/nios2/nios2-mpu-configuration.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 
30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-mpu-descriptor.c b/cpukit/score/cpu/nios2/nios2-mpu-descriptor.c index 9f85becb7c..9076a065d9 100644 --- a/cpukit/score/cpu/nios2/nios2-mpu-descriptor.c +++ b/cpukit/score/cpu/nios2/nios2-mpu-descriptor.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,17 +7,28 @@ */ /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. 
+ * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-mpu-disable-protected.c b/cpukit/score/cpu/nios2/nios2-mpu-disable-protected.c index 879105865f..f1ed73b495 100644 --- a/cpukit/score/cpu/nios2/nios2-mpu-disable-protected.c +++ b/cpukit/score/cpu/nios2/nios2-mpu-disable-protected.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,17 +7,28 @@ */ /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-mpu-reset.c b/cpukit/score/cpu/nios2/nios2-mpu-reset.c index 2e5a19c1e2..85dbfb338b 100644 --- a/cpukit/score/cpu/nios2/nios2-mpu-reset.c +++ b/cpukit/score/cpu/nios2/nios2-mpu-reset.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/nios2/nios2-thread-dispatch-disabled.c b/cpukit/score/cpu/nios2/nios2-thread-dispatch-disabled.c index 8d1e3ee9b8..56efc0e10d 100644 --- a/cpukit/score/cpu/nios2/nios2-thread-dispatch-disabled.c +++ b/cpukit/score/cpu/nios2/nios2-thread-dispatch-disabled.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,17 +7,28 @@ */ /* - * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * Copyright (c) 2011 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/no_cpu/cpu.c b/cpukit/score/cpu/no_cpu/cpu.c index 34e1c99a61..8d3a17d88e 100644 --- a/cpukit/score/cpu/no_cpu/cpu.c +++ b/cpukit/score/cpu/no_cpu/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -8,9 +10,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/no_cpu/cpu_asm.c b/cpukit/score/cpu/no_cpu/cpu_asm.c index 1583cb4a40..36474b28a5 100644 --- a/cpukit/score/cpu/no_cpu/cpu_asm.c +++ b/cpukit/score/cpu/no_cpu/cpu_asm.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -15,9 +17,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ /* diff --git a/cpukit/score/cpu/no_cpu/cpucounterfrequency.c b/cpukit/score/cpu/no_cpu/cpucounterfrequency.c index bbb8c127fe..96a4078889 100644 --- a/cpukit/score/cpu/no_cpu/cpucounterfrequency.c +++ b/cpukit/score/cpu/no_cpu/cpucounterfrequency.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2018 embedded brains GmbH. All rights reserved. + * Copyright (c) 2018 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/score/cpu.h> diff --git a/cpukit/score/cpu/no_cpu/cpucounterread.c b/cpukit/score/cpu/no_cpu/cpucounterread.c index b5dc02a40f..f4e6e77fc1 100644 --- a/cpukit/score/cpu/no_cpu/cpucounterread.c +++ b/cpukit/score/cpu/no_cpu/cpucounterread.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2014 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/score/cpu.h> diff --git a/cpukit/score/cpu/no_cpu/cpuidle.c b/cpukit/score/cpu/no_cpu/cpuidle.c index de66b39faa..dbaf109905 100644 --- a/cpukit/score/cpu/no_cpu/cpuidle.c +++ b/cpukit/score/cpu/no_cpu/cpuidle.c @@ -1,15 +1,37 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPU + * + * @brief This source file contains the implementation of the + * _CPU_Thread_Idle_body(). + */ + /* - * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * Copyright (C) 2013, 2014 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H @@ -20,6 +42,13 @@ void *_CPU_Thread_Idle_body( uintptr_t ignored ) { + /* + * This is a workaround for: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108658 + */ + __asm__ volatile (""); + while ( true ) { /* Do nothing */ } diff --git a/cpukit/score/cpu/no_cpu/headers.am b/cpukit/score/cpu/no_cpu/headers.am deleted file mode 100644 index 6a8c54df42..0000000000 --- a/cpukit/score/cpu/no_cpu/headers.am +++ /dev/null @@ -1,6 +0,0 @@ -## This file was generated by "./boostrap -H". -include_rtems_HEADERS += score/cpu/no_cpu/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/no_cpu/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/no_cpu/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/no_cpu/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/no_cpu/include/rtems/score/no_cpu.h diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h index c067501502..9ef6f43eb9 100644 --- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -28,9 +30,26 @@ * COPYRIGHT (c) 1989-2008. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -535,7 +554,8 @@ extern Context_Control_fp _CPU_Null_fp_context; * @addtogroup RTEMSScoreCPUExampleContext * * Should be large enough to run all RTEMS tests. This ensures - * that a "reasonable" small application should not have any problems. + * that a "reasonable" small application should not have any problems. The + * size shall be a power of two. * * Port Specific Information: * @@ -618,17 +638,6 @@ extern Context_Control_fp _CPU_Null_fp_context; /** * @addtogroup RTEMSScoreCPUExampleInterrupt * - * Support routine to initialize the RTEMS vector table after it is allocated. - * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Initialize_vectors() - -/** - * @addtogroup RTEMSScoreCPUExampleInterrupt - * * Disable all interrupts for an RTEMS critical section. The previous * level is returned in @a _isr_cookie. * @@ -687,7 +696,7 @@ extern Context_Control_fp _CPU_Null_fp_context; * @retval true Interrupts are enabled in the ISR level. * @retval false Otherwise. 
*/ -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return false; } @@ -835,23 +844,6 @@ uint32_t _CPU_ISR_Get_level( void ); /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/** - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - * - * Port Specific Information: - * - * XXX document implementation including references if appropriate - */ -#define _CPU_Fatal_halt( _source, _error ) \ - { \ - } - -/* end of Fatal Error manager macros */ - /* Bitfield handler macros */ /** @@ -1054,23 +1046,47 @@ void _CPU_ISR_install_vector( void *_CPU_Thread_Idle_body( uintptr_t ignored ); /** + * @brief Performs a context switch from the executing thread to the heir + * thread. + * * @addtogroup RTEMSScoreCPUExampleContext * - * This routine switches from the run context to the heir context. + * This routine switches from the executing context to the heir context. + * + * @param[out] executing points to the context of the currently executing task. * - * @param[in] run points to the context of the currently executing task - * @param[in] heir points to the context of the heir task + * @param[in, out] heir points to the context of the heir task. * * Port Specific Information: * * XXX document implementation including references if appropriate */ void _CPU_Context_switch( - Context_Control *run, + Context_Control *executing, Context_Control *heir ); /** + * @brief Performs a context switch from the executing thread to the heir + * thread and does not return. + * + * @addtogroup RTEMSScoreCPUExampleContext + * + * This routine shall be a strong alias to _CPU_Context_switch(). It shall be + * provided for all target architectures which support an SMP build + * configuration (RTEMS_SMP). 
The purpose is help to compiler to avoid + * generation of dead code in _Thread_Start_multitasking(). + * + * @param[out] executing points to the context of the currently executing task. + * + * @param[in, out] heir points to the context of the heir task. + */ +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + +/** * @addtogroup RTEMSScoreCPUExampleContext * * This routine is generally used only to restart self in an @@ -1146,6 +1162,69 @@ typedef struct { */ void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ); +#ifdef RTEMS_EXCEPTION_EXTENSIONS + /** + * @brief Resumes normal execution using the provided exception frame. + * + * This routine helps to avoid dead code in the exception handler epilogue and + * does not return. This routine may assume that the provided pointer is valid + * for resetting the exception stack. + * + * @param frame The CPU_Exception_frame describing the machine exception. + */ + RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame ); + + /** + * @brief Performs thread dispatch and resumes normal execution. + * + * This routine helps to avoid dead code in the exception handler epilogue and + * does not return. This routine may assume that the provided pointer is valid + * for resetting the exception stack. This function is expected to decrement + * the ISR nest level and thread dispatch disable level in the Per_CPU_Control + * structure. + * + * @param frame The CPU_Exception_frame describing the machine + * exception. + */ + RTEMS_NO_RETURN void _CPU_Exception_dispatch_and_resume( + CPU_Exception_frame *frame + ); + + /** + * @brief Disables thread dispatch. + * + * This must be called before calling _CPU_Exception_dispatch_and_resume + * since that function is expected to reduce the levels incremented below. 
+ */ + void _CPU_Exception_disable_thread_dispatch( void ); + + /** + * @brief Retrieves the generic exception class of the machine exception. + * + * @param frame The CPU_Exception_frame describing the machine + * exception. + * @return The signal associated with the CPU_Exception_frame. + */ + int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame ); + + /** + * @brief Sets the execution address of the exception frame. + * + * @param frame The CPU_Exception_frame describing the machine exception. + * @param address The address at which execution should resume. + */ + void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame, void *address ); + + /** + * @brief Sets the execution address of the exception frame to the next + * instruction. + * + * @param frame The CPU_Exception_frame describing the machine + * exception. + */ + void _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *frame ); +#endif + /** * @defgroup RTEMSScoreCPUExampleCPUEndian CPUEndian * @@ -1215,44 +1294,27 @@ static inline uint32_t CPU_swap_u32( typedef uint32_t CPU_Counter_ticks; /** - * @brief Returns the current CPU counter frequency in Hz. + * @brief Gets the current CPU counter frequency in Hz. * - * @return The current CPU counter frequency in Hz. + * @return Returns the current CPU counter frequency in Hz. */ uint32_t _CPU_Counter_frequency( void ); /** - * @brief Returns the current CPU counter value. + * @brief Gets the current CPU counter value. * - * A CPU counter is some free-running counter. It ticks usually with a - * frequency close to the CPU or system bus clock. The board support package - * must ensure that this function works before the RTEMS initialization. - * Otherwise invalid profiling statistics will be gathered. + * A CPU counter should be some monotonically increasing free-running counter. + * It ticks usually with a frequency close to the CPU or system bus clock. 
The + * counter should not be affected by power saving states so that it can be used + * for timestamps. The CPU counter should be initialized at the + * RTEMS_SYSINIT_CPU_COUNTER initialization step if necessary. If + * RTEMS_PROFILING is enabled, the CPU counter may have to work very early in + * the system initialization to avoid invalid profiling statistics. * - * @return The current CPU counter value. + * @return Returns the current CPU counter value. */ CPU_Counter_ticks _CPU_Counter_read( void ); -/** - * @brief Returns the difference between the second and first CPU counter - * value. - * - * This operation may be carried out as a modulo operation depending on the - * range of the CPU counter device. - * - * @param[in] second The second CPU counter value. - * @param[in] first The first CPU counter value. - * - * @return Returns second minus first modulo counter period. - */ -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - #ifdef RTEMS_SMP /** * @brief Performs CPU specific SMP initialization in the context of the boot @@ -1335,35 +1397,6 @@ static inline CPU_Counter_ticks _CPU_Counter_difference( void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); /** - * @brief Broadcasts a processor event. - * - * Some architectures provide a low-level synchronization primitive for - * processors in a multi-processor environment. Processors waiting for this - * event may go into a low-power state and stop generating system bus - * transactions. This function must ensure that preceding store operations - * can be observed by other processors. - * - * @see _CPU_SMP_Processor_event_receive(). - */ - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - /** - * @brief Receives a processor event. - * - * This function will wait for the processor event and may wait forever if no - * such event arrives. 
- * - * @see _CPU_SMP_Processor_event_broadcast(). - */ - static inline void _CPU_SMP_Processor_event_receive( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - /** * @brief Gets the is executing indicator of the thread context. * * @param[in] context The context. diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu_asm.h index 747fd3a1c1..222b01982f 100644 --- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu_asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -14,9 +16,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h index c8c86f4ec7..61f1ab7ba5 100644 --- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013, 2016 embedded brains GmbH - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -35,6 +54,24 @@ */ #define CPU_PER_CPU_CONTROL_SIZE 0 +/** + * @brief Defines the thread-local storage (TLS) variant. + * + * Use one of the following values: + * + * 10: The architecture uses Variant I and the TLS offsets emitted by the + * linker neglect the TCB (examples: nios2, m68k, microblaze, powerpc, + * riscv). The thread pointer directly references the thread-local data + * area. + * + * 11: The architecture uses Variant I and the TLS offsets emitted by the + * linker take the TCB into account (examples: arm, aarch64). + * The thread pointer references the TCB. + * + * 20: The architecture uses Variant II (examples: i386, sparc). + */ +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus @@ -84,6 +121,19 @@ register struct Per_CPU_Control *_CPU_Per_CPU_current asm( "rX" ); #define _CPU_Get_thread_executing() ( _CPU_Per_CPU_current->executing ) /** + * This routine copies _error into a known place -- typically a stack + * location or a register, optionally disables interrupts, and + * halts/stops the CPU. 
+ * + * Port Specific Information: + * + * XXX document implementation including references if appropriate + */ +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +/* end of Fatal Error manager macros */ + +/** * @addtogroup RTEMSScoreCPUExampleContext * * @brief Clobbers all volatile registers with values derived from the pattern @@ -119,7 +169,7 @@ void _CPU_Context_validate( uintptr_t pattern ); * * This function is used only in test sptests/spfatal26. */ -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } @@ -129,11 +179,44 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) * * This function is used only in test sptests/spcache01. */ -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +/** + * @brief Uses the thread-local storage area of the context. + * + * Some architectures may use dedicated registers to reference the thread-local + * storage area of the associated thread. This function should set these + * registers to the values defined by the specified processor context. + * + * @param context is the processor context defining the thread-local storage + * area to use. + */ +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +/** + * @brief Gets the thread pointer of the context. + * + * The thread pointer is used to get the address of thread-local storage + * objects associated with a thread. + * + * @param context is the processor context containing the thread pointer. 
+ */ +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/no_cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/no_cpu.h index d1bbceccc0..23ad58a1bc 100644 --- a/cpukit/score/cpu/no_cpu/include/rtems/score/no_cpu.h +++ b/cpukit/score/cpu/no_cpu/include/rtems/score/no_cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* no_cpu.h * * This file sets up basic CPU dependency settings based on @@ -9,9 +11,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/cpukit/score/cpu/or1k/cpu.c b/cpukit/score/cpu/or1k/cpu.c index 79cb76812f..507e1d7104 100644 --- a/cpukit/score/cpu/or1k/cpu.c +++ b/cpukit/score/cpu/or1k/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Opencore OR1K CPU Dependent Source * @@ -5,14 +7,31 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> -#include <rtems/score/cpu.h> /* bsp_start_vector_table_begin is the start address of the vector table * containing addresses to ISR Handlers. It's defined at the BSP linkcmds @@ -28,6 +47,22 @@ void _CPU_Initialize(void) /* Do nothing */ } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + + _OR1KSIM_CPU_Halt(); + + while ( true ) { + /* Do nothing */ + } +} + +/* end of Fatal Error manager macros */ + /** * @brief Sets the hardware interrupt level by the level value. * diff --git a/cpukit/score/cpu/or1k/headers.am b/cpukit/score/cpu/or1k/headers.am deleted file mode 100644 index e65d66464d..0000000000 --- a/cpukit/score/cpu/or1k/headers.am +++ /dev/null @@ -1,8 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_rtems_HEADERS += score/cpu/or1k/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/or1k-utility.h -include_rtems_score_HEADERS += score/cpu/or1k/include/rtems/score/or1k.h diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h index 22acfd590e..ce1aa301b3 100644 --- a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -12,9 +14,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * * This file adapted from no_cpu example of the RTEMS distribution. * The body has been modified for the Opencores OR1k implementation by @@ -244,16 +263,6 @@ typedef Context_Control CPU_Interrupt_frame; /* ISR handler macros */ /* - * Support routine to initialize the RTEMS vector table after it is allocated. - * - * NO_CPU Specific Information: - * - * XXX document implementation including references if appropriate - */ - -#define _CPU_Initialize_vectors() - -/* * Disable all interrupts for an RTEMS critical section. The previous * level is returned in _level. * @@ -307,7 +316,7 @@ static inline void or1k_interrupt_enable(uint32_t level) _OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \ } while(0) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & CPU_OR1K_SPR_SR ) != 0; } @@ -400,23 +409,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/* - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. 
- * - */ - -#include <inttypes.h> - -#define _CPU_Fatal_halt(_source, _error ) \ - _OR1KSIM_CPU_Halt(); \ - for(;;) - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE @@ -476,7 +468,7 @@ void _CPU_ISR_install_raw_handler( typedef void ( *CPU_ISR_handler )( uint32_t ); -RTEMS_INLINE_ROUTINE void _CPU_ISR_install_vector( +static inline void _CPU_ISR_install_vector( uint32_t vector, CPU_ISR_handler new_handler, CPU_ISR_handler *old_handler @@ -586,14 +578,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/or1k/include/rtems/score/cpu_asm.h index a5659f35ce..601508c153 100644 --- a/cpukit/score/cpu/or1k/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/or1k/include/rtems/score/cpu_asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -12,9 +14,26 @@ * COPYRIGHT (c) 1989-1999. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/or1k/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/or1k/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h index 76da91fdec..9b58b1b77a 100644 --- a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. 
KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,26 +48,45 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "l.nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h index 98bbe41b00..58db24fbbd 100644 --- a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h +++ b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -6,9 +8,26 @@ /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_OR1K_UTILITY_H @@ -326,7 +345,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg) { uint32_t spr_value; - asm volatile ( + __asm__ volatile ( "l.mfspr %0, %1, 0;\n\t" : "=r" (spr_value) : "r" (reg)); @@ -335,7 +354,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg) static inline void _OR1K_mtspr(uint32_t reg, uint32_t value) { - asm volatile ( + __asm__ volatile ( "l.mtspr %1, %0, 0;\n\t" :: "r" (value), "r" (reg) ); @@ -367,12 +386,12 @@ static inline void _OR1K_mtspr(uint32_t reg, uint32_t value) static inline void _OR1K_Sync_mem( void ) { - asm volatile("l.msync"); + __asm__ volatile("l.msync"); } static inline void _OR1K_Sync_pipeline( void ) { - asm volatile("l.psync"); + __asm__ volatile("l.psync"); } /** @@ -383,7 +402,7 @@ static inline void _OR1K_Sync_pipeline( void ) * */ #define _OR1KSIM_CPU_Halt() \ - asm volatile ("l.nop 0xc") + __asm__ volatile ("l.nop 0xc") #ifdef __cplusplus } diff --git a/cpukit/score/cpu/or1k/include/rtems/score/or1k.h b/cpukit/score/cpu/or1k/include/rtems/score/or1k.h index 01ba05c15e..974f21f190 100644 --- a/cpukit/score/cpu/or1k/include/rtems/score/or1k.h +++ b/cpukit/score/cpu/or1k/include/rtems/score/or1k.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file */ @@ -11,9 +13,26 @@ * COPYRIGHT (c) 1989-1999, 2010. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_OR1K_H diff --git a/cpukit/score/cpu/or1k/or1k-context-initialize.c b/cpukit/score/cpu/or1k/or1k-context-initialize.c index ade23b949c..49a5ff1079 100644 --- a/cpukit/score/cpu/or1k/or1k-context-initialize.c +++ b/cpukit/score/cpu/or1k/or1k-context-initialize.c @@ -1,12 +1,31 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * * COPYRIGHT (c) 1989-2006 * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-context-switch.S b/cpukit/score/cpu/or1k/or1k-context-switch.S index 4c8b422d7f..0bcadbe3b5 100644 --- a/cpukit/score/cpu/or1k/or1k-context-switch.S +++ b/cpukit/score/cpu/or1k/or1k-context-switch.S @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-context-validate.S b/cpukit/score/cpu/or1k/or1k-context-validate.S index 5b2afbb44a..286f4027cd 100644 --- a/cpukit/score/cpu/or1k/or1k-context-validate.S +++ b/cpukit/score/cpu/or1k/or1k-context-validate.S @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-context-volatile-clobber.S b/cpukit/score/cpu/or1k/or1k-context-volatile-clobber.S index 669a937e07..f43ec0ccf0 100644 --- a/cpukit/score/cpu/or1k/or1k-context-volatile-clobber.S +++ b/cpukit/score/cpu/or1k/or1k-context-volatile-clobber.S @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-exception-default.c b/cpukit/score/cpu/or1k/or1k-exception-default.c index a85ef1c7a7..c7e5cb5b82 100644 --- a/cpukit/score/cpu/or1k/or1k-exception-default.c +++ b/cpukit/score/cpu/or1k/or1k-exception-default.c @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-exception-frame-print.c b/cpukit/score/cpu/or1k/or1k-exception-frame-print.c index a35ee8a900..dbeb5bab04 100644 --- a/cpukit/score/cpu/or1k/or1k-exception-frame-print.c +++ b/cpukit/score/cpu/or1k/or1k-exception-frame-print.c @@ -1,9 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/or1k/or1k-exception-handler-low.S b/cpukit/score/cpu/or1k/or1k-exception-handler-low.S index 5bf6492946..55c220d41c 100644 --- a/cpukit/score/cpu/or1k/or1k-exception-handler-low.S +++ b/cpukit/score/cpu/or1k/or1k-exception-handler-low.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -9,9 +11,26 @@ /* * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/cpukit/score/cpu/powerpc/cpu.c b/cpukit/score/cpu/powerpc/cpu.c index 1367d9d2c0..7c90ac28dc 100644 --- a/cpukit/score/cpu/powerpc/cpu.c +++ b/cpukit/score/cpu/powerpc/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (C) 2009, 2017 embedded brains GmbH. + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ /* @@ -60,8 +79,10 @@ PPC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE); #endif #ifdef PPC_MULTILIB_ALTIVEC + PPC_ASSERT_OFFSET(vrsave, VRSAVE); + PPC_ASSERT_OFFSET(vscr, VSCR); RTEMS_STATIC_ASSERT( - PPC_CONTEXT_OFFSET_V20 % 16 == 0, + PPC_CONTEXT_OFFSET_V20 % PPC_DEFAULT_CACHE_LINE_SIZE == 0, ppc_context_altivec ); PPC_ASSERT_OFFSET(v20, V20); @@ -76,7 +97,6 @@ PPC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE); PPC_ASSERT_OFFSET(v29, V29); PPC_ASSERT_OFFSET(v30, V30); PPC_ASSERT_OFFSET(v31, V31); - PPC_ASSERT_OFFSET(vrsave, VRSAVE); #endif #ifdef PPC_MULTILIB_FPU @@ -325,3 +345,18 @@ RTEMS_STATIC_ASSERT( sizeof(CPU_Exception_frame) + FRAME_LINK_SPACE <= PPC_EXC_FRAME_SIZE, CPU_Exception_frame ); + +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ppc_interrupt_disable(); + __asm__ volatile ( + "mr 3, %0\n" + "mr 4, %1\n" + "1:\n" + "b 1b\n" + : \ + : "r" (source), "r" (error) + : "memory" + ); + RTEMS_UNREACHABLE(); +} diff --git a/cpukit/score/cpu/powerpc/headers.am 
b/cpukit/score/cpu/powerpc/headers.am deleted file mode 100644 index 5f016a21d1..0000000000 --- a/cpukit/score/cpu/powerpc/headers.am +++ /dev/null @@ -1,9 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/powerpc/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/powerpc/include/rtems/asm.h -include_rtems_powerpc_HEADERS += score/cpu/powerpc/include/rtems/powerpc/registers.h -include_rtems_score_HEADERS += score/cpu/powerpc/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/powerpc/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/powerpc/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/powerpc/include/rtems/score/paravirt.h -include_rtems_score_HEADERS += score/cpu/powerpc/include/rtems/score/powerpc.h diff --git a/cpukit/score/cpu/powerpc/include/rtems/asm.h b/cpukit/score/cpu/powerpc/include/rtems/asm.h index 27af64e724..94f54245b4 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/asm.h +++ b/cpukit/score/cpu/powerpc/include/rtems/asm.h @@ -75,23 +75,21 @@ #define __PROC_LABEL_PREFIX__ __USER_LABEL_PREFIX__ #endif -#include <rtems/concat.h> - /* Use the right prefix for global labels. */ -#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#define SYM(x) RTEMS_XCONCAT (__USER_LABEL_PREFIX__, x) /* Use the right prefix for procedure labels. */ -#define PROC(x) CONCAT1 (__PROC_LABEL_PREFIX__, x) +#define PROC(x) RTEMS_XCONCAT (__PROC_LABEL_PREFIX__, x) /* Use the right prefix for registers. */ -#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x) +#define REG(x) RTEMS_XCONCAT (__REGISTER_PREFIX__, x) /* Use the right prefix for floating point registers. 
*/ -#define FREG(x) CONCAT1 (__FLOAT_REGISTER_PREFIX__, x) +#define FREG(x) RTEMS_XCONCAT (__FLOAT_REGISTER_PREFIX__, x) /* * define macros for all of the registers on this CPU diff --git a/cpukit/score/cpu/powerpc/include/rtems/powerpc/registers.h b/cpukit/score/cpu/powerpc/include/rtems/powerpc/registers.h index 5dad87016d..271dcc36af 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/powerpc/registers.h +++ b/cpukit/score/cpu/powerpc/include/rtems/powerpc/registers.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -16,9 +18,26 @@ * Surrey Satellite Technology Limited * * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_POWERPC_REGISTERS_H @@ -522,8 +541,8 @@ lidate */ #define FSL_EIS_MAS0 624 #define FSL_EIS_MAS0_TLBSEL (1 << (63 - 35)) -#define FSL_EIS_MAS0_ESEL(n) ((0xf & (n)) << (63 - 47)) -#define FSL_EIS_MAS0_ESEL_GET(m) (((m) >> (63 - 47)) & 0xf) +#define FSL_EIS_MAS0_ESEL(n) ((0xfff & (n)) << (63 - 47)) +#define FSL_EIS_MAS0_ESEL_GET(m) (((m) >> (63 - 47)) & 0xfff) #define FSL_EIS_MAS0_NV (1 << (63 - 63)) #define FSL_EIS_MAS1 625 diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h index 996b6f8e60..6f2fe491e9 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -27,11 +29,28 @@ * * Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL). * - * Copyright (c) 2010, 2017 embedded brains GmbH. + * Copyright (C) 2010, 2020 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPU_H @@ -227,6 +246,13 @@ typedef struct { uint32_t isr_dispatch_disable; uint32_t reserved_for_alignment; #if defined(PPC_MULTILIB_ALTIVEC) + #if !defined(__powerpc64__) + uint32_t reserved_for_alignment_2[4]; + #endif + uint32_t vrsave; + uint32_t reserved_for_alignment_3[2]; + /* This field must take stvewx/lvewx requirements into account */ + uint32_t vscr; uint8_t v20[16]; uint8_t v21[16]; uint8_t v22[16]; @@ -239,7 +265,6 @@ typedef struct { uint8_t v29[16]; uint8_t v30[16]; uint8_t v31[16]; - uint32_t vrsave; #elif defined(__ALTIVEC__) /* * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave @@ -354,8 +379,16 @@ static inline ppc_context *ppc_get_context( const Context_Control *context ) #define PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE PPC_CONTEXT_GPR_OFFSET( 32 ) #ifdef PPC_MULTILIB_ALTIVEC + #ifdef __powerpc64__ + #define PPC_CONTEXT_OFFSET_VRSAVE \ + ( PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 ) + #else + #define PPC_CONTEXT_OFFSET_VRSAVE \ + ( PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 24 ) + #endif + #define PPC_CONTEXT_OFFSET_VSCR ( PPC_CONTEXT_OFFSET_VRSAVE + 12 ) #define PPC_CONTEXT_OFFSET_V( v ) \ - ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8) + ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_VRSAVE + 16) #define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 ) #define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 ) #define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 ) @@ -368,9 +401,8 @@ static inline ppc_context *ppc_get_context( const Context_Control *context ) #define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 ) #define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 ) #define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 ) - #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 ) #define PPC_CONTEXT_OFFSET_F( f ) \ - ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_VRSAVE + 8 ) + ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_V( 32 ) ) #else #define PPC_CONTEXT_OFFSET_F( f ) \ 
( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 ) @@ -400,7 +432,7 @@ static inline ppc_context *ppc_get_context( const Context_Control *context ) #if defined(PPC_MULTILIB_FPU) #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 ) #elif defined(PPC_MULTILIB_ALTIVEC) - #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4) + #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_V( 33 ) #elif defined(__ALTIVEC__) #define PPC_CONTEXT_VOLATILE_SIZE \ (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8 \ @@ -417,8 +449,8 @@ static inline ppc_context *ppc_get_context( const Context_Control *context ) #endif #ifndef ASM -typedef struct { #if (PPC_HAS_FPU == 1) +typedef struct { /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over * procedure calls. However, this would mean that the interrupt * frame had to hold f0-f13, and the fpscr. And as the majority @@ -432,9 +464,8 @@ typedef struct { float f[32]; uint32_t fpscr; #endif -#endif /* (PPC_HAS_FPU == 1) */ } Context_Control_fp; - +#endif /* (PPC_HAS_FPU == 1) */ #endif /* ASM */ /* @@ -530,7 +561,9 @@ typedef struct { * CPUs with a "floating point save context" instruction. */ +#if (PPC_HAS_FPU == 1) #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp ) +#endif /* * (Optional) # of bytes for libmisc/stackchk to check @@ -576,7 +609,7 @@ typedef struct { #ifndef ASM -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & MSR_EE ) != 0; } @@ -613,20 +646,6 @@ void ppc_set_interrupt_level( uint32_t level ); #endif /* ASM */ -#define _CPU_Fatal_halt( _source, _error ) \ - do { \ - ppc_interrupt_disable(); \ - __asm__ volatile ( \ - "mr 3, %0\n" \ - "mr 4, %1\n" \ - "1:\n" \ - "b 1b\n" \ - : \ - : "r" (_source), "r" (_error) \ - : "memory" \ - ); \ - } while ( 0 ) - /* * Should be large enough to run all RTEMS tests. This ensures * that a "reasonable" small application should not have any problems. 
@@ -724,14 +743,6 @@ static inline CPU_Counter_ticks _CPU_Counter_read( void ) return value; } -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - #endif /* ASM */ @@ -914,6 +925,11 @@ void _CPU_Context_switch( Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + /* * _CPU_Context_restore * @@ -925,6 +941,7 @@ void _CPU_Context_switch( RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); +#if (PPC_HAS_FPU == 1) /* * _CPU_Context_save_fp * @@ -944,6 +961,7 @@ void _CPU_Context_save_fp( void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr ); +#endif #ifdef RTEMS_SMP uint32_t _CPU_SMP_Initialize( void ); @@ -968,16 +986,6 @@ void _CPU_Context_restore_fp( } void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); - - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } #endif typedef struct { diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/powerpc/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h index bd23602827..68b7165546 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,11 +12,28 @@ * * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu> * - * Copyright (c) 2009, 2017 embedded brains GmbH + * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -132,6 +151,8 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifdef RTEMS_SMP /* Use SPRG0 for the per-CPU control of the current processor */ @@ -248,20 +269,45 @@ static inline struct Per_CPU_Control *_PPC_Get_current_per_CPU_control( void ) #endif /* RTEMS_SMP */ +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".long 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ +#ifdef __powerpc64__ + register uintptr_t tp __asm__( "13" ); +#else + register uintptr_t tp __asm__( "2" ); +#endif + + tp = ppc_get_context( context )->tp; + + /* Make sure that the register assignment is not optimized away */ + __asm__ volatile ( "" : : "r" ( tp ) ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) ppc_get_context( context )->tp; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/paravirt.h 
b/cpukit/score/cpu/powerpc/include/rtems/score/paravirt.h index 5efbd3bef7..e6f067b495 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/score/paravirt.h +++ b/cpukit/score/cpu/powerpc/include/rtems/score/paravirt.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,12 +13,28 @@ * COPYRIGHT (c) 2018. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ - #ifndef RTEMS_PARAVIRT #error "This file should only be included with paravirtualization is enabled." 
#endif diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/powerpc.h b/cpukit/score/cpu/powerpc/include/rtems/score/powerpc.h index 84e164d70b..542ab154d2 100644 --- a/cpukit/score/cpu/powerpc/include/rtems/score/powerpc.h +++ b/cpukit/score/cpu/powerpc/include/rtems/score/powerpc.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -32,13 +34,26 @@ * COPYRIGHT (c) 1989-1997. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * Note: - * This file is included by both C and assembler code ( -DASM ) + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ diff --git a/cpukit/score/cpu/powerpc/ppc-context-validate.S b/cpukit/score/cpu/powerpc/ppc-context-validate.S index b65ccacfb8..721633c642 100644 --- a/cpukit/score/cpu/powerpc/ppc-context-validate.S +++ b/cpukit/score/cpu/powerpc/ppc-context-validate.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2013, 2020 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -86,6 +99,7 @@ #define VTMP_OFFSET VOFFSET(12) #define VTMP2_OFFSET VOFFSET(13) #define VRSAVE_OFFSET VOFFSET(14) + #define VRSAVE2_OFFSET (VOFFSET(14) + 4) #define VSCR_OFFSET (VOFFSET(14) + 12) #define ALTIVECEND VOFFSET(15) #else @@ -148,6 +162,13 @@ _CPU_Context_validate: #endif #ifdef PPC_MULTILIB_ALTIVEC + mfvrsave r0 + stw r0, VRSAVE_OFFSET(r1) + li r0, 0xffffffff + mtvrsave r0 + mfvscr v0 + li r0, VSCR_OFFSET + stvewx v0, r1, r0 li r0, V20_OFFSET stvx v20, r1, r0 li r0, V21_OFFSET @@ -172,11 +193,6 @@ _CPU_Context_validate: stvx v30, r1, r0 li r0, V31_OFFSET stvx v31, r1, r0 - mfvscr v0 - li r0, VSCR_OFFSET - stvewx v0, r1, r0 - mfvrsave r0 - stw r0, VRSAVE_OFFSET(r1) #endif /* Fill */ @@ -324,9 +340,11 @@ _CPU_Context_validate: FILL_V 29 FILL_V 30 FILL_V 31 +#ifndef __PPC_VRSAVE__ addi r4, r3, 0x700 mtvrsave r4 #endif +#endif /* Check */ check: @@ -503,6 +521,15 @@ check: #ifdef PPC_MULTILIB_ALTIVEC .macro CHECK_V i +#ifdef __PPC_VRSAVE__ + mfvrsave r4 +.if (31 - \i) > 15 + andis. r5, r4, 1 << (31 - \i - 16) +.else + andi. 
r5, r4, 1 << (31 - \i) +.endif + beq 1f +#endif li r4, VTMP_OFFSET stvx \i, r1, r4 lwz r5, VTMP_OFFSET(r1) @@ -521,9 +548,43 @@ check: addi r4, r3, 0x600 + \i cmpw r5, r4 bne restore +#ifdef __PPC_VRSAVE__ + mfvrsave r4 +.if (31 - \i) > 15 + xoris r4, r4, 1 << (31 - \i - 16) +.else + xori r4, r4, 1 << (31 - \i) +.endif + mtvrsave r4 + b 2f +1: +.if (31 - \i) > 15 + oris r4, r4, 1 << (31 - \i - 16) +.else + ori r4, r4, 1 << (31 - \i) +.endif + mtvrsave r4 + addi r4, r3, 0x300 + \i + stw r4, VTMP_OFFSET(r1) + addi r4, r3, 0x400 + \i + stw r4, VTMP_OFFSET + 4(r1) + addi r4, r3, 0x500 + \i + stw r4, VTMP_OFFSET + 8(r1) + addi r4, r3, 0x600 + \i + stw r4, VTMP_OFFSET + 12(r1) + li r4, VTMP_OFFSET + lvx \i, r1, r4 +2: +#endif .endm /* Check VSCR */ +#ifdef __PPC_VRSAVE__ + mfvrsave r4 + stw r4, VRSAVE2_OFFSET(r1) + oris r4, r4, 0x8000 + mtvrsave r4 +#endif li r4, VTMP_OFFSET stvx v0, r1, r4 mfvscr v0 @@ -535,6 +596,10 @@ check: bne restore li r4, VTMP_OFFSET lvx v0, r1, r4 +#ifdef __PPC_VRSAVE__ + lwz r4, VRSAVE2_OFFSET(r1) + mtvrsave r4 +#endif CHECK_V 0 CHECK_V 1 @@ -569,10 +634,16 @@ check: CHECK_V 30 CHECK_V 31 mfvrsave r5 +#ifdef __PPC_VRSAVE__ + addi r5, r5, 1 + cmplwi r0, r5, 1 + bgt restore +#else addi r4, r3, 0x700 cmpw r5, r4 bne restore #endif +#endif mtcr r29 addi r5, r3, 1 @@ -582,7 +653,7 @@ check: restore: #ifdef PPC_MULTILIB_ALTIVEC - lwz r0, VRSAVE_OFFSET(r1) + li r0, 0xffffffff mtvrsave r0 li r0, V31_OFFSET lvx v31, r1, r0 @@ -608,6 +679,11 @@ restore: lvx v21, r1, r0 li r0, V20_OFFSET lvx v20, r1, r0 + li r0, VSCR_OFFSET + lvewx v0, r1, r0 + mtvscr v0 + lwz r0, VRSAVE_OFFSET(r1) + mtvrsave r0 #endif #ifdef PPC_MULTILIB_FPU diff --git a/cpukit/score/cpu/powerpc/ppc-context-volatile-clobber.S b/cpukit/score/cpu/powerpc/ppc-context-volatile-clobber.S index ea217b27ce..d235929f7d 100644 --- a/cpukit/score/cpu/powerpc/ppc-context-volatile-clobber.S +++ b/cpukit/score/cpu/powerpc/ppc-context-volatile-clobber.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: 
BSD-2-Clause */ + /* - * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2013, 2017 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/powerpc/ppc-isr-disable-mask.S b/cpukit/score/cpu/powerpc/ppc-isr-disable-mask.S index 9f78cf9a63..529b88bf3f 100644 --- a/cpukit/score/cpu/powerpc/ppc-isr-disable-mask.S +++ b/cpukit/score/cpu/powerpc/ppc-isr-disable-mask.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * Copyright (c) 2014 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/riscv/cpu.c b/cpukit/score/cpu/riscv/cpu.c index 62150d4ea5..74a716a3b2 100644 --- a/cpukit/score/cpu/riscv/cpu.c +++ b/cpukit/score/cpu/riscv/cpu.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * * Copyright (c) 2015 University of York. * Hesham ALmatary <hesham@alumni.york.ac.uk> @@ -173,18 +173,6 @@ RTEMS_STATIC_ASSERT( riscv_interrupt_frame_size ); -void _init(void); - -void _fini(void); - -void _init(void) -{ -} - -void _fini(void) -{ -} - /** * @brief Performs processor dependent initialization. */ diff --git a/cpukit/score/cpu/riscv/headers.am b/cpukit/score/cpu/riscv/headers.am deleted file mode 100644 index eb863cb28c..0000000000 --- a/cpukit/score/cpu/riscv/headers.am +++ /dev/null @@ -1,11 +0,0 @@ -## This file was generated by "./boostrap -H". 
-include_libcpu_HEADERS += score/cpu/riscv/include/libcpu/access.h -include_libcpu_HEADERS += score/cpu/riscv/include/libcpu/byteorder.h -include_machine_HEADERS += score/cpu/riscv/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/riscv/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/riscv-utility.h -include_rtems_score_HEADERS += score/cpu/riscv/include/rtems/score/riscv.h diff --git a/cpukit/score/cpu/riscv/include/libcpu/access.h b/cpukit/score/cpu/riscv/include/libcpu/access.h index 4f2a780895..f414d6f22f 100644 --- a/cpukit/score/cpu/riscv/include/libcpu/access.h +++ b/cpukit/score/cpu/riscv/include/libcpu/access.h @@ -1,12 +1,31 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * access.h - access routines for SPARC. SPARC is big endian only. * * COPYRIGHT (c) 2011 * Aeroflex Gaisler. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LIBCPU_ACCESS_H diff --git a/cpukit/score/cpu/riscv/include/libcpu/byteorder.h b/cpukit/score/cpu/riscv/include/libcpu/byteorder.h index 939e51fe84..1b4f6f3b1e 100644 --- a/cpukit/score/cpu/riscv/include/libcpu/byteorder.h +++ b/cpukit/score/cpu/riscv/include/libcpu/byteorder.h @@ -7,6 +7,8 @@ #ifndef _LIBCPU_BYTEORDER_H #define _LIBCPU_BYTEORDER_H +#include <stdint.h> + static inline void st_le32(volatile uint32_t *addr, uint32_t value) { *(addr)=value ; diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h index 38eb92394d..e342e7d4af 100644 --- a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @@ -4,7 +4,7 @@ */ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * * Copyright (c) 2015 University of York. 
* Hesham Almatary <hesham@alumni.york.ac.uk> @@ -147,14 +147,15 @@ typedef struct { #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE -#define _CPU_Initialize_vectors() - static inline uint32_t riscv_interrupt_disable( void ) { unsigned long mstatus; __asm__ volatile ( - "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) : + ".option push\n" + ".option arch, +zicsr\n" + "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) "\n" + ".option pop" : "=&r" ( mstatus ) ); @@ -163,7 +164,14 @@ static inline uint32_t riscv_interrupt_disable( void ) static inline void riscv_interrupt_enable( uint32_t level ) { - __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) ); + __asm__ volatile ( + ".option push\n" + ".option arch, +zicsr\n" + "csrrs zero, mstatus, %0\n" + ".option pop" : + : + "r" ( level ) + ); } #define _CPU_ISR_Disable( _level ) \ @@ -178,22 +186,26 @@ static inline void riscv_interrupt_enable( uint32_t level ) riscv_interrupt_disable(); \ } while(0) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level ) +static inline bool _CPU_ISR_Is_enabled( unsigned long level ) { return ( level & RISCV_MSTATUS_MIE ) != 0; } -RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level ) +static inline void _CPU_ISR_Set_level( uint32_t level ) { - if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) { - __asm__ volatile ( - "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) - ); - } else { - __asm__ volatile ( - "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) - ); - } + /* + * Where CPU_ENABLE_ROBUST_THREAD_DISPATCH == TRUE, the only supported + * interrupt level allowed to set is 0 (interrupts enabled). This constraint + * is enforced by the API level functions which return an error status for + * other interrupt levels. 
+ */ + (void) level; + __asm__ volatile ( + ".option push\n" + ".option arch, +zicsr\n" + "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) "\n" + ".option pop" + ); } uint32_t _CPU_ISR_Get_level( void ); @@ -213,8 +225,6 @@ void _CPU_Context_Initialize( #define _CPU_Context_Restart_self( _the_context ) \ _CPU_Context_restore( (_the_context) ) -RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, uint32_t error ); - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_GENERIC_BITFIELD_DATA TRUE @@ -383,6 +393,11 @@ void _CPU_Context_switch( Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + /* * _CPU_Context_restore * @@ -442,14 +457,6 @@ extern volatile uint32_t * const _RISCV_Counter; CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - #ifdef RTEMS_SMP uint32_t _CPU_SMP_Initialize( void ); @@ -464,23 +471,19 @@ static inline uint32_t _CPU_SMP_Get_current_processor( void ) { unsigned long mhartid; - __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) ); + __asm__ volatile ( + ".option push\n" + ".option arch, +zicsr\n" + "csrr %0, mhartid\n" + ".option pop" : + "=&r" ( mhartid ) + ); - return (uint32_t) mhartid; + return (uint32_t) mhartid - RISCV_BOOT_HARTID; } void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); -static inline void _CPU_SMP_Processor_event_broadcast( void ) -{ - __asm__ volatile ( "" : : : "memory" ); -} - -static inline void _CPU_SMP_Processor_event_receive( void ) -{ - __asm__ volatile ( "" : : : "memory" ); -} - static inline bool _CPU_Context_Get_is_executing( const Context_Control *context ) @@ -496,6 +499,10 @@ static inline void _CPU_Context_Set_is_executing( context->is_executing = is_executing; } +RTEMS_NO_RETURN void _RISCV_Start_multitasking( Context_Control *heir ); + 
+#define _CPU_Start_multitasking( _heir ) _RISCV_Start_multitasking( _heir ) + #endif /* RTEMS_SMP */ /** Type that can store a 32-bit integer or a pointer. */ diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/riscv/include/rtems/score/cpuatomic.h deleted file mode 100644 index 8ee9606b44..0000000000 --- a/cpukit/score/cpu/riscv/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h index c381cce110..13fd60ed8c 100644 --- a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h @@ -5,7 +5,7 @@ */ /* - * Copyright (c) 2013, 2018 embedded brains GmbH + * Copyright (C) 2013, 2018 embedded brains GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -54,6 +54,8 @@ #define CPU_PER_CPU_CONTROL_SIZE 16 #endif +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifdef RTEMS_SMP #define RISCV_CONTEXT_IS_EXECUTING 0 #endif @@ -296,6 +298,16 @@ extern "C" { #endif +static inline uint32_t _RISCV_Map_hardid_to_cpu_index( uint32_t hardid ) +{ + return hardid - RISCV_BOOT_HARTID; +} + +static inline uint32_t _RISCV_Map_cpu_index_to_hardid( uint32_t cpu_index ) +{ + return cpu_index + RISCV_BOOT_HARTID; +} + /* Core Local Interruptor (CLINT) */ typedef union { @@ -325,7 +337,7 @@ typedef struct { uint32_t priority[RISCV_PLIC_MAX_INTERRUPTS]; uint32_t pending[1024]; uint32_t enable[16320][32]; - RISCV_PLIC_hart_regs harts[CPU_MAXIMUM_PROCESSORS]; + RISCV_PLIC_hart_regs harts[CPU_MAXIMUM_PROCESSORS + RISCV_BOOT_HARTID]; } RISCV_PLIC_regs; typedef struct { @@ -399,7 +411,13 @@ static inline struct Per_CPU_Control *_RISCV_Get_current_per_CPU_control( void ) { struct Per_CPU_Control *cpu_self; - __asm__ volatile ( "csrr %0, mscratch" : "=r" ( cpu_self ) ); + __asm__ volatile ( + ".option push\n" + ".option arch, +zicsr\n" + "csrr %0, mscratch\n" + ".option pop" : + "=r" ( cpu_self ) + ); return cpu_self; } @@ -408,20 +426,41 @@ static inline struct Per_CPU_Control *_RISCV_Get_current_per_CPU_control( void ) #endif /* RTEMS_SMP */ 
+RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( "unimp" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + register uintptr_t tp __asm__( "tp" ); + + tp = context->tp; + + /* Make sure that the register assignment is not optimized away */ + __asm__ volatile ( "" : : "r" ( tp ) ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->tp; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/riscv/include/rtems/score/riscv-utility.h b/cpukit/score/cpu/riscv/include/rtems/score/riscv-utility.h index dc4836bee2..1cfcf8dbad 100644 --- a/cpukit/score/cpu/riscv/include/rtems/score/riscv-utility.h +++ b/cpukit/score/cpu/riscv/include/rtems/score/riscv-utility.h @@ -247,22 +247,27 @@ typedef enum { #ifdef __GNUC__ #define read_csr(reg) ({ unsigned long __tmp; \ - asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ + asm volatile (".option push\n.option arch, +zicsr\n" \ + "csrr %0, " #reg "\n.option pop ": "=r"(__tmp)); \ __tmp; }) #define write_csr(reg, val) ({ \ - asm volatile ("csrw " #reg ", %0" :: "rK"(val)); }) + asm volatile (".option push\n.option arch, +zicsr\n" \ + "csrw " #reg ", %0\n.option pop" :: "rK"(val)); }) #define swap_csr(reg, val) ({ unsigned long __tmp; \ - asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \ + asm volatile (".option push\n.option arch, +zicsr\n" \ + "csrrw %0, " #reg ", %1\n.option pop" : "=r"(__tmp) : "rK"(val)); \ __tmp; }) #define set_csr(reg, bit) ({ unsigned long __tmp; \ - asm 
volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + asm volatile (".option push\n.option arch, +zicsr\nc" \ + "srrs %0, " #reg ", %1\n.option pop" : "=r"(__tmp) : "rK"(bit)); \ __tmp; }) #define clear_csr(reg, bit) ({ unsigned long __tmp; \ - asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + asm volatile (".option push\n.option arch, +zicsr\n" \ + "csrrc %0, " #reg ", %1\n.option pop" : "=r"(__tmp) : "rK"(bit)); \ __tmp; }) #define rdtime() read_csr(time) diff --git a/cpukit/score/cpu/riscv/riscv-context-initialize.c b/cpukit/score/cpu/riscv/riscv-context-initialize.c index c6bd99ebbd..67998b426e 100644 --- a/cpukit/score/cpu/riscv/riscv-context-initialize.c +++ b/cpukit/score/cpu/riscv/riscv-context-initialize.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * * Copyright (c) 2015 University of York. * Hesham Almatary <hesham@alumni.york.ac.uk> @@ -68,7 +68,7 @@ void _CPU_Context_Initialize( if ( tls_area != NULL ) { void *tls_block; - tls_block = _TLS_TCB_before_TLS_block_initialize( tls_area ); + tls_block = _TLS_Initialize_area( tls_area ); context->tp = (uintptr_t) tls_block; } } diff --git a/cpukit/score/cpu/riscv/riscv-context-switch.S b/cpukit/score/cpu/riscv/riscv-context-switch.S index 96c117b3de..a00c842de2 100644 --- a/cpukit/score/cpu/riscv/riscv-context-switch.S +++ b/cpukit/score/cpu/riscv/riscv-context-switch.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * * Copyright (c) 2015 University of York. 
* Hesham ALmatary <hesham@alumni.york.ac.uk> @@ -35,11 +35,17 @@ .section .text, "ax", @progbits .align 2 + .option arch, +zicsr PUBLIC(_CPU_Context_switch) +PUBLIC(_CPU_Context_switch_no_return) PUBLIC(_CPU_Context_restore) +#ifdef RTEMS_SMP +PUBLIC(_RISCV_Start_multitasking) +#endif SYM(_CPU_Context_switch): +SYM(_CPU_Context_switch_no_return): GET_SELF_CPU_CONTROL a2 lw a3, PER_CPU_ISR_DISPATCH_DISABLE(a2) @@ -172,4 +178,16 @@ SYM(_CPU_Context_restore): sw a5, PER_CPU_OFFSET_EXECUTING(a2) j .Ltry_update_is_executing + +SYM(_RISCV_Start_multitasking): + mv a1, a0 + GET_SELF_CPU_CONTROL a2 + + /* Switch the stack to the temporary interrupt stack of this processor */ + addi sp, a2, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE + + /* Enable interrupts */ + csrrs zero, mstatus, RISCV_MSTATUS_MIE + + j .Ltry_update_is_executing #endif diff --git a/cpukit/score/cpu/riscv/riscv-context-validate.S b/cpukit/score/cpu/riscv/riscv-context-validate.S index 6bdb06a15b..838803e809 100644 --- a/cpukit/score/cpu/riscv/riscv-context-validate.S +++ b/cpukit/score/cpu/riscv/riscv-context-validate.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * Copyright (c) 2015 Hesham Almatary <hesham@alumni.york.ac.uk> * * Redistribution and use in source and binary forms, with or without diff --git a/cpukit/score/cpu/riscv/riscv-context-volatile-clobber.S b/cpukit/score/cpu/riscv/riscv-context-volatile-clobber.S index 628bd1a467..efea2afc72 100644 --- a/cpukit/score/cpu/riscv/riscv-context-volatile-clobber.S +++ b/cpukit/score/cpu/riscv/riscv-context-volatile-clobber.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. 
KG * Copyright (c) 2015 Hesham Almatary <hesham@alumni.york.ac.uk> * * Redistribution and use in source and binary forms, with or without diff --git a/cpukit/score/cpu/riscv/riscv-counter.S b/cpukit/score/cpu/riscv/riscv-counter.S index e779325b4b..96c68e6b13 100644 --- a/cpukit/score/cpu/riscv/riscv-counter.S +++ b/cpukit/score/cpu/riscv/riscv-counter.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/cpukit/score/cpu/riscv/riscv-exception-frame-print.c b/cpukit/score/cpu/riscv/riscv-exception-frame-print.c index 897ceaf9a8..61986a8ba2 100644 --- a/cpukit/score/cpu/riscv/riscv-exception-frame-print.c +++ b/cpukit/score/cpu/riscv/riscv-exception-frame-print.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * Copyright (c) 2015 Hesham Almatary <hesham@alumni.york.ac.uk> * * Redistribution and use in source and binary forms, with or without diff --git a/cpukit/score/cpu/riscv/riscv-exception-handler.S b/cpukit/score/cpu/riscv/riscv-exception-handler.S index 9330f246b1..34e7cbb0b3 100644 --- a/cpukit/score/cpu/riscv/riscv-exception-handler.S +++ b/cpukit/score/cpu/riscv/riscv-exception-handler.S @@ -7,7 +7,7 @@ */ /* - * Copyright (c) 2018 embedded brains GmbH + * Copyright (c) 2018 embedded brains GmbH & Co. KG * Copyright (c) 2015 University of York. 
* Hesham Almatary <hesham@alumni.york.ac.uk> @@ -45,6 +45,7 @@ PUBLIC(_RISCV_Exception_handler) .section .text, "ax", @progbits .align 2 + .option arch, +zicsr TYPE_FUNC(_RISCV_Exception_handler) SYM(_RISCV_Exception_handler): diff --git a/cpukit/score/cpu/sh/cpu.c b/cpukit/score/cpu/sh/cpu.c index c10086534b..558f95be4d 100644 --- a/cpukit/score/cpu/sh/cpu.c +++ b/cpukit/score/cpu/sh/cpu.c @@ -30,9 +30,9 @@ #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #include <rtems/score/sh_io.h> -#include <rtems/score/cpu.h> #include <rtems/score/sh.h> /* referenced in start.S */ @@ -76,6 +76,13 @@ void _CPU_Initialize(void) _CPU_ISR_Set_level( level ) ; } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + __asm__ volatile("mov.l %0,r0"::"m" (error)); + __asm__ volatile("mov #1, r4"); + __asm__ volatile("trapa #34"); +} + /* * _CPU_ISR_Get_level */ diff --git a/cpukit/score/cpu/sh/headers.am b/cpukit/score/cpu/sh/headers.am deleted file mode 100644 index ccb3b94eb5..0000000000 --- a/cpukit/score/cpu/sh/headers.am +++ /dev/null @@ -1,7 +0,0 @@ -## This file was generated by "./boostrap -H". -include_rtems_HEADERS += score/cpu/sh/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/sh/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/sh/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/sh/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/sh/include/rtems/score/sh.h -include_rtems_score_HEADERS += score/cpu/sh/include/rtems/score/sh_io.h diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpu.h b/cpukit/score/cpu/sh/include/rtems/score/cpu.h index 0df6aa3f83..f2b59a8713 100644 --- a/cpukit/score/cpu/sh/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/sh/include/rtems/score/cpu.h @@ -367,14 +367,6 @@ void CPU_delay( uint32_t microseconds ); */ /* - * Support routine to initialize the RTEMS vector table after it is allocated. 
- * - * SH Specific Information: NONE - */ - -#define _CPU_Initialize_vectors() - -/* * Disable all interrupts for an RTEMS critical section. The previous * level is returned in _level. */ @@ -401,7 +393,7 @@ void CPU_delay( uint32_t microseconds ); #define _CPU_ISR_Flash( _level) \ sh_flash_interrupts( _level) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { sh_get_interrupt_level( level ); return level == 0; @@ -498,32 +490,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/* - * FIXME: Trap32 ??? - * - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * invokes a Trap32 Instruction which returns to the breakpoint - * routine of cmon. - */ - -#ifdef BSP_FATAL_HALT - /* we manage the fatal error in the board support package */ - void bsp_fatal_halt( uint32_t _error); -#define _CPU_Fatal_halt( _source, _error ) bsp_fatal_halt( _error) -#else -#define _CPU_Fatal_halt( _source, _error)\ -{ \ - __asm__ volatile("mov.l %0,r0"::"m" (_error)); \ - __asm__ volatile("mov #1, r4"); \ - __asm__ volatile("trapa #34"); \ -} -#endif - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE @@ -610,14 +576,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. 
*/ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/sh/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/sh/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h index 6dcbc00f6b..e5f45eb363 100644 --- a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h @@ -5,7 +5,7 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -29,34 +29,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/sh/sh-exception-frame-print.c b/cpukit/score/cpu/sh/sh-exception-frame-print.c index e54eb60e9f..d6c49f5569 100644 --- a/cpukit/score/cpu/sh/sh-exception-frame-print.c +++ b/cpukit/score/cpu/sh/sh-exception-frame-print.c @@ -1,11 +1,5 @@ /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Copyright (c) 2012 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at diff --git a/cpukit/score/cpu/sparc/access_le.c b/cpukit/score/cpu/sparc/access_le.c index d3a0e93adb..b1bde6da4b 100644 --- a/cpukit/score/cpu/sparc/access_le.c +++ b/cpukit/score/cpu/sparc/access_le.c @@ -1,12 +1,31 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Little-endian access routines for SPARC * * COPYRIGHT (c) 2011 * Aeroflex Gaisler. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <libcpu/byteorder.h> diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c index 9f84c28fc5..c82fa935fe 100644 --- a/cpukit/score/cpu/sparc/cpu.c +++ b/cpukit/score/cpu/sparc/cpu.c @@ -1,29 +1,51 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** - * @file + * @file + * + * @ingroup RTEMSScoreCPUSPARC * - * @brief SPARC CPU Dependent Source + * @brief This source file contains static assertions to ensure the consistency + * of interfaces used in C and assembler and it contains the SPARC-specific + * implementation of _CPU_Initialize(), _CPU_ISR_Get_level(), and + * _CPU_Context_Initialize(). */ /* * COPYRIGHT (c) 1989-2007. * On-Line Applications Research Corporation (OAR). * - * Copyright (c) 2017 embedded brains GmbH + * Copyright (c) 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif -#include <rtems/score/isr.h> #include <rtems/score/percpu.h> #include <rtems/score/tls.h> #include <rtems/score/thread.h> -#include <rtems/rtems/cache.h> #if SPARC_HAS_FPU == 1 RTEMS_STATIC_ASSERT( @@ -144,21 +166,56 @@ RTEMS_STATIC_ASSERT( CPU_Interrupt_frame_alignment ); -/* - * This initializes the set of opcodes placed in each trap - * table entry. The routine which installs a handler is responsible - * for filling in the fields for the _handler address and the _vector - * trap type. - * - * The constants following this structure are masks for the fields which - * must be filled in when the handler is installed. 
- */ -const CPU_Trap_table_entry _CPU_Trap_slot_template = { - 0xa1480000, /* mov %psr, %l0 */ - 0x29000000, /* sethi %hi(_handler), %l4 */ - 0x81c52000, /* jmp %l4 + %lo(_handler) */ - 0xa6102000 /* mov _vector, %l3 */ -}; +#define SPARC_ASSERT_REGISTER_WINDOW_OFFSET( member, off ) \ + RTEMS_STATIC_ASSERT( \ + offsetof( SPARC_Register_window, member ) == \ + RTEMS_XCONCAT( SPARC_REGISTER_WINDOW_OFFSET_, off ), \ + SPARC_Register_window ## member \ + ) + +SPARC_ASSERT_REGISTER_WINDOW_OFFSET( local[ 0 ], LOCAL( 0 ) ); +SPARC_ASSERT_REGISTER_WINDOW_OFFSET( local[ 1 ], LOCAL( 1 ) ); +SPARC_ASSERT_REGISTER_WINDOW_OFFSET( input[ 0 ], INPUT( 0 ) ); +SPARC_ASSERT_REGISTER_WINDOW_OFFSET( input[ 1 ], INPUT( 1 ) ); + +RTEMS_STATIC_ASSERT( + sizeof( SPARC_Register_window ) == SPARC_REGISTER_WINDOW_SIZE, + SPARC_REGISTER_WINDOW_SIZE +); + +#define SPARC_ASSERT_EXCEPTION_OFFSET( member, off ) \ + RTEMS_STATIC_ASSERT( \ + offsetof( CPU_Exception_frame, member ) == \ + RTEMS_XCONCAT( SPARC_EXCEPTION_OFFSET_, off ), \ + CPU_Exception_frame_offset_ ## member \ + ) + +SPARC_ASSERT_EXCEPTION_OFFSET( psr, PSR ); +SPARC_ASSERT_EXCEPTION_OFFSET( pc, PC ); +SPARC_ASSERT_EXCEPTION_OFFSET( npc, NPC ); +SPARC_ASSERT_EXCEPTION_OFFSET( trap, TRAP ); +SPARC_ASSERT_EXCEPTION_OFFSET( wim, WIM ); +SPARC_ASSERT_EXCEPTION_OFFSET( y, Y ); +SPARC_ASSERT_EXCEPTION_OFFSET( global[ 0 ], GLOBAL( 0 ) ); +SPARC_ASSERT_EXCEPTION_OFFSET( global[ 1 ], GLOBAL( 1 ) ); +SPARC_ASSERT_EXCEPTION_OFFSET( output[ 0 ], OUTPUT( 0 ) ); +SPARC_ASSERT_EXCEPTION_OFFSET( output[ 1 ], OUTPUT( 1 ) ); + +#if SPARC_HAS_FPU == 1 +SPARC_ASSERT_EXCEPTION_OFFSET( fsr, FSR ); +SPARC_ASSERT_EXCEPTION_OFFSET( fp[ 0 ], FP( 0 ) ); +SPARC_ASSERT_EXCEPTION_OFFSET( fp[ 1 ], FP( 1 ) ); +#endif + +RTEMS_STATIC_ASSERT( + sizeof( CPU_Exception_frame ) == SPARC_EXCEPTION_FRAME_SIZE, + SPARC_EXCEPTION_FRAME_SIZE +); + +RTEMS_STATIC_ASSERT( + sizeof( CPU_Exception_frame ) % CPU_ALIGNMENT == 0, + CPU_Exception_frame_alignment +); /* * 
_CPU_Initialize @@ -197,160 +254,6 @@ uint32_t _CPU_ISR_Get_level( void ) return level; } -/* - * _CPU_ISR_install_raw_handler - * - * This routine installs the specified handler as a "raw" non-executive - * supported trap handler (a.k.a. interrupt service routine). - * - * Input Parameters: - * vector - trap table entry number plus synchronous - * vs. asynchronous information - * new_handler - address of the handler to be installed - * old_handler - pointer to an address of the handler previously installed - * - * Output Parameters: NONE - * *new_handler - address of the handler previously installed - * - * NOTE: - * - * On the SPARC, there are really only 256 vectors. However, the executive - * has no easy, fast, reliable way to determine which traps are synchronous - * and which are asynchronous. By default, synchronous traps return to the - * instruction which caused the interrupt. So if you install a software - * trap handler as an executive interrupt handler (which is desirable since - * RTEMS takes care of window and register issues), then the executive needs - * to know that the return address is to the trap rather than the instruction - * following the trap. - * - * So vectors 0 through 255 are treated as regular asynchronous traps which - * provide the "correct" return address. Vectors 256 through 512 are assumed - * by the executive to be synchronous and to require that the return address - * be fudged. - * - * If you use this mechanism to install a trap handler which must reexecute - * the instruction which caused the trap, then it should be installed as - * an asynchronous trap. This will avoid the executive changing the return - * address. 
- */ - -void _CPU_ISR_install_raw_handler( - uint32_t vector, - CPU_ISR_raw_handler new_handler, - CPU_ISR_raw_handler *old_handler -) -{ - uint32_t real_vector; - CPU_Trap_table_entry *tbr; - CPU_Trap_table_entry *slot; - uint32_t u32_tbr; - uint32_t u32_handler; - - /* - * Get the "real" trap number for this vector ignoring the synchronous - * versus asynchronous indicator included with our vector numbers. - */ - - real_vector = SPARC_REAL_TRAP_NUMBER( vector ); - - /* - * Get the current base address of the trap table and calculate a pointer - * to the slot we are interested in. - */ - - sparc_get_tbr( u32_tbr ); - - u32_tbr &= 0xfffff000; - - tbr = (CPU_Trap_table_entry *) u32_tbr; - - slot = &tbr[ real_vector ]; - - /* - * Get the address of the old_handler from the trap table. - * - * NOTE: The old_handler returned will be bogus if it does not follow - * the RTEMS model. - */ - -#define HIGH_BITS_MASK 0xFFFFFC00 -#define HIGH_BITS_SHIFT 10 -#define LOW_BITS_MASK 0x000003FF - - if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) { - u32_handler = - (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) | - (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK); - *old_handler = (CPU_ISR_raw_handler) u32_handler; - } else - *old_handler = 0; - - /* - * Copy the template to the slot and then fix it. - */ - - *slot = _CPU_Trap_slot_template; - - u32_handler = (uint32_t) new_handler; - - slot->mov_vector_l3 |= vector; - slot->sethi_of_handler_to_l4 |= - (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT; - slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK); - - /* - * There is no instruction cache snooping, so we need to invalidate - * the instruction cache to make sure that the processor sees the - * changes to the trap table. This step is required on both single- - * and multiprocessor systems. - * - * In a SMP configuration a change to the trap table might be - * missed by other cores. 
If the system state is up, the other - * cores can be notified using SMP messages that they need to - * flush their icache. If the up state has not been reached - * there is no need to notify other cores. They will do an - * automatic flush of the icache just after entering the up - * state, but before enabling interrupts. - */ - rtems_cache_invalidate_entire_instruction(); -} - -void _CPU_ISR_install_vector( - uint32_t vector, - CPU_ISR_handler new_handler, - CPU_ISR_handler *old_handler -) -{ - uint32_t real_vector; - CPU_ISR_raw_handler ignored; - - /* - * Get the "real" trap number for this vector ignoring the synchronous - * versus asynchronous indicator included with our vector numbers. - */ - - real_vector = SPARC_REAL_TRAP_NUMBER( vector ); - - /* - * Return the previous ISR handler. - */ - - *old_handler = _ISR_Vector_table[ real_vector ]; - - /* - * Install the wrapper so this ISR can be invoked properly. - */ - - _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored ); - - /* - * We put the actual user ISR address in '_ISR_vector_table'. This will - * be used by the _ISR_Handler so the user gets control. - */ - - _ISR_Vector_table[ real_vector ] = new_handler; -} - void _CPU_Context_Initialize( Context_Control *the_context, uint32_t *stack_base, @@ -416,7 +319,7 @@ void _CPU_Context_Initialize( the_context->isr_dispatch_disable = 0; if ( tls_area != NULL ) { - void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area ); + void *tcb = _TLS_Initialize_area( tls_area ); the_context->g7 = (uintptr_t) tcb; } diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S index e884fb2f9e..fd7186b499 100644 --- a/cpukit/score/cpu/sparc/cpu_asm.S +++ b/cpukit/score/cpu/sparc/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* cpu_asm.s * * This file contains the basic algorithms for all assembly code used @@ -7,11 +9,28 @@ * COPYRIGHT (c) 1989-2011. * On-Line Applications Research Corporation (OAR). 
* - * Copyright (c) 2014, 2017 embedded brains GmbH + * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
* * Ported to ERC32 implementation of the SPARC by On-Line Applications * Research Corporation (OAR) under contract to the European Space @@ -25,27 +44,6 @@ #include <rtems/score/percpu.h> #include <libcpu/grlib-tn-0018.h> -#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH) - #define FP_FRAME_OFFSET_FO_F1 (SPARC_MINIMUM_STACK_FRAME_SIZE + 0) - #define FP_FRAME_OFFSET_F2_F3 (FP_FRAME_OFFSET_FO_F1 + 8) - #define FP_FRAME_OFFSET_F4_F5 (FP_FRAME_OFFSET_F2_F3 + 8) - #define FP_FRAME_OFFSET_F6_F7 (FP_FRAME_OFFSET_F4_F5 + 8) - #define FP_FRAME_OFFSET_F8_F9 (FP_FRAME_OFFSET_F6_F7 + 8) - #define FP_FRAME_OFFSET_F1O_F11 (FP_FRAME_OFFSET_F8_F9 + 8) - #define FP_FRAME_OFFSET_F12_F13 (FP_FRAME_OFFSET_F1O_F11 + 8) - #define FP_FRAME_OFFSET_F14_F15 (FP_FRAME_OFFSET_F12_F13 + 8) - #define FP_FRAME_OFFSET_F16_F17 (FP_FRAME_OFFSET_F14_F15 + 8) - #define FP_FRAME_OFFSET_F18_F19 (FP_FRAME_OFFSET_F16_F17 + 8) - #define FP_FRAME_OFFSET_F2O_F21 (FP_FRAME_OFFSET_F18_F19 + 8) - #define FP_FRAME_OFFSET_F22_F23 (FP_FRAME_OFFSET_F2O_F21 + 8) - #define FP_FRAME_OFFSET_F24_F25 (FP_FRAME_OFFSET_F22_F23 + 8) - #define FP_FRAME_OFFSET_F26_F27 (FP_FRAME_OFFSET_F24_F25 + 8) - #define FP_FRAME_OFFSET_F28_F29 (FP_FRAME_OFFSET_F26_F27 + 8) - #define FP_FRAME_OFFSET_F3O_F31 (FP_FRAME_OFFSET_F28_F29 + 8) - #define FP_FRAME_OFFSET_FSR (FP_FRAME_OFFSET_F3O_F31 + 8) - #define FP_FRAME_SIZE (FP_FRAME_OFFSET_FSR + 8) -#endif - /* * void _CPU_Context_switch( * Context_Control *run, @@ -57,7 +55,9 @@ .align 4 PUBLIC(_CPU_Context_switch) + PUBLIC(_CPU_Context_switch_no_return) SYM(_CPU_Context_switch): +SYM(_CPU_Context_switch_no_return): st %g5, [%o0 + G5_OFFSET] ! save the global registers /* @@ -209,6 +209,18 @@ done_flushing: ! Try to update the is executing indicator of the heir context mov 1, %g1 +#if defined(__FIX_LEON3FT_B2BST) + /* + * This is a workaround for GRLIB-TN-0011 (Technical Note on LEON3/FT + * AHB Lock Release During Atomic Operation). 
Affected components are + * the GR712RC, UT699, UT699E, UT700, and LEON3FT-RTAX. Strictly, the + * workaround is only necessary if the MMU is enabled. Using the + * __FIX_LEON3FT_B2BST is not 100% appropriate, but the best thing we + * can use to enable the workaround. An alignment padding is filled + * with nops. + */ +.align 16 +#endif .Ltry_update_is_executing: swap [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1 @@ -234,6 +246,14 @@ done_flushing: mov %g1, %psr ! restore status register and ! **** ENABLE TRAPS **** + /* + * WARNING: This code does not run with the restored stack pointer. In + * SMP configurations, it uses a processor-specific stack. In + * uniprocessor configurations, it uses the stack of the caller. In + * this case, the caller shall ensure that it is not the interrupt + * stack (which is also the system initialization stack). + */ + ld [%o1 + G5_OFFSET], %g5 ! restore the global registers ld [%o1 + G7_OFFSET], %g7 @@ -254,7 +274,9 @@ done_flushing: ldd [%o1 + I4_OFFSET], %i4 ldd [%o1 + I6_FP_OFFSET], %i6 - ldd [%o1 + O6_SP_OFFSET], %o6 ! restore the output registers + ldd [%o1 + O6_SP_OFFSET], %o6 ! restore the non-volatile output + ! registers (stack pointer, + ! link register) jmp %o7 + 8 ! return nop ! delay slot @@ -313,8 +335,25 @@ SYM(_CPU_Context_restore): ba SYM(_CPU_Context_restore_heir) mov %i0, %o1 ! in the delay slot +#if !defined(RTEMS_SMP) + .align 4 + PUBLIC(_SPARC_Start_multitasking) +SYM(_SPARC_Start_multitasking): + /* + * Restore the stack pointer right now, so that the window flushing and + * interrupts during _CPU_Context_restore_heir() use the stack of the + * heir thread. This is crucial for the interrupt handling to prevent + * a concurrent use of the interrupt stack (which is also the system + * initialization stack). 
+ */ + ld [%o0 + O6_SP_OFFSET], %o6 + + ba SYM(_CPU_Context_restore) + nop +#endif + /* - * void _ISR_Handler() + * void _SPARC_Interrupt_trap() * * This routine provides the RTEMS interrupt management. * @@ -324,28 +363,14 @@ SYM(_CPU_Context_restore): * l0 = PSR * l1 = PC * l2 = nPC - * l3 = trap type + * l3 = interrupt vector number (this is not the trap type) * - * NOTE: By an executive defined convention, trap type is between 0 and 255 if - * it is an asynchonous trap and 256 and 511 if it is synchronous. + * NOTE: This trap handler is intended to service external interrupts. */ .align 4 - PUBLIC(_ISR_Handler) -SYM(_ISR_Handler): - /* - * Fix the return address for synchronous traps. - */ - - andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 - ! Is this a synchronous trap? - be,a win_ovflow ! No, then skip the adjustment - nop ! DELAY - mov %l1, %l6 ! save trapped pc for debug info - mov %l2, %l1 ! do not return to the instruction - add %l2, 4, %l2 ! indicated - -win_ovflow: + PUBLIC(_SPARC_Interrupt_trap) +SYM(_SPARC_Interrupt_trap): /* * Save the globals this block uses. * @@ -432,7 +457,7 @@ dont_do_the_window: * includes a regular minimum stack frame which will be used if * needed by register window overflow and underflow handlers. * - * REGISTERS SAME AS AT _ISR_Handler + * REGISTERS SAME AS AT _SPARC_Interrupt_trap() */ sub %fp, CPU_INTERRUPT_FRAME_SIZE, %sp @@ -458,9 +483,6 @@ dont_do_the_window: rd %y, %g1 st %g1, [%sp + ISF_Y_OFFSET] ! save y - st %l6, [%sp + ISF_TPC_OFFSET] ! save real trapped pc - - mov %sp, %o1 ! 2nd arg to ISR Handler /* * Increment ISR nest level and Thread dispatch disable level. @@ -501,9 +523,7 @@ dont_do_the_window: bnz dont_switch_stacks ! 
No, then do not switch stacks #if defined(RTEMS_PROFILING) - sethi %hi(_SPARC_Counter), %o5 - ld [%o5 + %lo(_SPARC_Counter)], %l4 - call %l4 + call SYM(_SPARC_Counter_read_ISR_disabled) nop mov %o0, %o5 #else @@ -539,26 +559,16 @@ dont_switch_stacks: sub %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp /* - * Check if we have an external interrupt (trap 0x11 - 0x1f). If so, - * set the PIL in the %psr to mask off interrupts with lower priority. + * Set the PIL in the %psr to mask off interrupts with lower priority. * The original %psr in %l0 is not modified since it will be restored * when the interrupt handler returns. */ mov %l0, %g5 - and %l3, 0x0ff, %g4 - subcc %g4, 0x11, %g0 - bl dont_fix_pil - subcc %g4, 0x1f, %g0 - bg dont_fix_pil - sll %g4, 8, %g4 + sll %l3, 8, %g4 and %g4, SPARC_PSR_PIL_MASK, %g4 andn %l0, SPARC_PSR_PIL_MASK, %g5 - ba pil_fixed or %g4, %g5, %g5 -dont_fix_pil: - or %g5, SPARC_PSR_PIL_MASK, %g5 -pil_fixed: #if SPARC_HAS_FPU == 1 /* @@ -571,23 +581,10 @@ pil_fixed: wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS **** /* - * Vector to user's handler. - * - * NOTE: TBR may no longer have vector number in it since - * we just enabled traps. It is definitely in l3. + * Call _SPARC_Interrupt_dispatch( %l3 ) */ - - sethi %hi(SYM(_ISR_Vector_table)), %g4 - or %g4, %lo(SYM(_ISR_Vector_table)), %g4 - and %l3, 0xFF, %g5 ! remove synchronous trap indicator - sll %g5, 2, %g5 ! g5 = offset into table - ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ] - - - ! o1 = 2nd arg = address of the ISF - ! WAS LOADED WHEN ISF WAS SAVED!!! mov %l3, %o0 ! o0 = 1st arg = vector number - call %g4 + call SYM(_SPARC_Interrupt_dispatch) #if defined(RTEMS_PROFILING) mov %o5, %l3 ! save interrupt entry instant #else @@ -605,7 +602,7 @@ pil_fixed: cmp %l7, 0 bne profiling_not_outer_most_exit nop - call %l4 ! Call _SPARC_Counter.counter_read + call SYM(_SPARC_Counter_read_ISR_disabled) mov %g1, %l4 ! Save previous interrupt status mov %o0, %o2 ! 
o2 = 3rd arg = interrupt exit instant mov %l3, %o1 ! o1 = 2nd arg = interrupt entry instant @@ -705,40 +702,40 @@ isr_dispatch: * Post-switch actions (e.g. signal handlers) and context switch * extensions may safely use the floating point unit. */ - sub %sp, FP_FRAME_SIZE, %sp - std %f0, [%sp + FP_FRAME_OFFSET_FO_F1] + sub %sp, SPARC_FP_FRAME_SIZE, %sp + std %f0, [%sp + SPARC_FP_FRAME_OFFSET_FO_F1] SPARC_LEON3FT_B2BST_NOP - std %f2, [%sp + FP_FRAME_OFFSET_F2_F3] + std %f2, [%sp + SPARC_FP_FRAME_OFFSET_F2_F3] SPARC_LEON3FT_B2BST_NOP - std %f4, [%sp + FP_FRAME_OFFSET_F4_F5] + std %f4, [%sp + SPARC_FP_FRAME_OFFSET_F4_F5] SPARC_LEON3FT_B2BST_NOP - std %f6, [%sp + FP_FRAME_OFFSET_F6_F7] + std %f6, [%sp + SPARC_FP_FRAME_OFFSET_F6_F7] SPARC_LEON3FT_B2BST_NOP - std %f8, [%sp + FP_FRAME_OFFSET_F8_F9] + std %f8, [%sp + SPARC_FP_FRAME_OFFSET_F8_F9] SPARC_LEON3FT_B2BST_NOP - std %f10, [%sp + FP_FRAME_OFFSET_F1O_F11] + std %f10, [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11] SPARC_LEON3FT_B2BST_NOP - std %f12, [%sp + FP_FRAME_OFFSET_F12_F13] + std %f12, [%sp + SPARC_FP_FRAME_OFFSET_F12_F13] SPARC_LEON3FT_B2BST_NOP - std %f14, [%sp + FP_FRAME_OFFSET_F14_F15] + std %f14, [%sp + SPARC_FP_FRAME_OFFSET_F14_F15] SPARC_LEON3FT_B2BST_NOP - std %f16, [%sp + FP_FRAME_OFFSET_F16_F17] + std %f16, [%sp + SPARC_FP_FRAME_OFFSET_F16_F17] SPARC_LEON3FT_B2BST_NOP - std %f18, [%sp + FP_FRAME_OFFSET_F18_F19] + std %f18, [%sp + SPARC_FP_FRAME_OFFSET_F18_F19] SPARC_LEON3FT_B2BST_NOP - std %f20, [%sp + FP_FRAME_OFFSET_F2O_F21] + std %f20, [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21] SPARC_LEON3FT_B2BST_NOP - std %f22, [%sp + FP_FRAME_OFFSET_F22_F23] + std %f22, [%sp + SPARC_FP_FRAME_OFFSET_F22_F23] SPARC_LEON3FT_B2BST_NOP - std %f24, [%sp + FP_FRAME_OFFSET_F24_F25] + std %f24, [%sp + SPARC_FP_FRAME_OFFSET_F24_F25] SPARC_LEON3FT_B2BST_NOP - std %f26, [%sp + FP_FRAME_OFFSET_F26_F27] + std %f26, [%sp + SPARC_FP_FRAME_OFFSET_F26_F27] SPARC_LEON3FT_B2BST_NOP - std %f28, [%sp + FP_FRAME_OFFSET_F28_F29] + std %f28, [%sp + 
SPARC_FP_FRAME_OFFSET_F28_F29] SPARC_LEON3FT_B2BST_NOP - std %f30, [%sp + FP_FRAME_OFFSET_F3O_F31] + std %f30, [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31] SPARC_LEON3FT_B2BST_NOP - st %fsr, [%sp + FP_FRAME_OFFSET_FSR] + st %fsr, [%sp + SPARC_FP_FRAME_OFFSET_FSR] call SYM(_Thread_Do_dispatch) mov %g6, %o0 @@ -746,25 +743,25 @@ isr_dispatch: * Restore the floating point context from stack frame and release the * stack frame. */ - ldd [%sp + FP_FRAME_OFFSET_FO_F1], %f0 - ldd [%sp + FP_FRAME_OFFSET_F2_F3], %f2 - ldd [%sp + FP_FRAME_OFFSET_F4_F5], %f4 - ldd [%sp + FP_FRAME_OFFSET_F6_F7], %f6 - ldd [%sp + FP_FRAME_OFFSET_F8_F9], %f8 - ldd [%sp + FP_FRAME_OFFSET_F1O_F11], %f10 - ldd [%sp + FP_FRAME_OFFSET_F12_F13], %f12 - ldd [%sp + FP_FRAME_OFFSET_F14_F15], %f14 - ldd [%sp + FP_FRAME_OFFSET_F16_F17], %f16 - ldd [%sp + FP_FRAME_OFFSET_F18_F19], %f18 - ldd [%sp + FP_FRAME_OFFSET_F2O_F21], %f20 - ldd [%sp + FP_FRAME_OFFSET_F22_F23], %f22 - ldd [%sp + FP_FRAME_OFFSET_F24_F25], %f24 - ldd [%sp + FP_FRAME_OFFSET_F26_F27], %f26 - ldd [%sp + FP_FRAME_OFFSET_F28_F29], %f28 - ldd [%sp + FP_FRAME_OFFSET_F3O_F31], %f30 - ld [%sp + FP_FRAME_OFFSET_FSR], %fsr + ldd [%sp + SPARC_FP_FRAME_OFFSET_FO_F1], %f0 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F2_F3], %f2 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F4_F5], %f4 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F6_F7], %f6 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F8_F9], %f8 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11], %f10 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F12_F13], %f12 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F14_F15], %f14 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F16_F17], %f16 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F18_F19], %f18 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21], %f20 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F22_F23], %f22 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F24_F25], %f24 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F26_F27], %f26 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F28_F29], %f28 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31], %f30 + ld [%sp + SPARC_FP_FRAME_OFFSET_FSR], %fsr ba 
.Lthread_dispatch_done - add %sp, FP_FRAME_SIZE, %sp + add %sp, SPARC_FP_FRAME_SIZE, %sp .Lnon_fp_thread_dispatch: #endif @@ -799,8 +796,8 @@ isr_dispatch: * The CWP in place at this point may be different from * that which was in effect at the beginning of the ISR if we * have been context switched between the beginning of this invocation - * of _ISR_Handler and this point. Thus the CWP and WIM should - * not be changed back to their values at ISR entry time. Any + * of _SPARC_Interrupt_trap() and this point. Thus the CWP and WIM + * should not be changed back to their values at ISR entry time. Any * changes to the PSR must preserve the CWP. */ diff --git a/cpukit/score/cpu/sparc/headers.am b/cpukit/score/cpu/sparc/headers.am deleted file mode 100644 index 25eaeaa74a..0000000000 --- a/cpukit/score/cpu/sparc/headers.am +++ /dev/null @@ -1,11 +0,0 @@ -## This file was generated by "./boostrap -H". -include_libcpu_HEADERS += score/cpu/sparc/include/libcpu/access.h -include_libcpu_HEADERS += score/cpu/sparc/include/libcpu/byteorder.h -include_libcpu_HEADERS += score/cpu/sparc/include/libcpu/grlib-tn-0018.h -include_machine_HEADERS += score/cpu/sparc/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/sparc/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/sparc/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/sparc/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/sparc/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/sparc/include/rtems/score/sparc.h -include_rtems_score_HEADERS += score/cpu/sparc/include/rtems/score/sparcimpl.h diff --git a/cpukit/score/cpu/sparc/include/libcpu/access.h b/cpukit/score/cpu/sparc/include/libcpu/access.h index 4f2a780895..f414d6f22f 100644 --- a/cpukit/score/cpu/sparc/include/libcpu/access.h +++ b/cpukit/score/cpu/sparc/include/libcpu/access.h @@ -1,12 +1,31 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * access.h - access routines for 
SPARC. SPARC is big endian only. * * COPYRIGHT (c) 2011 * Aeroflex Gaisler. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LIBCPU_ACCESS_H diff --git a/cpukit/score/cpu/sparc/include/libcpu/byteorder.h b/cpukit/score/cpu/sparc/include/libcpu/byteorder.h index 6726a785f8..0c469ba36b 100644 --- a/cpukit/score/cpu/sparc/include/libcpu/byteorder.h +++ b/cpukit/score/cpu/sparc/include/libcpu/byteorder.h @@ -1,12 +1,31 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * byteorder.h - Endian conversion for SPARC. SPARC is big endian only. 
* * COPYRIGHT (c) 2011 * Aeroflex Gaisler. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _LIBCPU_BYTEORDER_H @@ -18,42 +37,42 @@ extern "C" { #endif -RTEMS_INLINE_ROUTINE uint16_t ld_le16(volatile uint16_t *addr) +static inline uint16_t ld_le16(volatile uint16_t *addr) { return CPU_swap_u16(*addr); } -RTEMS_INLINE_ROUTINE void st_le16(volatile uint16_t *addr, uint16_t val) +static inline void st_le16(volatile uint16_t *addr, uint16_t val) { *addr = CPU_swap_u16(val); } -RTEMS_INLINE_ROUTINE uint32_t ld_le32(volatile uint32_t *addr) +static inline uint32_t ld_le32(volatile uint32_t *addr) { return CPU_swap_u32(*addr); } -RTEMS_INLINE_ROUTINE void st_le32(volatile uint32_t *addr, uint32_t val) +static inline void st_le32(volatile uint32_t *addr, uint32_t val) { *addr = CPU_swap_u32(val); } -RTEMS_INLINE_ROUTINE uint16_t ld_be16(volatile uint16_t *addr) +static inline uint16_t ld_be16(volatile uint16_t *addr) { return *addr; } -RTEMS_INLINE_ROUTINE void st_be16(volatile uint16_t *addr, uint16_t val) +static inline void st_be16(volatile uint16_t *addr, uint16_t val) { *addr = val; } -RTEMS_INLINE_ROUTINE uint32_t ld_be32(volatile uint32_t *addr) +static inline uint32_t ld_be32(volatile uint32_t *addr) { return *addr; } -RTEMS_INLINE_ROUTINE void st_be32(volatile uint32_t *addr, uint32_t val) +static inline void st_be32(volatile uint32_t *addr, uint32_t val) { *addr = val; } diff --git a/cpukit/score/cpu/sparc/include/libcpu/grlib-tn-0018.h b/cpukit/score/cpu/sparc/include/libcpu/grlib-tn-0018.h index 62f33da6e8..10f34c6123 100644 --- a/cpukit/score/cpu/sparc/include/libcpu/grlib-tn-0018.h +++ b/cpukit/score/cpu/sparc/include/libcpu/grlib-tn-0018.h @@ -1,7 +1,16 @@ /* SPDX-License-Identifier: BSD-2-Clause */ +/** + * @file + * + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This header file provides interfaces of the GRLIB-TN-0018 LEON3FT + * RETT Restart Errata fixes. 
+ */ + /* - * Copyright (C) 2020 Cobham Gailer AB + * Copyright (C) 2020 Cobham Gaisler AB * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/cpukit/score/cpu/sparc/include/rtems/asm.h b/cpukit/score/cpu/sparc/include/rtems/asm.h index de65f35d2c..ca89f8e417 100644 --- a/cpukit/score/cpu/sparc/include/rtems/asm.h +++ b/cpukit/score/cpu/sparc/include/rtems/asm.h @@ -1,8 +1,10 @@ /** * @file * - * @brief Address the Problems Caused by Incompatible Flavor of - * Assemblers and Toolsets + * @ingroup RTEMSScoreCPUSPARCASM + * + * @brief This header file provides interfaces to address problems caused by + * incompatible flavor of assemblers and toolsets. * * This include file attempts to address the problems * caused by incompatible flavors of assemblers and diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h index 8c5330b8ce..a21cef371f 100644 --- a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h @@ -1,19 +1,38 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief SPARC CPU Department Source + * @ingroup RTEMSScoreCPUSPARC * - * This include file contains information pertaining to the port of - * the executive to the SPARC processor. + * @brief This header file defines interfaces pertaining to the port of the + * executive to the SPARC processor. */ /* * COPYRIGHT (c) 1989-2011. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -743,14 +762,13 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template; #ifndef ASM -/* - * ISR handler macros - */ - /** - * Support routine to initialize the RTEMS vector table after it is allocated. + * @brief Dispatches the installed interrupt handlers. + * + * @param irq is the interrupt vector number of the external interrupt ranging + * from 0 to 15. This is not a trap number. */ -#define _CPU_Initialize_vectors() +void _SPARC_Interrupt_dispatch( uint32_t irq ); /** * Disable all interrupts for a critical section. 
The previous @@ -779,7 +797,7 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template; #define _CPU_ISR_Is_enabled( _isr_cookie ) \ sparc_interrupt_is_enabled( _isr_cookie ) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & SPARC_PSR_PIL_MASK ) == 0; } @@ -884,17 +902,6 @@ void _CPU_Context_Initialize( do { } while ( 0 ) /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/** - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - */ -RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, uint32_t error ); - -/* end of Fatal Error manager macros */ - #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE /* Bitfield handler macros */ @@ -971,6 +978,11 @@ void _CPU_Context_switch( Context_Control *heir ); +RTEMS_NO_RETURN void _CPU_Context_switch_no_return( + Context_Control *executing, + Context_Control *heir +); + /** * @brief SPARC specific context restore. * @@ -981,6 +993,25 @@ void _CPU_Context_switch( */ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); +#if !defined(RTEMS_SMP) +/** + * @brief Starts multitasking in uniprocessor configurations. + * + * This function just sets the stack of the heir thread and then calls + * _CPU_Context_restore(). + * + * This is causes that the window flushing and interrupts during + * _CPU_Context_restore() use the stack of the heir thread. This is crucial + * for the interrupt handling to prevent a concurrent use of the interrupt + * stack (which is also the system initialization stack). + * + * @param[in] heir is the context of the heir thread. 
+ */ +RTEMS_NO_RETURN void _SPARC_Start_multitasking( Context_Control *heir ); + +#define _CPU_Start_multitasking( _heir ) _SPARC_Start_multitasking( _heir ) +#endif + #if defined(RTEMS_SMP) uint32_t _CPU_SMP_Initialize( void ); @@ -1000,16 +1031,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); #endif void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); - - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } #endif #if defined(SPARC_USE_LAZY_FP_SWITCH) @@ -1023,9 +1044,60 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context ); } while ( 0 ) #endif +/** + * @brief This structure contains the local and input registers of a register + * window. + */ typedef struct { + /** @brief This member contains the local 0..7 register values. */ + uint32_t local[ 8 ]; + + /** @brief This member contains the input 0..7 register values. */ + uint32_t input[ 8 ]; +} SPARC_Register_window; + +/** + * @brief This structure contains the register set of a context which caused an + * unexpected trap. + */ +typedef struct { + /** @brief This member contains the PSR register value. */ + uint32_t psr; + + /** @brief This member contains the PC value. */ + uint32_t pc; + + /** @brief This member contains the nPC value. */ + uint32_t npc; + + /** @brief This member contains the trap number. */ uint32_t trap; - CPU_Interrupt_frame *isf; + + /** @brief This member contains the WIM register value. */ + uint32_t wim; + + /** @brief This member contains the Y register value. */ + uint32_t y; + + /** @brief This member contains the global 0..7 register values. */ + uint32_t global[ 8 ]; + + /** @brief This member contains the output 0..7 register values. 
*/ + uint32_t output[ 8 ] ; + + /** + * @brief This member contains the additional register windows according to + * the saved WIM. + */ + SPARC_Register_window windows[ SPARC_NUMBER_OF_REGISTER_WINDOWS - 1 ]; + +#if SPARC_HAS_FPU == 1 + /** This member contain the FSR register value. */ + uint32_t fsr; + + /** @brief This member contains the floating point 0..31 register values. */ + uint64_t fp[ 16 ]; +#endif } CPU_Exception_frame; void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ); @@ -1079,39 +1151,7 @@ typedef uint32_t CPU_Counter_ticks; uint32_t _CPU_Counter_frequency( void ); -typedef CPU_Counter_ticks ( *SPARC_Counter_read )( void ); - -/* - * The SPARC processors supported by RTEMS have no built-in CPU counter - * support. We have to use some hardware counter module for this purpose, for - * example the GPTIMER instance used by the clock driver. The BSP must provide - * an implementation of the CPU counter read function. This allows the use of - * dynamic hardware enumeration. - */ -typedef struct { - SPARC_Counter_read read_isr_disabled; - SPARC_Counter_read read; - volatile const CPU_Counter_ticks *counter_register; - volatile const uint32_t *pending_register; - uint32_t pending_mask; - CPU_Counter_ticks accumulated; - CPU_Counter_ticks interval; -} SPARC_Counter; - -extern const SPARC_Counter _SPARC_Counter; - -static inline CPU_Counter_ticks _CPU_Counter_read( void ) -{ - return ( *_SPARC_Counter.read )(); -} - -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} +CPU_Counter_ticks _CPU_Counter_read( void ); /** Type that can store a 32-bit integer or a pointer. 
*/ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/sparc/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/sparc/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h index a99da74fa9..9697209a97 100644 --- a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h @@ -1,16 +1,38 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief CPU Port Implementation API + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This header file defines implementation interfaces pertaining to the + * port of the executive to the SPARC processor. */ /* * Copyright (c) 1989, 2007 On-Line Applications Research Corporation (OAR) - * Copyright (c) 2013, 2016 embedded brains GmbH + * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -101,6 +123,8 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 #endif +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 20 + #if ( SPARC_HAS_FPU == 1 ) /** * @brief Offset of the CPU_Per_CPU_control::fsr field relative to the @@ -117,6 +141,49 @@ #endif #endif +#define SPARC_REGISTER_WINDOW_OFFSET_LOCAL( i ) ( ( i ) * 4 ) +#define SPARC_REGISTER_WINDOW_OFFSET_INPUT( i ) ( ( i ) * 4 + 32 ) +#define SPARC_REGISTER_WINDOW_SIZE 64 + +#define SPARC_EXCEPTION_OFFSET_PSR 0 +#define SPARC_EXCEPTION_OFFSET_PC 4 +#define SPARC_EXCEPTION_OFFSET_NPC 8 +#define SPARC_EXCEPTION_OFFSET_TRAP 12 +#define SPARC_EXCEPTION_OFFSET_WIM 16 +#define SPARC_EXCEPTION_OFFSET_Y 20 +#define SPARC_EXCEPTION_OFFSET_GLOBAL( i ) ( ( i ) * 4 + 24 ) +#define SPARC_EXCEPTION_OFFSET_OUTPUT( i ) ( ( i ) * 4 + 56 ) +#define SPARC_EXCEPTION_OFFSET_WINDOWS( i ) ( ( i ) * 64 + 88 ) + +#if SPARC_HAS_FPU == 1 +#define SPARC_EXCEPTION_OFFSET_FSR 536 +#define SPARC_EXCEPTION_OFFSET_FP( i ) ( ( i ) * 8 + 544 ) +#define SPARC_EXCEPTION_FRAME_SIZE 672 
+#else +#define SPARC_EXCEPTION_FRAME_SIZE 536 +#endif + +#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH) +#define SPARC_FP_FRAME_OFFSET_FO_F1 (SPARC_MINIMUM_STACK_FRAME_SIZE + 0) +#define SPARC_FP_FRAME_OFFSET_F2_F3 (SPARC_FP_FRAME_OFFSET_FO_F1 + 8) +#define SPARC_FP_FRAME_OFFSET_F4_F5 (SPARC_FP_FRAME_OFFSET_F2_F3 + 8) +#define SPARC_FP_FRAME_OFFSET_F6_F7 (SPARC_FP_FRAME_OFFSET_F4_F5 + 8) +#define SPARC_FP_FRAME_OFFSET_F8_F9 (SPARC_FP_FRAME_OFFSET_F6_F7 + 8) +#define SPARC_FP_FRAME_OFFSET_F1O_F11 (SPARC_FP_FRAME_OFFSET_F8_F9 + 8) +#define SPARC_FP_FRAME_OFFSET_F12_F13 (SPARC_FP_FRAME_OFFSET_F1O_F11 + 8) +#define SPARC_FP_FRAME_OFFSET_F14_F15 (SPARC_FP_FRAME_OFFSET_F12_F13 + 8) +#define SPARC_FP_FRAME_OFFSET_F16_F17 (SPARC_FP_FRAME_OFFSET_F14_F15 + 8) +#define SPARC_FP_FRAME_OFFSET_F18_F19 (SPARC_FP_FRAME_OFFSET_F16_F17 + 8) +#define SPARC_FP_FRAME_OFFSET_F2O_F21 (SPARC_FP_FRAME_OFFSET_F18_F19 + 8) +#define SPARC_FP_FRAME_OFFSET_F22_F23 (SPARC_FP_FRAME_OFFSET_F2O_F21 + 8) +#define SPARC_FP_FRAME_OFFSET_F24_F25 (SPARC_FP_FRAME_OFFSET_F22_F23 + 8) +#define SPARC_FP_FRAME_OFFSET_F26_F27 (SPARC_FP_FRAME_OFFSET_F24_F25 + 8) +#define SPARC_FP_FRAME_OFFSET_F28_F29 (SPARC_FP_FRAME_OFFSET_F26_F27 + 8) +#define SPARC_FP_FRAME_OFFSET_F3O_F31 (SPARC_FP_FRAME_OFFSET_F28_F29 + 8) +#define SPARC_FP_FRAME_OFFSET_FSR (SPARC_FP_FRAME_OFFSET_F3O_F31 + 8) +#define SPARC_FP_FRAME_SIZE (SPARC_FP_FRAME_OFFSET_FSR + 8) +#endif + #ifndef ASM #ifdef __cplusplus @@ -156,20 +223,48 @@ register struct Per_CPU_Control *_SPARC_Per_CPU_current __asm__( "g6" ); #define _CPU_Get_thread_executing() ( _SPARC_Per_CPU_current->executing ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + void _CPU_Context_volatile_clobber( uintptr_t pattern ); void _CPU_Context_validate( uintptr_t pattern ); -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( "unimp 0" ); } -RTEMS_INLINE_ROUTINE void 
_CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + register uint32_t g7 __asm__( "g7" ); + + g7 = context->g7; + + /* Make sure that the register assignment is not optimized away */ + __asm__ volatile ( "" : : "r" ( g7 ) ); +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + return (void *) context->g7; +} + +#if defined(RTEMS_PROFILING) +/** + * @brief Reads the CPU counter while interrupts are disabled. + */ +CPU_Counter_ticks _SPARC_Counter_read_ISR_disabled( void ); +#endif + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/sparc/include/rtems/score/sparc.h b/cpukit/score/cpu/sparc/include/rtems/score/sparc.h index 166e89d58a..9b1a09d5e6 100644 --- a/cpukit/score/cpu/sparc/include/rtems/score/sparc.h +++ b/cpukit/score/cpu/sparc/include/rtems/score/sparc.h @@ -1,8 +1,12 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * - * @brief Information Required to Build RTEMS for a Particular Member - * of the SPARC Family + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This header file provides information required to build RTEMS for a + * particular member of the SPARC family. * * This file contains the information required to build * RTEMS for a particular member of the SPARC family. It does @@ -15,9 +19,26 @@ * COPYRIGHT (c) 1989-2011. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_SPARC_H diff --git a/cpukit/score/cpu/sparc/include/rtems/score/sparcimpl.h b/cpukit/score/cpu/sparc/include/rtems/score/sparcimpl.h deleted file mode 100644 index edc03bd074..0000000000 --- a/cpukit/score/cpu/sparc/include/rtems/score/sparcimpl.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2016, 2018 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_SPARCIMPL_H -#define _RTEMS_SCORE_SPARCIMPL_H - -#include <rtems/score/cpu.h> - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -struct timecounter; - -/* - * Provides a mutable alias to _SPARC_Counter for use in - * _SPARC_Counter_initialize(). 
The _SPARC_Counter and _SPARC_Counter_mutable - * are defined via the SPARC_COUNTER_DEFINITION define. - */ -extern SPARC_Counter _SPARC_Counter_mutable; - -void _SPARC_Counter_at_tick_clock( void ); - -CPU_Counter_ticks _SPARC_Counter_read_default( void ); - -CPU_Counter_ticks _SPARC_Counter_read_up( void ); - -CPU_Counter_ticks _SPARC_Counter_read_down( void ); - -CPU_Counter_ticks _SPARC_Counter_read_clock_isr_disabled( void ); - -CPU_Counter_ticks _SPARC_Counter_read_clock( void ); - -CPU_Counter_ticks _SPARC_Counter_read_asr23( void ); - -uint32_t _SPARC_Get_timecount_up( struct timecounter * ); - -uint32_t _SPARC_Get_timecount_down( struct timecounter * ); - -uint32_t _SPARC_Get_timecount_clock( struct timecounter * ); - -uint32_t _SPARC_Get_timecount_asr23( struct timecounter * ); - -/* - * Defines the _SPARC_Counter and _SPARC_Counter_mutable global variables. - * Place this define in the global file scope of the CPU counter support file - * of the BSP. - */ -#define SPARC_COUNTER_DEFINITION \ - __asm__ ( \ - "\t.global\t_SPARC_Counter\n" \ - "\t.global\t_SPARC_Counter_mutable\n" \ - "\t.section\t.data._SPARC_Counter,\"aw\",@progbits\n" \ - "\t.align\t4\n" \ - "\t.type\t_SPARC_Counter, #object\n" \ - "\t.size\t_SPARC_Counter, 28\n" \ - "_SPARC_Counter:\n" \ - "_SPARC_Counter_mutable:\n" \ - "\t.long\t_SPARC_Counter_read_default\n" \ - "\t.long\t_SPARC_Counter_read_default\n" \ - "\t.long\t0\n" \ - "\t.long\t0\n" \ - "\t.long\t0\n" \ - "\t.long\t0\n" \ - "\t.long\t0\n" \ - "\t.previous\n" \ - ) - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _RTEMS_SCORE_SPARCIMPL_H */ diff --git a/cpukit/score/cpu/sparc/sparc-access.S b/cpukit/score/cpu/sparc/sparc-access.S index 277fb7e652..e6628d9eb7 100644 --- a/cpukit/score/cpu/sparc/sparc-access.S +++ b/cpukit/score/cpu/sparc/sparc-access.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * Optimized access routines for SPARC. 
* @@ -16,9 +18,26 @@ * COPYRIGHT (c) 2011 * Aeroflex Gaisler. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/asm.h> diff --git a/cpukit/score/cpu/sparc/sparc-bad-trap.S b/cpukit/score/cpu/sparc/sparc-bad-trap.S new file mode 100644 index 0000000000..f769b64125 --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-bad-trap.S @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This source file contains the implementation of _SPARC_Bad_trap(). 
+ */ + +/* + * Copyright (C) 2021 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/asm.h> +#include <rtems/score/percpu.h> + + /* + * The trap handler entry was set up by TRAP(). + */ + PUBLIC(_SPARC_Bad_trap) +SYM(_SPARC_Bad_trap): + + /* + * Do not use the existing stack since it may be invalid. Use the ISR + * stack for this processor. If the trap was caused from within + * interrupt context, then a return to the context which caused the + * trap would be unreliable. 
+ */ + set SYM(_ISR_Stack_size), %l5 + +#if defined(RTEMS_SMP) && defined(__leon__) + rd %asr17, %l6 + srl %l6, LEON3_ASR17_PROCESSOR_INDEX_SHIFT, %l6 + add %l6, 1, %l4 + smul %l4, %l5, %l5 +#endif + set SYM(_ISR_Stack_area_begin), %l7 + add %l7, %l5, %l7 + andn %l7, CPU_STACK_ALIGNMENT - 1, %l7 + + /* + * Establish an area on the stack for a CPU_Exception_frame. + */ + sub %l7, SPARC_EXCEPTION_FRAME_SIZE, %l7 + + /* + * Start saving the context which caused the trap. + */ + mov %wim, %l4 + rd %y, %l5 + std %l0, [%l7 + SPARC_EXCEPTION_OFFSET_PSR] + SPARC_LEON3FT_B2BST_NOP + std %l2, [%l7 + SPARC_EXCEPTION_OFFSET_NPC] + SPARC_LEON3FT_B2BST_NOP + st %l4, [%l7 + SPARC_EXCEPTION_OFFSET_WIM] + st %l5, [%l7 + SPARC_EXCEPTION_OFFSET_Y] + std %g0, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(0)] + SPARC_LEON3FT_B2BST_NOP + std %g2, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(2)] + SPARC_LEON3FT_B2BST_NOP + std %g4, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(4)] + SPARC_LEON3FT_B2BST_NOP + std %g6, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(6)] + SPARC_LEON3FT_B2BST_NOP + std %i0, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(0)] + SPARC_LEON3FT_B2BST_NOP + std %i2, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(2)] + SPARC_LEON3FT_B2BST_NOP + std %i4, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(4)] + SPARC_LEON3FT_B2BST_NOP + std %i6, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(6)] + + /* + * Initialize %g6 since it may be corrupt. + */ + set SYM(_Per_CPU_Information), %g6 +#if defined(RTEMS_SMP) && defined(__leon__) + sll %l6, PER_CPU_CONTROL_SIZE_LOG2, %l4 + add %g6, %l4, %g6 +#endif + + /* + * Disable WIM traps. + */ + mov %g0, %wim + nop + nop + nop + + /* + * Save the remaining register windows. 
+ */ + set SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g2 + add %l7, SPARC_EXCEPTION_OFFSET_WINDOWS(0), %g3 + +.Lsave_register_windows: + + restore + std %l0, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(0)] + SPARC_LEON3FT_B2BST_NOP + std %l2, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(2)] + SPARC_LEON3FT_B2BST_NOP + std %l4, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(4)] + SPARC_LEON3FT_B2BST_NOP + std %l6, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(6)] + SPARC_LEON3FT_B2BST_NOP + std %i0, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(0)] + SPARC_LEON3FT_B2BST_NOP + std %i2, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(2)] + SPARC_LEON3FT_B2BST_NOP + std %i4, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(4)] + SPARC_LEON3FT_B2BST_NOP + std %i6, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(6)] + add %g3, SPARC_REGISTER_WINDOW_SIZE, %g3 + subcc %g2, 1, %g2 + bne .Lsave_register_windows + nop + + /* + * Go back to register window at trap entry. + */ + restore + + /* + * Initialize the WIM based on the PSR[CWP] to have all register + * windows available for the fatal error procedure. + */ + and %l0, SPARC_PSR_CWP_MASK, %l4 + set 1, %l5 + sll %l5, %l4, %l5 + mov %l5, %wim + +#if SPARC_HAS_FPU == 1 + /* + * Enable the FPU in the new PSR (PSR[EF] == 1). + */ + sethi %hi(SPARC_PSR_EF_MASK), %l4 + or %l0, %l4, %l0 +#endif + + /* + * Enable traps and disable interrupts. 
+ */ + or %l0, 0xf20, %l0 + wr %l0, %psr + nop + nop + nop + +#if SPARC_HAS_FPU == 1 + st %fsr, [%l7 + SPARC_EXCEPTION_OFFSET_FSR] + std %f0, [%l7 + SPARC_EXCEPTION_OFFSET_FP(0)] + SPARC_LEON3FT_B2BST_NOP + std %f2, [%l7 + SPARC_EXCEPTION_OFFSET_FP(1)] + SPARC_LEON3FT_B2BST_NOP + std %f4, [%l7 + SPARC_EXCEPTION_OFFSET_FP(2)] + SPARC_LEON3FT_B2BST_NOP + std %f6, [%l7 + SPARC_EXCEPTION_OFFSET_FP(3)] + SPARC_LEON3FT_B2BST_NOP + std %f8, [%l7 + SPARC_EXCEPTION_OFFSET_FP(4)] + SPARC_LEON3FT_B2BST_NOP + std %f10, [%l7 + SPARC_EXCEPTION_OFFSET_FP(5)] + SPARC_LEON3FT_B2BST_NOP + std %f12, [%l7 + SPARC_EXCEPTION_OFFSET_FP(6)] + SPARC_LEON3FT_B2BST_NOP + std %f14, [%l7 + SPARC_EXCEPTION_OFFSET_FP(7)] + SPARC_LEON3FT_B2BST_NOP + std %f16, [%l7 + SPARC_EXCEPTION_OFFSET_FP(8)] + SPARC_LEON3FT_B2BST_NOP + std %f18, [%l7 + SPARC_EXCEPTION_OFFSET_FP(9)] + SPARC_LEON3FT_B2BST_NOP + std %f20, [%l7 + SPARC_EXCEPTION_OFFSET_FP(10)] + SPARC_LEON3FT_B2BST_NOP + std %f22, [%l7 + SPARC_EXCEPTION_OFFSET_FP(11)] + SPARC_LEON3FT_B2BST_NOP + std %f24, [%l7 + SPARC_EXCEPTION_OFFSET_FP(12)] + SPARC_LEON3FT_B2BST_NOP + std %f26, [%l7 + SPARC_EXCEPTION_OFFSET_FP(13)] + SPARC_LEON3FT_B2BST_NOP + std %f28, [%l7 + SPARC_EXCEPTION_OFFSET_FP(14)] + SPARC_LEON3FT_B2BST_NOP + std %f30, [%l7 + SPARC_EXCEPTION_OFFSET_FP(15)] +#endif + + /* + * Call _Terminate( RTEMS_FATAL_SOURCE_EXCEPTION, %l0 ). + */ + sub %l7, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp + set 9, %o0 + call SYM(_Terminate) + mov %l7, %o1 diff --git a/cpukit/score/cpu/sparc/sparc-context-validate.S b/cpukit/score/cpu/sparc/sparc-context-validate.S index 8bc116d700..ce1c738d7b 100644 --- a/cpukit/score/cpu/sparc/sparc-context-validate.S +++ b/cpukit/score/cpu/sparc/sparc-context-validate.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2015, 2017 embedded brains GmbH. All rights reserved. + * Copyright (C) 2015, 2017 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 
4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S b/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S index 716bcf2884..ebbaec59a6 100644 --- a/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S +++ b/cpukit/score/cpu/sparc/sparc-context-volatile-clobber.S @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2015 embedded brains GmbH. All rights reserved. 
+ * Copyright (c) 2015 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/sparc/sparc-counter-asm.S b/cpukit/score/cpu/sparc/sparc-counter-asm.S deleted file mode 100644 index 44c3fa8edb..0000000000 --- a/cpukit/score/cpu/sparc/sparc-counter-asm.S +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2016, 2018 embedded brains GmbH. All rights reserved. 
- * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include <rtems/asm.h> - - /* - * All functions except _SPARC_Counter_read_clock() in this module are - * sometimes called with traps disabled. - */ - - .section ".text" - .align 4 - - PUBLIC(_SPARC_Counter_read_default) -SYM(_SPARC_Counter_read_default): - sethi %hi(_SPARC_Counter + 12), %o1 - ld [%o1 + %lo(_SPARC_Counter + 12)], %o0 - add %o0, 1, %o0 - st %o0, [%o1 + %lo(_SPARC_Counter + 12)] - jmp %o7 + 8 - nop - - PUBLIC(_SPARC_Counter_read_up) - PUBLIC(_SPARC_Get_timecount_up) -SYM(_SPARC_Counter_read_up): -SYM(_SPARC_Get_timecount_up): - sethi %hi(_SPARC_Counter + 8), %o0 - ld [%o0 + %lo(_SPARC_Counter + 8)], %o0 - jmp %o7 + 8 - ld [%o0], %o0 - - PUBLIC(_SPARC_Counter_read_down) - PUBLIC(_SPARC_Get_timecount_down) -SYM(_SPARC_Counter_read_down): -SYM(_SPARC_Get_timecount_down): - sethi %hi(_SPARC_Counter + 8), %o0 - ld [%o0 + %lo(_SPARC_Counter + 8)], %o0 - ld [%o0], %o0 - jmp %o7 + 8 - xnor %g0, %o0, %o0 - - /* - * For the corresponding C code is something like this: - * - * CPU_Counter_ticks _SPARC_Counter_read_clock_isr_disabled( void ) - * { - * const SPARC_Counter *ctr; - * CPU_Counter_ticks ticks; - * CPU_Counter_ticks accumulated; - * - * ctr = &_SPARC_Counter; - * ticks = *ctr->counter_register; - * accumulated = ctr->accumulated; - * - * if ( ( *ctr->pending_register & ctr->pending_mask ) != 0 ) { - * ticks = *ctr->counter_register; - * accumulated += ctr->interval; - * } - * - * return accumulated - ticks; - * } - */ - PUBLIC(_SPARC_Counter_read_clock_isr_disabled) -SYM(_SPARC_Counter_read_clock_isr_disabled): - sethi %hi(_SPARC_Counter), %o5 - or %o5, %lo(_SPARC_Counter), %o5 - ld [%o5 + 8], %o3 - ld [%o5 + 12], %o4 - ld 
[%o5 + 16], %o2 - ld [%o3], %o0 - ld [%o4], %o1 - btst %o1, %o2 - bne .Lpending_isr_disabled - ld [%o5 + 20], %o4 - jmp %o7 + 8 - sub %o4, %o0, %o0 -.Lpending_isr_disabled: - ld [%o5 + 24], %o5 - ld [%o3], %o0 - add %o4, %o5, %o4 - jmp %o7 + 8 - sub %o4, %o0, %o0 - - /* - * For the corresponding C code see - * _SPARC_Counter_read_clock_isr_disabled() above. - */ - PUBLIC(_SPARC_Counter_read_clock) - PUBLIC(_SPARC_Get_timecount_clock) -SYM(_SPARC_Counter_read_clock): -SYM(_SPARC_Get_timecount_clock): - sethi %hi(_SPARC_Counter), %o5 - or %o5, %lo(_SPARC_Counter), %o5 - ta SPARC_SWTRAP_IRQDIS - ld [%o5 + 8], %o3 - ld [%o5 + 12], %o4 - ld [%o5 + 16], %o2 - ld [%o3], %o0 - ld [%o4], %o1 - btst %o1, %o2 - bne .Lpending - ld [%o5 + 20], %o4 - ta SPARC_SWTRAP_IRQEN -#ifdef __FIX_LEON3FT_TN0018 - /* A nop is added to work around the GRLIB-TN-0018 errata */ - nop -#endif - jmp %o7 + 8 - sub %o4, %o0, %o0 -.Lpending: - ld [%o5 + 24], %o5 - ld [%o3], %o0 - ta SPARC_SWTRAP_IRQEN - add %o4, %o5, %o4 - jmp %o7 + 8 - sub %o4, %o0, %o0 - - PUBLIC(_SPARC_Counter_read_asr23) - PUBLIC(_SPARC_Get_timecount_asr23) -SYM(_SPARC_Counter_read_asr23): -SYM(_SPARC_Get_timecount_asr23): - jmp %o7 + 8 - mov %asr23, %o0 diff --git a/cpukit/score/cpu/sparc/sparc-exception-frame-print.c b/cpukit/score/cpu/sparc/sparc-exception-frame-print.c new file mode 100644 index 0000000000..71f7f89c7c --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-exception-frame-print.c @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This source file contains the SPARC-specific implementation of + * _CPU_Exception_frame_print(). + */ + +/* + * Copyright (C) 2021 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/score/cpu.h> +#include <rtems/bspIo.h> +#include <inttypes.h> + +void _CPU_Exception_frame_print( const CPU_Exception_frame *frame ) +{ + size_t i; + size_t j; + const char *desc; + + switch ( SPARC_REAL_TRAP_NUMBER( frame->trap ) ) { + case 0x01: + desc = " (instruction access exception)"; + break; + case 0x02: + desc = " (illegal instruction)"; + break; + case 0x03: + desc = " (privileged instruction)"; + break; + case 0x04: + desc = " (fp disabled)"; + break; + case 0x05: + desc = " (window overflow)"; + break; + case 0x06: + desc = " (window underflow)"; + break; + case 0x07: + desc = " (memory address not aligned)"; + break; + case 0x08: + desc = " (fp exception)"; + break; + case 0x09: + desc = " (data access exception)"; + break; + case 0x0A: + desc = " (tag overflow)"; + break; + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + desc = " (external interrupt)"; + break; + case 0x24: + desc = " (cp disabled)"; + break; + case 0x28: + desc = " (cp exception)"; + break; + default: + desc = ""; + break; + } + + printk( + "\n" + "unexpected trap %" PRIu32 "%s\n" + "PSR = 0x%08" PRIx32 "\n" + "PC = 0x%08" PRIx32 "\n" + "nPC = 0x%08" PRIx32 "\n" + "WIM = 0x%08" PRIx32 "\n" + "Y = 0x%08" PRIx32 "\n", + frame->trap, + desc, + frame->psr, + frame->pc, + frame->npc, + frame->wim, + frame->y + ); + + for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->global ); ++i ) { + printk( "g%zu = 0x%08" PRIx32 "\n", i, frame->global[ i ] ); + } + + for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->output ); ++i ) { + printk( "o%zu[CWP - 0] = 0x%08" PRIx32 "\n", i, frame->output[ i ] ); + } + + for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->windows ); ++i ) { + const SPARC_Register_window *win; + + win = &frame->windows[ i ]; + + for ( j = 0; j < RTEMS_ARRAY_SIZE( win->local ); ++j ) { 
+ printk( "l%zu[CWP - %zu] = 0x%08" PRIx32 "\n", j, i, win->local[ j ] ); + } + + for ( j = 0; j < RTEMS_ARRAY_SIZE( win->input ); ++j ) { + printk( "i%zu[CWP - %zu] = 0x%08" PRIx32 "\n", j, i, win->input[ j ] ); + } + } + +#if SPARC_HAS_FPU == 1 + printk( "FSR = 0x%08" PRIx32 "\n", frame->fsr ); + + for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->fp ); ++i ) { + j = i * 2; + printk( "fp%zu:fp%zu = 0x%016" PRIx64 "\n", j, j + 1, frame->fp[ i ] ); + } +#endif +} diff --git a/cpukit/score/cpu/sparc/sparc-isr-handler.S b/cpukit/score/cpu/sparc/sparc-isr-handler.S new file mode 100644 index 0000000000..9ecb44e870 --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-isr-handler.S @@ -0,0 +1,637 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/* + * This file contains the basic algorithms for all assembly code used + * in an specific CPU port of RTEMS. These algorithms must be implemented + * in assembly language. + * + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Ported to ERC32 implementation of the SPARC by On-Line Applications + * Research Corporation (OAR) under contract to the European Space + * Agency (ESA). + * + * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995. + * European Space Agency. + */ + +#include <rtems/asm.h> +#include <rtems/score/percpu.h> +#include <libcpu/grlib-tn-0018.h> + +/* + * void _ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * We enter this handler from the 4 instructions in the trap table with + * the following registers assumed to be set as shown: + * + * l0 = PSR + * l1 = PC + * l2 = nPC + * l3 = trap type + * + * NOTE: By an executive defined convention, trap type is between 0 and 255 if + * it is an asynchonous trap and 256 and 511 if it is synchronous. + */ + + .align 4 + PUBLIC(_ISR_Handler) +SYM(_ISR_Handler): + /* + * Fix the return address for synchronous traps. + */ + + andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 + ! Is this a synchronous trap? + be,a win_ovflow ! No, then skip the adjustment + nop ! DELAY + mov %l1, %l6 ! save trapped pc for debug info + mov %l2, %l1 ! do not return to the instruction + add %l2, 4, %l2 ! indicated + +win_ovflow: + /* + * Save the globals this block uses. + * + * These registers are not restored from the locals. Their contents + * are saved directly from the locals into the ISF below. + */ + + mov %g4, %l4 ! 
save the globals this block uses + mov %g5, %l5 + + /* + * When at a "window overflow" trap, (wim == (1 << cwp)). + * If we get here like that, then process a window overflow. + */ + + rd %wim, %g4 + srl %g4, %l0, %g5 ! g5 = win >> cwp ; shift count and CWP + ! are LS 5 bits ; how convenient :) + cmp %g5, 1 ! Is this an invalid window? + bne dont_do_the_window ! No, then skip all this stuff + ! we are using the delay slot + + /* + * The following is same as a 1 position right rotate of WIM + */ + + srl %g4, 1, %g5 ! g5 = WIM >> 1 + sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4 + ! g4 = WIM << (Number Windows - 1) + or %g4, %g5, %g4 ! g4 = (WIM >> 1) | + ! (WIM << (Number Windows - 1)) + + /* + * At this point: + * + * g4 = the new WIM + * g5 is free + */ + + /* + * Since we are tinkering with the register windows, we need to + * make sure that all the required information is in global registers. + */ + + save ! Save into the window + wr %g4, 0, %wim ! WIM = new WIM + nop ! delay slots + nop + nop + + /* + * Now save the window just as if we overflowed to it. + */ + + std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] + SPARC_LEON3FT_B2BST_NOP + + std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] + SPARC_LEON3FT_B2BST_NOP + std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] + + restore + nop + +dont_do_the_window: + /* + * Global registers %g4 and %g5 are saved directly from %l4 and + * %l5 directly into the ISF below. + */ + + /* + * Save the state of the interrupted task -- especially the global + * registers -- in the Interrupt Stack Frame. 
Note that the ISF + * includes a regular minimum stack frame which will be used if + * needed by register window overflow and underflow handlers. + * + * REGISTERS SAME AS AT _ISR_Handler + */ + + sub %fp, CPU_INTERRUPT_FRAME_SIZE, %sp + ! make space for ISF + + std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC + SPARC_LEON3FT_B2BST_NOP + st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC + st %g1, [%sp + ISF_G1_OFFSET] ! save g1 + std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3 + SPARC_LEON3FT_B2BST_NOP + std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above + SPARC_LEON3FT_B2BST_NOP + st %g7, [%sp + ISF_G7_OFFSET] ! save g7 + + std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1 + SPARC_LEON3FT_B2BST_NOP + std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3 + SPARC_LEON3FT_B2BST_NOP + std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5 + SPARC_LEON3FT_B2BST_NOP + std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7 + + rd %y, %g1 + st %g1, [%sp + ISF_Y_OFFSET] ! save y + st %l6, [%sp + ISF_TPC_OFFSET] ! save real trapped pc + + mov %sp, %o1 ! 2nd arg to ISR Handler + + /* + * Increment ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: + * + * l6 = _Thread_Dispatch_disable_level value + * l7 = _ISR_Nest_level value + * + * NOTE: It is assumed that l6 - l7 will be preserved until the ISR + * nest and thread dispatch disable levels are unnested. + */ + + ld [%g6 + PER_CPU_ISR_NEST_LEVEL], %l7 + ld [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6 + + add %l7, 1, %l7 + st %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL] + SPARC_LEON3FT_B2BST_NOP + + add %l6, 1, %l6 + st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + +#if SPARC_HAS_FPU == 1 + /* + * We cannot use an intermediate value for operations with the PSR[EF] + * bit since they use a 13-bit sign extension and PSR[EF] is bit 12. + */ + sethi %hi(SPARC_PSR_EF_MASK), %l5 +#endif + + /* + * If ISR nest level was zero (now 1), then switch stack. + */ + + mov %sp, %fp + subcc %l7, 1, %l7 ! 
outermost interrupt handler? + bnz dont_switch_stacks ! No, then do not switch stacks + +#if defined(RTEMS_PROFILING) + call SYM(_SPARC_Counter_read_ISR_disabled) + nop + mov %o0, %o5 +#else + nop +#endif + + ld [%g6 + PER_CPU_INTERRUPT_STACK_HIGH], %sp + +#if SPARC_HAS_FPU == 1 + /* + * Test if the interrupted thread uses the floating point unit + * (PSR[EF] == 1). In case it uses the floating point unit, then store + * the floating point status register. This has the side-effect that + * all pending floating point operations complete before the store + * completes. The PSR[EF] bit is restored after the call to the + * interrupt handler. Thus post-switch actions (e.g. signal handlers) + * and context switch extensions may still corrupt the floating point + * context. + */ + andcc %l0, %l5, %g0 + beq dont_switch_stacks + nop + st %fsr, [%g6 + SPARC_PER_CPU_FSR_OFFSET] +#endif + +dont_switch_stacks: + /* + * Make sure we have a place on the stack for the window overflow + * trap handler to write into. At this point it is safe to + * enable traps again. + */ + + sub %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp + + /* + * Check if we have an external interrupt (trap 0x11 - 0x1f). If so, + * set the PIL in the %psr to mask off interrupts with lower priority. + * The original %psr in %l0 is not modified since it will be restored + * when the interrupt handler returns. + */ + + mov %l0, %g5 + and %l3, 0x0ff, %g4 + subcc %g4, 0x11, %g0 + bl dont_fix_pil + subcc %g4, 0x1f, %g0 + bg dont_fix_pil + sll %g4, 8, %g4 + and %g4, SPARC_PSR_PIL_MASK, %g4 + andn %l0, SPARC_PSR_PIL_MASK, %g5 + ba pil_fixed + or %g4, %g5, %g5 +dont_fix_pil: + or %g5, SPARC_PSR_PIL_MASK, %g5 +pil_fixed: + +#if SPARC_HAS_FPU == 1 + /* + * Clear the PSR[EF] bit of the interrupted context to ensure that + * interrupt service routines cannot corrupt the floating point context. + */ + andn %g5, %l5, %g5 +#endif + + wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS **** + + /* + * Vector to user's handler. 
+ * + * NOTE: TBR may no longer have vector number in it since + * we just enabled traps. It is definitely in l3. + */ + + sethi %hi(SYM(_ISR_Vector_table)), %g4 + or %g4, %lo(SYM(_ISR_Vector_table)), %g4 + and %l3, 0xFF, %g5 ! remove synchronous trap indicator + sll %g5, 2, %g5 ! g5 = offset into table + ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ] + + + ! o1 = 2nd arg = address of the ISF + ! WAS LOADED WHEN ISF WAS SAVED!!! + mov %l3, %o0 ! o0 = 1st arg = vector number + call %g4 +#if defined(RTEMS_PROFILING) + mov %o5, %l3 ! save interrupt entry instant +#else + nop ! delay slot +#endif + +#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH) + mov %l0, %g1 ! PSR[EF] value of interrupted context + ta SPARC_SWTRAP_IRQDIS_FP ! **** DISABLE INTERRUPTS **** +#else + ta SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS **** +#endif + +#if defined(RTEMS_PROFILING) + cmp %l7, 0 + bne profiling_not_outer_most_exit + nop + call SYM(_SPARC_Counter_read_ISR_disabled) + mov %g1, %l4 ! Save previous interrupt status + mov %o0, %o2 ! o2 = 3rd arg = interrupt exit instant + mov %l3, %o1 ! o1 = 2nd arg = interrupt entry instant + call SYM(_Profiling_Outer_most_interrupt_entry_and_exit) + mov %g6, %o0 ! o0 = 1st arg = per-CPU control +profiling_not_outer_most_exit: +#endif + + /* + * Decrement ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: + * + * o2 = g6->dispatch_necessary value + * o3 = g6->isr_dispatch_disable value + * l6 = g6->thread_dispatch_disable_level value + * l7 = g6->isr_nest_level value + */ + + ldub [%g6 + PER_CPU_DISPATCH_NEEDED], %o2 + ld [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o3 + st %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL] + SPARC_LEON3FT_B2BST_NOP + sub %l6, 1, %l6 + st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + + /* + * Thread dispatching is necessary and allowed if and only if + * g6->dispatch_necessary == 1 and + * g6->isr_dispatch_disable == 0 and + * g6->thread_dispatch_disable_level == 0. 
+ * + * Otherwise, continue with the simple return. + */ + xor %o2, 1, %o2 + or %o2, %l6, %o2 + orcc %o2, %o3, %o2 + bnz simple_return + + /* + * Switch back on the interrupted tasks stack and add enough room to + * invoke the dispatcher. Doing this in the delay slot causes no harm, + * since the stack pointer (%sp) is not used in the simple return path. + */ + sub %fp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp + +isr_dispatch: + + /* Set ISR dispatch disable and thread dispatch disable level to one */ + mov 1, %l6 + st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL] + st %l6, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE] + + /* Call _Thread_Do_dispatch(), this function will enable interrupts */ + + mov 0, %o1 ! ISR level for _Thread_Do_dispatch() + +#if defined(SPARC_USE_LAZY_FP_SWITCH) + /* Test if we interrupted a floating point thread (PSR[EF] == 1) */ + andcc %l0, %l5, %g0 + be .Lnon_fp_thread_dispatch + ld [%g6 + PER_CPU_OFFSET_EXECUTING], %l6 + + /* Set new floating point unit owner to executing thread */ + st %l6, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET] + + call SYM(_Thread_Do_dispatch) + mov %g6, %o0 + + /* + * If we are still the floating point unit owner, then reset the + * floating point unit owner to NULL, otherwise clear PSR[EF] in the + * interrupt frame and let the FP disabled system call do the floating + * point context save/restore. + */ + ld [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET], %l7 + cmp %l6, %l7 + bne,a .Ldisable_fp + andn %l0, %l5, %l0 + st %g0, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET] + ba .Lthread_dispatch_done + nop +.Ldisable_fp: + st %l0, [%fp + ISF_PSR_OFFSET] + ba .Lthread_dispatch_done + nop +.Lnon_fp_thread_dispatch: +#elif defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH) + /* Test if we interrupted a floating point thread (PSR[EF] == 1) */ + andcc %l0, %l5, %g0 + be .Lnon_fp_thread_dispatch + nop + + /* + * Yes, this is a floating point thread, then save the floating point + * context to a new stack frame. Then do the thread dispatch. 
+ * Post-switch actions (e.g. signal handlers) and context switch + * extensions may safely use the floating point unit. + */ + sub %sp, SPARC_FP_FRAME_SIZE, %sp + std %f0, [%sp + SPARC_FP_FRAME_OFFSET_FO_F1] + SPARC_LEON3FT_B2BST_NOP + std %f2, [%sp + SPARC_FP_FRAME_OFFSET_F2_F3] + SPARC_LEON3FT_B2BST_NOP + std %f4, [%sp + SPARC_FP_FRAME_OFFSET_F4_F5] + SPARC_LEON3FT_B2BST_NOP + std %f6, [%sp + SPARC_FP_FRAME_OFFSET_F6_F7] + SPARC_LEON3FT_B2BST_NOP + std %f8, [%sp + SPARC_FP_FRAME_OFFSET_F8_F9] + SPARC_LEON3FT_B2BST_NOP + std %f10, [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11] + SPARC_LEON3FT_B2BST_NOP + std %f12, [%sp + SPARC_FP_FRAME_OFFSET_F12_F13] + SPARC_LEON3FT_B2BST_NOP + std %f14, [%sp + SPARC_FP_FRAME_OFFSET_F14_F15] + SPARC_LEON3FT_B2BST_NOP + std %f16, [%sp + SPARC_FP_FRAME_OFFSET_F16_F17] + SPARC_LEON3FT_B2BST_NOP + std %f18, [%sp + SPARC_FP_FRAME_OFFSET_F18_F19] + SPARC_LEON3FT_B2BST_NOP + std %f20, [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21] + SPARC_LEON3FT_B2BST_NOP + std %f22, [%sp + SPARC_FP_FRAME_OFFSET_F22_F23] + SPARC_LEON3FT_B2BST_NOP + std %f24, [%sp + SPARC_FP_FRAME_OFFSET_F24_F25] + SPARC_LEON3FT_B2BST_NOP + std %f26, [%sp + SPARC_FP_FRAME_OFFSET_F26_F27] + SPARC_LEON3FT_B2BST_NOP + std %f28, [%sp + SPARC_FP_FRAME_OFFSET_F28_F29] + SPARC_LEON3FT_B2BST_NOP + std %f30, [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31] + SPARC_LEON3FT_B2BST_NOP + st %fsr, [%sp + SPARC_FP_FRAME_OFFSET_FSR] + call SYM(_Thread_Do_dispatch) + mov %g6, %o0 + + /* + * Restore the floating point context from stack frame and release the + * stack frame. 
+ */ + ldd [%sp + SPARC_FP_FRAME_OFFSET_FO_F1], %f0 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F2_F3], %f2 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F4_F5], %f4 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F6_F7], %f6 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F8_F9], %f8 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11], %f10 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F12_F13], %f12 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F14_F15], %f14 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F16_F17], %f16 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F18_F19], %f18 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21], %f20 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F22_F23], %f22 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F24_F25], %f24 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F26_F27], %f26 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F28_F29], %f28 + ldd [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31], %f30 + ld [%sp + SPARC_FP_FRAME_OFFSET_FSR], %fsr + ba .Lthread_dispatch_done + add %sp, SPARC_FP_FRAME_SIZE, %sp + +.Lnon_fp_thread_dispatch: +#endif + + call SYM(_Thread_Do_dispatch) + mov %g6, %o0 + +#if SPARC_HAS_FPU == 1 +.Lthread_dispatch_done: +#endif + + ta SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS **** + + /* + * While we had ISR dispatching disabled in this thread, + * did we miss anything? If so, then we need to do another + * _Thread_Do_dispatch() before leaving this ISR dispatch context. + */ + ldub [%g6 + PER_CPU_DISPATCH_NEEDED], %l7 + + orcc %l7, %g0, %g0 ! Is a thread dispatch necessary? + bne isr_dispatch ! Yes, then invoke the dispatcher again. + mov 0, %o1 ! ISR level for _Thread_Do_dispatch() + + /* + * No, then set the ISR dispatch disable flag to zero and continue with + * the simple return. + */ + st %g0, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE] + + /* + * The CWP in place at this point may be different from + * that which was in effect at the beginning of the ISR if we + * have been context switched between the beginning of this invocation + * of _ISR_Handler and this point. Thus the CWP and WIM should + * not be changed back to their values at ISR entry time. 
Any + * changes to the PSR must preserve the CWP. + */ + +simple_return: + ld [%fp + ISF_Y_OFFSET], %l5 ! restore y + wr %l5, 0, %y + + ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC + ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC + rd %psr, %l3 + and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP + andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task + or %l3, %l0, %l0 ! install it later... + andn %l0, SPARC_PSR_ET_MASK, %l0 + + /* + * Restore tasks global and out registers + */ + + mov %fp, %g1 + + ! g1 is restored later + ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3 + ldd [%fp + ISF_G4_OFFSET], %g4 ! restore g4, g5 + ld [%fp + ISF_G7_OFFSET], %g7 ! restore g7 + + ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1 + ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3 + ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5 + ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7 + + /* + * Registers: + * + * ALL global registers EXCEPT G1 and the input registers have + * already been restored and are thus off limits. + * + * The following is the contents of the local registers: + * + * l0 = original psr + * l1 = return address (i.e. PC) + * l2 = nPC + * l3 = CWP + */ + + /* + * if (CWP + 1) is an invalid window then we need to reload it. + * + * WARNING: Traps should now be disabled + */ + + mov %l0, %psr ! **** DISABLE TRAPS **** + nop + nop + nop + rd %wim, %l4 + add %l0, 1, %l6 ! l6 = cwp + 1 + and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it + srl %l4, %l6, %l5 ! l5 = win >> cwp + 1 ; shift count + ! and CWP are conveniently LS 5 bits + cmp %l5, 1 ! Is tasks window invalid? + bne good_task_window + + /* + * The following code is the same as a 1 position left rotate of WIM. + */ + + sll %l4, 1, %l5 ! l5 = WIM << 1 + srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4 + ! l4 = WIM >> (Number Windows - 1) + or %l4, %l5, %l4 ! l4 = (WIM << 1) | + ! (WIM >> (Number Windows - 1)) + + /* + * Now restore the window just as if we underflowed to it. 
+ */ + + wr %l4, 0, %wim ! WIM = new WIM + nop ! must delay after writing WIM + nop + nop + restore ! now into the tasks window + + ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0 + ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2 + ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4 + ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6 + ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0 + ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2 + ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4 + ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 + ! reload of sp clobbers ISF + save ! Back to ISR dispatch window + +good_task_window: + TN0018_WAIT_IFLUSH %l3,%l4 ! GRLIB-TN-0018 work around macro + + mov %l0, %psr ! **** DISABLE TRAPS **** + nop; nop; nop + ! and restore condition codes. + ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1 + TN0018_FIX %l3,%l4 ! GRLIB-TN-0018 work around macro + jmp %l1 ! transfer control and + rett %l2 ! go back to tasks window + +/* end of file */ diff --git a/cpukit/score/cpu/sparc/sparc-isr-install.c b/cpukit/score/cpu/sparc/sparc-isr-install.c new file mode 100644 index 0000000000..3c8208ef24 --- /dev/null +++ b/cpukit/score/cpu/sparc/sparc-isr-install.c @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + +/** + * @file + * + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This source file contains the SPARC-specific implementation of + * _CPU_ISR_install_raw_handler() and _CPU_ISR_install_vector(). + */ + +/* + * COPYRIGHT (c) 1989-2007. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <rtems/score/isr.h> +#include <rtems/rtems/cache.h> + +/* + * This initializes the set of opcodes placed in each trap + * table entry. The routine which installs a handler is responsible + * for filling in the fields for the _handler address and the _vector + * trap type. + * + * The constants following this structure are masks for the fields which + * must be filled in when the handler is installed. + */ +const CPU_Trap_table_entry _CPU_Trap_slot_template = { + 0xa1480000, /* mov %psr, %l0 */ + 0x29000000, /* sethi %hi(_handler), %l4 */ + 0x81c52000, /* jmp %l4 + %lo(_handler) */ + 0xa6102000 /* mov _vector, %l3 */ +}; + +/* + * _CPU_ISR_install_raw_handler + * + * This routine installs the specified handler as a "raw" non-executive + * supported trap handler (a.k.a. interrupt service routine). + * + * Input Parameters: + * vector - trap table entry number plus synchronous + * vs. 
asynchronous information + * new_handler - address of the handler to be installed + * old_handler - pointer to an address of the handler previously installed + * + * Output Parameters: NONE + * *new_handler - address of the handler previously installed + * + * NOTE: + * + * On the SPARC, there are really only 256 vectors. However, the executive + * has no easy, fast, reliable way to determine which traps are synchronous + * and which are asynchronous. By default, synchronous traps return to the + * instruction which caused the interrupt. So if you install a software + * trap handler as an executive interrupt handler (which is desirable since + * RTEMS takes care of window and register issues), then the executive needs + * to know that the return address is to the trap rather than the instruction + * following the trap. + * + * So vectors 0 through 255 are treated as regular asynchronous traps which + * provide the "correct" return address. Vectors 256 through 512 are assumed + * by the executive to be synchronous and to require that the return address + * be fudged. + * + * If you use this mechanism to install a trap handler which must reexecute + * the instruction which caused the trap, then it should be installed as + * an asynchronous trap. This will avoid the executive changing the return + * address. + */ + +void _CPU_ISR_install_raw_handler( + uint32_t vector, + CPU_ISR_raw_handler new_handler, + CPU_ISR_raw_handler *old_handler +) +{ + uint32_t real_vector; + CPU_Trap_table_entry *tbr; + CPU_Trap_table_entry *slot; + uint32_t u32_tbr; + uint32_t u32_handler; + + /* + * Get the "real" trap number for this vector ignoring the synchronous + * versus asynchronous indicator included with our vector numbers. + */ + + real_vector = SPARC_REAL_TRAP_NUMBER( vector ); + + /* + * Get the current base address of the trap table and calculate a pointer + * to the slot we are interested in. 
+ */ + + sparc_get_tbr( u32_tbr ); + + u32_tbr &= 0xfffff000; + + tbr = (CPU_Trap_table_entry *) u32_tbr; + + slot = &tbr[ real_vector ]; + + /* + * Get the address of the old_handler from the trap table. + * + * NOTE: The old_handler returned will be bogus if it does not follow + * the RTEMS model. + */ + +#define HIGH_BITS_MASK 0xFFFFFC00 +#define HIGH_BITS_SHIFT 10 +#define LOW_BITS_MASK 0x000003FF + + if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) { + u32_handler = + (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) | + (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK); + *old_handler = (CPU_ISR_raw_handler) u32_handler; + } else + *old_handler = 0; + + /* + * Copy the template to the slot and then fix it. + */ + + *slot = _CPU_Trap_slot_template; + + u32_handler = (uint32_t) new_handler; + + slot->mov_vector_l3 |= vector; + slot->sethi_of_handler_to_l4 |= + (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT; + slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK); + + /* + * There is no instruction cache snooping, so we need to invalidate + * the instruction cache to make sure that the processor sees the + * changes to the trap table. This step is required on both single- + * and multiprocessor systems. + * + * In a SMP configuration a change to the trap table might be + * missed by other cores. If the system state is up, the other + * cores can be notified using SMP messages that they need to + * flush their icache. If the up state has not been reached + * there is no need to notify other cores. They will do an + * automatic flush of the icache just after entering the up + * state, but before enabling interrupts. 
+ */ + rtems_cache_invalidate_entire_instruction(); +} + +void _CPU_ISR_install_vector( + uint32_t vector, + CPU_ISR_handler new_handler, + CPU_ISR_handler *old_handler +) +{ + uint32_t real_vector; + CPU_ISR_raw_handler ignored; + + /* + * Get the "real" trap number for this vector ignoring the synchronous + * versus asynchronous indicator included with our vector numbers. + */ + + real_vector = SPARC_REAL_TRAP_NUMBER( vector ); + + /* + * Return the previous ISR handler. + */ + + *old_handler = _ISR_Vector_table[ real_vector ]; + + /* + * Install the wrapper so this ISR can be invoked properly. + */ + + _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored ); + + /* + * We put the actual user ISR address in '_ISR_vector_table'. This will + * be used by the _ISR_Handler so the user gets control. + */ + + _ISR_Vector_table[ real_vector ] = new_handler; +} diff --git a/cpukit/score/cpu/sparc/syscall.S b/cpukit/score/cpu/sparc/syscall.S index 737a501098..ee21c1b005 100644 --- a/cpukit/score/cpu/sparc/syscall.S +++ b/cpukit/score/cpu/sparc/syscall.S @@ -11,7 +11,7 @@ * COPYRIGHT: * * COPYRIGHT (c) 1995. European Space Agency. - * Copyright (c) 2016, 2017 embedded brains GmbH + * Copyright (C) 2016, 2017 embedded brains GmbH & Co. KG * * This terms of the RTEMS license apply to this file. * @@ -44,8 +44,10 @@ SYM(syscall): ta 0 ! syscall 1, halt with %g1,%g2,%g3 info PUBLIC(sparc_syscall_exit) + PUBLIC(_CPU_Fatal_halt) SYM(sparc_syscall_exit): +SYM(_CPU_Fatal_halt): mov SYS_exit, %g1 mov %o0, %g2 ! Additional exit code 1 diff --git a/cpukit/score/cpu/sparc/syscall.h b/cpukit/score/cpu/sparc/syscall.h index 2f20886840..6fc8fa3a6f 100644 --- a/cpukit/score/cpu/sparc/syscall.h +++ b/cpukit/score/cpu/sparc/syscall.h @@ -1 +1,9 @@ +/** + * @file + * + * @ingroup RTEMSScoreCPUSPARC + * + * @brief This header file provides system call interfaces. 
+ */ + #define SYS_exit 1 diff --git a/cpukit/score/cpu/sparc64/context.S b/cpukit/score/cpu/sparc64/context.S index e83507d6c9..8e9178218f 100644 --- a/cpukit/score/cpu/sparc64/context.S +++ b/cpukit/score/cpu/sparc64/context.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* context.S * * This file contains the basic algorithms for all assembly code used @@ -6,9 +8,26 @@ * * COPYRIGHT (c) 2010. Gedare Bloom. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <rtems/asm.h> diff --git a/cpukit/score/cpu/sparc64/cpu.c b/cpukit/score/cpu/sparc64/cpu.c index 2d92aa7286..ae3b359876 100644 --- a/cpukit/score/cpu/sparc64/cpu.c +++ b/cpukit/score/cpu/sparc64/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,11 +13,29 @@ * provide support for the SPARC-v9. * COPYRIGHT (c) 2010. Gedare Bloom. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #include <rtems/score/tls.h> #include <rtems/rtems/cache.h> @@ -63,6 +83,15 @@ void _CPU_Initialize(void) _CPU_ISR_Dispatch_disable = 0; } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + uint32_t level; + + level = sparc_disable_interrupts(); + __asm__ volatile ( "mov %0, %%g1 " : "=r" (level) : "0" (level) ); + while (1); /* loop forever */ +} + void _CPU_Context_Initialize( Context_Control *the_context, void *stack_base, @@ -103,7 +132,7 @@ void _CPU_Context_Initialize( the_context->isr_dispatch_disable = 0; if ( tls_area != NULL ) { - void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area ); + void *tcb = _TLS_Initialize_area( tls_area ); the_context->g7 = (uintptr_t) tcb; } diff --git a/cpukit/score/cpu/sparc64/headers.am b/cpukit/score/cpu/sparc64/headers.am deleted file mode 100644 index a2a1c1fb2c..0000000000 --- a/cpukit/score/cpu/sparc64/headers.am +++ /dev/null @@ -1,6 +0,0 @@ -## This file was generated by "./boostrap -H". -include_rtems_HEADERS += score/cpu/sparc64/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/sparc64/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/sparc64/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/sparc64/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/sparc64/include/rtems/score/sparc64.h diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h index e0f56b8e89..90ffe96341 100644 --- a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -16,9 +18,26 @@ * to support the SPARC64 processor. * COPYRIGHT (c) 2010. Gedare Bloom. 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -652,12 +671,6 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template; */ /* - * Support routine to initialize the RTEMS vector table after it is allocated. - */ - -#define _CPU_Initialize_vectors() - -/* * Disable all interrupts for a critical section. The previous * level is returned in _level. 
*/ @@ -684,7 +697,7 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template; #define _CPU_ISR_Flash( _level ) \ sparc_flash_interrupts( _level ) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & SPARC_PSTATE_IE_MASK ) != 0; } @@ -775,25 +788,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/* - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - */ - -#define _CPU_Fatal_halt( _source, _error ) \ - do { \ - uint32_t level; \ - \ - level = sparc_disable_interrupts(); \ - __asm__ volatile ( "mov %0, %%g1 " : "=r" (level) : "0" (level) ); \ - while (1); /* loop forever */ \ - } while (0) - -/* end of Fatal Error manager macros */ - #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE /* Bitfield handler macros */ @@ -940,14 +934,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/sparc64/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h index e137756e62..25a362c350 100644 --- a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,34 +48,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 20 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( "unimp" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/sparc64.h b/cpukit/score/cpu/sparc64/include/rtems/score/sparc64.h index d20c2ff509..14020e75d0 100644 --- a/cpukit/score/cpu/sparc64/include/rtems/score/sparc64.h +++ 
b/cpukit/score/cpu/sparc64/include/rtems/score/sparc64.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -15,9 +17,26 @@ * to support the SPARC64 processor. * COPYRIGHT (c) 2010. Gedare Bloom. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_SPARC_H diff --git a/cpukit/score/cpu/sparc64/interrupt.S b/cpukit/score/cpu/sparc64/interrupt.S index d249b39e8b..70ae679fd2 100644 --- a/cpukit/score/cpu/sparc64/interrupt.S +++ b/cpukit/score/cpu/sparc64/interrupt.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* cpu_asm.s * * This file contains the basic algorithms for all assembly code used @@ -7,9 +9,26 @@ * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR). * COPYRIGHT (c) 2010. Gedare Bloom. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <rtems/asm.h> diff --git a/cpukit/score/cpu/sparc64/sparc64-exception-frame-print.c b/cpukit/score/cpu/sparc64/sparc64-exception-frame-print.c index e54eb60e9f..ba629fd073 100644 --- a/cpukit/score/cpu/sparc64/sparc64-exception-frame-print.c +++ b/cpukit/score/cpu/sparc64/sparc64-exception-frame-print.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/sparc64/sparc64-syscall.S b/cpukit/score/cpu/sparc64/sparc64-syscall.S index ffd6e8538d..9a467e8922 100644 --- a/cpukit/score/cpu/sparc64/sparc64-syscall.S +++ b/cpukit/score/cpu/sparc64/sparc64-syscall.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* * systrap.S * @@ -10,9 +12,26 @@ * * COPYRIGHT (c) 2010. Gedare Bloom. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #include <rtems/asm.h> diff --git a/cpukit/score/cpu/sparc64/sparc64-syscall.h b/cpukit/score/cpu/sparc64/sparc64-syscall.h index 9af3560267..85a74fa0f8 100644 --- a/cpukit/score/cpu/sparc64/sparc64-syscall.h +++ b/cpukit/score/cpu/sparc64/sparc64-syscall.h @@ -1,3 +1,18 @@ +/* + * Copyright (C) 1999 Jiri Gaisler <jgais@ws.estec.esa.nl> + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR + * BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES + * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, + * ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + #define SYS_exit 1 #define SYS_irqdis 2 #define SYS_irqen 3 diff --git a/cpukit/score/cpu/v850/cpu.c b/cpukit/score/cpu/v850/cpu.c index a921775413..e61874f9c1 100644 --- a/cpukit/score/cpu/v850/cpu.c +++ b/cpukit/score/cpu/v850/cpu.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -8,15 +10,33 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). 
* - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/isr.h> #include <string.h> /* for memset */ @@ -30,6 +50,13 @@ void _CPU_Initialize(void) { } +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + __asm__ __volatile__ ( "di" ); + __asm__ __volatile__ ( "mov %0, r10; " : "=r" ((error)) ); + __asm__ __volatile__ ( "halt" ); +} + /* * v850 Specific Information: * diff --git a/cpukit/score/cpu/v850/cpu_asm.S b/cpukit/score/cpu/v850/cpu_asm.S index e9aa738d13..579d26bcee 100644 --- a/cpukit/score/cpu/v850/cpu_asm.S +++ b/cpukit/score/cpu/v850/cpu_asm.S @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -10,9 +12,26 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/v850/headers.am b/cpukit/score/cpu/v850/headers.am deleted file mode 100644 index 547f128401..0000000000 --- a/cpukit/score/cpu/v850/headers.am +++ /dev/null @@ -1,8 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/v850/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/v850/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/v850/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/v850/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/v850/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/v850/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/v850/include/rtems/score/v850.h diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpu.h b/cpukit/score/cpu/v850/include/rtems/score/cpu.h index 1921216e2d..00addf0bc6 100644 --- a/cpukit/score/cpu/v850/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/v850/include/rtems/score/cpu.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,9 +13,26 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_H @@ -327,7 +346,7 @@ typedef struct { __asm__ __volatile__( "di" ); \ } while (0) -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level ) +static inline bool _CPU_ISR_Is_enabled( uint32_t level ) { return ( level & V850_PSW_INTERRUPT_DISABLE_MASK ) != V850_PSW_INTERRUPT_DISABLE; @@ -461,26 +480,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -/** - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. 
- * - * Port Specific Information: - * - * Move the error code into r10, disable interrupts and halt. - */ -#define _CPU_Fatal_halt( _source, _error ) \ - do { \ - __asm__ __volatile__ ( "di" ); \ - __asm__ __volatile__ ( "mov %0, r10; " : "=r" ((_error)) ); \ - __asm__ __volatile__ ( "halt" ); \ - } while (0) - -/* end of Fatal Error manager macros */ - #define CPU_USE_GENERIC_BITFIELD_CODE TRUE #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE @@ -675,14 +674,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - /** Type that can store a 32-bit integer or a pointer. */ typedef uintptr_t CPU_Uint32ptr; diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/v850/include/rtems/score/cpu_asm.h index 8a74cc6410..87a91c6052 100644 --- a/cpukit/score/cpu/v850/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/v850/include/rtems/score/cpu_asm.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -11,9 +13,26 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_CPU_ASM_H diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/v850/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/v850/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h index f7b02ccb6b..5bc1c42459 100644 --- a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,11 +7,28 @@ */ /* - * Copyright (c) 2013 embedded brains GmbH + * Copyright (c) 2013 embedded brains GmbH & Co. 
KG + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _RTEMS_SCORE_CPUIMPL_H @@ -29,34 +48,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 10 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { + (void) pattern; + while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Instruction_illegal( void ) { __asm__ volatile ( ".word 0" ); } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif diff --git a/cpukit/score/cpu/v850/include/rtems/score/v850.h b/cpukit/score/cpu/v850/include/rtems/score/v850.h index 26ab6c209d..9b9729d16e 100644 --- a/cpukit/score/cpu/v850/include/rtems/score/v850.h +++ b/cpukit/score/cpu/v850/include/rtems/score/v850.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -13,9 +15,26 @@ * COPYRIGHT (c) 1989-2012. * On-Line Applications Research Corporation (OAR). * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_V850_H diff --git a/cpukit/score/cpu/v850/v850-exception-frame-print.c b/cpukit/score/cpu/v850/v850-exception-frame-print.c index e54eb60e9f..ba629fd073 100644 --- a/cpukit/score/cpu/v850/v850-exception-frame-print.c +++ b/cpukit/score/cpu/v850/v850-exception-frame-print.c @@ -1,15 +1,28 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /* - * Copyright (c) 2012 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012 embedded brains GmbH & Co. KG * - * embedded brains GmbH - * Obere Lagerstr. 30 - * 82178 Puchheim - * Germany - * <rtems@embedded-brains.de> + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H diff --git a/cpukit/score/cpu/x86_64/cpu.c b/cpukit/score/cpu/x86_64/cpu.c index 6846c431ef..0ad2d1ff3c 100644 --- a/cpukit/score/cpu/x86_64/cpu.c +++ b/cpukit/score/cpu/x86_64/cpu.c @@ -37,6 +37,7 @@ #include "config.h" #endif +#include <rtems/score/cpuimpl.h> #include <rtems/score/idt.h> #include <rtems/score/isr.h> #include <rtems/score/tls.h> @@ -48,3 +49,15 @@ void _CPU_Exception_frame_print(const CPU_Exception_frame *ctx) void _CPU_Initialize(void) { } + +void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ) +{ + ISR_Level level; + + _CPU_ISR_Disable( level ); + (void) level; + + while ( true ) { + /* Do nothing */ + } +} diff --git a/cpukit/score/cpu/x86_64/headers.am b/cpukit/score/cpu/x86_64/headers.am deleted file mode 100644 index 3ce32c3317..0000000000 --- a/cpukit/score/cpu/x86_64/headers.am +++ /dev/null @@ -1,9 +0,0 @@ -## This file was generated by "./boostrap -H". -include_machine_HEADERS += score/cpu/x86_64/include/machine/elf_machdep.h -include_rtems_HEADERS += score/cpu/x86_64/include/rtems/asm.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/cpu.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/cpu_asm.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/cpuatomic.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/cpuimpl.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/idt.h -include_rtems_score_HEADERS += score/cpu/x86_64/include/rtems/score/x86_64.h diff --git a/cpukit/score/cpu/x86_64/include/machine/elf_machdep.h b/cpukit/score/cpu/x86_64/include/machine/elf_machdep.h index 5f5cf1d6e0..ee50640df2 100644 --- a/cpukit/score/cpu/x86_64/include/machine/elf_machdep.h +++ b/cpukit/score/cpu/x86_64/include/machine/elf_machdep.h @@ -1,4 +1,76 @@ -/* - * XXX: Needs research as to purpose. 
Seems like this might do: - * https://github.com/NetBSD/src/blob/trunk/sys/arch/amd64/include/elf_machdep.h - */ +/* $NetBSD: elf_machdep.h,v 1.4.18.1 2017/12/03 11:35:47 jdolecek Exp $ */ + +#if !defined __i386__ + +#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB +#define ELF32_MACHDEP_ID_CASES \ + case EM_386: \ + break; + +#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB +#define ELF64_MACHDEP_ID_CASES \ + case EM_X86_64: \ + break; + +#define ELF32_MACHDEP_ID EM_386 +#define ELF64_MACHDEP_ID EM_X86_64 + +#define KERN_ELFSIZE 64 +#define ARCH_ELFSIZE 64 /* MD native binary size */ + +/* x86-64 relocations */ + +#define R_X86_64_NONE 0 +#define R_X86_64_64 1 +#define R_X86_64_PC32 2 +#define R_X86_64_GOT32 3 +#define R_X86_64_PLT32 4 +#define R_X86_64_COPY 5 +#define R_X86_64_GLOB_DAT 6 +#define R_X86_64_JUMP_SLOT 7 +#define R_X86_64_RELATIVE 8 +#define R_X86_64_GOTPCREL 9 +#define R_X86_64_32 10 +#define R_X86_64_32S 11 +#define R_X86_64_16 12 +#define R_X86_64_PC16 13 +#define R_X86_64_8 14 +#define R_X86_64_PC8 15 + +/* TLS relocations */ +#define R_X86_64_DTPMOD64 16 +#define R_X86_64_DTPOFF64 17 +#define R_X86_64_TPOFF64 18 +#define R_X86_64_TLSGD 19 +#define R_X86_64_TLSLD 20 +#define R_X86_64_DTPOFF32 21 +#define R_X86_64_GOTTPOFF 22 +#define R_X86_64_TPOFF32 23 + +#define R_X86_64_PC64 24 +#define R_X86_64_GOTOFF64 25 +#define R_X86_64_GOTPC32 26 +#define R_X86_64_GOT64 27 +#define R_X86_64_GOTPCREL64 28 +#define R_X86_64_GOTPC64 29 +#define R_X86_64_GOTPLT64 30 +#define R_X86_64_PLTOFF64 31 +#define R_X86_64_SIZE32 32 +#define R_X86_64_SIZE64 33 +#define R_X86_64_GOTPC32_TLSDESC 34 +#define R_X86_64_TLSDESC_CALL 35 +#define R_X86_64_TLSDESC 36 +#define R_X86_64_IRELATIVE 37 +#define R_X86_64_RELATIVE64 38 +#define R_X86_64_PC32_BND 39 +#define R_X86_64_PLT32_BND 40 +#define R_X86_64_GOTPCRELX 41 +#define R_X86_64_REX_GOTPCRELX 42 + +#define R_TYPE(name) __CONCAT(R_X86_64_,name) + +#else /* !__i386__ */ + +#include <i386/elf_machdep.h> + +#endif /* !__i386__ */ 
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h index 80dbc25214..b26fb4c8ad 100644 --- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h +++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h @@ -144,7 +144,7 @@ typedef struct { #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE #define CPU_STACK_MINIMUM_SIZE (1024*4) #define CPU_SIZEOF_POINTER 8 -#define CPU_ALIGNMENT 8 +#define CPU_ALIGNMENT 16 #define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT #define CPU_STACK_ALIGNMENT 16 #define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES @@ -155,8 +155,6 @@ typedef struct { #ifndef ASM -#define _CPU_Initialize_vectors() - #define _CPU_ISR_Enable(_level) \ { \ amd64_enable_interrupts(); \ @@ -179,12 +177,12 @@ typedef struct { (void) _level; /* Prevent -Wunused-but-set-variable */ \ } -RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled(uint32_t level) +static inline bool _CPU_ISR_Is_enabled(uint32_t level) { return (level & EFLAGS_INTR_ENABLE) != 0; } -RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level) +static inline void _CPU_ISR_Set_level(uint32_t new_level) { if ( new_level ) { amd64_disable_interrupts(); @@ -194,7 +192,7 @@ RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level) } } -RTEMS_INLINE_ROUTINE uint32_t _CPU_ISR_Get_level(void) +static inline uint32_t _CPU_ISR_Get_level(void) { uint64_t rflags; @@ -234,14 +232,6 @@ void _CPU_Context_Initialize( /* end of Context handler macros */ -/* Fatal Error manager macros */ - -#define _CPU_Fatal_halt( _source, _error ) \ - { \ - } - -/* end of Fatal Error manager macros */ - #define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE /* Bitfield handler macros */ @@ -314,15 +304,6 @@ uint32_t _CPU_Counter_frequency( void ); CPU_Counter_ticks _CPU_Counter_read( void ); - -static inline CPU_Counter_ticks _CPU_Counter_difference( - CPU_Counter_ticks second, - CPU_Counter_ticks first -) -{ - return second - first; -} - #ifdef RTEMS_SMP * uint32_t 
_CPU_SMP_Initialize( void ); @@ -340,16 +321,6 @@ static inline CPU_Counter_ticks _CPU_Counter_difference( void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); - static inline void _CPU_SMP_Processor_event_broadcast( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - - static inline void _CPU_SMP_Processor_event_receive( void ) - { - __asm__ volatile ( "" : : : "memory" ); - } - static inline bool _CPU_Context_Get_is_executing( const Context_Control *context ) diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h index 4ad50b9f42..10e0887cb9 100644 --- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h @@ -31,7 +31,7 @@ #include <rtems/score/basedefs.h> -RTEMS_INLINE_ROUTINE uint8_t inport_byte(uint16_t port) +static inline uint8_t inport_byte(uint16_t port) { uint8_t ret; __asm__ volatile ( "inb %1, %0" @@ -40,12 +40,12 @@ RTEMS_INLINE_ROUTINE uint8_t inport_byte(uint16_t port) return ret; } -RTEMS_INLINE_ROUTINE void outport_byte(uint16_t port, uint8_t val) +static inline void outport_byte(uint16_t port, uint8_t val) { __asm__ volatile ( "outb %0, %1" : : "a" (val), "Nd" (port) ); } -RTEMS_INLINE_ROUTINE uint16_t amd64_get_cs(void) +static inline uint16_t amd64_get_cs(void) { uint16_t segment = 0; @@ -54,12 +54,12 @@ RTEMS_INLINE_ROUTINE uint16_t amd64_get_cs(void) return segment; } -RTEMS_INLINE_ROUTINE void amd64_set_cr3(uint64_t segment) +static inline void amd64_set_cr3(uint64_t segment) { __asm__ volatile ( "movq %0, %%cr3" : "=r" (segment) : "0" (segment) ); } -RTEMS_INLINE_ROUTINE void cpuid( +static inline void cpuid( uint32_t code, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx ) { __asm__ volatile ( "cpuid" @@ -67,7 +67,7 @@ RTEMS_INLINE_ROUTINE void cpuid( : "a" (code) ); } -RTEMS_INLINE_ROUTINE uint64_t rdmsr(uint32_t msr) +static inline uint64_t rdmsr(uint32_t msr) { uint32_t low, high; 
__asm__ volatile ( "rdmsr" : @@ -76,23 +76,23 @@ RTEMS_INLINE_ROUTINE uint64_t rdmsr(uint32_t msr) return low | (uint64_t) high << 32; } -RTEMS_INLINE_ROUTINE void wrmsr(uint32_t msr, uint32_t low, uint32_t high) +static inline void wrmsr(uint32_t msr, uint32_t low, uint32_t high) { __asm__ volatile ( "wrmsr" : : "a" (low), "d" (high), "c" (msr) ); } -RTEMS_INLINE_ROUTINE void amd64_enable_interrupts(void) +static inline void amd64_enable_interrupts(void) { __asm__ volatile ( "sti" ); } -RTEMS_INLINE_ROUTINE void amd64_disable_interrupts(void) +static inline void amd64_disable_interrupts(void) { __asm__ volatile ( "cli" ); } -RTEMS_INLINE_ROUTINE void stub_io_wait(void) +static inline void stub_io_wait(void) { /* XXX: This likely won't be required on any modern boards, but this function * exists so it's easier to find all the places it may be used. diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpuatomic.h deleted file mode 100644 index 598ee76b20..0000000000 --- a/cpukit/score/cpu/x86_64/include/rtems/score/cpuatomic.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * COPYRIGHT (c) 2012-2013 Deng Hengyi. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#ifndef _RTEMS_SCORE_ATOMIC_CPU_H -#define _RTEMS_SCORE_ATOMIC_CPU_H - -#include <rtems/score/cpustdatomic.h> - -#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h index 1730799952..d4b7a71009 100644 --- a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h +++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h @@ -8,7 +8,7 @@ * Copyright (c) 2018. * Amaan Cheval <amaan.cheval@gmail.com> * - * Copyright (c) 2013, 2016 embedded brains GmbH + * Copyright (C) 2013, 2016 embedded brains GmbH & Co. 
KG * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -32,34 +32,57 @@ #define CPU_PER_CPU_CONTROL_SIZE 0 +#define CPU_THREAD_LOCAL_STORAGE_VARIANT 20 + #ifndef ASM #ifdef __cplusplus extern "C" { #endif -RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern ) +RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error ); + +static inline void _CPU_Context_volatile_clobber( uintptr_t pattern ) { + (void) pattern; + /* TODO */ } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void ) +static inline void _CPU_Context_validate( uintptr_t pattern ) { - __asm__ volatile ( ".word 0" ); -} + (void) pattern; -RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern ) -{ while (1) { /* TODO */ } } -RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void ) +static inline void _CPU_Instruction_illegal( void ) +{ + __asm__ volatile ( ".word 0" ); +} + +static inline void _CPU_Instruction_no_operation( void ) { __asm__ volatile ( "nop" ); } +static inline void _CPU_Use_thread_local_storage( + const Context_Control *context +) +{ + (void) context; +} + +static inline void *_CPU_Get_TLS_thread_pointer( + const Context_Control *context +) +{ + (void) context; + return NULL; +} + #ifdef __cplusplus } #endif |