Diffstat (limited to 'cpukit/score/cpu/aarch64/include')
-rw-r--r--  cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h                    96
-rw-r--r--  cpukit/score/cpu/aarch64/include/machine/elf_machdep.h                    256
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h  1114
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpu.h                          63
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h                    42
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h                      46
6 files changed, 1538 insertions(+), 79 deletions(-)
diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
new file mode 100644
index 0000000000..ca9b60e6d1
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUAArch64
+ *
+ * @brief Definitions used in MMU setup.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LIBCPU_AARCH64_MMU_VMSAV8_64_H
+#define LIBCPU_AARCH64_MMU_VMSAV8_64_H
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <rtems.h>
+
+/* VMSAv8 Long-descriptor fields */
+#define MMU_DESC_AF ( 1 << 10 )
+#define MMU_DESC_SH_INNER ( ( 1 << 9 ) | ( 1 << 8 ) )
+#define MMU_DESC_WRITE_DISABLE ( 1 << 7 )
+/* PAGE and TABLE flags are the same bit, but only apply on certain levels */
+#define MMU_DESC_TYPE_TABLE ( 1 << 1 )
+#define MMU_DESC_TYPE_PAGE ( 1 << 1 )
+#define MMU_DESC_VALID ( 1 << 0 )
+#define MMU_DESC_MAIR_ATTR( val ) ( ( ( val ) & 0x3 ) << 2 )
+#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL
+
+/* Page table configuration */
+#define MMU_PAGE_BITS 12
+#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
+#define MMU_BITS_PER_LEVEL 9
+
+#define AARCH64_MMU_FLAGS_BASE \
+ ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
+
+#define AARCH64_MMU_DATA_RO_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED
+#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED
+
+#define AARCH64_MMU_DATA_RO \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 2 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO
+#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW
+
+/* RW implied by not ORing in MMU_DESC_WRITE_DISABLE */
+#define AARCH64_MMU_DATA_RW_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) )
+#define AARCH64_MMU_DATA_RW \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 2 ) )
+#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) )
+
+rtems_status_code aarch64_mmu_map(
+ uintptr_t addr,
+ uint64_t size,
+ uint64_t flags
+);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* ASM */
+
+#endif /* LIBCPU_AARCH64_MMU_VMSAV8_64_H */
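As a usage sketch (not part of the change itself), the new aarch64_mmu_map() interface combines with the flag sets above to install mappings at runtime. The UART_MMIO_BASE address below is a made-up placeholder, and treating MAIR attribute 0 as device memory assumes the usual RTEMS BSP MAIR configuration:

#include <libcpu/mmu-vmsav8-64.h>

/* Hypothetical MMIO base address; the real value is board-specific */
#define UART_MMIO_BASE 0x09000000

static void map_uart_registers( void )
{
  rtems_status_code sc;

  /* Map one 4 KiB page with device memory attributes (MAIR index 0) */
  sc = aarch64_mmu_map( UART_MMIO_BASE, MMU_PAGE_SIZE, AARCH64_MMU_DEVICE );

  if ( sc != RTEMS_SUCCESSFUL ) {
    /* The range was invalid or page table allocation failed */
  }
}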
diff --git a/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
new file mode 100644
index 0000000000..c1d219d715
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
@@ -0,0 +1,256 @@
+/* $NetBSD: elf_machdep.h,v 1.4 2018/10/12 01:28:58 ryo Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ELF_MACHDEP_H_
+#define _AARCH64_ELF_MACHDEP_H_
+
+#ifdef __aarch64__
+
+#if defined(__AARCH64EB__)
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2MSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2MSB
+#else
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB
+#endif
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x00000001
+#define EF_ARM_HASENTRY 0x00000002
+#define EF_ARM_INTERWORK 0x00000004 /* GNU binutils 000413 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ARM ELF A08 */
+#define EF_ARM_APCS_26 0x00000008 /* GNU binutils 000413 */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ARM ELF B01 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* GNU binutils 000413 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ARM ELF B01 */
+#define EF_ARM_PIC 0x00000020
+#define EF_ARM_ALIGN8 0x00000040 /* 8-bit structure alignment. */
+#define EF_ARM_NEW_ABI 0x00000080
+#define EF_ARM_OLD_ABI 0x00000100
+#define EF_ARM_SOFT_FLOAT 0x00000200
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_EABIMASK 0xff000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define ELF32_MACHDEP_ID_CASES \
+ case EM_ARM: \
+ break;
+
+#define ELF64_MACHDEP_ID_CASES \
+ case EM_AARCH64: \
+ break;
+
+#define ELF64_MACHDEP_ID EM_AARCH64
+#define ELF32_MACHDEP_ID EM_ARM
+
+#define KERN_ELFSIZE 64
+#define ARCH_ELFSIZE 64 /* MD native binary size */
+
+/* Processor specific relocation types */
+
+#define R_AARCH64_NONE 0
+#define R_AARCH64_NONE2 256
+
+#define R_AARCH64_ABS64 257 /* S + A */
+#define R_AARCH64_ABS32 258 /* S + A */
+#define R_AARCH64_ABS16 259 /* S + A */
+#define R_AARCH64_PREL64 260 /* S + A - P */
+#define R_AARCH64_PREL32 261 /* S + A - P */
+#define R_AARCH64_PREL16 262 /* S + A - P */
+#define R_AARCH64_MOVW_UABS_G0 263 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G0_NC 264 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G1 265 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G1_NC 266 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G2 267 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G2_NC 268 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G3 269 /* S + A [bits 48..63] */
+#define R_AARCH64_MOVW_SABS_G0 270 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_SABS_G1 271 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_SABS_G2 272 /* S + A [bits 32..47] */
+#define R_AARCH64_LD_PREL_LO19 273 /* S + A - P */
+#define R_AARCH64_ADR_PREL_LO21 274 /* S + A - P */
+#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADD_ABS_LO12_NC 277 /* S + A */
+#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* S + A */
+#define R_AARCH_TSTBR14 279 /* S + A - P */
+#define R_AARCH_CONDBR19 280 /* S + A - P */
+#define R_AARCH_JUMP26 282 /* S + A - P */
+#define R_AARCH_CALL26 283 /* S + A - P */
+#define R_AARCH_LDST16_ABS_LO12_NC 284 /* S + A */
+#define R_AARCH_LDST32_ABS_LO12_NC 285 /* S + A */
+#define R_AARCH_LDST64_ABS_LO12_NC 286 /* S + A */
+#define R_AARCH64_MOVW_PREL_G0 287 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G0_NC 288 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1 289 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1_NC 290 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2 291 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2_NC 292 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G3 293 /* S + A - P */
+
+#define R_AARCH64_LDST128_ABS_LO12_NC 299 /* S + A */
+#define R_AARCH64_MOVW_GOTOFF_G0 300 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1 302 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2 304 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G3 306 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_GOTREL64 307 /* S + A - GOT */
+#define R_AARCH64_GOTREL32 308 /* S + A - GOT */
+#define R_AARCH64_GOT_LD_PREL19 309 /* G(GDAT(S + A)) - P */
+#define R_AARCH64_LD64_GOTOFF_LO15 310 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_ADR_GOT_PAGE 311 /* Page(G(GDAT(S + A))) - Page(GOT) */
+#define R_AARCH64_LD64_GOT_LO12_NC 312 /* G(GDAT(S + A)) */
+#define R_AARCH64_LD64_GOTPAGE_LO15 313 /* G(GDAT(S + A)) - Page(GOT) */
+
+#define R_AARCH64_TLSGD_ADR_PREL21 512 /* G(GTLSIDX(S,A)) - P */
+#define R_AARCH64_TLSGD_ADR_PAGE21 513 /* Page(G(GTLSIDX(S,A))) - Page(P) */
+#define R_AARCH64_TLSGD_ADD_LO12_NC 514 /* G(GTLSIDX(S,A)) */
+#define R_AARCH64_TLSGD_MOVW_G1 515 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSGD_MOVW_G0_NC 516 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSLD_ADR_PREL21 517 /* G(GLDM(S,A)) - P */
+#define R_AARCH64_TLSLD_ADR_PAGE21 518 /* Page(G(GLDM(S))) - Page(P) */
+#define R_AARCH64_TLSLD_ADD_LO12_NC 519 /* G(GLDM(S)) */
+#define R_AARCH64_TLSLD_MOVW_G1 520 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_MOVW_G0_NC 521 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_LD_PREL21 522 /* G(GLDM(S)) - P */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* DTPREL(S+A) */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 /* Page(G(GTPREL(S+A))) - Page(P) */
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 /* G(GTPREL(S+A)) */
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* G(GTPREL(S+A)) - P */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1 545 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1_NC 546 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0 547 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0_NC 548 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_HI12 549 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12 550 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12_NC 551 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12 552 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12_NC 553 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12 554 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12_NC 555 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12 556 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12_NC 557 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12 558 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12_NC 559 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC_LD_PREL19 560 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PREL21 561 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PAGE21 562 /* Page(G(GTLSDESC(S+A))) - Page(P) */
+#define R_AARCH64_TLSDESC_LD64_LO12 563 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_ADD_LO12 564 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_OFF_G1 565 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_OFF_G0_NC 566 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_LDR 567 /* */
+#define R_AARCH64_TLSDESC_ADD 568 /* */
+#define R_AARCH64_TLSDESC_CALL 569 /* */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12 570 /* TPREL(S+A) */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC 571 /* TPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12 572 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 573 /* DTPREL(S+A) */
+
+/* Dynamic Relocations */
+#define R_AARCH64_P32_COPY 180
+#define R_AARCH64_P32_GLOB_DAT 181 /* S + A */
+#define R_AARCH64_P32_JUMP_SLOT 182 /* S + A */
+#define R_AARCH64_P32_RELATIVE 183 /* Delta(S) + A */
+#define R_AARCH64_P32_TLS_DTPREL 184 /* DTPREL(S+A) */
+#define R_AARCH64_P32_TLS_DTPMOD 185 /* LBM(S) */
+#define R_AARCH64_P32_TLS_TPREL 186 /* TPREL(S+A) */
+#define R_AARCH64_P32_TLSDESC 187 /* TLSDESC(S+A) */
+#define R_AARCH64_P32_IRELATIVE 188 /* Indirect(Delta(S) + A) */
+
+#define R_AARCH64_COPY 1024
+#define R_AARCH64_GLOB_DAT 1025 /* S + A */
+#define R_AARCH64_JUMP_SLOT 1026 /* S + A */
+#define R_AARCH64_RELATIVE 1027 /* Delta(S) + A */
+#define R_AARCH64_TLS_DTPREL64 1028 /* DTPREL(S+A) */
+#define R_AARCH64_TLS_DTPMOD64 1029 /* LBM(S) */
+#define R_AARCH64_TLS_TPREL64 1030 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC 1031 /* TLSDESC(S+A) */
+#define R_AARCH64_IRELATIVE 1032 /* Indirect(Delta(S) + A) */
+
+#define R_TYPE(name) R_AARCH64_ ## name
+#define R_TLS_TYPE(name) R_AARCH64_ ## name ## 64
+
+/* Processor specific program header types */
+#define PT_AARCH64_ARCHEXT (PT_LOPROC + 0)
+#define PT_AARCH64_UNWIND (PT_LOPROC + 1)
+
+/* Processor specific section header flags */
+#define SHF_ENTRYSECT 0x10000000
+#define SHF_COMDEF 0x80000000
+
+#define SHT_AARCH64_ATTRIBUTES (SHT_LOPROC + 3)
+
+#ifdef _KERNEL
+#ifdef ELFSIZE
+#define ELF_MD_PROBE_FUNC ELFNAME2(aarch64_netbsd,probe)
+#endif
+
+struct exec_package;
+
+int aarch64_netbsd_elf64_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+int aarch64_netbsd_elf32_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+#endif
+
+#elif defined(__arm__)
+
+#include <arm/elf_machdep.h>
+
+#endif
+
+#endif /* _AARCH64_ELF_MACHDEP_H_ */
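For orientation, the S/A/P expressions in the comments above describe the value each relocation computes: S is the symbol address, A the addend, and P the address of the place being relocated. A minimal sketch of how a loader might apply the two most common data relocations, illustrative only and not part of this header (it assumes machine/elf_machdep.h is included for the R_AARCH64_* constants):

#include <stdint.h>

/* S = symbol value, A = addend, P = address of the relocated field */
static void apply_data_reloc( uint32_t type, uint64_t S, int64_t A, uint64_t P )
{
  switch ( type ) {
    case R_AARCH64_ABS64:  /* S + A */
      *(uint64_t *) (uintptr_t) P = S + (uint64_t) A;
      break;
    case R_AARCH64_PREL32: /* S + A - P */
      *(uint32_t *) (uintptr_t) P = (uint32_t) ( S + (uint64_t) A - P );
      break;
  }
}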
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
index dc2afdeca8..8ddad5becf 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
@@ -8,7 +8,7 @@
*/
/*
- * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2020 embedded brains GmbH & Co. KG
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -6719,21 +6719,291 @@ static inline uint64_t _AArch64_Read_dbgauthstatus_el1( void )
#define AARCH64_DBGBCR_N_EL1_BT_GET( _reg ) \
( ( ( _reg ) >> 20 ) & 0xfU )
-static inline uint64_t _AArch64_Read_dbgbcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -6781,21 +7051,291 @@ static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgbvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7027,21 +7567,291 @@ static inline void _AArch64_Write_dbgvcr32_el2( uint64_t value )
#define AARCH64_DBGWCR_N_EL1_MASK_GET( _reg ) \
( ( ( _reg ) >> 24 ) & 0x1fU )
-static inline uint64_t _AArch64_Read_dbgwcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7065,21 +7875,291 @@ static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgwvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
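The AArch64 MRS/MSR instructions encode the system register name as an immediate, so the former DBGBCR_N_EL1/DBGBVR_N_EL1/DBGWCR_N_EL1/DBGWVR_N_EL1 pseudo-accessors could not take the breakpoint or watchpoint index as a runtime parameter; this change therefore generates one accessor pair per register. A hedged sketch of how the new accessors might be used to arm hardware breakpoint 0 (the DBGBCR field layout follows the ARM ARM, and self-hosted debug must additionally be enabled via MDSCR_EL1 for the breakpoint to fire; treat the values as illustrative):

static void arm_hw_breakpoint_0( uint64_t address )
{
  /* Breakpoint value register: the instruction address to match */
  _AArch64_Write_dbgbvr0_el1( address );

  /*
   * Breakpoint control register: BAS = 0xF (match the whole A64
   * instruction), PMC = 0b11 (EL1 and EL0), E = 1 (enable).
   */
  _AArch64_Write_dbgbcr0_el1( ( 0xfULL << 5 ) | ( 0x3ULL << 1 ) | 0x1ULL );
}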
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index ae7e2bdcba..aa4f90f1a8 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -43,6 +43,7 @@
#endif
#include <rtems/score/aarch64.h>
#include <libcpu/vectors.h>
+#include <limits.h>
/**
* @addtogroup RTEMSScoreCPUAArch64
@@ -101,7 +102,7 @@
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
-#define CPU_STACK_MINIMUM_SIZE (1024 * 10)
+#define CPU_STACK_MINIMUM_SIZE (1024 * 8)
/* This could be either 4 or 8, depending on the ABI in use.
* Could also use __LP64__ or __ILP32__ */
@@ -156,7 +157,14 @@
extern "C" {
#endif
+/*
+ * This is to fix the following warning:
+ * ISO C does not support '__int128' types
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef unsigned __int128 uint128_t;
+#pragma GCC diagnostic pop
typedef struct {
uint64_t register_x19;
@@ -252,7 +260,7 @@ static inline void AArch64_interrupt_flash( uint64_t isr_cookie )
#define _CPU_ISR_Flash( _isr_cookie ) \
AArch64_interrupt_flash( _isr_cookie )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
+static inline bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
{
return ( isr_cookie & AARCH64_PSTATE_I ) == 0;
}
@@ -285,6 +293,10 @@ void _CPU_Context_Initialize(
{
context->is_executing = is_executing;
}
+
+ RTEMS_NO_RETURN void _AArch64_Start_multitasking( Context_Control *heir );
+
+ #define _CPU_Start_multitasking( _heir ) _AArch64_Start_multitasking( _heir )
#endif
#define _CPU_Context_Restart_self( _the_context ) \
@@ -302,12 +314,6 @@ void _CPU_Initialize( void );
typedef void ( *CPU_ISR_handler )( void );
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-);
-
/**
* @brief CPU switch context.
*/
@@ -378,14 +384,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
void *_CPU_Thread_Idle_body( uintptr_t ignored );
typedef enum {
@@ -406,7 +404,7 @@ typedef enum {
AARCH64_EXCEPTION_LEL32_FIQ = 14,
AARCH64_EXCEPTION_LEL32_SERROR = 15,
MAX_EXCEPTIONS = 16,
- AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
+ AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = INT_MAX
} AArch64_symbolic_exception_name;
#define VECTOR_POINTER_OFFSET 0x78
@@ -434,7 +432,15 @@ static inline void* AArch64_set_exception_handler(
*vector_address = handler;
/* return now-previous vector pointer */
- return (void*)current_vector_pointer;
+
+/*
+ * This suppresses the following warning:
+ * ISO C forbids conversion of function pointer to object pointer type.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ return (void*)current_vector_pointer;
+#pragma GCC diagnostic pop
}
typedef struct {
@@ -524,6 +530,27 @@ typedef struct {
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame );
+
+RTEMS_NO_RETURN void
+_CPU_Exception_dispatch_and_resume( CPU_Exception_frame *frame );
+
+void _CPU_Exception_disable_thread_dispatch( void );
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame );
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame,
+ void *address );
+
+void _CPU_Exception_frame_make_resume_next_instruction(
+ CPU_Exception_frame *frame
+);
+
+void _AArch64_Exception_frame_copy(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+);
+
void _AArch64_Exception_default( CPU_Exception_frame *frame );
/** Type that can store a 32-bit integer or a pointer. */
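The new exception entry points above support recoverable exception handling, for example for debugger integration. A minimal sketch of how a consumer might use them, assuming a fault that maps to a signal and can simply be skipped (the handler name is hypothetical):

/* Hypothetical handler: skip a faulting instruction and continue */
static void resume_after_fault( CPU_Exception_frame *frame )
{
  int signal = _CPU_Exception_frame_get_signal( frame );

  if ( signal >= 0 ) {
    /* Advance the resume address past the faulting instruction */
    _CPU_Exception_frame_make_resume_next_instruction( frame );
    _CPU_Exception_resume( frame ); /* does not return */
  }
}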
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
deleted file mode 100644
index ed8091d73c..0000000000
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuatomic.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-
-/**
- * @file
- *
- * @ingroup RTEMSScoreCPU
- *
- * @brief AArch64 Atomics support
- */
-
-/*
- * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
- * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
-#define _RTEMS_SCORE_ATOMIC_CPU_H
-
-#include <rtems/score/cpustdatomic.h>
-
-#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index 90fd48ad4e..8a0e476899 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -50,8 +50,11 @@
*/
#define CPU_PER_CPU_CONTROL_SIZE 0
+
#define CPU_INTERRUPT_FRAME_SIZE 0x2E0
+#define CPU_THREAD_LOCAL_STORAGE_VARIANT 11
+
#ifndef ASM
#ifdef __cplusplus
@@ -125,20 +128,59 @@ typedef struct {
uint64_t register_fpcr;
} CPU_Interrupt_frame;
+#ifdef RTEMS_SMP
+
+static inline
+struct Per_CPU_Control *_AARCH64_Get_current_per_CPU_control( void )
+{
+ struct Per_CPU_Control *cpu_self;
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ /* Use EL1 Thread ID Register (TPIDR_EL1) */
+ cpu_self = (struct Per_CPU_Control *)(uintptr_t)value;
+
+ return cpu_self;
+}
+
+#define _CPU_Get_current_per_CPU_control() \
+ _AARCH64_Get_current_per_CPU_control()
+
+#endif /* RTEMS_SMP */
+
void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".inst 0x0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ __asm__ volatile (
+ "msr TPIDR_EL0, %0" : : "r" ( context->thread_id ) : "memory"
+ );
+}
+
+static inline void *_CPU_Get_TLS_thread_pointer(
+ const Context_Control *context
+)
+{
+ return (void *)(uintptr_t) context->thread_id;
+}
+
#ifdef __cplusplus
}
#endif
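_CPU_Use_thread_local_storage() installs the heir thread's TLS pointer in TPIDR_EL0 during a context switch; compiled code then addresses thread-local variables relative to that register. A sketch of the read-back direction, in the same style as the accessors above (illustrative; the name _AArch64_Read_tpidr_el0 is not part of this change):

static inline void *_AArch64_Read_tpidr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL0" : "=&r" ( value ) : : "memory"
  );

  return (void *)(uintptr_t) value;
}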