Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/cpu/aarch64/cpu.c | 25
-rw-r--r--  cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h | 1
-rw-r--r--  cpukit/score/cpu/aarch64/include/machine/elf_machdep.h | 256
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpu.h | 36
-rw-r--r--  cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h | 13
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h | 17
-rw-r--r--  cpukit/score/cpu/bfin/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/i386/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h | 34
-rw-r--r--  cpukit/score/cpu/i386/include/rtems/score/i386.h | 4
-rw-r--r--  cpukit/score/cpu/lm32/include/rtems/score/cpu.h | 12
-rw-r--r--  cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/m68k/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h | 19
-rw-r--r--  cpukit/score/cpu/microblaze/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/mips/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/moxie/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/nios2/include/rtems/score/cpu.h | 8
-rw-r--r--  cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h | 16
-rw-r--r--  cpukit/score/cpu/nios2/nios2-iic-irq.c | 2
-rw-r--r--  cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h | 41
-rw-r--r--  cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h | 21
-rw-r--r--  cpukit/score/cpu/or1k/include/rtems/score/cpu.h | 12
-rw-r--r--  cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h | 11
-rw-r--r--  cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h | 10
-rw-r--r--  cpukit/score/cpu/powerpc/cpu.c | 5
-rw-r--r--  cpukit/score/cpu/powerpc/include/rtems/score/cpu.h | 35
-rw-r--r--  cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h | 20
-rw-r--r--  cpukit/score/cpu/powerpc/ppc-context-validate.S | 77
-rw-r--r--  cpukit/score/cpu/riscv/include/rtems/score/cpu.h | 14
-rw-r--r--  cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h | 18
-rw-r--r--  cpukit/score/cpu/sh/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/sparc/include/libcpu/byteorder.h | 16
-rw-r--r--  cpukit/score/cpu/sparc/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h | 16
-rw-r--r--  cpukit/score/cpu/sparc64/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/v850/include/rtems/score/cpu.h | 10
-rw-r--r--  cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/cpu/x86_64/include/rtems/score/cpu.h | 15
-rw-r--r--  cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h | 20
-rw-r--r--  cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h | 15
-rw-r--r--  cpukit/score/src/gcovdumpinfo.c | 96
-rw-r--r--  cpukit/score/src/gcovdumpinfobase64.c | 104
-rw-r--r--  cpukit/score/src/gcovinfoset.c | 43
-rw-r--r--  cpukit/score/src/memorydirtyfreeareas.c | 9
-rw-r--r--  cpukit/score/src/memorynoinit.c | 45
-rw-r--r--  cpukit/score/src/memoryzerofreeareas.c | 9
-rw-r--r--  cpukit/score/src/mpci.c | 2
-rw-r--r--  cpukit/score/src/objectactivecount.c | 18
-rw-r--r--  cpukit/score/src/objectfree.c | 12
-rw-r--r--  cpukit/score/src/percpudata.c | 9
-rw-r--r--  cpukit/score/src/profilingisrentryexit.c | 5
-rw-r--r--  cpukit/score/src/scheduleredfblock.c | 4
-rw-r--r--  cpukit/score/src/scheduleredfchangepriority.c | 5
-rw-r--r--  cpukit/score/src/scheduleredfschedule.c | 5
-rw-r--r--  cpukit/score/src/scheduleredfunblock.c | 20
-rw-r--r--  cpukit/score/src/scheduleredfyield.c | 2
-rw-r--r--  cpukit/score/src/schedulerpriorityblock.c | 4
-rw-r--r--  cpukit/score/src/schedulerprioritychangepriority.c | 5
-rw-r--r--  cpukit/score/src/schedulerpriorityschedule.c | 5
-rw-r--r--  cpukit/score/src/schedulerpriorityunblock.c | 16
-rw-r--r--  cpukit/score/src/schedulerpriorityyield.c | 5
-rw-r--r--  cpukit/score/src/schedulersimpleblock.c | 4
-rw-r--r--  cpukit/score/src/schedulersimplechangepriority.c | 5
-rw-r--r--  cpukit/score/src/schedulersimpleschedule.c | 5
-rw-r--r--  cpukit/score/src/schedulersimpleunblock.c | 20
-rw-r--r--  cpukit/score/src/schedulersimpleyield.c | 5
-rw-r--r--  cpukit/score/src/threadchangepriority.c | 2
-rw-r--r--  cpukit/score/src/threadcreateidle.c | 11
-rw-r--r--  cpukit/score/src/threadinitialize.c | 1
-rw-r--r--  cpukit/score/src/threadrestart.c | 128
78 files changed, 1135 insertions(+), 488 deletions(-)
diff --git a/cpukit/score/cpu/aarch64/cpu.c b/cpukit/score/cpu/aarch64/cpu.c
index f0062adf30..923f53da08 100644
--- a/cpukit/score/cpu/aarch64/cpu.c
+++ b/cpukit/score/cpu/aarch64/cpu.c
@@ -174,31 +174,6 @@ uint32_t _CPU_ISR_Get_level( void )
return ( level & AARCH64_PSTATE_I ) != 0;
}
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-)
-{
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
- /* Redirection table starts at the end of the vector table */
- CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
-
- CPU_ISR_handler current_handler = table [vector];
-
- /* The current handler is now the old one */
- if (old_handler != NULL) {
- *old_handler = current_handler;
- }
-
- /* Write only if necessary to avoid writes to a maybe read-only memory */
- if (current_handler != new_handler) {
- table [vector] = new_handler;
- }
-#pragma GCC diagnostic pop
-}
-
void _CPU_Initialize( void )
{
/* Do nothing */
diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
index 6b6296bb7a..0d65004f88 100644
--- a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
+++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
@@ -60,7 +60,6 @@ extern "C" {
#define MMU_PAGE_BITS 12
#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
#define MMU_BITS_PER_LEVEL 9
-#define MMU_TOP_LEVEL_PAGE_BITS ( 2 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
#define AARCH64_MMU_FLAGS_BASE \
( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
diff --git a/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
new file mode 100644
index 0000000000..c1d219d715
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
@@ -0,0 +1,256 @@
+/* $NetBSD: elf_machdep.h,v 1.4 2018/10/12 01:28:58 ryo Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ELF_MACHDEP_H_
+#define _AARCH64_ELF_MACHDEP_H_
+
+#ifdef __aarch64__
+
+#if defined(__AARCH64EB__)
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2MSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2MSB
+#else
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB
+#endif
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x00000001
+#define EF_ARM_HASENTRY 0x00000002
+#define EF_ARM_INTERWORK 0x00000004 /* GNU binutils 000413 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ARM ELF A08 */
+#define EF_ARM_APCS_26 0x00000008 /* GNU binutils 000413 */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ARM ELF B01 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* GNU binutils 000413 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ARM ELF B01 */
+#define EF_ARM_PIC 0x00000020
+#define EF_ARM_ALIGN8 0x00000040 /* 8-bit structure alignment. */
+#define EF_ARM_NEW_ABI 0x00000080
+#define EF_ARM_OLD_ABI 0x00000100
+#define EF_ARM_SOFT_FLOAT 0x00000200
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_EABIMASK 0xff000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define ELF32_MACHDEP_ID_CASES \
+ case EM_ARM: \
+ break;
+
+#define ELF64_MACHDEP_ID_CASES \
+ case EM_AARCH64: \
+ break;
+
+#define ELF64_MACHDEP_ID EM_AARCH64
+#define ELF32_MACHDEP_ID EM_ARM
+
+#define KERN_ELFSIZE 64
+#define ARCH_ELFSIZE 64 /* MD native binary size */
+
+/* Processor specific relocation types */
+
+#define R_AARCH64_NONE 0
+#define R_AARCH64_NONE2 256
+
+#define R_AARCH64_ABS64 257 /* S + A */
+#define R_AARCH64_ABS32 258 /* S + A */
+#define R_AARCH64_ABS16 259 /* S + A */
+#define R_AARCH64_PREL64 260 /* S + A - P */
+#define R_AARCH64_PREL32 261 /* S + A - P */
+#define R_AARCH64_PREL16 262 /* S + A - P */
+#define R_AARCH64_MOVW_UABS_G0 263 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G0_NC 264 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G1 265 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G1_NC 266 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G2 267 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G2_NC 268 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G3 269 /* S + A [bits 48..63] */
+#define R_AARCH64_MOVW_SABS_G0 270 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_SABS_G1 271 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_SABS_G2 272 /* S + A [bits 32..47] */
+#define R_AARCH64_LD_PREL_LO19 273 /* S + A - P */
+#define R_AARCH64_ADR_PREL_LO21 274 /* S + A - P */
+#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADD_ABS_LO12_NC 277 /* S + A */
+#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* S + A */
+#define R_AARCH_TSTBR14 279 /* S + A - P */
+#define R_AARCH_CONDBR19 281 /* S + A - P */
+#define R_AARCH_JUMP26 282 /* S + A - P */
+#define R_AARCH_CALL26 283 /* S + A - P */
+#define R_AARCH_LDST16_ABS_LO12_NC 284 /* S + A */
+#define R_AARCH_LDST32_ABS_LO12_NC 285 /* S + A */
+#define R_AARCH_LDST64_ABS_LO12_NC 286 /* S + A */
+#define R_AARCH64_MOVW_PREL_G0 287 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G0_NC 288 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1 289 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1_NC 290 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2 291 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2_NC 292 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G3 293 /* S + A - P */
+
+#define R_AARCH64_LDST128_ABS_LO12_NC 299 /* S + A */
+#define R_AARCH64_MOVW_GOTOFF_G0 300 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1 302 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2 304 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G3 306 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_GOTREL64 307 /* S + A - GOT */
+#define R_AARCH64_GOTREL32 308 /* S + A - GOT */
+#define R_AARCH64_GOT_LD_PREL19 309 /* G(GDAT(S + A)) - P */
+#define R_AARCH64_LD64_GOTOFF_LO15 310 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_ADR_GOT_PAGE 311 /* Page(G(GDAT(S + A))) - Page(GOT) */
+#define R_AARCH64_LD64_GOT_LO12_NC 312 /* G(GDAT(S + A)) */
+#define R_AARCH64_LD64_GOTPAGE_LO15 313 /* G(GDAT(S + A)) - Page(GOT) */
+
+#define R_AARCH64_TLSGD_ADR_PREL21 512 /* G(GTLSIDX(S,A)) - P */
+#define R_AARCH64_TLSGD_ADR_PAGE21 513 /* Page(G(GTLSIDX(S,A))) - Page(P) */
+#define R_AARCH64_TLSGD_ADD_LO12_NC 514 /* G(GTLSIDX(S,A)) */
+#define R_AARCH64_TLSGD_MOVW_G1 515 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSGD_MOVW_G0_NV 516 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSLD_ADR_PREL21 517 /* G(GLDM(S,A)) - P */
+#define R_AARCH64_TLSLD_ADR_PAGE21 518 /* Page(G(GLDM(S))) - Page(P) */
+#define R_AARCH64_TLSLD_ADD_LO12_NC 519 /* G(GLDM(S)) */
+#define R_AARCH64_TLSLD_MOVW_G1 520 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_MOVW_G0_NC 521 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_LD_PREL21 522 /* G(GLDM(S)) - P */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* DTPREL(S+A) */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 /* Page(G(GTPREL(S+A))) - Page(P) */
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 /* G(GTPREL(S+A)) */
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* G(GTPREL(S+A)) - P */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1 545 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1_NC 546 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0 547 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0_NC 548 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_HI12 549 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12 550 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12_NC 551 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12 552 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12_NC 553 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12 554 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12_NC 555 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12 556 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12_NC 557 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12 558 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12_NC 559 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC_LD_PREL19 560 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PREL21 561 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PAGE21 562 /* Page(G(GTLSDESC(S+A))) - Page(P) */
+#define R_AARCH64_TLSDESC_LD64_LO12 563 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_ADD_LO12 564 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_OFF_G1 565 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_OFF_G0_NC 566 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_LDR 567 /* */
+#define R_AARCH64_TLSDESC_ADD 568 /* */
+#define R_AARCH64_TLSDESC_CALL 569 /* */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12 570 /* TPREL(S+A) */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC 571 /* TPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12 572 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 572 /* DTPREL(S+A) */
+
+/* Dynamic Relocations */
+#define R_AARCH64_P32_COPY 180
+#define R_AARCH64_P32_GLOB_DAT 181 /* S + A */
+#define R_AARCH64_P32_JUMP_SLOT 182 /* S + A */
+#define R_AARCH64_P32_RELATIVE 183 /* Delta(S) + A */
+#define R_AARCH64_P32_TLS_DTPREL 184 /* DTPREL(S+A) */
+#define R_AARCH64_P32_TLS_DTPMOD 185 /* LBM(S) */
+#define R_AARCH64_P32_TLS_TPREL 186 /* TPREL(S+A) */
+#define R_AARCH64_P32_TLSDESC 187 /* TLSDESC(S+A) */
+#define R_AARCH64_P32_IRELATIVE 188 /* Indirect(Delta(S) + A) */
+
+#define R_AARCH64_COPY 1024
+#define R_AARCH64_GLOB_DAT 1025 /* S + A */
+#define R_AARCH64_JUMP_SLOT 1026 /* S + A */
+#define R_AARCH64_RELATIVE 1027 /* Delta(S) + A */
+#define R_AARCH64_TLS_DTPREL64 1028 /* DTPREL(S+A) */
+#define R_AARCH64_TLS_DTPMOD64 1029 /* LBM(S) */
+#define R_AARCH64_TLS_TPREL64 1030 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC 1031 /* TLSDESC(S+A) */
+#define R_AARCH64_IRELATIVE 1032 /* Indirect(Delta(S) + A) */
+
+#define R_TYPE(name) R_AARCH64_ ## name
+#define R_TLS_TYPE(name) R_AARCH64_ ## name ## 64
+
+/* Processor specific program header types */
+#define PT_AARCH64_ARCHEXT (PT_LOPROC + 0)
+#define PT_AARCH64_UNWIND (PT_LOPROC + 1)
+
+/* Processor specific section header flags */
+#define SHF_ENTRYSECT 0x10000000
+#define SHF_COMDEF 0x80000000
+
+#define SHT_AARCH64_ATTRIBUTES (SHT_LOPROC + 3)
+
+#ifdef _KERNEL
+#ifdef ELFSIZE
+#define ELF_MD_PROBE_FUNC ELFNAME2(aarch64_netbsd,probe)
+#endif
+
+struct exec_package;
+
+int aarch64_netbsd_elf64_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+int aarch64_netbsd_elf32_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+#endif
+
+#elif defined(__arm__)
+
+#include <arm/elf_machdep.h>
+
+#endif
+
+#endif /* _AARCH64_ELF_MACHDEP_H_ */
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index fdc0e3d929..399e0fc802 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -43,6 +43,7 @@
#endif
#include <rtems/score/aarch64.h>
#include <libcpu/vectors.h>
+#include <limits.h>
/**
* @addtogroup RTEMSScoreCPUAArch64
@@ -156,7 +157,14 @@
extern "C" {
#endif
+/*
+ This is to fix the following warning
+ ISO C does not support 'uint128_t' types
+*/
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef unsigned __int128 uint128_t;
+#pragma GCC diagnostic pop
typedef struct {
uint64_t register_x19;
@@ -252,7 +260,7 @@ static inline void AArch64_interrupt_flash( uint64_t isr_cookie )
#define _CPU_ISR_Flash( _isr_cookie ) \
AArch64_interrupt_flash( _isr_cookie )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
+static inline bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
{
return ( isr_cookie & AARCH64_PSTATE_I ) == 0;
}
@@ -306,12 +314,6 @@ void _CPU_Initialize( void );
typedef void ( *CPU_ISR_handler )( void );
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-);
-
/**
* @brief CPU switch context.
*/
@@ -382,14 +384,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
void *_CPU_Thread_Idle_body( uintptr_t ignored );
typedef enum {
@@ -410,7 +404,7 @@ typedef enum {
AARCH64_EXCEPTION_LEL32_FIQ = 14,
AARCH64_EXCEPTION_LEL32_SERROR = 15,
MAX_EXCEPTIONS = 16,
- AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
+ AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = INT_MAX
} AArch64_symbolic_exception_name;
#define VECTOR_POINTER_OFFSET 0x78
@@ -438,7 +432,15 @@ static inline void* AArch64_set_exception_handler(
*vector_address = handler;
/* return now-previous vector pointer */
- return (void*)current_vector_pointer;
+
+/*
+ * This was put in to fix the following warning:
+ * warning: ISO C forbids conversion of function pointer to object pointer type.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ return (void*)current_vector_pointer;
+#pragma GCC diagnostic pop
}
typedef struct {
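Both pragma hunks in the header above use the same scoped-suppression idiom: push the diagnostic state, disable -Wpedantic for the one nonstandard construct, and pop immediately afterwards. A minimal standalone sketch of that pattern (the helper name below is illustrative, not part of the RTEMS API):

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
typedef unsigned __int128 uint128_t;    /* ISO C has no 128-bit integer type */

static void *handler_to_object( void ( *handler )( void ) )
{
  return (void *) handler;              /* ISO C forbids this conversion */
}
#pragma GCC diagnostic pop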
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index ffdef2f30a..880ae7d9f7 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -152,16 +152,25 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".inst 0x0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ __asm__ volatile (
+ "msr TPIDR_EL0, %0" : : "r" ( context->thread_id ) : "memory"
+ );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
index 7ac180ac26..beb917a0b7 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
@@ -402,7 +402,7 @@ static inline void arm_interrupt_flash( uint32_t level )
#define _CPU_ISR_Flash( _isr_cookie ) \
arm_interrupt_flash( _isr_cookie )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
return ( level & 0x80 ) == 0;
@@ -574,14 +574,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
void *_CPU_Thread_Idle_body( uintptr_t ignored );
#if defined(ARM_MULTILIB_ARCH_V4)
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
index 0ce347c86f..6e8230ce30 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
@@ -150,16 +150,29 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( "udf" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
+ __asm__ volatile (
+ "mcr p15, 0, %0, c13, c0, 3" : : "r" ( context->thread_id ) : "memory"
+ );
+#else
+ (void) context;
+#endif
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
index 91b1a91b07..72e6d14433 100644
--- a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
@@ -355,7 +355,7 @@ typedef struct {
: : "d"(_level) : "R0" ); \
}
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return level != 0;
}
@@ -605,14 +605,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
index 1485abd365..a03bc596ba 100644
--- a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
@@ -37,28 +37,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpu.h b/cpukit/score/cpu/i386/include/rtems/score/cpu.h
index 1de9b7544c..6aa97d309e 100644
--- a/cpukit/score/cpu/i386/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/i386/include/rtems/score/cpu.h
@@ -428,7 +428,7 @@ extern Context_Control_fp _CPU_Null_fp_context;
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & EFLAGS_INTR_ENABLE ) != 0;
}
@@ -651,14 +651,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/**@}**/
/** Type that can store a 32-bit integer or a pointer. */
diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
index 31ec0ac8bb..4f99f64711 100644
--- a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
@@ -58,28 +58,54 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ uint32_t tmp;
+ uint32_t cpu_index;
+
+#ifdef RTEMS_SMP
+ cpu_index = _CPU_SMP_Get_current_processor();
+#else
+ cpu_index = 0;
+#endif
+
+ __asm__ volatile (
+ "movl " RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_0_OFFSET ) "(%2), %0\n"
+ "movl %0, _Global_descriptor_table+24(,%1,8)\n"
+ "movl " RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_1_OFFSET ) "(%2), %0\n"
+ "movl %0, _Global_descriptor_table+28(,%1,8)\n"
+ "leal 24(,%1,8), %0\n"
+ "movl %0, %%gs\n"
+ : "=&r" ( tmp )
+ : "r" ( cpu_index ), "r" ( context )
+ : "memory"
+ );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/i386/include/rtems/score/i386.h b/cpukit/score/cpu/i386/include/rtems/score/i386.h
index 6b0ae5d6c2..7598204a64 100644
--- a/cpukit/score/cpu/i386/include/rtems/score/i386.h
+++ b/cpukit/score/cpu/i386/include/rtems/score/i386.h
@@ -223,7 +223,7 @@ void *i386_Physical_to_logical(
* @param[in] offset used with \p segment to compute physical address
* @retval physical address
*/
-RTEMS_INLINE_ROUTINE void *i386_Real_to_physical(
+static inline void *i386_Real_to_physical(
uint16_t segment,
uint16_t offset)
{
@@ -577,7 +577,7 @@ extern segment_descriptors* i386_get_gdt_entry (uint16_t sgmnt_selector);
* @param[in] gdt_entry pointer to entry from which base should be retrieved
* @retval base address from GDT entry
*/
-RTEMS_INLINE_ROUTINE void* i386_base_gdt_entry (segment_descriptors* gdt_entry)
+static inline void* i386_base_gdt_entry (segment_descriptors* gdt_entry)
{
return (void*)(gdt_entry->base_address_15_0 |
(gdt_entry->base_address_23_16<<16) |
diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
index 9bceb3b892..335d3407fe 100644
--- a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
@@ -456,7 +456,7 @@ extern Context_Control_fp _CPU_Null_fp_context;
#define _CPU_ISR_Flash( _isr_cookie ) \
lm32_flash_interrupts( _isr_cookie );
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & 0x0001 ) != 0;
}
@@ -612,7 +612,7 @@ void _CPU_Initialize(void);
typedef void ( *CPU_ISR_raw_handler )( void );
-RTEMS_INLINE_ROUTINE void _CPU_ISR_install_raw_handler(
+static inline void _CPU_ISR_install_raw_handler(
uint32_t vector,
CPU_ISR_raw_handler new_handler,
CPU_ISR_raw_handler *old_handler
@@ -745,14 +745,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
index eb0c058723..dce0cc6017 100644
--- a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
@@ -36,28 +36,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
index e72b735501..8fdcb8c15a 100644
--- a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
@@ -384,7 +384,7 @@ extern void* _VBR;
#define _CPU_ISR_Flash( _level ) \
m68k_flash_interrupts( _level )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & 0x0700 ) == 0;
}
@@ -621,14 +621,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
#if (M68K_HAS_FPSP_PACKAGE == 1)
/*
* Hooks for the Floating Point Support Package (FPSP) provided by Motorola
diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
index e3b61efd9f..b94c846bf2 100644
--- a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
@@ -56,28 +56,39 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( "illegal" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ /*
+ * There is nothing to do since the thread-local storage area is obtained by
+ * calling __m68k_read_tp().
+ */
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h b/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h
index a486c3d2b3..1325962f7c 100644
--- a/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/microblaze/include/rtems/score/cpu.h
@@ -264,7 +264,7 @@ void _CPU_ISR_Set_level( uint32_t level );
uint32_t _CPU_ISR_Get_level( void );
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & MICROBLAZE_MSR_IE ) != 0;
}
@@ -450,14 +450,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
void *_CPU_Thread_Idle_body( uintptr_t ignored );
void bsp_interrupt_dispatch( uint32_t source );
diff --git a/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
index 0573759d52..0b9e06cfa8 100644
--- a/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
@@ -76,16 +76,27 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0x0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ /*
+ * There is nothing to do since the thread-local storage area is obtained by
+ * calling __tls_get_addr().
+ */
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpu.h b/cpukit/score/cpu/mips/include/rtems/score/cpu.h
index fdb7ee4d73..447a384c88 100644
--- a/cpukit/score/cpu/mips/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/mips/include/rtems/score/cpu.h
@@ -602,7 +602,7 @@ uint32_t mips_interrupt_mask( void );
_xlevel = _scratch2; \
} while(0)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & SR_INTERRUPT_ENABLE_BITS ) != 0;
}
@@ -835,14 +835,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
index 0568134351..cb4c925630 100644
--- a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
@@ -56,28 +56,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word -1" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
index 18329a5b3d..b94d47ab31 100644
--- a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
@@ -317,7 +317,7 @@ typedef struct {
_CPU_ISR_Disable( _isr_cookie ); \
} while (0)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return true;
}
@@ -532,14 +532,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
index 038a1326cc..9c6ae11d74 100644
--- a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
@@ -56,28 +56,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
index d7c484f9bc..81f454fd09 100644
--- a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
@@ -353,14 +353,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
index 215df68f67..56c2cb0108 100644
--- a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
@@ -60,16 +60,28 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uint32_t r23 __asm__( "r23" );
+
+ r23 = context->r23;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( r23 ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/nios2/nios2-iic-irq.c b/cpukit/score/cpu/nios2/nios2-iic-irq.c
index 561ba3865e..8215e16a4c 100644
--- a/cpukit/score/cpu/nios2/nios2-iic-irq.c
+++ b/cpukit/score/cpu/nios2/nios2-iic-irq.c
@@ -60,7 +60,7 @@ void __Exception_Handler(CPU_Exception_frame *efr);
register unsigned long *stack_ptr __asm__ ("sp");
-RTEMS_INLINE_ROUTINE void __IIC_Handler(void)
+static inline void __IIC_Handler(void)
{
uint32_t active;
uint32_t mask;
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
index 5403cc8a10..962fc486fc 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
@@ -695,7 +695,7 @@ extern Context_Control_fp _CPU_Null_fp_context;
* @retval true Interrupts are enabled in the ISR level.
* @retval false Otherwise.
*/
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return false;
}
@@ -1293,44 +1293,27 @@ static inline uint32_t CPU_swap_u32(
typedef uint32_t CPU_Counter_ticks;
/**
- * @brief Returns the current CPU counter frequency in Hz.
+ * @brief Gets the current CPU counter frequency in Hz.
*
- * @return The current CPU counter frequency in Hz.
+ * @return Returns the current CPU counter frequency in Hz.
*/
uint32_t _CPU_Counter_frequency( void );
/**
- * @brief Returns the current CPU counter value.
+ * @brief Gets the current CPU counter value.
*
- * A CPU counter is some free-running counter. It ticks usually with a
- * frequency close to the CPU or system bus clock. The board support package
- * must ensure that this function works before the RTEMS initialization.
- * Otherwise invalid profiling statistics will be gathered.
+ * A CPU counter should be some monotonically increasing free-running counter.
+ * It ticks usually with a frequency close to the CPU or system bus clock. The
+ * counter should not be affected by power saving states so that it can be used
+ * for timestamps. The CPU counter should be initialized at the
+ * RTEMS_SYSINIT_CPU_COUNTER initialization step if necessary. If
+ * RTEMS_PROFILING is enabled, the CPU counter may have to work very early in
+ * the system initialization to avoid invalid profiling statistics.
*
- * @return The current CPU counter value.
+ * @return Returns the current CPU counter value.
*/
CPU_Counter_ticks _CPU_Counter_read( void );
-/**
- * @brief Returns the difference between the second and first CPU counter
- * value.
- *
- * This operation may be carried out as a modulo operation depending on the
- * range of the CPU counter device.
- *
- * @param[in] second The second CPU counter value.
- * @param[in] first The first CPU counter value.
- *
- * @return Returns second minus first modulo counter period.
- */
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
#ifdef RTEMS_SMP
/**
* @brief Performs CPU specific SMP initialization in the context of the boot
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
index 6f4abfcfc3..d5082383e8 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
@@ -151,7 +151,7 @@ void _CPU_Context_validate( uintptr_t pattern );
*
* This function is used only in test sptests/spfatal26.
*/
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
@@ -161,11 +161,28 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
*
* This function is used only in test sptests/spcache01.
*/
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+/**
+ * @brief Uses the thread-local storage area of the context.
+ *
+ * Some architectures may use dedicated registers to reference the thread-local
+ * storage area of the associated thread. This function should set these
+ * registers to the values defined by the specified processor context.
+ *
+ * @param context is the processor context defining the thread-local storage
+ * area to use.
+ */
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
index 83d46010ba..ce1aa301b3 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
@@ -316,7 +316,7 @@ static inline void or1k_interrupt_enable(uint32_t level)
_OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \
} while(0)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & CPU_OR1K_SPR_SR ) != 0;
}
@@ -468,7 +468,7 @@ void _CPU_ISR_install_raw_handler(
typedef void ( *CPU_ISR_handler )( uint32_t );
-RTEMS_INLINE_ROUTINE void _CPU_ISR_install_vector(
+static inline void _CPU_ISR_install_vector(
uint32_t vector,
CPU_ISR_handler new_handler,
CPU_ISR_handler *old_handler
@@ -578,14 +578,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
index 37cd1db436..352f14589a 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
@@ -60,16 +60,23 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "l.nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
index c7618c9355..58db24fbbd 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
@@ -345,7 +345,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg)
{
uint32_t spr_value;
- asm volatile (
+ __asm__ volatile (
"l.mfspr %0, %1, 0;\n\t"
: "=r" (spr_value) : "r" (reg));
@@ -354,7 +354,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg)
static inline void _OR1K_mtspr(uint32_t reg, uint32_t value)
{
- asm volatile (
+ __asm__ volatile (
"l.mtspr %1, %0, 0;\n\t"
:: "r" (value), "r" (reg)
);
@@ -386,12 +386,12 @@ static inline void _OR1K_mtspr(uint32_t reg, uint32_t value)
static inline void _OR1K_Sync_mem( void )
{
- asm volatile("l.msync");
+ __asm__ volatile("l.msync");
}
static inline void _OR1K_Sync_pipeline( void )
{
- asm volatile("l.psync");
+ __asm__ volatile("l.psync");
}
/**
@@ -402,7 +402,7 @@ static inline void _OR1K_Sync_pipeline( void )
*
*/
#define _OR1KSIM_CPU_Halt() \
- asm volatile ("l.nop 0xc")
+ __asm__ volatile ("l.nop 0xc")
#ifdef __cplusplus
}
diff --git a/cpukit/score/cpu/powerpc/cpu.c b/cpukit/score/cpu/powerpc/cpu.c
index 6147d7be74..bdb9cf6ab5 100644
--- a/cpukit/score/cpu/powerpc/cpu.c
+++ b/cpukit/score/cpu/powerpc/cpu.c
@@ -79,8 +79,10 @@ PPC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE);
#endif
#ifdef PPC_MULTILIB_ALTIVEC
+ PPC_ASSERT_OFFSET(vrsave, VRSAVE);
+ PPC_ASSERT_OFFSET(vscr, VSCR);
RTEMS_STATIC_ASSERT(
- PPC_CONTEXT_OFFSET_V20 % 16 == 0,
+ PPC_CONTEXT_OFFSET_V20 % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
ppc_context_altivec
);
PPC_ASSERT_OFFSET(v20, V20);
@@ -95,7 +97,6 @@ PPC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE);
PPC_ASSERT_OFFSET(v29, V29);
PPC_ASSERT_OFFSET(v30, V30);
PPC_ASSERT_OFFSET(v31, V31);
- PPC_ASSERT_OFFSET(vrsave, VRSAVE);
#endif
#ifdef PPC_MULTILIB_FPU
diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
index 42900aeb1d..84f0bf3f65 100644
--- a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
@@ -29,7 +29,7 @@
*
* Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
*
- * Copyright (c) 2010, 2017 embedded brains GmbH.
+ * Copyright (c) 2010, 2020 embedded brains GmbH.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -246,6 +246,13 @@ typedef struct {
uint32_t isr_dispatch_disable;
uint32_t reserved_for_alignment;
#if defined(PPC_MULTILIB_ALTIVEC)
+ #if !defined(__powerpc64__)
+ uint32_t reserved_for_alignment_2[4];
+ #endif
+ uint32_t vrsave;
+ uint32_t reserved_for_alignment_3[2];
+ /* This field must take stvewx/lvewx requirements into account */
+ uint32_t vscr;
uint8_t v20[16];
uint8_t v21[16];
uint8_t v22[16];
@@ -258,7 +265,6 @@ typedef struct {
uint8_t v29[16];
uint8_t v30[16];
uint8_t v31[16];
- uint32_t vrsave;
#elif defined(__ALTIVEC__)
/*
* 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
@@ -373,8 +379,16 @@ static inline ppc_context *ppc_get_context( const Context_Control *context )
#define PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE PPC_CONTEXT_GPR_OFFSET( 32 )
#ifdef PPC_MULTILIB_ALTIVEC
+ #ifdef __powerpc64__
+ #define PPC_CONTEXT_OFFSET_VRSAVE \
+ ( PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 )
+ #else
+ #define PPC_CONTEXT_OFFSET_VRSAVE \
+ ( PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 24 )
+ #endif
+ #define PPC_CONTEXT_OFFSET_VSCR ( PPC_CONTEXT_OFFSET_VRSAVE + 12 )
#define PPC_CONTEXT_OFFSET_V( v ) \
- ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8)
+ ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_VRSAVE + 16)
#define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 )
#define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 )
#define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 )
@@ -387,9 +401,8 @@ static inline ppc_context *ppc_get_context( const Context_Control *context )
#define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 )
#define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 )
#define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 )
- #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 )
#define PPC_CONTEXT_OFFSET_F( f ) \
- ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_VRSAVE + 8 )
+ ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_V( 32 ) )
#else
#define PPC_CONTEXT_OFFSET_F( f ) \
( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 )
@@ -419,7 +432,7 @@ static inline ppc_context *ppc_get_context( const Context_Control *context )
#if defined(PPC_MULTILIB_FPU)
#define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 )
#elif defined(PPC_MULTILIB_ALTIVEC)
- #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4)
+ #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_V( 33 )
#elif defined(__ALTIVEC__)
#define PPC_CONTEXT_VOLATILE_SIZE \
(PPC_CONTEXT_GPR_OFFSET( 32 ) + 8 \
@@ -595,7 +608,7 @@ typedef struct {
#ifndef ASM
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & MSR_EE ) != 0;
}
@@ -729,14 +742,6 @@ static inline CPU_Counter_ticks _CPU_Counter_read( void )
return value;
}
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
#endif /* ASM */
diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
index cfed43ced4..c81675b53d 100644
--- a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
@@ -273,16 +273,32 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".long 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+#ifdef __powerpc64__
+ register uintptr_t tp __asm__( "13" );
+#else
+ register uintptr_t tp __asm__( "2" );
+#endif
+
+ tp = ppc_get_context( context )->tp;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( tp ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/powerpc/ppc-context-validate.S b/cpukit/score/cpu/powerpc/ppc-context-validate.S
index e4331b2661..67cb5b45c3 100644
--- a/cpukit/score/cpu/powerpc/ppc-context-validate.S
+++ b/cpukit/score/cpu/powerpc/ppc-context-validate.S
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2020 embedded brains GmbH. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -99,6 +99,7 @@
#define VTMP_OFFSET VOFFSET(12)
#define VTMP2_OFFSET VOFFSET(13)
#define VRSAVE_OFFSET VOFFSET(14)
+ #define VRSAVE2_OFFSET (VOFFSET(14) + 4)
#define VSCR_OFFSET (VOFFSET(14) + 12)
#define ALTIVECEND VOFFSET(15)
#else
@@ -161,6 +162,13 @@ _CPU_Context_validate:
#endif
#ifdef PPC_MULTILIB_ALTIVEC
+ mfvrsave r0
+ stw r0, VRSAVE_OFFSET(r1)
+ li r0, 0xffffffff
+ mtvrsave r0
+ mfvscr v0
+ li r0, VSCR_OFFSET
+ stvewx v0, r1, r0
li r0, V20_OFFSET
stvx v20, r1, r0
li r0, V21_OFFSET
@@ -185,11 +193,6 @@ _CPU_Context_validate:
stvx v30, r1, r0
li r0, V31_OFFSET
stvx v31, r1, r0
- mfvscr v0
- li r0, VSCR_OFFSET
- stvewx v0, r1, r0
- mfvrsave r0
- stw r0, VRSAVE_OFFSET(r1)
#endif
/* Fill */
@@ -337,9 +340,11 @@ _CPU_Context_validate:
FILL_V 29
FILL_V 30
FILL_V 31
+#ifndef __PPC_VRSAVE__
addi r4, r3, 0x700
mtvrsave r4
#endif
+#endif
/* Check */
check:
@@ -516,6 +521,15 @@ check:
#ifdef PPC_MULTILIB_ALTIVEC
.macro CHECK_V i
+#ifdef __PPC_VRSAVE__
+ mfvrsave r4
+.if (31 - \i) > 15
+ andis. r5, r4, 1 << (31 - \i - 16)
+.else
+ andi. r5, r4, 1 << (31 - \i)
+.endif
+ beq 1f
+#endif
li r4, VTMP_OFFSET
stvx \i, r1, r4
lwz r5, VTMP_OFFSET(r1)
@@ -534,9 +548,43 @@ check:
addi r4, r3, 0x600 + \i
cmpw r5, r4
bne restore
+#ifdef __PPC_VRSAVE__
+ mfvrsave r4
+.if (31 - \i) > 15
+ xoris r4, r4, 1 << (31 - \i - 16)
+.else
+ xori r4, r4, 1 << (31 - \i)
+.endif
+ mtvrsave r4
+ b 2f
+1:
+.if (31 - \i) > 15
+ oris r4, r4, 1 << (31 - \i - 16)
+.else
+ ori r4, r4, 1 << (31 - \i)
+.endif
+ mtvrsave r4
+ addi r4, r3, 0x300 + \i
+ stw r4, VTMP_OFFSET(r1)
+ addi r4, r3, 0x400 + \i
+ stw r4, VTMP_OFFSET + 4(r1)
+ addi r4, r3, 0x500 + \i
+ stw r4, VTMP_OFFSET + 8(r1)
+ addi r4, r3, 0x600 + \i
+ stw r4, VTMP_OFFSET + 12(r1)
+ li r4, VTMP_OFFSET
+ lvx \i, r1, r4
+2:
+#endif
.endm
/* Check VSCR */
+#ifdef __PPC_VRSAVE__
+ mfvrsave r4
+ stw r4, VRSAVE2_OFFSET(r1)
+ oris r4, r4, 0x8000
+ mtvrsave r4
+#endif
li r4, VTMP_OFFSET
stvx v0, r1, r4
mfvscr v0
@@ -548,6 +596,10 @@ check:
bne restore
li r4, VTMP_OFFSET
lvx v0, r1, r4
+#ifdef __PPC_VRSAVE__
+ lwz r4, VRSAVE2_OFFSET(r1)
+ mtvrsave r4
+#endif
CHECK_V 0
CHECK_V 1
@@ -582,10 +634,16 @@ check:
CHECK_V 30
CHECK_V 31
mfvrsave r5
+#ifdef __PPC_VRSAVE__
+ addi r5, r5, 1
+ cmplwi r0, r5, 1
+ bgt restore
+#else
addi r4, r3, 0x700
cmpw r5, r4
bne restore
#endif
+#endif
mtcr r29
addi r5, r3, 1
@@ -595,7 +653,7 @@ check:
restore:
#ifdef PPC_MULTILIB_ALTIVEC
- lwz r0, VRSAVE_OFFSET(r1)
+ li r0, 0xffffffff
mtvrsave r0
li r0, V31_OFFSET
lvx v31, r1, r0
@@ -621,6 +679,11 @@ restore:
lvx v21, r1, r0
li r0, V20_OFFSET
lvx v20, r1, r0
+ li r0, VSCR_OFFSET
+ lvewx v0, r1, r0
+ mtvscr v0
+ lwz r0, VRSAVE_OFFSET(r1)
+ mtvrsave r0
#endif
#ifdef PPC_MULTILIB_FPU
diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
index 471c6c6c3e..88f7e7960c 100644
--- a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
@@ -186,12 +186,12 @@ static inline void riscv_interrupt_enable( uint32_t level )
riscv_interrupt_disable(); \
} while(0)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
+static inline bool _CPU_ISR_Is_enabled( unsigned long level )
{
return ( level & RISCV_MSTATUS_MIE ) != 0;
}
-RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
+static inline void _CPU_ISR_Set_level( uint32_t level )
{
if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
__asm__ volatile (
@@ -459,14 +459,6 @@ extern volatile uint32_t * const _RISCV_Counter;
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
#ifdef RTEMS_SMP
uint32_t _CPU_SMP_Initialize( void );
@@ -489,7 +481,7 @@ static inline uint32_t _CPU_SMP_Get_current_processor( void )
"=&r" ( mhartid )
);
- return (uint32_t) mhartid;
+ return (uint32_t) mhartid - RISCV_BOOT_HARTID;
}
void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
index 5162cbbd51..5fd25e32cf 100644
--- a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
@@ -325,7 +325,7 @@ typedef struct {
uint32_t priority[RISCV_PLIC_MAX_INTERRUPTS];
uint32_t pending[1024];
uint32_t enable[16320][32];
- RISCV_PLIC_hart_regs harts[CPU_MAXIMUM_PROCESSORS];
+ RISCV_PLIC_hart_regs harts[CPU_MAXIMUM_PROCESSORS + RISCV_BOOT_HARTID];
} RISCV_PLIC_regs;
typedef struct {
@@ -420,16 +420,28 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( "unimp" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uintptr_t tp __asm__( "tp" );
+
+ tp = context->tp;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( tp ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpu.h b/cpukit/score/cpu/sh/include/rtems/score/cpu.h
index 2805e4244c..f2b59a8713 100644
--- a/cpukit/score/cpu/sh/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sh/include/rtems/score/cpu.h
@@ -393,7 +393,7 @@ void CPU_delay( uint32_t microseconds );
#define _CPU_ISR_Flash( _level) \
sh_flash_interrupts( _level)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
sh_get_interrupt_level( level );
return level == 0;
@@ -576,14 +576,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
index 745a185d1a..295b01eeaa 100644
--- a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
@@ -37,28 +37,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sparc/include/libcpu/byteorder.h b/cpukit/score/cpu/sparc/include/libcpu/byteorder.h
index 38b1f18f63..0c469ba36b 100644
--- a/cpukit/score/cpu/sparc/include/libcpu/byteorder.h
+++ b/cpukit/score/cpu/sparc/include/libcpu/byteorder.h
@@ -37,42 +37,42 @@
extern "C" {
#endif
-RTEMS_INLINE_ROUTINE uint16_t ld_le16(volatile uint16_t *addr)
+static inline uint16_t ld_le16(volatile uint16_t *addr)
{
return CPU_swap_u16(*addr);
}
-RTEMS_INLINE_ROUTINE void st_le16(volatile uint16_t *addr, uint16_t val)
+static inline void st_le16(volatile uint16_t *addr, uint16_t val)
{
*addr = CPU_swap_u16(val);
}
-RTEMS_INLINE_ROUTINE uint32_t ld_le32(volatile uint32_t *addr)
+static inline uint32_t ld_le32(volatile uint32_t *addr)
{
return CPU_swap_u32(*addr);
}
-RTEMS_INLINE_ROUTINE void st_le32(volatile uint32_t *addr, uint32_t val)
+static inline void st_le32(volatile uint32_t *addr, uint32_t val)
{
*addr = CPU_swap_u32(val);
}
-RTEMS_INLINE_ROUTINE uint16_t ld_be16(volatile uint16_t *addr)
+static inline uint16_t ld_be16(volatile uint16_t *addr)
{
return *addr;
}
-RTEMS_INLINE_ROUTINE void st_be16(volatile uint16_t *addr, uint16_t val)
+static inline void st_be16(volatile uint16_t *addr, uint16_t val)
{
*addr = val;
}
-RTEMS_INLINE_ROUTINE uint32_t ld_be32(volatile uint32_t *addr)
+static inline uint32_t ld_be32(volatile uint32_t *addr)
{
return *addr;
}
-RTEMS_INLINE_ROUTINE void st_be32(volatile uint32_t *addr, uint32_t val)
+static inline void st_be32(volatile uint32_t *addr, uint32_t val)
{
*addr = val;
}
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
index 225eaf96c6..9044294304 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
@@ -797,7 +797,7 @@ void _SPARC_Interrupt_dispatch( uint32_t irq );
#define _CPU_ISR_Is_enabled( _isr_cookie ) \
sparc_interrupt_is_enabled( _isr_cookie )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & SPARC_PSR_PIL_MASK ) == 0;
}
@@ -1158,14 +1158,6 @@ static inline CPU_Counter_ticks _CPU_Counter_read( void )
return ( *_SPARC_Counter.read )();
}
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
index 7197eb960e..e98a75cf32 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
@@ -224,16 +224,28 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
void _CPU_Context_validate( uintptr_t pattern );
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( "unimp 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uint32_t g7 __asm__( "g7" );
+
+ g7 = context->g7;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( g7 ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
index 03f6e2aa63..90ffe96341 100644
--- a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
@@ -697,7 +697,7 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
#define _CPU_ISR_Flash( _level ) \
sparc_flash_interrupts( _level )
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & SPARC_PSTATE_IE_MASK ) != 0;
}
@@ -934,14 +934,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
index c026687d01..8df4c8814c 100644
--- a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
@@ -56,28 +56,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( "unimp" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpu.h b/cpukit/score/cpu/v850/include/rtems/score/cpu.h
index 958bed5dc3..00addf0bc6 100644
--- a/cpukit/score/cpu/v850/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/v850/include/rtems/score/cpu.h
@@ -346,7 +346,7 @@ typedef struct {
__asm__ __volatile__( "di" ); \
} while (0)
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
return ( level & V850_PSW_INTERRUPT_DISABLE_MASK )
!= V850_PSW_INTERRUPT_DISABLE;
@@ -674,14 +674,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
index 23c1437ba0..4b0f78c845 100644
--- a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
@@ -56,28 +56,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
index bea8e05e39..2671c607a7 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
@@ -177,12 +177,12 @@ typedef struct {
(void) _level; /* Prevent -Wunused-but-set-variable */ \
}
-RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled(uint32_t level)
+static inline bool _CPU_ISR_Is_enabled(uint32_t level)
{
return (level & EFLAGS_INTR_ENABLE) != 0;
}
-RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level)
+static inline void _CPU_ISR_Set_level(uint32_t new_level)
{
if ( new_level ) {
amd64_disable_interrupts();
@@ -192,7 +192,7 @@ RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level)
}
}
-RTEMS_INLINE_ROUTINE uint32_t _CPU_ISR_Get_level(void)
+static inline uint32_t _CPU_ISR_Get_level(void)
{
uint64_t rflags;
@@ -304,15 +304,6 @@ uint32_t _CPU_Counter_frequency( void );
CPU_Counter_ticks _CPU_Counter_read( void );
-
-static inline CPU_Counter_ticks _CPU_Counter_difference(
- CPU_Counter_ticks second,
- CPU_Counter_ticks first
-)
-{
- return second - first;
-}
-
#ifdef RTEMS_SMP
*
uint32_t _CPU_SMP_Initialize( void );
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h
index 4ad50b9f42..10e0887cb9 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h
@@ -31,7 +31,7 @@
#include <rtems/score/basedefs.h>
-RTEMS_INLINE_ROUTINE uint8_t inport_byte(uint16_t port)
+static inline uint8_t inport_byte(uint16_t port)
{
uint8_t ret;
__asm__ volatile ( "inb %1, %0"
@@ -40,12 +40,12 @@ RTEMS_INLINE_ROUTINE uint8_t inport_byte(uint16_t port)
return ret;
}
-RTEMS_INLINE_ROUTINE void outport_byte(uint16_t port, uint8_t val)
+static inline void outport_byte(uint16_t port, uint8_t val)
{
__asm__ volatile ( "outb %0, %1" : : "a" (val), "Nd" (port) );
}
-RTEMS_INLINE_ROUTINE uint16_t amd64_get_cs(void)
+static inline uint16_t amd64_get_cs(void)
{
uint16_t segment = 0;
@@ -54,12 +54,12 @@ RTEMS_INLINE_ROUTINE uint16_t amd64_get_cs(void)
return segment;
}
-RTEMS_INLINE_ROUTINE void amd64_set_cr3(uint64_t segment)
+static inline void amd64_set_cr3(uint64_t segment)
{
__asm__ volatile ( "movq %0, %%cr3" : "=r" (segment) : "0" (segment) );
}
-RTEMS_INLINE_ROUTINE void cpuid(
+static inline void cpuid(
uint32_t code, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx
) {
__asm__ volatile ( "cpuid"
@@ -67,7 +67,7 @@ RTEMS_INLINE_ROUTINE void cpuid(
: "a" (code) );
}
-RTEMS_INLINE_ROUTINE uint64_t rdmsr(uint32_t msr)
+static inline uint64_t rdmsr(uint32_t msr)
{
uint32_t low, high;
__asm__ volatile ( "rdmsr" :
@@ -76,23 +76,23 @@ RTEMS_INLINE_ROUTINE uint64_t rdmsr(uint32_t msr)
return low | (uint64_t) high << 32;
}
-RTEMS_INLINE_ROUTINE void wrmsr(uint32_t msr, uint32_t low, uint32_t high)
+static inline void wrmsr(uint32_t msr, uint32_t low, uint32_t high)
{
__asm__ volatile ( "wrmsr" : :
"a" (low), "d" (high), "c" (msr) );
}
-RTEMS_INLINE_ROUTINE void amd64_enable_interrupts(void)
+static inline void amd64_enable_interrupts(void)
{
__asm__ volatile ( "sti" );
}
-RTEMS_INLINE_ROUTINE void amd64_disable_interrupts(void)
+static inline void amd64_disable_interrupts(void)
{
__asm__ volatile ( "cli" );
}
-RTEMS_INLINE_ROUTINE void stub_io_wait(void)
+static inline void stub_io_wait(void)
{
/* XXX: This likely won't be required on any modern boards, but this function
* exists so it's easier to find all the places it may be used.
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
index d3a4b848e6..742763c168 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
@@ -40,28 +40,35 @@ extern "C" {
RTEMS_NO_RETURN void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error );
-RTEMS_INLINE_ROUTINE void _CPU_Context_volatile_clobber( uintptr_t pattern )
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
/* TODO */
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_illegal( void )
+static inline void _CPU_Instruction_illegal( void )
{
__asm__ volatile ( ".word 0" );
}
-RTEMS_INLINE_ROUTINE void _CPU_Context_validate( uintptr_t pattern )
+static inline void _CPU_Context_validate( uintptr_t pattern )
{
while (1) {
/* TODO */
}
}
-RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
+static inline void _CPU_Instruction_no_operation( void )
{
__asm__ volatile ( "nop" );
}
+static inline void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/src/gcovdumpinfo.c b/cpukit/score/src/gcovdumpinfo.c
new file mode 100644
index 0000000000..8598fce578
--- /dev/null
+++ b/cpukit/score/src/gcovdumpinfo.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the implementation of _Gcov_Dump_info().
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+typedef struct {
+ IO_Put_char put_char;
+ void *arg;
+} Gcov_Context;
+
+static void _Gcov_Dump( const void *data, unsigned length, void *arg )
+{
+ Gcov_Context *ctx;
+ IO_Put_char put_char;
+ void *ctx_arg;
+ const char *in;
+ const void *end;
+
+ ctx = arg;
+ in = data;
+ end = in + length;
+ put_char = ctx->put_char;
+ ctx_arg = ctx->arg;
+
+ while ( in != end ) {
+ ( *put_char )( *in, ctx_arg );
+ ++in;
+ }
+}
+
+static void _Gcov_Filename( const char *filename, void *arg )
+{
+ __gcov_filename_to_gcfn( filename, _Gcov_Dump, arg );
+}
+
+static void *_Gcov_Allocate( unsigned length, void *arg )
+{
+ (void) length;
+ (void) arg;
+ return NULL;
+}
+
+void _Gcov_Dump_info( IO_Put_char put_char, void *arg )
+{
+ Gcov_Context ctx;
+ const struct gcov_info * const *item;
+
+ ctx.put_char = put_char;
+ ctx.arg = arg;
+
+ RTEMS_LINKER_SET_FOREACH( gcov_info, item ) {
+ __gcov_info_to_gcda(
+ *item,
+ _Gcov_Filename,
+ _Gcov_Dump,
+ _Gcov_Allocate,
+ &ctx
+ );
+ }
+}
diff --git a/cpukit/score/src/gcovdumpinfobase64.c b/cpukit/score/src/gcovdumpinfobase64.c
new file mode 100644
index 0000000000..be07f03291
--- /dev/null
+++ b/cpukit/score/src/gcovdumpinfobase64.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the implementation of
+ * _Gcov_Dump_info_base64().
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+#include <limits.h>
+#include <string.h>
+
+typedef struct {
+ IO_Put_char put_char;
+ void *arg;
+ int out;
+ size_t index;
+ char buf[ 57 ];
+} Gcov_Base64_context;
+
+static void _Gcov_Base64_put_char( int c, void *arg )
+{
+ Gcov_Base64_context *ctx;
+
+ ctx = arg;
+
+ ( *ctx->put_char )( c, ctx->arg );
+ ++ctx->out;
+
+ if ( ctx->out >= 76 ) {
+ ctx->out = 0;
+ ( *ctx->put_char )( '\n', ctx->arg );
+ }
+}
+
+static void _Gcov_Base64_encode( int c, void *arg )
+{
+ Gcov_Base64_context *ctx;
+ size_t index;
+
+ ctx = arg;
+ index = ctx->index;
+ ctx->buf[ index ] = (char) c;
+
+ if ( index == RTEMS_ARRAY_SIZE( ctx->buf ) - 1 ) {
+ index = 0;
+ _IO_Base64(
+ _Gcov_Base64_put_char,
+ ctx,
+ ctx->buf,
+ sizeof( ctx->buf ),
+ NULL,
+ INT_MAX
+ );
+ } else {
+ ++index;
+ }
+
+ ctx->index = index;
+}
+
+void _Gcov_Dump_info_base64( IO_Put_char put_char, void *arg )
+{
+ Gcov_Base64_context ctx;
+
+ memset( &ctx, 0, sizeof( ctx ) );
+ ctx.put_char = put_char;
+ ctx.arg = arg;
+ _Gcov_Dump_info( _Gcov_Base64_encode, &ctx );
+ _IO_Base64( _Gcov_Base64_put_char, &ctx, ctx.buf, ctx.index, NULL, INT_MAX );
+}
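Both new gcov dump entry points take an IO_Put_char callback, so the coverage records can be routed to any character output. A minimal usage sketch, assuming the rtems_put_char() wrapper from <rtems/bspIo.h> is available as an IO_Put_char compatible callback (any function with that signature would work equally well):

#include <rtems/score/gcov.h>
#include <rtems/bspIo.h>

/* Emit the gcov records of all instrumented object files registered in the
 * gcov_info linker set as a base64 encoded stream on the kernel character
 * output, for example to capture coverage data over a serial console. */
static void dump_coverage( void )
{
  _Gcov_Dump_info_base64( rtems_put_char, NULL );
}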
diff --git a/cpukit/score/src/gcovinfoset.c b/cpukit/score/src/gcovinfoset.c
new file mode 100644
index 0000000000..6fd695043b
--- /dev/null
+++ b/cpukit/score/src/gcovinfoset.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the definition of the gcov information
+ * linker set.
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+RTEMS_LINKER_ROSET( gcov_info, const struct gcov_info * );
diff --git a/cpukit/score/src/memorydirtyfreeareas.c b/cpukit/score/src/memorydirtyfreeareas.c
index fc6c2630bf..8a817c2208 100644
--- a/cpukit/score/src/memorydirtyfreeareas.c
+++ b/cpukit/score/src/memorydirtyfreeareas.c
@@ -10,7 +10,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2020 embedded brains GmbH
+ * Copyright (C) 2020, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,14 @@
#include <rtems/score/memory.h>
+#include <string.h>
+
void _Memory_Dirty_free_areas( void )
{
_Memory_Fill( _Memory_Get(), 0xcf );
+ memset(
+ _Memory_Noinit_begin,
+ 0xcf,
+ (uintptr_t) _Memory_Noinit_end - (uintptr_t) _Memory_Noinit_begin
+ );
}
diff --git a/cpukit/score/src/memorynoinit.c b/cpukit/score/src/memorynoinit.c
new file mode 100644
index 0000000000..19772356cd
--- /dev/null
+++ b/cpukit/score/src/memorynoinit.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreMemory
+ *
+ * @brief This source file contains the definition of ::_Memory_Noinit_begin
+ * and ::_Memory_Noinit_end.
+ */
+
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/memory.h>
+
+RTEMS_SECTION( ".noinit.rtems.begin" ) char _Memory_Noinit_begin[ 0 ];
+
+RTEMS_SECTION( ".noinit.rtems.end" ) char _Memory_Noinit_end[ 0 ];
diff --git a/cpukit/score/src/memoryzerofreeareas.c b/cpukit/score/src/memoryzerofreeareas.c
index 4ea0812426..b1cef47ef9 100644
--- a/cpukit/score/src/memoryzerofreeareas.c
+++ b/cpukit/score/src/memoryzerofreeareas.c
@@ -10,7 +10,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2020 embedded brains GmbH
+ * Copyright (C) 2020, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,14 @@
#include <rtems/score/memory.h>
+#include <string.h>
+
void _Memory_Zero_free_areas( void )
{
_Memory_Fill( _Memory_Get(), 0 );
+ memset(
+ _Memory_Noinit_begin,
+ 0,
+ (uintptr_t) _Memory_Noinit_end - (uintptr_t) _Memory_Noinit_begin
+ );
}
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 3b525a2066..458031c794 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -162,7 +162,7 @@ static void _MPCI_Create_server( void )
memset( &config, 0, sizeof( config ) );
config.scheduler = &_Scheduler_Table[ 0 ];
config.name = _Objects_Build_name( 'M', 'P', 'C', 'I' );
- config.priority = PRIORITY_PSEUDO_ISR;
+ config.priority = PRIORITY_MINIMUM;
config.is_fp = CPU_ALL_TASKS_ARE_FP;
config.stack_size = _Stack_Minimum()
+ _MPCI_Configuration.extra_mpci_receive_server_stack
diff --git a/cpukit/score/src/objectactivecount.c b/cpukit/score/src/objectactivecount.c
index 028058e473..5f0304fead 100644
--- a/cpukit/score/src/objectactivecount.c
+++ b/cpukit/score/src/objectactivecount.c
@@ -46,14 +46,22 @@ Objects_Maximum _Objects_Active_count(
const Objects_Information *information
)
{
- Objects_Maximum inactive;
- Objects_Maximum maximum;
+ Objects_Maximum active;
+ Objects_Maximum index;
+ Objects_Maximum maximum;
+ Objects_Control **local_table;
_Assert( _Objects_Allocator_is_owner() );
- inactive = (Objects_Maximum)
- _Chain_Node_count_unprotected( &information->Inactive );
+ active = 0;
maximum = _Objects_Get_maximum_index( information );
+ local_table = information->local_table;
- return maximum - inactive;
+ for ( index = 0; index < maximum; ++index ) {
+ if ( local_table[ index ] != NULL ) {
+ ++active;
+ }
+ }
+
+ return active;
}
diff --git a/cpukit/score/src/objectfree.c b/cpukit/score/src/objectfree.c
index 45b2ba2c86..06d7d82672 100644
--- a/cpukit/score/src/objectfree.c
+++ b/cpukit/score/src/objectfree.c
@@ -51,14 +51,16 @@ void _Objects_Free_unlimited(
if ( _Objects_Is_auto_extend( information ) ) {
Objects_Maximum objects_per_block;
- Objects_Maximum block;
- Objects_Maximum inactive;
+ Objects_Maximum index;
objects_per_block = information->objects_per_block;
- block = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
+ index = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
- if ( block > objects_per_block ) {
- block /= objects_per_block;
+ if ( _Objects_Is_in_allocated_block( index, objects_per_block ) ) {
+ Objects_Maximum block;
+ Objects_Maximum inactive;
+
+ block = index / objects_per_block;
++information->inactive_per_block[ block ];
diff --git a/cpukit/score/src/percpudata.c b/cpukit/score/src/percpudata.c
index c81c1b6a16..6b31ef3602 100644
--- a/cpukit/score/src/percpudata.c
+++ b/cpukit/score/src/percpudata.c
@@ -66,14 +66,19 @@ static void _Per_CPU_Data_initialize( void )
size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
if ( size > 0 ) {
+ char *data_begin;
const Memory_Information *mem;
Per_CPU_Control *cpu;
uint32_t cpu_index;
uint32_t cpu_max;
+ /* Prevent an out of bounds warning in the memcpy() below */
+ data_begin = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+ RTEMS_OBFUSCATE_VARIABLE( data_begin );
+
mem = _Memory_Get();
cpu = _Per_CPU_Get_by_index( 0 );
- cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+ cpu->data = data_begin;
cpu_max = rtems_configuration_get_maximum_processors();
@@ -85,7 +90,7 @@ static void _Per_CPU_Data_initialize( void )
_Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
}
- memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
+ memcpy( cpu->data, data_begin, size);
}
}
}
diff --git a/cpukit/score/src/profilingisrentryexit.c b/cpukit/score/src/profilingisrentryexit.c
index 2619bdc0c0..2c33182056 100644
--- a/cpukit/score/src/profilingisrentryexit.c
+++ b/cpukit/score/src/profilingisrentryexit.c
@@ -54,10 +54,7 @@ void _Profiling_Outer_most_interrupt_entry_and_exit(
_Assert( cpu->isr_nest_level == 1 );
stats = &cpu->Stats;
- delta = _CPU_Counter_difference(
- interrupt_exit_instant,
- interrupt_entry_instant
- );
+ delta = interrupt_exit_instant - interrupt_entry_instant;
++stats->interrupt_count;
stats->total_interrupt_time += delta;
diff --git a/cpukit/score/src/scheduleredfblock.c b/cpukit/score/src/scheduleredfblock.c
index 56f7c9c021..cb7ab91450 100644
--- a/cpukit/score/src/scheduleredfblock.c
+++ b/cpukit/score/src/scheduleredfblock.c
@@ -47,11 +47,11 @@ void _Scheduler_EDF_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_EDF_Extract_body,
- _Scheduler_EDF_Schedule_body
+ _Scheduler_EDF_Get_highest_ready
);
}
diff --git a/cpukit/score/src/scheduleredfchangepriority.c b/cpukit/score/src/scheduleredfchangepriority.c
index 26ce1c348f..de17ca0fad 100644
--- a/cpukit/score/src/scheduleredfchangepriority.c
+++ b/cpukit/score/src/scheduleredfchangepriority.c
@@ -71,5 +71,8 @@ void _Scheduler_EDF_Update_priority(
_Scheduler_EDF_Extract( context, the_node );
_Scheduler_EDF_Enqueue( context, the_node, insert_priority );
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_EDF_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/scheduleredfschedule.c b/cpukit/score/src/scheduleredfschedule.c
index d0acea7a30..40c5ab2c06 100644
--- a/cpukit/score/src/scheduleredfschedule.c
+++ b/cpukit/score/src/scheduleredfschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_EDF_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_EDF_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/scheduleredfunblock.c b/cpukit/score/src/scheduleredfunblock.c
index d6604686e3..4638eedf71 100644
--- a/cpukit/score/src/scheduleredfunblock.c
+++ b/cpukit/score/src/scheduleredfunblock.c
@@ -62,23 +62,5 @@ void _Scheduler_EDF_Unblock(
the_node->priority = priority;
_Scheduler_EDF_Enqueue( context, the_node, insert_priority );
-
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir(
- the_thread,
- priority == ( SCHEDULER_EDF_PRIO_MSB | PRIORITY_PSEUDO_ISR )
- );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
diff --git a/cpukit/score/src/scheduleredfyield.c b/cpukit/score/src/scheduleredfyield.c
index d83e1d9268..d38bea705a 100644
--- a/cpukit/score/src/scheduleredfyield.c
+++ b/cpukit/score/src/scheduleredfyield.c
@@ -55,5 +55,5 @@ void _Scheduler_EDF_Yield(
_Scheduler_EDF_Extract( context, the_node );
_Scheduler_EDF_Enqueue( context, the_node, the_node->priority );
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, true );
+ _Scheduler_uniprocessor_Yield( scheduler, _Scheduler_EDF_Get_highest_ready );
}
diff --git a/cpukit/score/src/schedulerpriorityblock.c b/cpukit/score/src/schedulerpriorityblock.c
index 53636940dc..ffc7a3ad86 100644
--- a/cpukit/score/src/schedulerpriorityblock.c
+++ b/cpukit/score/src/schedulerpriorityblock.c
@@ -49,11 +49,11 @@ void _Scheduler_priority_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_priority_Extract_body,
- _Scheduler_priority_Schedule_body
+ _Scheduler_priority_Get_highest_ready
);
}
diff --git a/cpukit/score/src/schedulerprioritychangepriority.c b/cpukit/score/src/schedulerprioritychangepriority.c
index 8a059763de..3588a2ce42 100644
--- a/cpukit/score/src/schedulerprioritychangepriority.c
+++ b/cpukit/score/src/schedulerprioritychangepriority.c
@@ -96,5 +96,8 @@ void _Scheduler_priority_Update_priority(
);
}
- _Scheduler_priority_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulerpriorityschedule.c b/cpukit/score/src/schedulerpriorityschedule.c
index bde749f9bc..bb7cf87399 100644
--- a/cpukit/score/src/schedulerpriorityschedule.c
+++ b/cpukit/score/src/schedulerpriorityschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_priority_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_priority_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulerpriorityunblock.c b/cpukit/score/src/schedulerpriorityunblock.c
index 190c126908..f9b6cabff7 100644
--- a/cpukit/score/src/schedulerpriorityunblock.c
+++ b/cpukit/score/src/schedulerpriorityunblock.c
@@ -76,19 +76,5 @@ void _Scheduler_priority_Unblock (
/* TODO: flash critical section? */
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir( the_thread, priority == PRIORITY_PSEUDO_ISR );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
diff --git a/cpukit/score/src/schedulerpriorityyield.c b/cpukit/score/src/schedulerpriorityyield.c
index 77fcecc418..adb443df94 100644
--- a/cpukit/score/src/schedulerpriorityyield.c
+++ b/cpukit/score/src/schedulerpriorityyield.c
@@ -59,5 +59,8 @@ void _Scheduler_priority_Yield(
_Chain_Append_unprotected( ready_chain, &the_thread->Object.Node );
}
- _Scheduler_priority_Schedule_body( scheduler, the_thread, true );
+ _Scheduler_uniprocessor_Yield(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleblock.c b/cpukit/score/src/schedulersimpleblock.c
index a8229d409f..dfd18df3a2 100644
--- a/cpukit/score/src/schedulersimpleblock.c
+++ b/cpukit/score/src/schedulersimpleblock.c
@@ -47,11 +47,11 @@ void _Scheduler_simple_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_simple_Extract,
- _Scheduler_simple_Schedule_body
+ _Scheduler_simple_Get_highest_ready
);
}
diff --git a/cpukit/score/src/schedulersimplechangepriority.c b/cpukit/score/src/schedulersimplechangepriority.c
index b4711dfd01..5c53c96fb3 100644
--- a/cpukit/score/src/schedulersimplechangepriority.c
+++ b/cpukit/score/src/schedulersimplechangepriority.c
@@ -60,5 +60,8 @@ void _Scheduler_simple_Update_priority(
_Scheduler_simple_Extract( scheduler, the_thread, node );
_Scheduler_simple_Insert( &context->Ready, the_thread, new_priority );
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleschedule.c b/cpukit/score/src/schedulersimpleschedule.c
index 4108fdaee8..83a3fed7fe 100644
--- a/cpukit/score/src/schedulersimpleschedule.c
+++ b/cpukit/score/src/schedulersimpleschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_simple_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleunblock.c b/cpukit/score/src/schedulersimpleunblock.c
index 7791503d7a..436f20ac38 100644
--- a/cpukit/score/src/schedulersimpleunblock.c
+++ b/cpukit/score/src/schedulersimpleunblock.c
@@ -58,23 +58,5 @@ void _Scheduler_simple_Unblock(
priority = _Thread_Get_priority( the_thread );
insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
_Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
-
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir(
- the_thread,
- priority == PRIORITY_PSEUDO_ISR
- );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
diff --git a/cpukit/score/src/schedulersimpleyield.c b/cpukit/score/src/schedulersimpleyield.c
index c25cceb8a7..ed8dd5b4cc 100644
--- a/cpukit/score/src/schedulersimpleyield.c
+++ b/cpukit/score/src/schedulersimpleyield.c
@@ -58,5 +58,8 @@ void _Scheduler_simple_Yield(
insert_priority = (unsigned int) _Thread_Get_priority( the_thread );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
_Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Yield(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index 80f030fdc6..0ddfd1cf9b 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -360,6 +360,7 @@ void _Thread_Priority_changed(
);
}
+#if defined(RTEMS_SMP)
void _Thread_Priority_replace(
Thread_Control *the_thread,
Priority_Node *victim_node,
@@ -375,6 +376,7 @@ void _Thread_Priority_replace(
replacement_node
);
}
+#endif
void _Thread_Priority_update( Thread_queue_Context *queue_context )
{
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 9f3c01d118..b5e0cfdc9b 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -40,6 +40,7 @@
#endif
#include <rtems/score/threadidledata.h>
+#include <rtems/score/cpuimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
@@ -111,10 +112,10 @@ static void _Thread_Create_idle_for_CPU( Per_CPU_Control *cpu )
void _Thread_Create_idle( void )
{
+#if defined(RTEMS_SMP)
uint32_t cpu_max;
uint32_t cpu_index;
- _System_state_Set( SYSTEM_STATE_BEFORE_MULTITASKING );
cpu_max = _SMP_Get_processor_maximum();
for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
@@ -124,4 +125,12 @@ void _Thread_Create_idle( void )
_Thread_Create_idle_for_CPU( cpu );
}
}
+#else
+ _Thread_Create_idle_for_CPU( _Per_CPU_Get() );
+#endif
+
+ _CPU_Use_thread_local_storage(
+ &_Per_CPU_Get_executing( _Per_CPU_Get() )->Registers
+ );
+ _System_state_Set( SYSTEM_STATE_BEFORE_MULTITASKING );
}
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 457fdaa54a..9b37206c6d 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -298,6 +298,7 @@ static bool _Thread_Try_initialize(
the_thread->Start.is_preemptible = config->is_preemptible;
the_thread->Start.cpu_budget_operations = config->cpu_budget_operations;
the_thread->Start.stack_free = config->stack_free;
+ the_thread->Join_queue.Queue.owner = the_thread;
_Thread_Timer_initialize( &the_thread->Timer, cpu );
_Thread_Initialize_scheduler_and_wait_nodes( the_thread, config );
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 25f57e2a40..635143427c 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -14,7 +14,7 @@
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2021 embedded brains GmbH.
+ * Copyright (C) 2014, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -55,7 +55,9 @@
#include <rtems/score/userextimpl.h>
#include <rtems/score/watchdogimpl.h>
-#define THREAD_JOIN_TQ_OPERATIONS &_Thread_queue_Operations_priority
+#include <pthread.h>
+
+#define THREAD_JOIN_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
static void _Thread_Life_action_handler(
Thread_Control *executing,
@@ -70,31 +72,6 @@ Thread_Zombie_registry _Thread_Zombies = {
.Chain = CHAIN_INITIALIZER_EMPTY( _Thread_Zombies.Chain )
};
-static void _Thread_Raise_real_priority(
- Thread_Control *the_thread,
- Priority_Control priority
-)
-{
- Thread_queue_Context queue_context;
-
- _Thread_queue_Context_initialize( &queue_context );
- _Thread_queue_Context_clear_priority_updates( &queue_context );
- _Thread_Wait_acquire( the_thread, &queue_context );
-
- if ( priority < the_thread->Real_priority.priority ) {
- _Thread_Priority_change(
- the_thread,
- &the_thread->Real_priority,
- priority,
- PRIORITY_GROUP_LAST,
- &queue_context
- );
- }
-
- _Thread_Wait_release( the_thread, &queue_context );
- _Thread_Priority_update( &queue_context );
-}
-
typedef struct {
Thread_queue_Context Base;
void *exit_value;
@@ -388,18 +365,37 @@ static void _Thread_Remove_life_change_request( Thread_Control *the_thread )
_Thread_State_release( the_thread, &lock_context );
}
-void _Thread_Join(
+static void _Thread_Clear_waiting_for_join_at_exit(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+)
+{
+ (void) the_thread;
+ (void) cpu_self;
+ (void) queue_context;
+ _Thread_Clear_state( queue->owner, STATES_WAITING_FOR_JOIN_AT_EXIT );
+}
+
+Status_Control _Thread_Join(
Thread_Control *the_thread,
States_Control waiting_for_join,
Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- _Assert( the_thread != executing );
_Assert( _Thread_State_is_owner( the_thread ) );
executing->Wait.return_argument = NULL;
-
+ _Thread_queue_Context_set_enqueue_callout(
+ queue_context,
+ _Thread_Clear_waiting_for_join_at_exit
+ );
+ _Thread_queue_Context_set_deadlock_callout(
+ queue_context,
+ _Thread_queue_Deadlock_status
+ );
_Thread_queue_Context_set_thread_state( queue_context, waiting_for_join );
_Thread_queue_Enqueue(
&the_thread->Join_queue.Queue,
@@ -407,6 +403,7 @@ void _Thread_Join(
executing,
queue_context
);
+ return _Thread_Wait_get_status( executing );
}
static void _Thread_Set_exit_value(
@@ -435,77 +432,70 @@ static void _Thread_Try_life_change_request(
}
}
-void _Thread_Cancel(
- Thread_Control *the_thread,
- Thread_Control *executing,
- void *exit_value
+Thread_Cancel_state _Thread_Cancel(
+ Thread_Control *the_thread,
+ Thread_Control *executing,
+ Thread_Life_state life_states_to_clear
)
{
- ISR_lock_Context lock_context;
- Thread_Life_state previous;
- Per_CPU_Control *cpu_self;
+ ISR_lock_Context lock_context;
+ Thread_Life_state previous;
_Assert( the_thread != executing );
_Thread_State_acquire( the_thread, &lock_context );
- _Thread_Set_exit_value( the_thread, exit_value );
+ _Thread_Set_exit_value( the_thread, PTHREAD_CANCELED );
previous = _Thread_Change_life_locked(
the_thread,
- 0,
+ life_states_to_clear,
THREAD_LIFE_TERMINATING,
0
);
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
-
if ( _States_Is_dormant( the_thread->current_state ) ) {
_Thread_State_release( the_thread, &lock_context );
_Thread_Make_zombie( the_thread );
- } else {
- Priority_Control priority;
-
- _Thread_Try_life_change_request( the_thread, previous, &lock_context );
- priority = _Thread_Get_priority( executing );
- _Thread_Raise_real_priority( the_thread, priority );
+ return THREAD_CANCEL_DONE;
}
- _Thread_Dispatch_enable( cpu_self );
+ _Thread_Try_life_change_request( the_thread, previous, &lock_context );
+ return THREAD_CANCEL_IN_PROGRESS;
}
-static void _Thread_Close_enqueue_callout(
- Thread_queue_Queue *queue,
+Status_Control _Thread_Close(
Thread_Control *the_thread,
- Per_CPU_Control *cpu_self,
+ Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- Thread_Close_context *context;
-
- context = (Thread_Close_context *) queue_context;
- _Thread_Cancel( context->cancel, the_thread, NULL );
-}
+ Per_CPU_Control *cpu_self;
+ Thread_Cancel_state cancel_state;
-void _Thread_Close(
- Thread_Control *the_thread,
- Thread_Control *executing,
- Thread_Close_context *context
-)
-{
- context->cancel = the_thread;
- _Thread_queue_Context_set_enqueue_callout(
- &context->Base,
- _Thread_Close_enqueue_callout
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
);
+ _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+
+ cancel_state = _Thread_Cancel( the_thread, executing, THREAD_LIFE_DETACHED );
+
+ if ( cancel_state == THREAD_CANCEL_DONE ) {
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
+ _Thread_Dispatch_unnest( cpu_self );
_Thread_State_acquire_critical(
the_thread,
- &context->Base.Lock_context.Lock_context
+ &queue_context->Lock_context.Lock_context
);
- _Thread_Join(
+
+ return _Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
- &context->Base
+ queue_context
);
}