Diffstat (limited to 'cpukit/score')
-rw-r--r-- cpukit/score/cpu/aarch64/cpu.c | 22
-rw-r--r-- cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h | 1
-rw-r--r-- cpukit/score/cpu/aarch64/include/machine/elf_machdep.h | 256
-rw-r--r-- cpukit/score/cpu/aarch64/include/rtems/score/cpu.h | 6
-rw-r--r-- cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h | 9
-rw-r--r-- cpukit/score/cpu/arm/aarch32-psma-init.c | 14
-rw-r--r-- cpukit/score/cpu/arm/cpu.c | 5
-rw-r--r-- cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h | 13
-rw-r--r-- cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h | 26
-rw-r--r-- cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h | 11
-rw-r--r-- cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h | 11
-rw-r--r-- cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h | 12
-rw-r--r-- cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h | 17
-rw-r--r-- cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h | 10
-rw-r--r-- cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h | 16
-rw-r--r-- cpukit/score/cpu/riscv/include/libcpu/byteorder.h | 2
-rw-r--r-- cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h | 12
-rw-r--r-- cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h | 12
-rw-r--r-- cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h | 7
-rw-r--r-- cpukit/score/src/gcovdumpinfo.c | 96
-rw-r--r-- cpukit/score/src/gcovdumpinfobase64.c | 104
-rw-r--r-- cpukit/score/src/gcovinfoset.c | 43
-rw-r--r-- cpukit/score/src/kern_ntptime.c | 35
-rw-r--r-- cpukit/score/src/kern_tc.c | 81
-rw-r--r-- cpukit/score/src/memorydirtyfreeareas.c | 9
-rw-r--r-- cpukit/score/src/memorynoinit.c | 45
-rw-r--r-- cpukit/score/src/memoryzerofreeareas.c | 9
-rw-r--r-- cpukit/score/src/mpci.c | 2
-rw-r--r-- cpukit/score/src/objectactivecount.c | 18
-rw-r--r-- cpukit/score/src/objectfree.c | 12
-rw-r--r-- cpukit/score/src/rbtreemax.c | 4
-rw-r--r-- cpukit/score/src/rbtreemin.c | 4
-rw-r--r-- cpukit/score/src/rbtreenext.c | 4
-rw-r--r-- cpukit/score/src/rbtreeprev.c | 4
-rw-r--r-- cpukit/score/src/scheduleredfblock.c | 4
-rw-r--r-- cpukit/score/src/scheduleredfchangepriority.c | 5
-rw-r--r-- cpukit/score/src/scheduleredfschedule.c | 5
-rw-r--r-- cpukit/score/src/scheduleredfunblock.c | 20
-rw-r--r-- cpukit/score/src/scheduleredfyield.c | 2
-rw-r--r-- cpukit/score/src/schedulerpriorityblock.c | 4
-rw-r--r-- cpukit/score/src/schedulerprioritychangepriority.c | 5
-rw-r--r-- cpukit/score/src/schedulerpriorityschedule.c | 5
-rw-r--r-- cpukit/score/src/schedulerpriorityunblock.c | 16
-rw-r--r-- cpukit/score/src/schedulerpriorityyield.c | 5
-rw-r--r-- cpukit/score/src/schedulersimpleblock.c | 4
-rw-r--r-- cpukit/score/src/schedulersimplechangepriority.c | 5
-rw-r--r-- cpukit/score/src/schedulersimpleschedule.c | 5
-rw-r--r-- cpukit/score/src/schedulersimpleunblock.c | 20
-rw-r--r-- cpukit/score/src/schedulersimpleyield.c | 5
-rw-r--r-- cpukit/score/src/threadchangepriority.c | 12
-rw-r--r-- cpukit/score/src/threadcreateidle.c | 11
-rw-r--r-- cpukit/score/src/threadinitialize.c | 1
-rw-r--r-- cpukit/score/src/threadqops.c | 16
-rw-r--r-- cpukit/score/src/threadqtimeout.c | 4
-rw-r--r-- cpukit/score/src/threadrestart.c | 128
-rw-r--r-- cpukit/score/src/watchdogtick.c | 4
64 files changed, 1057 insertions, 217 deletions
diff --git a/cpukit/score/cpu/aarch64/cpu.c b/cpukit/score/cpu/aarch64/cpu.c
index 88e7ad8a8c..923f53da08 100644
--- a/cpukit/score/cpu/aarch64/cpu.c
+++ b/cpukit/score/cpu/aarch64/cpu.c
@@ -174,28 +174,6 @@ uint32_t _CPU_ISR_Get_level( void )
return ( level & AARCH64_PSTATE_I ) != 0;
}
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-)
-{
- /* Redirection table starts at the end of the vector table */
- CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
-
- CPU_ISR_handler current_handler = table [vector];
-
- /* The current handler is now the old one */
- if (old_handler != NULL) {
- *old_handler = current_handler;
- }
-
- /* Write only if necessary to avoid writes to a maybe read-only memory */
- if (current_handler != new_handler) {
- table [vector] = new_handler;
- }
-}
-
void _CPU_Initialize( void )
{
/* Do nothing */
diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
index 6b6296bb7a..0d65004f88 100644
--- a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
+++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
@@ -60,7 +60,6 @@ extern "C" {
#define MMU_PAGE_BITS 12
#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
#define MMU_BITS_PER_LEVEL 9
-#define MMU_TOP_LEVEL_PAGE_BITS ( 2 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
#define AARCH64_MMU_FLAGS_BASE \
( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
diff --git a/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
new file mode 100644
index 0000000000..c1d219d715
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/machine/elf_machdep.h
@@ -0,0 +1,256 @@
+/* $NetBSD: elf_machdep.h,v 1.4 2018/10/12 01:28:58 ryo Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AARCH64_ELF_MACHDEP_H_
+#define _AARCH64_ELF_MACHDEP_H_
+
+#ifdef __aarch64__
+
+#if defined(__AARCH64EB__)
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2MSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2MSB
+#else
+#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB
+#define ELF32_MACHDEP_ENDIANNESS ELFDATA2LSB
+#endif
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x00000001
+#define EF_ARM_HASENTRY 0x00000002
+#define EF_ARM_INTERWORK 0x00000004 /* GNU binutils 000413 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ARM ELF A08 */
+#define EF_ARM_APCS_26 0x00000008 /* GNU binutils 000413 */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ARM ELF B01 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* GNU binutils 000413 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ARM ELF B01 */
+#define EF_ARM_PIC 0x00000020
+#define EF_ARM_ALIGN8 0x00000040 /* 8-bit structure alignment. */
+#define EF_ARM_NEW_ABI 0x00000080
+#define EF_ARM_OLD_ABI 0x00000100
+#define EF_ARM_SOFT_FLOAT 0x00000200
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_EABIMASK 0xff000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define ELF32_MACHDEP_ID_CASES \
+ case EM_ARM: \
+ break;
+
+#define ELF64_MACHDEP_ID_CASES \
+ case EM_AARCH64: \
+ break;
+
+#define ELF64_MACHDEP_ID EM_AARCH64
+#define ELF32_MACHDEP_ID EM_ARM
+
+#define KERN_ELFSIZE 64
+#define ARCH_ELFSIZE 64 /* MD native binary size */
+
+/* Processor specific relocation types */
+
+#define R_AARCH64_NONE 0
+#define R_AARCH64_NONE2 256
+
+#define R_AARCH64_ABS64 257 /* S + A */
+#define R_AARCH64_ABS32 258 /* S + A */
+#define R_AARCH64_ABS16 259 /* S + A */
+#define R_AARCH64_PREL64 260 /* S + A - P */
+#define R_AARCH64_PREL32 261 /* S + A - P */
+#define R_AARCH64_PREL16 262 /* S + A - P */
+#define R_AARCH64_MOVW_UABS_G0 263 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G0_NC 264 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_UABS_G1 265 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G1_NC 266 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_UABS_G2 267 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G2_NC 268 /* S + A [bits 32..47] */
+#define R_AARCH64_MOVW_UABS_G3 269 /* S + A [bits 48..63] */
+#define R_AARCH64_MOVW_SABS_G0 270 /* S + A [bits 0..15] */
+#define R_AARCH64_MOVW_SABS_G1 271 /* S + A [bits 16..31] */
+#define R_AARCH64_MOVW_SABS_G2 272 /* S + A [bits 32..47] */
+#define R_AARCH64_LD_PREL_LO19 273 /* S + A - P */
+#define R_AARCH64_ADR_PREL_LO21 274 /* S + A - P */
+#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 /* Page(S + A) - Page(P) */
+#define R_AARCH64_ADD_ABS_LO12_NC 277 /* S + A */
+#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* S + A */
+#define R_AARCH_TSTBR14 279 /* S + A - P */
+#define R_AARCH_CONDBR19 281 /* S + A - P */
+#define R_AARCH_JUMP26 282 /* S + A - P */
+#define R_AARCH_CALL26 283 /* S + A - P */
+#define R_AARCH_LDST16_ABS_LO12_NC 284 /* S + A */
+#define R_AARCH_LDST32_ABS_LO12_NC 285 /* S + A */
+#define R_AARCH_LDST64_ABS_LO12_NC 286 /* S + A */
+#define R_AARCH64_MOVW_PREL_G0 287 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G0_NC 288 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1 289 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G1_NC 290 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2 291 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G2_NC 292 /* S + A - P */
+#define R_AARCH64_MOVW_PREL_G3 293 /* S + A - P */
+
+#define R_AARCH64_LDST128_ABS_LO12_NC 299 /* S + A */
+#define R_AARCH64_MOVW_GOTOFF_G0 300 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1 302 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2 304 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_MOVW_GOTOFF_G3 306 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_GOTREL64 307 /* S + A - GOT */
+#define R_AARCH64_GOTREL32 308 /* S + A - GOT */
+#define R_AARCH64_GOT_LD_PREL19 309 /* G(GDAT(S + A)) - P */
+#define R_AARCH64_LD64_GOTOFF_LO15 310 /* G(GDAT(S + A)) - GOT */
+#define R_AARCH64_ADR_GOT_PAGE 311 /* Page(G(GDAT(S + A))) - Page(GOT) */
+#define R_AARCH64_LD64_GOT_LO12_NC 312 /* G(GDAT(S + A)) */
+#define R_AARCH64_LD64_GOTPAGE_LO15 313 /* G(GDAT(S + A)) - Page(GOT) */
+
+#define R_AARCH64_TLSGD_ADR_PREL21 512 /* G(GTLSIDX(S,A)) - P */
+#define R_AARCH64_TLSGD_ADR_PAGE21 513 /* Page(G(GTLSIDX(S,A))) - Page(P) */
+#define R_AARCH64_TLSGD_ADD_LO12_NC 514 /* G(GTLSIDX(S,A)) */
+#define R_AARCH64_TLSGD_MOVW_G1 515 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSGD_MOVW_G0_NV 516 /* G(GTLSIDX(S,A)) - GOT */
+#define R_AARCH64_TLSLD_ADR_PREL21 517 /* G(GLDM(S,A)) - P */
+#define R_AARCH64_TLSLD_ADR_PAGE21 518 /* Page(G(GLDM(S))) - Page(P) */
+#define R_AARCH64_TLSLD_ADD_LO12_NC 519 /* G(GLDM(S)) */
+#define R_AARCH64_TLSLD_MOVW_G1 520 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_MOVW_G0_NC 521 /* G(GLDM(S)) - GOT */
+#define R_AARCH64_TLSLD_LD_PREL21 522 /* G(GLDM(S)) - P */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 /* DTPREL(S+A) */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 /* G(GTPREL(S+A)) - GOT */
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 /* Page(G(GTPREL(S+A))) - Page(P) */
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 /* G(GTPREL(S+A)) */
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 /* G(GTPREL(S+A)) - P */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1 545 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G1_NC 546 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0 547 /* TPREL(S+A) */
+#define R_AARCH64_MOVW_TPREL_G0_NC 548 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_HI12 549 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12 550 /* TPREL(S+A) */
+#define R_AARCH64_ADD_TPREL_LO12_NC 551 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12 552 /* TPREL(S+A) */
+#define R_AARCH64_LDST8_TPREL_LO12_NC 553 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12 554 /* TPREL(S+A) */
+#define R_AARCH64_LDST16_TPREL_LO12_NC 555 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12 556 /* TPREL(S+A) */
+#define R_AARCH64_LDST32_TPREL_LO12_NC 557 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12 558 /* TPREL(S+A) */
+#define R_AARCH64_LDST64_TPREL_LO12_NC 559 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC_LD_PREL19 560 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PREL21 561 /* G(GTLSDESC(S+A)) - P */
+#define R_AARCH64_TLSDESC_LD_PAGE21 562 /* Page(G(GTLSDESC(S+A))) - Page(P) */
+#define R_AARCH64_TLSDESC_LD64_LO12 563 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_ADD_LO12 564 /* G(GTLSDESC(S+A)) */
+#define R_AARCH64_TLSDESC_OFF_G1 565 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_OFF_G0_NC 566 /* G(GTLSDESC(S+A)) - GOT */
+#define R_AARCH64_TLSDESC_LDR 567 /* */
+#define R_AARCH64_TLSDESC_ADD 568 /* */
+#define R_AARCH64_TLSDESC_CALL 569 /* */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12 570 /* TPREL(S+A) */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC 571 /* TPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12 572 /* DTPREL(S+A) */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 572 /* DTPREL(S+A) */
+
+/* Dynamic Relocations */
+#define R_AARCH64_P32_COPY 180
+#define R_AARCH64_P32_GLOB_DAT 181 /* S + A */
+#define R_AARCH64_P32_JUMP_SLOT 182 /* S + A */
+#define R_AARCH64_P32_RELATIVE 183 /* Delta(S) + A */
+#define R_AARCH64_P32_TLS_DTPREL 184 /* DTPREL(S+A) */
+#define R_AARCH64_P32_TLS_DTPMOD 185 /* LBM(S) */
+#define R_AARCH64_P32_TLS_TPREL 186 /* TPREL(S+A) */
+#define R_AARCH64_P32_TLSDESC 187 /* TLSDESC(S+A) */
+#define R_AARCH64_P32_IRELATIVE 188 /* Indirect(Delta(S) + A) */
+
+#define R_AARCH64_COPY 1024
+#define R_AARCH64_GLOB_DAT 1025 /* S + A */
+#define R_AARCH64_JUMP_SLOT 1026 /* S + A */
+#define R_AARCH64_RELATIVE 1027 /* Delta(S) + A */
+#define R_AARCH64_TLS_DTPREL64 1028 /* DTPREL(S+A) */
+#define R_AARCH64_TLS_DTPMOD64 1029 /* LBM(S) */
+#define R_AARCH64_TLS_TPREL64 1030 /* TPREL(S+A) */
+#define R_AARCH64_TLSDESC 1031 /* TLSDESC(S+A) */
+#define R_AARCH64_IRELATIVE 1032 /* Indirect(Delta(S) + A) */
+
+#define R_TYPE(name) R_AARCH64_ ## name
+#define R_TLS_TYPE(name) R_AARCH64_ ## name ## 64
+
+/* Processor specific program header types */
+#define PT_AARCH64_ARCHEXT (PT_LOPROC + 0)
+#define PT_AARCH64_UNWIND (PT_LOPROC + 1)
+
+/* Processor specific section header flags */
+#define SHF_ENTRYSECT 0x10000000
+#define SHF_COMDEF 0x80000000
+
+#define SHT_AARCH64_ATTRIBUTES (SHT_LOPROC + 3)
+
+#ifdef _KERNEL
+#ifdef ELFSIZE
+#define ELF_MD_PROBE_FUNC ELFNAME2(aarch64_netbsd,probe)
+#endif
+
+struct exec_package;
+
+int aarch64_netbsd_elf64_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+int aarch64_netbsd_elf32_probe(struct lwp *, struct exec_package *, void *,
+ char *, vaddr_t *);
+#endif
+
+#elif defined(__arm__)
+
+#include <arm/elf_machdep.h>
+
+#endif
+
+#endif /* _AARCH64_ELF_MACHDEP_H_ */
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index fdc0e3d929..47a8e97985 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -306,12 +306,6 @@ void _CPU_Initialize( void );
typedef void ( *CPU_ISR_handler )( void );
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-);
-
/**
* @brief CPU switch context.
*/
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
index ffdef2f30a..14836965ef 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpuimpl.h
@@ -162,6 +162,15 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ __asm__ volatile (
+ "msr TPIDR_EL0, %0" : : "r" ( context->thread_id ) : "memory"
+ );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/arm/aarch32-psma-init.c b/cpukit/score/cpu/arm/aarch32-psma-init.c
index ee9338f050..543e1b7d9b 100644
--- a/cpukit/score/cpu/arm/aarch32-psma-init.c
+++ b/cpukit/score/cpu/arm/aarch32-psma-init.c
@@ -45,7 +45,7 @@
#include <rtems/score/aarch32-system-registers.h>
#include <rtems/score/cpu.h>
-#define AARCH32_PSMA_REGION_MAX \
+#define AARCH32_PMSA_REGION_MAX \
( ( AARCH32_MPUIR_REGION_MASK >> AARCH32_MPUIR_REGION_SHIFT ) + 1 )
static void _AArch32_PMSA_Configure(
@@ -133,16 +133,16 @@ size_t _AArch32_PMSA_Map_sections_to_regions(
if ( attr == region_attr ) {
uint32_t region_end;
- if ( end == region_base ) {
- /* Extend the region region */
+ if ( end - region_base <= AARCH32_PMSA_MIN_REGION_ALIGN ) {
+ /* Extend the region */
regions[ ri ].base = base;
break;
}
region_end = region_limit + AARCH32_PMSA_MIN_REGION_ALIGN;
- if ( base == region_end ) {
- /* Extend the region region */
+ if ( region_end - base <= AARCH32_PMSA_MIN_REGION_ALIGN ) {
+ /* Extend the region */
regions[ ri ].limit = limit;
break;
}
@@ -153,7 +153,7 @@ size_t _AArch32_PMSA_Map_sections_to_regions(
}
}
- if ( end <= region_base ) {
+ if ( base <= region_base ) {
size_t i;
if ( region_used >= region_max ) {
@@ -196,7 +196,7 @@ void _AArch32_PMSA_Initialize(
size_t section_count
)
{
- AArch32_PMSA_Region regions[ AARCH32_PSMA_REGION_MAX ];
+ AArch32_PMSA_Region regions[ AARCH32_PMSA_REGION_MAX ];
size_t region_max;
size_t region_used;
diff --git a/cpukit/score/cpu/arm/cpu.c b/cpukit/score/cpu/arm/cpu.c
index 5c5b253470..b2cc6039b0 100644
--- a/cpukit/score/cpu/arm/cpu.c
+++ b/cpukit/score/cpu/arm/cpu.c
@@ -167,8 +167,10 @@ void _CPU_ISR_install_vector(
CPU_ISR_handler *old_handler
)
{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
/* Redirection table starts at the end of the vector table */
- CPU_ISR_handler *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
+ CPU_ISR_handler volatile *table = (CPU_ISR_handler *) (MAX_EXCEPTIONS * 4);
CPU_ISR_handler current_handler = table [vector];
@@ -181,6 +183,7 @@ void _CPU_ISR_install_vector(
if (current_handler != new_handler) {
table [vector] = new_handler;
}
+#pragma GCC diagnostic pop
}
void _CPU_Initialize( void )
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
index 0ce347c86f..4f20113b71 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
@@ -160,6 +160,19 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
+ __asm__ volatile (
+ "mcr p15, 0, %0, c13, c0, 3" : : "r" ( context->thread_id ) : "memory"
+ );
+#else
+ (void) context;
+#endif
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
index 1485abd365..91e57da4a0 100644
--- a/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/bfin/include/rtems/score/cpuimpl.h
@@ -59,6 +59,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
index 31ec0ac8bb..71f2679dde 100644
--- a/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/i386/include/rtems/score/cpuimpl.h
@@ -80,6 +80,32 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ uint32_t tmp;
+ uint32_t cpu_index;
+
+#ifdef RTEMS_SMP
+ cpu_index = _CPU_SMP_Get_current_processor();
+#else
+ cpu_index = 0;
+#endif
+
+ __asm__ volatile (
+ "movl " RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_0_OFFSET ) "(%2), %0\n"
+ "movl %0, _Global_descriptor_table+24(,%1,8)\n"
+ "movl " RTEMS_XSTRING( I386_CONTEXT_CONTROL_GS_1_OFFSET ) "(%2), %0\n"
+ "movl %0, _Global_descriptor_table+28(,%1,8)\n"
+ "leal 24(,%1,8), %0\n"
+ "movl %0, %%gs\n"
+ : "=&r" ( tmp )
+ : "r" ( cpu_index ), "r" ( context )
+ : "memory"
+ );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
index eb0c058723..24e8e5cb41 100644
--- a/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/lm32/include/rtems/score/cpuimpl.h
@@ -58,6 +58,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
index e3b61efd9f..5c7c35943a 100644
--- a/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/m68k/include/rtems/score/cpuimpl.h
@@ -78,6 +78,17 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ /*
+ * There is nothing to do since the thread-local storage area is obtained by
+ * calling __m68k_read_tp().
+ */
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
index 0573759d52..e4f0303ad8 100644
--- a/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/microblaze/include/rtems/score/cpuimpl.h
@@ -86,6 +86,17 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ /*
+ * There is nothing to do since the thread-local storage area is obtained by
+ * calling __tls_get_addr().
+ */
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
index 0568134351..23d3f35960 100644
--- a/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/mips/include/rtems/score/cpuimpl.h
@@ -78,6 +78,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
index 038a1326cc..a54824f16b 100644
--- a/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/moxie/include/rtems/score/cpuimpl.h
@@ -78,6 +78,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
index 215df68f67..518fac4308 100644
--- a/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/nios2/include/rtems/score/cpuimpl.h
@@ -70,6 +70,18 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uint32_t r23 __asm__( "r23" );
+
+ r23 = context->r23;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( r23 ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
index 6f4abfcfc3..1eec4e6b7a 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpuimpl.h
@@ -166,6 +166,23 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+/**
+ * @brief Uses the thread-local storage area of the context.
+ *
+ * Some architectures may use dedicated registers to reference the thread-local
+ * storage area of the associated thread. This function should set these
+ * registers to the values defined by the specified processor context.
+ *
+ * @param context is the processor context defining the thread-local storage
+ * area to use.
+ */
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
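
The no_cpu stub above documents the new _CPU_Use_thread_local_storage() port hook. As a rough usage sketch (not part of this change set; the helper name is made up), a generic caller that has just rebuilt a thread's processor context, for example a thread (re)start path, could activate the thread-local storage registers like this:

#include <rtems/score/cpuimpl.h>
#include <rtems/score/threadimpl.h>

/* Hypothetical helper: point the architecture's TLS register(s) at the
   executing thread's freshly initialized context. */
static void _Example_Use_new_tls( Thread_Control *executing )
{
  _CPU_Use_thread_local_storage( &executing->Registers );
}
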
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
index 37cd1db436..35d186990d 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/cpuimpl.h
@@ -70,6 +70,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "l.nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
index c7618c9355..58db24fbbd 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/or1k-utility.h
@@ -345,7 +345,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg)
{
uint32_t spr_value;
- asm volatile (
+ __asm__ volatile (
"l.mfspr %0, %1, 0;\n\t"
: "=r" (spr_value) : "r" (reg));
@@ -354,7 +354,7 @@ static inline uint32_t _OR1K_mfspr(uint32_t reg)
static inline void _OR1K_mtspr(uint32_t reg, uint32_t value)
{
- asm volatile (
+ __asm__ volatile (
"l.mtspr %1, %0, 0;\n\t"
:: "r" (value), "r" (reg)
);
@@ -386,12 +386,12 @@ static inline void _OR1K_mtspr(uint32_t reg, uint32_t value)
static inline void _OR1K_Sync_mem( void )
{
- asm volatile("l.msync");
+ __asm__ volatile("l.msync");
}
static inline void _OR1K_Sync_pipeline( void )
{
- asm volatile("l.psync");
+ __asm__ volatile("l.psync");
}
/**
@@ -402,7 +402,7 @@ static inline void _OR1K_Sync_pipeline( void )
*
*/
#define _OR1KSIM_CPU_Halt() \
- asm volatile ("l.nop 0xc")
+ __asm__ volatile ("l.nop 0xc")
#ifdef __cplusplus
}
diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
index cfed43ced4..4a88fe18b1 100644
--- a/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpuimpl.h
@@ -283,6 +283,22 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+#ifdef __powerpc64__
+ register uintptr_t tp __asm__( "13" );
+#else
+ register uintptr_t tp __asm__( "2" );
+#endif
+
+ tp = ppc_get_context( context )->tp;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( tp ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/riscv/include/libcpu/byteorder.h b/cpukit/score/cpu/riscv/include/libcpu/byteorder.h
index 939e51fe84..1b4f6f3b1e 100644
--- a/cpukit/score/cpu/riscv/include/libcpu/byteorder.h
+++ b/cpukit/score/cpu/riscv/include/libcpu/byteorder.h
@@ -7,6 +7,8 @@
#ifndef _LIBCPU_BYTEORDER_H
#define _LIBCPU_BYTEORDER_H
+#include <stdint.h>
+
static inline void st_le32(volatile uint32_t *addr, uint32_t value)
{
*(addr)=value ;
diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
index 5162cbbd51..ca09832d0e 100644
--- a/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/riscv/include/rtems/score/cpuimpl.h
@@ -430,6 +430,18 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uintptr_t tp __asm__( "tp" );
+
+ tp = context->tp;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( tp ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
index 745a185d1a..cb20bab616 100644
--- a/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sh/include/rtems/score/cpuimpl.h
@@ -59,6 +59,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
index 7197eb960e..2a200be7e3 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
@@ -234,6 +234,18 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ register uint32_t g7 __asm__( "g7" );
+
+ g7 = context->g7;
+
+ /* Make sure that the register assignment is not optimized away */
+ __asm__ volatile ( "" : : "r" ( g7 ) );
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
index c026687d01..23aed1a8d6 100644
--- a/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpuimpl.h
@@ -78,6 +78,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
index 23c1437ba0..8f73b45ad6 100644
--- a/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/v850/include/rtems/score/cpuimpl.h
@@ -78,6 +78,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
index d3a4b848e6..680c61ae20 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpuimpl.h
@@ -62,6 +62,13 @@ RTEMS_INLINE_ROUTINE void _CPU_Instruction_no_operation( void )
__asm__ volatile ( "nop" );
}
+RTEMS_INLINE_ROUTINE void _CPU_Use_thread_local_storage(
+ const Context_Control *context
+)
+{
+ (void) context;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/src/gcovdumpinfo.c b/cpukit/score/src/gcovdumpinfo.c
new file mode 100644
index 0000000000..8598fce578
--- /dev/null
+++ b/cpukit/score/src/gcovdumpinfo.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the implementation of _Gcov_Dump_info().
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+typedef struct {
+ IO_Put_char put_char;
+ void *arg;
+} Gcov_Context;
+
+static void _Gcov_Dump( const void *data, unsigned length, void *arg )
+{
+ Gcov_Context *ctx;
+ IO_Put_char put_char;
+ void *ctx_arg;
+ const char *in;
+ const void *end;
+
+ ctx = arg;
+ in = data;
+ end = in + length;
+ put_char = ctx->put_char;
+ ctx_arg = ctx->arg;
+
+ while ( in != end ) {
+ ( *put_char )( *in, ctx_arg );
+ ++in;
+ }
+}
+
+static void _Gcov_Filename( const char *filename, void *arg )
+{
+ __gcov_filename_to_gcfn( filename, _Gcov_Dump, arg );
+}
+
+static void *_Gcov_Allocate( unsigned length, void *arg )
+{
+ (void) length;
+ (void) arg;
+ return NULL;
+}
+
+void _Gcov_Dump_info( IO_Put_char put_char, void *arg )
+{
+ Gcov_Context ctx;
+ const struct gcov_info * const *item;
+
+ ctx.put_char = put_char;
+ ctx.arg = arg;
+
+ RTEMS_LINKER_SET_FOREACH( gcov_info, item ) {
+ __gcov_info_to_gcda(
+ *item,
+ _Gcov_Filename,
+ _Gcov_Dump,
+ _Gcov_Allocate,
+ &ctx
+ );
+ }
+}
diff --git a/cpukit/score/src/gcovdumpinfobase64.c b/cpukit/score/src/gcovdumpinfobase64.c
new file mode 100644
index 0000000000..be07f03291
--- /dev/null
+++ b/cpukit/score/src/gcovdumpinfobase64.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the implementation of
+ * _Gcov_Dump_info_base64().
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+#include <limits.h>
+#include <string.h>
+
+typedef struct {
+ IO_Put_char put_char;
+ void *arg;
+ int out;
+ size_t index;
+ char buf[ 57 ];
+} Gcov_Base64_context;
+
+static void _Gcov_Base64_put_char( int c, void *arg )
+{
+ Gcov_Base64_context *ctx;
+
+ ctx = arg;
+
+ ( *ctx->put_char )( c, ctx->arg );
+ ++ctx->out;
+
+ if ( ctx->out >= 76 ) {
+ ctx->out = 0;
+ ( *ctx->put_char )( '\n', ctx->arg );
+ }
+}
+
+static void _Gcov_Base64_encode( int c, void *arg )
+{
+ Gcov_Base64_context *ctx;
+ size_t index;
+
+ ctx = arg;
+ index = ctx->index;
+ ctx->buf[ index ] = (char) c;
+
+ if ( index == RTEMS_ARRAY_SIZE( ctx->buf ) - 1 ) {
+ index = 0;
+ _IO_Base64(
+ _Gcov_Base64_put_char,
+ ctx,
+ ctx->buf,
+ sizeof( ctx->buf ),
+ NULL,
+ INT_MAX
+ );
+ } else {
+ ++index;
+ }
+
+ ctx->index = index;
+}
+
+void _Gcov_Dump_info_base64( IO_Put_char put_char, void *arg )
+{
+ Gcov_Base64_context ctx;
+
+ memset( &ctx, 0, sizeof( ctx ) );
+ ctx.put_char = put_char;
+ ctx.arg = arg;
+ _Gcov_Dump_info( _Gcov_Base64_encode, &ctx );
+ _IO_Base64( _Gcov_Base64_put_char, &ctx, ctx.buf, ctx.index, NULL, INT_MAX );
+}
diff --git a/cpukit/score/src/gcovinfoset.c b/cpukit/score/src/gcovinfoset.c
new file mode 100644
index 0000000000..6fd695043b
--- /dev/null
+++ b/cpukit/score/src/gcovinfoset.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreIO
+ *
+ * @brief This source file contains the definition of the gcov information
+ * linker set.
+ */
+
+/*
+ * Copyright (C) 2021, 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/gcov.h>
+
+RTEMS_LINKER_ROSET( gcov_info, const struct gcov_info * );
diff --git a/cpukit/score/src/kern_ntptime.c b/cpukit/score/src/kern_ntptime.c
index cb39133408..1233166a61 100644
--- a/cpukit/score/src/kern_ntptime.c
+++ b/cpukit/score/src/kern_ntptime.c
@@ -58,6 +58,9 @@ __FBSDID("$FreeBSD$");
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
+#ifdef __rtems__
+#define _KERNEL
+#endif /* __rtems__ */
#include <sys/timepps.h>
#ifndef __rtems__
#include <sys/syscallsubr.h>
@@ -71,11 +74,31 @@ __FBSDID("$FreeBSD$");
#define ntp_update_second _Timecounter_NTP_update_second
#define time_uptime _Timecounter_Time_uptime
struct thread;
+
+static inline long
+lmax(long a, long b)
+{
+
+ if (a > b)
+ return (a);
+ return (b);
+}
+
+static inline quad_t
+qmin(quad_t a, quad_t b)
+{
+
+ if (a < b)
+ return (a);
+ return (b);
+}
#endif /* __rtems__ */
+#ifndef __rtems__
#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif
+#endif /* __rtems__ */
/*
* Single-precision macros for 64-bit machines
@@ -374,7 +397,6 @@ SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval",
"");
-#endif /* __rtems__ */
#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
@@ -391,6 +413,7 @@ SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
&time_freq, 0,
"Frequency offset (ns/sec)");
#endif
+#endif /* __rtems__ */
/*
* ntp_adjtime() - NTP daemon application interface
@@ -531,16 +554,6 @@ kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp)
ntv->jitcnt = pps_jitcnt;
ntv->stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
-#ifdef __rtems__
- ntv->ppsfreq = 0;
- ntv->jitter = 0;
- ntv->shift = 0;
- ntv->stabil = 0;
- ntv->jitcnt = 0;
- ntv->calcnt = 0;
- ntv->errcnt = 0;
- ntv->stbcnt = 0;
-#endif /* __rtems__ */
retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
NTP_UNLOCK();
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index e57da2c0ca..643026a1c8 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -56,12 +56,17 @@
#define timecounter _Timecounter
#define time_second _Timecounter_Time_second
#define time_uptime _Timecounter_Time_uptime
+
#include <rtems/score/timecounterimpl.h>
+#include <rtems/score/assert.h>
#include <rtems/score/atomic.h>
#include <rtems/score/smp.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/watchdogimpl.h>
#include <rtems/rtems/clock.h>
+
+#define ENOIOCTL EINVAL
+#define KASSERT(exp, arg) _Assert(exp)
#endif /* __rtems__ */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -90,6 +95,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vdso.h>
#endif /* __rtems__ */
#ifdef __rtems__
+#include <errno.h>
#include <limits.h>
#include <string.h>
#include <rtems.h>
@@ -115,6 +121,13 @@ atomic_thread_fence_rel(void)
}
static inline u_int
+atomic_load_int(Atomic_Uint *i)
+{
+
+ return (_Atomic_Load_uint(i, ATOMIC_ORDER_RELAXED));
+}
+
+static inline u_int
atomic_load_acq_int(Atomic_Uint *i)
{
@@ -1506,7 +1519,6 @@ unlock:
#endif /* __rtems__ */
}
-#ifndef __rtems__
/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
@@ -1515,6 +1527,7 @@ tc_getfrequency(void)
return (timehands->th_counter->tc_frequency);
}
+#ifndef __rtems__
static bool
sleeping_on_old_rtc(struct thread *td)
{
@@ -1891,7 +1904,6 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
"Timecounter hardware detected");
#endif /* __rtems__ */
-#ifndef __rtems__
/*
* RFC 2783 PPS-API implementation.
*/
@@ -1910,9 +1922,15 @@ abi_aware(struct pps_state *pps, int vers)
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
+#ifndef __rtems__
int err, timo;
+#else /* __rtems__ */
+ int err;
+#endif /* __rtems__ */
pps_seq_t aseq, cseq;
+#ifndef __rtems__
struct timeval tv;
+#endif /* __rtems__ */
if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
return (EINVAL);
@@ -1925,6 +1943,7 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
* sleep a long time.
*/
if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
+#ifndef __rtems__
if (fapi->timeout.tv_sec == -1)
timo = 0x7fffffff;
else {
@@ -1932,10 +1951,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
tv.tv_usec = fapi->timeout.tv_nsec / 1000;
timo = tvtohz(&tv);
}
+#endif /* __rtems__ */
aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
+#ifndef __rtems__
if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
if (pps->flags & PPSFLAG_MTX_SPIN) {
err = msleep_spin(pps, pps->driver_mtx,
@@ -1956,6 +1977,12 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
} else if (err != 0) {
return (err);
}
+#else /* __rtems__ */
+ _Assert(pps->wait != NULL);
+ err = (*pps->wait)(pps, fapi->timeout);
+ if (err != 0)
+ return (err);
+#endif /* __rtems__ */
}
}
@@ -2051,9 +2078,43 @@ pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
}
}
+#ifdef __rtems__
+/*
+ * The real implementation of hardpps() is defined in kern_ntptime.c. Use it
+ * only if the NTP support is needed by the application.
+ */
+RTEMS_WEAK void
+hardpps(struct timespec *tsp, long nsec)
+{
+
+ (void)tsp;
+ (void)nsec;
+}
+
+static int
+default_wait(struct pps_state *pps, struct timespec timeout)
+{
+
+ (void)pps;
+ (void)timeout;
+
+ return (ETIMEDOUT);
+}
+
+static void
+default_wakeup(struct pps_state *pps)
+{
+
+ (void)pps;
+}
+#endif /* __rtems__ */
void
pps_init(struct pps_state *pps)
{
+#ifdef __rtems__
+ pps->wait = default_wait;
+ pps->wakeup = default_wakeup;
+#endif /* __rtems__ */
pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
if (pps->ppscap & PPS_CAPTUREASSERT)
pps->ppscap |= PPS_OFFSETASSERT;
@@ -2089,9 +2150,11 @@ pps_capture(struct pps_state *pps)
pps->capffth = fftimehands;
#endif
pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
+#if defined(RTEMS_SMP)
atomic_thread_fence_acq();
if (pps->capgen != th->th_generation)
pps->capgen = 0;
+#endif
}
void
@@ -2116,7 +2179,11 @@ pps_event(struct pps_state *pps, int event)
if ((event & pps->ppsparam.mode) == 0)
return;
/* If the timecounter was wound up underneath us, bail out. */
+#if defined(RTEMS_SMP)
if (pps->capgen == 0 || pps->capgen !=
+#else
+ if (pps->capgen !=
+#endif
atomic_load_acq_int(&pps->capth->th_generation))
return;
@@ -2220,11 +2287,13 @@ pps_event(struct pps_state *pps, int event)
#endif
/* Wakeup anyone sleeping in pps_fetch(). */
+#ifndef __rtems__
wakeup(pps);
-}
#else /* __rtems__ */
-/* FIXME: https://devel.rtems.org/ticket/2349 */
+ _Assert(pps->wakeup != NULL);
+ (*pps->wakeup)(pps);
#endif /* __rtems__ */
+}
/*
* Timecounters need to be updated every so often to prevent the hardware
@@ -2260,9 +2329,13 @@ _Timecounter_Tick(void)
{
Per_CPU_Control *cpu_self = _Per_CPU_Get();
+#if defined(RTEMS_SMP)
if (_Per_CPU_Is_boot_processor(cpu_self)) {
+#endif
tc_windup(NULL);
+#if defined(RTEMS_SMP)
}
+#endif
_Watchdog_Tick(cpu_self);
}
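
With the kern_tc.c changes above, pps_fetch() blocks through a driver-supplied pps->wait handler and pps_event() signals completion through pps->wakeup; pps_init() installs the defaults (a timed-out wait and a no-op wakeup). A hedged sketch of a driver overriding them follows; the semaphore-based waiting and all example_* names are assumptions, not part of this diff:

#include <sys/timepps.h>
#include <rtems.h>
#include <errno.h>

static rtems_id example_pps_sema; /* hypothetical counting semaphore */

static int example_pps_wait(struct pps_state *pps, struct timespec timeout)
{
  rtems_status_code sc;

  (void)pps;
  /* Block until pps_event() wakes us up or the timeout expires. */
  sc = rtems_semaphore_obtain(example_pps_sema, RTEMS_WAIT,
      RTEMS_MILLISECONDS_TO_TICKS(timeout.tv_sec * 1000) +
      RTEMS_MICROSECONDS_TO_TICKS(timeout.tv_nsec / 1000));
  return (sc == RTEMS_SUCCESSFUL) ? 0 : ETIMEDOUT;
}

static void example_pps_wakeup(struct pps_state *pps)
{
  (void)pps;
  (void)rtems_semaphore_release(example_pps_sema);
}

static void example_pps_setup(struct pps_state *pps)
{
  pps_init(pps);                    /* installs the default handlers */
  pps->wait = example_pps_wait;     /* override with driver handlers */
  pps->wakeup = example_pps_wakeup;
}
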
diff --git a/cpukit/score/src/memorydirtyfreeareas.c b/cpukit/score/src/memorydirtyfreeareas.c
index fc6c2630bf..8a817c2208 100644
--- a/cpukit/score/src/memorydirtyfreeareas.c
+++ b/cpukit/score/src/memorydirtyfreeareas.c
@@ -10,7 +10,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2020 embedded brains GmbH
+ * Copyright (C) 2020, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,14 @@
#include <rtems/score/memory.h>
+#include <string.h>
+
void _Memory_Dirty_free_areas( void )
{
_Memory_Fill( _Memory_Get(), 0xcf );
+ memset(
+ _Memory_Noinit_begin,
+ 0xcf,
+ (uintptr_t) _Memory_Noinit_end - (uintptr_t) _Memory_Noinit_begin
+ );
}
diff --git a/cpukit/score/src/memorynoinit.c b/cpukit/score/src/memorynoinit.c
new file mode 100644
index 0000000000..19772356cd
--- /dev/null
+++ b/cpukit/score/src/memorynoinit.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreMemory
+ *
+ * @brief This source file contains the definition of ::_Memory_Noinit_begin
+ * and ::_Memory_Noinit_end.
+ */
+
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2022 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/memory.h>
+
+RTEMS_SECTION( ".noinit.rtems.begin" ) char _Memory_Noinit_begin[ 0 ];
+
+RTEMS_SECTION( ".noinit.rtems.end" ) char _Memory_Noinit_end[ 0 ];
diff --git a/cpukit/score/src/memoryzerofreeareas.c b/cpukit/score/src/memoryzerofreeareas.c
index 4ea0812426..b1cef47ef9 100644
--- a/cpukit/score/src/memoryzerofreeareas.c
+++ b/cpukit/score/src/memoryzerofreeareas.c
@@ -10,7 +10,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (C) 2020 embedded brains GmbH
+ * Copyright (C) 2020, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,14 @@
#include <rtems/score/memory.h>
+#include <string.h>
+
void _Memory_Zero_free_areas( void )
{
_Memory_Fill( _Memory_Get(), 0 );
+ memset(
+ _Memory_Noinit_begin,
+ 0,
+ (uintptr_t) _Memory_Noinit_end - (uintptr_t) _Memory_Noinit_begin
+ );
}
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 3b525a2066..458031c794 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -162,7 +162,7 @@ static void _MPCI_Create_server( void )
memset( &config, 0, sizeof( config ) );
config.scheduler = &_Scheduler_Table[ 0 ];
config.name = _Objects_Build_name( 'M', 'P', 'C', 'I' );
- config.priority = PRIORITY_PSEUDO_ISR;
+ config.priority = PRIORITY_MINIMUM;
config.is_fp = CPU_ALL_TASKS_ARE_FP;
config.stack_size = _Stack_Minimum()
+ _MPCI_Configuration.extra_mpci_receive_server_stack
diff --git a/cpukit/score/src/objectactivecount.c b/cpukit/score/src/objectactivecount.c
index 028058e473..5f0304fead 100644
--- a/cpukit/score/src/objectactivecount.c
+++ b/cpukit/score/src/objectactivecount.c
@@ -46,14 +46,22 @@ Objects_Maximum _Objects_Active_count(
const Objects_Information *information
)
{
- Objects_Maximum inactive;
- Objects_Maximum maximum;
+ Objects_Maximum active;
+ Objects_Maximum index;
+ Objects_Maximum maximum;
+ Objects_Control **local_table;
_Assert( _Objects_Allocator_is_owner() );
- inactive = (Objects_Maximum)
- _Chain_Node_count_unprotected( &information->Inactive );
+ active = 0;
maximum = _Objects_Get_maximum_index( information );
+ local_table = information->local_table;
- return maximum - inactive;
+ for ( index = 0; index < maximum; ++index ) {
+ if ( local_table[ index ] != NULL ) {
+ ++active;
+ }
+ }
+
+ return active;
}
diff --git a/cpukit/score/src/objectfree.c b/cpukit/score/src/objectfree.c
index 45b2ba2c86..06d7d82672 100644
--- a/cpukit/score/src/objectfree.c
+++ b/cpukit/score/src/objectfree.c
@@ -51,14 +51,16 @@ void _Objects_Free_unlimited(
if ( _Objects_Is_auto_extend( information ) ) {
Objects_Maximum objects_per_block;
- Objects_Maximum block;
- Objects_Maximum inactive;
+ Objects_Maximum index;
objects_per_block = information->objects_per_block;
- block = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
+ index = _Objects_Get_index( the_object->id ) - OBJECTS_INDEX_MINIMUM;
- if ( block > objects_per_block ) {
- block /= objects_per_block;
+ if ( _Objects_Is_in_allocated_block( index, objects_per_block ) ) {
+ Objects_Maximum block;
+ Objects_Maximum inactive;
+
+ block = index / objects_per_block;
++information->inactive_per_block[ block ];
diff --git a/cpukit/score/src/rbtreemax.c b/cpukit/score/src/rbtreemax.c
index 1b0e463aa2..f42e42043e 100644
--- a/cpukit/score/src/rbtreemax.c
+++ b/cpukit/score/src/rbtreemax.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -8,7 +10,7 @@
*/
/*
- * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2021 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/score/src/rbtreemin.c b/cpukit/score/src/rbtreemin.c
index b3cd4331c1..86e5b6e5c9 100644
--- a/cpukit/score/src/rbtreemin.c
+++ b/cpukit/score/src/rbtreemin.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -8,7 +10,7 @@
*/
/*
- * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2021 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/score/src/rbtreenext.c b/cpukit/score/src/rbtreenext.c
index 5d43af0068..a18b1cec2e 100644
--- a/cpukit/score/src/rbtreenext.c
+++ b/cpukit/score/src/rbtreenext.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -9,7 +11,7 @@
*/
/*
- * Copyright (C) 2012 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2012 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/score/src/rbtreeprev.c b/cpukit/score/src/rbtreeprev.c
index 9869cade99..c23910c085 100644
--- a/cpukit/score/src/rbtreeprev.c
+++ b/cpukit/score/src/rbtreeprev.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -8,7 +10,7 @@
*/
/*
- * Copyright (C) 2012 embedded brains GmbH (http://www.embedded-brains.de)
+ * Copyright (C) 2012 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/cpukit/score/src/scheduleredfblock.c b/cpukit/score/src/scheduleredfblock.c
index 56f7c9c021..cb7ab91450 100644
--- a/cpukit/score/src/scheduleredfblock.c
+++ b/cpukit/score/src/scheduleredfblock.c
@@ -47,11 +47,11 @@ void _Scheduler_EDF_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_EDF_Extract_body,
- _Scheduler_EDF_Schedule_body
+ _Scheduler_EDF_Get_highest_ready
);
}
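
Blocking now goes through _Scheduler_uniprocessor_Block(), which takes an extract routine and a get-highest-ready routine rather than a full schedule-body callback. The sketch below shows the apparent effect for the EDF scheduler written with direct calls; the routine signatures and the heir test are assumptions based on the replaced _Scheduler_Generic_block() call, not the actual helper body:

/* Hedged sketch, not the actual helper. */
static void edf_block_sketch(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_EDF_Extract_body( scheduler, the_thread, node );

  if ( _Thread_Is_heir( the_thread ) ) {
    /* A blocked heir cannot run, so force the dispatch. */
    _Scheduler_Update_heir(
      _Scheduler_EDF_Get_highest_ready( scheduler ),
      true
    );
  }
}

The priority and simple schedulers below follow the same pattern with their own extract and get-highest-ready routines.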
diff --git a/cpukit/score/src/scheduleredfchangepriority.c b/cpukit/score/src/scheduleredfchangepriority.c
index 26ce1c348f..de17ca0fad 100644
--- a/cpukit/score/src/scheduleredfchangepriority.c
+++ b/cpukit/score/src/scheduleredfchangepriority.c
@@ -71,5 +71,8 @@ void _Scheduler_EDF_Update_priority(
_Scheduler_EDF_Extract( context, the_node );
_Scheduler_EDF_Enqueue( context, the_node, insert_priority );
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_EDF_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/scheduleredfschedule.c b/cpukit/score/src/scheduleredfschedule.c
index d0acea7a30..40c5ab2c06 100644
--- a/cpukit/score/src/scheduleredfschedule.c
+++ b/cpukit/score/src/scheduleredfschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_EDF_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_EDF_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/scheduleredfunblock.c b/cpukit/score/src/scheduleredfunblock.c
index d6604686e3..4638eedf71 100644
--- a/cpukit/score/src/scheduleredfunblock.c
+++ b/cpukit/score/src/scheduleredfunblock.c
@@ -62,23 +62,5 @@ void _Scheduler_EDF_Unblock(
the_node->priority = priority;
_Scheduler_EDF_Enqueue( context, the_node, insert_priority );
-
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir(
- the_thread,
- priority == ( SCHEDULER_EDF_PRIO_MSB | PRIORITY_PSEUDO_ISR )
- );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
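
The policy described by the removed comment is not lost: a higher priority unblocked thread becomes the heir, and a pseudo-ISR heir forces a dispatch even when the executing thread is not preemptible. That logic is what _Scheduler_uniprocessor_Unblock() is expected to apply centrally. Restated as a sketch, using only names taken from the removed lines:

/* Sketch only: the heir update formerly performed inline here. */
static void edf_unblock_heir_update_sketch(
  Thread_Control   *the_thread,
  Priority_Control  priority
)
{
  if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
    /* Force a dispatch even for a non-preemptible executing thread when
     * the new heir is a pseudo-ISR system task.
     */
    _Scheduler_Update_heir(
      the_thread,
      priority == ( SCHEDULER_EDF_PRIO_MSB | PRIORITY_PSEUDO_ISR )
    );
  }
}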
diff --git a/cpukit/score/src/scheduleredfyield.c b/cpukit/score/src/scheduleredfyield.c
index d83e1d9268..d38bea705a 100644
--- a/cpukit/score/src/scheduleredfyield.c
+++ b/cpukit/score/src/scheduleredfyield.c
@@ -55,5 +55,5 @@ void _Scheduler_EDF_Yield(
_Scheduler_EDF_Extract( context, the_node );
_Scheduler_EDF_Enqueue( context, the_node, the_node->priority );
- _Scheduler_EDF_Schedule_body( scheduler, the_thread, true );
+ _Scheduler_uniprocessor_Yield( scheduler, _Scheduler_EDF_Get_highest_ready );
}
diff --git a/cpukit/score/src/schedulerpriorityblock.c b/cpukit/score/src/schedulerpriorityblock.c
index 53636940dc..ffc7a3ad86 100644
--- a/cpukit/score/src/schedulerpriorityblock.c
+++ b/cpukit/score/src/schedulerpriorityblock.c
@@ -49,11 +49,11 @@ void _Scheduler_priority_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_priority_Extract_body,
- _Scheduler_priority_Schedule_body
+ _Scheduler_priority_Get_highest_ready
);
}
diff --git a/cpukit/score/src/schedulerprioritychangepriority.c b/cpukit/score/src/schedulerprioritychangepriority.c
index 8a059763de..3588a2ce42 100644
--- a/cpukit/score/src/schedulerprioritychangepriority.c
+++ b/cpukit/score/src/schedulerprioritychangepriority.c
@@ -96,5 +96,8 @@ void _Scheduler_priority_Update_priority(
);
}
- _Scheduler_priority_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulerpriorityschedule.c b/cpukit/score/src/schedulerpriorityschedule.c
index bde749f9bc..bb7cf87399 100644
--- a/cpukit/score/src/schedulerpriorityschedule.c
+++ b/cpukit/score/src/schedulerpriorityschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_priority_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_priority_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulerpriorityunblock.c b/cpukit/score/src/schedulerpriorityunblock.c
index 190c126908..f9b6cabff7 100644
--- a/cpukit/score/src/schedulerpriorityunblock.c
+++ b/cpukit/score/src/schedulerpriorityunblock.c
@@ -76,19 +76,5 @@ void _Scheduler_priority_Unblock (
/* TODO: flash critical section? */
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir( the_thread, priority == PRIORITY_PSEUDO_ISR );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
diff --git a/cpukit/score/src/schedulerpriorityyield.c b/cpukit/score/src/schedulerpriorityyield.c
index 77fcecc418..adb443df94 100644
--- a/cpukit/score/src/schedulerpriorityyield.c
+++ b/cpukit/score/src/schedulerpriorityyield.c
@@ -59,5 +59,8 @@ void _Scheduler_priority_Yield(
_Chain_Append_unprotected( ready_chain, &the_thread->Object.Node );
}
- _Scheduler_priority_Schedule_body( scheduler, the_thread, true );
+ _Scheduler_uniprocessor_Yield(
+ scheduler,
+ _Scheduler_priority_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleblock.c b/cpukit/score/src/schedulersimpleblock.c
index a8229d409f..dfd18df3a2 100644
--- a/cpukit/score/src/schedulersimpleblock.c
+++ b/cpukit/score/src/schedulersimpleblock.c
@@ -47,11 +47,11 @@ void _Scheduler_simple_Block(
Scheduler_Node *node
)
{
- _Scheduler_Generic_block(
+ _Scheduler_uniprocessor_Block(
scheduler,
the_thread,
node,
_Scheduler_simple_Extract,
- _Scheduler_simple_Schedule_body
+ _Scheduler_simple_Get_highest_ready
);
}
diff --git a/cpukit/score/src/schedulersimplechangepriority.c b/cpukit/score/src/schedulersimplechangepriority.c
index b4711dfd01..5c53c96fb3 100644
--- a/cpukit/score/src/schedulersimplechangepriority.c
+++ b/cpukit/score/src/schedulersimplechangepriority.c
@@ -60,5 +60,8 @@ void _Scheduler_simple_Update_priority(
_Scheduler_simple_Extract( scheduler, the_thread, node );
_Scheduler_simple_Insert( &context->Ready, the_thread, new_priority );
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleschedule.c b/cpukit/score/src/schedulersimpleschedule.c
index 4108fdaee8..83a3fed7fe 100644
--- a/cpukit/score/src/schedulersimpleschedule.c
+++ b/cpukit/score/src/schedulersimpleschedule.c
@@ -46,5 +46,8 @@ void _Scheduler_simple_Schedule(
Thread_Control *the_thread
)
{
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Schedule(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/schedulersimpleunblock.c b/cpukit/score/src/schedulersimpleunblock.c
index 7791503d7a..436f20ac38 100644
--- a/cpukit/score/src/schedulersimpleunblock.c
+++ b/cpukit/score/src/schedulersimpleunblock.c
@@ -58,23 +58,5 @@ void _Scheduler_simple_Unblock(
priority = _Thread_Get_priority( the_thread );
insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
_Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
-
- /*
- * If the thread that was unblocked is more important than the heir,
- * then we have a new heir. This may or may not result in a
- * context switch.
- *
- * Normal case:
- * If the current thread is preemptible, then we need to do
- * a context switch.
- * Pseudo-ISR case:
- * Even if the thread isn't preemptible, if the new heir is
- * a pseudo-ISR system task, we need to do a context switch.
- */
- if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
- _Scheduler_Update_heir(
- the_thread,
- priority == PRIORITY_PSEUDO_ISR
- );
- }
+ _Scheduler_uniprocessor_Unblock( scheduler, the_thread, priority );
}
diff --git a/cpukit/score/src/schedulersimpleyield.c b/cpukit/score/src/schedulersimpleyield.c
index c25cceb8a7..ed8dd5b4cc 100644
--- a/cpukit/score/src/schedulersimpleyield.c
+++ b/cpukit/score/src/schedulersimpleyield.c
@@ -58,5 +58,8 @@ void _Scheduler_simple_Yield(
insert_priority = (unsigned int) _Thread_Get_priority( the_thread );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
_Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
- _Scheduler_simple_Schedule_body( scheduler, the_thread, false );
+ _Scheduler_uniprocessor_Yield(
+ scheduler,
+ _Scheduler_simple_Get_highest_ready
+ );
}
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index 321bb15cab..0ddfd1cf9b 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -135,11 +135,15 @@ static void _Thread_Priority_do_perform_actions(
priority_aggregation = _Priority_Actions_move( &queue_context->Priority.Actions );
do {
+#if defined(RTEMS_SMP)
Priority_Aggregation *next_aggregation;
+#endif
Priority_Node *priority_action_node;
Priority_Action_type priority_action_type;
+#if defined(RTEMS_SMP)
next_aggregation = _Priority_Get_next_action( priority_aggregation );
+#endif
priority_action_node = priority_aggregation->Action.node;
priority_action_type = priority_aggregation->Action.type;
@@ -198,8 +202,12 @@ static void _Thread_Priority_do_perform_actions(
break;
}
+#if defined(RTEMS_SMP)
priority_aggregation = next_aggregation;
- } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+ } while ( priority_aggregation != NULL );
+#else
+ } while ( false );
+#endif
if ( !_Priority_Actions_is_empty( &queue_context->Priority.Actions ) ) {
_Thread_queue_Context_add_priority_update( queue_context, the_thread );
@@ -352,6 +360,7 @@ void _Thread_Priority_changed(
);
}
+#if defined(RTEMS_SMP)
void _Thread_Priority_replace(
Thread_Control *the_thread,
Priority_Node *victim_node,
@@ -367,6 +376,7 @@ void _Thread_Priority_replace(
replacement_node
);
}
+#endif
void _Thread_Priority_update( Thread_queue_Context *queue_context )
{
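
On uniprocessor configurations a priority action list carries exactly one aggregation, so the chain walk and its _Priority_Get_next_action() lookup are only compiled for RTEMS_SMP and the loop body runs a single time. A sketch of the combined loop shape, with the per-action handling elided and an illustrative function name:

/* Sketch of the resulting loop shape; details elided. */
static void perform_actions_shape( Priority_Aggregation *priority_aggregation )
{
  do {
#if defined(RTEMS_SMP)
    Priority_Aggregation *next_aggregation;

    next_aggregation = _Priority_Get_next_action( priority_aggregation );
#endif

    /* ...act on priority_aggregation->Action.node and Action.type... */

#if defined(RTEMS_SMP)
    priority_aggregation = next_aggregation;
  } while ( priority_aggregation != NULL );
#else
  } while ( false );
#endif
}

The same shape appears again in threadqops.c below.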
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 9f3c01d118..b5e0cfdc9b 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -40,6 +40,7 @@
#endif
#include <rtems/score/threadidledata.h>
+#include <rtems/score/cpuimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
@@ -111,10 +112,10 @@ static void _Thread_Create_idle_for_CPU( Per_CPU_Control *cpu )
void _Thread_Create_idle( void )
{
+#if defined(RTEMS_SMP)
uint32_t cpu_max;
uint32_t cpu_index;
- _System_state_Set( SYSTEM_STATE_BEFORE_MULTITASKING );
cpu_max = _SMP_Get_processor_maximum();
for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
@@ -124,4 +125,12 @@ void _Thread_Create_idle( void )
_Thread_Create_idle_for_CPU( cpu );
}
}
+#else
+ _Thread_Create_idle_for_CPU( _Per_CPU_Get() );
+#endif
+
+ _CPU_Use_thread_local_storage(
+ &_Per_CPU_Get_executing( _Per_CPU_Get() )->Registers
+ );
+ _System_state_Set( SYSTEM_STATE_BEFORE_MULTITASKING );
}
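
The uniprocessor build creates a single idle thread for the boot processor, and in both configurations the executing (idle) thread's register area is handed to _CPU_Use_thread_local_storage() before the system state advances, presumably so that thread-local storage is already usable during the remaining sequential initialization. A sketch of the resulting control flow, with the SMP per-processor loop elided and an illustrative function name:

/* Sketch only. */
static void create_idle_shape( void )
{
#if defined(RTEMS_SMP)
  /* ...create one idle thread per configured and online processor... */
#else
  _Thread_Create_idle_for_CPU( _Per_CPU_Get() );
#endif

  /* Switch to the idle thread's TLS area before announcing
   * SYSTEM_STATE_BEFORE_MULTITASKING.
   */
  _CPU_Use_thread_local_storage(
    &_Per_CPU_Get_executing( _Per_CPU_Get() )->Registers
  );
  _System_state_Set( SYSTEM_STATE_BEFORE_MULTITASKING );
}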
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 457fdaa54a..9b37206c6d 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -298,6 +298,7 @@ static bool _Thread_Try_initialize(
the_thread->Start.is_preemptible = config->is_preemptible;
the_thread->Start.cpu_budget_operations = config->cpu_budget_operations;
the_thread->Start.stack_free = config->stack_free;
+ the_thread->Join_queue.Queue.owner = the_thread;
_Thread_Timer_initialize( &the_thread->Timer, cpu );
_Thread_Initialize_scheduler_and_wait_nodes( the_thread, config );
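
Making each thread the owner of its own Join_queue means the joined thread can be recovered from the queue alone, which the enqueue callout added in threadrestart.c below relies on, and it gives the priority-inheritance join operations (see the THREAD_JOIN_TQ_OPERATIONS change below) and the deadlock detection a well-defined owner. A minimal sketch of that usage, lifted from the callout added below:

/* Sketch: with Queue.owner set at initialization, the joined thread is
 * reachable from the thread queue itself.
 */
static void clear_join_wait_state( Thread_queue_Queue *queue )
{
  _Thread_Clear_state( queue->owner, STATES_WAITING_FOR_JOIN_AT_EXIT );
}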
diff --git a/cpukit/score/src/threadqops.c b/cpukit/score/src/threadqops.c
index 33fc5a44cb..fbea9f6de6 100644
--- a/cpukit/score/src/threadqops.c
+++ b/cpukit/score/src/threadqops.c
@@ -404,8 +404,12 @@ static void _Thread_queue_Priority_priority_actions(
break;
}
+#if defined(RTEMS_SMP)
priority_aggregation = _Priority_Get_next_action( priority_aggregation );
- } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+ } while ( priority_aggregation != NULL );
+#else
+ } while ( false );
+#endif
}
static void _Thread_queue_Priority_do_initialize(
@@ -734,14 +738,18 @@ static void _Thread_queue_Priority_inherit_priority_actions(
priority_aggregation = _Priority_Actions_move( priority_actions );
do {
+#if defined(RTEMS_SMP)
Priority_Aggregation *next_aggregation;
+#endif
Scheduler_Node *scheduler_node;
size_t scheduler_index;
Thread_queue_Priority_queue *priority_queue;
Scheduler_Node *scheduler_node_of_owner;
Priority_Action_type priority_action_type;
+#if defined(RTEMS_SMP)
next_aggregation = _Priority_Get_next_action( priority_aggregation );
+#endif
scheduler_node = SCHEDULER_NODE_OF_WAIT_PRIORITY( priority_aggregation );
scheduler_index = _Thread_queue_Scheduler_index( scheduler_node );
@@ -797,8 +805,12 @@ static void _Thread_queue_Priority_inherit_priority_actions(
break;
}
+#if defined(RTEMS_SMP)
priority_aggregation = next_aggregation;
- } while ( _Priority_Actions_is_valid( priority_aggregation ) );
+ } while ( priority_aggregation != NULL );
+#else
+ } while ( false );
+#endif
}
static void _Thread_queue_Priority_inherit_do_initialize(
diff --git a/cpukit/score/src/threadqtimeout.c b/cpukit/score/src/threadqtimeout.c
index 271ea27f27..10e194f6d8 100644
--- a/cpukit/score/src/threadqtimeout.c
+++ b/cpukit/score/src/threadqtimeout.c
@@ -132,7 +132,7 @@ void _Thread_queue_Add_timeout_monotonic_timespec(
{
struct timespec now;
- _Timecounter_Getnanouptime( &now );
+ _Timecounter_Nanouptime( &now );
_Thread_queue_Add_timeout_timespec(
queue,
the_thread,
@@ -152,7 +152,7 @@ void _Thread_queue_Add_timeout_realtime_timespec(
{
struct timespec now;
- _Timecounter_Getnanotime( &now );
+ _Timecounter_Nanotime( &now );
_Thread_queue_Add_timeout_timespec(
queue,
the_thread,
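
_Timecounter_Nanouptime() and _Timecounter_Nanotime() read the hardware timecounter, while the previously used Get* variants return a coarser value cached at the last timecounter update (the usual FreeBSD timecounter convention, assumed to carry over here). Timespec-based timeouts are therefore now computed from the precise current time. A small sketch of the distinction, with an illustrative function name:

/* Sketch only: the two flavors of uptime offered by the timecounter. */
static void compare_uptime_sources( void )
{
  struct timespec coarse;
  struct timespec precise;

  _Timecounter_Getnanouptime( &coarse ); /* cached at the last update */
  _Timecounter_Nanouptime( &precise );   /* reads the hardware, precise */
}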
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 25f57e2a40..635143427c 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -14,7 +14,7 @@
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2021 embedded brains GmbH.
+ * Copyright (C) 2014, 2022 embedded brains GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -55,7 +55,9 @@
#include <rtems/score/userextimpl.h>
#include <rtems/score/watchdogimpl.h>
-#define THREAD_JOIN_TQ_OPERATIONS &_Thread_queue_Operations_priority
+#include <pthread.h>
+
+#define THREAD_JOIN_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
static void _Thread_Life_action_handler(
Thread_Control *executing,
@@ -70,31 +72,6 @@ Thread_Zombie_registry _Thread_Zombies = {
.Chain = CHAIN_INITIALIZER_EMPTY( _Thread_Zombies.Chain )
};
-static void _Thread_Raise_real_priority(
- Thread_Control *the_thread,
- Priority_Control priority
-)
-{
- Thread_queue_Context queue_context;
-
- _Thread_queue_Context_initialize( &queue_context );
- _Thread_queue_Context_clear_priority_updates( &queue_context );
- _Thread_Wait_acquire( the_thread, &queue_context );
-
- if ( priority < the_thread->Real_priority.priority ) {
- _Thread_Priority_change(
- the_thread,
- &the_thread->Real_priority,
- priority,
- PRIORITY_GROUP_LAST,
- &queue_context
- );
- }
-
- _Thread_Wait_release( the_thread, &queue_context );
- _Thread_Priority_update( &queue_context );
-}
-
typedef struct {
Thread_queue_Context Base;
void *exit_value;
@@ -388,18 +365,37 @@ static void _Thread_Remove_life_change_request( Thread_Control *the_thread )
_Thread_State_release( the_thread, &lock_context );
}
-void _Thread_Join(
+static void _Thread_Clear_waiting_for_join_at_exit(
+ Thread_queue_Queue *queue,
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+)
+{
+ (void) the_thread;
+ (void) cpu_self;
+ (void) queue_context;
+ _Thread_Clear_state( queue->owner, STATES_WAITING_FOR_JOIN_AT_EXIT );
+}
+
+Status_Control _Thread_Join(
Thread_Control *the_thread,
States_Control waiting_for_join,
Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- _Assert( the_thread != executing );
_Assert( _Thread_State_is_owner( the_thread ) );
executing->Wait.return_argument = NULL;
-
+ _Thread_queue_Context_set_enqueue_callout(
+ queue_context,
+ _Thread_Clear_waiting_for_join_at_exit
+ );
+ _Thread_queue_Context_set_deadlock_callout(
+ queue_context,
+ _Thread_queue_Deadlock_status
+ );
_Thread_queue_Context_set_thread_state( queue_context, waiting_for_join );
_Thread_queue_Enqueue(
&the_thread->Join_queue.Queue,
@@ -407,6 +403,7 @@ void _Thread_Join(
executing,
queue_context
);
+ return _Thread_Wait_get_status( executing );
}
static void _Thread_Set_exit_value(
@@ -435,77 +432,70 @@ static void _Thread_Try_life_change_request(
}
}
-void _Thread_Cancel(
- Thread_Control *the_thread,
- Thread_Control *executing,
- void *exit_value
+Thread_Cancel_state _Thread_Cancel(
+ Thread_Control *the_thread,
+ Thread_Control *executing,
+ Thread_Life_state life_states_to_clear
)
{
- ISR_lock_Context lock_context;
- Thread_Life_state previous;
- Per_CPU_Control *cpu_self;
+ ISR_lock_Context lock_context;
+ Thread_Life_state previous;
_Assert( the_thread != executing );
_Thread_State_acquire( the_thread, &lock_context );
- _Thread_Set_exit_value( the_thread, exit_value );
+ _Thread_Set_exit_value( the_thread, PTHREAD_CANCELED );
previous = _Thread_Change_life_locked(
the_thread,
- 0,
+ life_states_to_clear,
THREAD_LIFE_TERMINATING,
0
);
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
-
if ( _States_Is_dormant( the_thread->current_state ) ) {
_Thread_State_release( the_thread, &lock_context );
_Thread_Make_zombie( the_thread );
- } else {
- Priority_Control priority;
-
- _Thread_Try_life_change_request( the_thread, previous, &lock_context );
- priority = _Thread_Get_priority( executing );
- _Thread_Raise_real_priority( the_thread, priority );
+ return THREAD_CANCEL_DONE;
}
- _Thread_Dispatch_enable( cpu_self );
+ _Thread_Try_life_change_request( the_thread, previous, &lock_context );
+ return THREAD_CANCEL_IN_PROGRESS;
}
-static void _Thread_Close_enqueue_callout(
- Thread_queue_Queue *queue,
+Status_Control _Thread_Close(
Thread_Control *the_thread,
- Per_CPU_Control *cpu_self,
+ Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- Thread_Close_context *context;
-
- context = (Thread_Close_context *) queue_context;
- _Thread_Cancel( context->cancel, the_thread, NULL );
-}
+ Per_CPU_Control *cpu_self;
+ Thread_Cancel_state cancel_state;
-void _Thread_Close(
- Thread_Control *the_thread,
- Thread_Control *executing,
- Thread_Close_context *context
-)
-{
- context->cancel = the_thread;
- _Thread_queue_Context_set_enqueue_callout(
- &context->Base,
- _Thread_Close_enqueue_callout
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
);
+ _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+
+ cancel_state = _Thread_Cancel( the_thread, executing, THREAD_LIFE_DETACHED );
+
+ if ( cancel_state == THREAD_CANCEL_DONE ) {
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
+ }
+
+ _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
+ _Thread_Dispatch_unnest( cpu_self );
_Thread_State_acquire_critical(
the_thread,
- &context->Base.Lock_context.Lock_context
+ &queue_context->Lock_context.Lock_context
);
- _Thread_Join(
+
+ return _Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
- &context->Base
+ queue_context
);
}
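
_Thread_Cancel() now reports whether the cancellation already completed (the target was dormant and became a zombie) or is still in progress, and _Thread_Close() uses that to decide between returning immediately and joining the target; _Thread_Join() in turn returns the wait status, so a deadlock detected through the priority-inheritance join queue can be reported to the caller. A hedged usage sketch of the new cancel contract, assuming thread dispatching has been disabled by the caller as _Thread_Close() does above:

/* Sketch only: the Thread_Cancel_state contract visible in this file. */
static void cancel_contract_sketch(
  Thread_Control *the_thread,
  Thread_Control *executing
)
{
  Thread_Cancel_state cancel_state;

  cancel_state = _Thread_Cancel( the_thread, executing, THREAD_LIFE_DETACHED );

  if ( cancel_state == THREAD_CANCEL_DONE ) {
    /* The target was dormant and is a zombie now; nothing to wait for. */
    return;
  }

  /* THREAD_CANCEL_IN_PROGRESS: the target has to terminate itself; callers
   * such as _Thread_Close() go on to block on the target's Join_queue via
   * _Thread_Join().
   */
}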
diff --git a/cpukit/score/src/watchdogtick.c b/cpukit/score/src/watchdogtick.c
index 6edb3f071a..71311b598e 100644
--- a/cpukit/score/src/watchdogtick.c
+++ b/cpukit/score/src/watchdogtick.c
@@ -83,9 +83,13 @@ void _Watchdog_Tick( Per_CPU_Control *cpu )
Thread_Control *executing;
const Thread_CPU_budget_operations *cpu_budget_operations;
+#ifdef RTEMS_SMP
if ( _Per_CPU_Is_boot_processor( cpu ) ) {
+#endif
++_Watchdog_Ticks_since_boot;
+#ifdef RTEMS_SMP
}
+#endif
_ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context );