summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--bsps/aarch64/include/bsp/aarch64-mmu.h60
-rw-r--r--bsps/aarch64/include/dev/irq/arm-gic-arch.h4
-rw-r--r--bsps/aarch64/shared/mmu/vmsav8-64-nommu.c49
-rw-r--r--bsps/aarch64/shared/mmu/vmsav8-64.c69
-rw-r--r--bsps/aarch64/shared/start/start.S4
-rw-r--r--bsps/aarch64/xilinx-versal/start/bspstartmmu.c1
-rw-r--r--bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c1
-rw-r--r--bsps/arm/altera-cyclone-v/start/bsp_specs0
-rw-r--r--bsps/arm/atsam/start/bsp_specs0
-rw-r--r--bsps/arm/beagle/start/bsp_specs0
-rw-r--r--bsps/arm/csb336/start/bsp_specs0
-rw-r--r--bsps/arm/csb337/start/bsp_specs0
-rw-r--r--bsps/arm/edb7312/start/bsp_specs0
-rw-r--r--bsps/arm/gumstix/start/bsp_specs0
-rw-r--r--bsps/arm/imx/start/bsp_specs0
-rw-r--r--bsps/arm/lm3s69xx/start/bsp_specs0
-rw-r--r--bsps/arm/lpc176x/start/bsp_specs0
-rw-r--r--bsps/arm/lpc24xx/start/bsp_specs0
-rw-r--r--bsps/arm/lpc32xx/start/bsp_specs0
-rw-r--r--bsps/arm/raspberrypi/start/bsp_specs0
-rw-r--r--bsps/arm/realview-pbx-a9/start/bsp_specs0
-rw-r--r--bsps/arm/rtl22xx/start/bsp_specs0
-rw-r--r--bsps/arm/smdk2410/start/bsp_specs0
-rw-r--r--bsps/arm/stm32f4/start/bsp_specs0
-rw-r--r--bsps/arm/stm32h7/start/bsp_specs0
-rw-r--r--bsps/arm/tms570/start/bsp_specs0
-rw-r--r--bsps/arm/xen/start/bsp_specs0
-rw-r--r--bsps/arm/xilinx-zynq/start/bsp_specs0
-rw-r--r--bsps/arm/xilinx-zynqmp/start/bsp_specs0
-rw-r--r--bsps/bfin/TLL6527M/start/bsp_specs0
-rw-r--r--bsps/bfin/bf537Stamp/start/bsp_specs0
-rw-r--r--bsps/bfin/eZKit533/start/bsp_specs0
-rw-r--r--bsps/i386/pc386/start/bsp_specs0
-rw-r--r--bsps/lm32/lm32_evr/start/bsp_specs0
-rw-r--r--bsps/lm32/milkymist/start/bsp_specs0
-rw-r--r--bsps/m68k/av5282/start/bsp_specs0
-rw-r--r--bsps/m68k/csb360/start/bsp_specs0
-rw-r--r--bsps/m68k/gen68340/start/bsp_specs0
-rw-r--r--bsps/m68k/gen68360/start/bsp_specs0
-rw-r--r--bsps/m68k/genmcf548x/start/bsp_specs0
-rw-r--r--bsps/m68k/mcf5206elite/start/bsp_specs0
-rw-r--r--bsps/m68k/mcf52235/start/bsp_specs0
-rw-r--r--bsps/m68k/mcf5225x/start/bsp_specs0
-rw-r--r--bsps/m68k/mcf5235/start/bsp_specs0
-rw-r--r--bsps/m68k/mcf5329/start/bsp_specs0
-rw-r--r--bsps/m68k/mrm332/start/bsp_specs0
-rw-r--r--bsps/m68k/mvme147/start/bsp_specs0
-rw-r--r--bsps/m68k/mvme147s/start/bsp_specs0
-rw-r--r--bsps/m68k/mvme162/start/bsp_specs0
-rw-r--r--bsps/m68k/mvme167/start/bsp_specs0
-rw-r--r--bsps/m68k/uC5282/start/bsp_specs0
-rw-r--r--bsps/mips/csb350/start/bsp_specs0
-rw-r--r--bsps/mips/hurricane/start/bsp_specs0
-rw-r--r--bsps/mips/jmr3904/start/bsp_specs0
-rw-r--r--bsps/mips/malta/start/bsp_specs0
-rw-r--r--bsps/mips/rbtx4925/start/bsp_specs0
-rw-r--r--bsps/mips/rbtx4938/start/bsp_specs0
-rw-r--r--bsps/moxie/moxiesim/start/bsp_specs0
-rw-r--r--bsps/nios2/nios2_iss/start/bsp_specs0
-rw-r--r--bsps/no_cpu/no_bsp/start/bsp_specs0
-rw-r--r--bsps/or1k/generic_or1k/start/bsp_specs0
-rw-r--r--bsps/powerpc/beatnik/start/bsp_specs0
-rw-r--r--bsps/powerpc/gen5200/start/bsp_specs0
-rw-r--r--bsps/powerpc/gen83xx/start/bsp_specs0
-rw-r--r--bsps/powerpc/haleakala/start/bsp_specs0
-rw-r--r--bsps/powerpc/motorola_powerpc/start/bsp_specs0
-rw-r--r--bsps/powerpc/mpc55xxevb/start/bsp_specs0
-rw-r--r--bsps/powerpc/mpc8260ads/start/bsp_specs0
-rw-r--r--bsps/powerpc/mvme3100/start/bsp_specs0
-rw-r--r--bsps/powerpc/mvme5500/start/bsp_specs0
-rw-r--r--bsps/powerpc/psim/start/bsp_specs0
-rw-r--r--bsps/powerpc/qemuppc/start/bsp_specs0
-rw-r--r--bsps/powerpc/qoriq/start/bsp_specs0
-rw-r--r--bsps/powerpc/ss555/start/bsp_specs0
-rw-r--r--bsps/powerpc/t32mppc/start/bsp_specs0
-rw-r--r--bsps/powerpc/tqm8xx/start/bsp_specs0
-rw-r--r--bsps/powerpc/virtex/start/bsp_specs0
-rw-r--r--bsps/powerpc/virtex4/start/bsp_specs0
-rw-r--r--bsps/powerpc/virtex5/start/bsp_specs0
-rw-r--r--bsps/riscv/griscv/start/bsp_specs0
-rw-r--r--bsps/riscv/riscv/start/bsp_specs0
-rw-r--r--bsps/sh/gensh1/start/bsp_specs0
-rw-r--r--bsps/sh/gensh2/start/bsp_specs0
-rw-r--r--bsps/sh/gensh4/start/bsp_specs0
-rw-r--r--bsps/sh/shsim/start/bsp_specs0
-rw-r--r--bsps/shared/start/mallocinitmulti.c49
-rw-r--r--bsps/shared/start/mallocinitone.c49
-rw-r--r--bsps/shared/start/wkspaceinitmulti.c46
-rw-r--r--bsps/shared/start/wkspaceinitone.c46
-rw-r--r--bsps/sparc/erc32/start/bsp_specs0
-rw-r--r--bsps/sparc/leon2/start/bsp_specs0
-rw-r--r--bsps/sparc/leon3/start/bsp_specs0
-rw-r--r--bsps/sparc64/niagara/start/bsp_specs0
-rw-r--r--bsps/sparc64/usiii/start/bsp_specs0
-rw-r--r--bsps/v850/gdbv850sim/start/bsp_specs0
-rw-r--r--bsps/x86_64/amd64/start/bsp_specs0
-rw-r--r--cpukit/aclocal/canonical-target-name.m425
-rw-r--r--cpukit/aclocal/canonicalize-tools.m413
-rw-r--r--cpukit/aclocal/check-func.m421
-rw-r--r--cpukit/aclocal/check-multiprocessing.m45
-rw-r--r--cpukit/aclocal/check-networking.m425
-rw-r--r--cpukit/aclocal/check-newlib.m428
-rw-r--r--cpukit/aclocal/check-posix.m419
-rw-r--r--cpukit/aclocal/check-rtems-debug.m45
-rw-r--r--cpukit/aclocal/check-smp.m418
-rw-r--r--cpukit/aclocal/check-tool.m49
-rw-r--r--cpukit/aclocal/enable-drvmgr.m412
-rw-r--r--cpukit/aclocal/enable-multiprocessing.m413
-rw-r--r--cpukit/aclocal/enable-networking.m412
-rw-r--r--cpukit/aclocal/enable-paravirt.m413
-rw-r--r--cpukit/aclocal/enable-posix.m421
-rw-r--r--cpukit/aclocal/enable-profiling.m49
-rw-r--r--cpukit/aclocal/enable-rtems-debug.m410
-rw-r--r--cpukit/aclocal/enable-smp.m417
-rw-r--r--cpukit/aclocal/env-rtemscpu.m46
-rw-r--r--cpukit/aclocal/gcc-pipe.m418
-rw-r--r--cpukit/aclocal/gcc-sanity.m4114
-rw-r--r--cpukit/aclocal/gcc-weak.m419
-rw-r--r--cpukit/aclocal/multi.m450
-rw-r--r--cpukit/aclocal/multilib.m414
-rw-r--r--cpukit/aclocal/prog-cc.m438
-rw-r--r--cpukit/aclocal/prog-ccas.m46
-rw-r--r--cpukit/aclocal/rtems-bsp-includes.m413
-rw-r--r--cpukit/aclocal/rtems-build-top.m412
-rw-r--r--cpukit/aclocal/rtems-includes.m423
-rw-r--r--cpukit/aclocal/rtems-source-top.m48
-rw-r--r--cpukit/aclocal/rtems-top.m470
-rw-r--r--cpukit/aclocal/version.m44
-rw-r--r--cpukit/doxygen/appl-config.h51
-rw-r--r--cpukit/include/rtems/confdefs/bsp.h15
-rw-r--r--cpukit/include/rtems/confdefs/extensions.h10
-rw-r--r--cpukit/include/rtems/confdefs/iodrivers.h62
-rw-r--r--cpukit/include/rtems/confdefs/malloc.h10
-rw-r--r--cpukit/include/rtems/confdefs/percpu.h9
-rw-r--r--cpukit/include/rtems/config.h30
-rw-r--r--cpukit/include/rtems/malloc.h6
-rw-r--r--cpukit/include/rtems/mallocinitmulti.h100
-rw-r--r--cpukit/include/rtems/mallocinitone.h90
-rw-r--r--cpukit/include/rtems/posix/pthreadimpl.h15
-rw-r--r--cpukit/include/rtems/rtems/asr.h2
-rw-r--r--cpukit/include/rtems/rtems/clock.h592
-rw-r--r--cpukit/include/rtems/rtems/modesimpl.h8
-rw-r--r--cpukit/include/rtems/rtems/ratemon.h7
-rw-r--r--cpukit/include/rtems/rtems/ratemonimpl.h10
-rw-r--r--cpukit/include/rtems/rtems/signal.h2
-rw-r--r--cpukit/include/rtems/rtems/tasks.h12
-rw-r--r--cpukit/include/rtems/score/chainimpl.h11
-rw-r--r--cpukit/include/rtems/score/coremuteximpl.h34
-rw-r--r--cpukit/include/rtems/score/exception.h71
-rw-r--r--cpukit/include/rtems/score/mrspimpl.h19
-rw-r--r--cpukit/include/rtems/score/objectdata.h2
-rw-r--r--cpukit/include/rtems/score/percpu.h31
-rw-r--r--cpukit/include/rtems/score/rbtreeimpl.h26
-rw-r--r--cpukit/include/rtems/score/scheduler.h92
-rw-r--r--cpukit/include/rtems/score/schedulercbs.h11
-rw-r--r--cpukit/include/rtems/score/scheduleredf.h1
-rw-r--r--cpukit/include/rtems/score/scheduleredfsmp.h46
-rw-r--r--cpukit/include/rtems/score/schedulerimpl.h483
-rw-r--r--cpukit/include/rtems/score/schedulernodeimpl.h21
-rw-r--r--cpukit/include/rtems/score/schedulerpriority.h1
-rw-r--r--cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h33
-rw-r--r--cpukit/include/rtems/score/schedulerprioritysmp.h34
-rw-r--r--cpukit/include/rtems/score/schedulerprioritysmpimpl.h35
-rw-r--r--cpukit/include/rtems/score/schedulersimple.h1
-rw-r--r--cpukit/include/rtems/score/schedulersimpleimpl.h13
-rw-r--r--cpukit/include/rtems/score/schedulersimplesmp.h33
-rw-r--r--cpukit/include/rtems/score/schedulersmp.h9
-rw-r--r--cpukit/include/rtems/score/schedulersmpimpl.h1032
-rw-r--r--cpukit/include/rtems/score/schedulerstrongapa.h63
-rw-r--r--cpukit/include/rtems/score/status.h2
-rw-r--r--cpukit/include/rtems/score/thread.h129
-rw-r--r--cpukit/include/rtems/score/threadcpubudget.h102
-rw-r--r--cpukit/include/rtems/score/threadimpl.h161
-rw-r--r--cpukit/include/rtems/score/threadmp.h13
-rw-r--r--cpukit/include/rtems/score/threadqimpl.h25
-rw-r--r--cpukit/include/rtems/score/timecounter.h24
-rw-r--r--cpukit/include/rtems/score/watchdogimpl.h55
-rw-r--r--cpukit/include/rtems/score/wkspace.h12
-rw-r--r--cpukit/include/rtems/score/wkspaceinitmulti.h129
-rw-r--r--cpukit/include/rtems/score/wkspaceinitone.h113
-rw-r--r--cpukit/libcsupport/src/__times.c3
-rw-r--r--cpukit/libcsupport/src/malloc_initialize.c81
-rw-r--r--cpukit/libcsupport/src/mallocheap.c2
-rw-r--r--cpukit/libdebugger/rtems-debugger-aarch64.c1884
-rw-r--r--cpukit/libdebugger/rtems-debugger-i386.c12
-rw-r--r--cpukit/libdebugger/rtems-debugger-server.c32
-rw-r--r--cpukit/libdebugger/rtems-debugger-target.c4
-rw-r--r--cpukit/libdebugger/rtems-debugger-target.h12
-rw-r--r--cpukit/libdebugger/rtems-debugger-threads.c6
-rw-r--r--cpukit/libdebugger/rtems-debugger-threads.h10
-rw-r--r--cpukit/libmisc/cpuuse/cpuusagereport.c2
-rw-r--r--cpukit/libmisc/cpuuse/cpuusagereset.c3
-rw-r--r--cpukit/libmisc/cpuuse/cpuusagetop.c2
-rw-r--r--cpukit/libtest/t-test-interrupt.c9
-rw-r--r--cpukit/libtest/testbusy.c4
-rw-r--r--cpukit/posix/src/psignalunblockthread.c7
-rw-r--r--cpukit/posix/src/psxtransschedparam.c44
-rw-r--r--cpukit/posix/src/pthreadcreate.c36
-rw-r--r--cpukit/posix/src/pthreadgetattrnp.c16
-rw-r--r--cpukit/posix/src/pthreadgetschedparam.c14
-rw-r--r--cpukit/posix/src/pthreadsetschedparam.c24
-rw-r--r--cpukit/rtems/src/eventsurrender.c12
-rw-r--r--cpukit/rtems/src/ratemongetstatus.c28
-rw-r--r--cpukit/rtems/src/ratemonperiod.c23
-rw-r--r--cpukit/rtems/src/ratemontimeout.c4
-rw-r--r--cpukit/rtems/src/schedulergetprocessor.c54
-rw-r--r--cpukit/rtems/src/schedulergetprocessormax.c54
-rw-r--r--cpukit/rtems/src/schedulerremoveprocessor.c50
-rw-r--r--cpukit/rtems/src/semsetpriority.c50
-rw-r--r--cpukit/rtems/src/signalsend.c25
-rw-r--r--cpukit/rtems/src/taskconstruct.c9
-rw-r--r--cpukit/rtems/src/taskident.c3
-rw-r--r--cpukit/rtems/src/taskmode.c2
-rw-r--r--cpukit/score/cpu/aarch64/aarch64-exception-default.S131
-rw-r--r--cpukit/score/cpu/aarch64/aarch64-exception-default.c59
-rw-r--r--cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S165
-rw-r--r--cpukit/score/cpu/aarch64/cpu.c16
-rw-r--r--cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h97
-rw-r--r--cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h1112
-rw-r--r--cpukit/score/cpu/aarch64/include/rtems/score/cpu.h21
-rw-r--r--cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h38
-rw-r--r--cpukit/score/cpu/arm/include/rtems/score/cpu.h2
-rw-r--r--cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h63
-rw-r--r--cpukit/score/cpu/sparc/cpu_asm.S12
-rw-r--r--cpukit/score/src/coremsginsert.c6
-rw-r--r--cpukit/score/src/coretodset.c8
-rw-r--r--cpukit/score/src/exceptionmapping.c104
-rw-r--r--cpukit/score/src/kern_tc.c733
-rw-r--r--cpukit/score/src/mpci.c1
-rw-r--r--cpukit/score/src/rbtreeappend.c58
-rw-r--r--cpukit/score/src/rbtreeprepend.c58
-rw-r--r--cpukit/score/src/schedulercbs.c41
-rw-r--r--cpukit/score/src/schedulercbsattachthread.c5
-rw-r--r--cpukit/score/src/schedulercbsdetachthread.c20
-rw-r--r--cpukit/score/src/schedulercbsgetexecutiontime.c2
-rw-r--r--cpukit/score/src/schedulercbsgetremainingbudget.c2
-rw-r--r--cpukit/score/src/schedulercbsreleasejob.c2
-rw-r--r--cpukit/score/src/schedulercbsunblock.c2
-rw-r--r--cpukit/score/src/schedulerdefaultmakecleansticky.c52
-rw-r--r--cpukit/score/src/schedulerdefaulttick.c81
-rw-r--r--cpukit/score/src/scheduleredfsmp.c349
-rw-r--r--cpukit/score/src/schedulerpriorityaffinitysmp.c89
-rw-r--r--cpukit/score/src/schedulerprioritysmp.c73
-rw-r--r--cpukit/score/src/schedulersimplesmp.c96
-rw-r--r--cpukit/score/src/schedulersmp.c29
-rw-r--r--cpukit/score/src/schedulerstrongapa.c162
-rw-r--r--cpukit/score/src/stackallocatorforidle.c14
-rw-r--r--cpukit/score/src/threadchangepriority.c132
-rw-r--r--cpukit/score/src/threadcreateidle.c1
-rw-r--r--cpukit/score/src/threaddispatch.c13
-rw-r--r--cpukit/score/src/threadexhausttimeslice.c53
-rw-r--r--cpukit/score/src/threadgetcputimeused.c32
-rw-r--r--cpukit/score/src/threadgetcputimeusedafterreset.c71
-rw-r--r--cpukit/score/src/threadinitialize.c14
-rw-r--r--cpukit/score/src/threadloadenv.c11
-rw-r--r--cpukit/score/src/threadqenqueue.c68
-rw-r--r--cpukit/score/src/threadqextract.c57
-rw-r--r--cpukit/score/src/threadqextractwithproxy.c46
-rw-r--r--cpukit/score/src/threadresettimeslice.c91
-rw-r--r--cpukit/score/src/threadrestart.c12
-rw-r--r--cpukit/score/src/threadscheduler.c58
-rw-r--r--cpukit/score/src/threadtimeout.c8
-rw-r--r--cpukit/score/src/userextremoveset.c2
-rw-r--r--cpukit/score/src/watchdogtick.c25
-rw-r--r--cpukit/score/src/wkspace.c125
-rw-r--r--cpukit/score/src/wkspaceallocate.c47
-rw-r--r--cpukit/score/src/wkspacefree.c51
-rw-r--r--spec/build/bsps/aarch64/a53/grp.yml2
-rw-r--r--spec/build/bsps/aarch64/a53/obj.yml2
-rw-r--r--spec/build/bsps/aarch64/a53/tsta53.yml4
-rw-r--r--spec/build/bsps/aarch64/a72/grp.yml2
-rw-r--r--spec/build/bsps/aarch64/a72/obj.yml2
-rw-r--r--spec/build/bsps/aarch64/a72/tsta72.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-versal/grp.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-versal/obj.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-versal/tstqemu.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-versal/tstvck190.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-zynqmp/grp.yml2
-rw-r--r--spec/build/bsps/aarch64/xilinx-zynqmp/obj.yml2
-rw-r--r--spec/build/bsps/aarch64/xilinx-zynqmp/tstqemu.yml4
-rw-r--r--spec/build/bsps/aarch64/xilinx-zynqmp/tstzu3eg.yml3
-rw-r--r--spec/build/bsps/arm/altera-cyclone-v/bspalteracyclonev.yml2
-rw-r--r--spec/build/bsps/arm/atsam/bspatsam.yml3
-rw-r--r--spec/build/bsps/arm/beagle/grp.yml2
-rw-r--r--spec/build/bsps/arm/beagle/obj.yml1
-rw-r--r--spec/build/bsps/arm/csb336/bspcsb336.yml3
-rw-r--r--spec/build/bsps/arm/csb337/grp.yml2
-rw-r--r--spec/build/bsps/arm/csb337/obj.yml1
-rw-r--r--spec/build/bsps/arm/edb7312/bspedb7312.yml3
-rw-r--r--spec/build/bsps/arm/fvp/grp.yml2
-rw-r--r--spec/build/bsps/arm/fvp/obj.yml1
-rw-r--r--spec/build/bsps/arm/gumstix/bspgumstix.yml3
-rw-r--r--spec/build/bsps/arm/imx/bspimx.yml2
-rw-r--r--spec/build/bsps/arm/imxrt/bspimxrt.yml3
-rw-r--r--spec/build/bsps/arm/lm3s69xx/grp.yml2
-rw-r--r--spec/build/bsps/arm/lm3s69xx/obj.yml1
-rw-r--r--spec/build/bsps/arm/lpc176x/grp.yml2
-rw-r--r--spec/build/bsps/arm/lpc176x/obj.yml1
-rw-r--r--spec/build/bsps/arm/lpc24xx/grp.yml2
-rw-r--r--spec/build/bsps/arm/lpc24xx/obj.yml1
-rw-r--r--spec/build/bsps/arm/lpc32xx/grp.yml2
-rw-r--r--spec/build/bsps/arm/lpc32xx/obj.yml1
-rw-r--r--spec/build/bsps/arm/raspberrypi/obj.yml2
-rw-r--r--spec/build/bsps/arm/realview-pbx-a9/bsprealviewpbxa9.yml3
-rw-r--r--spec/build/bsps/arm/rtl22xx/grp.yml2
-rw-r--r--spec/build/bsps/arm/rtl22xx/obj.yml1
-rw-r--r--spec/build/bsps/arm/smdk2410/bspsmdk2410.yml3
-rw-r--r--spec/build/bsps/arm/stm32f4/grp.yml2
-rw-r--r--spec/build/bsps/arm/stm32f4/obj.yml1
-rw-r--r--spec/build/bsps/arm/stm32h7/grp.yml2
-rw-r--r--spec/build/bsps/arm/stm32h7/obj.yml4
-rw-r--r--spec/build/bsps/arm/tms570/grp.yml2
-rw-r--r--spec/build/bsps/arm/tms570/obj.yml1
-rw-r--r--spec/build/bsps/arm/xen/bspxen.yml3
-rw-r--r--spec/build/bsps/arm/xilinx-zynq/grp.yml2
-rw-r--r--spec/build/bsps/arm/xilinx-zynq/obj.yml1
-rw-r--r--spec/build/bsps/arm/xilinx-zynqmp/bspxilinxzynqmp.yml3
-rw-r--r--spec/build/bsps/bfin/TLL6527M/bsptll6527m.yml3
-rw-r--r--spec/build/bsps/bfin/bf537Stamp/bspbf537stamp.yml3
-rw-r--r--spec/build/bsps/bfin/eZKit533/bspezkit533.yml3
-rw-r--r--spec/build/bsps/i386/pc386/obj.yml2
-rw-r--r--spec/build/bsps/lm32/lm32_evr/grp.yml2
-rw-r--r--spec/build/bsps/lm32/lm32_evr/obj.yml1
-rw-r--r--spec/build/bsps/lm32/milkymist/bspmilkymist.yml3
-rw-r--r--spec/build/bsps/m68k/av5282/bspav5282.yml3
-rw-r--r--spec/build/bsps/m68k/csb360/bspcsb360.yml3
-rw-r--r--spec/build/bsps/m68k/gen68340/bspgen68340.yml3
-rw-r--r--spec/build/bsps/m68k/gen68360/grp.yml2
-rw-r--r--spec/build/bsps/m68k/gen68360/obj.yml1
-rw-r--r--spec/build/bsps/m68k/genmcf548x/grp.yml2
-rw-r--r--spec/build/bsps/m68k/genmcf548x/obj.yml1
-rw-r--r--spec/build/bsps/m68k/mcf5206elite/bspmcf5206elite.yml3
-rw-r--r--spec/build/bsps/m68k/mcf52235/bspmcf52235.yml3
-rw-r--r--spec/build/bsps/m68k/mcf5225x/bspmcf5225x.yml3
-rw-r--r--spec/build/bsps/m68k/mcf5235/bspmcf5235.yml3
-rw-r--r--spec/build/bsps/m68k/mcf5329/bspmcf5329.yml3
-rw-r--r--spec/build/bsps/m68k/mrm332/bspmrm332.yml3
-rw-r--r--spec/build/bsps/m68k/mvme147/bspmvme147.yml3
-rw-r--r--spec/build/bsps/m68k/mvme147s/bspmvme147s.yml3
-rw-r--r--spec/build/bsps/m68k/mvme162/grp.yml2
-rw-r--r--spec/build/bsps/m68k/mvme162/obj.yml1
-rw-r--r--spec/build/bsps/m68k/mvme167/bspmvme167.yml3
-rw-r--r--spec/build/bsps/m68k/uC5282/bspuc5282.yml3
-rw-r--r--spec/build/bsps/microblaze/microblaze_fpga/grp.yml2
-rw-r--r--spec/build/bsps/microblaze/microblaze_fpga/obj.yml1
-rw-r--r--spec/build/bsps/mips/csb350/bspcsb350.yml3
-rw-r--r--spec/build/bsps/mips/hurricane/bsphurricane.yml3
-rw-r--r--spec/build/bsps/mips/jmr3904/bspjmr3904.yml3
-rw-r--r--spec/build/bsps/mips/malta/bspmalta.yml3
-rw-r--r--spec/build/bsps/mips/rbtx4925/bsprbtx4925.yml3
-rw-r--r--spec/build/bsps/mips/rbtx4938/bsprbtx4938.yml3
-rw-r--r--spec/build/bsps/moxie/moxiesim/bspmoxiesim.yml3
-rw-r--r--spec/build/bsps/nios2/nios2_iss/bspnios2iss.yml3
-rw-r--r--spec/build/bsps/objmem.yml16
-rw-r--r--spec/build/bsps/or1k/generic_or1k/bspgenericor1k.yml3
-rw-r--r--spec/build/bsps/powerpc/beatnik/bspbeatnik.yml3
-rw-r--r--spec/build/bsps/powerpc/gen5200/grp.yml2
-rw-r--r--spec/build/bsps/powerpc/gen5200/obj.yml1
-rw-r--r--spec/build/bsps/powerpc/gen83xx/grp.yml2
-rw-r--r--spec/build/bsps/powerpc/gen83xx/obj.yml1
-rw-r--r--spec/build/bsps/powerpc/haleakala/bsphaleakala.yml3
-rw-r--r--spec/build/bsps/powerpc/motorola_powerpc/grp.yml2
-rw-r--r--spec/build/bsps/powerpc/motorola_powerpc/obj.yml1
-rw-r--r--spec/build/bsps/powerpc/mpc55xxevb/obj.yml2
-rw-r--r--spec/build/bsps/powerpc/mpc8260ads/bspmpc8260ads.yml3
-rw-r--r--spec/build/bsps/powerpc/mvme3100/bspmvme3100.yml3
-rw-r--r--spec/build/bsps/powerpc/mvme5500/bspmvme5500.yml3
-rw-r--r--spec/build/bsps/powerpc/objmem.yml16
-rw-r--r--spec/build/bsps/powerpc/psim/bsppsim.yml3
-rw-r--r--spec/build/bsps/powerpc/qemuppc/bspqemuppc.yml3
-rw-r--r--spec/build/bsps/powerpc/qoriq/obj.yml2
-rw-r--r--spec/build/bsps/powerpc/ss555/bspss555.yml3
-rw-r--r--spec/build/bsps/powerpc/t32mppc/bspt32mppc.yml3
-rw-r--r--spec/build/bsps/powerpc/tqm8xx/obj.yml2
-rw-r--r--spec/build/bsps/powerpc/virtex/bspvirtex.yml3
-rw-r--r--spec/build/bsps/powerpc/virtex4/bspvirtex4.yml3
-rw-r--r--spec/build/bsps/powerpc/virtex5/bspvirtex5.yml3
-rw-r--r--spec/build/bsps/riscv/griscv/grp.yml2
-rw-r--r--spec/build/bsps/riscv/griscv/obj.yml1
-rw-r--r--spec/build/bsps/riscv/riscv/grp.yml2
-rw-r--r--spec/build/bsps/riscv/riscv/obj.yml1
-rw-r--r--spec/build/bsps/sh/gensh1/bspgensh1.yml3
-rw-r--r--spec/build/bsps/sh/gensh2/bspgensh2.yml3
-rw-r--r--spec/build/bsps/sh/gensh4/bspgensh4.yml3
-rw-r--r--spec/build/bsps/sh/shsim/grp.yml2
-rw-r--r--spec/build/bsps/sh/shsim/obj.yml1
-rw-r--r--spec/build/bsps/sparc/erc32/bsperc32.yml3
-rw-r--r--spec/build/bsps/sparc/leon2/grp.yml2
-rw-r--r--spec/build/bsps/sparc/leon2/obj.yml1
-rw-r--r--spec/build/bsps/sparc/leon3/grp.yml2
-rw-r--r--spec/build/bsps/sparc/leon3/obj.yml1
-rw-r--r--spec/build/bsps/sparc/objmem.yml16
-rw-r--r--spec/build/bsps/sparc64/niagara/bspniagara.yml3
-rw-r--r--spec/build/bsps/sparc64/usiii/bspusiii.yml3
-rw-r--r--spec/build/bsps/v850/gdbv850sim/grp.yml2
-rw-r--r--spec/build/bsps/v850/gdbv850sim/obj.yml1
-rw-r--r--spec/build/bsps/x86_64/amd64/bspamd64.yml3
-rw-r--r--spec/build/cpukit/cpuaarch64.yml1
-rw-r--r--spec/build/cpukit/cpuopts.yml4
-rw-r--r--spec/build/cpukit/libdebugger.yml4
-rw-r--r--spec/build/cpukit/librtemscpu.yml17
-rw-r--r--spec/build/cpukit/objdbgaarch64.yml15
-rw-r--r--spec/build/cpukit/objexceptionmapping.yml15
-rw-r--r--spec/build/cpukit/objmpci.yml1
-rw-r--r--spec/build/cpukit/objsmp.yml1
-rw-r--r--spec/build/cpukit/optexceptionextensions.yml19
-rw-r--r--spec/build/cpukit/optlibdebugger.yml17
-rw-r--r--spec/build/cpukit/optvermaj.yml2
-rw-r--r--spec/build/testsuites/libtests/debugger01.yml4
-rw-r--r--spec/build/testsuites/psxtests/grp.yml2
-rw-r--r--spec/build/testsuites/psxtests/psxsignal09.yml22
-rw-r--r--spec/build/testsuites/sptests/grp.yml2
-rw-r--r--spec/build/testsuites/sptests/spfatal35.yml20
-rw-r--r--testsuites/aclocal/canonical-target-name.m425
-rw-r--r--testsuites/aclocal/canonicalize-tools.m420
-rw-r--r--testsuites/aclocal/check-cpuopts.m421
-rw-r--r--testsuites/aclocal/check-custom-bsp.m422
-rw-r--r--testsuites/aclocal/check-cxx.m420
-rw-r--r--testsuites/aclocal/check-tool.m49
-rw-r--r--testsuites/aclocal/enable-cxx.m411
-rw-r--r--testsuites/aclocal/enable-tests.m411
-rw-r--r--testsuites/aclocal/env-rtemsbsp.m427
-rw-r--r--testsuites/aclocal/gcc-specs.m416
-rw-r--r--testsuites/aclocal/prog-cc.m440
-rw-r--r--testsuites/aclocal/prog-cxx.m449
-rw-r--r--testsuites/aclocal/project-root.m47
-rw-r--r--testsuites/aclocal/rtems-bsp-includes.m413
-rw-r--r--testsuites/aclocal/rtems-bsp-linkcmds.m425
-rw-r--r--testsuites/aclocal/rtems-build-top.m412
-rw-r--r--testsuites/aclocal/rtems-includes.m423
-rw-r--r--testsuites/aclocal/rtems-source-top.m48
-rw-r--r--testsuites/aclocal/rtems-test-check.m431
-rw-r--r--testsuites/aclocal/rtems-top.m483
-rw-r--r--testsuites/aclocal/version.m44
-rw-r--r--testsuites/ada/aclocal/prog-gnat.m444
-rw-r--r--testsuites/libtests/POSIX/calloc.c4
-rw-r--r--testsuites/libtests/malloc04/init.c37
-rw-r--r--testsuites/libtests/malloctest/task1.c2
-rw-r--r--testsuites/libtests/stackchk/task1.c2
-rw-r--r--testsuites/mptests/mp01/task1.c2
-rw-r--r--testsuites/mptests/mp03/task1.c2
-rw-r--r--testsuites/mptests/mp04/task1.c2
-rw-r--r--testsuites/psxtests/psxsignal09/init.c73
-rw-r--r--testsuites/psxtests/psxsignal09/psxsignal09.doc7
-rw-r--r--testsuites/psxtests/psxsignal09/psxsignal09.scn3
-rw-r--r--testsuites/psxtests/psxsignal09/system.h55
-rw-r--r--testsuites/samples/base_mp/apptask.c2
-rw-r--r--testsuites/samples/base_sp/apptask.c2
-rw-r--r--testsuites/samples/ticker/tasks.c2
-rw-r--r--testsuites/smptests/smp08/tasks.c2
-rw-r--r--testsuites/smptests/smpaffinity01/init.c2
-rw-r--r--testsuites/smptests/smpscheduler02/init.c2
-rw-r--r--testsuites/sptests/sp19/first.c2
-rw-r--r--testsuites/sptests/sp19/fptask.c2
-rw-r--r--testsuites/sptests/sp19/task1.c2
-rw-r--r--testsuites/sptests/sp24/task1.c2
-rw-r--r--testsuites/sptests/sp30/task1.c2
-rw-r--r--testsuites/sptests/sp42/init.c2
-rw-r--r--testsuites/sptests/sp69/init.c25
-rw-r--r--testsuites/sptests/sp76/init.c2
-rw-r--r--testsuites/sptests/spcbssched01/task1.c2
-rw-r--r--testsuites/sptests/spchain/init.c8
-rw-r--r--testsuites/sptests/spedfsched01/task1.c2
-rw-r--r--testsuites/sptests/spfatal35/init.c89
-rw-r--r--testsuites/sptests/spfatal35/spfatal35.doc7
-rw-r--r--testsuites/sptests/spfatal35/spfatal35.scn2
-rw-r--r--testsuites/sptests/spintrcritical08/init.c13
-rw-r--r--testsuites/sptests/spintrcritical09/init.c2
-rw-r--r--testsuites/sptests/spintrcritical10/init.c9
-rw-r--r--testsuites/sptests/spintrcritical22/init.c2
-rw-r--r--testsuites/sptests/spsimplesched01/init.c2
-rw-r--r--testsuites/sptests/spsimplesched02/init.c2
-rw-r--r--testsuites/sptests/sptask_err04/task1.c4
-rw-r--r--testsuites/sptests/sptimecounter01/init.c12
-rwxr-xr-xwscript61
475 files changed, 9809 insertions, 3750 deletions
diff --git a/bsps/aarch64/include/bsp/aarch64-mmu.h b/bsps/aarch64/include/bsp/aarch64-mmu.h
index a5f6e846f3..b1a471d534 100644
--- a/bsps/aarch64/include/bsp/aarch64-mmu.h
+++ b/bsps/aarch64/include/bsp/aarch64-mmu.h
@@ -42,50 +42,12 @@
#include <rtems/score/aarch64-system-registers.h>
#include <bspopts.h>
#include <bsp/utility.h>
+#include <libcpu/mmu-vmsav8-64.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-/* VMSAv8 Long-descriptor fields */
-#define MMU_DESC_AF BSP_BIT64( 10 )
-#define MMU_DESC_SH_INNER ( BSP_BIT64( 9 ) | BSP_BIT64( 8 ) )
-#define MMU_DESC_WRITE_DISABLE BSP_BIT64( 7 )
-/* PAGE and TABLE flags are the same bit, but only apply on certain levels */
-#define MMU_DESC_TYPE_TABLE BSP_BIT64( 1 )
-#define MMU_DESC_TYPE_PAGE BSP_BIT64( 1 )
-#define MMU_DESC_VALID BSP_BIT64( 0 )
-#define MMU_DESC_MAIR_ATTR( val ) BSP_FLD64( val, 2, 3 )
-#define MMU_DESC_MAIR_ATTR_GET( reg ) BSP_FLD64GET( reg, 2, 3 )
-#define MMU_DESC_MAIR_ATTR_SET( reg, val ) BSP_FLD64SET( reg, val, 2, 3 )
-#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL
-
-/* Page table configuration */
-#define MMU_PAGE_BITS 12
-#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
-#define MMU_BITS_PER_LEVEL 9
-#define MMU_TOP_LEVEL_PAGE_BITS ( 2 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
-
-#define AARCH64_MMU_FLAGS_BASE \
- ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
-
-#define AARCH64_MMU_DATA_RO_CACHED \
- ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE )
-#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED
-#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED
-
-#define AARCH64_MMU_DATA_RO \
- ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) | MMU_DESC_WRITE_DISABLE )
-#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO
-#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW
-
-/* RW implied by not ORing in RO */
-#define AARCH64_MMU_DATA_RW_CACHED \
- ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) )
-#define AARCH64_MMU_DATA_RW \
- ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) )
-#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) )
-
typedef struct {
uintptr_t begin;
uintptr_t end;
@@ -284,6 +246,15 @@ BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
} else {
/* block starts on a boundary, but is short */
chunk_size = size;
+
+ /* it isn't possible to go beyond page table level 2 */
+ if ( page_flag ) {
+ /* no sub-table, apply block properties */
+ page_table[index] = addr | flags | page_flag;
+ size -= chunk_size;
+ addr += chunk_size;
+ continue;
+ }
}
} else {
uintptr_t block_top = RTEMS_ALIGN_UP( addr, granularity );
@@ -400,6 +371,17 @@ aarch64_mmu_enable( void )
_AArch64_Write_sctlr_el1( sctlr );
}
+BSP_START_TEXT_SECTION static inline void
+aarch64_mmu_disable( void )
+{
+ uint64_t sctlr;
+
+ /* Disable MMU (clear SCTLR_EL1.M); caches are left as configured */
+ sctlr = _AArch64_Read_sctlr_el1();
+ sctlr &= ~(AARCH64_SCTLR_EL1_M);
+ _AArch64_Write_sctlr_el1( sctlr );
+}
+
BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup( void )
{
/* Set TCR */
diff --git a/bsps/aarch64/include/dev/irq/arm-gic-arch.h b/bsps/aarch64/include/dev/irq/arm-gic-arch.h
index 0911320851..f1b6fdc03d 100644
--- a/bsps/aarch64/include/dev/irq/arm-gic-arch.h
+++ b/bsps/aarch64/include/dev/irq/arm-gic-arch.h
@@ -49,8 +49,10 @@ extern "C" {
static inline void arm_interrupt_handler_dispatch(rtems_vector_number vector)
{
uint32_t interrupt_level = _CPU_ISR_Get_level();
- AArch64_interrupt_enable(1);
+ /* Enable interrupts for nesting */
+ _CPU_ISR_Set_level(0);
bsp_interrupt_handler_dispatch(vector);
+ /* Restore interrupts to previous level */
_CPU_ISR_Set_level(interrupt_level);
}
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c b/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c
new file mode 100644
index 0000000000..2c793fa239
--- /dev/null
+++ b/bsps/aarch64/shared/mmu/vmsav8-64-nommu.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 MMU dummy implementation.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <libcpu/mmu-vmsav8-64.h>
+
+/*
+ * This must have a non-header implementation because it is used by libdebugger.
+ */
+rtems_status_code aarch64_mmu_map(
+ uintptr_t addr,
+ uint64_t size,
+ uint64_t flags
+)
+{
+ return RTEMS_SUCCESSFUL;
+}
diff --git a/bsps/aarch64/shared/mmu/vmsav8-64.c b/bsps/aarch64/shared/mmu/vmsav8-64.c
new file mode 100644
index 0000000000..9caa91c414
--- /dev/null
+++ b/bsps/aarch64/shared/mmu/vmsav8-64.c
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 MMU implementation.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bsp/aarch64-mmu.h>
+#include <rtems/score/cpu.h>
+
+/*
+ * This must have a non-header implementation because it is used by libdebugger.
+ */
+rtems_status_code aarch64_mmu_map(
+ uintptr_t addr,
+ uint64_t size,
+ uint64_t flags
+)
+{
+ rtems_status_code sc;
+
+ aarch64_mmu_disable();
+ sc = aarch64_mmu_map_block(
+ (uint64_t *) bsp_translation_table_base,
+ 0x0,
+ addr,
+ size,
+ 0,
+ flags
+ );
+ _AARCH64_Data_synchronization_barrier();
+ __asm__ volatile(
+ "tlbi vmalle1\n"
+ );
+ _AARCH64_Data_synchronization_barrier();
+ _AARCH64_Instruction_synchronization_barrier();
+ aarch64_mmu_enable();
+
+ return sc;
+}
diff --git a/bsps/aarch64/shared/start/start.S b/bsps/aarch64/shared/start/start.S
index bc6a855217..f03c7921ca 100644
--- a/bsps/aarch64/shared/start/start.S
+++ b/bsps/aarch64/shared/start/start.S
@@ -201,8 +201,8 @@ _el1_start:
#endif
add x3, x1, x2
- /* Disable interrupts */
- msr DAIFSet, #0x2
+ /* Disable interrupts and debug */
+ msr DAIFSet, #0xa
#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
mov x8, XZR
diff --git a/bsps/aarch64/xilinx-versal/start/bspstartmmu.c b/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
index 8b622aec7d..5949111d0d 100644
--- a/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
+++ b/bsps/aarch64/xilinx-versal/start/bspstartmmu.c
@@ -36,6 +36,7 @@
#include <bsp.h>
#include <bsp/start.h>
#include <bsp/aarch64-mmu.h>
+#include <libcpu/mmu-vmsav8-64.h>
BSP_START_DATA_SECTION static const aarch64_mmu_config_entry
versal_mmu_config_table[] = {
diff --git a/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
index 09012c9db5..33ca1eafab 100644
--- a/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
+++ b/bsps/aarch64/xilinx-zynqmp/start/bspstartmmu.c
@@ -37,6 +37,7 @@
#include <bsp.h>
#include <bsp/start.h>
#include <bsp/aarch64-mmu.h>
+#include <libcpu/mmu-vmsav8-64.h>
BSP_START_DATA_SECTION static const aarch64_mmu_config_entry
zynqmp_mmu_config_table[] = {
diff --git a/bsps/arm/altera-cyclone-v/start/bsp_specs b/bsps/arm/altera-cyclone-v/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/altera-cyclone-v/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/atsam/start/bsp_specs b/bsps/arm/atsam/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/atsam/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/beagle/start/bsp_specs b/bsps/arm/beagle/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/beagle/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/csb336/start/bsp_specs b/bsps/arm/csb336/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/csb336/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/csb337/start/bsp_specs b/bsps/arm/csb337/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/csb337/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/edb7312/start/bsp_specs b/bsps/arm/edb7312/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/edb7312/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/gumstix/start/bsp_specs b/bsps/arm/gumstix/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/gumstix/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/imx/start/bsp_specs b/bsps/arm/imx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/imx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/lm3s69xx/start/bsp_specs b/bsps/arm/lm3s69xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/lm3s69xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/lpc176x/start/bsp_specs b/bsps/arm/lpc176x/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/lpc176x/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/lpc24xx/start/bsp_specs b/bsps/arm/lpc24xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/lpc24xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/lpc32xx/start/bsp_specs b/bsps/arm/lpc32xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/lpc32xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/raspberrypi/start/bsp_specs b/bsps/arm/raspberrypi/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/raspberrypi/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/realview-pbx-a9/start/bsp_specs b/bsps/arm/realview-pbx-a9/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/realview-pbx-a9/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/rtl22xx/start/bsp_specs b/bsps/arm/rtl22xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/rtl22xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/smdk2410/start/bsp_specs b/bsps/arm/smdk2410/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/smdk2410/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/stm32f4/start/bsp_specs b/bsps/arm/stm32f4/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/stm32f4/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/stm32h7/start/bsp_specs b/bsps/arm/stm32h7/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/stm32h7/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/tms570/start/bsp_specs b/bsps/arm/tms570/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/tms570/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/xen/start/bsp_specs b/bsps/arm/xen/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/xen/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/xilinx-zynq/start/bsp_specs b/bsps/arm/xilinx-zynq/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/xilinx-zynq/start/bsp_specs
+++ /dev/null
diff --git a/bsps/arm/xilinx-zynqmp/start/bsp_specs b/bsps/arm/xilinx-zynqmp/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/arm/xilinx-zynqmp/start/bsp_specs
+++ /dev/null
diff --git a/bsps/bfin/TLL6527M/start/bsp_specs b/bsps/bfin/TLL6527M/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/bfin/TLL6527M/start/bsp_specs
+++ /dev/null
diff --git a/bsps/bfin/bf537Stamp/start/bsp_specs b/bsps/bfin/bf537Stamp/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/bfin/bf537Stamp/start/bsp_specs
+++ /dev/null
diff --git a/bsps/bfin/eZKit533/start/bsp_specs b/bsps/bfin/eZKit533/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/bfin/eZKit533/start/bsp_specs
+++ /dev/null
diff --git a/bsps/i386/pc386/start/bsp_specs b/bsps/i386/pc386/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/i386/pc386/start/bsp_specs
+++ /dev/null
diff --git a/bsps/lm32/lm32_evr/start/bsp_specs b/bsps/lm32/lm32_evr/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/lm32/lm32_evr/start/bsp_specs
+++ /dev/null
diff --git a/bsps/lm32/milkymist/start/bsp_specs b/bsps/lm32/milkymist/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/lm32/milkymist/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/av5282/start/bsp_specs b/bsps/m68k/av5282/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/av5282/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/csb360/start/bsp_specs b/bsps/m68k/csb360/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/csb360/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/gen68340/start/bsp_specs b/bsps/m68k/gen68340/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/gen68340/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/gen68360/start/bsp_specs b/bsps/m68k/gen68360/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/gen68360/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/genmcf548x/start/bsp_specs b/bsps/m68k/genmcf548x/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/genmcf548x/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mcf5206elite/start/bsp_specs b/bsps/m68k/mcf5206elite/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mcf5206elite/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mcf52235/start/bsp_specs b/bsps/m68k/mcf52235/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mcf52235/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mcf5225x/start/bsp_specs b/bsps/m68k/mcf5225x/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mcf5225x/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mcf5235/start/bsp_specs b/bsps/m68k/mcf5235/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mcf5235/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mcf5329/start/bsp_specs b/bsps/m68k/mcf5329/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mcf5329/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mrm332/start/bsp_specs b/bsps/m68k/mrm332/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mrm332/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mvme147/start/bsp_specs b/bsps/m68k/mvme147/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mvme147/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mvme147s/start/bsp_specs b/bsps/m68k/mvme147s/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mvme147s/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mvme162/start/bsp_specs b/bsps/m68k/mvme162/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mvme162/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/mvme167/start/bsp_specs b/bsps/m68k/mvme167/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/mvme167/start/bsp_specs
+++ /dev/null
diff --git a/bsps/m68k/uC5282/start/bsp_specs b/bsps/m68k/uC5282/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/m68k/uC5282/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/csb350/start/bsp_specs b/bsps/mips/csb350/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/csb350/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/hurricane/start/bsp_specs b/bsps/mips/hurricane/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/hurricane/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/jmr3904/start/bsp_specs b/bsps/mips/jmr3904/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/jmr3904/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/malta/start/bsp_specs b/bsps/mips/malta/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/malta/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/rbtx4925/start/bsp_specs b/bsps/mips/rbtx4925/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/rbtx4925/start/bsp_specs
+++ /dev/null
diff --git a/bsps/mips/rbtx4938/start/bsp_specs b/bsps/mips/rbtx4938/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/mips/rbtx4938/start/bsp_specs
+++ /dev/null
diff --git a/bsps/moxie/moxiesim/start/bsp_specs b/bsps/moxie/moxiesim/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/moxie/moxiesim/start/bsp_specs
+++ /dev/null
diff --git a/bsps/nios2/nios2_iss/start/bsp_specs b/bsps/nios2/nios2_iss/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/nios2/nios2_iss/start/bsp_specs
+++ /dev/null
diff --git a/bsps/no_cpu/no_bsp/start/bsp_specs b/bsps/no_cpu/no_bsp/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/no_cpu/no_bsp/start/bsp_specs
+++ /dev/null
diff --git a/bsps/or1k/generic_or1k/start/bsp_specs b/bsps/or1k/generic_or1k/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/or1k/generic_or1k/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/beatnik/start/bsp_specs b/bsps/powerpc/beatnik/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/beatnik/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/gen5200/start/bsp_specs b/bsps/powerpc/gen5200/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/gen5200/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/gen83xx/start/bsp_specs b/bsps/powerpc/gen83xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/gen83xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/haleakala/start/bsp_specs b/bsps/powerpc/haleakala/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/haleakala/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/motorola_powerpc/start/bsp_specs b/bsps/powerpc/motorola_powerpc/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/motorola_powerpc/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/mpc55xxevb/start/bsp_specs b/bsps/powerpc/mpc55xxevb/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/mpc55xxevb/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/mpc8260ads/start/bsp_specs b/bsps/powerpc/mpc8260ads/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/mpc8260ads/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/mvme3100/start/bsp_specs b/bsps/powerpc/mvme3100/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/mvme3100/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/mvme5500/start/bsp_specs b/bsps/powerpc/mvme5500/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/mvme5500/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/psim/start/bsp_specs b/bsps/powerpc/psim/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/psim/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/qemuppc/start/bsp_specs b/bsps/powerpc/qemuppc/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/qemuppc/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/qoriq/start/bsp_specs b/bsps/powerpc/qoriq/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/qoriq/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/ss555/start/bsp_specs b/bsps/powerpc/ss555/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/ss555/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/t32mppc/start/bsp_specs b/bsps/powerpc/t32mppc/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/t32mppc/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/tqm8xx/start/bsp_specs b/bsps/powerpc/tqm8xx/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/tqm8xx/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/virtex/start/bsp_specs b/bsps/powerpc/virtex/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/virtex/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/virtex4/start/bsp_specs b/bsps/powerpc/virtex4/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/virtex4/start/bsp_specs
+++ /dev/null
diff --git a/bsps/powerpc/virtex5/start/bsp_specs b/bsps/powerpc/virtex5/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/powerpc/virtex5/start/bsp_specs
+++ /dev/null
diff --git a/bsps/riscv/griscv/start/bsp_specs b/bsps/riscv/griscv/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/riscv/griscv/start/bsp_specs
+++ /dev/null
diff --git a/bsps/riscv/riscv/start/bsp_specs b/bsps/riscv/riscv/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/riscv/riscv/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sh/gensh1/start/bsp_specs b/bsps/sh/gensh1/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sh/gensh1/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sh/gensh2/start/bsp_specs b/bsps/sh/gensh2/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sh/gensh2/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sh/gensh4/start/bsp_specs b/bsps/sh/gensh4/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sh/gensh4/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sh/shsim/start/bsp_specs b/bsps/sh/shsim/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sh/shsim/start/bsp_specs
+++ /dev/null
diff --git a/bsps/shared/start/mallocinitmulti.c b/bsps/shared/start/mallocinitmulti.c
new file mode 100644
index 0000000000..ad04ea14ed
--- /dev/null
+++ b/bsps/shared/start/mallocinitmulti.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup MallocSupport
+ *
+ * @brief This source file contains the _Workspace_Malloc_initialize_separate()
+ * implementation which supports more than one memory area.
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/mallocinitmulti.h>
+#include <rtems/score/wkspacedata.h>
+
+static Heap_Control _Malloc_Heap;
+
+Heap_Control *_Workspace_Malloc_initialize_separate( void )
+{
+ return _Malloc_Initialize_for_multiple_areas( &_Malloc_Heap );
+}
diff --git a/bsps/shared/start/mallocinitone.c b/bsps/shared/start/mallocinitone.c
new file mode 100644
index 0000000000..249f0b8e02
--- /dev/null
+++ b/bsps/shared/start/mallocinitone.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup MallocSupport
+ *
+ * @brief This source file contains the _Workspace_Malloc_initialize_separate()
+ * implementation which supports exactly one memory area.
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/mallocinitone.h>
+#include <rtems/score/wkspacedata.h>
+
+static Heap_Control _Malloc_Heap;
+
+Heap_Control *_Workspace_Malloc_initialize_separate( void )
+{
+ return _Malloc_Initialize_for_one_area( &_Malloc_Heap );
+}
diff --git a/bsps/shared/start/wkspaceinitmulti.c b/bsps/shared/start/wkspaceinitmulti.c
new file mode 100644
index 0000000000..2a0d0b5806
--- /dev/null
+++ b/bsps/shared/start/wkspaceinitmulti.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief This source file contains the _Workspace_Handler_initialization()
+ * implementation which supports more than one memory area.
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/wkspaceinitmulti.h>
+
+void _Workspace_Handler_initialization( void )
+{
+ _Workspace_Initialize_for_multiple_areas();
+}
diff --git a/bsps/shared/start/wkspaceinitone.c b/bsps/shared/start/wkspaceinitone.c
new file mode 100644
index 0000000000..91d007d100
--- /dev/null
+++ b/bsps/shared/start/wkspaceinitone.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief This source file contains the _Workspace_Handler_initialization()
+ * implementation which supports exactly one memory area.
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/wkspaceinitone.h>
+
+void _Workspace_Handler_initialization( void )
+{
+ _Workspace_Initialize_for_one_area();
+}
diff --git a/bsps/sparc/erc32/start/bsp_specs b/bsps/sparc/erc32/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sparc/erc32/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sparc/leon2/start/bsp_specs b/bsps/sparc/leon2/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sparc/leon2/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sparc/leon3/start/bsp_specs b/bsps/sparc/leon3/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sparc/leon3/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sparc64/niagara/start/bsp_specs b/bsps/sparc64/niagara/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sparc64/niagara/start/bsp_specs
+++ /dev/null
diff --git a/bsps/sparc64/usiii/start/bsp_specs b/bsps/sparc64/usiii/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/sparc64/usiii/start/bsp_specs
+++ /dev/null
diff --git a/bsps/v850/gdbv850sim/start/bsp_specs b/bsps/v850/gdbv850sim/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/v850/gdbv850sim/start/bsp_specs
+++ /dev/null
diff --git a/bsps/x86_64/amd64/start/bsp_specs b/bsps/x86_64/amd64/start/bsp_specs
deleted file mode 100644
index e69de29bb2..0000000000
--- a/bsps/x86_64/amd64/start/bsp_specs
+++ /dev/null
diff --git a/cpukit/aclocal/canonical-target-name.m4 b/cpukit/aclocal/canonical-target-name.m4
deleted file mode 100644
index d3c2531cda..0000000000
--- a/cpukit/aclocal/canonical-target-name.m4
+++ /dev/null
@@ -1,25 +0,0 @@
-dnl canonicalize target cpu
-dnl NOTE: Most rtems targets do not fullfil autoconf's
-dnl target naming conventions "processor-vendor-os"
-dnl Therefore autoconf's AC_CANONICAL_TARGET will fail for them
-dnl and we have to fix it for rtems ourselves
-
-AC_DEFUN([RTEMS_CANONICAL_TARGET_CPU],
-[AC_REQUIRE([AC_CANONICAL_HOST])
-AC_MSG_CHECKING(rtems target cpu)
-case "${host}" in
-riscv*-*-rtems*)
- RTEMS_CPU=riscv;;
-*-*-rtems*)
- RTEMS_CPU="$host_cpu";;
-*)
- ;;
-esac
-AS_IF([test -n "$RTEMS_CPU"],
-[dnl
-AC_MSG_RESULT($RTEMS_CPU)],
-[dnl
-AC_MSG_RESULT([<none>])
-AC_MSG_ERROR([unsupported host $host])])
-AC_SUBST(RTEMS_CPU)
-])
diff --git a/cpukit/aclocal/canonicalize-tools.m4 b/cpukit/aclocal/canonicalize-tools.m4
deleted file mode 100644
index 5795c1ecae..0000000000
--- a/cpukit/aclocal/canonicalize-tools.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-dnl
-dnl Set target tools
-dnl
-
-AC_DEFUN([RTEMS_CANONICALIZE_TOOLS],
-[AC_REQUIRE([RTEMS_PROG_CC])dnl
-
-dnl FIXME: What shall be done if these tools are not available?
- RTEMS_CHECK_TOOL(AR,ar,no)
-
-dnl special treatment of ranlib
- RTEMS_CHECK_TOOL(RANLIB,ranlib,:)
-])
diff --git a/cpukit/aclocal/check-func.m4 b/cpukit/aclocal/check-func.m4
deleted file mode 100644
index 36d700bcdd..0000000000
--- a/cpukit/aclocal/check-func.m4
+++ /dev/null
@@ -1,21 +0,0 @@
-# Check whether FUNCTION is declared in INCLUDES
-# and whether rtems_stub_FUNCTION or FUNCTION is supplied.
-
-# RTEMS_CHECK_FUNC(FUNCTION[,INCLUDES])
-AC_DEFUN([RTEMS_CHECK_FUNC],
-[AC_REQUIRE([RTEMS_CHECK_NEWLIB])
-AC_CHECK_DECLS([$1],,,[$2])
-
-AC_CACHE_CHECK([for $1],[ac_cv_$1],[
- AC_LINK_IFELSE(
- [AC_LANG_PROGRAM([[$2]],[[rtems_stub_$1()]])],
- [ac_cv_$1="stub"],
- [AC_LINK_IFELSE(
- [AC_LANG_PROGRAM([[$2]],[[$1()]])],
- [ac_cv_$1="yes"],
- [ac_cv_$1="no"])
- ])
- ])
-AS_IF([test "$ac_cv_$1" = yes],
- [AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE_]$1),[1],[Define to 1 if you have the `$1' function.])])
-])
diff --git a/cpukit/aclocal/check-multiprocessing.m4 b/cpukit/aclocal/check-multiprocessing.m4
deleted file mode 100644
index 1698d3d51f..0000000000
--- a/cpukit/aclocal/check-multiprocessing.m4
+++ /dev/null
@@ -1,5 +0,0 @@
-AC_DEFUN([RTEMS_CHECK_MULTIPROCESSING],
-[dnl
-AC_REQUIRE([RTEMS_ENV_RTEMSCPU])dnl
-AC_REQUIRE([RTEMS_ENABLE_MULTIPROCESSING])dnl
-])
diff --git a/cpukit/aclocal/check-networking.m4 b/cpukit/aclocal/check-networking.m4
deleted file mode 100644
index 7f38875dc6..0000000000
--- a/cpukit/aclocal/check-networking.m4
+++ /dev/null
@@ -1,25 +0,0 @@
-dnl
-AC_DEFUN([RTEMS_CHECK_NETWORKING],
-[dnl
-AC_REQUIRE([RTEMS_CANONICAL_TARGET_CPU])dnl
-AC_REQUIRE([RTEMS_ENABLE_NETWORKING])dnl
-AC_REQUIRE([RTEMS_INCLUDES])dnl
-
-AC_CACHE_CHECK([whether CPU supports networking],
- rtems_cv_HAS_NETWORKING,
- [dnl
- case "$host" in
- # Newer architecture ports that should only use new TCP/IP stack
- x86_64*)
- rtems_cv_HAS_NETWORKING="no"
- ;;
- *)
- AS_IF([test "${RTEMS_HAS_NETWORKING}" = "yes"],
- [rtems_cv_HAS_NETWORKING="yes"
- RTEMS_CPPFLAGS="${RTEMS_CPPFLAGS} -I${RTEMS_SOURCE_ROOT}/cpukit/libnetworking"],
- [rtems_cv_HAS_NETWORKING="no"])
- ;;
- esac
- ])
- ])
-])
diff --git a/cpukit/aclocal/check-newlib.m4 b/cpukit/aclocal/check-newlib.m4
deleted file mode 100644
index f23a90c7c7..0000000000
--- a/cpukit/aclocal/check-newlib.m4
+++ /dev/null
@@ -1,28 +0,0 @@
-AC_DEFUN([RTEMS_CHECK_NEWLIB],
-[dnl
-AC_REQUIRE([RTEMS_PROG_CC_FOR_TARGET])dnl
-AC_REQUIRE([RTEMS_CANONICALIZE_TOOLS])dnl
-AC_CACHE_CHECK([for RTEMS newlib],
- rtems_cv_use_newlib,
- [
-dnl some versions of newlib provide not_required_by_rtems
- AC_LINK_IFELSE(
- [AC_LANG_PROGRAM([[extern void not_required_by_rtems() ;]],
- [[not_required_by_rtems()]])],
- [rtems_cv_use_newlib="yes"],[])
-
-dnl some versions of newlib provide rtems_provides_crt0()
- AS_IF([test -z "$rtems_cv_use_newlib"],
- [AC_LINK_IFELSE(
- [AC_LANG_PROGRAM([[extern void rtems_provides_crt0() ;]],
- [[rtems_provides_crt0()]])],
- [rtems_cv_use_newlib="yes"],[rtems_cv_use_newlib="no"])
- ])
- ])
- RTEMS_USE_NEWLIB="$rtems_cv_use_newlib"
- AC_SUBST(RTEMS_USE_NEWLIB)
-
- AS_IF([test x"${RTEMS_USE_NEWLIB}" = x"yes"],
- [ AC_DEFINE_UNQUOTED(RTEMS_NEWLIB,1,[if using newlib])]
- )
-])
diff --git a/cpukit/aclocal/check-posix.m4 b/cpukit/aclocal/check-posix.m4
deleted file mode 100644
index ad1d1c87da..0000000000
--- a/cpukit/aclocal/check-posix.m4
+++ /dev/null
@@ -1,19 +0,0 @@
-dnl
-AC_DEFUN([RTEMS_CHECK_POSIX_API],
-[dnl
-AC_REQUIRE([RTEMS_CANONICAL_TARGET_CPU])dnl
-AC_REQUIRE([RTEMS_ENABLE_POSIX])dnl
-
-AC_CACHE_CHECK([whether CPU supports libposix],
- rtems_cv_HAS_POSIX_API,
- [dnl
- case "$host" in
- *-*-rtems*)
- if test "${RTEMS_HAS_POSIX_API}" = "yes"; then
- rtems_cv_HAS_POSIX_API="yes";
- else
- rtems_cv_HAS_POSIX_API="disabled";
- fi
- ;;
- esac])
-])
diff --git a/cpukit/aclocal/check-rtems-debug.m4 b/cpukit/aclocal/check-rtems-debug.m4
deleted file mode 100644
index 280edccd65..0000000000
--- a/cpukit/aclocal/check-rtems-debug.m4
+++ /dev/null
@@ -1,5 +0,0 @@
-AC_DEFUN([RTEMS_CHECK_RTEMS_DEBUG],
-[AC_REQUIRE([RTEMS_ENABLE_RTEMS_DEBUG])
-AS_IF([test x"${enable_rtems_debug}" = x"yes"],
- [AC_DEFINE_UNQUOTED(RTEMS_DEBUG,1,[if RTEMS_DEBUG is enabled])])
-])
diff --git a/cpukit/aclocal/check-smp.m4 b/cpukit/aclocal/check-smp.m4
deleted file mode 100644
index a0c8b819fb..0000000000
--- a/cpukit/aclocal/check-smp.m4
+++ /dev/null
@@ -1,18 +0,0 @@
-dnl
-AC_DEFUN([RTEMS_CHECK_SMP],
-[dnl
-AC_REQUIRE([RTEMS_ENABLE_SMP])dnl
-
-AC_CACHE_CHECK([whether CPU supports SMP],
- rtems_cv_HAS_SMP,
- [dnl
- case "$RTEMS_CPU" in
- *)
- if test "${RTEMS_HAS_SMP}" = "yes"; then
- rtems_cv_HAS_SMP="yes";
- else
- rtems_cv_HAS_SMP="disabled";
- fi
- ;;
- esac])
-])
diff --git a/cpukit/aclocal/check-tool.m4 b/cpukit/aclocal/check-tool.m4
deleted file mode 100644
index 2bc137cd96..0000000000
--- a/cpukit/aclocal/check-tool.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-## Check for a cross tool, similar to AC_CHECK_TOOL, but do not fall back to
-## the un-prefixed version of PROG-TO-CHECK-FOR.
-dnl RTEMS_CHECK_TOOL(VARIABLE, PROG-TO-CHECK-FOR[, VALUE-IF-NOT-FOUND [, PATH]])
-AC_DEFUN([RTEMS_CHECK_TOOL],
-[
- AS_IF([test "x$build_alias" != "x$host_alias"],
- [rtems_tool_prefix=${ac_tool_prefix}])
- AC_CHECK_PROG($1, ${rtems_tool_prefix}$2, ${rtems_tool_prefix}$2, $3, $4)
-])
diff --git a/cpukit/aclocal/enable-drvmgr.m4 b/cpukit/aclocal/enable-drvmgr.m4
deleted file mode 100644
index a9da288b11..0000000000
--- a/cpukit/aclocal/enable-drvmgr.m4
+++ /dev/null
@@ -1,12 +0,0 @@
-dnl $Id: enable-drvmgr.m4,v 1.0
-
-AC_DEFUN([RTEMS_ENABLE_DRVMGR],
-[
-AC_ARG_ENABLE(drvmgr,
-AS_HELP_STRING(--enable-drvmgr,enable drvmgr at startup),
-[case "${enableval}" in
- yes) RTEMS_DRVMGR_STARTUP=yes ;;
- no) RTEMS_DRVMGR_STARTUP=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-drvmgr option) ;;
-esac],[RTEMS_DRVMGR_STARTUP=yes])
-])
diff --git a/cpukit/aclocal/enable-multiprocessing.m4 b/cpukit/aclocal/enable-multiprocessing.m4
deleted file mode 100644
index 53fa8ffe81..0000000000
--- a/cpukit/aclocal/enable-multiprocessing.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_MULTIPROCESSING],
-[
-AC_ARG_ENABLE(multiprocessing,
-[AS_HELP_STRING([--enable-multiprocessing],
-[enable multiprocessing interface; the multiprocessing interface is a
-communication interface between different RTEMS instances and allows
-synchronization of objects via message passing])],
-[case "${enable_multiprocessing}" in
- yes) ;;
- no) ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-multiprocessing option) ;;
-esac],[enable_multiprocessing=no])
-])
diff --git a/cpukit/aclocal/enable-networking.m4 b/cpukit/aclocal/enable-networking.m4
deleted file mode 100644
index d9b50ef208..0000000000
--- a/cpukit/aclocal/enable-networking.m4
+++ /dev/null
@@ -1,12 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_NETWORKING],
-[
-## AC_BEFORE([$0], [RTEMS_CHECK_NETWORKING])dnl
-
-AC_ARG_ENABLE(networking,
-AS_HELP_STRING(--enable-networking,enable TCP/IP stack),
-[case "${enableval}" in
- yes) RTEMS_HAS_NETWORKING=yes ;;
- no) RTEMS_HAS_NETWORKING=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-networking option) ;;
-esac],[RTEMS_HAS_NETWORKING=yes])
-])
diff --git a/cpukit/aclocal/enable-paravirt.m4 b/cpukit/aclocal/enable-paravirt.m4
deleted file mode 100644
index b3ef97edad..0000000000
--- a/cpukit/aclocal/enable-paravirt.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_PARAVIRT],
-[
-
-AC_ARG_ENABLE(paravirt,
-[AS_HELP_STRING([--enable-paravirt],[enable support for paravirtualization
-(default=no)])],
-
-[case "${enableval}" in
- yes) RTEMS_HAS_PARAVIRT=yes ;;
- no) RTEMS_HAS_PARAVIRT=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-paravirt option) ;;
-esac],[RTEMS_HAS_PARAVIRT=no])
-])
diff --git a/cpukit/aclocal/enable-posix.m4 b/cpukit/aclocal/enable-posix.m4
deleted file mode 100644
index c1833950e3..0000000000
--- a/cpukit/aclocal/enable-posix.m4
+++ /dev/null
@@ -1,21 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_POSIX],
-[
-## AC_BEFORE([$0], [RTEMS_CHECK_POSIX_API])dnl
-
-AC_ARG_ENABLE(posix,
-AS_HELP_STRING(--enable-posix,enable posix interface),
-[case "${enableval}" in
- yes) RTEMS_HAS_POSIX_API=yes ;;
- no) RTEMS_HAS_POSIX_API=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-posix option) ;;
-esac],[RTEMS_HAS_POSIX_API=yes])
-
-case "${host}" in
- no_cpu-*rtems*)
- RTEMS_HAS_POSIX_API=no
- ;;
- *)
- ;;
-esac
-AC_SUBST(RTEMS_HAS_POSIX_API)
-])
diff --git a/cpukit/aclocal/enable-profiling.m4 b/cpukit/aclocal/enable-profiling.m4
deleted file mode 100644
index b363ae28c6..0000000000
--- a/cpukit/aclocal/enable-profiling.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_PROFILING],
- [AC_ARG_ENABLE(profiling,
- [AS_HELP_STRING([--enable-profiling],[enable support for profiling (default=no)])],
- [case "${enableval}" in
- yes) RTEMS_HAS_PROFILING=yes ;;
- no) RTEMS_HAS_PROFILING=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable profiling option) ;;
- esac],
- [RTEMS_HAS_PROFILING=no])])
diff --git a/cpukit/aclocal/enable-rtems-debug.m4 b/cpukit/aclocal/enable-rtems-debug.m4
deleted file mode 100644
index ba4a4f2111..0000000000
--- a/cpukit/aclocal/enable-rtems-debug.m4
+++ /dev/null
@@ -1,10 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_RTEMS_DEBUG],
-[
-AC_ARG_ENABLE(rtems-debug,
-AS_HELP_STRING(--enable-rtems-debug,enable RTEMS_DEBUG),
-[case "${enable_rtems_debug}" in
- yes) enable_rtems_debug=yes ;;
- no) enable_rtems_debug=no ;;
- *) AC_MSG_ERROR([bad value ${enable_rtems_debug} for RTEMS_DEBUG]) ;;
-esac],[enable_rtems_debug=no])
-])
diff --git a/cpukit/aclocal/enable-smp.m4 b/cpukit/aclocal/enable-smp.m4
deleted file mode 100644
index 1448a2e5eb..0000000000
--- a/cpukit/aclocal/enable-smp.m4
+++ /dev/null
@@ -1,17 +0,0 @@
-AC_DEFUN([RTEMS_ENABLE_SMP],
-[
-## AC_BEFORE([$0], [RTEMS_CHECK_SMP])dnl
-
-AC_ARG_ENABLE(smp,
-[AS_HELP_STRING([--enable-smp],[enable support for symmetric multiprocessing
-(SMP)])],
-[case "${enableval}" in
- yes) case "${RTEMS_CPU}" in
- arm|powerpc|riscv*|sparc|i386) RTEMS_HAS_SMP=yes ;;
- *) RTEMS_HAS_SMP=no ;;
- esac
- ;;
- no) RTEMS_HAS_SMP=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for enable-smp option) ;;
-esac],[RTEMS_HAS_SMP=no])
-])
diff --git a/cpukit/aclocal/env-rtemscpu.m4 b/cpukit/aclocal/env-rtemscpu.m4
deleted file mode 100644
index 7c18e3f521..0000000000
--- a/cpukit/aclocal/env-rtemscpu.m4
+++ /dev/null
@@ -1,6 +0,0 @@
-AC_DEFUN([RTEMS_ENV_RTEMSCPU],
-[
- AC_REQUIRE([RTEMS_ENABLE_MULTILIB])
-
- AM_CONDITIONAL([MULTILIB],[test x"$multilib" = x"yes"])
-])
diff --git a/cpukit/aclocal/gcc-pipe.m4 b/cpukit/aclocal/gcc-pipe.m4
deleted file mode 100644
index fbc78b9ca7..0000000000
--- a/cpukit/aclocal/gcc-pipe.m4
+++ /dev/null
@@ -1,18 +0,0 @@
-dnl Check whether the target compiler accepts -pipe
-dnl
-
-AC_DEFUN([RTEMS_GCC_PIPE],
-[AC_REQUIRE([RTEMS_PROG_CC])
-AC_REQUIRE([AC_CANONICAL_HOST])
-AC_CACHE_CHECK(whether $CC accepts --pipe,rtems_cv_gcc_pipe,
-[
-rtems_cv_gcc_pipe=no
-if test x"$GCC" = x"yes"; then
- echo 'void f(){}' >conftest.c
- if test -z "`${CC} --pipe -c conftest.c 2>&1`";then
- rtems_cv_gcc_pipe=yes
- fi
- rm -f conftest*
-fi
-])
-])
diff --git a/cpukit/aclocal/gcc-sanity.m4 b/cpukit/aclocal/gcc-sanity.m4
deleted file mode 100644
index 0177009e28..0000000000
--- a/cpukit/aclocal/gcc-sanity.m4
+++ /dev/null
@@ -1,114 +0,0 @@
-# Some GCC sanity checks to check for known bugs in the rtems gcc toolchains
-
-# Internal macro
-# _RTEMS_GCC_WARNING(msg,cache-variable,include,main)
-AC_DEFUN([_RTEMS_GCC_WARNING],[
- AC_CACHE_CHECK([$1],
- [$2],[
- AS_IF([test x"$GCC" = xyes],[
- save_CFLAGS=$CFLAGS
- CFLAGS="-Wall -Werror"])
-
- AC_COMPILE_IFELSE([
- AC_LANG_PROGRAM([$3],[$4])],
- [$2=yes],
- [$2=no])
-
- AS_IF([test x"$GCC" = xyes],[
- CFLAGS=$save_CFLAGS])
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRIxPTR],[
- _RTEMS_GCC_WARNING(
- [if printf("%" PRIxPTR, uintptr_t) works],
- [rtems_cv_PRIxPTR],[
- #include <inttypes.h>
- #include <stdio.h>
- ],[
- uintptr_t ptr = 42;
- printf("%" PRIxPTR "\n", ptr);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRIuPTR],[
- _RTEMS_GCC_WARNING(
- [if printf("%" PRIuPTR, uintptr_t) works],
- [rtems_cv_PRIuPTR],[
- #include <inttypes.h>
- #include <stdio.h>
- ],[
- uintptr_t ptr = 42;
- printf("%" PRIuPTR "\n", ptr);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRIdPTR],[
- _RTEMS_GCC_WARNING(
- [if printf("%" PRIdPTR, intptr_t) works],
- [rtems_cv_PRIdPTR],[
- #include <inttypes.h>
- #include <stdio.h>
- ],[
- intptr_t ptr = -1;
- printf("%" PRIdPTR "\n", ptr);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRINTF_ZU_SIZE_T],[
- _RTEMS_GCC_WARNING(
- [if printf("%zu", size_t) works],
- [rtems_cv_PRINTF_ZU_SIZE_T],[
- #include <sys/types.h>
- #include <stdio.h>
- ],[
- size_t sz = 1;
- printf("%zu\n", sz);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRINTF_ZD_SSIZE_T],[
- _RTEMS_GCC_WARNING(
- [if printf("%zd", ssize_t) works],
- [rtems_cv_PRINTF_ZD_SSIZE_T],[
- #include <sys/types.h>
- #include <stdio.h>
- ],[
- ssize_t sz = 1;
- printf("%zd\n", sz);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRINTF_LD_OFF_T],[
- _RTEMS_GCC_WARNING(
- [if printf("%ld", off_t) works],
- [rtems_cv_PRINTF_LD_OFF_T],[
- #include <sys/types.h>
- #include <stdio.h>
- ],[
- off_t off = 1;
- printf("%ld\n", off);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_PRINTF_LLD_OFF_T],[
- _RTEMS_GCC_WARNING(
- [if printf("%lld", off_t) works],
- [rtems_cv_PRINTF_LLD_OFF_T],[
- #include <sys/types.h>
- #include <stdio.h>
- ],[
- off_t off = 1;
- printf("%lld\n", off);
- ])
-])
-
-AC_DEFUN([RTEMS_CHECK_GCC_SANITY],[
-RTEMS_CHECK_GCC_PRIxPTR
-RTEMS_CHECK_GCC_PRIuPTR
-RTEMS_CHECK_GCC_PRIdPTR
-RTEMS_CHECK_GCC_PRINTF_ZU_SIZE_T
-RTEMS_CHECK_GCC_PRINTF_ZD_SSIZE_T
-RTEMS_CHECK_GCC_PRINTF_LD_OFF_T
-RTEMS_CHECK_GCC_PRINTF_LLD_OFF_T
-])
diff --git a/cpukit/aclocal/gcc-weak.m4 b/cpukit/aclocal/gcc-weak.m4
deleted file mode 100644
index 73cfb28e7e..0000000000
--- a/cpukit/aclocal/gcc-weak.m4
+++ /dev/null
@@ -1,19 +0,0 @@
-AC_DEFUN([RTEMS_CHECK_GCC_WEAK],[
-AC_CACHE_CHECK([whether $CC supports function __attribute__((weak))],
-[rtems_cv_cc_attribute_weak],[
- AS_IF([test x"$GCC" = xyes],[
- save_CFLAGS=$CFLAGS
- CFLAGS=-Werror])
-
- AC_COMPILE_IFELSE([
- AC_LANG_PROGRAM(
- [void myfunc(char c) __attribute__ ((weak));
- void myfunc(char c) {}],
- [])],
- [rtems_cv_cc_attribute_weak=yes],
- [rtems_cv_cc_attribute_weak=no])
-
- AS_IF([test x"$GCC" = xyes],[
- CFLAGS=$save_CFLAGS])
-])
-])
diff --git a/cpukit/aclocal/multi.m4 b/cpukit/aclocal/multi.m4
deleted file mode 100644
index 7cdc3cb023..0000000000
--- a/cpukit/aclocal/multi.m4
+++ /dev/null
@@ -1,50 +0,0 @@
-#serial 99
-
-AC_DEFUN([AC_ENABLE_MULTILIB],
-[
-AC_ARG_ENABLE(multilib,
-AS_HELP_STRING(--enable-multilib,build many library versions (default=no)),
-[case "${enableval}" in
- yes) multilib=yes ;;
- no) multilib=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for multilib option) ;;
- esac], [multilib=no])dnl
-
-AM_CONDITIONAL(MULTILIB,test x"${multilib}" = x"yes")
-
-dnl We may get other options which we don't document:
-dnl --with-target-subdir, --with-multisrctop, --with-multisubdir
-
-if test "[$]{srcdir}" = "."; then
- if test "[$]{with_target_subdir}" != "."; then
- multilib_basedir="[$]{srcdir}/[$]{with_multisrctop}../ifelse([$2],,,[$2])"
- else
- multilib_basedir="[$]{srcdir}/[$]{with_multisrctop}ifelse([$2],,,[$2])"
- fi
-else
- multilib_basedir="[$]{srcdir}/ifelse([$2],,,[$2])"
-fi
-AC_SUBST(multilib_basedir)
-
-if test "${multilib}" = "yes"; then
- multilib_arg="--enable-multilib"
-else
- multilib_arg=
-fi
-
-AC_CONFIG_COMMANDS([default-1],[case " $CONFIG_FILES " in
- *" ]m4_if([$1],,Makefile,[$1])[ "*)
- ac_file=]m4_if([$1],,Makefile,[$1])[ . ${multilib_basedir}/config-ml.in
-esac],[
- srcdir=${srcdir}
- host=${host}
- target=${target}
- with_multisrctop="${with_multisrctop}"
- with_target_subdir="${with_target_subdir}"
- with_multisubdir="${with_multisubdir}"
- ac_configure_args="${multilib_arg} ${ac_configure_args}"
- CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
- multilib_basedir=${multilib_basedir}
- CC="${CC}"
-])
-])
diff --git a/cpukit/aclocal/multilib.m4 b/cpukit/aclocal/multilib.m4
deleted file mode 100644
index 90e6217780..0000000000
--- a/cpukit/aclocal/multilib.m4
+++ /dev/null
@@ -1,14 +0,0 @@
-dnl This provides configure definitions used for multilib support
-
-AC_DEFUN([RTEMS_ENABLE_MULTILIB],
-[
-AC_ARG_ENABLE(multilib,
-AS_HELP_STRING(--enable-multilib,build many library versions (default=no)),
-[case "${enableval}" in
- yes) multilib=yes ;;
- no) multilib=no ;;
- *) AC_MSG_ERROR(bad value ${enableval} for multilib option) ;;
- esac], [multilib=no])dnl
-
-AM_CONDITIONAL([MULTILIB],[test x"${multilib}" = x"yes"])
-])
diff --git a/cpukit/aclocal/prog-cc.m4 b/cpukit/aclocal/prog-cc.m4
deleted file mode 100644
index 9006e0e53a..0000000000
--- a/cpukit/aclocal/prog-cc.m4
+++ /dev/null
@@ -1,38 +0,0 @@
-dnl
-dnl Check for target gcc
-dnl
-
-AC_DEFUN([RTEMS_PROG_CC],
-[
-AC_BEFORE([$0], [AC_PROG_CPP])dnl
-AC_BEFORE([$0], [AC_PROG_CC])dnl
-AC_BEFORE([$0], [RTEMS_CANONICALIZE_TOOLS])dnl
-
-RTEMS_CHECK_TOOL(CC,gcc)
-test -z "$CC" && \
- AC_MSG_ERROR([no acceptable cc found in \$PATH])
-AC_PROG_CC
-AC_PROG_CPP
-])
-
-AC_DEFUN([RTEMS_PROG_CC_FOR_TARGET],
-[
-# Was CFLAGS set?
-rtems_cv_CFLAGS_set="${CFLAGS+set}"
-dnl check target cc
-RTEMS_PROG_CC
-dnl check if the target compiler may use --pipe
-RTEMS_GCC_PIPE
-test "$rtems_cv_gcc_pipe" = "yes" && CC="$CC --pipe"
-
-# Append warning flags if CFLAGS wasn't set.
-AS_IF([test "$GCC" = yes && test "$rtems_cv_CFLAGS_set" != set],
-[CFLAGS="$CFLAGS -Wall -Wimplicit-function-declaration -Wstrict-prototypes -Wnested-externs"])
-
-AS_IF([test "$GCC" = yes],[
- RTEMS_RELLDFLAGS="-qnolinkcmds -nostdlib -r"
-])
-AC_SUBST(RTEMS_RELLDFLAGS)
-
-RTEMS_INCLUDES
-])
diff --git a/cpukit/aclocal/prog-ccas.m4 b/cpukit/aclocal/prog-ccas.m4
deleted file mode 100644
index c30b884e19..0000000000
--- a/cpukit/aclocal/prog-ccas.m4
+++ /dev/null
@@ -1,6 +0,0 @@
-AC_DEFUN([RTEMS_PROG_CCAS],
-[
-AC_REQUIRE([RTEMS_PROG_CC])
-AM_PROG_AS
-AC_SUBST(RTEMS_CCASFLAGS,["-DASM \$(CFLAGS)"])
-])
diff --git a/cpukit/aclocal/rtems-bsp-includes.m4 b/cpukit/aclocal/rtems-bsp-includes.m4
deleted file mode 100644
index 2248211192..0000000000
--- a/cpukit/aclocal/rtems-bsp-includes.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-dnl
-dnl RTEMS Include paths.
-dnl
-AC_DEFUN([RTEMS_BSP_INCLUDES],
-[
-AC_REQUIRE([RTEMS_SOURCE_TOP])
-AC_REQUIRE([RTEMS_BUILD_TOP])
-RTEMS_BSP_CPPFLAGS="-I${RTEMS_BUILD_ROOT}/lib/libbsp/\$(RTEMS_CPU)/\$(RTEMS_BSP_FAMILY)/include \
--I${RTEMS_SOURCE_ROOT}/bsps/include \
--I${RTEMS_SOURCE_ROOT}/bsps/\$(RTEMS_CPU)/include \
--I${RTEMS_SOURCE_ROOT}/bsps/\$(RTEMS_CPU)/\$(RTEMS_BSP_FAMILY)/include"
-AC_SUBST([RTEMS_BSP_CPPFLAGS])
-])
diff --git a/cpukit/aclocal/rtems-build-top.m4 b/cpukit/aclocal/rtems-build-top.m4
deleted file mode 100644
index 5708119c14..0000000000
--- a/cpukit/aclocal/rtems-build-top.m4
+++ /dev/null
@@ -1,12 +0,0 @@
-dnl
-dnl RTEMS_BUILD_TOP($1)
-dnl
-AC_DEFUN([RTEMS_BUILD_TOP],
-[dnl
-#
-# This is a copy of the horrible hack in rtems-top.m4 and it is simpler to
-# copy it that attempt to clean this crap up.
-#
-RTEMS_BUILD_ROOT="${with_rtems_build_top}"
-AC_SUBST([RTEMS_BUILD_ROOT])
-])dnl
diff --git a/cpukit/aclocal/rtems-includes.m4 b/cpukit/aclocal/rtems-includes.m4
deleted file mode 100644
index ca4ebed581..0000000000
--- a/cpukit/aclocal/rtems-includes.m4
+++ /dev/null
@@ -1,23 +0,0 @@
-dnl
-dnl RTEMS Include paths.
-dnl
-AC_DEFUN([RTEMS_INCLUDES],
-[
-AC_REQUIRE([RTEMS_SOURCE_TOP])
-AC_REQUIRE([RTEMS_BUILD_TOP])
-
-# Was CFLAGS set?
-rtems_cv_CFLAGS_set="${CFLAGS+set}"
-
-RTEMS_INCLUDE_CPUKIT="-I${RTEMS_SOURCE_ROOT}/cpukit/include"
-RTEMS_INCLUDE_CPUKIT_ARCH="-I${RTEMS_SOURCE_ROOT}/cpukit/score/cpu/\$(RTEMS_CPU)/include"
-
-RTEMS_CPUKIT_INCLUDE="${RTEMS_INCLUDE_CPUKIT} ${RTEMS_INCLUDE_CPUKIT_ARCH}"
-RTEMS_BUILD_INCLUDE="-I\$(top_builddir) -I${RTEMS_BUILD_ROOT}/include"
-
-RTEMS_INCLUDE="${RTEMS_BUILD_INCLUDE} ${RTEMS_CPUKIT_INCLUDE}"
-
-RTEMS_CPPFLAGS="${RTEMS_INCLUDE}"
-
-AC_SUBST([RTEMS_CPPFLAGS])
-])
diff --git a/cpukit/aclocal/rtems-source-top.m4 b/cpukit/aclocal/rtems-source-top.m4
deleted file mode 100644
index d1460ae82d..0000000000
--- a/cpukit/aclocal/rtems-source-top.m4
+++ /dev/null
@@ -1,8 +0,0 @@
-dnl
-dnl RTEMS_SOURCE_TOP
-dnl
-AC_DEFUN([RTEMS_SOURCE_TOP],
-[dnl
-RTEMS_SOURCE_ROOT="${with_rtems_source_top}"
-AC_SUBST([RTEMS_SOURCE_ROOT])
-])dnl
diff --git a/cpukit/aclocal/rtems-top.m4 b/cpukit/aclocal/rtems-top.m4
deleted file mode 100644
index b0cdaaa4d7..0000000000
--- a/cpukit/aclocal/rtems-top.m4
+++ /dev/null
@@ -1,70 +0,0 @@
-# AC_DISABLE_OPTION_CHECKING is not available before 2.62
-AC_PREREQ(2.62)
-
-dnl
-dnl RTEMS_TOP($1)
-dnl
-dnl $1 .. relative path from this configure.ac to the toplevel configure.ac
-dnl
-AC_DEFUN([RTEMS_TOP],
-[dnl
-AC_REQUIRE([RTEMS_VERSIONING])
-AC_REQUIRE([AM_SET_LEADING_DOT])
-AC_REQUIRE([AC_DISABLE_OPTION_CHECKING])
-AC_CONFIG_AUX_DIR([$1])
-AC_CHECK_PROGS(MAKE, gmake make)
-AC_BEFORE([$0], [AM_INIT_AUTOMAKE])dnl
-
-AC_PREFIX_DEFAULT([/opt/rtems-][_RTEMS_API])
-
-AC_SUBST([RTEMS_TOPdir],["$1"])
-
-# HACK: The sed pattern below strips of "../", corresponding to "cpukit/"
-rtems_updir=m4_if([$2],[],[`echo "$1/" | sed 's,^\.\.\/,,'`],[$2/])
-
-AS_IF([test -n "$with_multisubdir"],
- [MULTIBUILDTOP=`echo "/$with_multisubdir" | sed 's,/[[^\\/]]*,../,g'`])
-AC_SUBST(MULTIBUILDTOP)
-
-AS_IF([test -n "$with_multisubdir"],
- [MULTISUBDIR="/$with_multisubdir"])
-AC_SUBST(MULTISUBDIR)
-
-AC_ARG_WITH([project-root],[
-AS_HELP_STRING(--with-project-root,directory to pre-install files into)],[
-## Make sure to have a terminating '/'
-case "${with_project_root}" in
-*/) ;;
-*) with_project_root="${with_project_root}/" ;;
-esac
-
-case "${with_project_root}" in
- [[\\/$]]* | ?:[[\\/]]* ) # absolute directory
- PROJECT_ROOT=${with_project_root}
- ;;
- *) # relative directory
- sav0dir=`pwd` && cd ./${rtems_updir}
- sav1dir=`pwd` && cd ../${MULTIBUILDTOP}
- sav2dir=`pwd` && cd "$sav0dir"
- mydir=`echo "$sav1dir" | sed "s,^$sav2dir${MULTISUBDIR}/,,"`
- PROJECT_ROOT='$(top_builddir)'/${rtems_updir}'../$(MULTIBUILDTOP)'${mydir}/${with_project_root}
- ;;
-esac],[
-## Defaults: Note: Two different defaults!
-## ../ for multilib
-## '.' for non-multilib
-AS_IF([test "$enable_multilib" = "yes"],[
- PROJECT_ROOT='$(top_builddir)'/${rtems_updir}'../$(MULTIBUILDTOP)'],[
- PROJECT_ROOT='$(top_builddir)'/${rtems_updir}])
-])
-
-AC_SUBST([PROJECT_INCLUDE],["${PROJECT_ROOT}lib/include"])
-AC_SUBST([PROJECT_LIB],["${PROJECT_ROOT}lib\$(MULTISUBDIR)"])
-
-libdir="${libdir}\$(MULTISUBDIR)"
-
-AC_SUBST([project_libdir],["\$(libdir)"])
-AC_SUBST([project_includedir],["\$(includedir)"])
-
-AC_SUBST([dirstamp],[\${am__leading_dot}dirstamp])
-])dnl
diff --git a/cpukit/aclocal/version.m4 b/cpukit/aclocal/version.m4
deleted file mode 100644
index 69e3eea10a..0000000000
--- a/cpukit/aclocal/version.m4
+++ /dev/null
@@ -1,4 +0,0 @@
-AC_DEFUN([RTEMS_VERSIONING],
-m4_define([_RTEMS_VERSION],[6.0.0]))
-
-m4_define([_RTEMS_API],[6])
diff --git a/cpukit/doxygen/appl-config.h b/cpukit/doxygen/appl-config.h
index bbeb438bec..4735af8766 100644
--- a/cpukit/doxygen/appl-config.h
+++ b/cpukit/doxygen/appl-config.h
@@ -3,7 +3,7 @@
/*
* Copyright (C) 2019, 2021 embedded brains GmbH (http://www.embedded-brains.de)
* Copyright (C) 2010 Gedare Bloom
- * Copyright (C) 1988, 2008 On-Line Applications Research Corporation (OAR)
+ * Copyright (C) 1988, 2021 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -1773,6 +1773,29 @@
*/
#define CONFIGURE_ATA_DRIVER_TASK_PRIORITY
+/* Generated from spec:/acfg/if/exception-to-signal-mapping */
+
+/**
+ * @brief This configuration option is a boolean feature define.
+ *
+ * In case this configuration option is defined, then the machine exception to
+ * POSIX signal mapping is configured during system initialization.
+ *
+ * @par Default Configuration
+ * If this configuration option is undefined, then the described feature is not
+ * enabled.
+ *
+ * @par Notes
+ * @parblock
+ * This device driver is responsible for setting up a mapping from machine
+ * exceptions to POSIX signals so that applications may consume them and alter
+ * task execution as necessary.
+ *
+ * This is especially useful for applications written in Ada or C++.
+ * @endparblock
+ */
+#define CONFIGURE_EXCEPTION_TO_SIGNAL_MAPPING
+
/* Generated from spec:/acfg/if/max-drivers */
/**
@@ -4671,6 +4694,32 @@
*/
#define CONFIGURE_TASK_STACK_ALLOCATOR_AVOIDS_WORK_SPACE
+/* Generated from spec:/acfg/if/task-stack-allocator-for-idle */
+
+/**
+ * @brief This configuration option is an initializer define.
+ *
+ * The value of this configuration option is the address for the stack
+ * allocator allocate handler used to allocate the task stack of each IDLE
+ * task.
+ *
+ * @par Default Value
+ * The default value is ``_Stack_Allocator_allocate_for_idle_default``, which
+ * indicates that IDLE task stacks will be allocated from an area statically
+ * allocated by ``<rtems/confdefs.h>``.
+ *
+ * @par Value Constraints
+ * The value of this configuration option shall be defined to a valid function
+ * pointer of the type ``void *( *allocate )( uint32_t, size_t )``.
+ *
+ * @par Notes
+ * This configuration option is independent of the other thread stack allocator
+ * configuration options. It is assumed that any memory allocated for the
+ * stack of an IDLE task will not be from the RTEMS Workspace or the memory
+ * statically allocated by default.
+ */
+#define CONFIGURE_TASK_STACK_ALLOCATOR_FOR_IDLE
+
/* Generated from spec:/acfg/if/task-stack-allocator-init */
/**
diff --git a/cpukit/include/rtems/confdefs/bsp.h b/cpukit/include/rtems/confdefs/bsp.h
index 03ad9bf55f..bc96713765 100644
--- a/cpukit/include/rtems/confdefs/bsp.h
+++ b/cpukit/include/rtems/confdefs/bsp.h
@@ -41,19 +41,8 @@
#error "Do not include this file directly, use <rtems/confdefs.h> instead"
#endif
-#ifdef CONFIGURE_INIT
-
-#ifdef CONFIGURE_DISABLE_BSP_SETTINGS
- #undef BSP_IDLE_TASK_BODY
- #undef BSP_IDLE_TASK_STACK_SIZE
- #undef BSP_INITIAL_EXTENSION
- #undef BSP_INTERRUPT_STACK_SIZE
- #undef CONFIGURE_BSP_PREREQUISITE_DRIVERS
- #undef CONFIGURE_MALLOC_BSP_SUPPORTS_SBRK
-#else
- #include <bsp.h>
+#if defined(CONFIGURE_INIT) && !defined(CONFIGURE_DISABLE_BSP_SETTINGS)
+#include <bsp.h>
#endif
-#endif /* CONFIGURE_INIT */
-
#endif /* _RTEMS_CONFDEFS_BSP_H */
diff --git a/cpukit/include/rtems/confdefs/extensions.h b/cpukit/include/rtems/confdefs/extensions.h
index 83d690d50a..118ad29484 100644
--- a/cpukit/include/rtems/confdefs/extensions.h
+++ b/cpukit/include/rtems/confdefs/extensions.h
@@ -93,6 +93,10 @@
#include <rtems/stackchk.h>
#endif
+#ifdef CONFIGURE_EXCEPTION_TO_SIGNAL_MAPPING
+ #include <rtems/score/exception.h>
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -103,6 +107,9 @@ extern "C" {
|| defined(CONFIGURE_INITIAL_EXTENSIONS) \
|| defined(BSP_INITIAL_EXTENSION)
const User_extensions_Table _User_extensions_Initial_extensions[] = {
+ #ifdef CONFIGURE_EXCEPTION_TO_SIGNAL_MAPPING
+ { .fatal = _Exception_Raise_signal },
+ #endif
#ifdef _CONFIGURE_RECORD_NEED_EXTENSION
{
#ifdef CONFIGURE_RECORD_EXTENSIONS_ENABLED
@@ -139,7 +146,8 @@ extern "C" {
#ifdef CONFIGURE_INITIAL_EXTENSIONS
CONFIGURE_INITIAL_EXTENSIONS,
#endif
- #ifdef BSP_INITIAL_EXTENSION
+ #if !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && \
+ defined(BSP_INITIAL_EXTENSION)
BSP_INITIAL_EXTENSION
#endif
};
diff --git a/cpukit/include/rtems/confdefs/iodrivers.h b/cpukit/include/rtems/confdefs/iodrivers.h
index a7de77a8c3..1f77948676 100644
--- a/cpukit/include/rtems/confdefs/iodrivers.h
+++ b/cpukit/include/rtems/confdefs/iodrivers.h
@@ -43,16 +43,30 @@
#ifdef CONFIGURE_INIT
-#if defined(CONFIGURE_APPLICATION_EXTRA_DRIVERS) \
- || defined(CONFIGURE_APPLICATION_NEEDS_ATA_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_FRAME_BUFFER_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_IDE_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_RTC_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_STUB_DRIVER) \
- || defined(CONFIGURE_APPLICATION_NEEDS_ZERO_DRIVER) \
- || CONFIGURE_MAXIMUM_DRIVERS > 0
+#include <rtems/confdefs/bsp.h>
+
+#if !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && \
+ defined(CONFIGURE_BSP_PREREQUISITE_DRIVERS)
+#define _CONFIGURE_BSP_PREREQUISITE_DRIVERS CONFIGURE_BSP_PREREQUISITE_DRIVERS
+#endif
+
+#if defined(_CONFIGURE_BSP_PREREQUISITE_DRIVERS) || \
+ defined(CONFIGURE_APPLICATION_PREREQUISITE_DRIVERS) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_RTC_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_WATCHDOG_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_STUB_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_ZERO_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_IDE_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_ATA_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_FRAME_BUFFER_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_EXTRA_DRIVERS)
+#define _CONFIGURE_HAS_IO_DRIVERS
+#endif
+
+#if defined(_CONFIGURE_HAS_IO_DRIVERS) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER) || \
+ CONFIGURE_MAXIMUM_DRIVERS > 0
#include <rtems/ioimpl.h>
#include <rtems/sysinit.h>
@@ -110,8 +124,8 @@ extern "C" {
rtems_driver_address_table
_IO_Driver_address_table[ CONFIGURE_MAXIMUM_DRIVERS ] = {
- #ifdef CONFIGURE_BSP_PREREQUISITE_DRIVERS
- CONFIGURE_BSP_PREREQUISITE_DRIVERS,
+ #ifdef _CONFIGURE_BSP_PREREQUISITE_DRIVERS
+ _CONFIGURE_BSP_PREREQUISITE_DRIVERS,
#endif
#ifdef CONFIGURE_APPLICATION_PREREQUISITE_DRIVERS
CONFIGURE_APPLICATION_PREREQUISITE_DRIVERS,
@@ -143,15 +157,8 @@ _IO_Driver_address_table[ CONFIGURE_MAXIMUM_DRIVERS ] = {
#ifdef CONFIGURE_APPLICATION_EXTRA_DRIVERS
CONFIGURE_APPLICATION_EXTRA_DRIVERS,
#endif
- #if defined(CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER) \
- || ( !defined(CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_RTC_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_STUB_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_ZERO_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_IDE_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_ATA_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_NEEDS_FRAME_BUFFER_DRIVER) \
- && !defined(CONFIGURE_APPLICATION_EXTRA_DRIVERS) )
+ #if defined(CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER) || \
+ !defined(_CONFIGURE_HAS_IO_DRIVERS)
NULL_DRIVER_TABLE_ENTRY
#endif
};
@@ -178,16 +185,9 @@ RTEMS_SYSINIT_ITEM(
}
#endif
-#endif /* CONFIGURE_APPLICATION_EXTRA_DRIVERS
- || CONFIGURE_APPLICATION_NEEDS_ATA_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_FRAME_BUFFER_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_IDE_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_RTC_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_STUB_DRIVER
- || CONFIGURE_APPLICATION_NEEDS_ZERO_DRIVER
- || CONFIGURE_MAXIMUM_DRIVERS */
+#endif /* _CONFIGURE_HAS_IO_DRIVERS) ||
+ CONFIGURE_APPLICATION_NEEDS_NULL_DRIVER ||
+ CONFIGURE_MAXIMUM_DRIVERS */
/*
diff --git a/cpukit/include/rtems/confdefs/malloc.h b/cpukit/include/rtems/confdefs/malloc.h
index d864a98270..a8dae6e739 100644
--- a/cpukit/include/rtems/confdefs/malloc.h
+++ b/cpukit/include/rtems/confdefs/malloc.h
@@ -45,8 +45,12 @@
#include <rtems/confdefs/bsp.h>
-#if defined(CONFIGURE_MALLOC_BSP_SUPPORTS_SBRK) \
- || defined(CONFIGURE_MALLOC_DIRTY)
+#if !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && \
+ defined(CONFIGURE_MALLOC_BSP_SUPPORTS_SBRK)
+#define _CONFIGURE_HEAP_EXTEND_VIA_SBRK
+#endif
+
+#if defined(_CONFIGURE_HEAP_EXTEND_VIA_SBRK) || defined(CONFIGURE_MALLOC_DIRTY)
#include <rtems/malloc.h>
#endif
@@ -54,7 +58,7 @@
extern "C" {
#endif
-#ifdef CONFIGURE_MALLOC_BSP_SUPPORTS_SBRK
+#ifdef _CONFIGURE_HEAP_EXTEND_VIA_SBRK
const rtems_heap_extend_handler rtems_malloc_extend_handler =
rtems_heap_extend_via_sbrk;
#endif
diff --git a/cpukit/include/rtems/confdefs/percpu.h b/cpukit/include/rtems/confdefs/percpu.h
index 3bea8340cb..b91590bfd9 100644
--- a/cpukit/include/rtems/confdefs/percpu.h
+++ b/cpukit/include/rtems/confdefs/percpu.h
@@ -78,7 +78,8 @@ extern "C" {
/* Interrupt stack configuration */
#ifndef CONFIGURE_INTERRUPT_STACK_SIZE
- #ifdef BSP_INTERRUPT_STACK_SIZE
+ #if !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && \
+ defined(BSP_INTERRUPT_STACK_SIZE)
#define CONFIGURE_INTERRUPT_STACK_SIZE BSP_INTERRUPT_STACK_SIZE
#else
#define CONFIGURE_INTERRUPT_STACK_SIZE CPU_STACK_MINIMUM_SIZE
@@ -121,7 +122,8 @@ RTEMS_DEFINE_GLOBAL_SYMBOL(
/* Idle thread configuration */
#ifndef CONFIGURE_IDLE_TASK_STACK_SIZE
- #ifdef BSP_IDLE_TASK_STACK_SIZE
+ #if !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && \
+ defined(BSP_IDLE_TASK_STACK_SIZE)
#define CONFIGURE_IDLE_TASK_STACK_SIZE BSP_IDLE_TASK_STACK_SIZE
#else
#define CONFIGURE_IDLE_TASK_STACK_SIZE CONFIGURE_MINIMUM_TASK_STACK_SIZE
@@ -153,7 +155,8 @@ const size_t _Thread_Idle_stack_size = CONFIGURE_IDLE_TASK_STACK_SIZE;
#error "If you define CONFIGURE_IDLE_TASK_INITIALIZES_APPLICATION, then you must define CONFIGURE_IDLE_TASK_BODY as well"
#endif
-#if !defined(CONFIGURE_IDLE_TASK_BODY) && defined(BSP_IDLE_TASK_BODY)
+#if !defined(CONFIGURE_IDLE_TASK_BODY) && \
+ !defined(CONFIGURE_DISABLE_BSP_SETTINGS) && defined(BSP_IDLE_TASK_BODY)
#define CONFIGURE_IDLE_TASK_BODY BSP_IDLE_TASK_BODY
#endif
diff --git a/cpukit/include/rtems/config.h b/cpukit/include/rtems/config.h
index 7b15bc34a2..32dd662b3e 100644
--- a/cpukit/include/rtems/config.h
+++ b/cpukit/include/rtems/config.h
@@ -10,6 +10,7 @@
*/
/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
* Copyright (C) 2009, 2021 embedded brains GmbH (http://www.embedded-brains.de)
*
* Redistribution and use in source and binary forms, with or without
@@ -302,6 +303,23 @@ uint32_t rtems_configuration_get_maximum_extensions( void );
#define rtems_configuration_get_number_of_initial_extensions() \
((uint32_t) _User_extensions_Initial_count)
+/* Generated from spec:/rtems/config/if/get-stack-allocate-for-idle-hook */
+
+/**
+ * @ingroup RTEMSAPIConfig
+ *
+ * @brief Gets the thread stack allocator allocate hook used to allocate the
+ * stack of each IDLE task configured for this application.
+ *
+ * @return Returns the thread stack allocator allocate hook used to allocate
+ * the stack of each IDLE task configured for this application.
+ *
+ * @par Notes
+ * See #CONFIGURE_TASK_STACK_ALLOCATOR_FOR_IDLE.
+ */
+#define rtems_configuration_get_stack_allocate_for_idle_hook() \
+ _Stack_Allocator_allocate_for_idle
+
/* Generated from spec:/rtems/config/if/get-stack-allocate-hook */
/**
@@ -368,18 +386,6 @@ uint32_t rtems_configuration_get_maximum_extensions( void );
*/
#define rtems_configuration_get_stack_free_hook() _Stack_Allocator_free
-/**
- * @ingroup RTEMSAPIConfig
- *
- * @brief Gets the IDLE thread stack allocator hook configured for this
- * application.
- *
- * @return Returns the IDLE thread stack allocator hook configured for this
- * application.
- */
-#define rtems_configuration_get_stack_allocate_for_idle_hook() \
- (_Stack_Allocator_allocate_for_idle)
-
/* Generated from spec:/rtems/config/if/get-stack-space-size */
/**
diff --git a/cpukit/include/rtems/malloc.h b/cpukit/include/rtems/malloc.h
index eba538e223..c0d15fbf72 100644
--- a/cpukit/include/rtems/malloc.h
+++ b/cpukit/include/rtems/malloc.h
@@ -19,7 +19,6 @@
#include <rtems.h>
#include <rtems/bspIo.h>
#include <rtems/libcsupport.h> /* for malloc_walk() */
-#include <rtems/score/memory.h>
#include <stdint.h>
@@ -43,10 +42,7 @@ extern "C" {
*/
extern Heap_Control *RTEMS_Malloc_Heap;
-Heap_Control *RTEMS_Malloc_Initialize(
- const Memory_Information *mem,
- Heap_Initialization_or_extend_handler extend
-);
+void _Malloc_Initialize( void );
void rtems_heap_set_sbrk_amount( ptrdiff_t sbrk_amount );
diff --git a/cpukit/include/rtems/mallocinitmulti.h b/cpukit/include/rtems/mallocinitmulti.h
new file mode 100644
index 0000000000..a64c6e4802
--- /dev/null
+++ b/cpukit/include/rtems/mallocinitmulti.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup MallocSupport
+ *
+ * @brief This header file provides the implementation of
+ * _Malloc_Initialize_for_multiple_areas().
+ */
+
+/*
+ * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_MALLOCINITMULTI_H
+#define _RTEMS_MALLOCINITMULTI_H
+
+#include <rtems/malloc.h>
+#include <rtems/score/heapimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup MallocSupport
+ *
+ * @brief Initializes the separate C Program Heap with support for more than
+ * one memory area.
+ *
+ * This implementation should be used by BSPs which provide more than one
+ * memory area via _Memory_Get() to implement
+ * _Workspace_Malloc_initialize_separate().
+ */
+RTEMS_INLINE_ROUTINE Heap_Control *_Malloc_Initialize_for_multiple_areas(
+ Heap_Control *heap
+)
+{
+ const Memory_Information *mem;
+ Heap_Initialization_or_extend_handler init_or_extend;
+ uintptr_t page_size;
+ size_t i;
+
+ mem = _Memory_Get();
+ RTEMS_Malloc_Heap = heap;
+ init_or_extend = _Heap_Initialize;
+ page_size = CPU_HEAP_ALIGNMENT;
+
+ for (i = 0; i < _Memory_Get_count( mem ); ++i) {
+ Memory_Area *area;
+ uintptr_t space_available;
+
+ area = _Memory_Get_area( mem, i );
+ space_available = ( *init_or_extend )(
+ heap,
+ _Memory_Get_free_begin( area ),
+ _Memory_Get_free_size( area ),
+ page_size
+ );
+
+ if ( space_available > 0 ) {
+ _Memory_Consume( area, _Memory_Get_free_size( area ) );
+ init_or_extend = _Heap_Extend;
+ }
+ }
+
+ if ( init_or_extend == _Heap_Initialize ) {
+ _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_HEAP );
+ }
+
+ return heap;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_MALLOCINITMULTI_H */
diff --git a/cpukit/include/rtems/mallocinitone.h b/cpukit/include/rtems/mallocinitone.h
new file mode 100644
index 0000000000..eaa0d8eb3e
--- /dev/null
+++ b/cpukit/include/rtems/mallocinitone.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup MallocSupport
+ *
+ * @brief This header file provides the implementation of
+ * _Malloc_Initialize_for_one_area().
+ */
+
+/*
+ * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_MALLOCINITONE_H
+#define _RTEMS_MALLOCINITONE_H
+
+#include <rtems/malloc.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/heapimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup MallocSupport
+ *
+ * @brief Initializes the separate C Program Heap with support for exactly one
+ * memory area.
+ *
+ * This implementation should be used by BSPs which provide exactly one memory
+ * area via _Memory_Get() to implement _Workspace_Malloc_initialize_separate().
+ */
+RTEMS_INLINE_ROUTINE Heap_Control *_Malloc_Initialize_for_one_area(
+ Heap_Control *heap
+)
+{
+ const Memory_Information *mem;
+ Memory_Area *area;
+ uintptr_t space_available;
+
+ mem = _Memory_Get();
+ _Assert( _Memory_Get_count( mem ) == 1 );
+
+ RTEMS_Malloc_Heap = heap;
+ area = _Memory_Get_area( mem, 0 );
+ space_available = _Heap_Initialize(
+ heap,
+ _Memory_Get_free_begin( area ),
+ _Memory_Get_free_size( area ),
+ CPU_HEAP_ALIGNMENT
+ );
+
+ if ( space_available > 0 ) {
+ _Memory_Consume( area, _Memory_Get_free_size( area ) );
+ } else {
+ _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_HEAP );
+ }
+
+ return heap;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_MALLOCINITONE_H */
diff --git a/cpukit/include/rtems/posix/pthreadimpl.h b/cpukit/include/rtems/posix/pthreadimpl.h
index 723b20e8d2..0dc7d9dac5 100644
--- a/cpukit/include/rtems/posix/pthreadimpl.h
+++ b/cpukit/include/rtems/posix/pthreadimpl.h
@@ -47,7 +47,7 @@ RTEMS_INLINE_ROUTINE void _POSIX_Threads_Sporadic_timer_insert(
POSIX_API_Control *api
)
{
- the_thread->cpu_time_budget =
+ the_thread->CPU_budget.available =
_Timespec_To_ticks( &api->Sporadic.sched_ss_init_budget );
_Watchdog_Per_CPU_insert_ticks(
@@ -61,19 +61,12 @@ RTEMS_INLINE_ROUTINE void _POSIX_Threads_Sporadic_timer_insert(
void _POSIX_Threads_Sporadic_timer( Watchdog_Control *watchdog );
/**
- * @brief POSIX threads sporadic budget callout.
- *
- * This routine handles the sporadic scheduling algorithm.
- *
- * @param[in] the_thread is a pointer to the thread whose budget
- * has been exceeded.
+ * @brief The POSIX threads sporadic budget operations.
*/
-void _POSIX_Threads_Sporadic_budget_callout(
- Thread_Control *the_thread
-);
+extern const Thread_CPU_budget_operations _POSIX_Threads_Sporadic_budget;
int _POSIX_Thread_Translate_to_sched_policy(
- Thread_CPU_budget_algorithms budget_algorithm
+ const Thread_CPU_budget_operations *operations
);
/**
diff --git a/cpukit/include/rtems/rtems/asr.h b/cpukit/include/rtems/rtems/asr.h
index f58d60790d..1b0af08a0e 100644
--- a/cpukit/include/rtems/rtems/asr.h
+++ b/cpukit/include/rtems/rtems/asr.h
@@ -3,6 +3,8 @@
/**
* @file
*
+ * @ingroup RTEMSImplClassicSignal
+ *
* @brief This header file defines the parts of the Signal Manager API.
*/
diff --git a/cpukit/include/rtems/rtems/clock.h b/cpukit/include/rtems/rtems/clock.h
index e158b18947..6cdc8d68e3 100644
--- a/cpukit/include/rtems/rtems/clock.h
+++ b/cpukit/include/rtems/rtems/clock.h
@@ -81,6 +81,11 @@ extern "C" {
* related capabilities.
*/
+/* Generated from spec:/rtems/clock/if/bintime */
+
+/* Forward declaration */
+struct bintime;
+
/* Generated from spec:/rtems/clock/if/set */
/**
@@ -213,6 +218,593 @@ rtems_status_code rtems_clock_get_tod( rtems_time_of_day *time_of_day );
*/
rtems_status_code rtems_clock_get_tod_timeval( struct timeval *time_of_day );
+/* Generated from spec:/rtems/clock/if/get-realtime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in seconds and nanoseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timespec object. The
+ * time elapsed since the Unix epoch measured using the CLOCK_REALTIME at
+ * some time point during the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_realtime_coarse() directive may be used to get the time in a
+ * lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_realtime_bintime() and
+ * rtems_clock_get_realtime_timeval() to get the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime( struct timespec *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-realtime-bintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in binary time format.
+ *
+ * @param[out] time_snapshot is the pointer to a bintime object. The time
+ * elapsed since the Unix epoch measured using the CLOCK_REALTIME at some
+ * time point during the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_realtime_coarse_bintime() directive may be used to get the
+ * time in a lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_realtime() and rtems_clock_get_realtime_timeval() to get
+ * the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime_bintime( struct bintime *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-realtime-timeval */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in seconds and microseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timeval object. The
+ * time elapsed since the Unix epoch measured using the CLOCK_REALTIME at
+ * some time point during the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_realtime_coarse_timeval() directive may be used to get the
+ * time in a lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_realtime() and rtems_clock_get_realtime_bintime() to get
+ * the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime_timeval( struct timeval *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-realtime-coarse */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in coarse resolution in seconds and nanoseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timespec object. The
+ * time elapsed since the Unix epoch measured using the CLOCK_REALTIME at
+ * some time point close to the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_realtime() directive may be used to get the time in a higher
+ * resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_realtime_coarse_bintime() and
+ * rtems_clock_get_realtime_coarse_timeval() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime_coarse( struct timespec *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-realtime-coarse-bintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in coarse resolution in binary time format.
+ *
+ * @param[out] time_snapshot is the pointer to a bintime object. The time
+ * elapsed since the Unix epoch measured using the CLOCK_REALTIME at some
+ * time point close to the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_realtime_bintime() directive may be used to get the time in
+ * a higher resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_realtime_coarse() and
+ * rtems_clock_get_realtime_coarse_timeval() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime_coarse_bintime( struct bintime *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-realtime-coarse-timeval */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch measured using
+ * CLOCK_REALTIME in coarse resolution in seconds and microseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timeval object. The
+ * time elapsed since the Unix epoch measured using the CLOCK_REALTIME at
+ * some time point close to the directive call will be stored in this object.
+ * Calling the directive with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_realtime_timeval() directive may be used to get the time in
+ * a higher resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_realtime_coarse() and
+ * rtems_clock_get_realtime_coarse_bintime() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_realtime_coarse_timeval( struct timeval *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in seconds and nanoseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timespec object. The
+ * time elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point during the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_monotonic_coarse() directive may be used to get the time
+ * in a lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_monotonic_bintime(),
+ * rtems_clock_get_monotonic_sbintime(), and
+ * rtems_clock_get_monotonic_timeval() to get the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic( struct timespec *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-bintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in binary time format.
+ *
+ * @param[out] time_snapshot is the pointer to a bintime object. The time
+ * elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point during the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_monotonic_coarse_bintime() directive may be used to get the
+ * time in a lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_monotonic(), rtems_clock_get_monotonic_sbintime(), and
+ * rtems_clock_get_monotonic_timeval() to get the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic_bintime( struct bintime *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-sbintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in signed binary time format.
+ *
+ * @return Returns the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC at some time point during the directive
+ * call.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system.
+ *
+ * See rtems_clock_get_monotonic(), rtems_clock_get_monotonic_bintime(), and
+ * rtems_clock_get_monotonic_timeval() to get the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+int64_t rtems_clock_get_monotonic_sbintime( void );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-timeval */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in seconds and microseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timeval object. The
+ * time elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point during the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive accesses a device provided by the Clock Driver to get the time
+ * in the highest resolution available to the system. Alternatively, the
+ * rtems_clock_get_monotonic_coarse_timeval() directive may be used to get the
+ * time in a lower resolution and with less runtime overhead.
+ *
+ * See rtems_clock_get_monotonic(), rtems_clock_get_monotonic_bintime(), and
+ * rtems_clock_get_monotonic_sbintime() to get the time in alternative formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic_timeval( struct timeval *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-coarse */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in coarse resolution in seconds and
+ * nanoseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timespec object. The
+ * time elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point close to the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_monotonic() directive may be used to get the time in a
+ * higher resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_monotonic_coarse_bintime() and
+ * rtems_clock_get_monotonic_coarse_timeval() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic_coarse( struct timespec *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-coarse-bintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in coarse resolution in binary time
+ * format.
+ *
+ * @param[out] time_snapshot is the pointer to a bintime object. The time
+ * elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point close to the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_monotonic_bintime() directive may be used to get the time in
+ * a higher resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_monotonic_coarse() and
+ * rtems_clock_get_monotonic_coarse_timeval() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic_coarse_bintime( struct bintime *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-monotonic-coarse-timeval */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since some fixed time point in the past
+ * measured using the CLOCK_MONOTONIC in coarse resolution in seconds and
+ * microseconds format.
+ *
+ * @param[out] time_snapshot is the pointer to a struct timeval object. The
+ * time elapsed since some fixed time point in the past measured using the
+ * CLOCK_MONOTONIC at some time point close to the directive call will be
+ * stored in this object. Calling the directive with a pointer equal to NULL
+ * is undefined behaviour.
+ *
+ * @par Notes
+ * @parblock
+ * The directive does not access a device to get the time. It uses a recent
+ * snapshot provided by the Clock Driver. Alternatively, the
+ * rtems_clock_get_monotonic_timeval() directive may be used to get the time in
+ * a higher resolution and with a higher runtime overhead.
+ *
+ * See rtems_clock_get_monotonic_coarse() and
+ * rtems_clock_get_monotonic_coarse_bintime() to get the time in alternative
+ * formats.
+ * @endparblock
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_monotonic_coarse_timeval( struct timeval *time_snapshot );
+
+/* Generated from spec:/rtems/clock/if/get-boot-time */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch at some time point during
+ * system initialization in seconds and nanoseconds format.
+ *
+ * @param[out] boot_time is the pointer to a struct timespec object. The time
+ * elapsed since the Unix epoch at some time point during system
+ * initialization will be stored in this object. Calling the directive
+ * with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * See rtems_clock_get_boot_time_bintime() and
+ * rtems_clock_get_boot_time_timeval() to get the boot time in alternative
+ * formats. Setting the CLOCK_REALTIME will also set the boot time.
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_boot_time( struct timespec *boot_time );
+
+/* Generated from spec:/rtems/clock/if/get-boot-time-bintime */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch at some time point during
+ * system initialization in binary time format.
+ *
+ * @param[out] boot_time is the pointer to a bintime object. The time elapsed
+ * since the Unix epoch at some time point during system initialization
+ * will be stored in this object. Calling the directive with a pointer equal
+ * to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * See rtems_clock_get_boot_time() and rtems_clock_get_boot_time_timeval() to
+ * get the boot time in alternative formats. Setting the CLOCK_REALTIME will
+ * also set the boot time.
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_boot_time_bintime( struct bintime *boot_time );
+
+/* Generated from spec:/rtems/clock/if/get-boot-time-timeval */
+
+/**
+ * @ingroup RTEMSAPIClassicClock
+ *
+ * @brief Gets the time elapsed since the Unix epoch at some time point during
+ * system initialization in seconds and microseconds format.
+ *
+ * @param[out] boot_time is the pointer to a struct timeval object. The time
+ * elapsed since the Unix epoch at some time point during system
+ * initialization will be stored in this object. Calling the directive
+ * with a pointer equal to NULL is undefined behaviour.
+ *
+ * @par Notes
+ * See rtems_clock_get_boot_time() and rtems_clock_get_boot_time_bintime() to
+ * get the boot time in alternative formats. Setting the CLOCK_REALTIME will
+ * also set the boot time.
+ *
+ * @par Constraints
+ * @parblock
+ * The following constraints apply to this directive:
+ *
+ * * The directive may be called from within any runtime context.
+ *
+ * * The directive will not cause the calling task to be preempted.
+ *
+ * * The directive requires a Clock Driver.
+ * @endparblock
+ */
+void rtems_clock_get_boot_time_timeval( struct timeval *boot_time );
+
/* Generated from spec:/rtems/clock/if/get-seconds-since-epoch */
/**
diff --git a/cpukit/include/rtems/rtems/modesimpl.h b/cpukit/include/rtems/rtems/modesimpl.h
index 924e12fbee..8fdab263f1 100644
--- a/cpukit/include/rtems/rtems/modesimpl.h
+++ b/cpukit/include/rtems/rtems/modesimpl.h
@@ -22,6 +22,7 @@
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadcpubudget.h>
#include <rtems/config.h>
#ifdef __cplusplus
@@ -148,10 +149,11 @@ RTEMS_INLINE_ROUTINE void _Modes_Apply_timeslice_to_thread(
)
{
if ( _Modes_Is_timeslice( mode_set ) ) {
- the_thread->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
- the_thread->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();
+ the_thread->CPU_budget.operations = &_Thread_CPU_budget_reset_timeslice;
+ the_thread->CPU_budget.available =
+ rtems_configuration_get_ticks_per_timeslice();
} else {
- the_thread->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
+ the_thread->CPU_budget.operations = NULL;
}
}
diff --git a/cpukit/include/rtems/rtems/ratemon.h b/cpukit/include/rtems/rtems/ratemon.h
index 8823d72c55..7c789a204b 100644
--- a/cpukit/include/rtems/rtems/ratemon.h
+++ b/cpukit/include/rtems/rtems/ratemon.h
@@ -445,6 +445,10 @@ rtems_status_code rtems_rate_monotonic_delete( rtems_id id );
*
* @retval ::RTEMS_TIMEOUT The rate monotonic period has expired.
*
+ * @par Notes
+ * Resetting the processor usage time of tasks has no impact on the period
+ * status and statistics.
+ *
* @par Constraints
* @parblock
* The following constraints apply to this directive:
@@ -502,9 +506,6 @@ rtems_status_code rtems_rate_monotonic_period(
*
* @retval ::RTEMS_INVALID_ADDRESS The ``status`` parameter was NULL.
*
- * @retval ::RTEMS_NOT_DEFINED There was no status available due to a reset of
- * the processor time usage of the owner task of the period.
- *
* @par Constraints
* @parblock
* The following constraints apply to this directive:
diff --git a/cpukit/include/rtems/rtems/ratemonimpl.h b/cpukit/include/rtems/rtems/ratemonimpl.h
index 7e42a0437c..d17c7fe4de 100644
--- a/cpukit/include/rtems/rtems/ratemonimpl.h
+++ b/cpukit/include/rtems/rtems/ratemonimpl.h
@@ -48,9 +48,6 @@ extern "C" {
#define RATE_MONOTONIC_BLOCKED \
( THREAD_WAIT_CLASS_PERIOD | THREAD_WAIT_STATE_BLOCKED )
-#define RATE_MONOTONIC_READY_AGAIN \
- ( THREAD_WAIT_CLASS_PERIOD | THREAD_WAIT_STATE_READY_AGAIN )
-
/**
* @brief Allocates a period control block from
* the inactive chain of free period control blocks.
@@ -92,7 +89,7 @@ RTEMS_INLINE_ROUTINE Rate_monotonic_Control *_Rate_monotonic_Get(
void _Rate_monotonic_Timeout( Watchdog_Control *watchdog );
/**
- * @brief _Rate_monotonic_Get_status(
+ * @brief Gets the rate monotonic CPU usage status.
*
* This routine is invoked to compute the elapsed wall time and cpu
* time for a period.
@@ -102,11 +99,8 @@ void _Rate_monotonic_Timeout( Watchdog_Control *watchdog );
* since the period was initiated.
* @param[out] cpu_since_last_period is set to the cpu time used by the
* owning thread since the period was initiated.
- *
- * @retval This routine returns true if the status can be determined
- * and false otherwise.
*/
-bool _Rate_monotonic_Get_status(
+void _Rate_monotonic_Get_status(
const Rate_monotonic_Control *the_period,
Timestamp_Control *wall_since_last_period,
Timestamp_Control *cpu_since_last_period
diff --git a/cpukit/include/rtems/rtems/signal.h b/cpukit/include/rtems/rtems/signal.h
index c9fbd79b5d..9272f807bc 100644
--- a/cpukit/include/rtems/rtems/signal.h
+++ b/cpukit/include/rtems/rtems/signal.h
@@ -3,6 +3,8 @@
/**
* @file
*
+ * @ingroup RTEMSImplClassicSignal
+ *
* @brief This header file defines the parts of the Signal Manager API.
*/
diff --git a/cpukit/include/rtems/rtems/tasks.h b/cpukit/include/rtems/rtems/tasks.h
index 8c6e8a3bca..8e87bfd14a 100644
--- a/cpukit/include/rtems/rtems/tasks.h
+++ b/cpukit/include/rtems/rtems/tasks.h
@@ -867,9 +867,12 @@ rtems_status_code rtems_scheduler_add_processor(
*
* @retval ::RTEMS_INVALID_NUMBER The processor was not owned by the scheduler.
*
- * @retval ::RTEMS_RESOURCE_IN_USE The set of processors owned by the scheduler
- * would have been empty after the processor removal and there was at least
- * one non-idle task that used this scheduler as its home scheduler.
+ * @retval ::RTEMS_RESOURCE_IN_USE The processor was required by at least one
+ * non-idle task that used the scheduler as its home scheduler.
+ *
+ * @retval ::RTEMS_RESOURCE_IN_USE The processor was the last processor owned
+ * by the scheduler and there was at least one task that used the scheduler
+ * as a helping scheduler.
*
* @par Notes
* Removing a processor from a scheduler is a complex operation that involves
@@ -1267,7 +1270,8 @@ rtems_status_code rtems_task_construct(
* This directive obtains a task identifier associated with the task name
* specified in ``name``.
*
- * A task may obtain its own identifier by specifying #RTEMS_SELF for the name.
+ * A task may obtain its own identifier by specifying #RTEMS_WHO_AM_I for the
+ * name.
*
* The node to search is specified in ``node``. It shall be
*
diff --git a/cpukit/include/rtems/score/chainimpl.h b/cpukit/include/rtems/score/chainimpl.h
index 234dd1d74e..6aaa89237d 100644
--- a/cpukit/include/rtems/score/chainimpl.h
+++ b/cpukit/include/rtems/score/chainimpl.h
@@ -826,7 +826,8 @@ RTEMS_INLINE_ROUTINE bool _Chain_Get_with_empty_check_unprotected(
* @retval false Otherwise.
*/
typedef bool ( *Chain_Node_order )(
- const void *left,
+ const void *key,
+ const Chain_Node *left,
const Chain_Node *right
);
@@ -848,18 +849,20 @@ typedef bool ( *Chain_Node_order )(
RTEMS_INLINE_ROUTINE void _Chain_Insert_ordered_unprotected(
Chain_Control *the_chain,
Chain_Node *to_insert,
- const void *left,
+ const void *key,
Chain_Node_order order
)
{
const Chain_Node *tail = _Chain_Immutable_tail( the_chain );
+ Chain_Node *previous = _Chain_Head( the_chain );
Chain_Node *next = _Chain_First( the_chain );
- while ( next != tail && !( *order )( left, next ) ) {
+ while ( next != tail && !( *order )( key, to_insert, next ) ) {
+ previous = next;
next = _Chain_Next( next );
}
- _Chain_Insert_unprotected( _Chain_Previous( next ), to_insert );
+ _Chain_Insert_unprotected( previous, to_insert );
}
/**
diff --git a/cpukit/include/rtems/score/coremuteximpl.h b/cpukit/include/rtems/score/coremuteximpl.h
index 757efbde9b..5114f33e0d 100644
--- a/cpukit/include/rtems/score/coremuteximpl.h
+++ b/cpukit/include/rtems/score/coremuteximpl.h
@@ -357,12 +357,10 @@ _CORE_ceiling_mutex_Get_scheduler(
*
* @param[out] the_mutex The ceiling mutex to set the priority of.
* @param priority_ceiling The new priority ceiling of the mutex.
- * @param queue_context The thread queue context.
*/
RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
CORE_ceiling_mutex_Control *the_mutex,
- Priority_Control priority_ceiling,
- Thread_queue_Context *queue_context
+ Priority_Control priority_ceiling
)
{
Thread_Control *owner;
@@ -370,15 +368,19 @@ RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Set_priority(
owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
if ( owner != NULL ) {
- _Thread_Wait_acquire( owner, queue_context );
+ Thread_queue_Context queue_context;
+
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_clear_priority_updates( &queue_context );
+ _Thread_Wait_acquire_critical( owner, &queue_context );
_Thread_Priority_change(
owner,
&the_mutex->Priority_ceiling,
priority_ceiling,
PRIORITY_GROUP_LAST,
- queue_context
+ &queue_context
);
- _Thread_Wait_release( owner, queue_context );
+ _Thread_Wait_release_critical( owner, &queue_context );
} else {
the_mutex->Priority_ceiling.priority = priority_ceiling;
}
@@ -475,19 +477,19 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Seize(
_CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
- owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
-
- if ( owner == NULL ) {
#if defined(RTEMS_SMP)
- if (
- _Thread_Scheduler_get_home( executing )
- != _CORE_ceiling_mutex_Get_scheduler( the_mutex )
- ) {
- _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
- return STATUS_NOT_DEFINED;
- }
+ if (
+ _Thread_Scheduler_get_home( executing )
+ != _CORE_ceiling_mutex_Get_scheduler( the_mutex )
+ ) {
+ _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
+ return STATUS_NOT_DEFINED;
+ }
#endif
+ owner = _CORE_mutex_Get_owner( &the_mutex->Recursive.Mutex );
+
+ if ( owner == NULL ) {
_Thread_queue_Context_clear_priority_updates( queue_context );
return _CORE_ceiling_mutex_Set_owner(
the_mutex,
diff --git a/cpukit/include/rtems/score/exception.h b/cpukit/include/rtems/score/exception.h
new file mode 100644
index 0000000000..979a5aae7e
--- /dev/null
+++ b/cpukit/include/rtems/score/exception.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreException
+ *
+ * @brief This header file provides the interfaces for mapping
+ * exceptions to signals.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_EXCEPTION_H
+#define _RTEMS_EXCEPTION_H
+
+#include <rtems/fatal.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup RTEMSScoreException Exception Mapping Interfaces
+ *
+ * @ingroup RTEMSScore
+ *
+ * @brief This group contains the interfaces for mapping machine exceptions to
+ * signals using the fatal error handler.
+ */
+
+/**
+ * @brief Handles an exception frame for the purpose of mapping signals
+ *
+ * See CONFIGURE_EXCEPTION_TO_SIGNAL_MAPPING documentation in the
+ * "RTEMS Classic API Guide".
+ */
+void _Exception_Raise_signal(
+ Internal_errors_Source source,
+ bool always_set_to_false,
+ Internal_errors_t code
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_EXCEPTION_H */
diff --git a/cpukit/include/rtems/score/mrspimpl.h b/cpukit/include/rtems/score/mrspimpl.h
index 3e64ad94e6..daa309e7cb 100644
--- a/cpukit/include/rtems/score/mrspimpl.h
+++ b/cpukit/include/rtems/score/mrspimpl.h
@@ -268,7 +268,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
_MRSP_Set_owner( mrsp, executing );
cpu_self = _Thread_queue_Dispatch_disable( queue_context );
_MRSP_Release( mrsp, queue_context );
- _Thread_Priority_and_sticky_update( executing, 1 );
+ _Thread_Priority_update_and_make_sticky( executing );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
@@ -384,13 +384,6 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
_MRSP_Replace_priority( mrsp, executing, &ceiling_priority );
} else {
Per_CPU_Control *cpu_self;
- int sticky_level_change;
-
- if ( status != STATUS_DEADLOCK ) {
- sticky_level_change = -1;
- } else {
- sticky_level_change = 0;
- }
_ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
_MRSP_Remove_priority( executing, &ceiling_priority, queue_context );
@@ -398,7 +391,13 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
&queue_context->Lock_context.Lock_context
);
_ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
- _Thread_Priority_and_sticky_update( executing, sticky_level_change );
+
+ if ( status != STATUS_DEADLOCK ) {
+ _Thread_Priority_update_and_clean_sticky( executing );
+ } else {
+ _Thread_Priority_update_ignore_sticky( executing );
+ }
+
_Thread_Dispatch_enable( cpu_self );
}
@@ -493,7 +492,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
&queue_context->Lock_context.Lock_context
);
_MRSP_Release( mrsp, queue_context );
- _Thread_Priority_and_sticky_update( executing, -1 );
+ _Thread_Priority_update_and_clean_sticky( executing );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
diff --git a/cpukit/include/rtems/score/objectdata.h b/cpukit/include/rtems/score/objectdata.h
index 149498df9c..c7fb33ca5b 100644
--- a/cpukit/include/rtems/score/objectdata.h
+++ b/cpukit/include/rtems/score/objectdata.h
@@ -286,7 +286,7 @@ struct Objects_Information {
#if defined(RTEMS_MULTIPROCESSING)
/**
- * @brief This method is used by _Thread_queue_Extract_with_proxy().
+ * @brief This method is used by _Thread_MP_Extract_proxy().
*
* This member is statically initialized and read-only.
*/
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 6081653a86..0794f15f69 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -38,18 +38,31 @@
extern "C" {
#endif
-#if defined(RTEMS_SMP)
- #if defined(RTEMS_PROFILING)
- #define PER_CPU_CONTROL_SIZE_APPROX \
- ( 512 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
- #elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
- #define PER_CPU_CONTROL_SIZE_APPROX \
- ( 256 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
+#if defined( RTEMS_SMP )
+ #if defined( RTEMS_PROFILING )
+ #define PER_CPU_CONTROL_SIZE_PROFILING 332
+ #else
+ #define PER_CPU_CONTROL_SIZE_PROFILING 0
+ #endif
+
+ #if defined( RTEMS_DEBUG )
+ #define PER_CPU_CONTROL_SIZE_DEBUG 76
#else
- #define PER_CPU_CONTROL_SIZE_APPROX \
- ( 180 + CPU_PER_CPU_CONTROL_SIZE + CPU_INTERRUPT_FRAME_SIZE )
+ #define PER_CPU_CONTROL_SIZE_DEBUG 0
#endif
+ #if CPU_SIZEOF_POINTER > 4
+ #define PER_CPU_CONTROL_SIZE_BIG_POINTER 76
+ #else
+ #define PER_CPU_CONTROL_SIZE_BIG_POINTER 0
+ #endif
+
+ #define PER_CPU_CONTROL_SIZE_BASE 180
+ #define PER_CPU_CONTROL_SIZE_APPROX \
+ ( PER_CPU_CONTROL_SIZE_BASE + CPU_PER_CPU_CONTROL_SIZE + \
+ CPU_INTERRUPT_FRAME_SIZE + PER_CPU_CONTROL_SIZE_PROFILING + \
+ PER_CPU_CONTROL_SIZE_DEBUG + PER_CPU_CONTROL_SIZE_BIG_POINTER )
+
/*
* This ensures that on SMP configurations the individual per-CPU controls
* are on different cache lines to prevent false sharing. This define can be
diff --git a/cpukit/include/rtems/score/rbtreeimpl.h b/cpukit/include/rtems/score/rbtreeimpl.h
index 597c24d771..0867240d59 100644
--- a/cpukit/include/rtems/score/rbtreeimpl.h
+++ b/cpukit/include/rtems/score/rbtreeimpl.h
@@ -31,6 +31,32 @@ extern "C" {
*/
/**
+ * @brief Appends the node to the red-black tree.
+ *
+ * The appended node is the new maximum node of the tree. The caller shall
+ * ensure that the appended node is indeed the maximum node with respect to the
+ * tree order.
+ *
+ * @param[in, out] the_rbtree is the red-black tree control.
+ *
+ * @param the_node[out] is the node to append.
+ */
+void _RBTree_Append( RBTree_Control *the_rbtree, RBTree_Node *the_node );
+
+/**
+ * @brief Prepends the node to the red-black tree.
+ *
+ * The prepended node is the new minimum node of the tree. The caller shall
+ * ensure that the prepended node is indeed the minimum node with respect to the
+ * tree order.
+ *
+ * @param[in, out] the_rbtree is the red-black tree control.
+ *
+ * @param[out] the_node is the node to prepend.
+ */
+void _RBTree_Prepend( RBTree_Control *the_rbtree, RBTree_Node *the_node );
+
+/**
* @brief Red-black tree visitor.
*
* @param[in] node The node.
diff --git a/cpukit/include/rtems/score/scheduler.h b/cpukit/include/rtems/score/scheduler.h
index df9477fbeb..95b4414bea 100644
--- a/cpukit/include/rtems/score/scheduler.h
+++ b/cpukit/include/rtems/score/scheduler.h
@@ -135,6 +135,61 @@ typedef struct {
);
/**
+ * @brief Makes the node sticky.
+ *
+ * This operation is used by _Thread_Priority_update_and_make_sticky(). It
+ * is only called for the scheduler node of the home scheduler.
+ *
+ * Uniprocessor schedulers should provide
+ * _Scheduler_default_Sticky_do_nothing() for this operation.
+ *
+ * SMP schedulers should provide this operation using
+ * _Scheduler_SMP_Make_sticky().
+ *
+ * The make and clean sticky operations are an optimization to simplify the
+ * control flow in the update priority operation. The update priority
+ * operation is used for all scheduler nodes and not just the scheduler node
+ * of home schedulers. The update priority operation is a commonly used
+ * operation used together with block and unblock. The make and clean sticky
+ * operations are used only in specific scenarios.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+ void ( *make_sticky )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Cleans the sticky property from the node.
+ *
+ * This operation is used by _Thread_Priority_update_and_clean_sticky(). It
+ * is only called for the scheduler node of the home scheduler.
+ *
+ * Uniprocessor schedulers should provide
+ * _Scheduler_default_Sticky_do_nothing() for this operation.
+ *
+ * SMP schedulers should provide this operation using
+ * _Scheduler_SMP_Clean_sticky().
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+ void ( *clean_sticky )(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
* @brief Pin thread operation.
*
* @param[in] scheduler The scheduler instance of the specified processor.
@@ -217,9 +272,6 @@ typedef struct {
Thread_queue_Context *
);
- /** @see _Scheduler_Tick() */
- void ( *tick )( const Scheduler_Control *, Thread_Control * );
-
/** @see _Scheduler_Start_idle() */
void ( *start_idle )(
const Scheduler_Control *,
@@ -403,6 +455,24 @@ Priority_Control _Scheduler_default_Unmap_priority(
/**
* @brief Does nothing.
*
+ * This default implementation for the make and clean sticky operations
+ * should be used by uniprocessor schedulers if SMP support is enabled.
+ *
+ * @param scheduler is an unused parameter.
+ *
+ * @param the_thread is an unused parameter.
+ *
+ * @param node is an unused parameter.
+ */
+ void _Scheduler_default_Sticky_do_nothing(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+ );
+
+ /**
+ * @brief Does nothing.
+ *
* This default implementation for the thread pin or unpin operations should
* be used by uniprocessor schedulers if SMP support is enabled.
*
@@ -462,6 +532,8 @@ Priority_Control _Scheduler_default_Unmap_priority(
NULL, \
NULL, \
NULL, \
+ _Scheduler_default_Sticky_do_nothing, \
+ _Scheduler_default_Sticky_do_nothing, \
_Scheduler_default_Pin_or_unpin_do_nothing, \
_Scheduler_default_Pin_or_unpin_do_nothing, \
NULL, \
@@ -544,20 +616,6 @@ void _Scheduler_default_Cancel_job(
);
/**
- * @brief Performs tick operations depending on the CPU budget algorithm for
- * each executing thread.
- *
- * This routine is invoked as part of processing each clock tick.
- *
- * @param scheduler The scheduler.
- * @param[in, out] executing An executing thread.
- */
-void _Scheduler_default_Tick(
- const Scheduler_Control *scheduler,
- Thread_Control *executing
-);
-
-/**
* @brief Starts an idle thread.
*
* @param scheduler This parameter is unused.
diff --git a/cpukit/include/rtems/score/schedulercbs.h b/cpukit/include/rtems/score/schedulercbs.h
index 4b7efc8340..8a9a49ccd9 100644
--- a/cpukit/include/rtems/score/schedulercbs.h
+++ b/cpukit/include/rtems/score/schedulercbs.h
@@ -67,7 +67,6 @@ extern "C" {
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_CBS_Release_job, /* new period of task */ \
_Scheduler_CBS_Cancel_job, /* cancel period of task */ \
- _Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
@@ -394,15 +393,9 @@ int _Scheduler_CBS_Set_parameters (
);
/**
- * @brief Invoked when a limited time quantum is exceeded.
- *
- * This routine is invoked when a limited time quantum is exceeded.
- *
- * @param the_thread The thread that exceeded a limited time quantum.
+ * @brief These are the CBS CPU budget operations.
*/
-void _Scheduler_CBS_Budget_callout(
- Thread_Control *the_thread
-);
+extern const Thread_CPU_budget_operations _Scheduler_CBS_Budget;
/**
* @brief Initializes a CBS specific scheduler node of @a the_thread.
diff --git a/cpukit/include/rtems/score/scheduleredf.h b/cpukit/include/rtems/score/scheduleredf.h
index 9e643b93eb..258563217f 100644
--- a/cpukit/include/rtems/score/scheduleredf.h
+++ b/cpukit/include/rtems/score/scheduleredf.h
@@ -66,7 +66,6 @@ extern "C" {
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_EDF_Release_job, /* new period of task */ \
_Scheduler_EDF_Cancel_job, /* cancel period of task */ \
- _Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
diff --git a/cpukit/include/rtems/score/scheduleredfsmp.h b/cpukit/include/rtems/score/scheduleredfsmp.h
index 1841aa4a7b..75865e5a6e 100644
--- a/cpukit/include/rtems/score/scheduleredfsmp.h
+++ b/cpukit/include/rtems/score/scheduleredfsmp.h
@@ -79,9 +79,18 @@ typedef struct {
RBTree_Control Queue;
/**
- * @brief The scheduled thread of the corresponding processor.
+ * @brief If this member is not NULL, then it references the scheduled thread
+ * affine only to the corresponding processor, otherwise the processor is
+ * allocated to a thread which may execute on any of the processors owned
+ * by the scheduler.
*/
- Scheduler_EDF_SMP_Node *scheduled;
+ Scheduler_EDF_SMP_Node *affine_scheduled;
+
+ /**
+ * @brief This member references the thread allocated to the corresponding
+ * processor.
+ */
+ Scheduler_EDF_SMP_Node *allocated;
} Scheduler_EDF_SMP_Ready_queue;
typedef struct {
@@ -120,6 +129,8 @@ typedef struct {
_Scheduler_EDF_SMP_Ask_for_help, \
_Scheduler_EDF_SMP_Reconsider_help_request, \
_Scheduler_EDF_SMP_Withdraw_node, \
+ _Scheduler_EDF_SMP_Make_sticky, \
+ _Scheduler_EDF_SMP_Clean_sticky, \
_Scheduler_EDF_SMP_Pin, \
_Scheduler_EDF_SMP_Unpin, \
_Scheduler_EDF_SMP_Add_processor, \
@@ -128,7 +139,6 @@ typedef struct {
_Scheduler_default_Node_destroy, \
_Scheduler_EDF_Release_job, \
_Scheduler_EDF_Cancel_job, \
- _Scheduler_default_Tick, \
_Scheduler_EDF_SMP_Start_idle, \
_Scheduler_EDF_SMP_Set_affinity \
}
@@ -241,6 +251,36 @@ void _Scheduler_EDF_SMP_Withdraw_node(
);
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_EDF_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_EDF_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
* @brief Pin thread operation.
*
* @param scheduler The scheduler instance of the specified processor.
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
index 98f8e337fd..dda1b4ee6e 100644
--- a/cpukit/include/rtems/score/schedulerimpl.h
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -174,30 +174,6 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
}
#endif
-#if defined(RTEMS_SMP)
-void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );
-
-/**
- * @brief Registers an ask for help request if necessary.
- *
- * The actual ask for help operation is carried out during
- * _Thread_Do_dispatch() on a processor related to the thread. This yields a
- * better separation of scheduler instances. A thread of one scheduler
- * instance should not be forced to carry out too much work for threads on
- * other scheduler instances.
- *
- * @param the_thread The thread in need for help.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
-{
- _Assert( _Thread_State_is_owner( the_thread ) );
-
- if ( the_thread->Scheduler.helping_nodes > 0 ) {
- _Scheduler_Request_ask_for_help( the_thread );
- }
-}
-#endif
-
/**
* The preferred method to add a new scheduler is to define the jump table
* entries and add a case to the _Scheduler_Initialize routine.
@@ -405,65 +381,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread
#endif
}
-#if defined(RTEMS_SMP)
-/**
- * @brief Changes the sticky level of the home scheduler node and propagates a
- * priority change of a thread to the scheduler.
- *
- * @param the_thread The thread changing its priority or sticky level.
- *
- * @see _Scheduler_Update_priority().
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
- Thread_Control *the_thread,
- int sticky_level_change
-)
-{
- Chain_Node *node;
- const Chain_Node *tail;
- Scheduler_Node *scheduler_node;
- const Scheduler_Control *scheduler;
- ISR_lock_Context lock_context;
-
- _Thread_Scheduler_process_requests( the_thread );
-
- node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
- scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
- scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
-
- _Scheduler_Acquire_critical( scheduler, &lock_context );
-
- scheduler_node->sticky_level += sticky_level_change;
- _Assert( scheduler_node->sticky_level >= 0 );
-
- ( *scheduler->Operations.update_priority )(
- scheduler,
- the_thread,
- scheduler_node
- );
-
- _Scheduler_Release_critical( scheduler, &lock_context );
-
- tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
- node = _Chain_Next( node );
-
- while ( node != tail ) {
- scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
- scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
-
- _Scheduler_Acquire_critical( scheduler, &lock_context );
- ( *scheduler->Operations.update_priority )(
- scheduler,
- the_thread,
- scheduler_node
- );
- _Scheduler_Release_critical( scheduler, &lock_context );
-
- node = _Chain_Next( node );
- }
-}
-#endif
-
/**
* @brief Maps a thread priority from the user domain to the scheduler domain.
*
@@ -600,44 +517,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
}
/**
- * @brief Scheduler method invoked at each clock tick.
- *
- * This method is invoked at each clock tick to allow the scheduler
- * implementation to perform any activities required. For the
- * scheduler which support standard RTEMS features, this includes
- * time-slicing management.
- *
- * @param cpu The cpu control for the operation.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
-{
- const Scheduler_Control *scheduler;
- Thread_Control *executing;
-
- scheduler = _Scheduler_Get_by_CPU( cpu );
-
-#if defined(RTEMS_SMP)
- if ( scheduler == NULL ) {
- /*
- * In SMP configurations, processors may be removed/added at runtime
- * from/to a scheduler. There may be still clock interrupts on currently
- * unassigned processors.
- */
- return;
- }
-#endif
-
- /*
- * Each online processor has at least an idle thread as the executing thread
- * even in case it has currently no scheduler assigned. Clock interrupts on
- * processors which are not online would be a severe bug of the Clock Driver.
- */
- executing = _Per_CPU_Get_executing( cpu );
- _Assert( executing != NULL );
- ( *scheduler->Operations.tick )( scheduler, executing );
-}
-
-/**
* @brief Starts the idle thread for a particular processor.
*
* @param scheduler The scheduler instance.
@@ -799,7 +678,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
/* TODO: flash critical section? */
- if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
+ if ( _Thread_Is_heir( the_thread ) ) {
( *schedule )( scheduler, the_thread, true );
}
}
@@ -895,26 +774,26 @@ RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
#if defined(RTEMS_SMP)
/**
- * @brief Gets an idle thread from the scheduler instance.
+ * @brief Gets a scheduler node which is owned by an unused idle thread.
*
- * @param context The scheduler instance context.
+ * @param arg is the handler argument.
*
- * @return idle An idle thread for use. This function must always return an
- * idle thread. If none is available, then this is a fatal error.
+ * @return Returns a scheduler node owned by an idle thread for use. This
+ * handler must always return a node. If none is available, then this is a
+ * fatal error.
*/
-typedef Thread_Control *( *Scheduler_Get_idle_thread )(
- Scheduler_Context *context
-);
+typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );
/**
- * @brief Releases an idle thread to the scheduler instance for reuse.
+ * @brief Releases the scheduler node which is owned by an idle thread.
+ *
+ * @param node is the node to release.
*
- * @param context The scheduler instance context.
- * @param idle The idle thread to release.
+ * @param arg is the handler argument.
*/
-typedef void ( *Scheduler_Release_idle_thread )(
- Scheduler_Context *context,
- Thread_Control *idle
+typedef void ( *Scheduler_Release_idle_node )(
+ Scheduler_Node *node,
+ void *arg
);
/**
@@ -938,319 +817,119 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
}
/**
- * @brief Sets the scheduler node's idle thread.
+ * @brief Uses an idle thread for the scheduler node.
*
- * @param[in, out] node The node to receive an idle thread.
- * @param idle The idle thread control for the operation.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
- Scheduler_Node *node,
- Thread_Control *idle
-)
-{
- _Assert( _Scheduler_Node_get_idle( node ) == NULL );
- _Assert(
- _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
- );
-
- _Scheduler_Node_set_user( node, idle );
- node->idle = idle;
-}
-
-/**
- * @brief Uses an idle thread for this scheduler node.
+ * @param[in, out] node is the node which wants to use an idle thread.
*
- * A thread whose home scheduler node has a sticky level greater than zero may
- * use an idle thread in the home scheduler instance in the case it executes
- * currently in another scheduler instance or in the case it is in a blocking
- * state.
+ * @param get_idle_node is the get idle node handler.
*
- * @param context The scheduler instance context.
- * @param[in, out] node The node which wants to use the idle thread.
- * @param cpu The processor for the idle thread.
- * @param get_idle_thread Function to get an idle thread.
+ * @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Per_CPU_Control *cpu,
- Scheduler_Get_idle_thread get_idle_thread
+ Scheduler_Node *node,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
- Thread_Control *idle = ( *get_idle_thread )( context );
+ Scheduler_Node *idle_node;
+ Thread_Control *idle;
+
+ idle_node = ( *get_idle_node )( arg );
+ idle = _Scheduler_Node_get_owner( idle_node );
+ _Assert( idle->is_idle );
+ _Scheduler_Node_set_idle_user( node, idle );
- _Scheduler_Set_idle_thread( node, idle );
- _Thread_Set_CPU( idle, cpu );
return idle;
}
/**
- * @brief This enumeration defines what a scheduler should do with a node which
- * could be scheduled.
- */
-typedef enum {
- SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
- SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
- SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
-} Scheduler_Try_to_schedule_action;
-
-/**
- * @brief Tries to schedule the scheduler node.
- *
- * When a scheduler needs to schedule a node, it shall use this function to
- * determine what it shall do with the node. The node replaces a victim node if
- * it can be scheduled.
- *
- * This function uses the state of the node and the scheduler state of the owner
- * thread to determine what shall be done. Each scheduler maintains its nodes
- * independent of other schedulers. This function ensures that a thread is
- * scheduled by at most one scheduler. If a node requires an executing thread
- * due to some locking protocol and the owner thread is already scheduled by
- * another scheduler, then an idle thread shall be attached to the node.
- *
- * @param[in, out] context is the scheduler context.
- * @param[in, out] node is the node which could be scheduled.
- * @param idle is an idle thread used by the victim node or NULL.
- * @param get_idle_thread points to a function to get an idle thread.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE The node shall be scheduled.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The node shall be
- * scheduled and the provided idle thread shall be attached to the node. This
- * action is returned, if the node cannot use the owner thread and shall use
- * an idle thread instead. In this case, the idle thread is provided by the
- * victim node.
- *
- * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK The node shall be blocked. This
- * action is returned, if the owner thread is already scheduled by another
- * scheduler.
+ * @brief Releases the idle thread used by the scheduler node.
+ *
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param idle is the idle thread to release.
+ *
+ * @param release_idle_node is the release idle node handler.
+ *
+ * @param arg is the handler argument.
*/
-RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
-_Scheduler_Try_to_schedule_node(
- Scheduler_Context *context,
- Scheduler_Node *node,
- const Thread_Control *idle,
- Scheduler_Get_idle_thread get_idle_thread
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_idle_thread(
+ Scheduler_Node *node,
+ const Thread_Control *idle,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
- ISR_lock_Context lock_context;
- Scheduler_Try_to_schedule_action action;
- Thread_Control *owner;
+ Thread_Control *owner;
+ Scheduler_Node *idle_node;
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
owner = _Scheduler_Node_get_owner( node );
- _Assert( _Scheduler_Node_get_user( node ) == owner );
- _Assert( _Scheduler_Node_get_idle( node ) == NULL );
-
- _Thread_Scheduler_acquire_critical( owner, &lock_context );
-
- if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
- _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
- _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
- } else if (
- owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
- && node->sticky_level <= 1
- ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
- } else if ( node->sticky_level == 0 ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
- } else if ( idle != NULL ) {
- action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
- } else {
- _Scheduler_Use_idle_thread(
- context,
- node,
- _Thread_Get_CPU( owner ),
- get_idle_thread
- );
- }
-
- _Thread_Scheduler_release_critical( owner, &lock_context );
- return action;
+ _Assert( _Scheduler_Node_get_user( node ) == idle );
+ _Scheduler_Node_set_user( node, owner );
+ node->idle = NULL;
+ idle_node = _Thread_Scheduler_get_home_node( idle );
+ ( *release_idle_node )( idle_node, arg );
}
/**
- * @brief Releases an idle thread using this scheduler node.
+ * @brief Releases the idle thread used by the scheduler node if the node uses
+ * an idle thread.
+ *
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param release_idle_node is the release idle node handler.
+ *
+ * @param arg is the handler argument.
*
- * @param context The scheduler instance context.
- * @param[in, out] node The node which may have an idle thread as user.
- * @param release_idle_thread Function to release an idle thread.
+ * @retval NULL The scheduler node did not use an idle thread.
*
- * @retval idle The idle thread which used this node.
- * @retval NULL This node had no idle thread as an user.
+ * @return Returns the idle thread used by the scheduler node.
*/
-RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Scheduler_Release_idle_thread release_idle_thread
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
+ Scheduler_Node *node,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
- Thread_Control *idle = _Scheduler_Node_get_idle( node );
+ Thread_Control *idle;
- if ( idle != NULL ) {
- Thread_Control *owner = _Scheduler_Node_get_owner( node );
+ idle = _Scheduler_Node_get_idle( node );
- node->idle = NULL;
- _Scheduler_Node_set_user( node, owner );
- ( *release_idle_thread )( context, idle );
+ if ( idle != NULL ) {
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
}
return idle;
}
/**
- * @brief Exchanges an idle thread from the scheduler node that uses it
- * right now to another scheduler node.
+ * @brief Discards the idle thread used by the scheduler node.
*
- * @param needs_idle The scheduler node that needs an idle thread.
- * @param uses_idle The scheduler node that used the idle thread.
- * @param idle The idle thread that is exchanged.
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
- Scheduler_Node *needs_idle,
- Scheduler_Node *uses_idle,
- Thread_Control *idle
-)
-{
- uses_idle->idle = NULL;
- _Scheduler_Node_set_user(
- uses_idle,
- _Scheduler_Node_get_owner( uses_idle )
- );
- _Scheduler_Set_idle_thread( needs_idle, idle );
-}
-
-/**
- * @brief Blocks this scheduler node.
- *
- * @param context The scheduler instance context.
- * @param[in, out] thread The thread which wants to get blocked referencing this
- * node. This is not necessarily the user of this node in case the node
- * participates in the scheduler helping protocol.
- * @param[in, out] node The node which wants to get blocked.
- * @param is_scheduled This node is scheduled.
- * @param get_idle_thread Function to get an idle thread.
- *
- * @retval thread_cpu The processor of the thread. Indicates to continue with
- * the blocking operation.
- * @retval NULL Otherwise.
- */
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- bool is_scheduled,
- Scheduler_Get_idle_thread get_idle_thread
-)
-{
- int sticky_level;
- ISR_lock_Context lock_context;
- Per_CPU_Control *thread_cpu;
-
- sticky_level = node->sticky_level;
- --sticky_level;
- node->sticky_level = sticky_level;
- _Assert( sticky_level >= 0 );
-
- _Thread_Scheduler_acquire_critical( thread, &lock_context );
- thread_cpu = _Thread_Get_CPU( thread );
- _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
- _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
- _Thread_Scheduler_release_critical( thread, &lock_context );
-
- if ( sticky_level > 0 ) {
- if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
- Thread_Control *idle;
-
- idle = _Scheduler_Use_idle_thread(
- context,
- node,
- thread_cpu,
- get_idle_thread
- );
- _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
- }
-
- return NULL;
- }
-
- _Assert( thread == _Scheduler_Node_get_user( node ) );
- return thread_cpu;
-}
-
-/**
- * @brief Discard the idle thread from the scheduler node.
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the node which wants to release the idle thread.
+ *
+ * @param release_idle_node is the release idle node handler.
*
- * @param context The scheduler context.
- * @param[in, out] the_thread The thread for the operation.
- * @param[in, out] node The scheduler node to discard the idle thread from.
- * @param release_idle_thread Method to release the idle thread from the context.
+ * @param arg is the handler argument.
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
- Scheduler_Context *context,
- Thread_Control *the_thread,
- Scheduler_Node *node,
- Scheduler_Release_idle_thread release_idle_thread
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_Release_idle_node release_idle_node,
+ void *arg
)
{
Thread_Control *idle;
- Thread_Control *owner;
Per_CPU_Control *cpu;
idle = _Scheduler_Node_get_idle( node );
- owner = _Scheduler_Node_get_owner( node );
-
- node->idle = NULL;
- _Assert( _Scheduler_Node_get_user( node ) == idle );
- _Scheduler_Node_set_user( node, owner );
- ( *release_idle_thread )( context, idle );
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
cpu = _Thread_Get_CPU( idle );
_Thread_Set_CPU( the_thread, cpu );
_Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}
-
-/**
- * @brief Unblocks this scheduler node.
- *
- * @param context The scheduler instance context.
- * @param[in, out] the_thread The thread which wants to get unblocked.
- * @param[in, out] node The node which wants to get unblocked.
- * @param is_scheduled This node is scheduled.
- * @param release_idle_thread Function to release an idle thread.
- *
- * @retval true Continue with the unblocking operation.
- * @retval false Do not continue with the unblocking operation.
- */
-RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
- Scheduler_Context *context,
- Thread_Control *the_thread,
- Scheduler_Node *node,
- bool is_scheduled,
- Scheduler_Release_idle_thread release_idle_thread
-)
-{
- bool unblock;
-
- ++node->sticky_level;
- _Assert( node->sticky_level > 0 );
-
- if ( is_scheduled ) {
- _Scheduler_Discard_idle_thread(
- context,
- the_thread,
- node,
- release_idle_thread
- );
- _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
- unblock = false;
- } else {
- _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
- unblock = true;
- }
-
- return unblock;
-}
#endif
/**
@@ -1270,8 +949,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
/*
- * We need this state only for _Thread_Get_CPU_time_used(). Cannot use
- * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
+ * We need this state only for _Thread_Get_CPU_time_used_locked(). Cannot
+ * use _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
* THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
* schedulers.
*/
diff --git a/cpukit/include/rtems/score/schedulernodeimpl.h b/cpukit/include/rtems/score/schedulernodeimpl.h
index e222de953b..ecdc3c6d62 100644
--- a/cpukit/include/rtems/score/schedulernodeimpl.h
+++ b/cpukit/include/rtems/score/schedulernodeimpl.h
@@ -264,6 +264,27 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
{
return node->idle;
}
+
+/**
+ * @brief Sets the scheduler node's user to the idle thread.
+ *
+ * @param[in, out] node is the node to receive an idle thread.
+ *
+ * @param idle is the idle thread to use.
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_idle_user(
+ Scheduler_Node *node,
+ Thread_Control *idle
+)
+{
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+ _Assert(
+ _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
+ );
+
+ _Scheduler_Node_set_user( node, idle );
+ node->idle = idle;
+}
#endif
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/schedulerpriority.h b/cpukit/include/rtems/score/schedulerpriority.h
index e0991f5e31..1ddbb4a8cc 100644
--- a/cpukit/include/rtems/score/schedulerpriority.h
+++ b/cpukit/include/rtems/score/schedulerpriority.h
@@ -57,7 +57,6 @@ extern "C" {
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_default_Release_job, /* new period of task */ \
_Scheduler_default_Cancel_job, /* cancel period of task */ \
- _Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
diff --git a/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
index 1b660fa296..d77629b39d 100644
--- a/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -65,6 +65,8 @@ extern "C" {
_Scheduler_priority_affinity_SMP_Ask_for_help, \
_Scheduler_priority_affinity_SMP_Reconsider_help_request, \
_Scheduler_priority_affinity_SMP_Withdraw_node, \
+ _Scheduler_priority_affinity_SMP_Make_sticky, \
+ _Scheduler_priority_affinity_SMP_Clean_sticky, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_priority_affinity_SMP_Add_processor, \
@@ -73,7 +75,6 @@ extern "C" {
_Scheduler_default_Node_destroy, \
_Scheduler_default_Release_job, \
_Scheduler_default_Cancel_job, \
- _Scheduler_default_Tick, \
_Scheduler_SMP_Start_idle, \
_Scheduler_priority_affinity_SMP_Set_affinity \
}
@@ -182,6 +183,36 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
);
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_priority_affinity_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_priority_affinity_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
* @brief Adds @a idle to @a scheduler.
*
* @param[in, out] scheduler The scheduler instance to add the processor to.
diff --git a/cpukit/include/rtems/score/schedulerprioritysmp.h b/cpukit/include/rtems/score/schedulerprioritysmp.h
index 56f4aa5a5b..21916647bc 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmp.h
@@ -57,6 +57,7 @@ extern "C" {
*/
typedef struct {
Scheduler_SMP_Context Base;
+ Chain_Control *idle_ready_queue;
Priority_bit_map_Control Bit_map;
Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
} Scheduler_priority_SMP_Context;
@@ -93,6 +94,8 @@ typedef struct {
_Scheduler_priority_SMP_Ask_for_help, \
_Scheduler_priority_SMP_Reconsider_help_request, \
_Scheduler_priority_SMP_Withdraw_node, \
+ _Scheduler_priority_SMP_Make_sticky, \
+ _Scheduler_priority_SMP_Clean_sticky, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_priority_SMP_Add_processor, \
@@ -101,7 +104,6 @@ typedef struct {
_Scheduler_default_Node_destroy, \
_Scheduler_default_Release_job, \
_Scheduler_default_Cancel_job, \
- _Scheduler_default_Tick, \
_Scheduler_SMP_Start_idle \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
@@ -216,6 +218,36 @@ void _Scheduler_priority_SMP_Withdraw_node(
);
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_priority_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_priority_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
* @brief Adds @a idle to @a scheduler.
*
* @param[in, out] scheduler The scheduler instance to add the processor to.
diff --git a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
index 6e14200840..8ffd8d01c1 100644
--- a/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/include/rtems/score/schedulerprioritysmpimpl.h
@@ -156,6 +156,41 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
);
}
+static inline Scheduler_Node *_Scheduler_priority_SMP_Get_idle( void *arg )
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *lowest_ready;
+
+ self = _Scheduler_priority_SMP_Get_self( arg );
+ lowest_ready = (Scheduler_priority_SMP_Node *)
+ _Chain_Last( self->idle_ready_queue );
+ _Scheduler_priority_Ready_queue_extract(
+ &lowest_ready->Base.Base.Node.Chain,
+ &lowest_ready->Ready_queue,
+ &self->Bit_map
+ );
+
+ return &lowest_ready->Base.Base;
+}
+
+static inline void _Scheduler_priority_SMP_Release_idle(
+ Scheduler_Node *node_base,
+ void *arg
+)
+{
+ Scheduler_priority_SMP_Context *self;
+ Scheduler_priority_SMP_Node *node;
+
+ self = _Scheduler_priority_SMP_Get_self( arg );
+ node = _Scheduler_priority_SMP_Node_downcast( node_base );
+
+ _Scheduler_priority_Ready_queue_enqueue(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+}
+
static inline void _Scheduler_priority_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *node_to_update,
diff --git a/cpukit/include/rtems/score/schedulersimple.h b/cpukit/include/rtems/score/schedulersimple.h
index 63310e9796..15471a6498 100644
--- a/cpukit/include/rtems/score/schedulersimple.h
+++ b/cpukit/include/rtems/score/schedulersimple.h
@@ -56,7 +56,6 @@ extern "C" {
_Scheduler_default_Node_destroy, /* node destroy entry point */ \
_Scheduler_default_Release_job, /* new period of task */ \
_Scheduler_default_Cancel_job, /* cancel period of task */ \
- _Scheduler_default_Tick, /* tick entry point */ \
_Scheduler_default_Start_idle /* start idle entry point */ \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
diff --git a/cpukit/include/rtems/score/schedulersimpleimpl.h b/cpukit/include/rtems/score/schedulersimpleimpl.h
index 08ad7b8c66..9d762e058a 100644
--- a/cpukit/include/rtems/score/schedulersimpleimpl.h
+++ b/cpukit/include/rtems/score/schedulersimpleimpl.h
@@ -48,21 +48,26 @@ RTEMS_INLINE_ROUTINE Scheduler_simple_Context *
/**
* @brief Checks if the priority is less or equal than the priority of the node.
*
- * @param to_insert The priority to check whether it is less or equal than @a next.
- * @param next The Chain node to compare the priority of.
+ * @param key is the priority to compare.
+ *
+ * @param to_insert is the chain node to insert.
+ *
+ * @param next is the chain node to compare the priority of.
*
* @retval true @a to_insert is smaller or equal than the priority of @a next.
* @retval false @a to_insert is greater than the priority of @a next.
*/
RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Priority_less_equal(
- const void *to_insert,
+ const void *key,
+ const Chain_Node *to_insert,
const Chain_Node *next
)
{
const unsigned int *priority_to_insert;
const Thread_Control *thread_next;
- priority_to_insert = (const unsigned int *) to_insert;
+ (void) to_insert;
+ priority_to_insert = (const unsigned int *) key;
thread_next = (const Thread_Control *) next;
return *priority_to_insert <= _Thread_Get_priority( thread_next );
diff --git a/cpukit/include/rtems/score/schedulersimplesmp.h b/cpukit/include/rtems/score/schedulersimplesmp.h
index 961e60ae73..3b6f43869e 100644
--- a/cpukit/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/include/rtems/score/schedulersimplesmp.h
@@ -75,6 +75,8 @@ typedef struct {
_Scheduler_simple_SMP_Ask_for_help, \
_Scheduler_simple_SMP_Reconsider_help_request, \
_Scheduler_simple_SMP_Withdraw_node, \
+ _Scheduler_simple_SMP_Make_sticky, \
+ _Scheduler_simple_SMP_Clean_sticky, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_simple_SMP_Add_processor, \
@@ -83,7 +85,6 @@ typedef struct {
_Scheduler_default_Node_destroy, \
_Scheduler_default_Release_job, \
_Scheduler_default_Cancel_job, \
- _Scheduler_default_Tick, \
_Scheduler_SMP_Start_idle \
SCHEDULER_DEFAULT_SET_AFFINITY_OPERATION \
}
@@ -196,6 +197,36 @@ void _Scheduler_simple_SMP_Withdraw_node(
);
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_simple_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_simple_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
* @brief Adds @a idle to @a scheduler.
*
* @param[in, out] scheduler The scheduler instance to add the processor to.
diff --git a/cpukit/include/rtems/score/schedulersmp.h b/cpukit/include/rtems/score/schedulersmp.h
index 1d5294b4f0..fb4d6c46d2 100644
--- a/cpukit/include/rtems/score/schedulersmp.h
+++ b/cpukit/include/rtems/score/schedulersmp.h
@@ -55,15 +55,6 @@ typedef struct {
* @brief The chain of scheduled nodes.
*/
Chain_Control Scheduled;
-
- /**
- * @brief Chain of the available idle threads.
- *
- * Idle threads are used for the scheduler helping protocol. It is crucial
- * that the idle threads preserve their relative order. This is the case for
- * this priority based scheduler.
- */
- Chain_Control Idle_threads;
} Scheduler_SMP_Context;
/**
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index dbfc241b18..c37f53c8c0 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -8,7 +8,7 @@
*/
/*
- * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2021 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -282,7 +282,11 @@ typedef bool ( *Scheduler_SMP_Has_ready )(
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *filter
+);
+
+typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
+ Scheduler_Context *context
);
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
@@ -330,11 +334,16 @@ typedef bool ( *Scheduler_SMP_Enqueue )(
Priority_Control priority
);
+typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_enqueue,
+ Priority_Control priority
+);
+
typedef void ( *Scheduler_SMP_Allocate_processor )(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
);
typedef void ( *Scheduler_SMP_Register_idle )(
@@ -364,21 +373,26 @@ static inline void _Scheduler_SMP_Do_nothing_register_idle(
/**
* @brief Checks if @a to_insert is less or equal than the priority of the chain node.
*
- * @param to_insert The priority to compare.
- * @param next The chain node to compare the priority of.
+ * @param key is the priority to compare.
+ *
+ * @param to_insert is the chain node to insert.
+ *
+ * @param next is the chain node to compare the priority of.
*
* @retval true @a to_insert is less or equal than the priority of @a next.
* @retval false @a to_insert is greater than the priority of @a next.
*/
static inline bool _Scheduler_SMP_Priority_less_equal(
- const void *to_insert,
+ const void *key,
+ const Chain_Node *to_insert,
const Chain_Node *next
)
{
const Priority_Control *priority_to_insert;
const Scheduler_SMP_Node *node_next;
- priority_to_insert = (const Priority_Control *) to_insert;
+ (void) to_insert;
+ priority_to_insert = (const Priority_Control *) key;
node_next = (const Scheduler_SMP_Node *) next;
return *priority_to_insert <= node_next->priority;
@@ -408,7 +422,6 @@ static inline void _Scheduler_SMP_Initialize(
)
{
_Chain_Initialize_empty( &self->Scheduled );
- _Chain_Initialize_empty( &self->Idle_threads );
}
/**
@@ -550,205 +563,300 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
}
/**
- * @brief Gets The first idle thread of the given context.
+ * @brief Removes the thread's ask for help request from the processor.
+ *
+ * The caller must be the owner of the thread's scheduler lock.
*
- * @param context The scheduler context to get the first idle thread from.
+ * @param[in, out] thread is the thread of the ask for help request.
*
- * @return The first idle thread of @a context.
+ * @param[in, out] cpu is the processor from which the ask for help request
+ * should be removed.
*/
-static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
- Scheduler_Context *context
-)
+void _Scheduler_SMP_Remove_ask_for_help_from_processor(
+ Thread_Control *thread,
+ Per_CPU_Control *cpu
+);
+
+/**
+ * @brief Cancels the thread's ask for help request.
+ *
+ * The caller must be the owner of the thread's scheduler lock.
+ *
+ * @param[in, out] thread is the thread of the ask for help request.
+ */
+static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Thread_Control *idle = (Thread_Control *)
- _Chain_Get_first_unprotected( &self->Idle_threads );
+ Per_CPU_Control *cpu;
- _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+ _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
+ cpu = thread->Scheduler.ask_for_help_cpu;
- return idle;
+ if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
+ _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
+ }
}
/**
- * @brief Releases the thread and adds it to the idle threads.
+ * @brief Requests to ask for help for the thread.
*
- * @param[in, out] context The scheduler context instance.
- * @param idle The thread to add to the idle threads.
+ * The actual ask for help operations are carried out during
+ * _Thread_Do_dispatch() on the current processor.
+ *
+ * An alternative approach would be to carry out the requests on a processor
+ * related to the thread. This could reduce the overhead for the preempting
+ * thread a bit, however, there are at least two problems with this approach.
+ * Firstly, we have to figure out what is a processor related to the thread.
+ * Secondly, we may need an inter-processor interrupt.
+ *
+ * @param[in, out] thread is the thread in need for help.
*/
-static inline void _Scheduler_SMP_Release_idle_thread(
- Scheduler_Context *context,
- Thread_Control *idle
-)
+static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu_self;
+
+ cpu_self = _Per_CPU_Get();
- _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
+ _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
+ thread->Scheduler.ask_for_help_cpu = cpu_self;
+ cpu_self->dispatch_necessary = true;
+
+ _Per_CPU_Acquire( cpu_self, &lock_context );
+ _Chain_Append_unprotected(
+ &cpu_self->Threads_in_need_for_help,
+ &thread->Scheduler.Help_node
+ );
+ _Per_CPU_Release( cpu_self, &lock_context );
}
/**
- * @brief Extracts the node of the idle thread.
+ * @brief This enumeration defines what a scheduler should do with a node which
+ * could be scheduled.
+ */
+typedef enum {
+ SCHEDULER_SMP_DO_SCHEDULE,
+ SCHEDULER_SMP_DO_NOT_SCHEDULE
+} Scheduler_SMP_Action;
+
+/**
+ * @brief Tries to schedule the scheduler node.
+ *
+ * When an SMP scheduler needs to schedule a node, it shall use this function
+ * to determine what it shall do with the node.
*
- * @param[in, out] idle The thread to extract the node of.
+ * This function uses the state of the node and the scheduler state of the
+ * owner thread to determine what shall be done. Each scheduler maintains its
+ * nodes independent of other schedulers. This function ensures that a thread
+ * is scheduled by at most one scheduler. If a node requires an executing
+ * thread due to some locking protocol and the owner thread is already
+ * scheduled by another scheduler, then an idle thread will be attached to the
+ * node.
+ *
+ * @param[in, out] node is the node which should be scheduled.
+ *
+ * @param get_idle_node is the get idle node handler.
+ *
+ * @param arg is the get idle node handler argument.
+ *
+ * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
+ *
+ * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall be blocked. This
+ * action is returned, if the owner thread is already scheduled by another
+ * scheduler.
*/
-static inline void _Scheduler_SMP_Exctract_idle_thread(
- Thread_Control *idle
+static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
+ Scheduler_Node *node,
+ Scheduler_Get_idle_node get_idle_node,
+ void *arg
)
{
- _Chain_Extract_unprotected( &idle->Object.Node );
+ ISR_lock_Context lock_context;
+ Thread_Control *owner;
+ Thread_Scheduler_state owner_state;
+ int owner_sticky_level;
+
+ owner = _Scheduler_Node_get_owner( node );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ _Thread_Scheduler_acquire_critical( owner, &lock_context );
+ owner_state = owner->Scheduler.state;
+ owner_sticky_level = node->sticky_level;
+
+ if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
+ _Scheduler_SMP_Cancel_ask_for_help( owner );
+ _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ return SCHEDULER_SMP_DO_SCHEDULE;
+ }
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+
+ if (
+ ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
+ owner_sticky_level == 0
+ ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+ return SCHEDULER_SMP_DO_NOT_SCHEDULE;
+ }
+
+ (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );
+
+ return SCHEDULER_SMP_DO_SCHEDULE;
}
/**
- * @brief Allocates the cpu for the scheduled thread.
+ * @brief Allocates a processor to the user of the scheduled node.
*
* Attempts to prevent migrations but does not take into account affinity.
*
- * @param context The scheduler context instance.
- * @param scheduled The scheduled node that should be executed next.
- * @param victim If the heir is this node's thread, no processor is allocated.
- * @param[in, out] victim_cpu The cpu to allocate.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
*/
static inline void _Scheduler_SMP_Allocate_processor_lazy(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
- Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
Per_CPU_Control *cpu_self = _Per_CPU_Get();
- Thread_Control *heir;
_Assert( _ISR_Get_level() != 0 );
- if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
- if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
- heir = scheduled_cpu->heir;
- _Thread_Dispatch_update_heir(
- cpu_self,
- scheduled_cpu,
- scheduled_thread
- );
- } else {
- /* We have to force a migration to our processor set */
- heir = scheduled_thread;
- }
- } else {
- heir = scheduled_thread;
+ if ( cpu == scheduled_cpu ) {
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
+
+ return;
}
- if ( heir != victim_thread ) {
- _Thread_Set_CPU( heir, victim_cpu );
- _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
+ if (
+ _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
+ _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
+ ) {
+ Thread_Control *heir = scheduled_cpu->heir;
+ _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
+ _Thread_Set_CPU( heir, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, heir );
+
+ return;
}
+
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}
/**
- * @brief Allocates the cpu for the scheduled thread.
+ * @brief Allocates exactly the processor to the user of the scheduled node.
*
* This method is slightly different from
* _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
* do. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
* but does not take into account affinity.
*
- * @param context This parameter is unused.
- * @param scheduled The scheduled node whose thread should be executed next.
- * @param victim This parameter is unused.
- * @param[in, out] victim_cpu The cpu to allocate.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
*/
static inline void _Scheduler_SMP_Allocate_processor_exact(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
Per_CPU_Control *cpu_self = _Per_CPU_Get();
(void) context;
- (void) victim;
- _Thread_Set_CPU( scheduled_thread, victim_cpu );
- _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
+ _Thread_Set_CPU( scheduled_thread, cpu );
+ _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}
/**
- * @brief Allocates the cpu for the scheduled thread using the given allocation function.
+ * @brief Allocates the processor to the user of the scheduled node using the
+ * given allocation handler.
*
- * @param context The scheduler context instance.
- * @param scheduled The scheduled node that should be executed next.
- * @param victim If the heir is this node's thread, no processor is allocated.
- * @param[in, out] victim_cpu The cpu to allocate.
- * @param allocate_processor The function to use for the allocation of @a victim_cpu.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
+ *
+ * @param[in, out] cpu is the processor to allocate.
+ *
+ * @param allocate_processor is the handler which should allocate the processor.
*/
static inline void _Scheduler_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
+ Per_CPU_Control *cpu,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
_Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
- ( *allocate_processor )( context, scheduled, victim, victim_cpu );
+ ( *allocate_processor )( context, scheduled, cpu );
}
/**
- * @brief Preempts the victim's thread and allocates a cpu for the scheduled thread.
+ * @brief Preempts the victim's thread and allocates a processor for the user
+ * of the scheduled node.
*
- * @param context The scheduler context instance.
- * @param scheduled Node of the scheduled thread that is about to be executed.
- * @param[in, out] victim Node of the thread to preempt.
- * @param allocate_processor The function for allocation of a processor for the new thread.
+ * @param[in, out] context is the scheduler context.
+ *
+ * @param[in, out] scheduled is the node of the user thread that is about to
+ * get a processor allocated.
+ *
+ * @param[in, out] victim is the victim node of the thread to preempt.
*
- * @return The preempted thread.
+ * @param[in, out] victim_idle is the idle thread used by the victim node or NULL.
+ *
+ * @param allocate_processor is the handler which should allocate the processor.
*/
-static inline Thread_Control *_Scheduler_SMP_Preempt(
+static inline void _Scheduler_SMP_Preempt(
Scheduler_Context *context,
Scheduler_Node *scheduled,
Scheduler_Node *victim,
+ Thread_Control *victim_idle,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- Thread_Control *victim_thread;
- ISR_lock_Context scheduler_lock_context;
- Per_CPU_Control *victim_cpu;
+ Thread_Control *victim_owner;
+ ISR_lock_Context lock_context;
+ Per_CPU_Control *cpu;
- victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
- _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
+ victim_owner = _Scheduler_Node_get_owner( victim );
+ _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );
- victim_cpu = _Thread_Get_CPU( victim_thread );
+ if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
+ if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
+ _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );
- if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
- _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
-
- if ( victim_thread->Scheduler.helping_nodes > 0 ) {
- ISR_lock_Context per_cpu_lock_context;
-
- _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
- _Chain_Append_unprotected(
- &victim_cpu->Threads_in_need_for_help,
- &victim_thread->Scheduler.Help_node
- );
- _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
+ if ( victim_owner->Scheduler.helping_nodes > 0 ) {
+ _Scheduler_SMP_Request_ask_for_help( victim_owner );
+ }
}
+
+ cpu = _Thread_Get_CPU( victim_owner );
+ } else {
+ cpu = _Thread_Get_CPU( victim_idle );
}
- _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
+ _Thread_Scheduler_release_critical( victim_owner, &lock_context );
_Scheduler_SMP_Allocate_processor(
context,
scheduled,
- victim,
- victim_cpu,
+ cpu,
allocate_processor
);
-
- return victim_thread;
}
/**
@@ -764,16 +872,19 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
Scheduler_Node *filter
)
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Chain_Control *scheduled = &self->Scheduled;
- Scheduler_Node *lowest_scheduled =
- (Scheduler_Node *) _Chain_Last( scheduled );
+ Scheduler_SMP_Context *self;
+ Scheduler_Node *lowest_scheduled;
(void) filter;
- _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
+ self = _Scheduler_SMP_Get_self( context );
+
+ _Assert( !_Chain_Is_empty( &self->Scheduled ) );
+ lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );
+
_Assert(
- _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
+ _Chain_Next( &lowest_scheduled->Node.Chain ) ==
+ _Chain_Tail( &self->Scheduled )
);
return lowest_scheduled;
@@ -802,52 +913,43 @@ static inline void _Scheduler_SMP_Enqueue_to_scheduled(
Scheduler_Node *lowest_scheduled,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *lowest_scheduled_idle;
+ Scheduler_SMP_Action action;
- action = _Scheduler_Try_to_schedule_node(
- context,
- node,
- _Scheduler_Node_get_idle( lowest_scheduled ),
- _Scheduler_SMP_Get_idle_thread
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
+ lowest_scheduled,
+ release_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+ action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );
+
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
( *insert_scheduled )( context, node, priority );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
-
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state(
- lowest_scheduled,
- SCHEDULER_SMP_NODE_READY
- );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
+ } else {
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
- ( *insert_scheduled )( context, node, priority );
- ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+ if ( lowest_scheduled_idle != NULL ) {
+ (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
+ }
- _Scheduler_Exchange_idle_thread(
- node,
- lowest_scheduled,
- _Scheduler_Node_get_idle( lowest_scheduled )
- );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
}
}
@@ -881,8 +983,11 @@ static inline bool _Scheduler_SMP_Enqueue(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
bool needs_help;
@@ -890,7 +995,13 @@ static inline bool _Scheduler_SMP_Enqueue(
lowest_scheduled = ( *get_lowest_scheduled )( context, node );
- if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
+ if (
+ ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &lowest_scheduled->Node.Chain
+ )
+ ) {
_Scheduler_SMP_Enqueue_to_scheduled(
context,
node,
@@ -898,10 +1009,14 @@ static inline bool _Scheduler_SMP_Enqueue(
lowest_scheduled,
insert_scheduled,
move_from_scheduled_to_ready,
- allocate_processor
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
needs_help = false;
} else {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
needs_help = true;
}
@@ -928,7 +1043,7 @@ static inline bool _Scheduler_SMP_Enqueue(
* @param allocate_processor Function to allocate a processor to a node
* based on the rules of the scheduler.
*/
-static inline bool _Scheduler_SMP_Enqueue_scheduled(
+static inline void _Scheduler_SMP_Enqueue_scheduled(
Scheduler_Context *context,
Scheduler_Node *const node,
Priority_Control insert_priority,
@@ -938,12 +1053,22 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
Scheduler_SMP_Insert insert_ready,
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
+ Thread_Control *node_idle;
+
+ node_idle = _Scheduler_Release_idle_thread_if_necessary(
+ node,
+ release_idle_node,
+ context
+ );
+
while ( true ) {
- Scheduler_Node *highest_ready;
- Scheduler_Try_to_schedule_action action;
+ Scheduler_Node *highest_ready;
+ Scheduler_SMP_Action action;
highest_ready = ( *get_highest_ready )( context, node );
@@ -952,12 +1077,13 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
* it now on the scheduled or ready set.
*/
if (
- node->sticky_level > 0
- && ( *order )( &insert_priority, &highest_ready->Node.Chain )
+ node->sticky_level > 0 && ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &highest_ready->Node.Chain
+ )
) {
- ( *insert_scheduled )( context, node, insert_priority );
-
- if ( _Scheduler_Node_get_idle( node ) != NULL ) {
+ if ( node_idle != NULL ) {
Thread_Control *owner;
ISR_lock_Context lock_context;
@@ -965,77 +1091,50 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled(
_Thread_Scheduler_acquire_critical( owner, &lock_context );
if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
- _Thread_Scheduler_cancel_need_for_help(
- owner,
- _Thread_Get_CPU( owner )
- );
- _Scheduler_Discard_idle_thread(
- context,
- owner,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
+ Per_CPU_Control *cpu;
+
+ _Scheduler_SMP_Cancel_ask_for_help( owner );
_Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+ cpu = _Thread_Get_CPU( node_idle );
+ _Thread_Set_CPU( owner, cpu );
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
+ } else {
+ Thread_Control *new_idle;
+
+ _Thread_Scheduler_release_critical( owner, &lock_context );
+ new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Assert_Unused_variable_equals( new_idle, node_idle );
}
-
- _Thread_Scheduler_release_critical( owner, &lock_context );
}
- return false;
+ ( *insert_scheduled )( context, node, insert_priority );
+
+ return;
}
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- _Scheduler_Node_get_idle( node ),
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
- Thread_Control *idle;
-
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
node,
+ node_idle,
allocate_processor
);
- ( *insert_ready )( context, node, insert_priority );
( *move_from_ready_to_scheduled )( context, highest_ready );
-
- idle = _Scheduler_Release_idle_thread(
- context,
- node,
- _Scheduler_SMP_Release_idle_thread
- );
- return ( idle == NULL );
- } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_SCHEDULED
- );
-
( *insert_ready )( context, node, insert_priority );
- ( *move_from_ready_to_scheduled )( context, highest_ready );
-
- _Scheduler_Exchange_idle_thread(
- highest_ready,
- node,
- _Scheduler_Node_get_idle( node )
- );
- return false;
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
}
}
@@ -1059,7 +1158,9 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
*
* @param context The scheduler context instance.
* @param victim The node of the thread that is repressed by the newly scheduled thread.
- * @param victim_cpu The cpu to allocate.
+ * @param cpu is the processor to allocate.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the set of
* ready nodes.
* @param get_highest_ready Function to get the highest ready node.
@@ -1071,46 +1172,44 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Scheduler_SMP_Action action;
- do {
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *extract_from_scheduled )( context, victim );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- NULL,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Allocate_processor(
context,
highest_ready,
- victim,
- victim_cpu,
+ cpu,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1118,7 +1217,6 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
*
* @param context The scheduler context instance.
* @param victim The node of the thread that is repressed by the newly scheduled thread.
- * @param victim_cpu The cpu to allocate.
* @param extract_from_ready Function to extract a node from the set of
* ready nodes.
* @param get_highest_ready Function to get the highest ready node.
@@ -1130,45 +1228,49 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
Scheduler_Context *context,
Scheduler_Node *victim,
- Per_CPU_Control *victim_cpu,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
- Scheduler_Try_to_schedule_action action;
+ Thread_Control *victim_idle;
+ Scheduler_SMP_Action action;
- do {
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
+ victim,
+ release_idle_node,
+ context
+ );
+
+ while ( true ) {
Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
- action = _Scheduler_Try_to_schedule_node(
- context,
+ action = _Scheduler_SMP_Try_to_schedule(
highest_ready,
- NULL,
- _Scheduler_SMP_Get_idle_thread
+ get_idle_node,
+ context
);
- if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
_Scheduler_SMP_Preempt(
context,
highest_ready,
victim,
+ victim_idle,
allocate_processor
);
( *move_from_ready_to_scheduled )( context, highest_ready );
- } else {
- _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
-
- _Scheduler_SMP_Node_change_state(
- highest_ready,
- SCHEDULER_SMP_NODE_BLOCKED
- );
-
- ( *extract_from_ready )( context, highest_ready );
+ return;
}
- } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
+ ( *extract_from_ready )( context, highest_ready );
+ }
}
/**
@@ -1195,39 +1297,61 @@ static inline void _Scheduler_SMP_Block(
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
+ int sticky_level;
+ ISR_lock_Context lock_context;
Scheduler_SMP_Node_state node_state;
- Per_CPU_Control *thread_cpu;
+ Per_CPU_Control *cpu;
- node_state = _Scheduler_SMP_Node_state( node );
+ sticky_level = node->sticky_level;
+ --sticky_level;
+ node->sticky_level = sticky_level;
+ _Assert( sticky_level >= 0 );
- thread_cpu = _Scheduler_Block_node(
- context,
- thread,
- node,
- node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Get_idle_thread
- );
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
+ cpu = _Thread_Get_CPU( thread );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
- if ( thread_cpu != NULL ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ node_state = _Scheduler_SMP_Node_state( node );
- if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- ( *extract_from_scheduled )( context, node );
- _Scheduler_SMP_Schedule_highest_ready(
- context,
- node,
- thread_cpu,
- extract_from_ready,
- get_highest_ready,
- move_from_ready_to_scheduled,
- allocate_processor
- );
- } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
- ( *extract_from_ready )( context, node );
+ if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
+ if (
+ node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
+ _Scheduler_Node_get_idle( node ) == NULL
+ ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Thread_Set_CPU( idle, cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
}
+
+ return;
+ }
+
+ _Assert( _Scheduler_Node_get_user( node ) == thread );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ cpu,
+ extract_from_scheduled,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node
+ );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ ( *extract_from_ready )( context, node );
}
}
@@ -1242,52 +1366,60 @@ static inline void _Scheduler_SMP_Block(
* of a context.
*/
static inline void _Scheduler_SMP_Unblock(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Update update,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
- bool unblock;
+ Priority_Control priority;
+
+ _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );
+
+ ++node->sticky_level;
+ _Assert( node->sticky_level > 0 );
node_state = _Scheduler_SMP_Node_state( node );
- unblock = _Scheduler_Unblock_node(
- context,
- thread,
- node,
- node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- _Scheduler_SMP_Release_idle_thread
- );
- if ( unblock ) {
- Priority_Control priority;
- bool needs_help;
+ if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Scheduler_Discard_idle_thread(
+ thread,
+ node,
+ release_idle_node,
+ context
+ );
- priority = _Scheduler_Node_get_priority( node );
- priority = SCHEDULER_PRIORITY_PURIFY( priority );
+ return;
+ }
- if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
- ( *update )( context, node, priority );
- }
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );
- if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
- Priority_Control insert_priority;
+ priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
- needs_help = ( *enqueue )( context, node, insert_priority );
- } else {
- _Assert( node_state == SCHEDULER_SMP_NODE_READY );
- _Assert( node->sticky_level > 0 );
- _Assert( node->idle == NULL );
- needs_help = true;
- }
+ if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
+ ( *update )( context, node, priority );
+ }
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Priority_Control insert_priority;
+ bool needs_help;
- if ( needs_help ) {
- _Scheduler_Ask_for_help( thread );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ needs_help = ( *enqueue )( context, node, insert_priority );
+
+ if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
+ _Scheduler_SMP_Request_ask_for_help( thread );
}
+ } else {
+ _Assert( node_state == SCHEDULER_SMP_NODE_READY );
+ _Assert( node->sticky_level > 0 );
+ _Assert( node->idle == NULL );
+ _Scheduler_SMP_Request_ask_for_help( thread );
}
}
@@ -1302,6 +1434,8 @@ static inline void _Scheduler_SMP_Unblock(
* @param context The scheduler instance context.
* @param thread The thread for the operation.
* @param[in, out] node The node to update the priority of.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready
* queue of the scheduler context.
* @param update Function to update the priority of a node in the scheduler
@@ -1311,14 +1445,15 @@ static inline void _Scheduler_SMP_Unblock(
* @param ask_for_help Function to perform a help request.
*/
static inline void _Scheduler_SMP_Update_priority(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Update update,
- Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Enqueue enqueue_scheduled,
- Scheduler_SMP_Ask_for_help ask_for_help
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
+ Scheduler_SMP_Ask_for_help ask_for_help
)
{
Priority_Control priority;
@@ -1339,7 +1474,7 @@ static inline void _Scheduler_SMP_Update_priority(
node_state = _Scheduler_SMP_Node_state( node );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
( *update )( context, node, priority );
( *enqueue_scheduled )( context, node, insert_priority );
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
@@ -1361,21 +1496,23 @@ static inline void _Scheduler_SMP_Update_priority(
* @param context The scheduler instance context.
* @param thread The thread for the operation.
* @param node The node of the thread that yields.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready
* queue of the scheduler context.
* @param enqueue Function to enqueue a node with a given priority.
* @param enqueue_scheduled Function to enqueue a scheduled node.
*/
static inline void _Scheduler_SMP_Yield(
- Scheduler_Context *context,
- Thread_Control *thread,
- Scheduler_Node *node,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Enqueue enqueue_scheduled
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled
)
{
- bool needs_help;
Scheduler_SMP_Node_state node_state;
Priority_Control insert_priority;
@@ -1384,19 +1521,11 @@ static inline void _Scheduler_SMP_Yield(
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
( *enqueue_scheduled )( context, node, insert_priority );
- needs_help = false;
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
( *extract_from_ready )( context, node );
-
- needs_help = ( *enqueue )( context, node, insert_priority );
- } else {
- needs_help = true;
- }
-
- if ( needs_help ) {
- _Scheduler_Ask_for_help( thread );
+ (void) ( *enqueue )( context, node, insert_priority );
}
}
@@ -1456,7 +1585,8 @@ static inline bool _Scheduler_SMP_Ask_for_help(
Scheduler_SMP_Insert insert_scheduled,
Scheduler_SMP_Move move_from_scheduled_to_ready,
Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_Node *lowest_scheduled;
@@ -1485,49 +1615,54 @@ static inline bool _Scheduler_SMP_Ask_for_help(
insert_priority = _Scheduler_SMP_Node_priority( node );
- if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
- _Thread_Scheduler_cancel_need_for_help(
- thread,
- _Thread_Get_CPU( thread )
- );
+ if (
+ ( *order )(
+ &insert_priority,
+ &node->Node.Chain,
+ &lowest_scheduled->Node.Chain
+ )
+ ) {
+ Thread_Control *lowest_scheduled_idle;
+
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
_Thread_Scheduler_release_critical( thread, &lock_context );
+ lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
+ lowest_scheduled,
+ release_idle_node,
+ context
+ );
+
_Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
+ lowest_scheduled_idle,
allocate_processor
);
- ( *insert_scheduled )( context, node, insert_priority );
( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+ ( *insert_scheduled )( context, node, insert_priority );
- _Scheduler_Release_idle_thread(
- context,
- lowest_scheduled,
- _Scheduler_SMP_Release_idle_thread
- );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
+
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
( *insert_ready )( context, node, insert_priority );
success = false;
}
} else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Thread_Scheduler_cancel_need_for_help(
- thread,
- _Thread_Get_CPU( thread )
- );
+ _Scheduler_SMP_Cancel_ask_for_help( thread );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
_Scheduler_Discard_idle_thread(
- context,
thread,
node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ context
);
- _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
- _Thread_Scheduler_release_critical( thread, &lock_context );
success = true;
} else {
_Thread_Scheduler_release_critical( thread, &lock_context );
@@ -1580,6 +1715,8 @@ static inline void _Scheduler_SMP_Reconsider_help_request(
* @param[in, out] thread The thread to change to @a next_state.
* @param[in, out] node The node to withdraw.
* @param next_state The new state for @a thread.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param get_highest_ready Function to get the highest ready node.
@@ -1593,10 +1730,12 @@ static inline void _Scheduler_SMP_Withdraw_node(
Thread_Control *thread,
Scheduler_Node *node,
Thread_Scheduler_state next_state,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node
)
{
ISR_lock_Context lock_context;
@@ -1605,27 +1744,32 @@ static inline void _Scheduler_SMP_Withdraw_node(
_Thread_Scheduler_acquire_critical( thread, &lock_context );
node_state = _Scheduler_SMP_Node_state( node );
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- Per_CPU_Control *thread_cpu;
+ Per_CPU_Control *cpu;
- thread_cpu = _Thread_Get_CPU( thread );
+ _Assert( thread == _Scheduler_Node_get_user( node ) );
+ cpu = _Thread_Get_CPU( thread );
_Scheduler_Thread_change_state( thread, next_state );
_Thread_Scheduler_release_critical( thread, &lock_context );
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ _Assert( _Scheduler_Node_get_user( node ) == thread );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
_Scheduler_SMP_Schedule_highest_ready(
context,
node,
- thread_cpu,
+ cpu,
+ extract_from_scheduled,
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node
);
} else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
_Thread_Scheduler_release_critical( thread, &lock_context );
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
( *extract_from_ready )( context, node );
} else {
_Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
@@ -1634,6 +1778,97 @@ static inline void _Scheduler_SMP_Withdraw_node(
}
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+static inline void _Scheduler_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Update update,
+ Scheduler_SMP_Enqueue enqueue
+)
+{
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+ Scheduler_Context *context;
+ Priority_Control insert_priority;
+ Priority_Control priority;
+
+ context = _Scheduler_Get_context( scheduler );
+ priority = _Scheduler_Node_get_priority( node );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
+
+ if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
+ ( *update )( context, node, priority );
+ }
+
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ (void) ( *enqueue )( context, node, insert_priority );
+ }
+}
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+static inline void _Scheduler_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
+)
+{
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Node_get_idle( node );
+
+ if ( idle != NULL ) {
+ Scheduler_Context *context;
+
+ context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ _Thread_Get_CPU( idle ),
+ extract_from_scheduled,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node
+ );
+ }
+ }
+}
+
+/**
* @brief Starts the idle thread on the given processor.
*
* @param context The scheduler context instance.
@@ -1660,7 +1895,6 @@ static inline void _Scheduler_SMP_Do_start_idle(
_Thread_Set_CPU( idle, cpu );
( *register_idle )( context, &node->Base, cpu );
_Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}
/**
@@ -1673,11 +1907,11 @@ static inline void _Scheduler_SMP_Do_start_idle(
* @param register_idle Function to register the idle thread for a cpu.
*/
static inline void _Scheduler_SMP_Add_processor(
- Scheduler_Context *context,
- Thread_Control *idle,
- Scheduler_SMP_Has_ready has_ready,
- Scheduler_SMP_Enqueue enqueue_scheduled,
- Scheduler_SMP_Register_idle register_idle
+ Scheduler_Context *context,
+ Thread_Control *idle,
+ Scheduler_SMP_Has_ready has_ready,
+ Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
+ Scheduler_SMP_Register_idle register_idle
)
{
Scheduler_SMP_Context *self;
@@ -1685,7 +1919,6 @@ static inline void _Scheduler_SMP_Add_processor(
self = _Scheduler_SMP_Get_self( context );
idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
- _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
node = _Thread_Scheduler_get_home_node( idle );
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
@@ -1706,6 +1939,8 @@ static inline void _Scheduler_SMP_Add_processor(
*
* @param context The scheduler context instance.
* @param cpu The processor to remove from.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param enqueue Function to enqueue a node with a given priority.
@@ -1713,10 +1948,13 @@ static inline void _Scheduler_SMP_Add_processor(
* @return The idle thread of @a cpu.
*/
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
- Scheduler_Context *context,
- Per_CPU_Control *cpu,
- Scheduler_SMP_Extract extract_from_ready,
- Scheduler_SMP_Enqueue enqueue
+ Scheduler_Context *context,
+ Per_CPU_Control *cpu,
+ Scheduler_SMP_Extract extract_from_scheduled,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Enqueue enqueue,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Context *self;
@@ -1736,39 +1974,37 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
chain_node = _Chain_Next( chain_node );
} while ( _Thread_Get_CPU( victim_user ) != cpu );
- _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
+ ( *extract_from_scheduled )( &self->Base, victim_node );
victim_owner = _Scheduler_Node_get_owner( victim_node );
if ( !victim_owner->is_idle ) {
- Scheduler_Node *idle_node;
+ Thread_Control *victim_idle;
+ Scheduler_Node *idle_node;
+ Priority_Control insert_priority;
- _Scheduler_Release_idle_thread(
- &self->Base,
+ victim_idle = _Scheduler_Release_idle_thread_if_necessary(
victim_node,
- _Scheduler_SMP_Release_idle_thread
+ release_idle_node,
+ &self->Base
);
- idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
- idle_node = _Thread_Scheduler_get_home_node( idle );
- ( *extract_from_ready )( &self->Base, idle_node );
+ idle_node = ( *get_idle_node )( &self->Base );
+ idle = _Scheduler_Node_get_owner( idle_node );
_Scheduler_SMP_Preempt(
&self->Base,
idle_node,
victim_node,
+ victim_idle,
_Scheduler_SMP_Allocate_processor_exact
);
- if ( !_Chain_Is_empty( &self->Scheduled ) ) {
- Priority_Control insert_priority;
-
- insert_priority = _Scheduler_SMP_Node_priority( victim_node );
- insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- ( *enqueue )( context, victim_node, insert_priority );
- }
+ _Assert( !_Chain_Is_empty( &self->Scheduled ) );
+ insert_priority = _Scheduler_SMP_Node_priority( victim_node );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ ( *enqueue )( &self->Base, victim_node, insert_priority );
} else {
_Assert( victim_owner == victim_user );
_Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
idle = victim_owner;
- _Scheduler_SMP_Exctract_idle_thread( idle );
}
return idle;
@@ -1784,6 +2020,8 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
* @param[in, out] node The node to set the affinity of.
* @param arg The affinity for @a node.
* @param set_affinity Function to set the affinity of a node.
+ * @param extract_from_scheduled Function to extract a node from the set of
+ * scheduled nodes.
* @param extract_from_ready Function to extract a node from the ready queue
* of the scheduler context.
* @param get_highest_ready Function to get the highest ready node.
@@ -1799,11 +2037,14 @@ static inline void _Scheduler_SMP_Set_affinity(
Scheduler_Node *node,
void *arg,
Scheduler_SMP_Set_affinity set_affinity,
+ Scheduler_SMP_Extract extract_from_scheduled,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
Scheduler_SMP_Move move_from_ready_to_scheduled,
Scheduler_SMP_Enqueue enqueue,
- Scheduler_SMP_Allocate_processor allocate_processor
+ Scheduler_SMP_Allocate_processor allocate_processor,
+ Scheduler_Get_idle_node get_idle_node,
+ Scheduler_Release_idle_node release_idle_node
)
{
Scheduler_SMP_Node_state node_state;
@@ -1814,15 +2055,16 @@ static inline void _Scheduler_SMP_Set_affinity(
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- _Scheduler_SMP_Extract_from_scheduled( context, node );
+ ( *extract_from_scheduled )( context, node );
_Scheduler_SMP_Preempt_and_schedule_highest_ready(
context,
node,
- _Thread_Get_CPU( thread ),
extract_from_ready,
get_highest_ready,
move_from_ready_to_scheduled,
- allocate_processor
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
);
( *set_affinity )( context, node, arg );
( *enqueue )( context, node, insert_priority );
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index a3a19d80c1..8db3ae8634 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -161,6 +161,8 @@ typedef struct {
_Scheduler_strong_APA_Ask_for_help, \
_Scheduler_strong_APA_Reconsider_help_request, \
_Scheduler_strong_APA_Withdraw_node, \
+ _Scheduler_strong_APA_Make_sticky, \
+ _Scheduler_strong_APA_Clean_sticky, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_default_Pin_or_unpin_not_supported, \
_Scheduler_strong_APA_Add_processor, \
@@ -169,7 +171,6 @@ typedef struct {
_Scheduler_default_Node_destroy, \
_Scheduler_default_Release_job, \
_Scheduler_default_Cancel_job, \
- _Scheduler_default_Tick, \
_Scheduler_strong_APA_Start_idle, \
_Scheduler_strong_APA_Set_affinity \
}
@@ -280,6 +281,66 @@ void _Scheduler_strong_APA_Withdraw_node(
);
/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_strong_APA_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_strong_APA_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Makes the node sticky.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to make sticky.
+ */
+void _Scheduler_strong_APA_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
+ * @brief Cleans the sticky property from the node.
+ *
+ * @param scheduler is the scheduler of the node.
+ *
+ * @param[in, out] the_thread is the thread owning the node.
+ *
+ * @param[in, out] node is the scheduler node to clean the sticky property.
+ */
+void _Scheduler_strong_APA_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+/**
* @brief Adds the idle thread to a processor.
*
* @param scheduler The scheduler control instance.
diff --git a/cpukit/include/rtems/score/status.h b/cpukit/include/rtems/score/status.h
index 236ae52d7b..96c0f1f9af 100644
--- a/cpukit/include/rtems/score/status.h
+++ b/cpukit/include/rtems/score/status.h
@@ -106,6 +106,8 @@ typedef enum {
STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EINVAL ),
STATUS_INTERRUPTED =
STATUS_BUILD( STATUS_CLASSIC_INTERNAL_ERROR, EINTR ),
+ STATUS_INTERNAL_ERROR =
+ STATUS_BUILD( STATUS_CLASSIC_INTERNAL_ERROR, ENOTSUP ),
STATUS_INVALID_ADDRESS =
STATUS_BUILD( STATUS_CLASSIC_INVALID_ADDRESS, EFAULT ),
STATUS_INVALID_ID =
diff --git a/cpukit/include/rtems/score/thread.h b/cpukit/include/rtems/score/thread.h
index e23261701a..c3c37eb160 100644
--- a/cpukit/include/rtems/score/thread.h
+++ b/cpukit/include/rtems/score/thread.h
@@ -76,14 +76,6 @@ extern "C" {
*@{
*/
-#define RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE
-
-/*
- * With the addition of the Constant Block Scheduler (CBS),
- * this feature is needed even when POSIX is disabled.
- */
-#define RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT
-
#if defined(RTEMS_DEBUG)
#define RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT
#endif
@@ -148,27 +140,47 @@ typedef struct {
} Thread_Entry_information;
/**
- * The following lists the algorithms used to manage the thread cpu budget.
- *
- * Reset Timeslice: At each context switch, reset the time quantum.
- * Exhaust Timeslice: Only reset the quantum once it is consumed.
- * Callout: Execute routine when budget is consumed.
+ * @brief This structure contains operations which manage the CPU budget of a
+ * thread.
*/
-typedef enum {
- THREAD_CPU_BUDGET_ALGORITHM_NONE,
- THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE,
- #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
- THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE,
- #endif
- #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
- THREAD_CPU_BUDGET_ALGORITHM_CALLOUT
- #endif
-} Thread_CPU_budget_algorithms;
+typedef struct {
+ /**
+ * @brief This operation is called at each clock tick for the executing
+ * thread.
+ */
+ void ( *at_tick )( Thread_Control * );
+
+ /**
+ * @brief This operation is called right before a context switch to the
+ * thread is performed.
+ */
+ void ( *at_context_switch )( Thread_Control * );
-/** This defines thes the entry point for the thread specific timeslice
- * budget management algorithm.
+ /**
+ * @brief This operation is called to initialize the CPU budget of the
+ * thread.
+ */
+ void ( *initialize )( Thread_Control * );
+} Thread_CPU_budget_operations;
+
+/**
+ * @brief This structure is used to control the CPU budget of a thread.
*/
-typedef void (*Thread_CPU_budget_algorithm_callout )( Thread_Control * );
+typedef struct {
+ /**
+ * @brief If this member is not NULL, then it references the CPU budget
+ * operations used to manage the CPU budget of the thread, otherwise it is
+ * NULL.
+ */
+ const Thread_CPU_budget_operations *operations;
+
+ /**
+ * @brief This member contains the count of the time quantum that this thread
+ * is allowed to consume until an action takes place defined by the CPU
+ * budget operations.
+ */
+ uint32_t available;
+} Thread_CPU_budget_control;
/**
* The following structure contains the information which defines
@@ -182,12 +194,13 @@ typedef struct {
* it started.
*/
bool is_preemptible;
- /** This field indicates the CPU budget algorith. */
- Thread_CPU_budget_algorithms budget_algorithm;
- /** This field is the routine to invoke when the CPU allotment is
- * consumed.
+
+ /**
+ * @brief This member may provide the CPU budget operations activated when a
+ * thread is initialized before it is started or restarted.
*/
- Thread_CPU_budget_algorithm_callout budget_callout;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
+
/** This field is the initial ISR disable level of this thread. */
uint32_t isr_level;
/** This field is the initial priority. */
@@ -293,10 +306,24 @@ typedef struct {
Chain_Control Scheduler_nodes;
/**
- * @brief Node for the Per_CPU_Control::Threads_in_need_for_help chain.
+ * @brief If an ask for help request for the thread is pending, then this
+ * member references the processor on which the ask for help request is
+ * registered, otherwise it is NULL.
*
- * This chain is protected by the Per_CPU_Control::Lock lock of the assigned
- * processor.
+ * Depending on the state of the thread and usage context, this member is
+ * protected by the Per_CPU_Control::Lock lock of the referenced processor,
+ * the scheduler lock of the thread (Thread_Scheduler_control::Lock), or the
+ * thread state lock.
+ */
+ struct Per_CPU_Control *ask_for_help_cpu;
+
+ /**
+ * @brief This member is the node for the
+ * Per_CPU_Control::Threads_in_need_for_help chain.
+ *
+ * This chain is protected by the Per_CPU_Control::Lock lock of the processor
+ * on which the ask for help request is registered
+ * (Thread_Scheduler_control::ask_for_help_cpu).
*/
Chain_Node Help_node;
@@ -377,7 +404,7 @@ typedef union {
* The mutually exclusive wait state flags are
* - @ref THREAD_WAIT_STATE_INTEND_TO_BLOCK,
* - @ref THREAD_WAIT_STATE_BLOCKED, and
- * - @ref THREAD_WAIT_STATE_READY_AGAIN.
+ * - @ref THREAD_WAIT_STATE_READY.
*/
typedef unsigned int Thread_Wait_flags;
@@ -772,9 +799,7 @@ struct _Thread_Control {
* the following fields
*
* - RTEMS_API_Control::Signal,
- * - Thread_Control::budget_algorithm,
- * - Thread_Control::budget_callout,
- * - Thread_Control::cpu_time_budget,
+ * - Thread_Control::CPU_budget,
* - Thread_Control::current_state,
* - Thread_Control::Post_switch_actions,
* - Thread_Control::Scheduler::control, and
@@ -841,22 +866,24 @@ struct _Thread_Control {
*/
bool was_created_with_inherited_scheduler;
- /** This field is the length of the time quantum that this thread is
- * allowed to consume. The algorithm used to manage limits on CPU usage
- * is specified by budget_algorithm.
+ /**
+ * @brief This member contains the CPU budget control used to manage the CPU
+ * budget of the thread.
*/
- uint32_t cpu_time_budget;
- /** This field is the algorithm used to manage this thread's time
- * quantum. The algorithm may be specified as none which case,
- * no limit is in place.
+ Thread_CPU_budget_control CPU_budget;
+
+ /**
+ * @brief This member contains the amount of CPU time consumed by this thread
+ * since it was created.
*/
- Thread_CPU_budget_algorithms budget_algorithm;
- /** This field is the method invoked with the budgeted time is consumed. */
- Thread_CPU_budget_algorithm_callout budget_callout;
- /** This field is the amount of CPU time consumed by this thread
- * since it was created.
+ Timestamp_Control cpu_time_used;
+
+ /**
+ * @brief This member contains the amount of CPU time consumed by this thread
+ * at the time of the last reset of the CPU usage by
+ * rtems_cpu_usage_reset().
*/
- Timestamp_Control cpu_time_used;
+ Timestamp_Control cpu_time_used_at_last_reset;
/** This field contains information about the starting state of
* this thread.
diff --git a/cpukit/include/rtems/score/threadcpubudget.h b/cpukit/include/rtems/score/threadcpubudget.h
new file mode 100644
index 0000000000..bcbaa11bdb
--- /dev/null
+++ b/cpukit/include/rtems/score/threadcpubudget.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreThread
+ *
+ * @brief This header file provides interfaces used to implement the CPU budget
+ * management of threads.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_THREADCPUBUDGET_H
+#define _RTEMS_SCORE_THREADCPUBUDGET_H
+
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup RTEMSScoreThread
+ *
+ * @{
+ */
+
+/**
+ * @brief Does nothing.
+ *
+ * @param the_thread is an unused parameter.
+ */
+void _Thread_CPU_budget_do_nothing( Thread_Control *the_thread );
+
+/**
+ * @brief Sets the available CPU budget of the thread to the configured clock
+ * ticks per timeslice.
+ *
+ * @param the_thread is the thread to set the available CPU budget.
+ */
+void _Thread_CPU_budget_set_to_ticks_per_timeslice(
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Consumes one time quantum of the available CPU budget of the thread
+ * and yields the thread if the available CPU budget is fully consumed.
+ *
+ * While the thread enabled the non-preemptive mode or is not ready, no time
+ * quantum is consumed.
+ *
+ * @param the_thread is the thread to operate on.
+ */
+void _Thread_CPU_budget_consume_and_yield( Thread_Control *the_thread );
+
+/**
+ * @brief These CPU budget operations allocate timeslices to the thread.
+ *
+ * The timeslice is not reset at a context switch to the thread. Once a
+ * timeslice is consumed, the thread yields.
+ */
+extern const Thread_CPU_budget_operations _Thread_CPU_budget_exhaust_timeslice;
+
+/**
+ * @brief These CPU budget operations allocate timeslices to the thread.
+ *
+ * The timeslice is reset at a context switch to the thread. Once a timeslice
+ * is consumed, the thread yields.
+ */
+extern const Thread_CPU_budget_operations _Thread_CPU_budget_reset_timeslice;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_THREADCPUBUDGET_H */
diff --git a/cpukit/include/rtems/score/threadimpl.h b/cpukit/include/rtems/score/threadimpl.h
index ace42d8023..0a672aa837 100644
--- a/cpukit/include/rtems/score/threadimpl.h
+++ b/cpukit/include/rtems/score/threadimpl.h
@@ -176,19 +176,9 @@ typedef struct {
Priority_Control priority;
/**
- * @brief The thread's budget algorithm.
+ * @brief The thread's initial CPU budget operations.
*/
- Thread_CPU_budget_algorithms budget_algorithm;
-
- /**
- * @brief The thread's initial budget callout.
- */
- Thread_CPU_budget_algorithm_callout budget_callout;
-
- /**
- * @brief The thread's initial CPU time budget.
- */
- uint32_t cpu_time_budget;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
/**
* @brief 32-bit unsigned integer name of the object for the thread.
@@ -800,17 +790,29 @@ void _Thread_Priority_replace(
*/
void _Thread_Priority_update( Thread_queue_Context *queue_context );
+#if defined(RTEMS_SMP)
/**
- * @brief Updates the priority of the thread and changes it sticky level.
+ * @brief Updates the priority of the thread and makes its home scheduler node
+ * sticky.
*
- * @param the_thread The thread.
- * @param sticky_level_change The new value for the sticky level.
+ * @param the_thread is the thread to work on.
*/
-#if defined(RTEMS_SMP)
-void _Thread_Priority_and_sticky_update(
- Thread_Control *the_thread,
- int sticky_level_change
-);
+void _Thread_Priority_update_and_make_sticky( Thread_Control *the_thread );
+
+/**
+ * @brief Updates the priority of the thread and cleans the sticky property of
+ * its home scheduler node.
+ *
+ * @param the_thread is the thread to work on.
+ */
+void _Thread_Priority_update_and_clean_sticky( Thread_Control *the_thread );
+
+/**
+ * @brief Updates the priority of the thread.
+ *
+ * @param the_thread is the thread to update the priority.
+ */
+void _Thread_Priority_update_ignore_sticky( Thread_Control *the_thread );
#endif
/**
@@ -1242,15 +1244,41 @@ RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
#endif
/**
- * @brief Gets the used cpu time of the thread and stores it in the given
- * Timestamp_Control.
+ * @brief Gets the used processor time of the thread throughout its entire
+ * lifetime.
*
- * @param the_thread The thread to get the used cpu time of.
- * @param[out] cpu_time_used Stores the used cpu time of @a the_thread.
+ * @param[in, out] the_thread is the thread.
+ *
+ * @return Returns the used processor time of the thread throughout its entire
+ * lifetime.
*/
-void _Thread_Get_CPU_time_used(
- Thread_Control *the_thread,
- Timestamp_Control *cpu_time_used
+Timestamp_Control _Thread_Get_CPU_time_used( Thread_Control *the_thread );
+
+/**
+ * @brief Gets the used processor time of the thread throughout its entire
+ * lifetime if the caller already acquired the thread state and home
+ * scheduler locks.
+ *
+ * @param[in, out] the_thread is the thread.
+ *
+ * @return Returns the used processor time of the thread throughout its entire
+ * lifetime.
+ */
+Timestamp_Control _Thread_Get_CPU_time_used_locked(
+ Thread_Control *the_thread
+);
+
+/**
+ * @brief Gets the used processor time of the thread after the last CPU usage
+ * reset.
+ *
+ * @param[in, out] the_thread is the thread.
+ *
+ * @return Returns the used processor time of the thread after the last CPU usage
+ * reset.
+ */
+Timestamp_Control _Thread_Get_CPU_time_used_after_last_reset(
+ Thread_Control *the_thread
);
/**
@@ -1466,32 +1494,6 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
}
#endif
-#if defined(RTEMS_SMP)
-/**
- * @brief Cancels the thread's need for help.
- *
- * @param the_thread The thread to cancel the help request of.
- * @param cpu The cpu to get the lock context of in order to
- * cancel the help request.
- */
-RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
- Thread_Control *the_thread,
- Per_CPU_Control *cpu
-)
-{
- ISR_lock_Context lock_context;
-
- _Per_CPU_Acquire( cpu, &lock_context );
-
- if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
- _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
- _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
- }
-
- _Per_CPU_Release( cpu, &lock_context );
-}
-#endif
-
/**
* @brief Gets the home scheduler of the thread.
*
@@ -2215,14 +2217,18 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
}
/**
- * @brief The initial thread wait flags value set by _Thread_Initialize().
+ * @brief Mask to get the thread wait state flags.
*/
-#define THREAD_WAIT_FLAGS_INITIAL 0x0U
+#define THREAD_WAIT_STATE_MASK 0xffU
/**
- * @brief Mask to get the thread wait state flags.
+ * @brief Indicates that the thread does not wait on something.
+ *
+ * In this wait state, the wait class is zero. This wait state is set
+ * initially by _Thread_Initialize() and after each wait operation once the
+ * thread is ready again.
*/
-#define THREAD_WAIT_STATE_MASK 0xffU
+#define THREAD_WAIT_STATE_READY 0x0U
/**
* @brief Indicates that the thread begins with the blocking operation.
@@ -2239,13 +2245,6 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
#define THREAD_WAIT_STATE_BLOCKED 0x2U
/**
- * @brief Indicates that a condition to end the thread wait occurred.
- *
- * This could be a timeout, a signal, an event or a resource availability.
- */
-#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
-
-/**
* @brief Mask to get the thread wait class flags.
*/
#define THREAD_WAIT_CLASS_MASK 0xff00U
@@ -2636,7 +2635,7 @@ void _Thread_Do_unpin(
RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
- _Assert( executing == _Thread_Executing );
+ _Assert( executing == _Thread_Get_executing() );
executing->Scheduler.pin_level += THREAD_PIN_STEP;
#else
@@ -2658,7 +2657,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Unpin(
#if defined(RTEMS_SMP)
unsigned int pin_level;
- _Assert( executing == _Thread_Executing );
+ _Assert( executing == _Per_CPU_Get_executing( cpu_self ) );
pin_level = executing->Scheduler.pin_level;
_Assert( pin_level > 0 );
@@ -2688,5 +2687,35 @@ RTEMS_INLINE_ROUTINE void _Thread_Unpin(
#include <rtems/score/threadmp.h>
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup RTEMSScoreThread
+ *
+ * @brief Removes the watchdog timer from the thread and lets the thread
+ * continue its execution.
+ *
+ * @param[in, out] the_thread is the thread.
+ *
+ * @param status is the thread wait status.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Timer_remove_and_continue(
+ Thread_Control *the_thread,
+ Status_Control status
+)
+{
+ _Thread_Timer_remove( the_thread );
+#if defined(RTEMS_MULTIPROCESSING)
+ _Thread_MP_Extract_proxy( the_thread );
+#endif
+ _Thread_Continue( the_thread, status );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
/* end of include file */
diff --git a/cpukit/include/rtems/score/threadmp.h b/cpukit/include/rtems/score/threadmp.h
index 6cc68e6320..e10661a573 100644
--- a/cpukit/include/rtems/score/threadmp.h
+++ b/cpukit/include/rtems/score/threadmp.h
@@ -98,6 +98,19 @@ Thread_Control *_Thread_MP_Find_proxy (
((_the_thread) == _MPCI_Receive_server_tcb)
/**
+ * @brief Extracts the proxy of the thread if necessary.
+ *
+ * This routine ensures that if there is a proxy for this thread on another
+ * node, it is also dealt with. A proxy is a data that is on the thread queue
+ * on the remote node and acts as a proxy for the local thread. If the local
+ * thread was waiting on a remote operation, then the remote side of the
+ * operation must be cleaned up.
+ *
+ * @param[in, out] the_thread is the thread to determine the proxy.
+ */
+void _Thread_MP_Extract_proxy( Thread_Control *the_thread );
+
+/**
 * @brief Frees a proxy control block to the inactive chain of free proxy
* control blocks.
*/
diff --git a/cpukit/include/rtems/score/threadqimpl.h b/cpukit/include/rtems/score/threadqimpl.h
index 7e6f2665be..7dd7250acf 100644
--- a/cpukit/include/rtems/score/threadqimpl.h
+++ b/cpukit/include/rtems/score/threadqimpl.h
@@ -984,24 +984,6 @@ void _Thread_queue_Resume(
void _Thread_queue_Extract( Thread_Control *the_thread );
/**
- * @brief Extracts the_thread from the_thread_queue.
- *
- * This routine extracts the_thread from the_thread_queue
- * and ensures that if there is a proxy for this task on
- * another node, it is also dealt with. A proxy is a data
- * data that is on the thread queue on the remote node and
- * acts as a proxy for the local thread. If the local thread
- * was waiting on a remote operation, then the remote side
- * of the operation must be cleaned up.
- *
- * @param[in, out] the_thread The pointer to a thread control block that
- * is to be removed
- */
-void _Thread_queue_Extract_with_proxy(
- Thread_Control *the_thread
-);
-
-/**
* @brief Surrenders the thread queue previously owned by the thread to the
* first enqueued thread.
*
@@ -1314,18 +1296,23 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Destroy(
#endif
}
+#if defined(RTEMS_MULTIPROCESSING)
/**
* @brief Does nothing.
*
* @param the_proxy This parameter is unused.
* @param mp_id This parameter is unused.
*/
-#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_MP_callout_do_nothing(
Thread_Control *the_proxy,
Objects_Id mp_id
);
+bool _Thread_queue_MP_set_callout(
+ Thread_Control *the_thread,
+ const Thread_queue_Context *queue_context
+);
+
/**
* @brief Unblocks the proxy of the thread.
*
diff --git a/cpukit/include/rtems/score/timecounter.h b/cpukit/include/rtems/score/timecounter.h
index 8185140f9b..39f0dc353e 100644
--- a/cpukit/include/rtems/score/timecounter.h
+++ b/cpukit/include/rtems/score/timecounter.h
@@ -8,7 +8,7 @@
*/
/*
- * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2015, 2021 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -248,6 +248,28 @@ extern volatile int32_t _Timecounter_Time_uptime;
*/
extern struct timecounter *_Timecounter;
+/**
+ * @brief Handler doing the NTP update second processing shall have this type.
+ *
+ * @param[in, out] adjustment is the NTP time adjustment.
+ *
+ * @param[in, out] newsec is the number of seconds since Unix epoch.
+ */
+typedef void ( *Timecounter_NTP_update_second )(
+ int64_t *adjustment,
+ time_t *newsec
+);
+
+/**
+ * @brief Sets the NTP update second handler.
+ *
+ * @param handler is the new NTP update second handler used to carry out the
+ * NTP update second processing.
+ */
+void _Timecounter_Set_NTP_update_second(
+ Timecounter_NTP_update_second handler
+);
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/watchdogimpl.h b/cpukit/include/rtems/score/watchdogimpl.h
index 7b364b8828..ba1a884a3d 100644
--- a/cpukit/include/rtems/score/watchdogimpl.h
+++ b/cpukit/include/rtems/score/watchdogimpl.h
@@ -351,33 +351,50 @@ RTEMS_INLINE_ROUTINE bool _Watchdog_Is_scheduled(
}
/**
- * @brief Sets the first node of the header.
+ * @brief Sets the first watchdog of the watchdog collection to the next
+ * watchdog of the current first watchdog.
*
- * Sets the first node of the header to either the leftmost child node of the
- * watchdog control node, or if not present sets it to the right child node of
- * the watchdog control node. if both are not present, the new first node is
- * the parent node of the current first node.
+ * This function may be used during watchdog removals, see _Watchdog_Remove()
+ * and _Watchdog_Tickle().
*
- * @param[in, out] header The watchdog header.
- * @param the_watchdog The watchdog control node for the operation.
+ * @param[in, out] header is the watchdog collection header.
+ *
+ * @param first is the current first watchdog which should be removed
+ * afterwards.
*/
RTEMS_INLINE_ROUTINE void _Watchdog_Next_first(
- Watchdog_Header *header,
- Watchdog_Control *the_watchdog
+ Watchdog_Header *header,
+ const Watchdog_Control *first
)
{
- RBTree_Node *node = _RBTree_Right( &the_watchdog->Node.RBTree );
-
- if ( node != NULL ) {
- RBTree_Node *left;
-
- while ( ( left = _RBTree_Left( node ) ) != NULL ) {
- node = left;
- }
+ RBTree_Node *right;
- header->first = node;
+ /*
+ * This function uses the following properties of red-black trees:
+ *
+ * 1. Every leaf (NULL) is black.
+ *
+ * 2. If a node is red, then both its children are black.
+ *
+ * 3. Every simple path from a node to a descendant leaf contains the same
+ * number of black nodes.
+ *
+ * The first node has no left child. So every path from the first node has
+ * exactly one black node (including leafs). The first node cannot have a
+ * non-leaf black right child. It may have a red right child. In this case
+ * both children must be leafs.
+ */
+ _Assert( header->first == &first->Node.RBTree );
+ _Assert( _RBTree_Left( &first->Node.RBTree ) == NULL );
+ right = _RBTree_Right( &first->Node.RBTree );
+
+ if ( right != NULL ) {
+ _Assert( RB_COLOR( right, Node ) == RB_RED );
+ _Assert( _RBTree_Left( right ) == NULL );
+ _Assert( _RBTree_Right( right ) == NULL );
+ header->first = right;
} else {
- header->first = _RBTree_Parent( &the_watchdog->Node.RBTree );
+ header->first = _RBTree_Parent( &first->Node.RBTree );
}
}
diff --git a/cpukit/include/rtems/score/wkspace.h b/cpukit/include/rtems/score/wkspace.h
index a3af86e878..75660980a1 100644
--- a/cpukit/include/rtems/score/wkspace.h
+++ b/cpukit/include/rtems/score/wkspace.h
@@ -20,8 +20,6 @@
#define _RTEMS_SCORE_WKSPACE_H
#include <rtems/score/heap.h>
-#include <rtems/score/interr.h>
-#include <rtems/score/memory.h>
#ifdef __cplusplus
extern "C" {
@@ -42,17 +40,11 @@ extern "C" {
extern Heap_Control _Workspace_Area;
/**
- * @brief Initilizes the workspace handler.
+ * @brief Initializes the workspace handler.
*
* This routine performs the initialization necessary for this handler.
- *
- * @param mem The memory information
- * @param extend The extension handler for the new workspace.
*/
-void _Workspace_Handler_initialization(
- const Memory_Information *mem,
- Heap_Initialization_or_extend_handler extend
-);
+void _Workspace_Handler_initialization( void );
/**
* @brief Allocates a memory block of the specified size from the workspace.
diff --git a/cpukit/include/rtems/score/wkspaceinitmulti.h b/cpukit/include/rtems/score/wkspaceinitmulti.h
new file mode 100644
index 0000000000..18520199ce
--- /dev/null
+++ b/cpukit/include/rtems/score/wkspaceinitmulti.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief This header file provides the implementation of
+ * _Workspace_Initialize_for_multiple_areas().
+ */
+
+/*
+ * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_WKSPACEINITMULTI_H
+#define _RTEMS_SCORE_WKSPACEINITMULTI_H
+
+#include <rtems/score/wkspace.h>
+#include <rtems/score/heapimpl.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/memory.h>
+#include <rtems/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief Initializes the RTEMS Workspace with support for more than one memory
+ * area.
+ *
+ * This implementation should be used by BSPs which provide more than one
+ * memory area via _Memory_Get() to implement
+ * _Workspace_Handler_initialization().
+ */
+RTEMS_INLINE_ROUTINE void _Workspace_Initialize_for_multiple_areas( void )
+{
+ const Memory_Information *mem;
+ Heap_Initialization_or_extend_handler init_or_extend;
+ uintptr_t remaining;
+ bool unified;
+ uintptr_t page_size;
+ uintptr_t overhead;
+ size_t i;
+
+ mem = _Memory_Get();
+ page_size = CPU_HEAP_ALIGNMENT;
+ remaining = rtems_configuration_get_work_space_size();
+ init_or_extend = _Heap_Initialize;
+ unified = rtems_configuration_get_unified_work_area();
+ overhead = _Heap_Area_overhead( page_size );
+
+ for ( i = 0; i < _Memory_Get_count( mem ); ++i ) {
+ Memory_Area *area;
+ uintptr_t free_size;
+
+ area = _Memory_Get_area( mem, i );
+ free_size = _Memory_Get_free_size( area );
+
+ if ( free_size > overhead ) {
+ uintptr_t space_available;
+ uintptr_t size;
+
+ if ( unified ) {
+ size = free_size;
+ } else {
+ if ( remaining > 0 ) {
+ size = remaining < free_size - overhead ?
+ remaining + overhead : free_size;
+ } else {
+ size = 0;
+ }
+ }
+
+ space_available = ( *init_or_extend )(
+ &_Workspace_Area,
+ _Memory_Get_free_begin( area ),
+ size,
+ page_size
+ );
+
+ _Memory_Consume( area, size );
+
+ if ( space_available < remaining ) {
+ remaining -= space_available;
+ } else {
+ remaining = 0;
+ }
+
+ init_or_extend = _Heap_Extend;
+ }
+ }
+
+ if ( remaining > 0 ) {
+ _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
+ }
+
+ _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_WKSPACEINITMULTI_H */
diff --git a/cpukit/include/rtems/score/wkspaceinitone.h b/cpukit/include/rtems/score/wkspaceinitone.h
new file mode 100644
index 0000000000..c68e1b5db1
--- /dev/null
+++ b/cpukit/include/rtems/score/wkspaceinitone.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief This header file provides the implementation of
+ * _Workspace_Initialize_for_one_area().
+ */
+
+/*
+ * Copyright (C) 2012, 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_WKSPACEINITONE_H
+#define _RTEMS_SCORE_WKSPACEINITONE_H
+
+#include <rtems/score/wkspace.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/heapimpl.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/memory.h>
+#include <rtems/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup RTEMSScoreWorkspace
+ *
+ * @brief Initializes the RTEMS Workspace with support for exactly one memory
+ * area.
+ *
+ * This implementation should be used by BSPs which provide exactly one memory
+ * area via _Memory_Get() to implement _Workspace_Handler_initialization().
+ */
+RTEMS_INLINE_ROUTINE void _Workspace_Initialize_for_one_area( void )
+{
+ uintptr_t page_size;
+ uintptr_t wkspace_size;
+ uintptr_t wkspace_size_with_overhead;
+ uintptr_t available_size;
+
+ page_size = CPU_HEAP_ALIGNMENT;
+ wkspace_size = rtems_configuration_get_work_space_size();
+ wkspace_size_with_overhead = wkspace_size + _Heap_Area_overhead( page_size );
+
+ if ( wkspace_size < wkspace_size_with_overhead ) {
+ const Memory_Information *mem;
+ Memory_Area *area;
+ uintptr_t free_size;
+ uintptr_t size;
+
+ mem = _Memory_Get();
+ _Assert( _Memory_Get_count( mem ) == 1 );
+
+ area = _Memory_Get_area( mem, 0 );
+ free_size = _Memory_Get_free_size( area );
+
+ if ( rtems_configuration_get_unified_work_area() ) {
+ size = free_size;
+ } else {
+ size = wkspace_size_with_overhead;
+ }
+
+ available_size = _Heap_Initialize(
+ &_Workspace_Area,
+ _Memory_Get_free_begin( area ),
+ size,
+ page_size
+ );
+
+ _Memory_Consume( area, size );
+ } else {
+ /* An unsigned integer overflow happened */
+ available_size = 0;
+ }
+
+ if ( wkspace_size > available_size ) {
+ _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
+ }
+
+ _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_WKSPACEINITONE_H */
diff --git a/cpukit/libcsupport/src/__times.c b/cpukit/libcsupport/src/__times.c
index 7bb7e0e9ca..a37c662654 100644
--- a/cpukit/libcsupport/src/__times.c
+++ b/cpukit/libcsupport/src/__times.c
@@ -65,7 +65,8 @@ clock_t _times(
* of ticks since boot and the number of ticks executed by this
* this thread.
*/
- _Thread_Get_CPU_time_used( _Thread_Get_executing(), &cpu_time_used );
+ cpu_time_used =
+ _Thread_Get_CPU_time_used_after_last_reset( _Thread_Get_executing() );
ptms->tms_utime = ((clock_t) cpu_time_used) / tick_interval;
return ptms->tms_stime;
diff --git a/cpukit/libcsupport/src/malloc_initialize.c b/cpukit/libcsupport/src/malloc_initialize.c
deleted file mode 100644
index fb0999df01..0000000000
--- a/cpukit/libcsupport/src/malloc_initialize.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * @file
- *
- * @brief RTEMS_Malloc_Initialize() implementation.
- */
-
-/*
- * COPYRIGHT (c) 1989-2012.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <rtems/malloc.h>
-#include <rtems/score/wkspace.h>
-
-#include "malloc_p.h"
-
-#ifdef RTEMS_NEWLIB
-static Heap_Control _Malloc_Heap;
-
-Heap_Control *RTEMS_Malloc_Initialize(
- const Memory_Information *mem,
- Heap_Initialization_or_extend_handler extend
-)
-{
- Heap_Control *heap;
- Heap_Initialization_or_extend_handler init_or_extend;
- uintptr_t page_size;
- size_t i;
-
- heap = &_Malloc_Heap;
- RTEMS_Malloc_Heap = heap;
- init_or_extend = _Heap_Initialize;
- page_size = CPU_HEAP_ALIGNMENT;
-
- for (i = 0; i < _Memory_Get_count( mem ); ++i) {
- Memory_Area *area;
- uintptr_t space_available;
-
- area = _Memory_Get_area( mem, i );
- space_available = ( *init_or_extend )(
- heap,
- _Memory_Get_free_begin( area ),
- _Memory_Get_free_size( area ),
- page_size
- );
-
- if ( space_available > 0 ) {
- _Memory_Consume( area, _Memory_Get_free_size( area ) );
- init_or_extend = extend;
- }
- }
-
- if ( init_or_extend == _Heap_Initialize ) {
- _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_HEAP );
- }
-
- return heap;
-}
-#else
-Heap_Control *RTEMS_Malloc_Initialize(
- const Memory_Information *mem,
- Heap_Initialization_or_extend_handler extend
-)
-{
- /* FIXME: Dummy function */
- return NULL;
-}
-#endif
-
-Heap_Control *_Workspace_Malloc_initialize_separate( void )
-{
- return RTEMS_Malloc_Initialize( _Memory_Get(), _Heap_Extend );
-}
diff --git a/cpukit/libcsupport/src/mallocheap.c b/cpukit/libcsupport/src/mallocheap.c
index 006362f209..ec14e73763 100644
--- a/cpukit/libcsupport/src/mallocheap.c
+++ b/cpukit/libcsupport/src/mallocheap.c
@@ -44,7 +44,7 @@
Heap_Control *RTEMS_Malloc_Heap;
-static void _Malloc_Initialize( void )
+void _Malloc_Initialize( void )
{
RTEMS_Malloc_Heap = ( *_Workspace_Malloc_initializer )();
}
diff --git a/cpukit/libdebugger/rtems-debugger-aarch64.c b/cpukit/libdebugger/rtems-debugger-aarch64.c
new file mode 100644
index 0000000000..279c2d61ef
--- /dev/null
+++ b/cpukit/libdebugger/rtems-debugger-aarch64.c
@@ -0,0 +1,1884 @@
+/*
+ * Copyright (c) 2016-2019 Chris Johns <chrisj@rtems.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define TARGET_DEBUG 0
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+
+/* Defined by linkcmds.base */
+extern char bsp_section_text_begin[];
+extern char bsp_section_text_end[];
+extern char bsp_section_fast_text_begin[];
+extern char bsp_section_fast_text_end[];
+
+#include <libcpu/mmu-vmsav8-64.h>
+
+#include <rtems.h>
+#include <rtems/score/aarch64-system-registers.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/threadimpl.h>
+
+#include <rtems/debugger/rtems-debugger-bsp.h>
+
+#include "rtems-debugger-target.h"
+#include "rtems-debugger-threads.h"
+
+#if TARGET_DEBUG
+#include <rtems/bspIo.h>
+#endif
+
+/*
+ * Structure used to manage a task executing a function on available cores on
+ * a scheduler.
+ */
+typedef struct {
+ rtems_id allCPUsBarrier;
+ rtems_task_entry work_function;
+ rtems_task_argument arg;
+ rtems_status_code sc;
+} run_across_cpus_context;
+
+/*
+ * The function that runs as the body of the task which moves itself among the
+ * various cores registered to a scheduler.
+ */
+static rtems_task run_across_cpus_task( rtems_task_argument arg )
+{
+ uint32_t released = 0;
+ rtems_status_code sc;
+ run_across_cpus_context *ctx = (run_across_cpus_context *) arg;
+ cpu_set_t set;
+ cpu_set_t scheduler_set;
+ rtems_id scheduler_id;
+
+ sc = rtems_task_get_scheduler( RTEMS_SELF, &scheduler_id );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ ctx->sc = sc;
+ rtems_task_exit();
+ }
+
+ CPU_ZERO( &scheduler_set );
+ sc = rtems_scheduler_get_processor_set(
+ scheduler_id,
+ sizeof( scheduler_set ),
+ &scheduler_set
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ ctx->sc = sc;
+ rtems_task_exit();
+ }
+
+ for (
+ int cpu_index = 0;
+ cpu_index < rtems_scheduler_get_processor_maximum();
+ cpu_index++
+ ) {
+ if ( !CPU_ISSET( cpu_index, &scheduler_set ) ) {
+ continue;
+ }
+
+ CPU_ZERO( &set );
+ CPU_SET( cpu_index, &set );
+ sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( set ), &set );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ ctx->sc = sc;
+ rtems_task_exit();
+ }
+
+ /* execute task on selected CPU */
+ ctx->work_function( ctx->arg );
+ }
+
+ sc = rtems_barrier_release( ctx->allCPUsBarrier, &released );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ ctx->sc = sc;
+ }
+
+ rtems_task_exit();
+}
+
+/*
+ * The function used to run a provided function with arbitrary argument across
+ * all cores registered to the current scheduler. This is similar to the Linux
+ * kernel's on_each_cpu() call and always waits for the task to complete before
+ * returning.
+ */
+static rtems_status_code run_across_cpus(
+ rtems_task_entry task_entry,
+ rtems_task_argument arg
+)
+{
+ rtems_status_code sc;
+ rtems_id Task_id;
+ run_across_cpus_context ctx;
+
+ ctx.work_function = task_entry;
+ ctx.arg = arg;
+ ctx.sc = RTEMS_SUCCESSFUL;
+
+ memset( &ctx.allCPUsBarrier, 0, sizeof( ctx.allCPUsBarrier ) );
+ sc = rtems_barrier_create(
+ rtems_build_name( 'B', 'c', 'p', 'u' ),
+ RTEMS_BARRIER_MANUAL_RELEASE,
+ 2,
+ &ctx.allCPUsBarrier
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ return sc;
+ }
+
+ sc = rtems_task_create(
+ rtems_build_name( 'T', 'c', 'p', 'u' ),
+ 1,
+ RTEMS_MINIMUM_STACK_SIZE * 2,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_FLOATING_POINT | RTEMS_DEFAULT_ATTRIBUTES,
+ &Task_id
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ rtems_barrier_delete( ctx.allCPUsBarrier );
+ return sc;
+ }
+
+ sc = rtems_task_start(
+ Task_id,
+ run_across_cpus_task,
+ ( rtems_task_argument ) & ctx
+ );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ rtems_task_delete( Task_id );
+ rtems_barrier_delete( ctx.allCPUsBarrier );
+ return sc;
+ }
+
+ /* wait on task */
+ sc = rtems_barrier_wait( ctx.allCPUsBarrier, RTEMS_NO_TIMEOUT );
+
+ if ( sc != RTEMS_SUCCESSFUL ) {
+ rtems_task_delete( Task_id );
+ rtems_barrier_delete( ctx.allCPUsBarrier );
+ return sc;
+ }
+
+ rtems_barrier_delete( ctx.allCPUsBarrier );
+
+ if ( ctx.sc != RTEMS_SUCCESSFUL ) {
+ return ctx.sc;
+ }
+
+ return sc;
+}
+
+/*
+ * Number of registers.
+ */
+#define RTEMS_DEBUGGER_NUMREGS 68
+
+/*
+ * Number of bytes per type of register.
+ */
+#define RTEMS_DEBUGGER_REG_BYTES 8
+
+/* Debugger registers layout. See aarch64-core.xml in GDB source. */
+#define REG_X0 0
+#define REG_X1 1
+#define REG_X2 2
+#define REG_X3 3
+#define REG_X4 4
+#define REG_X5 5
+#define REG_X6 6
+#define REG_X7 7
+#define REG_X8 8
+#define REG_X9 9
+#define REG_X10 10
+#define REG_X11 11
+#define REG_X12 12
+#define REG_X13 13
+#define REG_X14 14
+#define REG_X15 15
+#define REG_X16 16
+#define REG_X17 17
+#define REG_X18 18
+#define REG_X19 19
+#define REG_X20 20
+#define REG_X21 21
+#define REG_X22 22
+#define REG_X23 23
+#define REG_X24 24
+#define REG_X25 25
+#define REG_X26 26
+#define REG_X27 27
+#define REG_X28 28
+#define REG_FP 29
+#define REG_LR 30
+#define REG_SP 31
+/*
+ * PC isn't a real directly accessible register on AArch64, but is exposed via
+ * ELR_EL1 in exception context.
+ */
+#define REG_PC 32
+/* CPSR is defined as 32-bit by GDB */
+#define REG_CPS 33
+/* Debugger registers layout. See aarch64-fpu.xml in GDB source. */
+#define REG_V0 34
+#define REG_V1 35
+#define REG_V2 36
+#define REG_V3 37
+#define REG_V4 38
+#define REG_V5 39
+#define REG_V6 40
+#define REG_V7 41
+#define REG_V8 42
+#define REG_V9 43
+#define REG_V10 44
+#define REG_V11 45
+#define REG_V12 46
+#define REG_V13 47
+#define REG_V14 48
+#define REG_V15 49
+#define REG_V16 50
+#define REG_V17 51
+#define REG_V18 52
+#define REG_V19 53
+#define REG_V20 54
+#define REG_V21 55
+#define REG_V22 56
+#define REG_V23 57
+#define REG_V24 58
+#define REG_V25 59
+#define REG_V26 60
+#define REG_V27 61
+#define REG_V28 62
+#define REG_V29 63
+#define REG_V30 64
+#define REG_V31 65
+/* FPSR and FPCR are defined as 32-bit by GDB */
+#define REG_FPS 66
+#define REG_FPC 67
+
+/**
+ * Register offset table with the total as the last entry.
+ *
+ * Check this table in gdb with the command:
+ *
+ * maint print registers
+ */
+static const size_t aarch64_reg_offsets[ RTEMS_DEBUGGER_NUMREGS + 1 ] = {
+ REG_X0 * 8,
+ REG_X1 * 8,
+ REG_X2 * 8,
+ REG_X3 * 8,
+ REG_X4 * 8,
+ REG_X5 * 8,
+ REG_X6 * 8,
+ REG_X7 * 8,
+ REG_X8 * 8,
+ REG_X9 * 8,
+ REG_X10 * 8,
+ REG_X11 * 8,
+ REG_X12 * 8,
+ REG_X13 * 8,
+ REG_X14 * 8,
+ REG_X15 * 8,
+ REG_X16 * 8,
+ REG_X17 * 8,
+ REG_X18 * 8,
+ REG_X19 * 8,
+ REG_X20 * 8,
+ REG_X21 * 8,
+ REG_X22 * 8,
+ REG_X23 * 8,
+ REG_X24 * 8,
+ REG_X25 * 8,
+ REG_X26 * 8,
+ REG_X27 * 8,
+ REG_X28 * 8,
+ REG_FP * 8,
+ REG_LR * 8,
+ REG_SP * 8,
+ REG_PC * 8,
+ REG_CPS * 8,
+/* Floating point registers, CPS is 32-bit */
+#define V0_OFFSET ( REG_CPS * 8 + 4 )
+ V0_OFFSET,
+ V0_OFFSET + 16 * ( REG_V1 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V2 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V3 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V4 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V5 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V6 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V7 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V8 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V9 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V10 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V11 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V12 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V13 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V14 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V15 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V16 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V17 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V18 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V19 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V20 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V21 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V22 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V23 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V24 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V25 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V26 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V27 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V28 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V29 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V30 - REG_V0 ),
+ V0_OFFSET + 16 * ( REG_V31 - REG_V0 ),
+/* FPSR and FPCR are defined as 32-bit by GDB */
+#define FPS_OFFSET ( V0_OFFSET + 16 * ( REG_V31 - REG_V0 ) + 16 )
+ FPS_OFFSET,
+/* FPC follows FPS */
+ FPS_OFFSET + 4,
+/* Total size */
+ FPS_OFFSET + 8,
+};
+
+/*
+ * Number of bytes of registers.
+ */
+#define RTEMS_DEBUGGER_NUMREGBYTES \
+ aarch64_reg_offsets[ RTEMS_DEBUGGER_NUMREGS ]
+
+/**
+ * Print the exception frame.
+ */
+#define EXC_FRAME_PRINT( _out, _prefix, _frame ) \
+ do { \
+ _out( \
+ _prefix " X0 = %016" PRIx64 " X1 = %016" PRIx64 \
+ " X2 = %016" PRIx64 " X3 = %016" PRIx64 "\n", \
+ _frame->register_x0, \
+ _frame->register_x1, \
+ _frame->register_x2, \
+ _frame->register_x3 \
+ ); \
+ _out( \
+ _prefix " X4 = %016" PRIx64 " X5 = %016" PRIx64 \
+ " X6 = %016" PRIx64 " X7 = %016" PRIx64 "\n", \
+ _frame->register_x4, \
+ _frame->register_x5, \
+ _frame->register_x6, \
+ _frame->register_x7 \
+ ); \
+ _out( \
+ _prefix " X8 = %016" PRIx64 " X9 = %016" PRIx64 \
+ " X10 = %016" PRIx64 " X11 = %016" PRIx64 "\n", \
+ _frame->register_x8, \
+ _frame->register_x9, \
+ _frame->register_x10, \
+ _frame->register_x11 \
+ ); \
+ _out( \
+ _prefix " X12 = %016" PRIx64 " X13 = %016" PRIx64 \
+ " X14 = %016" PRIx64 " X15 = %016" PRIx64 "\n", \
+ _frame->register_x12, \
+ _frame->register_x13, \
+ _frame->register_x14, \
+ _frame->register_x15 \
+ ); \
+ _out( \
+ _prefix " X16 = %016" PRIx64 " X17 = %016" PRIx64 \
+ " X18 = %016" PRIx64 " X19 = %016" PRIx64 "\n", \
+ _frame->register_x16, \
+ _frame->register_x17, \
+ _frame->register_x18, \
+ _frame->register_x19 \
+ ); \
+ _out( \
+ _prefix " X20 = %016" PRIx64 " X21 = %016" PRIx64 \
+ " X22 = %016" PRIx64 " X23 = %016" PRIx64 "\n", \
+ _frame->register_x20, \
+ _frame->register_x21, \
+ _frame->register_x22, \
+ _frame->register_x23 \
+ ); \
+ _out( \
+ _prefix " X24 = %016" PRIx64 " X25 = %016" PRIx64 \
+ " X26 = %016" PRIx64 " X27 = %016" PRIx64 "\n", \
+ _frame->register_x24, \
+ _frame->register_x25, \
+ _frame->register_x26, \
+ _frame->register_x27 \
+ ); \
+ _out( \
+ _prefix " X28 = %016" PRIx64 " FP = %016" PRIx64 \
+ " LR = %016" PRIxPTR " SP = %016" PRIxPTR "\n", \
+ _frame->register_x28, \
+ _frame->register_fp, \
+ (intptr_t) _frame->register_lr, \
+ (intptr_t) _frame->register_sp \
+ ); \
+ _out( \
+ _prefix " PC = %016" PRIxPTR "\n", \
+ (intptr_t) _frame->register_pc \
+ ); \
+ _out( \
+ _prefix " CPSR = %08" PRIx64 " %c%c%c%c%c%c%c%c%c" \
+ " M:%" PRIx64 " %s\n", \
+ _frame->register_cpsr, \
+ ( _frame->register_cpsr & ( 1 << 31 ) ) != 0 ? 'N' : '-', \
+ ( _frame->register_cpsr & ( 1 << 30 ) ) != 0 ? 'Z' : '-', \
+ ( _frame->register_cpsr & ( 1 << 29 ) ) != 0 ? 'C' : '-', \
+ ( _frame->register_cpsr & ( 1 << 28 ) ) != 0 ? 'V' : '-', \
+ ( _frame->register_cpsr & ( 1 << 21 ) ) != 0 ? 'S' : '-', \
+ ( _frame->register_cpsr & ( 1 << 9 ) ) != 0 ? 'D' : '-', \
+ ( _frame->register_cpsr & ( 1 << 8 ) ) != 0 ? 'A' : '-', \
+ ( _frame->register_cpsr & ( 1 << 7 ) ) != 0 ? 'I' : '-', \
+ ( _frame->register_cpsr & ( 1 << 6 ) ) != 0 ? 'F' : '-', \
+ _frame->register_cpsr & 0x1f, \
+ aarch64_mode_label( _frame->register_cpsr & 0x1f ) \
+ ); \
+ } while ( 0 )
+
+/**
+ * The breakpoint instruction.
+ */
+static const uint8_t breakpoint[ 4 ] = { 0x00, 0x00, 0x20, 0xd4 };
+
+/**
+ * Target lock.
+ */
+RTEMS_INTERRUPT_LOCK_DEFINE( static, target_lock, "target_lock" )
+
+/**
+ * Is a session active?
+ */
+static bool debug_session_active;
+
+/*
+ * AArch64 debug hardware.
+ */
+static uint64_t hw_breakpoints;
+static uint64_t hw_watchpoints;
+
+#ifdef HARDWARE_BREAKPOINTS_NOT_USED
+/**
+ * Hardware break and watch points.
+ */
+typedef struct {
+ bool enabled;
+ bool loaded;
+ void *address;
+ size_t length;
+ CPU_Exception_frame *frame;
+ uint64_t control;
+ uint64_t value;
+} aarch64_debug_hwbreak;
+
+/*
+ * AArch64 guarantees that 2-16 breakpoints will be available in:
+ * DBGBCR<0-15>_EL1 (control)
+ * BT: BSP_FLD64(val, 20, 23) (breakpoint type, always 0x0 or 0x4, address match or mismatch)
+ * LBN: BSP_FLD64(val, 16, 19) (linked breakpoint number, always 0x0, not relevant given above)
+ * SSC: BSP_FLD64(val, 14, 15) (security state control, only 0x0 relevant)
+ * HMC: BSP_BIT64(13) (higher mode control, only 0x0 relevant)
+ * BAS: BSP_FLD64(val, 5, 8) (byte address select, always 0xF, other values denote debugging of AArch32 code)
+ * PMC: BSP_FLD64(val, 1, 2) (privilege mode control, only 0x1 relevant)
+ * E: BSP_BIT64(0) (enable, 0x1 when in use, 0x0 when disabled)
+ * DBGBVR<0-15>_EL1 (value, address)
+ * ID_AA64DFR0_EL1
+ * WRPs: BSP_FLD64(val, 20, 23) (watchpoints implemented - 1, 0x0 reserved so minimum 2)
+ * BRPs: BSP_FLD64(val, 12, 15) (breakpoints implemented - 1, 0x0 reserved so minimum 2)
+ * DebugVer: BSP_FLD64(val, 0, 3) (0x6 - 8, 0x7 - 8 w/ VHE, 0x8 - 8.2, 0x9 - 8.4)
+ */
+#define AARCH64_HW_BREAKPOINT_MAX ( 16 )
+
+/*
+ * Types of break points.
+ */
+#define AARCH64_HW_BP_TYPE_UNLINKED_INSTR_MATCH ( 0x0 << 20 )
+#define AARCH64_HW_BP_TYPE_UNLINKED_INSTR_MISMATCH ( 0x4 << 20 )
+
+/*
+ * Byte Address Select
+ */
+#define AARCH64_HW_BP_BAS_A64 ( 0xF << 5 )
+
+/*
+ * Privilege level, corresponds to PMC at 2:1
+ */
+#define AARCH64_HW_BP_PRIV_EL1 ( 0x1 << 1 )
+
+/*
+ * Breakpoint enable.
+ */
+#define AARCH64_HW_BP_ENABLE ( 0x1 )
+
+static aarch64_debug_hwbreak hw_breaks[ AARCH64_HW_BREAKPOINT_MAX ];
+#endif
+
+/*
+ * Target debugging support. Use this to debug the backend.
+ */
#if TARGET_DEBUG

/* Provided by the debugger server to serialise printk output. */
void rtems_debugger_printk_lock( rtems_interrupt_lock_context *lock_context );

void rtems_debugger_printk_unlock(
  rtems_interrupt_lock_context *lock_context
);

static void target_printk( const char *format, ... ) RTEMS_PRINTFLIKE( 1, 2 );

/*
 * printf-style trace output for debugging this backend, emitted via
 * vprintk under the debugger printk lock.
 */
static void target_printk( const char *format, ... )
{
  rtems_interrupt_lock_context lock_context;
  va_list ap;

  va_start( ap, format );
  rtems_debugger_printk_lock( &lock_context );
  vprintk( format, ap );
  rtems_debugger_printk_unlock( &lock_context );
  va_end( ap );
}

#else
/* Tracing disabled: the macro swallows its arguments unevaluated. */
#define target_printk( _fmt, ... )
#endif
+
/*
 * Map a saved-PSTATE mode field to a short human-readable label.
 * Unknown mode values yield "---".
 */
static const char *aarch64_mode_label( int mode )
{
  if ( mode == 0x0 ) {
    return "EL0t";
  }

  if ( mode == 0x4 ) {
    return "EL1t";
  }

  if ( mode == 0x5 ) {
    return "EL1h";
  }

  return "---";
}
+
/*
 * Probe the debug architecture: print the core identification, then read
 * ID_AA64DFR0_EL1 to determine the debug architecture version and the
 * number of hardware break-/watchpoint register pairs.
 *
 * Returns 0 on success, or -1 with errno set to EIO when the debug
 * version is outside the supported 6..9 range.
 */
static int aarch64_debug_probe( rtems_debugger_target *target )
{
  int debug_version;
  uint64_t val;
  const char *vl = "[Invalid version]";
  /* Indexed by DebugVer; entries 0-5 are architecturally invalid. */
  const char * const labels[] = {
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    "ARMv8.0",
    "ARMv8.0+VHE",
    "ARMv8.2",
    "ARMv8.4"
  };

  val = _AArch64_Read_midr_el1();
  rtems_debugger_printf(
    "rtems-db: aarch64 core: Architecture: %" PRIu64 " Variant: %" PRIu64 " " \
    "Implementor: %" PRIu64 " Part Number: %" PRIu64 " Revision: %" PRIu64 "\n",
    AARCH64_MIDR_EL1_ARCHITECTURE_GET( val ),
    AARCH64_MIDR_EL1_VARIANT_GET( val ),
    AARCH64_MIDR_EL1_IMPLEMENTER_GET( val ),
    AARCH64_MIDR_EL1_PARTNUM_GET( val ),
    AARCH64_MIDR_EL1_REVISION_GET( val )
  );

  val = _AArch64_Read_id_aa64dfr0_el1();

  debug_version = AARCH64_ID_AA64DFR0_EL1_DEBUGVER_GET( val );

  if ( debug_version < 6 || debug_version > 9 ) {
    rtems_debugger_printf(
      "rtems-db: aarch64 debug: %d not supported\n",
      debug_version
    );
    errno = EIO;
    return -1;
  }

  vl = labels[ debug_version ];
  /*
   * NOTE(review): per the field notes at the top of this section, BRPS and
   * WRPS encode "number implemented minus one"; the raw field value is
   * stored and printed here without the +1 adjustment -- confirm this is
   * intentional (it makes the loops below skip the last register pair).
   */
  hw_breakpoints = AARCH64_ID_AA64DFR0_EL1_BRPS_GET( val );
  hw_watchpoints = AARCH64_ID_AA64DFR0_EL1_WRPS_GET( val );

  rtems_debugger_printf(
    "rtems-db: aarch64 debug: %s (%d) " \
    "breakpoints:%" PRIu64 " watchpoints:%" PRIu64 "\n",
    vl,
    debug_version,
    hw_breakpoints,
    hw_watchpoints
  );

  return 0;
}
+
+#ifdef HARDWARE_BREAKPOINTS_NOT_USED
/*
 * Program DBGBCR<bp>_EL1 with the given control word.  The breakpoint
 * control registers are not index-addressable, hence the switch.
 * Out-of-range indices are silently ignored.
 *
 * Fix: the guard was "bp < 15", which excluded index 15 and made the
 * "case 15" branch unreachable; there are 16 register pairs (0-15).
 */
static void aarch64_debug_break_write_control( int bp, uint64_t control )
{
  if ( bp >= 0 && bp < 16 ) {
    switch ( bp ) {
      case 0:
        _AArch64_Write_dbgbcr0_el1( control );
        break;
      case 1:
        _AArch64_Write_dbgbcr1_el1( control );
        break;
      case 2:
        _AArch64_Write_dbgbcr2_el1( control );
        break;
      case 3:
        _AArch64_Write_dbgbcr3_el1( control );
        break;
      case 4:
        _AArch64_Write_dbgbcr4_el1( control );
        break;
      case 5:
        _AArch64_Write_dbgbcr5_el1( control );
        break;
      case 6:
        _AArch64_Write_dbgbcr6_el1( control );
        break;
      case 7:
        _AArch64_Write_dbgbcr7_el1( control );
        break;
      case 8:
        _AArch64_Write_dbgbcr8_el1( control );
        break;
      case 9:
        _AArch64_Write_dbgbcr9_el1( control );
        break;
      case 10:
        _AArch64_Write_dbgbcr10_el1( control );
        break;
      case 11:
        _AArch64_Write_dbgbcr11_el1( control );
        break;
      case 12:
        _AArch64_Write_dbgbcr12_el1( control );
        break;
      case 13:
        _AArch64_Write_dbgbcr13_el1( control );
        break;
      case 14:
        _AArch64_Write_dbgbcr14_el1( control );
        break;
      case 15:
        _AArch64_Write_dbgbcr15_el1( control );
        break;
    }
  }
}
+
/*
 * Program DBGBVR<bp>_EL1 with the breakpoint address.  Same structure and
 * same off-by-one fix as aarch64_debug_break_write_control(): the guard
 * was "bp < 15", leaving "case 15" unreachable.
 */
static void aarch64_debug_break_write_value( int bp, uint64_t value )
{
  if ( bp >= 0 && bp < 16 ) {
    switch ( bp ) {
      case 0:
        _AArch64_Write_dbgbvr0_el1( value );
        break;
      case 1:
        _AArch64_Write_dbgbvr1_el1( value );
        break;
      case 2:
        _AArch64_Write_dbgbvr2_el1( value );
        break;
      case 3:
        _AArch64_Write_dbgbvr3_el1( value );
        break;
      case 4:
        _AArch64_Write_dbgbvr4_el1( value );
        break;
      case 5:
        _AArch64_Write_dbgbvr5_el1( value );
        break;
      case 6:
        _AArch64_Write_dbgbvr6_el1( value );
        break;
      case 7:
        _AArch64_Write_dbgbvr7_el1( value );
        break;
      case 8:
        _AArch64_Write_dbgbvr8_el1( value );
        break;
      case 9:
        _AArch64_Write_dbgbvr9_el1( value );
        break;
      case 10:
        _AArch64_Write_dbgbvr10_el1( value );
        break;
      case 11:
        _AArch64_Write_dbgbvr11_el1( value );
        break;
      case 12:
        _AArch64_Write_dbgbvr12_el1( value );
        break;
      case 13:
        _AArch64_Write_dbgbvr13_el1( value );
        break;
      case 14:
        _AArch64_Write_dbgbvr14_el1( value );
        break;
      case 15:
        _AArch64_Write_dbgbvr15_el1( value );
        break;
    }
  }
}
+
/*
 * Fill in the shadow entry for breakpoint 'index' and program the value
 * and control registers: unlinked instruction-address-mismatch type, A64
 * byte-address select, EL1 privilege, enabled.  The address is aligned
 * down to a 4-byte instruction boundary.
 */
static inline void aarch64_debug_break_setup(
  uint8_t index,
  uint64_t address
)
{
  aarch64_debug_hwbreak *bp = &hw_breaks[ index ];

  bp->control = AARCH64_HW_BP_TYPE_UNLINKED_INSTR_MISMATCH |
                AARCH64_HW_BP_BAS_A64 |
                AARCH64_HW_BP_PRIV_EL1 |
                AARCH64_HW_BP_ENABLE;
  /* A64 instructions are 4 bytes; mask the low address bits. */
  uint64_t address_mask = 0x3;

  bp->value = (intptr_t) ( address & ~address_mask );
  aarch64_debug_break_write_value( index, bp->value );
  aarch64_debug_break_write_control( index, bp->control );
}
+
+static void aarch64_debug_break_clear( void )
+{
+ rtems_interrupt_lock_context lock_context;
+ aarch64_debug_hwbreak *bp = &hw_breaks[ 0 ];
+ int i;
+
+ rtems_interrupt_lock_acquire( &target_lock, &lock_context );
+
+ for ( i = 0; i < hw_breakpoints; ++i, ++bp ) {
+ bp->enabled = false;
+ bp->loaded = false;
+ }
+
+ rtems_interrupt_lock_release( &target_lock, &lock_context );
+}
+
+static void aarch64_debug_break_load( void )
+{
+ rtems_interrupt_lock_context lock_context;
+ aarch64_debug_hwbreak *bp = &hw_breaks[ 0 ];
+ int i;
+
+ rtems_interrupt_lock_acquire( &target_lock, &lock_context );
+
+ if ( bp->enabled && !bp->loaded ) {
+ aarch64_debug_set_context_id( 0xdead1111 );
+ aarch64_debug_break_write_value( 0, bp->value );
+ aarch64_debug_break_write_control( 0, bp->control );
+ }
+
+ ++bp;
+
+ for ( i = 1; i < hw_breakpoints; ++i, ++bp ) {
+ if ( bp->enabled && !bp->loaded ) {
+ bp->loaded = true;
+ aarch64_debug_break_write_value( i, bp->value );
+ aarch64_debug_break_write_control( i, bp->control );
+ }
+ }
+
+ rtems_interrupt_lock_release( &target_lock, &lock_context );
+}
+
+static void aarch64_debug_break_unload( void )
+{
+ rtems_interrupt_lock_context lock_context;
+ aarch64_debug_hwbreak *bp = &hw_breaks[ 0 ];
+ int i;
+
+ rtems_interrupt_lock_acquire( &target_lock, &lock_context );
+ aarch64_debug_set_context_id( 0 );
+
+ for ( i = 0; i < hw_breakpoints; ++i, ++bp ) {
+ bp->loaded = false;
+ aarch64_debug_break_write_control( i, 0 );
+ }
+
+ rtems_interrupt_lock_release( &target_lock, &lock_context );
+}
+
/*
 * Trace the enabled hardware breakpoints.  Compiles to an empty function
 * when TARGET_DEBUG is off, since target_printk() is then a no-op macro.
 */
static void aarch64_debug_break_dump( void )
{
#if TARGET_DEBUG
  aarch64_debug_hwbreak *bp = &hw_breaks[ 0 ];
  int i;

  for ( i = 0; i < hw_breakpoints; ++i, ++bp ) {
    if ( bp->enabled ) {
      target_printk(
        "[} bp: %d: control: %016" PRIx64 " addr: %016" PRIxPTR "\n",
        i,
        bp->control,
        (uintptr_t) bp->value
      );
    }
  }

#endif
}
+#endif
+
/* Mask IRQs at the current EL (DAIF immediate bit 0x2 = I). */
static void aarch64_debug_disable_interrupts( void )
{
  __asm__ volatile ( "msr DAIFSet, #0x2" );
}
+
/* Unmask IRQs (clears DAIF.I; counterpart of the disable above). */
static void aarch64_debug_enable_interrupts( void )
{
  __asm__ volatile ( "msr DAIFClr, #2\n" );
}
+
/*
 * Mask debug exceptions at the current EL (DAIF immediate bit 0x8 = D).
 * NOTE(review): there is no matching enable wrapper; the D flag is
 * cleared per-thread via the frame CPSR instead -- confirm.
 */
static void aarch64_debug_disable_debug_exceptions( void )
{
  __asm__ volatile ( "msr DAIFSet, #0x8" );
}
+
/*
 * Write CONTEXTIDR_EL1; used by the hardware breakpoint load/unload code
 * to arm/disarm the context-ID tag for breakpoint 0.
 */
static inline void aarch64_debug_set_context_id( const uint32_t id )
{
  _AArch64_Write_contextidr_el1( id );
}
+
/*
 * Describe this target to the debugger server: register table layout,
 * software breakpoint instruction, then probe the debug hardware.
 * Only software breakpoints are advertised in the capabilities.
 */
int rtems_debugger_target_configure( rtems_debugger_target *target )
{
  target->capabilities = ( RTEMS_DEBUGGER_TARGET_CAP_SWBREAK );
  target->reg_num = RTEMS_DEBUGGER_NUMREGS;
  target->reg_offset = aarch64_reg_offsets;
  target->breakpoint = &breakpoint[ 0 ];
  target->breakpoint_size = sizeof( breakpoint );
  return aarch64_debug_probe( target );
}
+
/* Trace a full exception frame via target_printk (no-op unless TARGET_DEBUG). */
static void target_print_frame( CPU_Exception_frame *frame )
{
  EXC_FRAME_PRINT( target_printk, "[} ", frame );
}
+
+/* returns true if cascade is required */
+static bool target_exception( CPU_Exception_frame *frame )
+{
+ target_printk(
+ "[} > frame = %016" PRIxPTR \
+ " sig=%d" \
+ " pra=%016" PRIxPTR "\n" \
+ "[} > esr=%016" PRIx64 \
+ " far=%016" PRIxPTR "\n",
+ (uintptr_t) frame,
+ rtems_debugger_target_exception_to_signal( frame ),
+ (uintptr_t) frame->register_pc,
+ (uint64_t) frame->register_syndrome,
+ (uintptr_t) frame->register_fault_address
+ );
+
+ target_print_frame( frame );
+
+ switch ( rtems_debugger_target_exception( frame ) ) {
+ case rtems_debugger_target_exc_consumed:
+ default:
+ break;
+ case rtems_debugger_target_exc_step:
+ break;
+ case rtems_debugger_target_exc_cascade:
+ target_printk( "rtems-db: unhandled exception: cascading\n" );
+ /* Continue in fatal error handler chain */
+ return true;
+ break;
+ }
+
+ target_printk(
+ "[} < resuming frame = %016" PRIxPTR "\n",
+ (uintptr_t) frame
+ );
+ target_print_frame( frame );
+
+#if TARGET_DEBUG
+ uint64_t mdscr = _AArch64_Read_mdscr_el1();
+#endif
+ target_printk(
+ "[} global stepping: %s\n",
+ mdscr & AARCH64_MDSCR_EL1_SS ? "yes" : "no"
+ );
+ target_printk(
+ "[} kernel self-debug: %s\n",
+ mdscr & AARCH64_MDSCR_EL1_KDE ? "yes" : "no"
+ );
+ target_printk(
+ "[} non-step/non-BRK debug events: %s\n",
+ mdscr & AARCH64_MDSCR_EL1_MDE ? "yes" : "no"
+ );
+ target_printk(
+ "[} OSLSR(should be 0x8): 0x%016" PRIx64 "\n",
+ _AArch64_Read_oslsr_el1()
+ );
+#ifdef HARDWARE_BREAKPOINTS_NOT_USED
+ aarch64_debug_break_dump();
+#endif
+ return false;
+}
+
/* Stringification helpers to paste the frame size constant into inline asm. */
#define xstr( a ) str( a )
#define str( a ) #a
#define FRAME_SIZE_STR xstr( AARCH64_EXCEPTION_FRAME_SIZE )
+
+/*
+ * This block of assembly must have a target branch function because GCC
+ * requires that SP not accumulate changes across an ASM block. Instead of
+ * changing the SP, we branch to a new function and never return since it was
+ * never going to return anyway.
+ */
/*
 * Switch SP selection, reserve a CPU_Exception_frame on the new stack and
 * tail-branch to jump_target(new_frame, old_frame).  Control never
 * returns to the expansion site.
 */
#define SWITCH_STACKS_AND_ALLOC( new_mode, old_frame, jump_target ) \
  __asm__ volatile ( \
    "msr spsel, #" new_mode "\n" /* switch to thread stack */ \
    "sub sp, sp, #" FRAME_SIZE_STR "\n" /* reserve space for CEF */ \
    "mov x0, sp\n" /* Set x0 to the new exception frame */ \
    "mov x1, %[old_frame]\n" /* Set x1 to the old exception frame */ \
    "b " #jump_target "\n" /* Jump to the specified function */ \
    : \
    : [ old_frame ] "r" ( old_frame ) \
    : "x0", "x1" )
+
/*
 * As SWITCH_STACKS_AND_ALLOC, but additionally forwards the local
 * "needs_cascade" flag (captured from the expansion scope) in x2.
 *
 * Fix: x2 is written by the asm but was missing from the clobber list,
 * so the compiler could keep a live value in x2 across the statement.
 */
#define SWITCH_STACKS_AND_ALLOC_WITH_CASCADE( new_mode, app_frame, \
                                              jump_target ) \
  __asm__ volatile ( \
    "msr spsel, #" new_mode "\n" /* switch to thread stack */ \
    "sub sp, sp, #" FRAME_SIZE_STR "\n" /* reserve space for CEF */ \
    "mov x0, sp\n" /* Set x0 to the new exception frame */ \
    "mov x1, %[app_frame]\n" /* Set x1 to the old exception frame */ \
    "mov x2, %[needs_cascade]\n" /* pass on whether cascade is needed */ \
    "b " #jump_target "\n" /* Jump to the specified function */ \
    : \
    : [ app_frame ] "r" ( app_frame ), \
      [ needs_cascade ] "r" ( needs_cascade ) \
    : "x0", "x1", "x2" )
+
+/*
+ * This block does not have an overall effect on SP since the spsel mode change
+ * preserves the original SP
+ */
#define DROP_OLD_FRAME( old_frame, old_mode, new_mode ) \
  __asm__ volatile ( \
    "msr spsel, #" old_mode "\n" /* switch to exception stack */ \
    "mov sp, %0\n" /* Reset SP to the beginning of the CEF */ \
    "add sp, sp, #" FRAME_SIZE_STR "\n" /* release space for CEF on exception stack */ \
    "msr spsel, #" new_mode "\n" /* switch to thread stack */ \
    : \
    : "r" ( old_frame ) ) \

/*
 * SPSel immediates used with the macros above: 1 selects SP_ELx (the
 * thread stack in this backend), 0 selects SP_EL0 (the exception stack).
 * NOTE(review): the trailing backslash after the DROP_OLD_FRAME body
 * extends the macro across the following blank line -- harmless but
 * fragile if code is ever inserted there.
 */
#define THREAD_MODE "1"
#define EXCEPTION_MODE "0"
+
void target_exception_stack_stage_3(
  CPU_Exception_frame *exc_frame,
  CPU_Exception_frame *app_frame,
  bool needs_cascade
);

/*
 * Final stage, entered by branch from stage 2: copy the (possibly
 * modified) frame back onto the exception stack, release the
 * thread-stack copy, then either cascade to the chained fatal error
 * handler or resume execution.  Never returns.
 */
void target_exception_stack_stage_3(
  CPU_Exception_frame *exc_frame,
  CPU_Exception_frame *app_frame,
  bool needs_cascade
)
{
  _AArch64_Exception_frame_copy( exc_frame, app_frame );
  DROP_OLD_FRAME( app_frame, THREAD_MODE, EXCEPTION_MODE );

  if ( needs_cascade ) {
    /* does not return */
    _AArch64_Exception_default( exc_frame );
  }

  /* does not return */
  _CPU_Exception_resume( exc_frame );
}
+
void target_exception_stack_stage_2(
  CPU_Exception_frame *app_frame,
  CPU_Exception_frame *exc_frame
);

/*
 * Second stage, running on the thread stack: copy the frame over from the
 * exception stack and release it, let the debugger server process the
 * exception with interrupts enabled, then switch back to the exception
 * stack and branch to stage 3.  Never returns.
 */
void target_exception_stack_stage_2(
  CPU_Exception_frame *app_frame,
  CPU_Exception_frame *exc_frame
)
{
  _AArch64_Exception_frame_copy( app_frame, exc_frame );
  DROP_OLD_FRAME( exc_frame, EXCEPTION_MODE, THREAD_MODE );
  /* breakpoints must be disabled here since other tasks could run that don't have debug masked */
#ifdef HARDWARE_BREAKPOINTS_NOT_USED
  aarch64_debug_break_unload();
#endif
  /* enable interrupts here to allow this thread to be suspended as necessary */
  aarch64_debug_enable_interrupts();
  bool needs_cascade = target_exception( app_frame );

  /* disable interrupts to return to normal operation */
  aarch64_debug_disable_interrupts();
  /* re-enable breakpoints disabled above */
#ifdef HARDWARE_BREAKPOINTS_NOT_USED
  aarch64_debug_break_load();
#endif
  SWITCH_STACKS_AND_ALLOC_WITH_CASCADE(
    EXCEPTION_MODE,
    app_frame,
    target_exception_stack_stage_3
  );
}
+
/*
 * Move exception processing onto the thread stack by branching to
 * stage 2.  not allowed to return since it unwinds the stack.
 */
static void target_exception_thread_stack( CPU_Exception_frame *old_frame )
{
  SWITCH_STACKS_AND_ALLOC(
    THREAD_MODE,
    old_frame,
    target_exception_stack_stage_2
  );
}
+
/*
 * Synchronous exception handler for exceptions taken from application
 * (thread, SPx) context.  Hands the frame to the debugger on the thread
 * stack, except for stack-alignment faults which are handled in place.
 */
static void target_exception_application( CPU_Exception_frame *ef )
{
  /* Continue in fatal error handler chain */
  if ( !debug_session_active ) {
    /* does not return */
    _AArch64_Exception_default( ef );
  }

  /*
   * Set CPSR.D to disable single-step operation, this will be cleared before
   * the thread is resumed if necessary.
   */
  ef->register_cpsr |= AARCH64_DSPSR_EL0_D;

  /*
   * Switching to the user stack is not possible if the stack pointer is bad.
   * This should be a relatively rare occurrance and signals a severe problem
   * with the application code or system.
   */
  /* ESR_EL1 EC 0x26: SP alignment fault -- SP cannot be trusted. */
  if ( AARCH64_ESR_EL1_EC_GET( ef->register_syndrome ) == 0x26 ) {
    if ( target_exception( ef ) ) {
      /* does not return */
      _AArch64_Exception_default( ef );
    }

    /* does not return */
    _CPU_Exception_resume( ef );
  }

  target_exception_thread_stack( ef );
}
+
/*
 * Synchronous exception handler for exceptions taken while already in
 * exception (SP0) context; processed directly on the current stack.
 */
static void target_exception_kernel( CPU_Exception_frame *ef )
{
  /*
   * If there is a stack alignment problem in exception mode, it really
   * shouldn't happen and execution won't even make it this far.
   */
  if ( !debug_session_active ) {
    /* does not return */
    _AArch64_Exception_default( ef );
  }

  /*
   * Set CPSR.D to disable single-step operation, this will be cleared before
   * the thread is resumed if necessary.
   */
  ef->register_cpsr |= AARCH64_DSPSR_EL0_D;

  if ( target_exception( ef ) ) {
    /* does not return */
    _AArch64_Exception_default( ef );
  }

  /* does not return */
  _CPU_Exception_resume( ef );
}
+
/*
 * Install the debugger's synchronous exception handlers.  Threads run on
 * SP_ELx (THREAD_MODE "1"), so SPx-taken exceptions go to the application
 * handler and SP0-taken ones to the kernel handler -- see the mode
 * defines above.
 */
static void rtems_debugger_target_set_vectors( void )
{
  /* Set vectors for both application and kernel modes */
  AArch64_set_exception_handler(
    AARCH64_EXCEPTION_SPx_SYNCHRONOUS,
    (void *) target_exception_application
  );
  AArch64_set_exception_handler(
    AARCH64_EXCEPTION_SP0_SYNCHRONOUS,
    (void *) target_exception_kernel
  );
}
+
+static bool rtems_debugger_is_int_reg( size_t reg )
+{
+ const size_t size = aarch64_reg_offsets[ reg + 1 ] -
+ aarch64_reg_offsets[ reg ];
+
+ return size == RTEMS_DEBUGGER_REG_BYTES;
+}
+
+static void rtems_debugger_set_int_reg(
+ rtems_debugger_thread *thread,
+ size_t reg,
+ const uint64_t value
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+
+ memcpy( &thread->registers[ offset ], &value, sizeof( uint64_t ) );
+}
+
+static const uint64_t rtems_debugger_get_int_reg(
+ rtems_debugger_thread *thread,
+ size_t reg
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+ uint64_t value;
+
+ memcpy( &value, &thread->registers[ offset ], sizeof( uint64_t ) );
+ return value;
+}
+
+static void rtems_debugger_set_halfint_reg(
+ rtems_debugger_thread *thread,
+ size_t reg,
+ const uint32_t value
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+
+ memcpy( &thread->registers[ offset ], &value, sizeof( uint32_t ) );
+}
+
+static const uint32_t rtems_debugger_get_halfint_reg(
+ rtems_debugger_thread *thread,
+ size_t reg
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+ uint32_t value;
+
+ memcpy( &value, &thread->registers[ offset ], sizeof( uint32_t ) );
+ return value;
+}
+
+static void rtems_debugger_set_fp_reg(
+ rtems_debugger_thread *thread,
+ size_t reg,
+ const uint128_t value
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+
+ memcpy( &thread->registers[ offset ], &value, sizeof( uint128_t ) );
+}
+
+static const uint128_t rtems_debugger_get_fp_reg(
+ rtems_debugger_thread *thread,
+ size_t reg
+)
+{
+ const size_t offset = aarch64_reg_offsets[ reg ];
+ uint128_t value;
+
+ memcpy( &value, &thread->registers[ offset ], sizeof( uint128_t ) );
+ return value;
+}
+
/*
 * Remap the text and fast-text sections read-write (writable == true) or
 * read-only, so software breakpoints can be patched in and out.
 * Returns the status of the first failing aarch64_mmu_map() call.
 */
static rtems_status_code rtems_debugger_target_set_text_writable(
  bool writable
)
{
  uintptr_t text_begin = (uintptr_t) bsp_section_text_begin;
  uintptr_t text_end = (uintptr_t) bsp_section_text_end;
  uintptr_t fast_text_begin = (uintptr_t) bsp_section_fast_text_begin;
  uintptr_t fast_text_end = (uintptr_t) bsp_section_fast_text_end;
  uint64_t mmu_flags = AARCH64_MMU_CODE_RW_CACHED;
  rtems_status_code sc;

  if ( !writable ) {
    mmu_flags = AARCH64_MMU_CODE_CACHED;
  }

  target_printk(
    "[} MMU edit: text_begin: 0x%016" PRIxPTR
    " text_end: 0x%016" PRIxPTR "\n",
    text_begin,
    text_end
  );
  sc = aarch64_mmu_map(
    text_begin,
    text_end - text_begin,
    mmu_flags
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    target_printk( "[} MMU edit failed\n" );
    return sc;
  }

  target_printk(
    "[} MMU edit: fast_text_begin: 0x%016" PRIxPTR
    " fast_text_end: 0x%016" PRIxPTR "\n",
    fast_text_begin,
    fast_text_end
  );
  sc = aarch64_mmu_map(
    fast_text_begin,
    fast_text_end - fast_text_begin,
    mmu_flags
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    target_printk( "[} MMU edit failed\n" );
  }

  return sc;
}
+
/*
 * Per-CPU debugger setup, run on each CPU via run_across_cpus(): make the
 * text sections writable, install the exception vectors, enable MDSCR_EL1
 * single-step (SS), kernel debug (KDE) and monitor debug (MDE) bits, and
 * clear the OS lock.  A failure is reported through *arg.
 */
static rtems_task setup_debugger_on_cpu( rtems_task_argument arg )
{
  rtems_status_code sc;
  rtems_status_code *init_error = (rtems_status_code *) arg;
  rtems_interrupt_lock_context lock_context;

  rtems_interrupt_lock_acquire( &target_lock, &lock_context );
  sc = rtems_debugger_target_set_text_writable( true );

  if ( sc != RTEMS_SUCCESSFUL ) {
    *init_error = sc;
  }

  rtems_debugger_target_set_vectors();

  /* enable single-step debugging */
  uint64_t mdscr = _AArch64_Read_mdscr_el1();

  mdscr |= AARCH64_MDSCR_EL1_SS;
  mdscr |= AARCH64_MDSCR_EL1_KDE;
  mdscr |= AARCH64_MDSCR_EL1_MDE;
  _AArch64_Write_mdscr_el1( mdscr );

  /* clear the OS lock */
  _AArch64_Write_oslar_el1( 0 );
  rtems_interrupt_lock_release( &target_lock, &lock_context );
}
+
/*
 * Enable the debugger target on all CPUs.  Masks debug exceptions on this
 * CPU, then runs setup_debugger_on_cpu() across CPUs.  Returns a
 * rtems_status_code (0 == RTEMS_SUCCESSFUL) as the int result.
 */
int rtems_debugger_target_enable( void )
{
  rtems_status_code sc;
  rtems_status_code init_error = RTEMS_SUCCESSFUL;

  debug_session_active = true;
#ifdef HARDWARE_BREAKPOINTS_NOT_USED
  aarch64_debug_break_unload();
  aarch64_debug_break_clear();
#endif
  aarch64_debug_disable_debug_exceptions();
  sc = run_across_cpus(
    setup_debugger_on_cpu,
    ( rtems_task_argument ) & init_error
  );

  /* A per-CPU setup failure takes precedence over the run status. */
  if ( init_error != RTEMS_SUCCESSFUL ) {
    return init_error;
  }

  return sc;
}
+
/*
 * Per-CPU debugger teardown, run on each CPU via run_across_cpus():
 * restore the text sections to read-only and clear the MDSCR_EL1 debug
 * bits set during setup.  A failure is reported through *arg.
 * NOTE(review): the exception vectors installed during setup are not
 * restored here -- confirm that is intentional.
 */
static rtems_task teardown_debugger_on_cpu( rtems_task_argument arg )
{
  rtems_status_code sc;
  rtems_status_code *deinit_error = (rtems_status_code *) arg;
  rtems_interrupt_lock_context lock_context;

  rtems_interrupt_lock_acquire( &target_lock, &lock_context );
  sc = rtems_debugger_target_set_text_writable( false );

  if ( sc != RTEMS_SUCCESSFUL ) {
    *deinit_error = sc;
  }

  /* disable single-step debugging */
  uint64_t mdscr = _AArch64_Read_mdscr_el1();

  mdscr &= ~AARCH64_MDSCR_EL1_SS;
  mdscr &= ~AARCH64_MDSCR_EL1_KDE;
  mdscr &= ~AARCH64_MDSCR_EL1_MDE;
  _AArch64_Write_mdscr_el1( mdscr );

  rtems_interrupt_lock_release( &target_lock, &lock_context );
}
+
/*
 * Disable the debugger target on all CPUs; mirror of
 * rtems_debugger_target_enable().  Returns a rtems_status_code as int.
 */
int rtems_debugger_target_disable( void )
{
  rtems_status_code sc;
  rtems_status_code deinit_error = RTEMS_SUCCESSFUL;

  debug_session_active = false;
#ifdef HARDWARE_BREAKPOINTS_NOT_USED
  aarch64_debug_break_unload();
  aarch64_debug_break_clear();
#endif
  sc = run_across_cpus(
    teardown_debugger_on_cpu,
    ( rtems_task_argument ) & deinit_error
  );

  /* A per-CPU teardown failure takes precedence over the run status. */
  if ( deinit_error != RTEMS_SUCCESSFUL ) {
    return deinit_error;
  }

  return sc;
}
+
/*
 * Populate the debugger-visible register table for a thread, unless the
 * cached copy is still valid.  Threads stopped in an exception get the
 * full frame contents; threads blocked in the context switcher only have
 * the callee-saved registers available from the TCB.  Always returns 0.
 */
int rtems_debugger_target_read_regs( rtems_debugger_thread *thread )
{
  if (
    !rtems_debugger_thread_flag(
      thread,
      RTEMS_DEBUGGER_THREAD_FLAG_REG_VALID
    )
  ) {
    /*
     * A self-referential constant: its own address is a known-good,
     * mapped address to seed otherwise-unknown integer registers with.
     */
    static const uintptr_t good_address = (uintptr_t) &good_address;
    int i;

    memset( &thread->registers[ 0 ], 0, RTEMS_DEBUGGER_NUMREGBYTES );

    /* set all integer register to a known valid address */
    for ( i = 0; i < RTEMS_DEBUGGER_NUMREGS; ++i ) {
      if ( rtems_debugger_is_int_reg( i ) ) {
        rtems_debugger_set_int_reg( thread, i, (uintptr_t) &good_address );
      }
    }

    if ( thread->frame ) {
      CPU_Exception_frame *frame = thread->frame;

      /*
       * NOTE(review): this bulk struct copy is immediately followed by
       * per-register stores covering the same data -- confirm whether it
       * is still needed.
       */
      *( (CPU_Exception_frame *) thread->registers ) = *frame;
      rtems_debugger_set_int_reg( thread, REG_X0, frame->register_x0 );
      rtems_debugger_set_int_reg( thread, REG_X1, frame->register_x1 );
      rtems_debugger_set_int_reg( thread, REG_X2, frame->register_x2 );
      rtems_debugger_set_int_reg( thread, REG_X3, frame->register_x3 );
      rtems_debugger_set_int_reg( thread, REG_X4, frame->register_x4 );
      rtems_debugger_set_int_reg( thread, REG_X5, frame->register_x5 );
      rtems_debugger_set_int_reg( thread, REG_X6, frame->register_x6 );
      rtems_debugger_set_int_reg( thread, REG_X7, frame->register_x7 );
      rtems_debugger_set_int_reg( thread, REG_X8, frame->register_x8 );
      rtems_debugger_set_int_reg( thread, REG_X9, frame->register_x9 );
      rtems_debugger_set_int_reg( thread, REG_X10, frame->register_x10 );
      rtems_debugger_set_int_reg( thread, REG_X11, frame->register_x11 );
      rtems_debugger_set_int_reg( thread, REG_X12, frame->register_x12 );
      rtems_debugger_set_int_reg( thread, REG_X13, frame->register_x13 );
      rtems_debugger_set_int_reg( thread, REG_X14, frame->register_x14 );
      rtems_debugger_set_int_reg( thread, REG_X15, frame->register_x15 );
      rtems_debugger_set_int_reg( thread, REG_X16, frame->register_x16 );
      rtems_debugger_set_int_reg( thread, REG_X17, frame->register_x17 );
      rtems_debugger_set_int_reg( thread, REG_X18, frame->register_x18 );
      rtems_debugger_set_int_reg( thread, REG_X19, frame->register_x19 );
      rtems_debugger_set_int_reg( thread, REG_X20, frame->register_x20 );
      rtems_debugger_set_int_reg( thread, REG_X21, frame->register_x21 );
      rtems_debugger_set_int_reg( thread, REG_X22, frame->register_x22 );
      rtems_debugger_set_int_reg( thread, REG_X23, frame->register_x23 );
      rtems_debugger_set_int_reg( thread, REG_X24, frame->register_x24 );
      rtems_debugger_set_int_reg( thread, REG_X25, frame->register_x25 );
      rtems_debugger_set_int_reg( thread, REG_X26, frame->register_x26 );
      rtems_debugger_set_int_reg( thread, REG_X27, frame->register_x27 );
      rtems_debugger_set_int_reg( thread, REG_X28, frame->register_x28 );
      rtems_debugger_set_int_reg( thread, REG_FP, frame->register_fp );
      rtems_debugger_set_int_reg(
        thread,
        REG_LR,
        (intptr_t) frame->register_lr
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_SP,
        (intptr_t) frame->register_sp
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_PC,
        (intptr_t) frame->register_pc
      );
      /* GDB considers CPSR to be 32-bit because bits 63:32 are RES0 */
      rtems_debugger_set_halfint_reg(
        thread,
        REG_CPS,
        (uint32_t) frame->register_cpsr
      );
      rtems_debugger_set_fp_reg( thread, REG_V0, frame->register_q0 );
      rtems_debugger_set_fp_reg( thread, REG_V1, frame->register_q1 );
      rtems_debugger_set_fp_reg( thread, REG_V2, frame->register_q2 );
      rtems_debugger_set_fp_reg( thread, REG_V3, frame->register_q3 );
      rtems_debugger_set_fp_reg( thread, REG_V4, frame->register_q4 );
      rtems_debugger_set_fp_reg( thread, REG_V5, frame->register_q5 );
      rtems_debugger_set_fp_reg( thread, REG_V6, frame->register_q6 );
      rtems_debugger_set_fp_reg( thread, REG_V7, frame->register_q7 );
      rtems_debugger_set_fp_reg( thread, REG_V8, frame->register_q8 );
      rtems_debugger_set_fp_reg( thread, REG_V9, frame->register_q9 );
      rtems_debugger_set_fp_reg( thread, REG_V10, frame->register_q10 );
      rtems_debugger_set_fp_reg( thread, REG_V11, frame->register_q11 );
      rtems_debugger_set_fp_reg( thread, REG_V12, frame->register_q12 );
      rtems_debugger_set_fp_reg( thread, REG_V13, frame->register_q13 );
      rtems_debugger_set_fp_reg( thread, REG_V14, frame->register_q14 );
      rtems_debugger_set_fp_reg( thread, REG_V15, frame->register_q15 );
      rtems_debugger_set_fp_reg( thread, REG_V16, frame->register_q16 );
      rtems_debugger_set_fp_reg( thread, REG_V17, frame->register_q17 );
      rtems_debugger_set_fp_reg( thread, REG_V18, frame->register_q18 );
      rtems_debugger_set_fp_reg( thread, REG_V19, frame->register_q19 );
      rtems_debugger_set_fp_reg( thread, REG_V20, frame->register_q20 );
      rtems_debugger_set_fp_reg( thread, REG_V21, frame->register_q21 );
      rtems_debugger_set_fp_reg( thread, REG_V22, frame->register_q22 );
      rtems_debugger_set_fp_reg( thread, REG_V23, frame->register_q23 );
      rtems_debugger_set_fp_reg( thread, REG_V24, frame->register_q24 );
      rtems_debugger_set_fp_reg( thread, REG_V25, frame->register_q25 );
      rtems_debugger_set_fp_reg( thread, REG_V26, frame->register_q26 );
      rtems_debugger_set_fp_reg( thread, REG_V27, frame->register_q27 );
      rtems_debugger_set_fp_reg( thread, REG_V28, frame->register_q28 );
      rtems_debugger_set_fp_reg( thread, REG_V29, frame->register_q29 );
      rtems_debugger_set_fp_reg( thread, REG_V30, frame->register_q30 );
      rtems_debugger_set_fp_reg( thread, REG_V31, frame->register_q31 );
      /* GDB considers FPSR and FPCR to be 32-bit because bits 63:32 are RES0 */
      rtems_debugger_set_halfint_reg( thread, REG_FPS, frame->register_fpsr );
      rtems_debugger_set_halfint_reg( thread, REG_FPC, frame->register_fpcr );
      /*
       * Get the signal from the frame.
       */
      thread->signal = rtems_debugger_target_exception_to_signal( frame );
    } else {
      /* Blocked thread: only the callee-saved registers are in the TCB. */
      rtems_debugger_set_int_reg(
        thread,
        REG_X19,
        thread->tcb->Registers.register_x19
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X20,
        thread->tcb->Registers.register_x20
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X21,
        thread->tcb->Registers.register_x21
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X22,
        thread->tcb->Registers.register_x22
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X23,
        thread->tcb->Registers.register_x23
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X24,
        thread->tcb->Registers.register_x24
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X25,
        thread->tcb->Registers.register_x25
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X26,
        thread->tcb->Registers.register_x26
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X27,
        thread->tcb->Registers.register_x27
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_X28,
        thread->tcb->Registers.register_x28
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_FP,
        thread->tcb->Registers.register_fp
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_LR,
        (intptr_t) thread->tcb->Registers.register_lr
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_SP,
        (intptr_t) thread->tcb->Registers.register_sp
      );
      /* The saved LR doubles as the PC for a blocked thread. */
      rtems_debugger_set_int_reg(
        thread,
        REG_PC,
        (intptr_t) thread->tcb->Registers.register_lr
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V8,
        thread->tcb->Registers.register_d8
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V9,
        thread->tcb->Registers.register_d9
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V10,
        thread->tcb->Registers.register_d10
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V11,
        thread->tcb->Registers.register_d11
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V12,
        thread->tcb->Registers.register_d12
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V13,
        thread->tcb->Registers.register_d13
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V14,
        thread->tcb->Registers.register_d14
      );
      rtems_debugger_set_int_reg(
        thread,
        REG_V15,
        thread->tcb->Registers.register_d15
      );
      /*
       * Blocked threads have no signal.
       */
      thread->signal = 0;
    }

    thread->flags |= RTEMS_DEBUGGER_THREAD_FLAG_REG_VALID;
    thread->flags &= ~RTEMS_DEBUGGER_THREAD_FLAG_REG_DIRTY;
  }

  return 0;
}
+
/*
 * Write modified debugger register values back to a thread.  Only threads
 * stopped in an exception (i.e. with a frame the debugger controls) are
 * updated; dirty flags are cleared either way.  Always returns 0.
 */
int rtems_debugger_target_write_regs( rtems_debugger_thread *thread )
{
  if (
    rtems_debugger_thread_flag(
      thread,
      RTEMS_DEBUGGER_THREAD_FLAG_REG_DIRTY
    )
  ) {
    /*
     * Only write to debugger controlled exception threads. Do not touch the
     * registers for threads blocked in the context switcher.
     */
    if (
      rtems_debugger_thread_flag(
        thread,
        RTEMS_DEBUGGER_THREAD_FLAG_EXCEPTION
      )
    ) {
      CPU_Exception_frame *frame = thread->frame;
      frame->register_x0 = rtems_debugger_get_int_reg( thread, REG_X0 );
      frame->register_x1 = rtems_debugger_get_int_reg( thread, REG_X1 );
      frame->register_x2 = rtems_debugger_get_int_reg( thread, REG_X2 );
      frame->register_x3 = rtems_debugger_get_int_reg( thread, REG_X3 );
      frame->register_x4 = rtems_debugger_get_int_reg( thread, REG_X4 );
      frame->register_x5 = rtems_debugger_get_int_reg( thread, REG_X5 );
      frame->register_x6 = rtems_debugger_get_int_reg( thread, REG_X6 );
      frame->register_x7 = rtems_debugger_get_int_reg( thread, REG_X7 );
      frame->register_x8 = rtems_debugger_get_int_reg( thread, REG_X8 );
      frame->register_x9 = rtems_debugger_get_int_reg( thread, REG_X9 );
      frame->register_x10 = rtems_debugger_get_int_reg( thread, REG_X10 );
      frame->register_x11 = rtems_debugger_get_int_reg( thread, REG_X11 );
      frame->register_x12 = rtems_debugger_get_int_reg( thread, REG_X12 );
      frame->register_x13 = rtems_debugger_get_int_reg( thread, REG_X13 );
      frame->register_x14 = rtems_debugger_get_int_reg( thread, REG_X14 );
      frame->register_x15 = rtems_debugger_get_int_reg( thread, REG_X15 );
      frame->register_x16 = rtems_debugger_get_int_reg( thread, REG_X16 );
      frame->register_x17 = rtems_debugger_get_int_reg( thread, REG_X17 );
      frame->register_x18 = rtems_debugger_get_int_reg( thread, REG_X18 );
      frame->register_x19 = rtems_debugger_get_int_reg( thread, REG_X19 );
      frame->register_x20 = rtems_debugger_get_int_reg( thread, REG_X20 );
      frame->register_x21 = rtems_debugger_get_int_reg( thread, REG_X21 );
      frame->register_x22 = rtems_debugger_get_int_reg( thread, REG_X22 );
      frame->register_x23 = rtems_debugger_get_int_reg( thread, REG_X23 );
      frame->register_x24 = rtems_debugger_get_int_reg( thread, REG_X24 );
      frame->register_x25 = rtems_debugger_get_int_reg( thread, REG_X25 );
      frame->register_x26 = rtems_debugger_get_int_reg( thread, REG_X26 );
      frame->register_x27 = rtems_debugger_get_int_reg( thread, REG_X27 );
      frame->register_x28 = rtems_debugger_get_int_reg( thread, REG_X28 );
      frame->register_fp = (uintptr_t) rtems_debugger_get_int_reg(
        thread,
        REG_FP
      );
      frame->register_lr = (void *) (uintptr_t) rtems_debugger_get_int_reg(
        thread,
        REG_LR
      );
      frame->register_sp = (uintptr_t) rtems_debugger_get_int_reg(
        thread,
        REG_SP
      );
      frame->register_pc = (void *) (uintptr_t) rtems_debugger_get_int_reg(
        thread,
        REG_PC
      );
      frame->register_cpsr = rtems_debugger_get_halfint_reg( thread, REG_CPS );
      frame->register_q0 = rtems_debugger_get_fp_reg( thread, REG_V0 );
      frame->register_q1 = rtems_debugger_get_fp_reg( thread, REG_V1 );
      frame->register_q2 = rtems_debugger_get_fp_reg( thread, REG_V2 );
      frame->register_q3 = rtems_debugger_get_fp_reg( thread, REG_V3 );
      frame->register_q4 = rtems_debugger_get_fp_reg( thread, REG_V4 );
      frame->register_q5 = rtems_debugger_get_fp_reg( thread, REG_V5 );
      frame->register_q6 = rtems_debugger_get_fp_reg( thread, REG_V6 );
      frame->register_q7 = rtems_debugger_get_fp_reg( thread, REG_V7 );
      frame->register_q8 = rtems_debugger_get_fp_reg( thread, REG_V8 );
      frame->register_q9 = rtems_debugger_get_fp_reg( thread, REG_V9 );
      frame->register_q10 = rtems_debugger_get_fp_reg( thread, REG_V10 );
      frame->register_q11 = rtems_debugger_get_fp_reg( thread, REG_V11 );
      frame->register_q12 = rtems_debugger_get_fp_reg( thread, REG_V12 );
      frame->register_q13 = rtems_debugger_get_fp_reg( thread, REG_V13 );
      frame->register_q14 = rtems_debugger_get_fp_reg( thread, REG_V14 );
      frame->register_q15 = rtems_debugger_get_fp_reg( thread, REG_V15 );
      frame->register_q16 = rtems_debugger_get_fp_reg( thread, REG_V16 );
      frame->register_q17 = rtems_debugger_get_fp_reg( thread, REG_V17 );
      frame->register_q18 = rtems_debugger_get_fp_reg( thread, REG_V18 );
      frame->register_q19 = rtems_debugger_get_fp_reg( thread, REG_V19 );
      frame->register_q20 = rtems_debugger_get_fp_reg( thread, REG_V20 );
      frame->register_q21 = rtems_debugger_get_fp_reg( thread, REG_V21 );
      frame->register_q22 = rtems_debugger_get_fp_reg( thread, REG_V22 );
      frame->register_q23 = rtems_debugger_get_fp_reg( thread, REG_V23 );
      frame->register_q24 = rtems_debugger_get_fp_reg( thread, REG_V24 );
      frame->register_q25 = rtems_debugger_get_fp_reg( thread, REG_V25 );
      frame->register_q26 = rtems_debugger_get_fp_reg( thread, REG_V26 );
      frame->register_q27 = rtems_debugger_get_fp_reg( thread, REG_V27 );
      frame->register_q28 = rtems_debugger_get_fp_reg( thread, REG_V28 );
      frame->register_q29 = rtems_debugger_get_fp_reg( thread, REG_V29 );
      frame->register_q30 = rtems_debugger_get_fp_reg( thread, REG_V30 );
      frame->register_q31 = rtems_debugger_get_fp_reg( thread, REG_V31 );
      frame->register_fpsr = rtems_debugger_get_halfint_reg( thread, REG_FPS );
      frame->register_fpcr = rtems_debugger_get_halfint_reg( thread, REG_FPC );
    }

    thread->flags &= ~RTEMS_DEBUGGER_THREAD_FLAG_REG_DIRTY;
  }

  return 0;
}
+
+uintptr_t rtems_debugger_target_reg_pc( rtems_debugger_thread *thread )
+{
+ int r;
+
+ r = rtems_debugger_target_read_regs( thread );
+
+ if ( r >= 0 ) {
+ return rtems_debugger_get_int_reg( thread, REG_PC );
+ }
+
+ return 0;
+}
+
+uintptr_t rtems_debugger_target_frame_pc( CPU_Exception_frame *frame )
+{
+ return (uintptr_t) frame->register_pc;
+}
+
+uintptr_t rtems_debugger_target_reg_sp( rtems_debugger_thread *thread )
+{
+ int r;
+
+ r = rtems_debugger_target_read_regs( thread );
+
+ if ( r >= 0 ) {
+ return rtems_debugger_get_int_reg( thread, REG_SP );
+ }
+
+ return 0;
+}
+
+uintptr_t rtems_debugger_target_tcb_sp( rtems_debugger_thread *thread )
+{
+ return (uintptr_t) thread->tcb->Registers.register_sp;
+}
+
+int rtems_debugger_target_thread_stepping( rtems_debugger_thread *thread )
+{
+ CPU_Exception_frame *frame = thread->frame;
+
+ if ( rtems_debugger_thread_flag(
+ thread,
+ RTEMS_DEBUGGER_THREAD_FLAG_STEP_INSTR
+ ) ) {
+ /* Especially on first startup, frame isn't guaranteed to be non-NULL */
+ if ( frame == NULL ) {
+ return -1;
+ }
+
+ /*
+ * Single stepping uses AArch64-specific single-step mode and does not
+ * involve hardware breakpoints.
+ */
+
+ /* Breakpoint instruction exceptions occur even when D is not set. */
+ uint64_t stepping_enabled =
+ !( frame->register_cpsr & AARCH64_DSPSR_EL0_D );
+
+ target_printk( "[} stepping: %s\n", stepping_enabled ? "yes" : "no" );
+
+ /*
+ * This field is unset by the CPU during the software step process and must
+ * be set again each time the debugger needs to advance one instruction. If
+ * this is not set each time, the software step exception will trigger
+ * before executing an instruction.
+ */
+ frame->register_cpsr |= AARCH64_DSPSR_EL0_SS;
+
+ if ( !stepping_enabled ) {
+ /*
+ * Clear CPSR.D to enable single-step operation. The debug mask flag is
+ * set on taking an exception to prevent unwanted stepping. The way
+ * single-stepping works will need to change if hardware breakpoints and
+ * watchpoints are ever used.
+ */
+ frame->register_cpsr &= ~AARCH64_DSPSR_EL0_D;
+ }
+ }
+
+ return 0;
+}
+
+int rtems_debugger_target_exception_to_signal( CPU_Exception_frame *frame )
+{
+ uint64_t EC = AARCH64_ESR_EL1_EC_GET( frame->register_syndrome );
+
+ switch ( EC ) {
+ case 0x1: /* WFI */
+ case 0x7: /* SVE/SIMD/FP */
+ case 0xa: /* LD64B/ST64B* */
+ case 0x15:
+ case 0x18: /* MSR/MRS/system instruction */
+ case 0x19: /* SVE */
+ case 0x31:
+ case 0x33:
+ case 0x35:
+ case 0x3c:
+ return RTEMS_DEBUGGER_SIGNAL_TRAP;
+
+ case 0x2c:
+ return RTEMS_DEBUGGER_SIGNAL_FPE;
+
+ case 0x21:
+ case 0x25:
+ return RTEMS_DEBUGGER_SIGNAL_SEGV;
+
+ default:
+ /*
+ * Covers unknown, SP/PC alignment, illegal execution state, and any new
+ * exception classes that get added.
+ */
+ return RTEMS_DEBUGGER_SIGNAL_ILL;
+ }
+}
+
+void rtems_debugger_target_exception_print( CPU_Exception_frame *frame )
+{
+ EXC_FRAME_PRINT( rtems_debugger_printf, "", frame );
+}
+
+int rtems_debugger_target_hwbreak_insert( void )
+{
+ /*
+ * Do nothing; these are loaded elsewhere if needed.
+ */
+ return 0;
+}
+
+int rtems_debugger_target_hwbreak_remove( void )
+{
+#ifdef HARDWARE_BREAKPOINTS_NOT_USED
+ aarch64_debug_break_unload();
+#endif
+ return 0;
+}
+
+int rtems_debugger_target_hwbreak_control(
+ rtems_debugger_target_watchpoint wp,
+ bool insert,
+ uintptr_t addr,
+ DB_UINT kind
+)
+{
+ /* To do. */
+ return 0;
+}
+
+int rtems_debugger_target_cache_sync( rtems_debugger_target_swbreak *swbreak )
+{
+ /*
+ * Flush the data cache and invalidate the instruction cache.
+ */
+ rtems_cache_flush_multiple_data_lines(
+ swbreak->address,
+ sizeof( breakpoint )
+ );
+ rtems_cache_instruction_sync_after_code_change(
+ swbreak->address,
+ sizeof( breakpoint )
+ );
+ return 0;
+}
diff --git a/cpukit/libdebugger/rtems-debugger-i386.c b/cpukit/libdebugger/rtems-debugger-i386.c
index a2396e5f30..02e29c25a1 100644
--- a/cpukit/libdebugger/rtems-debugger-i386.c
+++ b/cpukit/libdebugger/rtems-debugger-i386.c
@@ -376,7 +376,7 @@ rtems_debugger_target_write_regs(rtems_debugger_thread* thread)
return 0;
}
-DB_UINT
+uintptr_t
rtems_debugger_target_reg_pc(rtems_debugger_thread* thread)
{
int r;
@@ -387,13 +387,13 @@ rtems_debugger_target_reg_pc(rtems_debugger_thread* thread)
return 0;
}
-DB_UINT
+uintptr_t
rtems_debugger_target_frame_pc(CPU_Exception_frame* frame)
{
- return (DB_UINT) frame->eip;
+ return (uintptr_t) frame->eip;
}
-DB_UINT
+uintptr_t
rtems_debugger_target_reg_sp(rtems_debugger_thread* thread)
{
int r;
@@ -404,7 +404,7 @@ rtems_debugger_target_reg_sp(rtems_debugger_thread* thread)
return 0;
}
-DB_UINT
+uintptr_t
rtems_debugger_target_tcb_sp(rtems_debugger_thread* thread)
{
return (DB_UINT) thread->tcb->Registers.esp;
@@ -503,7 +503,7 @@ rtems_debugger_target_hwbreak_remove(void)
int
rtems_debugger_target_hwbreak_control(rtems_debugger_target_watchpoint wp,
bool insert,
- DB_UINT addr,
+ uintptr_t addr,
DB_UINT kind)
{
/*
diff --git a/cpukit/libdebugger/rtems-debugger-server.c b/cpukit/libdebugger/rtems-debugger-server.c
index 975ec23a30..9de9421b6b 100644
--- a/cpukit/libdebugger/rtems-debugger-server.c
+++ b/cpukit/libdebugger/rtems-debugger-server.c
@@ -154,6 +154,26 @@ hex_encode(int val)
return "0123456789abcdef"[val & 0xf];
}
+static inline uintptr_t
+hex_decode_addr(const uint8_t* data)
+{
+ uintptr_t ui = 0;
+ size_t i;
+ if (data[0] == '-') {
+ if (data[1] == '1')
+ ui = (uintptr_t) -1;
+ }
+ else {
+ for (i = 0; i < (sizeof(ui) * 2); ++i) {
+ int v = hex_decode(data[i]);
+ if (v < 0)
+ break;
+ ui = (ui << 4) | v;
+ }
+ }
+ return ui;
+}
+
static inline DB_UINT
hex_decode_uint(const uint8_t* data)
{
@@ -1438,10 +1458,10 @@ remote_read_memory(uint8_t* buffer, int size)
if (comma == NULL)
remote_packet_out_str(r_E01);
else {
- DB_UINT addr;
+ uintptr_t addr;
DB_UINT length;
int r;
- addr = hex_decode_uint(&buffer[1]);
+ addr = hex_decode_addr(&buffer[1]);
length = hex_decode_uint((const uint8_t*) comma + 1);
remote_packet_out_reset();
r = rtems_debugger_target_start_memory_access();
@@ -1468,10 +1488,10 @@ remote_write_memory(uint8_t* buffer, int size)
comma = strchr((const char*) buffer, ',');
colon = strchr((const char*) buffer, ':');
if (comma != NULL && colon != NULL) {
- DB_UINT addr;
+ uintptr_t addr;
DB_UINT length;
int r;
- addr = hex_decode_uint(&buffer[1]);
+ addr = hex_decode_addr(&buffer[1]);
length = hex_decode_uint((const uint8_t*) comma + 1);
r = rtems_debugger_target_start_memory_access();
if (r == 0) {
@@ -1519,9 +1539,9 @@ remote_breakpoints(bool insert, uint8_t* buffer, int size)
comma2 = strchr(comma1 + 1, ',');
if (comma2 != NULL) {
uint32_t capabilities;
- DB_UINT addr;
+ uintptr_t addr;
DB_UINT kind;
- addr = hex_decode_uint((const uint8_t*) comma1 + 1);
+ addr = hex_decode_addr((const uint8_t*) comma1 + 1);
kind = hex_decode_uint((const uint8_t*)comma2 + 1);
capabilities = rtems_debugger_target_capabilities();
switch (buffer[1]) {
diff --git a/cpukit/libdebugger/rtems-debugger-target.c b/cpukit/libdebugger/rtems-debugger-target.c
index bf7579700d..04b274909b 100644
--- a/cpukit/libdebugger/rtems-debugger-target.c
+++ b/cpukit/libdebugger/rtems-debugger-target.c
@@ -168,7 +168,7 @@ rtems_debugger_target_reg_table_size(void)
}
int
-rtems_debugger_target_swbreak_control(bool insert, DB_UINT addr, DB_UINT kind)
+rtems_debugger_target_swbreak_control(bool insert, uintptr_t addr, DB_UINT kind)
{
rtems_debugger_target* target = rtems_debugger->target;
rtems_debugger_target_swbreak* swbreaks;
@@ -315,7 +315,7 @@ rtems_debugger_target_exception(CPU_Exception_frame* frame)
Thread_Control* thread = _Thread_Get_executing();
const rtems_id tid = thread->Object.id;
rtems_id* excludes;
- DB_UINT pc;
+ uintptr_t pc;
const rtems_debugger_thread_stepper* stepper;
rtems_debugger_exception target_exception;
size_t i;
diff --git a/cpukit/libdebugger/rtems-debugger-target.h b/cpukit/libdebugger/rtems-debugger-target.h
index f2abbe5fd3..1e132fb28c 100644
--- a/cpukit/libdebugger/rtems-debugger-target.h
+++ b/cpukit/libdebugger/rtems-debugger-target.h
@@ -164,22 +164,22 @@ extern int rtems_debugger_target_write_regs(rtems_debugger_thread* thread);
/**
* Return the thread's program counter (PC).
*/
-extern DB_UINT rtems_debugger_target_reg_pc(rtems_debugger_thread* thread);
+extern uintptr_t rtems_debugger_target_reg_pc(rtems_debugger_thread* thread);
/**
* Return the frame's program counter (PC).
*/
-extern DB_UINT rtems_debugger_target_frame_pc(CPU_Exception_frame* frame);
+extern uintptr_t rtems_debugger_target_frame_pc(CPU_Exception_frame* frame);
/**
* Return the thread's stack pointer (SP).
*/
-extern DB_UINT rtems_debugger_target_reg_sp(rtems_debugger_thread* thread);
+extern uintptr_t rtems_debugger_target_reg_sp(rtems_debugger_thread* thread);
/**
* Return the thread's TCB stack pointer (SP).
*/
-extern DB_UINT rtems_debugger_target_tcb_sp(rtems_debugger_thread* thread);
+extern uintptr_t rtems_debugger_target_tcb_sp(rtems_debugger_thread* thread);
/**
* The thread is stepping. Setup the thread to step an instruction.
@@ -200,7 +200,7 @@ extern void rtems_debugger_target_exception_print(CPU_Exception_frame* frame);
* Software breakpoints. These are also referred to as memory breakpoints.
*/
extern int rtems_debugger_target_swbreak_control(bool insert,
- DB_UINT addr,
+ uintptr_t addr,
DB_UINT kind);
/**
@@ -228,7 +228,7 @@ extern int rtems_debugger_target_hwbreak_remove(void);
*/
extern int rtems_debugger_target_hwbreak_control(rtems_debugger_target_watchpoint type,
bool insert,
- DB_UINT addr,
+ uintptr_t addr,
DB_UINT kind);
/**
diff --git a/cpukit/libdebugger/rtems-debugger-threads.c b/cpukit/libdebugger/rtems-debugger-threads.c
index e6ffe4a080..c628c0250e 100644
--- a/cpukit/libdebugger/rtems-debugger-threads.c
+++ b/cpukit/libdebugger/rtems-debugger-threads.c
@@ -469,8 +469,8 @@ rtems_debugger_thread_step(rtems_debugger_thread* thread)
int
rtems_debugger_thread_stepping(rtems_debugger_thread* thread,
- DB_UINT start,
- DB_UINT end)
+ uintptr_t start,
+ uintptr_t end)
{
/* add lock */
rtems_debugger_threads* threads = rtems_debugger->threads;
@@ -496,7 +496,7 @@ rtems_debugger_thread_stepping(rtems_debugger_thread* thread,
}
const rtems_debugger_thread_stepper*
-rtems_debugger_thread_is_stepping(rtems_id id, DB_UINT pc)
+rtems_debugger_thread_is_stepping(rtems_id id, uintptr_t pc)
{
/* add lock */
rtems_debugger_threads* threads = rtems_debugger->threads;
diff --git a/cpukit/libdebugger/rtems-debugger-threads.h b/cpukit/libdebugger/rtems-debugger-threads.h
index 200dbbe1c7..60bc87984e 100644
--- a/cpukit/libdebugger/rtems-debugger-threads.h
+++ b/cpukit/libdebugger/rtems-debugger-threads.h
@@ -102,8 +102,8 @@ typedef struct rtems_debugger_thread
typedef struct rtems_debugger_thread_stepper
{
rtems_debugger_thread* thread;
- DB_UINT start;
- DB_UINT end;
+ uintptr_t start;
+ uintptr_t end;
} rtems_debugger_thread_stepper;
/**
@@ -165,15 +165,15 @@ extern int rtems_debugger_thread_step(rtems_debugger_thread* thread);
* Thread is stepping so record the details.
*/
extern int rtems_debugger_thread_stepping(rtems_debugger_thread* thread,
- DB_UINT start,
- DB_UINT end);
+ uintptr_t start,
+ uintptr_t end);
/**
* Thread's PC in the stepping range? Returns the stepper is in range else
* NULL.
*/
extern const rtems_debugger_thread_stepper*
-rtems_debugger_thread_is_stepping(rtems_id id, DB_UINT pc);
+rtems_debugger_thread_is_stepping(rtems_id id, uintptr_t pc);
/**
* Return the thread's current priority/
diff --git a/cpukit/libmisc/cpuuse/cpuusagereport.c b/cpukit/libmisc/cpuuse/cpuusagereport.c
index 08bc5bb541..ea21e73dc1 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereport.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereport.c
@@ -52,7 +52,7 @@ static bool cpu_usage_visitor( Thread_Control *the_thread, void *arg )
ctx = arg;
_Thread_Get_name( the_thread, name, sizeof( name ) );
- _Thread_Get_CPU_time_used( the_thread, &used );
+ used = _Thread_Get_CPU_time_used_after_last_reset( the_thread );
_TOD_Get_uptime( &uptime );
_Timestamp_Subtract( &ctx->uptime_at_last_reset, &uptime, &ctx->total );
_Timestamp_Divide( &used, &ctx->total, &ival, &fval );
diff --git a/cpukit/libmisc/cpuuse/cpuusagereset.c b/cpukit/libmisc/cpuuse/cpuusagereset.c
index 10d8ebec48..d1f0e65180 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereset.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereset.c
@@ -40,7 +40,8 @@ static bool CPU_usage_Per_thread_handler(
scheduler = _Thread_Scheduler_get_home( the_thread );
_Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
- _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
+ the_thread->cpu_time_used_at_last_reset =
+ _Thread_Get_CPU_time_used_locked( the_thread );
_Scheduler_Release_critical( scheduler, &scheduler_lock_context );
_Thread_State_release( the_thread, &state_lock_context );
diff --git a/cpukit/libmisc/cpuuse/cpuusagetop.c b/cpukit/libmisc/cpuuse/cpuusagetop.c
index dad11ad748..51d049257a 100644
--- a/cpukit/libmisc/cpuuse/cpuusagetop.c
+++ b/cpukit/libmisc/cpuuse/cpuusagetop.c
@@ -178,7 +178,7 @@ task_usage(Thread_Control* thread, void* arg)
data->stack_size += thread->Start.Initial_stack.size;
- _Thread_Get_CPU_time_used(thread, &usage);
+ usage = _Thread_Get_CPU_time_used_after_last_reset(thread);
for (j = 0; j < data->last_task_count; j++)
{
diff --git a/cpukit/libtest/t-test-interrupt.c b/cpukit/libtest/t-test-interrupt.c
index 5d83b7876a..85b9b6ff69 100644
--- a/cpukit/libtest/t-test-interrupt.c
+++ b/cpukit/libtest/t-test-interrupt.c
@@ -456,9 +456,16 @@ T_interrupt_test(const T_interrupt_test_config *config, void *arg)
lower_bound[sample] = lower - delta;
sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
- } else if (state == T_INTERRUPT_TEST_LATE) {
+ } else if (state == T_INTERRUPT_TEST_LATE ||
+ state == T_INTERRUPT_TEST_ACTION) {
uint_fast32_t upper;
+ /*
+ * If the state is T_INTERRUPT_TEST_ACTION, then there
+ * was probably no interrupt during the action, so the
+ * interrupt would be late.
+ */
+
lower_sum -= lower_bound[sample];
lower_sum += busy;
lower_bound[sample] = busy;
diff --git a/cpukit/libtest/testbusy.c b/cpukit/libtest/testbusy.c
index c1d44278be..51c6a71810 100644
--- a/cpukit/libtest/testbusy.c
+++ b/cpukit/libtest/testbusy.c
@@ -28,10 +28,10 @@ void rtems_test_busy_cpu_usage( time_t seconds, long nanoseconds )
Timestamp_Control now;
executing = _Thread_Get_executing();
- _Thread_Get_CPU_time_used( executing, &start );
+ start = _Thread_Get_CPU_time_used( executing );
_Timestamp_Set( &busy, seconds, nanoseconds );
do {
- _Thread_Get_CPU_time_used( executing, &now );
+ now = _Thread_Get_CPU_time_used( executing );
} while ( now - start < busy );
}
diff --git a/cpukit/posix/src/psignalunblockthread.c b/cpukit/posix/src/psignalunblockthread.c
index de814c13cd..36680f99a2 100644
--- a/cpukit/posix/src/psignalunblockthread.c
+++ b/cpukit/posix/src/psignalunblockthread.c
@@ -196,8 +196,6 @@ bool _POSIX_signals_Unblock_thread(
if ( _States_Is_interruptible_signal( the_thread->current_state ) ) {
if ( (the_thread->Wait.option & mask) || (api->signals_unblocked & mask) ) {
- the_thread->Wait.return_code = STATUS_INTERRUPTED;
-
the_info = (siginfo_t *) the_thread->Wait.return_argument;
if ( !info ) {
@@ -208,7 +206,7 @@ bool _POSIX_signals_Unblock_thread(
*the_info = *info;
}
- _Thread_queue_Extract_with_proxy( the_thread );
+ _Thread_Timer_remove_and_continue( the_thread, STATUS_INTERRUPTED );
return _POSIX_signals_Unblock_thread_done( the_thread, api, true );
}
@@ -238,8 +236,7 @@ bool _POSIX_signals_Unblock_thread(
*/
if ( _States_Is_interruptible_by_signal( the_thread->current_state ) ) {
- the_thread->Wait.return_code = STATUS_INTERRUPTED;
- _Thread_queue_Extract_with_proxy( the_thread );
+ _Thread_Timer_remove_and_continue( the_thread, STATUS_INTERRUPTED );
}
}
return _POSIX_signals_Unblock_thread_done( the_thread, api, false );
diff --git a/cpukit/posix/src/psxtransschedparam.c b/cpukit/posix/src/psxtransschedparam.c
index eba26d4932..f97924e011 100644
--- a/cpukit/posix/src/psxtransschedparam.c
+++ b/cpukit/posix/src/psxtransschedparam.c
@@ -23,22 +23,27 @@
#include <errno.h>
#include <rtems/posix/pthreadimpl.h>
+#include <rtems/score/threadcpubudget.h>
int _POSIX_Thread_Translate_to_sched_policy(
- Thread_CPU_budget_algorithms budget_algorithm
+ const Thread_CPU_budget_operations *operations
)
{
- switch ( budget_algorithm ) {
- case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
- return SCHED_OTHER;
- case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
- return SCHED_RR;
- case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
- return SCHED_SPORADIC;
- default:
- _Assert( budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE );
- return SCHED_FIFO;
+ if ( operations == NULL ) {
+ return SCHED_FIFO;
}
+
+ if ( operations == &_Thread_CPU_budget_exhaust_timeslice ) {
+ return SCHED_RR;
+ }
+
+#if defined(RTEMS_POSIX_API)
+ if ( operations == &_POSIX_Threads_Sporadic_budget ) {
+ return SCHED_SPORADIC;
+ }
+#endif
+
+ return SCHED_OTHER;
}
int _POSIX_Thread_Translate_sched_param(
@@ -47,23 +52,19 @@ int _POSIX_Thread_Translate_sched_param(
Thread_Configuration *config
)
{
- config->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
- config->budget_callout = NULL;
- config->cpu_time_budget = 0;
+ config->cpu_budget_operations = NULL;
- if ( policy == SCHED_OTHER ) {
- config->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
+ if ( policy == SCHED_FIFO ) {
return 0;
}
- if ( policy == SCHED_FIFO ) {
- config->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
+ if ( policy == SCHED_OTHER ) {
+ config->cpu_budget_operations = &_Thread_CPU_budget_reset_timeslice;
return 0;
}
if ( policy == SCHED_RR ) {
- config->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE;
- config->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();
+ config->cpu_budget_operations = &_Thread_CPU_budget_exhaust_timeslice;
return 0;
}
@@ -81,8 +82,7 @@ int _POSIX_Thread_Translate_sched_param(
_Timespec_To_ticks( &param->sched_ss_init_budget ) )
return EINVAL;
- config->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_CALLOUT;
- config->budget_callout = _POSIX_Threads_Sporadic_budget_callout;
+ config->cpu_budget_operations = &_POSIX_Threads_Sporadic_budget;
return 0;
}
#endif
diff --git a/cpukit/posix/src/pthreadcreate.c b/cpukit/posix/src/pthreadcreate.c
index 9474d07032..093ad5cfe1 100644
--- a/cpukit/posix/src/pthreadcreate.c
+++ b/cpukit/posix/src/pthreadcreate.c
@@ -34,9 +34,11 @@
#include <rtems/posix/pthreadimpl.h>
#include <rtems/posix/pthreadattrimpl.h>
#include <rtems/score/assert.h>
+#include <rtems/score/threadcpubudget.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/apimutex.h>
#include <rtems/score/stackimpl.h>
+#include <rtems/score/statesimpl.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/sysinit.h>
@@ -348,7 +350,9 @@ void _POSIX_Threads_Sporadic_timer( Watchdog_Control *watchdog )
_Thread_Priority_update( &queue_context );
}
-void _POSIX_Threads_Sporadic_budget_callout( Thread_Control *the_thread )
+static void _POSIX_Threads_Sporadic_budget_callout(
+ Thread_Control *the_thread
+)
{
POSIX_API_Control *api;
Thread_queue_Context queue_context;
@@ -363,7 +367,7 @@ void _POSIX_Threads_Sporadic_budget_callout( Thread_Control *the_thread )
* This will prevent the thread from consuming its entire "budget"
* while at low priority.
*/
- the_thread->cpu_time_budget = UINT32_MAX;
+ the_thread->CPU_budget.available = UINT32_MAX;
if ( !_Priority_Node_is_active( &api->Sporadic.Low_priority ) ) {
_Thread_Priority_add(
@@ -382,6 +386,34 @@ void _POSIX_Threads_Sporadic_budget_callout( Thread_Control *the_thread )
_Thread_Priority_update( &queue_context );
}
+static void _POSIX_Threads_Sporadic_budget_at_tick( Thread_Control *the_thread )
+{
+ uint32_t budget_available;
+
+ if ( !the_thread->is_preemptible ) {
+ return;
+ }
+
+ if ( !_States_Is_ready( the_thread->current_state ) ) {
+ return;
+ }
+
+ budget_available = the_thread->CPU_budget.available;
+
+ if ( budget_available == 1 ) {
+ the_thread->CPU_budget.available = 0;
+ _POSIX_Threads_Sporadic_budget_callout ( the_thread );
+ } else {
+ the_thread->CPU_budget.available = budget_available - 1;
+ }
+}
+
+const Thread_CPU_budget_operations _POSIX_Threads_Sporadic_budget = {
+ .at_tick = _POSIX_Threads_Sporadic_budget_at_tick,
+ .at_context_switch = _Thread_CPU_budget_do_nothing,
+ .initialize = _Thread_CPU_budget_do_nothing
+};
+
static bool _POSIX_Threads_Create_extension(
Thread_Control *executing,
Thread_Control *created
diff --git a/cpukit/posix/src/pthreadgetattrnp.c b/cpukit/posix/src/pthreadgetattrnp.c
index 5572fb98a5..aa34185264 100644
--- a/cpukit/posix/src/pthreadgetattrnp.c
+++ b/cpukit/posix/src/pthreadgetattrnp.c
@@ -37,12 +37,12 @@ int pthread_getattr_np(
pthread_attr_t *attr
)
{
- Thread_Control *the_thread;
- ISR_lock_Context lock_context;
- Thread_CPU_budget_algorithms budget_algorithm;
- const Scheduler_Control *scheduler;
- Priority_Control priority;
- Status_Control status;
+ Thread_Control *the_thread;
+ ISR_lock_Context lock_context;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
+ const Scheduler_Control *scheduler;
+ Priority_Control priority;
+ Status_Control status;
if ( attr == NULL ) {
return EINVAL;
@@ -89,7 +89,7 @@ int pthread_getattr_np(
attr->affinityset
);
- budget_algorithm = the_thread->budget_algorithm;
+ cpu_budget_operations = the_thread->CPU_budget.operations;
_Thread_State_release( the_thread, &lock_context );
@@ -101,7 +101,7 @@ int pthread_getattr_np(
priority
);
attr->schedpolicy =
- _POSIX_Thread_Translate_to_sched_policy( budget_algorithm );
+ _POSIX_Thread_Translate_to_sched_policy( cpu_budget_operations );
return _POSIX_Get_error( status );
}
diff --git a/cpukit/posix/src/pthreadgetschedparam.c b/cpukit/posix/src/pthreadgetschedparam.c
index a82d79c715..406ae8e7bb 100644
--- a/cpukit/posix/src/pthreadgetschedparam.c
+++ b/cpukit/posix/src/pthreadgetschedparam.c
@@ -37,11 +37,11 @@ int pthread_getschedparam(
struct sched_param *param
)
{
- Thread_Control *the_thread;
- Thread_queue_Context queue_context;
- Thread_CPU_budget_algorithms budget_algorithm;
- const Scheduler_Control *scheduler;
- Priority_Control priority;
+ Thread_Control *the_thread;
+ Thread_queue_Context queue_context;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
+ const Scheduler_Control *scheduler;
+ Priority_Control priority;
if ( policy == NULL || param == NULL ) {
return EINVAL;
@@ -59,11 +59,11 @@ int pthread_getschedparam(
scheduler = _Thread_Scheduler_get_home( the_thread );
_POSIX_Threads_Get_sched_param_sporadic( the_thread, scheduler, param );
priority = the_thread->Real_priority.priority;
- budget_algorithm = the_thread->budget_algorithm;
+ cpu_budget_operations = the_thread->CPU_budget.operations;
_Thread_Wait_release( the_thread, &queue_context );
param->sched_priority = _POSIX_Priority_From_core( scheduler, priority );
- *policy = _POSIX_Thread_Translate_to_sched_policy( budget_algorithm );
+ *policy = _POSIX_Thread_Translate_to_sched_policy( cpu_budget_operations );
return 0;
}
diff --git a/cpukit/posix/src/pthreadsetschedparam.c b/cpukit/posix/src/pthreadsetschedparam.c
index 1c207e7887..165e1d86a8 100644
--- a/cpukit/posix/src/pthreadsetschedparam.c
+++ b/cpukit/posix/src/pthreadsetschedparam.c
@@ -40,14 +40,15 @@ static int _POSIX_Set_sched_param(
Thread_queue_Context *queue_context
)
{
- const Scheduler_Control *scheduler;
- int normal_prio;
- bool valid;
- Priority_Control core_normal_prio;
+ const Scheduler_Control *scheduler;
+ int normal_prio;
+ bool valid;
+ Priority_Control core_normal_prio;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
#if defined(RTEMS_POSIX_API)
- POSIX_API_Control *api;
- int low_prio;
- Priority_Control core_low_prio;
+ POSIX_API_Control *api;
+ int low_prio;
+ Priority_Control core_low_prio;
#endif
normal_prio = param->sched_priority;
@@ -103,9 +104,12 @@ static int _POSIX_Set_sched_param(
}
#endif
- the_thread->cpu_time_budget = config->cpu_time_budget;
- the_thread->budget_algorithm = config->budget_algorithm;
- the_thread->budget_callout = config->budget_callout;
+ cpu_budget_operations = config->cpu_budget_operations;
+ the_thread->CPU_budget.operations = cpu_budget_operations;
+
+ if ( cpu_budget_operations != NULL ) {
+ ( *cpu_budget_operations->initialize )( the_thread );
+ }
#if defined(RTEMS_POSIX_API)
_Priority_Node_set_priority( &api->Sporadic.Low_priority, core_low_prio );
diff --git a/cpukit/rtems/src/eventsurrender.c b/cpukit/rtems/src/eventsurrender.c
index 48c08e486f..0623977c8b 100644
--- a/cpukit/rtems/src/eventsurrender.c
+++ b/cpukit/rtems/src/eventsurrender.c
@@ -42,12 +42,10 @@ static bool _Event_Is_blocking_on_event(
)
{
Thread_Wait_flags wait_flags;
- Thread_Wait_flags wait_mask;
wait_flags = _Thread_Wait_flags_get( the_thread );
- wait_mask = THREAD_WAIT_CLASS_MASK | THREAD_WAIT_STATE_READY_AGAIN;
- return ( wait_flags & wait_mask ) == wait_class;
+ return ( wait_flags & THREAD_WAIT_CLASS_MASK ) == wait_class;
}
static bool _Event_Is_satisfied(
@@ -88,16 +86,14 @@ rtems_status_code _Event_Surrender(
_Event_Is_blocking_on_event( the_thread, wait_class )
&& _Event_Is_satisfied( the_thread, pending_events, &seized_events )
) {
- Thread_Wait_flags ready_again;
- bool success;
+ bool success;
_Event_Satisfy( the_thread, event, pending_events, seized_events );
- ready_again = wait_class | THREAD_WAIT_STATE_READY_AGAIN;
success = _Thread_Wait_flags_try_change_release(
the_thread,
wait_class | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
- ready_again
+ THREAD_WAIT_STATE_READY
);
if ( success ) {
@@ -107,7 +103,7 @@ rtems_status_code _Event_Surrender(
_Thread_Wait_flags_get( the_thread )
== ( wait_class | THREAD_WAIT_STATE_BLOCKED )
);
- _Thread_Wait_flags_set( the_thread, ready_again );
+ _Thread_Wait_flags_set( the_thread, THREAD_WAIT_STATE_READY );
unblock = true;
}
} else {
diff --git a/cpukit/rtems/src/ratemongetstatus.c b/cpukit/rtems/src/ratemongetstatus.c
index 745b52f026..5b46a7a435 100644
--- a/cpukit/rtems/src/ratemongetstatus.c
+++ b/cpukit/rtems/src/ratemongetstatus.c
@@ -31,7 +31,6 @@ rtems_status_code rtems_rate_monotonic_get_status(
{
Rate_monotonic_Control *the_period;
ISR_lock_Context lock_context;
- rtems_status_code status;
if ( period_status == NULL ) {
return RTEMS_INVALID_ADDRESS;
@@ -54,35 +53,28 @@ rtems_status_code rtems_rate_monotonic_get_status(
*/
_Timespec_Set_to_zero( &period_status->since_last_period );
_Timespec_Set_to_zero( &period_status->executed_since_last_period );
- status = RTEMS_SUCCESSFUL;
} else {
Timestamp_Control wall_since_last_period;
Timestamp_Control cpu_since_last_period;
- bool valid_status;
/*
* Grab the current status.
*/
- valid_status = _Rate_monotonic_Get_status(
+ _Rate_monotonic_Get_status(
the_period,
&wall_since_last_period,
&cpu_since_last_period
);
- if ( valid_status ) {
- _Timestamp_To_timespec(
- &wall_since_last_period,
- &period_status->since_last_period
- );
- _Timestamp_To_timespec(
- &cpu_since_last_period,
- &period_status->executed_since_last_period
- );
- status = RTEMS_SUCCESSFUL;
- } else {
- status = RTEMS_NOT_DEFINED;
- }
+ _Timestamp_To_timespec(
+ &wall_since_last_period,
+ &period_status->since_last_period
+ );
+ _Timestamp_To_timespec(
+ &cpu_since_last_period,
+ &period_status->executed_since_last_period
+ );
}
_Rate_monotonic_Release( the_period, &lock_context );
- return status;
+ return RTEMS_SUCCESSFUL;
}
diff --git a/cpukit/rtems/src/ratemonperiod.c b/cpukit/rtems/src/ratemonperiod.c
index 7f0d302583..a8697abf3c 100644
--- a/cpukit/rtems/src/ratemonperiod.c
+++ b/cpukit/rtems/src/ratemonperiod.c
@@ -26,7 +26,7 @@
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/todimpl.h>
-bool _Rate_monotonic_Get_status(
+void _Rate_monotonic_Get_status(
const Rate_monotonic_Control *the_period,
Timestamp_Control *wall_since_last_period,
Timestamp_Control *cpu_since_last_period
@@ -47,14 +47,7 @@ bool _Rate_monotonic_Get_status(
/*
* Determine cpu usage since period initiated.
*/
- _Thread_Get_CPU_time_used( owning_thread, &used );
-
- /*
- * The cpu usage info was reset while executing. Can't
- * determine a status.
- */
- if ( _Timestamp_Less_than( &used, &the_period->cpu_usage_period_initiated ) )
- return false;
+ used = _Thread_Get_CPU_time_used( owning_thread );
/* used = current cpu usage - cpu usage at start of period */
_Timestamp_Subtract(
@@ -62,8 +55,6 @@ bool _Rate_monotonic_Get_status(
&used,
cpu_since_last_period
);
-
- return true;
}
static void _Rate_monotonic_Release_postponed_job(
@@ -130,7 +121,7 @@ void _Rate_monotonic_Restart(
* Set the starting point and the CPU time used for the statistics.
*/
_TOD_Get_uptime( &the_period->time_period_initiated );
- _Thread_Get_CPU_time_used( owner, &the_period->cpu_usage_period_initiated );
+ the_period->cpu_usage_period_initiated = _Thread_Get_CPU_time_used( owner );
_Rate_monotonic_Release_job(
the_period,
@@ -147,7 +138,6 @@ static void _Rate_monotonic_Update_statistics(
Timestamp_Control executed;
Timestamp_Control since_last_period;
Rate_monotonic_Statistics *stats;
- bool valid_status;
/*
* Assume we are only called in states where it is appropriate
@@ -167,10 +157,7 @@ static void _Rate_monotonic_Update_statistics(
/*
* Grab status for time statistics.
*/
- valid_status =
- _Rate_monotonic_Get_status( the_period, &since_last_period, &executed );
- if (!valid_status)
- return;
+ _Rate_monotonic_Get_status( the_period, &since_last_period, &executed );
/*
* Update CPU time
@@ -260,7 +247,7 @@ static rtems_status_code _Rate_monotonic_Block_while_active(
);
if ( !success ) {
_Assert(
- _Thread_Wait_flags_get( executing ) == RATE_MONOTONIC_READY_AGAIN
+ _Thread_Wait_flags_get( executing ) == THREAD_WAIT_STATE_READY
);
_Thread_Unblock( executing );
}
diff --git a/cpukit/rtems/src/ratemontimeout.c b/cpukit/rtems/src/ratemontimeout.c
index b20e1e15db..375c5f081f 100644
--- a/cpukit/rtems/src/ratemontimeout.c
+++ b/cpukit/rtems/src/ratemontimeout.c
@@ -74,13 +74,13 @@ void _Rate_monotonic_Timeout( Watchdog_Control *the_watchdog )
success = _Thread_Wait_flags_try_change_release(
owner,
RATE_MONOTONIC_INTEND_TO_BLOCK,
- RATE_MONOTONIC_READY_AGAIN
+ THREAD_WAIT_STATE_READY
);
if ( success ) {
unblock = false;
} else {
_Assert( _Thread_Wait_flags_get( owner ) == RATE_MONOTONIC_BLOCKED );
- _Thread_Wait_flags_set( owner, RATE_MONOTONIC_READY_AGAIN );
+ _Thread_Wait_flags_set( owner, THREAD_WAIT_STATE_READY );
unblock = true;
}
diff --git a/cpukit/rtems/src/schedulergetprocessor.c b/cpukit/rtems/src/schedulergetprocessor.c
new file mode 100644
index 0000000000..5aafeae204
--- /dev/null
+++ b/cpukit/rtems/src/schedulergetprocessor.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSImplClassic
+ *
+ * @brief This source file contains the implementation of
+ * rtems_scheduler_get_processor() which may be used by binding for languages
+ * other than C/C++.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/rtems/tasks.h>
+
+static uint32_t _RTEMS_Scheduler_get_processor( void )
+{
+ return rtems_scheduler_get_processor();
+}
+
+#undef rtems_scheduler_get_processor
+
+uint32_t rtems_scheduler_get_processor( void )
+{
+ return _RTEMS_Scheduler_get_processor();
+}
diff --git a/cpukit/rtems/src/schedulergetprocessormax.c b/cpukit/rtems/src/schedulergetprocessormax.c
new file mode 100644
index 0000000000..0091a6e544
--- /dev/null
+++ b/cpukit/rtems/src/schedulergetprocessormax.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSImplClassic
+ *
+ * @brief This source file contains the implementation of
+ * rtems_scheduler_get_processor_maximum() which may be used by binding for
+ * languages other than C/C++.
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/rtems/tasks.h>
+
+static uint32_t _RTEMS_Scheduler_get_processor_maximum( void )
+{
+ return rtems_scheduler_get_processor_maximum();
+}
+
+#undef rtems_scheduler_get_processor_maximum
+
+uint32_t rtems_scheduler_get_processor_maximum( void )
+{
+ return _RTEMS_Scheduler_get_processor_maximum();
+}
diff --git a/cpukit/rtems/src/schedulerremoveprocessor.c b/cpukit/rtems/src/schedulerremoveprocessor.c
index 79c17bda49..3136a8e0c0 100644
--- a/cpukit/rtems/src/schedulerremoveprocessor.c
+++ b/cpukit/rtems/src/schedulerremoveprocessor.c
@@ -35,7 +35,7 @@ typedef struct {
rtems_status_code status;
} Scheduler_Processor_removal_context;
-static bool _Scheduler_Check_processor_removal(
+static bool _Scheduler_Check_processor_not_required(
Thread_Control *the_thread,
void *arg
)
@@ -68,6 +68,45 @@ static bool _Scheduler_Check_processor_removal(
_Thread_Wait_release( the_thread, &queue_context );
return iter_context->status != RTEMS_SUCCESSFUL;
}
+
+static bool _Scheduler_Check_no_helping(
+ Thread_Control *the_thread,
+ void *arg
+)
+{
+ Scheduler_Processor_removal_context *iter_context;
+ ISR_lock_Context lock_context;
+ const Chain_Node *node;
+ const Chain_Node *tail;
+
+ if ( the_thread->is_idle ) {
+ return false;
+ }
+
+ iter_context = arg;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ node = _Chain_Immutable_first( &the_thread->Scheduler.Scheduler_nodes );
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+ do {
+ const Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ if ( scheduler == iter_context->scheduler ) {
+ iter_context->status = RTEMS_RESOURCE_IN_USE;
+ break;
+ }
+
+ node = _Chain_Immutable_next( node );
+ } while ( node != tail );
+
+ _Thread_State_release( the_thread, &lock_context );
+ return iter_context->status != RTEMS_SUCCESSFUL;
+}
#endif
rtems_status_code rtems_scheduler_remove_processor(
@@ -116,7 +155,14 @@ rtems_status_code rtems_scheduler_remove_processor(
_Scheduler_Release_critical( scheduler, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
- _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
+ _Thread_Iterate( _Scheduler_Check_processor_not_required, &iter_context );
+
+ if (
+ _Processor_mask_Is_zero( &scheduler_context->Processors ) &&
+ iter_context.status == RTEMS_SUCCESSFUL
+ ) {
+ _Thread_Iterate( _Scheduler_Check_no_helping, &iter_context );
+ }
_ISR_lock_ISR_disable( &lock_context );
_Scheduler_Acquire_critical( scheduler, &lock_context );
diff --git a/cpukit/rtems/src/semsetpriority.c b/cpukit/rtems/src/semsetpriority.c
index adb0320210..97e53c6584 100644
--- a/cpukit/rtems/src/semsetpriority.c
+++ b/cpukit/rtems/src/semsetpriority.c
@@ -29,20 +29,6 @@
#include <rtems/rtems/tasksimpl.h>
#include <rtems/score/schedulerimpl.h>
-static rtems_status_code _Semaphore_Is_scheduler_valid(
- const CORE_ceiling_mutex_Control *the_mutex,
- const Scheduler_Control *scheduler
-)
-{
-#if defined(RTEMS_SMP)
- if ( scheduler != _CORE_ceiling_mutex_Get_scheduler( the_mutex ) ) {
- return RTEMS_NOT_DEFINED;
- }
-#endif
-
- return RTEMS_SUCCESSFUL;
-}
-
static rtems_status_code _Semaphore_Set_priority(
Semaphore_Control *the_semaphore,
const Scheduler_Control *scheduler,
@@ -51,7 +37,6 @@ static rtems_status_code _Semaphore_Set_priority(
Thread_queue_Context *queue_context
)
{
- rtems_status_code sc;
bool valid;
Priority_Control core_priority;
Priority_Control old_priority;
@@ -73,20 +58,29 @@ static rtems_status_code _Semaphore_Set_priority(
switch ( variant ) {
case SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING:
- sc = _Semaphore_Is_scheduler_valid(
- &the_semaphore->Core_control.Mutex,
- scheduler
- );
+#if defined(RTEMS_SMP)
+ if (
+ scheduler != _CORE_ceiling_mutex_Get_scheduler(
+ &the_semaphore->Core_control.Mutex
+ )
+ ) {
+ _Thread_queue_Release(
+ &the_semaphore->Core_control.Wait_queue,
+ queue_context
+ );
+
+ return RTEMS_NOT_DEFINED;
+ }
+#endif
old_priority = _CORE_ceiling_mutex_Get_priority(
&the_semaphore->Core_control.Mutex
);
- if ( sc == RTEMS_SUCCESSFUL && new_priority != RTEMS_CURRENT_PRIORITY ) {
+ if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
_CORE_ceiling_mutex_Set_priority(
&the_semaphore->Core_control.Mutex,
- core_priority,
- queue_context
+ core_priority
);
}
@@ -106,7 +100,6 @@ static rtems_status_code _Semaphore_Set_priority(
);
}
- sc = RTEMS_SUCCESSFUL;
break;
#endif
default:
@@ -116,9 +109,12 @@ static rtems_status_code _Semaphore_Set_priority(
|| variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
|| variant == SEMAPHORE_VARIANT_COUNTING
);
- old_priority = 0;
- sc = RTEMS_NOT_DEFINED;
- break;
+ _Thread_queue_Release(
+ &the_semaphore->Core_control.Wait_queue,
+ queue_context
+ );
+
+ return RTEMS_NOT_DEFINED;
}
cpu_self = _Thread_queue_Dispatch_disable( queue_context );
@@ -130,7 +126,7 @@ static rtems_status_code _Semaphore_Set_priority(
_Thread_Dispatch_enable( cpu_self );
*old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
- return sc;
+ return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_semaphore_set_priority(
diff --git a/cpukit/rtems/src/signalsend.c b/cpukit/rtems/src/signalsend.c
index 72407e2b01..6ce59f2e74 100644
--- a/cpukit/rtems/src/signalsend.c
+++ b/cpukit/rtems/src/signalsend.c
@@ -35,16 +35,15 @@ static void _Signal_Action_handler(
ISR_lock_Context *lock_context
)
{
- RTEMS_API_Control *api;
- ASR_Information *asr;
- rtems_signal_set signal_set;
- bool normal_is_preemptible;
- uint32_t normal_cpu_time_budget;
- Thread_CPU_budget_algorithms normal_budget_algorithm;
- uint32_t normal_isr_level;
- uint32_t before_call_isr_level;
- bool after_call_is_preemptible;
- bool after_call_asr_is_enabled;
+ RTEMS_API_Control *api;
+ ASR_Information *asr;
+ rtems_signal_set signal_set;
+ bool normal_is_preemptible;
+ Thread_CPU_budget_control normal_cpu_budget;
+ uint32_t normal_isr_level;
+ uint32_t before_call_isr_level;
+ bool after_call_is_preemptible;
+ bool after_call_asr_is_enabled;
(void) action;
@@ -69,8 +68,7 @@ static void _Signal_Action_handler(
_Assert( asr->is_enabled );
normal_is_preemptible = executing->is_preemptible;
- normal_cpu_time_budget = executing->cpu_time_budget;
- normal_budget_algorithm = executing->budget_algorithm;
+ normal_cpu_budget = executing->CPU_budget;
/* Set mode for ASR processing */
@@ -102,8 +100,7 @@ static void _Signal_Action_handler(
_Thread_State_acquire( executing, lock_context );
- executing->cpu_time_budget = normal_cpu_time_budget ;
- executing->budget_algorithm = normal_budget_algorithm ;
+ executing->CPU_budget = normal_cpu_budget;
after_call_is_preemptible = executing->is_preemptible;
executing->is_preemptible = normal_is_preemptible;
diff --git a/cpukit/rtems/src/taskconstruct.c b/cpukit/rtems/src/taskconstruct.c
index 6e03440aed..05fbf32ff5 100644
--- a/cpukit/rtems/src/taskconstruct.c
+++ b/cpukit/rtems/src/taskconstruct.c
@@ -154,14 +154,17 @@ rtems_status_code _RTEMS_tasks_Create(
attributes = _Attributes_Clear( attributes, ATTRIBUTES_NOT_SUPPORTED );
memset( &thread_config, 0, sizeof( thread_config ) );
- thread_config.budget_algorithm = _Modes_Is_timeslice( config->initial_modes ) ?
- THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE
- : THREAD_CPU_BUDGET_ALGORITHM_NONE,
thread_config.isr_level = _Modes_Get_interrupt_level( config->initial_modes );
thread_config.name = config->name;
thread_config.is_fp = _Attributes_Is_floating_point( attributes );
thread_config.is_preemptible = _Modes_Is_preempt( config->initial_modes );
+ if ( _Modes_Is_timeslice( config->initial_modes ) ) {
+ thread_config.cpu_budget_operations = &_Thread_CPU_budget_reset_timeslice;
+ } else {
+ thread_config.cpu_budget_operations = NULL;
+ }
+
/*
* Validate the RTEMS API priority and convert it to the core priority range.
*/
diff --git a/cpukit/rtems/src/taskident.c b/cpukit/rtems/src/taskident.c
index 74a0a53982..89c2c7ce7e 100644
--- a/cpukit/rtems/src/taskident.c
+++ b/cpukit/rtems/src/taskident.c
@@ -40,6 +40,7 @@
#endif
#include <rtems/rtems/tasksimpl.h>
+#include <rtems/rtems/object.h>
#include <rtems/rtems/objectimpl.h>
#include <rtems/score/percpu.h>
@@ -53,7 +54,7 @@ rtems_status_code rtems_task_ident(
return RTEMS_INVALID_ADDRESS;
}
- if ( name == OBJECTS_ID_OF_SELF ) {
+ if ( name == RTEMS_WHO_AM_I ) {
*id = _Thread_Get_executing()->Object.id;
return RTEMS_SUCCESSFUL;
}
diff --git a/cpukit/rtems/src/taskmode.c b/cpukit/rtems/src/taskmode.c
index 96bed470f4..3300eafa28 100644
--- a/cpukit/rtems/src/taskmode.c
+++ b/cpukit/rtems/src/taskmode.c
@@ -73,7 +73,7 @@ rtems_status_code rtems_task_mode(
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
- if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
+ if ( executing->CPU_budget.operations == NULL )
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index 2a4ddbcc61..c7c9d03465 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -72,7 +72,7 @@
* * The exception returns to the previous execution state
*/
- .macro JUMP_HANDLER_SHORT
+ .macro JUMP_HANDLER
/* Mask to use in BIC, lower 7 bits */
mov x0, #0x7f
/* LR contains PC, mask off to the base of the current vector */
@@ -109,10 +109,6 @@
nop
nop
nop
- .endm
-
- .macro JUMP_HANDLER
- JUMP_HANDLER_SHORT
nop
.endm
@@ -144,11 +140,48 @@ Vector_table_el3:
* using SP0.
*/
curr_el_sp0_sync:
- stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */
- bl curr_el_sp0_sync_get_pc /* Get current execution address */
-curr_el_sp0_sync_get_pc: /* The current PC is now in LR */
- JUMP_HANDLER
- JUMP_TARGET_SP0
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */
+ bl .push_exception_context_start /* bl to CEF store routine */
+/* Save original sp in x0 for .push_exception_context_finish */
+ add x0, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* save original sp */
+/* Push the remainder of the context */
+ bl .push_exception_context_finish
+/* get jump target and branch/link */
+ bl curr_el_sp0_sync_get_pc /* Get current execution address */
+curr_el_sp0_sync_get_pc: /* The current PC is now in LR */
+ mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */
+ bic x0, lr, x0 /* Mask LR to base of current vector */
+ ldr x1, [x0, #0x78] /* Load target from last word in vector */
+ and lr, lr, #0x780 /* Mask off bits for vector number */
+ lsr lr, lr, #7 /* Shift the vector bits down */
+/* Store the vector */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
+ mov x0, sp
+ blr x1
+ b twiddle
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+/* Takes up the space of 2 instructions */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ .word _AArch64_Exception_default
+ .word 0x0
+#else
+ .dword _AArch64_Exception_default
+#endif
.balign 0x80
/* The exception handler for IRQ exceptions from the current EL using SP0. */
curr_el_sp0_irq:
@@ -204,13 +237,11 @@ curr_el_spx_sync_get_pc: /* The current PC is now in LR */
str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
mov x0, sp
blr x1
-/* bl to CEF restore routine (doesn't restore lr) */
- bl .pop_exception_context
- ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* get lr from CEF */
-/* drop space reserved for CEF and clear exclusive */
- add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
- msr spsel, #1 /* switch to thread stack */
- eret /* exception return */
+ b twiddle
+ nop
+ nop
+ nop
+ nop
nop
nop
nop
@@ -475,69 +506,3 @@ twiddle:
stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
/* Done, return to exception handler */
ret
-
-/*
- * Apply the exception frame to the current register status, SP points to the EF
- */
-.pop_exception_context:
-/* Pop daif and spsr */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
-/* Restore daif and spsr */
- msr DAIF, x2
- msr SPSR_EL1, x3
-/* Pop FAR and ESR */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
-/* Restore ESR and FAR */
- msr ESR_EL1, x2
- msr FAR_EL1, x3
-/* Pop fpcr and fpsr */
- ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
-/* Restore fpcr and fpsr */
- msr FPSR, x2
- msr FPCR, x3
-/* Pop VFP registers */
- ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
- ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
- ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
- ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
- ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
- ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
- ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
- ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
- ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
- ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
- ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
- ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
- ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
- ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
- ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
- ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
-/* Pop x0-x29(fp) */
- ldp x2, x3, [sp, #0x10]
- ldp x4, x5, [sp, #0x20]
- ldp x6, x7, [sp, #0x30]
- ldp x8, x9, [sp, #0x40]
- ldp x10, x11, [sp, #0x50]
- ldp x12, x13, [sp, #0x60]
- ldp x14, x15, [sp, #0x70]
- ldp x16, x17, [sp, #0x80]
- ldp x18, x19, [sp, #0x90]
- ldp x20, x21, [sp, #0xa0]
- ldp x22, x23, [sp, #0xb0]
- ldp x24, x25, [sp, #0xc0]
- ldp x26, x27, [sp, #0xd0]
- ldp x28, x29, [sp, #0xe0]
-/* Pop sp and ELR */
- ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore thread SP */
- msr spsel, #1
- mov sp, x0
- msr spsel, #0
-/* Restore exception LR */
- msr ELR_EL1, x1
- ldp x0, x1, [sp, #0x00]
-
-/* We must clear reservations to ensure consistency with atomic operations */
- clrex
-
- ret
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
index 2ebb3dee9f..3494c88ea6 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
@@ -41,10 +41,67 @@
#include "config.h"
#endif
-#include <rtems/score/cpu.h>
#include <rtems/fatal.h>
+#include <rtems/score/aarch64-system-registers.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/percpu.h>
void _AArch64_Exception_default( CPU_Exception_frame *frame )
{
rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
}
+
+void _CPU_Exception_disable_thread_dispatch( void )
+{
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+ /* Increment interrupt nest and thread dispatch disable level */
+ ++cpu_self->thread_dispatch_disable_level;
+ ++cpu_self->isr_nest_level;
+}
+
+void _AArch64_Exception_frame_copy(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+)
+{
+ *new_ef = *old_ef;
+}
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *ef )
+{
+ uint64_t EC = AARCH64_ESR_EL1_EC_GET( ef->register_syndrome );
+
+ switch ( EC ) {
+ case 0x1: /* WFI */
+ case 0x7: /* SVE/SIMD/FP */
+ case 0xa: /* LD64B/ST64B* */
+ case 0x18: /* MSR/MRS/system instruction */
+ case 0x19: /* SVE */
+ case 0x15: /* Supervisor call */
+ case 0x26: /* SP Alignment */
+ case 0x31: /* Breakpoint */
+ case 0x33: /* Step */
+ case 0x35: /* Watchpoint */
+ case 0x3c: /* Break Instruction */
+ return -1;
+ case 0x2c: /* FPU */
+ return SIGFPE;
+ case 0x21: /* Instruction Abort */
+ case 0x25: /* Data Abort */
+ return SIGSEGV;
+ default:
+ return SIGILL;
+ }
+}
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *ef, void *address )
+{
+ ef->register_pc = address;
+}
+
+#define AARCH64_INSTRUCTION_SIZE 4
+void _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *ef )
+{
+ ef->register_pc += AARCH64_INSTRUCTION_SIZE;
+}
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index b206f5764b..6344dce63a 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,8 @@
.globl _AArch64_Exception_interrupt_no_nest
.globl _AArch64_Exception_interrupt_nest
+.globl _CPU_Exception_dispatch_and_resume
+.globl _CPU_Exception_resume
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
#ifdef RTEMS_SMP
@@ -324,3 +326,166 @@ Return to embedded exception vector code
pop_interrupt_context
/* Return to vector for final cleanup */
ret
+
+/*
+ * This function is expected to resume execution using the CPU_Exception_frame
+ * provided in x0. This function does not adhere to the AAPCS64 calling
+ * convention because all necessary state is contained within the exception
+ * frame.
+ */
+_CPU_Exception_resume:
+/* Reset stack pointer */
+ mov sp, x0
+
+/* call CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+
+/* get lr from CEF */
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+
+/* switch to thread stack */
+ msr spsel, #1
+ eret
+
+/*
+ * This function is expected to undo dispatch disabling, perform dispatch, and
+ * resume execution using the CPU_Exception_frame provided in x0. This function
+ * does not adhere to the AAPCS64 calling convention because all necessary
+ * state is contained within the exception frame.
+ */
+_CPU_Exception_dispatch_and_resume:
+/* Get per-CPU control of current processor */
+ GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG
+
+/* Reset stack pointer */
+ mov sp, x0
+
+/* Check dispatch disable and perform dispatch if necessary */
+/* Load some per-CPU variables */
+ ldr w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+ ldrb w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
+ ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
+ ldr w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* Decrement levels and determine thread dispatch state */
+ eor w1, w1, w0
+ sub w0, w0, #1
+ orr w1, w1, w0
+ orr w1, w1, w2
+ sub w3, w3, #1
+
+/* Store thread dispatch disable and ISR nest levels */
+ str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+ str w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* store should_skip_thread_dispatch in x22 */
+ mov x22, x1
+
+/*
+ * It is now safe to assume that the source of the exception has been resolved.
+ * Copy the exception frame to the thread stack to be compatible with thread
+ * dispatch. This may arbitrarily clobber corruptible registers since all
+ * important state is contained in the exception frame.
+ *
+ * No need to save current LR since this will never return to the caller.
+ */
+ bl .move_exception_frame_and_switch_to_thread_stack
+
+/*
+ * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
+ * disable level.
+ */
+ cmp x22, #0
+ bne .Lno_need_thread_dispatch_resume
+ bl .AArch64_Perform_Thread_Dispatch
+.Lno_need_thread_dispatch_resume:
+/* call CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+
+/* get lr from CEF */
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+ eret
+
+/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */
+.move_exception_frame_and_switch_to_thread_stack:
+ mov x1, sp /* Set x1 to the current exception frame */
+ msr spsel, #1 /* switch to thread stack */
+ ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET] /* Get thread SP from exception frame since it may have been updated */
+ mov sp, x0
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ mov x0, sp /* Set x0 to the new exception frame */
+ mov x20, lr /* Save LR */
+ bl _AArch64_Exception_frame_copy /* Copy exception frame to reserved thread stack space */
+ mov lr, x20 /* Restore LR */
+ msr spsel, #0 /* switch to exception stack */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* release space for CEF on exception stack */
+ msr spsel, #1 /* switch to thread stack */
+ ret
+
+/*
+ * Apply the exception frame to the current register status, SP points to the EF
+ */
+.pop_exception_context:
+/* Pop daif and spsr */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
+/* Restore daif and spsr */
+ msr DAIF, x2
+ msr SPSR_EL1, x3
+/* Pop FAR and ESR */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
+/* Restore ESR and FAR */
+ msr ESR_EL1, x2
+ msr FAR_EL1, x3
+/* Pop fpcr and fpsr */
+ ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
+/* Restore fpcr and fpsr */
+ msr FPSR, x2
+ msr FPCR, x3
+/* Pop VFP registers */
+ ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
+ ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
+ ldp q4, q5, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
+ ldp q6, q7, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
+ ldp q8, q9, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
+ ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
+ ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
+ ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
+ ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
+ ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
+ ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
+ ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
+ ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
+ ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
+ ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
+ ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
+/* Pop x0-x29(fp) */
+ ldp x2, x3, [sp, #0x10]
+ ldp x4, x5, [sp, #0x20]
+ ldp x6, x7, [sp, #0x30]
+ ldp x8, x9, [sp, #0x40]
+ ldp x10, x11, [sp, #0x50]
+ ldp x12, x13, [sp, #0x60]
+ ldp x14, x15, [sp, #0x70]
+ ldp x16, x17, [sp, #0x80]
+ ldp x18, x19, [sp, #0x90]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x26, x27, [sp, #0xd0]
+ ldp x28, x29, [sp, #0xe0]
+/* Pop ELR, SP already popped */
+ ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
+/* Restore exception LR */
+ msr ELR_EL1, x1
+ ldp x0, x1, [sp, #0x00]
+
+/* We must clear reservations to ensure consistency with atomic operations */
+ clrex
+
+ ret
diff --git a/cpukit/score/cpu/aarch64/cpu.c b/cpukit/score/cpu/aarch64/cpu.c
index b36f55ae17..88e7ad8a8c 100644
--- a/cpukit/score/cpu/aarch64/cpu.c
+++ b/cpukit/score/cpu/aarch64/cpu.c
@@ -149,11 +149,17 @@ void _CPU_Context_Initialize(
void _CPU_ISR_Set_level( uint32_t level )
{
/* Set the mask bit if interrupts are disabled */
- level = level ? AARCH64_PSTATE_I : 0;
- __asm__ volatile (
- "msr DAIF, %[level]\n"
- : : [level] "r" (level)
- );
+ if ( level ) {
+ __asm__ volatile (
+ "msr DAIFSet, #0x2\n"
+ : : [level] "r" (level)
+ );
+ } else {
+ __asm__ volatile (
+ "msr DAIFClr, #0x2\n"
+ : : [level] "r" (level)
+ );
+ }
}
uint32_t _CPU_ISR_Get_level( void )
diff --git a/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
new file mode 100644
index 0000000000..6b6296bb7a
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/libcpu/mmu-vmsav8-64.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUAArch64
+ *
+ * @brief Definitions used in MMU setup.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LIBCPU_AARCH64_MMU_VMSAV8_64_H
+#define LIBCPU_AARCH64_MMU_VMSAV8_64_H
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <rtems.h>
+
+/* VMSAv8 Long-descriptor fields */
+#define MMU_DESC_AF ( 1 << 10 )
+#define MMU_DESC_SH_INNER ( ( 1 << 9 ) | ( 1 << 8 ) )
+#define MMU_DESC_WRITE_DISABLE ( 1 << 7 )
+/* PAGE and TABLE flags are the same bit, but only apply on certain levels */
+#define MMU_DESC_TYPE_TABLE ( 1 << 1 )
+#define MMU_DESC_TYPE_PAGE ( 1 << 1 )
+#define MMU_DESC_VALID ( 1 << 0 )
+#define MMU_DESC_MAIR_ATTR( val ) ( ( val & 0x3 ) << 2 )
+#define MMU_DESC_PAGE_TABLE_MASK 0xFFFFFFFFF000LL
+
+/* Page table configuration */
+#define MMU_PAGE_BITS 12
+#define MMU_PAGE_SIZE ( 1 << MMU_PAGE_BITS )
+#define MMU_BITS_PER_LEVEL 9
+#define MMU_TOP_LEVEL_PAGE_BITS ( 2 * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS )
+
+#define AARCH64_MMU_FLAGS_BASE \
+ ( MMU_DESC_VALID | MMU_DESC_SH_INNER | MMU_DESC_AF )
+
+#define AARCH64_MMU_DATA_RO_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE_CACHED AARCH64_MMU_DATA_RO_CACHED
+#define AARCH64_MMU_CODE_RW_CACHED AARCH64_MMU_DATA_RW_CACHED
+
+#define AARCH64_MMU_DATA_RO \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) | MMU_DESC_WRITE_DISABLE )
+#define AARCH64_MMU_CODE AARCH64_MMU_DATA_RO
+#define AARCH64_MMU_CODE_RW AARCH64_MMU_DATA_RW
+
+/* RW implied by not ORing in RO */
+#define AARCH64_MMU_DATA_RW_CACHED \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 3 ) )
+#define AARCH64_MMU_DATA_RW \
+ ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 1 ) )
+#define AARCH64_MMU_DEVICE ( AARCH64_MMU_FLAGS_BASE | MMU_DESC_MAIR_ATTR( 0 ) )
+
+rtems_status_code aarch64_mmu_map(
+ uintptr_t addr,
+ uint64_t size,
+ uint64_t flags
+);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* ASM */
+
+#endif /* LIBCPU_AARCH64_MMU_VMSAV8_64_H */
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
index dc2afdeca8..5bfddb6dad 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
@@ -6719,21 +6719,291 @@ static inline uint64_t _AArch64_Read_dbgauthstatus_el1( void )
#define AARCH64_DBGBCR_N_EL1_BT_GET( _reg ) \
( ( ( _reg ) >> 20 ) & 0xfU )
-static inline uint64_t _AArch64_Read_dbgbcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -6781,21 +7051,291 @@ static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgbvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgbvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGBVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgbvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGBVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgbvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGBVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGBVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgbvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgbvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGBVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGBVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7027,21 +7567,291 @@ static inline void _AArch64_Write_dbgvcr32_el2( uint64_t value )
#define AARCH64_DBGWCR_N_EL1_MASK_GET( _reg ) \
( ( ( _reg ) >> 24 ) & 0x1fU )
-static inline uint64_t _AArch64_Read_dbgwcr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwcr0_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWCR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWCR0_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwcr0_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWCR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWCR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwcr15_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWCR15_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwcr15_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWCR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
@@ -7065,21 +7875,291 @@ static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_GET( _reg ) \
( ( ( _reg ) >> 53 ) & 0x7ffULL )
-static inline uint64_t _AArch64_Read_dbgwvr_n_el1( void )
+static inline uint64_t _AArch64_Read_dbgwvr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr4_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR4_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr5_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR5_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr6_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR6_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr7_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR7_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr7_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR7_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr8_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR8_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr8_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR8_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr9_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR9_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr9_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR9_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr10_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR10_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr10_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR10_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr11_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR11_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr11_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR11_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr12_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR12_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr12_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR12_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr13_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR13_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr13_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR13_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr14_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DBGWVR14_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_dbgwvr14_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DBGWVR14_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+static inline uint64_t _AArch64_Read_dbgwvr15_el1( void )
{
uint64_t value;
__asm__ volatile (
- "mrs %0, DBGWVR_N_EL1" : "=&r" ( value ) : : "memory"
+ "mrs %0, DBGWVR15_EL1" : "=&r" ( value ) : : "memory"
);
return value;
}
-static inline void _AArch64_Write_dbgwvr_n_el1( uint64_t value )
+static inline void _AArch64_Write_dbgwvr15_el1( uint64_t value )
{
__asm__ volatile (
- "msr DBGWVR_N_EL1, %0" : : "r" ( value ) : "memory"
+ "msr DBGWVR15_EL1, %0" : : "r" ( value ) : "memory"
);
}
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index ae7e2bdcba..e1d9f0a5c2 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -524,6 +524,27 @@ typedef struct {
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame );
+
+RTEMS_NO_RETURN void
+_CPU_Exception_dispatch_and_resume( CPU_Exception_frame *frame );
+
+void _CPU_Exception_disable_thread_dispatch( void );
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame );
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame,
+ void *address );
+
+void _CPU_Exception_frame_make_resume_next_instruction(
+ CPU_Exception_frame *frame
+);
+
+void _AArch64_Exception_frame_copy(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+);
+
void _AArch64_Exception_default( CPU_Exception_frame *frame );
/** Type that can store a 32-bit integer or a pointer. */
diff --git a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
index a12bf994f1..47b034813c 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
@@ -37,8 +37,7 @@
#ifndef _RTEMS_SCORE_AARCH32_PMSA_H
#define _RTEMS_SCORE_AARCH32_PMSA_H
-#include <stddef.h>
-#include <stdint.h>
+#include <rtems/score/basedefs.h>
#ifdef __cplusplus
extern "C" {
@@ -154,7 +153,7 @@ extern "C" {
#define AARCH32_PMSA_DATA_READ_WRITE_CACHED \
( AARCH32_PMSA_ATTR_EN | \
AARCH32_PMSA_ATTR_XN | \
- AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO | \
+ AARCH32_PMSA_ATTR_AP( AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO ) | \
AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \
AARCH32_PMSA_ATTR_IDX( 0U ) )
@@ -165,6 +164,13 @@ extern "C" {
AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \
AARCH32_PMSA_ATTR_IDX( 1U ) )
+#define AARCH32_PMSA_DATA_READ_WRITE_SHARED \
+ ( AARCH32_PMSA_ATTR_EN | \
+ AARCH32_PMSA_ATTR_XN | \
+ AARCH32_PMSA_ATTR_AP( AARCH32_PMSA_ATTR_AP_EL1_RW_EL0_NO ) | \
+ AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_OUTER ) | \
+ AARCH32_PMSA_ATTR_IDX( 1U ) )
+
#define AARCH32_PMSA_DEVICE \
( AARCH32_PMSA_ATTR_EN | \
AARCH32_PMSA_ATTR_XN | \
@@ -172,6 +178,20 @@ extern "C" {
AARCH32_PMSA_ATTR_SH( AARCH32_PMSA_ATTR_SH_NO ) | \
AARCH32_PMSA_ATTR_IDX( 2U ) )
+/*
+ * The Cortex-R52 processor is not coherent and the inner shareability domain
+ * consists of an individual Cortex-R52 core. Thus for an SMP configuration,
+ * the read-write data must be configured as Non-cachable and Shareable. The
+ * outer shareability domain is external to the Cortex-R52 processor.
+ */
+#if defined(RTEMS_SMP)
+#define AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
+ AARCH32_PMSA_DATA_READ_WRITE_SHARED
+#else
+#define AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
+ AARCH32_PMSA_DATA_READ_WRITE_CACHED
+#endif
+
/**
* @brief The default section definitions shall be used by the BSP to define
* ::_AArch32_PMSA_Sections.
@@ -187,7 +207,7 @@ extern "C" {
}, { \
.begin = (uint32_t) bsp_section_fast_data_begin, \
.end = (uint32_t) bsp_section_fast_data_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_start_begin, \
.end = (uint32_t) bsp_section_start_end, \
@@ -207,23 +227,23 @@ extern "C" {
}, { \
.begin = (uint32_t) bsp_section_data_begin, \
.end = (uint32_t) bsp_section_data_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_bss_begin, \
.end = (uint32_t) bsp_section_bss_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_rtemsstack_begin, \
.end = (uint32_t) bsp_section_rtemsstack_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_work_begin, \
.end = (uint32_t) bsp_section_work_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_stack_begin, \
.end = (uint32_t) bsp_section_stack_end, \
- .attributes = AARCH32_PMSA_DATA_READ_WRITE_CACHED \
+ .attributes = AARCH32_PMSA_DATA_READ_WRITE_DEFAULT \
}, { \
.begin = (uint32_t) bsp_section_nocache_begin, \
.end = (uint32_t) bsp_section_nocache_end, \
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
index b8e3604fbb..c8d4442417 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
@@ -12,7 +12,7 @@
*
* Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
*
- * Copyright (c) 2006 OAR Corporation
+ * Copyright (c) 2006 On-Line Applications Research Corporation (OAR)
*
* Copyright (c) 2002 Advent Networks, Inc.
* Jay Monkman <jmonkman@adventnetworks.com>
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
index e224a5e56e..f2b2a75a39 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
@@ -1142,6 +1142,69 @@ typedef struct {
*/
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+#ifdef RTEMS_EXCEPTION_EXTENSIONS
+ /**
+ * @brief Resumes normal execution using the provided exception frame.
+ *
+ * This routine helps to avoid dead code in the exception handler epilogue and
+ * does not return. This routine may assume that the provided pointer is valid
+ * for resetting the exception stack.
+ *
+ * @param frame The CPU_Exception_frame describing the machine exception.
+ */
+ RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame );
+
+ /**
+ * @brief Performs thread dispatch and resumes normal execution.
+ *
+ * This routine helps to avoid dead code in the exception handler epilogue and
+ * does not return. This routine may assume that the provided pointer is valid
+ * for resetting the exception stack. This function is expected to decrement
+ * the ISR nest level and thread dispatch disable level in the Per_CPU_Control
+ * structure.
+ *
+ * @param frame The CPU_Exception_frame describing the machine
+ * exception.
+ */
+ RTEMS_NO_RETURN void _CPU_Exception_dispatch_and_resume(
+ CPU_Exception_frame *frame
+ );
+
+ /**
+ * @brief Disables thread dispatch.
+ *
+ * This must be called before calling _CPU_Exception_dispatch_and_resume
+ * since that function is expected to reduce the levels incremented below.
+ */
+ void _CPU_Exception_disable_thread_dispatch( void );
+
+ /**
+ * @brief Retrieves the generic exception class of the machine exception.
+ *
+ * @param frame The CPU_Exception_frame describing the machine
+ * exception.
+ * @return The signal associated with the CPU_Exception_frame.
+ */
+ int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame );
+
+ /**
+ * @brief Sets the execution address of the exception frame.
+ *
+ * @param frame The CPU_Exception_frame describing the machine exception.
+ * @param address The address at which execution should resume.
+ */
+ void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame, void *address );
+
+ /**
+ * @brief Sets the execution address of the exception frame to the next
+ * instruction.
+ *
+ * @param frame The CPU_Exception_frame describing the machine
+ * exception.
+ */
+ void _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *frame );
+#endif
+
/**
* @defgroup RTEMSScoreCPUExampleCPUEndian CPUEndian
*
diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S
index 45d1495af7..afd9b9644b 100644
--- a/cpukit/score/cpu/sparc/cpu_asm.S
+++ b/cpukit/score/cpu/sparc/cpu_asm.S
@@ -190,6 +190,18 @@ done_flushing:
! Try to update the is executing indicator of the heir context
mov 1, %g1
+#if defined(__FIX_LEON3FT_B2BST)
+ /*
+ * This is a workaround for GRLIB-TN-0011 (Technical Note on LEON3/FT
+ * AHB Lock Release During Atomic Operation). Affected components are
+ * the GR712RC, UT699, UT699E, UT700, and LEON3FT-RTAX. Strictly, the
+ * workaround is only necessary if the MMU is enabled. Using the
+ * __FIX_LEON3FT_B2BST is not 100% appropriate, but the best thing we
+ * can use to enable the workaround. An alignment padding is filled
+ * with nops.
+ */
+.align 16
+#endif
.Ltry_update_is_executing:
swap [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
diff --git a/cpukit/score/src/coremsginsert.c b/cpukit/score/src/coremsginsert.c
index 14b023d9e1..d9e88ae0eb 100644
--- a/cpukit/score/src/coremsginsert.c
+++ b/cpukit/score/src/coremsginsert.c
@@ -24,14 +24,16 @@
#if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
static bool _CORE_message_queue_Order(
- const void *left,
+ const void *key,
+ const Chain_Node *left,
const Chain_Node *right
)
{
const int *left_priority;
const CORE_message_queue_Buffer *right_message;
- left_priority = (const int *) left;
+ (void) left;
+ left_priority = (const int *) key;
right_message = (const CORE_message_queue_Buffer *) right;
return *left_priority <
diff --git a/cpukit/score/src/coretodset.c b/cpukit/score/src/coretodset.c
index b04242a0da..644cac554b 100644
--- a/cpukit/score/src/coretodset.c
+++ b/cpukit/score/src/coretodset.c
@@ -52,13 +52,13 @@ Status_Control _TOD_Set(
for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
Per_CPU_Control *cpu;
Watchdog_Header *header;
- ISR_lock_Context lock_context;
+ ISR_lock_Context lock_context_2;
Watchdog_Control *first;
cpu = _Per_CPU_Get_by_index( cpu_index );
header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
- _ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context );
+ _ISR_lock_ISR_disable_and_acquire( &cpu->Watchdog.Lock, &lock_context_2 );
first = _Watchdog_Header_first( header );
@@ -68,11 +68,11 @@ Status_Control _TOD_Set(
first,
tod_as_ticks,
&cpu->Watchdog.Lock,
- &lock_context
+ &lock_context_2
);
}
- _ISR_lock_Release_and_ISR_enable( &cpu->Watchdog.Lock, &lock_context );
+ _ISR_lock_Release_and_ISR_enable( &cpu->Watchdog.Lock, &lock_context_2 );
}
_TOD.is_set = true;
diff --git a/cpukit/score/src/exceptionmapping.c b/cpukit/score/src/exceptionmapping.c
new file mode 100644
index 0000000000..19f04cc31d
--- /dev/null
+++ b/cpukit/score/src/exceptionmapping.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreExceptionMapping
+ *
+ * @brief Machine exception to POSIX signal mapping.
+ */
+
+/*
+ * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+#include <signal.h>
+#include <rtems/score/exception.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadimpl.h>
+
+static _Thread_local int raise_signal;
+static _Thread_local Thread_Action _Exception_Raise_signal_action;
+
+static void _Exception_Raise_handler(
+ Thread_Control *executing,
+ Thread_Action *action,
+ ISR_lock_Context *lock_context
+)
+{
+ _Thread_State_release( executing, lock_context );
+ raise( raise_signal );
+ _Thread_State_acquire( executing, lock_context );
+}
+
+/*
+ * Exception handler. Map the exception class to SIGFPE, SIGSEGV
+ * or SIGILL for Ada or other runtimes.
+ */
+void _Exception_Raise_signal(
+ Internal_errors_Source source,
+ bool always_set_to_false,
+ Internal_errors_t code
+)
+{
+ CPU_Exception_frame *ef;
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+ bool system_up;
+
+ if ( source != RTEMS_FATAL_SOURCE_EXCEPTION ) {
+ return;
+ }
+
+ /* If the CPU isn't UP yet, there isn't anything to send a signal to */
+#ifdef RTEMS_SMP
+ system_up = ( _Per_CPU_Get_state( cpu_self ) == PER_CPU_STATE_UP );
+#else
+ system_up = ( _System_state_Get() == SYSTEM_STATE_UP );
+#endif
+
+ if ( !system_up ) {
+ return;
+ }
+
+  ef = (CPU_Exception_frame *) code;
+ raise_signal = _CPU_Exception_frame_get_signal( ef );
+
+ if ( raise_signal < 0 ) {
+ return;
+ }
+
+ _Thread_Add_post_switch_action(
+ _Per_CPU_Get_executing( cpu_self ),
+ &_Exception_Raise_signal_action,
+ _Exception_Raise_handler
+ );
+
+  /* Disable thread dispatch so that the dispatch below occurs in a controlled manner */
+ _CPU_Exception_disable_thread_dispatch();
+
+ /* Perform dispatch and resume execution */
+ _CPU_Exception_dispatch_and_resume( ef );
+}
diff --git a/cpukit/score/src/kern_tc.c b/cpukit/score/src/kern_tc.c
index b5f761aae0..ea7d9ec953 100644
--- a/cpukit/score/src/kern_tc.c
+++ b/cpukit/score/src/kern_tc.c
@@ -5,9 +5,10 @@
*
* @brief This source file contains the definition of
* ::_Timecounter, ::_Timecounter_Time_second, and ::_Timecounter_Time_uptime
- * and the implementation of _Timecounter_Binuptime(),
- * _Timecounter_Nanouptime(), _Timecounter_Microuptime(),
- * _Timecounter_Bintime(), _Timecounter_Nanotime(), _Timecounter_Microtime(),
+ * and the implementation of _Timecounter_Set_NTP_update_second(),
+ * _Timecounter_Binuptime(), _Timecounter_Nanouptime(),
+ * _Timecounter_Microuptime(), _Timecounter_Bintime(),
+ * _Timecounter_Nanotime(), _Timecounter_Microtime(),
* _Timecounter_Getbinuptime(), _Timecounter_Getnanouptime(),
* _Timecounter_Getmicrouptime(), _Timecounter_Getbintime(),
* _Timecounter_Getnanotime(), _Timecounter_Getmicrotime(),
@@ -16,6 +17,8 @@
*/
/*-
+ * SPDX-License-Identifier: Beerware
+ *
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
@@ -24,7 +27,6 @@
* ----------------------------------------------------------------------------
*
* Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
- * All rights reserved.
*
* Portions of this software were developed by Julien Ridoux at the University
* of Melbourne under sponsorship from the FreeBSD Foundation.
@@ -59,11 +61,11 @@
#include <rtems/score/smp.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/watchdogimpl.h>
+#include <rtems/rtems/clock.h>
#endif /* __rtems__ */
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
+__FBSDID("$FreeBSD$");
-#include "opt_compat.h"
#include "opt_ntp.h"
#include "opt_ffclock.h"
@@ -96,10 +98,7 @@ ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
#define hz rtems_clock_get_ticks_per_second()
#define printf(...)
-#define bcopy(x, y, z) memcpy(y, x, z);
#define log(...)
-/* FIXME: https://devel.rtems.org/ticket/2348 */
-#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
static inline void
atomic_thread_fence_acq(void)
@@ -128,6 +127,24 @@ atomic_store_rel_int(Atomic_Uint *i, u_int val)
_Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
}
+
+static inline void *
+atomic_load_ptr(void *ptr)
+{
+
+ return ((void *)_Atomic_Load_uintptr(ptr, ATOMIC_ORDER_RELAXED));
+}
+
+static Timecounter_NTP_update_second _Timecounter_NTP_update_second;
+
+void
+_Timecounter_Set_NTP_update_second(Timecounter_NTP_update_second handler)
+{
+
+ _Timecounter_NTP_update_second = handler;
+}
+
+#define ntp_update_second(a, b) (*ntp_update_second_handler)(a, b)
#endif /* __rtems__ */
/*
@@ -169,6 +186,7 @@ struct timehands {
struct timecounter *th_counter;
int64_t th_adjustment;
uint64_t th_scale;
+ uint32_t th_large_delta;
uint32_t th_offset_count;
struct bintime th_offset;
struct bintime th_bintime;
@@ -184,6 +202,40 @@ struct timehands {
struct timehands *th_next;
};
+#ifndef __rtems__
+static struct timehands ths[16] = {
+ [0] = {
+ .th_counter = &dummy_timecounter,
+ .th_scale = (uint64_t)-1 / 1000000,
+ .th_large_delta = 1000000,
+ .th_offset = { .sec = 1 },
+ .th_generation = 1,
+ },
+};
+
+static struct timehands *volatile timehands = &ths[0];
+struct timecounter *timecounter = &dummy_timecounter;
+static struct timecounter *timecounters = &dummy_timecounter;
+
+/* Mutex to protect the timecounter list. */
+static struct mtx tc_lock;
+MTX_SYSINIT(tc_lock, &tc_lock, "tc", MTX_DEF);
+
+int tc_min_ticktock_freq = 1;
+#else /* __rtems__ */
+/*
+ * In FreeBSD, the timehands count is a tuning option from two to 16. The
+ * tuning option was added since it is inexpensive and some FreeBSD users asked
+ * for it to play around. The default value is two. One system which did not
+ * work with two timehands was a system with one processor and a specific PPS
+ * device.
+ *
+ * For RTEMS, in uniprocessor configurations, just use one timehand since the
+ * update is done with interrupt disabled.
+ *
+ * In SMP configurations, use a fixed set of two timehands until someone
+ * reports an issue.
+ */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th1 = {
@@ -194,7 +246,8 @@ static struct timehands th0 = {
.th_counter = &dummy_timecounter,
.th_scale = (uint64_t)-1 / 1000000,
.th_offset = { .sec = 1 },
- .th_generation = 1,
+ .th_large_delta = 1000000,
+ .th_generation = UINT_MAX,
#ifdef __rtems__
.th_bintime = { .sec = TOD_SECONDS_1970_THROUGH_1988 },
.th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
@@ -210,10 +263,6 @@ static struct timehands th0 = {
static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
-#ifndef __rtems__
-static struct timecounter *timecounters = &dummy_timecounter;
-
-int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */
#ifndef __rtems__
@@ -225,17 +274,33 @@ volatile int32_t time_uptime = 1;
#endif /* __rtems__ */
#ifndef __rtems__
+/*
+ * The system time is always computed by summing the estimated boot time and the
+ * system uptime. The timehands track boot time, but it changes when the system
+ * time is set by the user, stepped by ntpd or adjusted when resuming. It
+ * is set to new_time - uptime.
+ */
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
- NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
+SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_kern_boottime, "S,timeval",
+ "Estimated system boottime");
-SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
-static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
+SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
+static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc,
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
static int timestepwarnings;
-SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
+SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RWTUN,
&timestepwarnings, 0, "Log time steps");
+static int timehands_count = 2;
+SYSCTL_INT(_kern_timecounter, OID_AUTO, timehands_count,
+ CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &timehands_count, 0, "Count of timehands in rotation");
+
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
@@ -253,6 +318,7 @@ SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
volatile int rtc_generation = 1;
static int tc_chosen; /* Non-zero if a specific tc was chosen via sysctl. */
+static char tc_from_tunable[16];
#endif /* __rtems__ */
static void tc_windup(struct bintime *new_boottimebin);
@@ -264,6 +330,7 @@ static void _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
void dtrace_getnanotime(struct timespec *tsp);
+void dtrace_getnanouptime(struct timespec *tsp);
#ifndef __rtems__
static int
@@ -273,7 +340,8 @@ sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
getboottime(&boottime);
-#ifndef __mips__
+/* i386 is the only arch which uses a 32bits time_t */
+#ifdef __amd64__
#ifdef SCTL_MASK32
int tv[2];
@@ -328,20 +396,85 @@ tc_delta(struct timehands *th)
* the comment in <sys/time.h> for a description of these 12 functions.
*/
-#ifdef FFCLOCK
-void
-fbclock_binuptime(struct bintime *bt)
+static __inline void
+bintime_off(struct bintime *bt, u_int off)
+{
+ struct timehands *th;
+ struct bintime *btp;
+ uint64_t scale, x;
+#ifndef __rtems__
+ u_int delta, gen, large_delta;
+#else /* __rtems__ */
+ uint32_t delta, large_delta;
+ u_int gen;
+#endif /* __rtems__ */
+
+ do {
+ th = timehands;
+ gen = atomic_load_acq_int(&th->th_generation);
+ btp = (struct bintime *)((vm_offset_t)th + off);
+ *bt = *btp;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
+ atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
+ } while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ x = (scale >> 32) * delta;
+ bt->sec += x >> 32;
+ bintime_addx(bt, x << 32);
+ bintime_addx(bt, (scale & 0xffffffff) * delta);
+ } else {
+ bintime_addx(bt, scale * delta);
+ }
+}
+#define GETTHBINTIME(dst, member) \
+do { \
+ _Static_assert(_Generic(((struct timehands *)NULL)->member, \
+ struct bintime: 1, default: 0) == 1, \
+ "struct timehands member is not of struct bintime type"); \
+ bintime_off(dst, __offsetof(struct timehands, member)); \
+} while (0)
+
+static __inline void
+getthmember(void *out, size_t out_size, u_int off)
{
struct timehands *th;
- unsigned int gen;
+ u_int gen;
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
+ memcpy(out, (char *)th + off, out_size);
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+}
+#define GETTHMEMBER(dst, member) \
+do { \
+ _Static_assert(_Generic(*dst, \
+ __typeof(((struct timehands *)NULL)->member): 1, \
+ default: 0) == 1, \
+ "*dst and struct timehands member have different types"); \
+ getthmember(dst, sizeof(*dst), __offsetof(struct timehands, \
+ member)); \
+} while (0)
+
+#ifdef FFCLOCK
+void
+fbclock_binuptime(struct bintime *bt)
+{
+
+ GETTHBINTIME(bt, th_offset);
}
void
@@ -365,16 +498,8 @@ fbclock_microuptime(struct timeval *tvp)
void
fbclock_bintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -398,116 +523,88 @@ fbclock_microtime(struct timeval *tvp)
void
fbclock_getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
fbclock_getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
fbclock_getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
fbclock_getbintime(struct bintime *bt)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
fbclock_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
fbclock_getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- unsigned int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#else /* !FFCLOCK */
+
void
binuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_offset);
}
#ifdef __rtems__
sbintime_t
_Timecounter_Sbinuptime(void)
{
struct timehands *th;
- uint32_t gen;
sbintime_t sbt;
+ uint64_t scale;
+ uint32_t delta;
+ uint32_t large_delta;
+ u_int gen;
do {
th = timehands;
gen = atomic_load_acq_int(&th->th_generation);
sbt = bttosbt(th->th_offset);
- sbt += (th->th_scale * tc_delta(th)) >> 32;
+ scale = th->th_scale;
+ delta = tc_delta(th);
+ large_delta = th->th_large_delta;
atomic_thread_fence_acq();
+#if defined(RTEMS_SMP)
} while (gen == 0 || gen != th->th_generation);
+#else
+ } while (gen != th->th_generation);
+#endif
+
+ if (__predict_false(delta >= large_delta)) {
+ /* Avoid overflow for scale * delta. */
+ sbt += (scale >> 32) * delta;
+ sbt += ((scale & 0xffffffff) * delta) >> 32;
+ } else {
+ sbt += (scale * delta) >> 32;
+ }
return (sbt);
}
@@ -534,16 +631,8 @@ microuptime(struct timeval *tvp)
void
bintime(struct bintime *bt)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- bintime_addx(bt, th->th_scale * tc_delta(th));
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHBINTIME(bt, th_bintime);
}
void
@@ -567,88 +656,60 @@ microtime(struct timeval *tvp)
void
getbinuptime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_offset;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_offset);
}
void
getnanouptime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timespec(&th->th_offset, tsp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
void
getmicrouptime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
+ struct bintime bt;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- bintime2timeval(&th->th_offset, tvp);
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timeval(&bt, tvp);
}
void
getbintime(struct bintime *bt)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *bt = th->th_bintime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(bt, th_bintime);
}
void
getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
}
void
getmicrotime(struct timeval *tvp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tvp = th->th_microtime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tvp, th_microtime);
}
#endif /* FFCLOCK */
+#ifdef __rtems__
+void
+rtems_clock_get_boot_time(struct timespec *boottime)
+{
+ struct bintime boottimebin;
+
+ getboottimebin(&boottimebin);
+ bintime2timespec(&boottimebin, boottime);
+}
+#endif /* __rtems__ */
void
getboottime(struct timeval *boottime)
{
@@ -661,15 +722,8 @@ getboottime(struct timeval *boottime)
void
getboottimebin(struct bintime *boottimebin)
{
- struct timehands *th;
- u_int gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *boottimebin = th->th_boottime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(boottimebin, th_boottime);
}
#ifdef FFCLOCK
@@ -1186,15 +1240,22 @@ getmicrotime(struct timeval *tvp)
void
dtrace_getnanotime(struct timespec *tsp)
{
- struct timehands *th;
- uint32_t gen;
- do {
- th = timehands;
- gen = atomic_load_acq_int(&th->th_generation);
- *tsp = th->th_nanotime;
- atomic_thread_fence_acq();
- } while (gen == 0 || gen != th->th_generation);
+ GETTHMEMBER(tsp, th_nanotime);
+}
+
+/*
+ * This is a clone of getnanouptime used for time since boot.
+ * The dtrace_ prefix prevents fbt from creating probes for
+ * it so an uptime that can be safely used in all fbt probes.
+ */
+void
+dtrace_getnanouptime(struct timespec *tsp)
+{
+ struct bintime bt;
+
+ GETTHMEMBER(&bt, th_offset);
+ bintime2timespec(&bt, tsp);
}
#endif /* __rtems__ */
@@ -1375,26 +1436,32 @@ tc_init(struct timecounter *tc)
tc->tc_quality);
}
- tc->tc_next = timecounters;
- timecounters = tc;
/*
* Set up sysctl tree for this counter.
*/
tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
- CTLFLAG_RW, 0, "timecounter description", "timecounter");
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "timecounter description", "timecounter");
SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
"mask for implemented bits");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_get, "IU", "current timecounter value");
+ "counter", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_get, "IU",
+ "current timecounter value");
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
- "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
- sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
+ "frequency", CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
+ sizeof(*tc), sysctl_kern_timecounter_freq, "QU",
+ "timecounter frequency");
SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
"quality", CTLFLAG_RD, &(tc->tc_quality), 0,
"goodness of time counter");
+
+ mtx_lock(&tc_lock);
+ tc->tc_next = timecounters;
+ timecounters = tc;
+
/*
* Do not automatically switch if the current tc was specifically
* chosen. Never automatically use a timecounter with negative quality.
@@ -1402,21 +1469,31 @@ tc_init(struct timecounter *tc)
* worse since this timecounter may not be monotonic.
*/
if (tc_chosen)
- return;
+ goto unlock;
if (tc->tc_quality < 0)
- return;
-#endif /* __rtems__ */
+ goto unlock;
+ if (tc_from_tunable[0] != '\0' &&
+ strcmp(tc->tc_name, tc_from_tunable) == 0) {
+ tc_chosen = 1;
+ tc_from_tunable[0] = '\0';
+ } else {
+ if (tc->tc_quality < timecounter->tc_quality)
+ goto unlock;
+ if (tc->tc_quality == timecounter->tc_quality &&
+ tc->tc_frequency < timecounter->tc_frequency)
+ goto unlock;
+ }
+ (void)tc->tc_get_timecount(tc);
+ timecounter = tc;
+unlock:
+ mtx_unlock(&tc_lock);
+#else /* __rtems__ */
if (tc->tc_quality < timecounter->tc_quality)
return;
if (tc->tc_quality == timecounter->tc_quality &&
tc->tc_frequency < timecounter->tc_frequency)
return;
-#ifndef __rtems__
- (void)tc->tc_get_timecount(tc);
- (void)tc->tc_get_timecount(tc);
-#endif /* __rtems__ */
timecounter = tc;
-#ifdef __rtems__
tc_windup(NULL);
#endif /* __rtems__ */
}
@@ -1504,6 +1581,40 @@ _Timecounter_Set_clock(const struct bintime *_bt,
}
/*
+ * Recalculate the scaling factor. We want the number of 1/2^64
+ * fractions of a second per period of the hardware counter, taking
+ * into account the th_adjustment factor which the NTP PLL/adjtime(2)
+ * processing provides us with.
+ *
+ * The th_adjustment is nanoseconds per second with 32 bit binary
+ * fraction and we want 64 bit binary fraction of second:
+ *
+ * x = a * 2^32 / 10^9 = a * 4.294967296
+ *
+ * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
+ * we can only multiply by about 850 without overflowing, that
+ * leaves no suitably precise fractions for multiply before divide.
+ *
+ * Divide before multiply with a fraction of 2199/512 results in a
+ * systematic undercompensation of 10PPM of th_adjustment. On a
+ * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
+ *
+ * We happily sacrifice the lowest of the 64 bits of our result
+ * to the goddess of code clarity.
+ */
+static void
+recalculate_scaling_factor_and_large_delta(struct timehands *th)
+{
+ uint64_t scale;
+
+ scale = (uint64_t)1 << 63;
+ scale += (th->th_adjustment / 1024) * 2199;
+ scale /= th->th_counter->tc_frequency;
+ th->th_scale = scale * 2;
+ th->th_large_delta = MIN(((uint64_t)1 << 63) / scale, UINT_MAX);
+}
+
+/*
* Initialize the next struct timehands in the ring and make
* it the active timehands. Along the way we might switch to a different
* timecounter and/or do seconds processing in NTP. Slightly magic.
@@ -1524,11 +1635,17 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
{
struct bintime bt;
+ struct timecounter *tc;
struct timehands *th, *tho;
- uint64_t scale;
- uint32_t delta, ncount, ogen;
+ uint32_t delta, ncount;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
int i;
time_t t;
+#ifdef __rtems__
+ Timecounter_NTP_update_second ntp_update_second_handler;
+#endif
/*
* Make the next timehands a copy of the current one, but do
@@ -1542,14 +1659,12 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
tho = timehands;
#if defined(RTEMS_SMP)
th = tho->th_next;
-#else
- th = tho;
-#endif
ogen = th->th_generation;
th->th_generation = 0;
atomic_thread_fence_rel();
-#if defined(RTEMS_SMP)
- bcopy(tho, th, offsetof(struct timehands, th_generation));
+ memcpy(th, tho, offsetof(struct timehands, th_generation));
+#else
+ th = tho;
#endif
if (new_boottimebin != NULL)
th->th_boottime = *new_boottimebin;
@@ -1559,9 +1674,10 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
* changing timecounters, a counter value from the new timecounter.
* Update the offset fields accordingly.
*/
+ tc = atomic_load_ptr(&timecounter);
delta = tc_delta(th);
- if (th->th_counter != timecounter)
- ncount = timecounter->tc_get_timecount(timecounter);
+ if (th->th_counter != tc)
+ ncount = tc->tc_get_timecount(tc);
else
ncount = 0;
#ifdef FFCLOCK
@@ -1595,7 +1711,7 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
#endif /* __rtems__ */
/*
- * Deal with NTP second processing. The for loop normally
+ * Deal with NTP second processing. The loop normally
* iterates at most once, but in extreme situations it might
* keep NTP sane if timeouts are not run for several seconds.
* At boot, the time step can be large when the TOD hardware
@@ -1605,69 +1721,57 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
*/
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+#ifdef __rtems__
+ ntp_update_second_handler = _Timecounter_NTP_update_second;
+ if (ntp_update_second_handler != NULL) {
+#endif /* __rtems__ */
i = bt.sec - tho->th_microtime.tv_sec;
- if (i > LARGE_STEP)
- i = 2;
- for (; i > 0; i--) {
- t = bt.sec;
- ntp_update_second(&th->th_adjustment, &bt.sec);
- if (bt.sec != t)
- th->th_boottime.sec += bt.sec - t;
+ if (i > 0) {
+ if (i > LARGE_STEP)
+ i = 2;
+
+ do {
+ t = bt.sec;
+ ntp_update_second(&th->th_adjustment, &bt.sec);
+ if (bt.sec != t)
+ th->th_boottime.sec += bt.sec - t;
+ --i;
+ } while (i > 0);
+
+ recalculate_scaling_factor_and_large_delta(th);
+ }
+#ifdef __rtems__
}
+#endif /* __rtems__ */
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
/* Now is a good time to change timecounters. */
- if (th->th_counter != timecounter) {
+ if (th->th_counter != tc) {
#ifndef __rtems__
#ifndef __arm__
- if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
+ if ((tc->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep++;
if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
- th->th_counter = timecounter;
+ th->th_counter = tc;
th->th_offset_count = ncount;
#ifndef __rtems__
- tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
- (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
+ tc_min_ticktock_freq = max(1, tc->tc_frequency /
+ (((uint64_t)tc->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
+ recalculate_scaling_factor_and_large_delta(th);
#ifdef FFCLOCK
ffclock_change_tc(th);
#endif
}
- /*-
- * Recalculate the scaling factor. We want the number of 1/2^64
- * fractions of a second per period of the hardware counter, taking
- * into account the th_adjustment factor which the NTP PLL/adjtime(2)
- * processing provides us with.
- *
- * The th_adjustment is nanoseconds per second with 32 bit binary
- * fraction and we want 64 bit binary fraction of second:
- *
- * x = a * 2^32 / 10^9 = a * 4.294967296
- *
- * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
- * we can only multiply by about 850 without overflowing, that
- * leaves no suitably precise fractions for multiply before divide.
- *
- * Divide before multiply with a fraction of 2199/512 results in a
- * systematic undercompensation of 10PPM of th_adjustment. On a
- * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
- *
- * We happily sacrifice the lowest of the 64 bits of our result
- * to the goddess of code clarity.
- *
- */
- scale = (uint64_t)1 << 63;
- scale += (th->th_adjustment / 1024) * 2199;
- scale /= th->th_counter->tc_frequency;
- th->th_scale = scale * 2;
-
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
@@ -1675,6 +1779,9 @@ _Timecounter_Windup(struct bintime *new_boottimebin,
if (++ogen == 0)
ogen = 1;
atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
#ifdef FFCLOCK
@@ -1712,23 +1819,28 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
struct timecounter *newtc, *tc;
int error;
+ mtx_lock(&tc_lock);
tc = timecounter;
strlcpy(newname, tc->tc_name, sizeof(newname));
+ mtx_unlock(&tc_lock);
error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
if (error != 0 || req->newptr == NULL)
return (error);
+
+ mtx_lock(&tc_lock);
/* Record that the tc in use now was specifically chosen. */
tc_chosen = 1;
- if (strcmp(newname, tc->tc_name) == 0)
+ if (strcmp(newname, tc->tc_name) == 0) {
+ mtx_unlock(&tc_lock);
return (0);
+ }
for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
if (strcmp(newname, newtc->tc_name) != 0)
continue;
/* Warm up new timecounter. */
(void)newtc->tc_get_timecount(newtc);
- (void)newtc->tc_get_timecount(newtc);
timecounter = newtc;
@@ -1740,16 +1852,16 @@ sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
* use any locking and that it can be called in hard interrupt
* context via 'tc_windup()'.
*/
- return (0);
+ break;
}
- return (EINVAL);
+ mtx_unlock(&tc_lock);
+ return (newtc != NULL ? 0 : EINVAL);
}
-
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
- 0, 0, sysctl_kern_timecounter_hardware, "A",
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware,
+ CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_hardware, "A",
"Timecounter hardware selected");
-
/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
@@ -1758,19 +1870,26 @@ sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
struct timecounter *tc;
int error;
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
sbuf_new_for_sysctl(&sb, NULL, 0, req);
+ mtx_lock(&tc_lock);
for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
if (tc != timecounters)
sbuf_putc(&sb, ' ');
sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
}
+ mtx_unlock(&tc_lock);
error = sbuf_finish(&sb);
sbuf_delete(&sb);
return (error);
}
-SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
- 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
+SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
+ sysctl_kern_timecounter_choice, "A",
+ "Timecounter hardware detected");
#endif /* __rtems__ */
#ifndef __rtems__
@@ -1814,10 +1933,10 @@ pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
tv.tv_usec = fapi->timeout.tv_nsec / 1000;
timo = tvtohz(&tv);
}
- aseq = pps->ppsinfo.assert_sequence;
- cseq = pps->ppsinfo.clear_sequence;
- while (aseq == pps->ppsinfo.assert_sequence &&
- cseq == pps->ppsinfo.clear_sequence) {
+ aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
+ cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
+ while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
+ cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
if (pps->flags & PPSFLAG_MTX_SPIN) {
err = msleep_spin(pps, pps->driver_mtx,
@@ -2155,27 +2274,38 @@ _Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
{
struct bintime bt;
struct timehands *th;
- uint32_t ogen;
+#if defined(RTEMS_SMP)
+ u_int ogen;
+#endif
th = timehands;
+#if defined(RTEMS_SMP)
ogen = th->th_generation;
+ th->th_generation = 0;
+ atomic_thread_fence_rel();
+#endif
+
th->th_offset_count = offset;
bintime_addx(&th->th_offset, th->th_scale * delta);
-
bt = th->th_offset;
bintime_add(&bt, &th->th_boottime);
+
/* Update the UTC timestamps used by the get*() functions. */
th->th_bintime = bt;
bintime2timeval(&bt, &th->th_microtime);
bintime2timespec(&bt, &th->th_nanotime);
+#if defined(RTEMS_SMP)
/*
* Now that the struct timehands is again consistent, set the new
* generation number, making sure to not make it zero.
*/
if (++ogen == 0)
ogen = 1;
- th->th_generation = ogen;
+ atomic_store_rel_int(&th->th_generation, ogen);
+#else
+ atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
+#endif
/* Go live with the new struct timehands. */
time_second = th->th_microtime.tv_sec;
@@ -2229,6 +2359,28 @@ done:
return (0);
}
+/* Set up the requested number of timehands. */
+static void
+inittimehands(void *dummy)
+{
+ struct timehands *thp;
+ int i;
+
+ TUNABLE_INT_FETCH("kern.timecounter.timehands_count",
+ &timehands_count);
+ if (timehands_count < 1)
+ timehands_count = 1;
+ if (timehands_count > nitems(ths))
+ timehands_count = nitems(ths);
+ for (i = 1, thp = &ths[0]; i < timehands_count; thp = &ths[i++])
+ thp->th_next = &ths[i];
+ thp->th_next = &ths[0];
+
+ TUNABLE_STR_FETCH("kern.timecounter.hardware", tc_from_tunable,
+ sizeof(tc_from_tunable));
+}
+SYSINIT(timehands, SI_SUB_TUNABLES, SI_ORDER_ANY, inittimehands, NULL);
+
static void
inittimecounter(void *dummy)
{
@@ -2259,9 +2411,9 @@ inittimecounter(void *dummy)
#ifdef FFCLOCK
ffclock_init();
#endif
+
/* warm up new timecounter (again) and get rolling. */
(void)timecounter->tc_get_timecount(timecounter);
- (void)timecounter->tc_get_timecount(timecounter);
mtx_lock_spin(&tc_setclock_mtx);
tc_windup(NULL);
mtx_unlock_spin(&tc_setclock_mtx);
@@ -2274,8 +2426,8 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;
-static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
-static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
+DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
+DPCPU_DEFINE_STATIC(unsigned, tc_cpu_ticks_last);
static uint64_t
tc_cpu_ticks(void)
@@ -2449,7 +2601,6 @@ tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
enabled = 0;
return (enabled);
}
-#endif /* __rtems__ */
#ifdef COMPAT_FREEBSD32
uint32_t
@@ -2476,3 +2627,79 @@ tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
return (enabled);
}
#endif
+
+#include "opt_ddb.h"
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(timecounter, db_show_timecounter)
+{
+ struct timehands *th;
+ struct timecounter *tc;
+ u_int val1, val2;
+
+ th = timehands;
+ tc = th->th_counter;
+ val1 = tc->tc_get_timecount(tc);
+ __compiler_membar();
+ val2 = tc->tc_get_timecount(tc);
+
+ db_printf("timecounter %p %s\n", tc, tc->tc_name);
+ db_printf(" mask %#x freq %ju qual %d flags %#x priv %p\n",
+ tc->tc_counter_mask, (uintmax_t)tc->tc_frequency, tc->tc_quality,
+ tc->tc_flags, tc->tc_priv);
+ db_printf(" val %#x %#x\n", val1, val2);
+ db_printf("timehands adj %#jx scale %#jx ldelta %d off_cnt %d gen %d\n",
+ (uintmax_t)th->th_adjustment, (uintmax_t)th->th_scale,
+ th->th_large_delta, th->th_offset_count, th->th_generation);
+ db_printf(" offset %jd %jd boottime %jd %jd\n",
+ (intmax_t)th->th_offset.sec, (uintmax_t)th->th_offset.frac,
+ (intmax_t)th->th_boottime.sec, (uintmax_t)th->th_boottime.frac);
+}
+#endif
+#else /* __rtems__ */
+RTEMS_ALIAS(_Timecounter_Nanotime)
+void rtems_clock_get_realtime(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Bintime)
+void rtems_clock_get_realtime_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Microtime)
+void rtems_clock_get_realtime_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getnanotime)
+void rtems_clock_get_realtime_coarse(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Getbintime)
+void rtems_clock_get_realtime_coarse_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getmicrotime)
+void rtems_clock_get_realtime_coarse_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Nanouptime)
+void rtems_clock_get_monotonic(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Binuptime)
+void rtems_clock_get_monotonic_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Sbinuptime)
+sbintime_t rtems_clock_get_monotonic_sbintime(void);
+
+RTEMS_ALIAS(_Timecounter_Microuptime)
+void rtems_clock_get_monotonic_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getnanouptime)
+void rtems_clock_get_monotonic_coarse(struct timespec *);
+
+RTEMS_ALIAS(_Timecounter_Getbinuptime)
+void rtems_clock_get_monotonic_coarse_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getmicrouptime)
+void rtems_clock_get_monotonic_coarse_timeval(struct timeval *);
+
+RTEMS_ALIAS(_Timecounter_Getboottimebin)
+void rtems_clock_get_boot_time_bintime(struct bintime *);
+
+RTEMS_ALIAS(_Timecounter_Getboottime)
+void rtems_clock_get_boot_time_timeval(struct timeval *);
+#endif /* __rtems__ */
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index cb306c9763..63a7eb13ef 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -144,7 +144,6 @@ static void _MPCI_Create_server( void )
config.scheduler = &_Scheduler_Table[ 0 ];
config.name = _Objects_Build_name( 'M', 'P', 'C', 'I' );
config.priority = PRIORITY_PSEUDO_ISR;
- config.budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
config.is_fp = CPU_ALL_TASKS_ARE_FP;
config.stack_size = _Stack_Minimum()
+ _MPCI_Configuration.extra_mpci_receive_server_stack
diff --git a/cpukit/score/src/rbtreeappend.c b/cpukit/score/src/rbtreeappend.c
new file mode 100644
index 0000000000..e36f6bc805
--- /dev/null
+++ b/cpukit/score/src/rbtreeappend.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreRBTree
+ *
+ * @brief This source file contains the implementation of
+ * _RBTree_Append().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/rbtreeimpl.h>
+
+void _RBTree_Append( RBTree_Control *the_rbtree, RBTree_Node *the_node )
+{
+ RBTree_Node **link;
+ RBTree_Node *parent;
+
+ link = _RBTree_Root_reference( the_rbtree );
+ parent = NULL;
+
+ while ( *link != NULL ) {
+ parent = *link;
+ link = _RBTree_Right_reference( parent );
+ }
+
+ _RBTree_Add_child( the_node, parent, link );
+ _RBTree_Insert_color( the_rbtree, the_node );
+}
diff --git a/cpukit/score/src/rbtreeprepend.c b/cpukit/score/src/rbtreeprepend.c
new file mode 100644
index 0000000000..f154f51d36
--- /dev/null
+++ b/cpukit/score/src/rbtreeprepend.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreRBTree
+ *
+ * @brief This source file contains the implementation of
+ * _RBTree_Prepend().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/rbtreeimpl.h>
+
+void _RBTree_Prepend( RBTree_Control *the_rbtree, RBTree_Node *the_node )
+{
+ RBTree_Node **link;
+ RBTree_Node *parent;
+
+ link = _RBTree_Root_reference( the_rbtree );
+ parent = NULL;
+
+ while ( *link != NULL ) {
+ parent = *link;
+ link = _RBTree_Left_reference( parent );
+ }
+
+ _RBTree_Add_child( the_node, parent, link );
+ _RBTree_Insert_color( the_rbtree, the_node );
+}
diff --git a/cpukit/score/src/schedulercbs.c b/cpukit/score/src/schedulercbs.c
index 8e5bf86f27..bedaf8e1da 100644
--- a/cpukit/score/src/schedulercbs.c
+++ b/cpukit/score/src/schedulercbs.c
@@ -21,10 +21,17 @@
#endif
#include <rtems/score/schedulercbsimpl.h>
+#include <rtems/score/statesimpl.h>
+#include <rtems/score/threadcpubudget.h>
-void _Scheduler_CBS_Budget_callout(
- Thread_Control *the_thread
-)
+/**
+ * @brief Invoked when a limited time quantum is exceeded.
+ *
+ * This routine is invoked when a limited time quantum is exceeded.
+ *
+ * @param the_thread The thread that exceeded a limited time quantum.
+ */
+static void _Scheduler_CBS_Budget_callout( Thread_Control *the_thread )
{
Scheduler_CBS_Node *node;
Scheduler_CBS_Server_id server_id;
@@ -52,6 +59,34 @@ void _Scheduler_CBS_Budget_callout(
}
}
+static void _Scheduler_CBS_Budget_at_tick( Thread_Control *the_thread )
+{
+ uint32_t budget_available;
+
+ if ( !the_thread->is_preemptible ) {
+ return;
+ }
+
+ if ( !_States_Is_ready( the_thread->current_state ) ) {
+ return;
+ }
+
+ budget_available = the_thread->CPU_budget.available;
+
+ if ( budget_available == 1 ) {
+ the_thread->CPU_budget.available = 0;
+ _Scheduler_CBS_Budget_callout ( the_thread );
+ } else {
+ the_thread->CPU_budget.available = budget_available - 1;
+ }
+}
+
+const Thread_CPU_budget_operations _Scheduler_CBS_Budget = {
+ .at_tick = _Scheduler_CBS_Budget_at_tick,
+ .at_context_switch = _Thread_CPU_budget_do_nothing,
+ .initialize = _Thread_CPU_budget_do_nothing
+};
+
int _Scheduler_CBS_Initialize(void)
{
return SCHEDULER_CBS_OK;
diff --git a/cpukit/score/src/schedulercbsattachthread.c b/cpukit/score/src/schedulercbsattachthread.c
index 0cb59fa2e8..d6c5b3b9eb 100644
--- a/cpukit/score/src/schedulercbsattachthread.c
+++ b/cpukit/score/src/schedulercbsattachthread.c
@@ -64,9 +64,8 @@ int _Scheduler_CBS_Attach_thread (
server->task_id = task_id;
- the_thread->budget_callout = _Scheduler_CBS_Budget_callout;
- the_thread->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_CALLOUT;
- the_thread->is_preemptible = true;
+ the_thread->is_preemptible = true;
+ the_thread->CPU_budget.operations = &_Scheduler_CBS_Budget;
_ISR_lock_ISR_enable( &lock_context );
return SCHEDULER_CBS_OK;
diff --git a/cpukit/score/src/schedulercbsdetachthread.c b/cpukit/score/src/schedulercbsdetachthread.c
index 687b37804f..5aa5eeb7a2 100644
--- a/cpukit/score/src/schedulercbsdetachthread.c
+++ b/cpukit/score/src/schedulercbsdetachthread.c
@@ -28,10 +28,11 @@ int _Scheduler_CBS_Detach_thread (
rtems_id task_id
)
{
- Scheduler_CBS_Server *server;
- ISR_lock_Context lock_context;
- Thread_Control *the_thread;
- Scheduler_CBS_Node *node;
+ Scheduler_CBS_Server *server;
+ ISR_lock_Context lock_context;
+ Thread_Control *the_thread;
+ Scheduler_CBS_Node *node;
+ const Thread_CPU_budget_operations *cpu_budget_operations;
if ( server_id >= _Scheduler_CBS_Maximum_servers ) {
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
@@ -58,9 +59,14 @@ int _Scheduler_CBS_Detach_thread (
server->task_id = -1;
- the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
- the_thread->budget_callout = the_thread->Start.budget_callout;
- the_thread->is_preemptible = the_thread->Start.is_preemptible;
+ the_thread->is_preemptible = the_thread->Start.is_preemptible;
+
+ cpu_budget_operations = the_thread->Start.cpu_budget_operations;
+ the_thread->CPU_budget.operations = cpu_budget_operations;
+
+ if ( cpu_budget_operations != NULL ) {
+ ( *cpu_budget_operations->initialize )( the_thread );
+ }
_ISR_lock_ISR_enable( &lock_context );
return SCHEDULER_CBS_OK;
diff --git a/cpukit/score/src/schedulercbsgetexecutiontime.c b/cpukit/score/src/schedulercbsgetexecutiontime.c
index 28709151c0..c8e999a788 100644
--- a/cpukit/score/src/schedulercbsgetexecutiontime.c
+++ b/cpukit/score/src/schedulercbsgetexecutiontime.c
@@ -51,7 +51,7 @@ int _Scheduler_CBS_Get_execution_time (
the_thread = _Thread_Get( server->task_id, &lock_context );
if ( the_thread != NULL ) {
- *exec_time = server->parameters.budget - the_thread->cpu_time_budget;
+ *exec_time = server->parameters.budget - the_thread->CPU_budget.available;
_ISR_lock_ISR_enable( &lock_context );
} else {
*exec_time = server->parameters.budget;
diff --git a/cpukit/score/src/schedulercbsgetremainingbudget.c b/cpukit/score/src/schedulercbsgetremainingbudget.c
index 5cb299d67e..338fd56190 100644
--- a/cpukit/score/src/schedulercbsgetremainingbudget.c
+++ b/cpukit/score/src/schedulercbsgetremainingbudget.c
@@ -50,7 +50,7 @@ int _Scheduler_CBS_Get_remaining_budget (
the_thread = _Thread_Get( server->task_id, &lock_context );
if ( the_thread != NULL ) {
- *remaining_budget = the_thread->cpu_time_budget;
+ *remaining_budget = the_thread->CPU_budget.available;
_ISR_lock_ISR_enable( &lock_context );
} else {
*remaining_budget = 0;
diff --git a/cpukit/score/src/schedulercbsreleasejob.c b/cpukit/score/src/schedulercbsreleasejob.c
index 376906b996..27ca33ad56 100644
--- a/cpukit/score/src/schedulercbsreleasejob.c
+++ b/cpukit/score/src/schedulercbsreleasejob.c
@@ -38,7 +38,7 @@ void _Scheduler_CBS_Release_job(
/* Budget replenishment for the next job. */
if ( serv_info != NULL ) {
- the_thread->cpu_time_budget = serv_info->parameters.budget;
+ the_thread->CPU_budget.available = serv_info->parameters.budget;
}
node->deadline_node = priority_node;
diff --git a/cpukit/score/src/schedulercbsunblock.c b/cpukit/score/src/schedulercbsunblock.c
index 700d7b1202..ca985150fb 100644
--- a/cpukit/score/src/schedulercbsunblock.c
+++ b/cpukit/score/src/schedulercbsunblock.c
@@ -50,7 +50,7 @@ void _Scheduler_CBS_Unblock(
if ( serv_info != NULL && ( priority & SCHEDULER_EDF_PRIO_MSB ) == 0 ) {
time_t deadline = serv_info->parameters.deadline;
time_t budget = serv_info->parameters.budget;
- uint32_t deadline_left = the_thread->cpu_time_budget;
+ uint32_t deadline_left = the_thread->CPU_budget.available;
Priority_Control budget_left = priority - _Watchdog_Ticks_since_boot;
if ( deadline * budget_left > budget * deadline_left ) {
diff --git a/cpukit/score/src/schedulerdefaultmakecleansticky.c b/cpukit/score/src/schedulerdefaultmakecleansticky.c
new file mode 100644
index 0000000000..e2b2d659f1
--- /dev/null
+++ b/cpukit/score/src/schedulerdefaultmakecleansticky.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreScheduler
+ *
+ * @brief This source file contains the implementation of
+ * _Scheduler_default_Sticky_do_nothing().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/scheduler.h>
+
+void _Scheduler_default_Sticky_do_nothing(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ (void) scheduler;
+ (void) the_thread;
+ (void) node;
+}
diff --git a/cpukit/score/src/schedulerdefaulttick.c b/cpukit/score/src/schedulerdefaulttick.c
deleted file mode 100644
index f4b6ba8578..0000000000
--- a/cpukit/score/src/schedulerdefaulttick.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * @file
- *
- * @ingroup RTEMSScoreScheduler
- *
- * @brief This source file contains the implementation of
- * _Scheduler_default_Tick().
- */
-
-/*
- * COPYRIGHT (c) 1989-2009.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <rtems/score/schedulerimpl.h>
-#include <rtems/score/threadimpl.h>
-#include <rtems/score/smp.h>
-#include <rtems/config.h>
-
-void _Scheduler_default_Tick(
- const Scheduler_Control *scheduler,
- Thread_Control *executing
-)
-{
- (void) scheduler;
-
- /*
- * If the thread is not preemptible or is not ready, then
- * just return.
- */
-
- if ( !executing->is_preemptible )
- return;
-
- if ( !_States_Is_ready( executing->current_state ) )
- return;
-
- /*
- * The cpu budget algorithm determines what happens next.
- */
-
- switch ( executing->budget_algorithm ) {
- case THREAD_CPU_BUDGET_ALGORITHM_NONE:
- break;
-
- case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
- #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
- case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
- #endif
- if ( (int)(--executing->cpu_time_budget) <= 0 ) {
-
- /*
- * A yield performs the ready chain mechanics needed when
- * resetting a timeslice. If no other thread's are ready
- * at the priority of the currently executing thread, then the
- * executing thread's timeslice is reset. Otherwise, the
- * currently executing thread is placed at the rear of the
- * FIFO for this priority and a new heir is selected.
- */
- _Thread_Yield( executing );
- executing->cpu_time_budget =
- rtems_configuration_get_ticks_per_timeslice();
- }
- break;
-
- #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
- case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
- if ( --executing->cpu_time_budget == 0 )
- (*executing->budget_callout)( executing );
- break;
- #endif
- }
-}
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 13d512118e..ef7f4bca7c 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -11,7 +11,8 @@
* _Scheduler_EDF_SMP_Remove_processor(), _Scheduler_EDF_SMP_Set_affinity(),
* _Scheduler_EDF_SMP_Start_idle(), _Scheduler_EDF_SMP_Unblock(),
* _Scheduler_EDF_SMP_Unpin(), _Scheduler_EDF_SMP_Update_priority(),
- * _Scheduler_EDF_SMP_Withdraw_node(), and _Scheduler_EDF_SMP_Yield().
+ * _Scheduler_EDF_SMP_Withdraw_node(), _Scheduler_EDF_SMP_Make_sticky(),
+ * _Scheduler_EDF_SMP_Clean_sticky(), and _Scheduler_EDF_SMP_Yield().
*/
/*
@@ -66,6 +67,28 @@ static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
return prio_left <= prio_right;
}
+static inline bool _Scheduler_EDF_SMP_Overall_less_equal(
+ const void *key,
+ const Chain_Node *to_insert,
+ const Chain_Node *next
+)
+{
+ Priority_Control insert_priority;
+ Priority_Control next_priority;
+ const Scheduler_EDF_SMP_Node *node_to_insert;
+ const Scheduler_EDF_SMP_Node *node_next;
+
+ insert_priority = *(const Priority_Control *) key;
+ insert_priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
+ node_to_insert = (const Scheduler_EDF_SMP_Node *) to_insert;
+ node_next = (const Scheduler_EDF_SMP_Node *) next;
+ next_priority = node_next->Base.priority;
+
+ return insert_priority < next_priority ||
+ ( insert_priority == next_priority &&
+ node_to_insert->generation <= node_next->generation );
+}
+
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
Scheduler_EDF_SMP_Context *self =
@@ -196,21 +219,21 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
return &highest_ready->Base.Base;
}
-static inline void _Scheduler_EDF_SMP_Set_scheduled(
+static inline void _Scheduler_EDF_SMP_Set_allocated(
Scheduler_EDF_SMP_Context *self,
- Scheduler_EDF_SMP_Node *scheduled,
+ Scheduler_EDF_SMP_Node *allocated,
const Per_CPU_Control *cpu
)
{
- self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
+ self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].allocated = allocated;
}
-static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
+static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_allocated(
const Scheduler_EDF_SMP_Context *self,
uint8_t rqi
)
{
- return self->Ready[ rqi ].scheduled;
+ return self->Ready[ rqi ].allocated;
}
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
@@ -226,45 +249,106 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
if ( rqi != 0 ) {
Scheduler_EDF_SMP_Context *self;
- Scheduler_EDF_SMP_Node *node;
+ Scheduler_EDF_SMP_Node *affine_scheduled;
self = _Scheduler_EDF_SMP_Get_self( context );
- node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
+ affine_scheduled = self->Ready[ rqi ].affine_scheduled;
- if ( node->ready_queue_index > 0 ) {
- _Assert( node->ready_queue_index == rqi );
- return &node->Base.Base;
+ if ( affine_scheduled != NULL ) {
+ _Assert( affine_scheduled->ready_queue_index == rqi );
+ return &affine_scheduled->Base.Base;
}
}
return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}
-static inline void _Scheduler_EDF_SMP_Insert_ready(
+static inline void _Scheduler_EDF_SMP_Update_generation(
Scheduler_Context *context,
Scheduler_Node *node_base,
Priority_Control insert_priority
)
{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ int generation_index;
+ int increment;
+ int64_t generation;
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
+ increment = ( generation_index << 1 ) - 1;
+
+ generation = self->generations[ generation_index ];
+ node->generation = generation;
+ self->generations[ generation_index ] = generation + increment;
+}
+
+static inline void _Scheduler_EDF_SMP_Insert_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ Priority_Control priority_to_insert
+)
+{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
- int generation_index;
- int increment;
- int64_t generation;
self = _Scheduler_EDF_SMP_Get_self( context );
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = node->ready_queue_index;
- generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
- increment = ( generation_index << 1 ) - 1;
ready_queue = &self->Ready[ rqi ];
- generation = self->generations[ generation_index ];
- node->generation = generation;
- self->generations[ generation_index ] = generation + increment;
+ _Chain_Insert_ordered_unprotected(
+ &self->Base.Scheduled,
+ &node_base->Node.Chain,
+ &priority_to_insert,
+ _Scheduler_EDF_SMP_Overall_less_equal
+ );
+
+ if ( rqi != 0 ) {
+ ready_queue->affine_scheduled = node;
+
+ if ( !_RBTree_Is_empty( &ready_queue->Queue ) ) {
+ _Chain_Extract_unprotected( &ready_queue->Node );
+ }
+ }
+}
+static inline void _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary(
+ Scheduler_EDF_SMP_Context *self,
+ uint8_t rqi,
+ Scheduler_EDF_SMP_Ready_queue *ready_queue
+)
+{
+ if (
+ rqi != 0 &&
+ _RBTree_Is_empty( &ready_queue->Queue ) &&
+ ready_queue->affine_scheduled == NULL
+ ) {
+ _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
+ }
+}
+
+static inline void _Scheduler_EDF_SMP_Insert_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ Priority_Control insert_priority
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ uint8_t rqi;
+ Scheduler_EDF_SMP_Ready_queue *ready_queue;
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ rqi = node->ready_queue_index;
+ ready_queue = &self->Ready[ rqi ];
+
+ _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
_RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
_RBTree_Insert_inline(
&ready_queue->Queue,
@@ -272,16 +356,6 @@ static inline void _Scheduler_EDF_SMP_Insert_ready(
&insert_priority,
_Scheduler_EDF_SMP_Priority_less_equal
);
-
- if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
- Scheduler_EDF_SMP_Node *scheduled;
-
- scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
-
- if ( scheduled->ready_queue_index == 0 ) {
- _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
- }
- }
}
static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
@@ -305,6 +379,8 @@ static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
_Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
}
+
+ ready_queue->affine_scheduled = NULL;
}
static inline void _Scheduler_EDF_SMP_Extract_from_ready(
@@ -328,10 +404,9 @@ static inline void _Scheduler_EDF_SMP_Extract_from_ready(
if (
rqi != 0
&& _RBTree_Is_empty( &ready_queue->Queue )
- && !_Chain_Is_node_off_chain( &ready_queue->Node )
+ && ready_queue->affine_scheduled == NULL
) {
_Chain_Extract_unprotected( &ready_queue->Node );
- _Chain_Set_off_chain( &ready_queue->Node );
}
}
@@ -340,15 +415,21 @@ static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
Scheduler_Node *scheduled_to_ready
)
{
- Priority_Control insert_priority;
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ uint8_t rqi;
+ Scheduler_EDF_SMP_Ready_queue *ready_queue;
- _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
- insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
- _Scheduler_EDF_SMP_Insert_ready(
- context,
- scheduled_to_ready,
- insert_priority
- );
+ _Scheduler_EDF_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
+ rqi = node->ready_queue_index;
+ ready_queue = &self->Ready[ rqi ];
+
+ _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
+ _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
+ _RBTree_Prepend( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
}
static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
@@ -361,64 +442,78 @@ static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
_Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- _Scheduler_SMP_Insert_scheduled(
+ _Scheduler_EDF_SMP_Insert_scheduled(
context,
ready_to_scheduled,
insert_priority
);
}
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_Node *lowest_ready;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
+ _Assert( lowest_ready != NULL );
+ _RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
+ _Chain_Initialize_node( &lowest_ready->Node.Chain );
+
+ return lowest_ready;
+}
+
+static inline void _Scheduler_EDF_SMP_Release_idle(
+ Scheduler_Node *node,
+ void *arg
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ _RBTree_Initialize_node( &node->Node.RBTree );
+ _RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
+}
+
static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled_base,
- Scheduler_Node *victim_base,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *scheduled;
uint8_t rqi;
- (void) victim_base;
self = _Scheduler_EDF_SMP_Get_self( context );
scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
rqi = scheduled->ready_queue_index;
if ( rqi != 0 ) {
- Scheduler_EDF_SMP_Ready_