summaryrefslogtreecommitdiffstats
path: root/cpukit
diff options
context:
space:
mode:
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/Makefile.am17
-rw-r--r--cpukit/dev/i2c/i2c-bus.c45
-rw-r--r--cpukit/include/dev/i2c/i2c.h44
-rw-r--r--cpukit/include/rtems/confdefs/iodrivers.h21
-rw-r--r--cpukit/include/rtems/confdefs/libio.h4
-rw-r--r--cpukit/include/rtems/imfs.h35
-rw-r--r--cpukit/include/rtems/io.h7
-rw-r--r--cpukit/include/rtems/libio.h55
-rw-r--r--cpukit/include/rtems/libio_.h94
-rw-r--r--cpukit/include/rtems/rtems/barrier.h16
-rw-r--r--cpukit/include/rtems/rtems/clock.h30
-rw-r--r--cpukit/include/rtems/rtems/clockimpl.h40
-rw-r--r--cpukit/include/rtems/rtems/dpmem.h20
-rw-r--r--cpukit/include/rtems/rtems/intr.h4
-rw-r--r--cpukit/include/rtems/rtems/message.h42
-rw-r--r--cpukit/include/rtems/rtems/object.h10
-rw-r--r--cpukit/include/rtems/rtems/part.h16
-rw-r--r--cpukit/include/rtems/rtems/ratemon.h24
-rw-r--r--cpukit/include/rtems/rtems/region.h32
-rw-r--r--cpukit/include/rtems/rtems/sem.h18
-rw-r--r--cpukit/include/rtems/rtems/support.h10
-rw-r--r--cpukit/include/rtems/rtems/tasks.h150
-rw-r--r--cpukit/include/rtems/rtems/timer.h18
-rw-r--r--cpukit/include/rtems/scheduler.h10
-rw-r--r--cpukit/include/rtems/score/basedefs.h2
-rw-r--r--cpukit/include/rtems/score/isr.h12
-rw-r--r--cpukit/include/rtems/score/isrlevel.h11
-rw-r--r--cpukit/include/rtems/score/objectimpl.h7
-rw-r--r--cpukit/include/rtems/score/percpu.h17
-rw-r--r--cpukit/include/rtems/score/percpudata.h14
-rw-r--r--cpukit/include/rtems/score/schedulerstrongapa.h162
-rw-r--r--cpukit/include/rtems/score/stackimpl.h7
-rw-r--r--cpukit/include/rtems/score/threadimpl.h61
-rw-r--r--cpukit/include/rtems/score/threadq.h9
-rw-r--r--cpukit/include/rtems/score/threadqimpl.h48
-rw-r--r--cpukit/include/rtems/score/timespec.h44
-rw-r--r--cpukit/include/rtems/score/watchdogimpl.h16
-rw-r--r--cpukit/include/rtems/thread.h12
-rw-r--r--cpukit/include/rtems/version.h21
-rw-r--r--cpukit/libcsupport/src/__usrenv.c39
-rw-r--r--cpukit/libcsupport/src/alignedalloc.c4
-rw-r--r--cpukit/libcsupport/src/calloc.c9
-rw-r--r--cpukit/libcsupport/src/futimens.c86
-rw-r--r--cpukit/libcsupport/src/libio_init.c6
-rw-r--r--cpukit/libcsupport/src/malloc.c4
-rw-r--r--cpukit/libcsupport/src/malloc_deferred.c4
-rw-r--r--cpukit/libcsupport/src/posix_memalign.c4
-rw-r--r--cpukit/libcsupport/src/rtems_memalign.c4
-rw-r--r--cpukit/libcsupport/src/rtemscalloc.c9
-rw-r--r--cpukit/libcsupport/src/utime.c71
-rw-r--r--cpukit/libcsupport/src/utimensat.c225
-rw-r--r--cpukit/libcsupport/src/utimes.c63
-rw-r--r--cpukit/libdebugger/rtems-debugger-remote-tcp.c2
-rw-r--r--cpukit/libfs/src/defaults/default_ops.c34
-rw-r--r--cpukit/libfs/src/defaults/default_utime.c32
-rw-r--r--cpukit/libfs/src/defaults/default_utimens.c49
-rw-r--r--cpukit/libfs/src/dosfs/msdos_init.c11
-rw-r--r--cpukit/libfs/src/ftpfs/ftpfs.c44
-rw-r--r--cpukit/libfs/src/ftpfs/tftpDriver.c2
-rw-r--r--cpukit/libfs/src/imfs/imfs_init.c30
-rw-r--r--cpukit/libfs/src/imfs/imfs_utime.c41
-rw-r--r--cpukit/libfs/src/imfs/imfs_utimens.c58
-rw-r--r--cpukit/libfs/src/jffs2/src/fs-rtems.c11
-rw-r--r--cpukit/libfs/src/rfs/rtems-rfs-rtems.c16
-rw-r--r--cpukit/libmisc/monitor/mon-editor.c12
-rw-r--r--cpukit/libmisc/shell/main_cp.c6
-rw-r--r--cpukit/libmisc/shell/main_edit.c16
-rw-r--r--cpukit/libmisc/shell/main_help.c2
-rw-r--r--cpukit/libmisc/uuid/gen_uuid.c6
-rw-r--r--cpukit/libpci/pci_cfg_print_code.c4
-rw-r--r--cpukit/posix/src/cancel.c25
-rw-r--r--cpukit/posix/src/clocknanosleep.c128
-rw-r--r--cpukit/posix/src/condwaitsupp.c2
-rw-r--r--cpukit/posix/src/mqueuerecvsupp.c2
-rw-r--r--cpukit/posix/src/mqueuesendsupp.c2
-rw-r--r--cpukit/posix/src/mutexlocksupp.c2
-rw-r--r--cpukit/posix/src/nanosleep.c91
-rw-r--r--cpukit/posix/src/prwlocktimedrdlock.c3
-rw-r--r--cpukit/posix/src/prwlocktimedwrlock.c3
-rw-r--r--cpukit/posix/src/pthreadcreate.c2
-rw-r--r--cpukit/posix/src/pthreadexit.c11
-rw-r--r--cpukit/posix/src/semtimedwait.c3
-rw-r--r--cpukit/posix/src/sigtimedwait.c8
-rw-r--r--cpukit/posix/src/sysconf.c4
-rw-r--r--cpukit/rtems/src/clockset.c33
-rw-r--r--cpukit/rtems/src/clocktodvalidate.c34
-rw-r--r--cpukit/rtems/src/msgqconstruct.c7
-rw-r--r--cpukit/rtems/src/taskconstruct.c2
-rw-r--r--cpukit/rtems/src/taskdelete.c20
-rw-r--r--cpukit/rtems/src/taskexit.c15
-rw-r--r--cpukit/rtems/src/taskrestart.c13
-rw-r--r--cpukit/rtems/src/taskstart.c4
-rw-r--r--cpukit/rtems/src/taskwakewhen.c19
-rw-r--r--cpukit/rtems/src/timercreate.c10
-rw-r--r--cpukit/sapi/src/exinit.c13
-rw-r--r--cpukit/score/cpu/aarch64/aarch64-context-validate.S29
-rw-r--r--cpukit/score/cpu/aarch64/aarch64-exception-default.S66
-rwxr-xr-xcpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h9985
-rw-r--r--cpukit/score/cpu/arm/cpu_asm.S3
-rw-r--r--cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h2
-rw-r--r--cpukit/score/cpu/arm/include/rtems/score/armv7m.h4
-rw-r--r--cpukit/score/cpu/arm/include/rtems/score/cpu.h5
-rw-r--r--cpukit/score/cpu/bfin/include/rtems/score/cpu.h9
-rw-r--r--cpukit/score/cpu/lm32/include/rtems/score/cpu.h9
-rw-r--r--cpukit/score/cpu/m68k/include/rtems/score/cpu.h3
-rw-r--r--cpukit/score/cpu/moxie/include/rtems/score/cpu.h5
-rw-r--r--cpukit/score/cpu/nios2/include/rtems/score/cpu.h2
-rw-r--r--cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h43
-rw-r--r--cpukit/score/cpu/or1k/include/rtems/score/cpu.h10
-rw-r--r--cpukit/score/cpu/powerpc/include/rtems/score/cpu.h5
-rw-r--r--cpukit/score/cpu/riscv/include/rtems/score/cpu.h7
-rw-r--r--cpukit/score/cpu/riscv/riscv-context-switch.S2
-rw-r--r--cpukit/score/cpu/sh/include/rtems/score/cpu.h8
-rw-r--r--cpukit/score/cpu/sparc/cpu.c230
-rw-r--r--cpukit/score/cpu/sparc/cpu_asm.S159
-rw-r--r--cpukit/score/cpu/sparc/include/rtems/score/cpu.h69
-rw-r--r--cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h43
-rw-r--r--cpukit/score/cpu/sparc/sparc-bad-trap.S231
-rw-r--r--cpukit/score/cpu/sparc/sparc-exception-frame-print.c157
-rw-r--r--cpukit/score/cpu/sparc/sparc-isr-handler.S620
-rw-r--r--cpukit/score/cpu/sparc/sparc-isr-install.c194
-rw-r--r--cpukit/score/cpu/sparc64/include/rtems/score/cpu.h6
-rw-r--r--cpukit/score/cpu/x86_64/include/rtems/score/cpu.h2
-rw-r--r--cpukit/score/src/condition.c4
-rw-r--r--cpukit/score/src/coremsgclose.c8
-rw-r--r--cpukit/score/src/isr.c16
-rw-r--r--cpukit/score/src/isrvectortable.c48
-rw-r--r--cpukit/score/src/mutex.c6
-rw-r--r--cpukit/score/src/objectfreenothing.c (renamed from cpukit/score/src/stackallocatorfreenothing.c)10
-rw-r--r--cpukit/score/src/percpudata.c98
-rw-r--r--cpukit/score/src/schedulerstrongapa.c981
-rw-r--r--cpukit/score/src/smp.c42
-rw-r--r--cpukit/score/src/threadq.c36
-rw-r--r--cpukit/score/src/threadqenqueue.c8
-rw-r--r--cpukit/score/src/threadqgetnameandid.c76
-rw-r--r--cpukit/score/src/threadqtimeout.c10
-rw-r--r--cpukit/score/src/threadrestart.c187
-rw-r--r--cpukit/score/src/threadstartmultitasking.c2
-rw-r--r--cpukit/score/src/timespecisnonnegative.c54
-rw-r--r--cpukit/telnetd/telnetd.c11
140 files changed, 14458 insertions, 1612 deletions
diff --git a/cpukit/Makefile.am b/cpukit/Makefile.am
index c9600f7242..c83167668d 100644
--- a/cpukit/Makefile.am
+++ b/cpukit/Makefile.am
@@ -262,6 +262,8 @@ librtemscpu_a_SOURCES += libcsupport/src/unmount.c
librtemscpu_a_SOURCES += libcsupport/src/__usrenv.c
librtemscpu_a_SOURCES += libcsupport/src/utime.c
librtemscpu_a_SOURCES += libcsupport/src/utimes.c
+librtemscpu_a_SOURCES += libcsupport/src/futimens.c
+librtemscpu_a_SOURCES += libcsupport/src/utimensat.c
librtemscpu_a_SOURCES += libcsupport/src/utsname.c
librtemscpu_a_SOURCES += libcsupport/src/vprintk.c
librtemscpu_a_SOURCES += libcsupport/src/write.c
@@ -375,7 +377,7 @@ librtemscpu_a_SOURCES += libfs/src/defaults/default_rmnod.c
librtemscpu_a_SOURCES += libfs/src/defaults/default_statvfs.c
librtemscpu_a_SOURCES += libfs/src/defaults/default_symlink.c
librtemscpu_a_SOURCES += libfs/src/defaults/default_unmount.c
-librtemscpu_a_SOURCES += libfs/src/defaults/default_utime.c
+librtemscpu_a_SOURCES += libfs/src/defaults/default_utimens.c
librtemscpu_a_SOURCES += libfs/src/defaults/default_write.c
librtemscpu_a_SOURCES += libfs/src/defaults/default_writev.c
librtemscpu_a_SOURCES += libfs/src/dosfs/fat.c
@@ -432,7 +434,7 @@ librtemscpu_a_SOURCES += libfs/src/imfs/imfs_stat.c
librtemscpu_a_SOURCES += libfs/src/imfs/imfs_stat_file.c
librtemscpu_a_SOURCES += libfs/src/imfs/imfs_symlink.c
librtemscpu_a_SOURCES += libfs/src/imfs/imfs_unmount.c
-librtemscpu_a_SOURCES += libfs/src/imfs/imfs_utime.c
+librtemscpu_a_SOURCES += libfs/src/imfs/imfs_utimens.c
librtemscpu_a_SOURCES += libfs/src/imfs/ioman.c
librtemscpu_a_SOURCES += libfs/src/pipe/fifo.c
librtemscpu_a_SOURCES += libfs/src/pipe/pipe.c
@@ -490,6 +492,7 @@ librtemscpu_a_SOURCES += posix/src/cleanuppush.c
librtemscpu_a_SOURCES += posix/src/clockgetcpuclockid.c
librtemscpu_a_SOURCES += posix/src/clockgetres.c
librtemscpu_a_SOURCES += posix/src/clockgettime.c
+librtemscpu_a_SOURCES += posix/src/clocknanosleep.c
librtemscpu_a_SOURCES += posix/src/clocksettime.c
librtemscpu_a_SOURCES += posix/src/condattrdestroy.c
librtemscpu_a_SOURCES += posix/src/condattrgetclock.c
@@ -837,6 +840,7 @@ librtemscpu_a_SOURCES += score/src/coremsgwkspace.c
librtemscpu_a_SOURCES += score/src/coremutexseize.c
librtemscpu_a_SOURCES += score/src/percpu.c
librtemscpu_a_SOURCES += score/src/percpuasm.c
+librtemscpu_a_SOURCES += score/src/percpudata.c
librtemscpu_a_SOURCES += score/src/corerwlock.c
librtemscpu_a_SOURCES += score/src/corerwlockobtainread.c
librtemscpu_a_SOURCES += score/src/corerwlockobtainwrite.c
@@ -867,6 +871,7 @@ librtemscpu_a_SOURCES += score/src/objectallocateunlimited.c
librtemscpu_a_SOURCES += score/src/objectclose.c
librtemscpu_a_SOURCES += score/src/objectextendinformation.c
librtemscpu_a_SOURCES += score/src/objectfree.c
+librtemscpu_a_SOURCES += score/src/objectfreenothing.c
librtemscpu_a_SOURCES += score/src/objectfreestatic.c
librtemscpu_a_SOURCES += score/src/objectgetnext.c
librtemscpu_a_SOURCES += score/src/objectinitializeinformation.c
@@ -931,7 +936,6 @@ librtemscpu_a_SOURCES += score/src/schedulercbsreleasejob.c
librtemscpu_a_SOURCES += score/src/schedulercbsunblock.c
librtemscpu_a_SOURCES += score/src/stackallocator.c
librtemscpu_a_SOURCES += score/src/stackallocatorfree.c
-librtemscpu_a_SOURCES += score/src/stackallocatorfreenothing.c
librtemscpu_a_SOURCES += score/src/stackallocatorinit.c
librtemscpu_a_SOURCES += score/src/pheapallocate.c
librtemscpu_a_SOURCES += score/src/pheapextend.c
@@ -984,6 +988,7 @@ librtemscpu_a_SOURCES += score/src/threadqenqueue.c
librtemscpu_a_SOURCES += score/src/threadqextractwithproxy.c
librtemscpu_a_SOURCES += score/src/threadqfirst.c
librtemscpu_a_SOURCES += score/src/threadqflush.c
+librtemscpu_a_SOURCES += score/src/threadqgetnameandid.c
librtemscpu_a_SOURCES += score/src/threadqops.c
librtemscpu_a_SOURCES += score/src/threadqtimeout.c
librtemscpu_a_SOURCES += score/src/timespecaddto.c
@@ -993,6 +998,7 @@ librtemscpu_a_SOURCES += score/src/timespeclessthan.c
librtemscpu_a_SOURCES += score/src/timespecsubtract.c
librtemscpu_a_SOURCES += score/src/timespectoticks.c
librtemscpu_a_SOURCES += score/src/timespecdivide.c
+librtemscpu_a_SOURCES += score/src/timespecisnonnegative.c
librtemscpu_a_SOURCES += score/src/timespecdividebyinteger.c
librtemscpu_a_SOURCES += score/src/timespecgetasnanoseconds.c
librtemscpu_a_SOURCES += score/src/coretod.c
@@ -1017,6 +1023,7 @@ librtemscpu_a_SOURCES += score/src/chainnodecount.c
librtemscpu_a_SOURCES += score/src/debugisthreaddispatchingallowed.c
librtemscpu_a_SOURCES += score/src/interr.c
librtemscpu_a_SOURCES += score/src/isr.c
+librtemscpu_a_SOURCES += score/src/isrvectortable.c
librtemscpu_a_SOURCES += score/src/wkspace.c
librtemscpu_a_SOURCES += score/src/wkspaceisunifieddefault.c
librtemscpu_a_SOURCES += score/src/wkspacemallocinitdefault.c
@@ -1611,9 +1618,13 @@ librtemscpu_a_SOURCES += score/cpu/sparc/access_le.c
librtemscpu_a_SOURCES += score/cpu/sparc/cpu.c
librtemscpu_a_SOURCES += score/cpu/sparc/cpu_asm.S
librtemscpu_a_SOURCES += score/cpu/sparc/sparc-access.S
+librtemscpu_a_SOURCES += score/cpu/sparc/sparc-bad-trap.S
librtemscpu_a_SOURCES += score/cpu/sparc/sparc-context-validate.S
librtemscpu_a_SOURCES += score/cpu/sparc/sparc-context-volatile-clobber.S
librtemscpu_a_SOURCES += score/cpu/sparc/sparc-counter-asm.S
+librtemscpu_a_SOURCES += score/cpu/sparc/sparc-exception-frame-print.c
+librtemscpu_a_SOURCES += score/cpu/sparc/sparc-isr-handler.S
+librtemscpu_a_SOURCES += score/cpu/sparc/sparc-isr-install.c
librtemscpu_a_SOURCES += score/cpu/sparc/syscall.S
librtemscpu_a_SOURCES += score/cpu/sparc/window.S
diff --git a/cpukit/dev/i2c/i2c-bus.c b/cpukit/dev/i2c/i2c-bus.c
index 472222c4ab..618a817b1a 100644
--- a/cpukit/dev/i2c/i2c-bus.c
+++ b/cpukit/dev/i2c/i2c-bus.c
@@ -31,6 +31,11 @@
#include <stdlib.h>
#include <string.h>
+int i2c_bus_try_obtain(i2c_bus *bus)
+{
+ return rtems_recursive_mutex_try_lock(&bus->mutex);
+}
+
void i2c_bus_obtain(i2c_bus *bus)
{
rtems_recursive_mutex_lock(&bus->mutex);
@@ -41,7 +46,12 @@ void i2c_bus_release(i2c_bus *bus)
rtems_recursive_mutex_unlock(&bus->mutex);
}
-int i2c_bus_transfer(i2c_bus *bus, i2c_msg *msgs, uint32_t msg_count)
+int i2c_bus_do_transfer(
+ i2c_bus *bus,
+ i2c_msg *msgs,
+ uint32_t msg_count,
+ uint32_t flags
+)
{
int err;
uint32_t i;
@@ -63,13 +73,24 @@ int i2c_bus_transfer(i2c_bus *bus, i2c_msg *msgs, uint32_t msg_count)
}
}
- i2c_bus_obtain(bus);
+ if ((flags & I2C_BUS_NOBLOCK) != 0) {
+ if (i2c_bus_try_obtain(bus) != 0) {
+ return -EAGAIN;
+ }
+ } else {
+ i2c_bus_obtain(bus);
+ }
err = (*bus->transfer)(bus, msgs, msg_count);
i2c_bus_release(bus);
return err;
}
+int i2c_bus_transfer(i2c_bus *bus, i2c_msg *msgs, uint32_t msg_count)
+{
+ return i2c_bus_do_transfer(bus, msgs, msg_count, 0);
+}
+
static ssize_t i2c_bus_read(
rtems_libio_t *iop,
void *buffer,
@@ -84,12 +105,17 @@ static ssize_t i2c_bus_read(
.buf = buffer
};
int err;
+ unsigned flags = 0;
if (bus->ten_bit_address) {
msg.flags |= I2C_M_TEN;
}
- err = i2c_bus_transfer(bus, &msg, 1);
+ if (rtems_libio_iop_is_no_delay(iop)) {
+ flags |= I2C_BUS_NOBLOCK;
+ }
+
+ err = i2c_bus_do_transfer(bus, &msg, 1, flags);
if (err == 0) {
return msg.len;
} else {
@@ -111,12 +137,17 @@ static ssize_t i2c_bus_write(
.buf = RTEMS_DECONST(void *, buffer)
};
int err;
+ unsigned flags = 0;
if (bus->ten_bit_address) {
msg.flags |= I2C_M_TEN;
}
- err = i2c_bus_transfer(bus, &msg, 1);
+ if (rtems_libio_iop_is_no_delay(iop)) {
+ flags |= I2C_BUS_NOBLOCK;
+ }
+
+ err = i2c_bus_do_transfer(bus, &msg, 1, flags);
if (err == 0) {
return msg.len;
} else {
@@ -133,12 +164,16 @@ static int i2c_bus_ioctl(
i2c_bus *bus = IMFS_generic_get_context_by_iop(iop);
i2c_rdwr_ioctl_data *rdwr;
int err;
+ unsigned flags = 0;
switch (command) {
case I2C_RDWR:
rdwr = arg;
if (rdwr->nmsgs > 0) {
- err = i2c_bus_transfer(bus, rdwr->msgs, rdwr->nmsgs);
+ if (rtems_libio_iop_is_no_delay(iop)) {
+ flags |= I2C_BUS_NOBLOCK;
+ }
+ err = i2c_bus_do_transfer(bus, rdwr->msgs, rdwr->nmsgs, flags);
} else {
err = 0;
}
diff --git a/cpukit/include/dev/i2c/i2c.h b/cpukit/include/dev/i2c/i2c.h
index ac2c369785..5aa45e390c 100644
--- a/cpukit/include/dev/i2c/i2c.h
+++ b/cpukit/include/dev/i2c/i2c.h
@@ -243,6 +243,16 @@ int i2c_bus_register(
);
/**
+ * @brief Try to obtain the bus.
+ *
+ * @param[in] bus The bus control.
+ *
+ * @retval 0 Successful operation.
+ * @retval EBUSY if the mutex is already locked.
+ */
+int i2c_bus_try_obtain(i2c_bus *bus);
+
+/**
* @brief Obtains the bus.
*
* @param[in] bus The bus control.
@@ -259,7 +269,8 @@ void i2c_bus_release(i2c_bus *bus);
/**
* @brief Transfers I2C messages.
*
- * The bus is obtained before the transfer and released afterwards.
+ * The bus is obtained before the transfer and released afterwards. This is the
+ * same as calling @ref i2c_bus_do_transfer with flags set to 0.
*
* @param[in] bus The bus control.
* @param[in] msgs The messages to transfer.
@@ -271,6 +282,37 @@ void i2c_bus_release(i2c_bus *bus);
*/
int i2c_bus_transfer(i2c_bus *bus, i2c_msg *msgs, uint32_t msg_count);
+/**
+ * @brief Transfers I2C messages with optional flags.
+ *
+ * The bus is obtained before the transfer and released afterwards. If the flag
+ * I2C_BUS_NOBLOCK is set and the bus is already obtained, nothing will be
+ * transferred and the function returns -EAGAIN.
+ *
+ * @param[in] bus The bus control.
+ * @param[in] msgs The messages to transfer.
+ * @param[in] msg_count The count of messages to transfer. It must be
+ * positive.
+ * @param[in] flags Options for the whole transfer.
+ *
+ * @retval 0 Successful operation.
+ * @retval -EAGAIN if @ref I2C_BUS_NOBLOCK is set and the bus is already
+ * obtained.
+ * @retval negative Negative error number in case of an error.
+ */
+int i2c_bus_do_transfer(
+ i2c_bus *bus,
+ i2c_msg *msgs,
+ uint32_t msg_count,
+ uint32_t flags
+);
+
+/**
+ * @brief I2C bus transfer flag to indicate that the task should not block if
+ * the bus is busy on a new transfer.
+ */
+#define I2C_BUS_NOBLOCK (1u << 0)
+
/** @} */
/**
diff --git a/cpukit/include/rtems/confdefs/iodrivers.h b/cpukit/include/rtems/confdefs/iodrivers.h
index e12640624d..a7de77a8c3 100644
--- a/cpukit/include/rtems/confdefs/iodrivers.h
+++ b/cpukit/include/rtems/confdefs/iodrivers.h
@@ -189,6 +189,27 @@ RTEMS_SYSINIT_ITEM(
|| CONFIGURE_APPLICATION_NEEDS_ZERO_DRIVER
|| CONFIGURE_MAXIMUM_DRIVERS */
+
+/*
+ * If any flavor of console driver is configured, then configure the post
+ * driver hook which opens /dev/console as stdin, stdout, and stderr.
+ *
+ * NOTE: This also results in an atexit() handler being registered to close
+ * /dev/console.
+ */
+#if defined(CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER) || \
+ defined(CONFIGURE_APPLICATION_NEEDS_SIMPLE_TASK_CONSOLE_DRIVER)
+
+ #include <rtems/libio.h>
+
+ RTEMS_SYSINIT_ITEM(
+ rtems_libio_post_driver,
+ RTEMS_SYSINIT_STD_FILE_DESCRIPTORS,
+ RTEMS_SYSINIT_ORDER_MIDDLE
+ );
+#endif
+
#endif /* CONFIGURE_INIT */
#endif /* _RTEMS_CONFDEFS_IODRIVERS_H */
diff --git a/cpukit/include/rtems/confdefs/libio.h b/cpukit/include/rtems/confdefs/libio.h
index 16a4fb6962..1b84f8c20f 100644
--- a/cpukit/include/rtems/confdefs/libio.h
+++ b/cpukit/include/rtems/confdefs/libio.h
@@ -231,9 +231,9 @@ static const rtems_filesystem_operations_table IMFS_root_ops = {
#endif
rtems_filesystem_default_fsunmount,
#ifdef CONFIGURE_IMFS_DISABLE_UTIME
- rtems_filesystem_default_utime,
+ rtems_filesystem_default_utimens,
#else
- IMFS_utime,
+ IMFS_utimens,
#endif
#ifdef CONFIGURE_IMFS_DISABLE_SYMLINK
rtems_filesystem_default_symlink,
diff --git a/cpukit/include/rtems/imfs.h b/cpukit/include/rtems/imfs.h
index b2a9868b38..57c498cfe8 100644
--- a/cpukit/include/rtems/imfs.h
+++ b/cpukit/include/rtems/imfs.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -5,12 +7,28 @@
*/
/*
- * COPYRIGHT (c) 1989-2011.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_IMFS_H
@@ -954,10 +972,9 @@ extern int device_ftruncate(
* This routine is the implementation of the utime() system
* call for the IMFS.
*/
-extern int IMFS_utime(
+extern int IMFS_utimens(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
);
/**
diff --git a/cpukit/include/rtems/io.h b/cpukit/include/rtems/io.h
index d8ce527b0e..181da9fe4f 100644
--- a/cpukit/include/rtems/io.h
+++ b/cpukit/include/rtems/io.h
@@ -194,9 +194,10 @@ typedef struct {
*
* @param driver_table is the device driver address table.
*
- * @param[out] registered_major is the pointer to a device major number
- * variable. When the directive call is successful, the device major number
- * of the registered device will be stored in this variable.
+ * @param[out] registered_major is the pointer to an
+ * ::rtems_device_major_number object. When the directive call is
+ * successful, the device major number of the registered device will be
+ * stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
diff --git a/cpukit/include/rtems/libio.h b/cpukit/include/rtems/libio.h
index 519e797dba..5379d92e4a 100644
--- a/cpukit/include/rtems/libio.h
+++ b/cpukit/include/rtems/libio.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -11,15 +13,31 @@
*/
/*
- * COPYRIGHT (c) 1989-2008.
- * On-Line Applications Research Corporation (OAR).
- *
- * Modifications to support reference counting in the file system are
- * Copyright (c) 2012 embedded brains GmbH.
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Modifications to support reference counting in the file system are
+ * Copyright (C) 2012 embedded brains GmbH.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_RTEMS_LIBIO_H
@@ -375,18 +393,16 @@ typedef int (*rtems_filesystem_rmnod_t)(
* @brief Set node access and modification times.
*
* @param[in] loc The location of the node.
- * @param[in] actime Access time for the node.
- * @param[in] modtime Modification for the node.
+ * @param[in] times Access and modification times for the node
*
* @retval 0 Successful operation.
* @retval -1 An error occurred. The errno is set to indicate the error.
*
- * @see rtems_filesystem_default_utime().
+ * @see rtems_filesystem_default_utimens().
*/
-typedef int (*rtems_filesystem_utime_t)(
+typedef int (*rtems_filesystem_utimens_t)(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
);
/**
@@ -484,7 +500,7 @@ struct _rtems_filesystem_operations_table {
rtems_filesystem_mount_t mount_h;
rtems_filesystem_unmount_t unmount_h;
rtems_filesystem_fsunmount_me_t fsunmount_me_h;
- rtems_filesystem_utime_t utime_h;
+ rtems_filesystem_utimens_t utimens_h;
rtems_filesystem_symlink_t symlink_h;
rtems_filesystem_readlink_t readlink_h;
rtems_filesystem_rename_t rename_h;
@@ -644,12 +660,11 @@ void rtems_filesystem_default_fsunmount(
/**
* @retval -1 Always. The errno is set to ENOTSUP.
*
- * @see rtems_filesystem_utime_t.
+ * @see rtems_filesystem_utimens_t.
*/
-int rtems_filesystem_default_utime(
+int rtems_filesystem_default_utimens(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
);
/**
diff --git a/cpukit/include/rtems/libio_.h b/cpukit/include/rtems/libio_.h
index e9eb46263e..8d4a2dc861 100644
--- a/cpukit/include/rtems/libio_.h
+++ b/cpukit/include/rtems/libio_.h
@@ -1,21 +1,39 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
* @brief LibIO Internal Interface
- *
+ *
* This file is the libio internal interface.
*/
/*
- * COPYRIGHT (c) 1989-2011.
- * On-Line Applications Research Corporation (OAR).
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Modifications to support reference counting in the file system are
+ * Copyright (c) 2012 embedded brains GmbH.
*
- * Modifications to support reference counting in the file system are
- * Copyright (c) 2012 embedded brains GmbH.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_RTEMS_LIBIO__H
@@ -30,6 +48,7 @@
#include <rtems/libio.h>
#include <rtems/seterr.h>
#include <rtems/score/assert.h>
+#include <rtems/score/timespec.h>
#ifdef __cplusplus
extern "C" {
@@ -357,6 +376,65 @@ static inline void rtems_filesystem_instance_unlock(
(*mt_entry->ops->unlock_h)( mt_entry );
}
+/**
+ * @brief Checks the tv_nsec member of a timespec struct
+ *
+ * This function is used with utimensat() and futimens() only. This ensures
+ * that the value in the tv_nsec member is equal to either UTIME_NOW,
+ * UTIME_OMIT, or a value greater-than or equal to zero and less than a
+ * billion.
+ *
+ * @param[in] time The timespec struct to be validated
+ *
+ * @retval true The tv_nsec member is a valid value.
+ * @retval false The tv_nsec member is not a valid value.
+ */
+bool rtems_filesystem_utime_tv_nsec_valid( struct timespec time );
+
+/**
+ * @brief Checks for errors and if the process has write permissions to the file.
+ *
+ * This function is only used with utimensat() and futimens(). It checks for
+ * EACCES and EPERM errors depending on what values are in @a times and if the
+ * process has write permissions to the file.
+ *
+ * @param[in] currentloc The current location to a file
+ * @param[in] times The timespecs used to check for errors. The timespec at
+ * index 0 is the access time, and the timespec at index 1 is
+ * the modification time.
+ *
+ * @retval 0 An error was not found.
+ * @retval -1 An error was found.
+ */
+int rtems_filesystem_utime_check_permissions(
+ const rtems_filesystem_location_info_t *currentloc,
+ const struct timespec times[2]
+);
+
+/**
+ * @brief Checks @a times and fills @a new_times with the time to be written
+ *
+ * This function is only used with utimensat() and futimens(). @a times contains
+ * the constant values passed into utimensat/futimens. @a new_times contains the
+ * values that will be written to the file. These values depend on @a times. If
+ * @a times is NULL, or either of its elements' tv_nsec members are UTIME_NOW,
+ * the current elapsed time in nanoseconds will be saved in the corresponding
+ * location in @a new_times.
+ *
+ * For each of the arguments, the timespec at index 0 is the access time, and
+ * the timespec at index 1 is the modification time.
+ *
+ * @param[in] times The timespecs to be checked
+ * @param[out] new_times The timespecs containing the time to be written
+ *
+ * @retval 0 @a times is valid.
+ * @retval -1 @a times is not valid.
+ */
+int rtems_filesystem_utime_update(
+ const struct timespec times[2],
+ struct timespec new_times[2]
+);
+
/*
* File Descriptor Routine Prototypes
*/
diff --git a/cpukit/include/rtems/rtems/barrier.h b/cpukit/include/rtems/rtems/barrier.h
index 0f692e1995..6327a7c831 100644
--- a/cpukit/include/rtems/rtems/barrier.h
+++ b/cpukit/include/rtems/rtems/barrier.h
@@ -88,9 +88,9 @@ extern "C" {
* @param maximum_waiters is the maximum count of waiters on an automatic
* release barrier.
*
- * @param id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created barrier will
- * be stored in this variable.
+ * @param id is the pointer to an ::rtems_id object. When the directive call
+ * is successful, the identifier of the created barrier will be stored in
+ * this object.
*
* This directive creates a barrier which resides on the local node. The
* barrier has the user-defined object name specified in ``name`` and the
@@ -178,9 +178,9 @@ rtems_status_code rtems_barrier_create(
*
* @param name is the object name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a barrier identifier associated with the barrier name
* specified in ``name``.
@@ -318,9 +318,9 @@ rtems_status_code rtems_barrier_wait( rtems_id id, rtems_interval timeout );
*
* @param id is the barrier identifier.
*
- * @param[out] released is the pointer to an integer variable. When the
+ * @param[out] released is the pointer to an uint32_t object. When the
* directive call is successful, the number of released tasks will be stored
- * in this variable.
+ * in this object.
*
* This directive releases the barrier specified by ``id``. All tasks waiting
* at the barrier will be unblocked. The number of released tasks will be
diff --git a/cpukit/include/rtems/rtems/clock.h b/cpukit/include/rtems/rtems/clock.h
index 3e94a1f609..1eeb0ce54c 100644
--- a/cpukit/include/rtems/rtems/clock.h
+++ b/cpukit/include/rtems/rtems/clock.h
@@ -144,10 +144,10 @@ rtems_status_code rtems_clock_set( const rtems_time_of_day *time_of_day );
*
* @brief Gets the time of day associated with the current CLOCK_REALTIME.
*
- * @param time_of_day is the pointer to a RTEMS time of day variable. When the
+ * @param time_of_day is the pointer to an rtems_time_of_day object. When the
* directive call is successful, the time of day associated with the
* CLOCK_REALTIME at some point during the directive call will be stored in
- * this variable.
+ * this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -177,10 +177,10 @@ rtems_status_code rtems_clock_get_tod( rtems_time_of_day *time_of_day );
* @brief Gets the seconds and microseconds elapsed since the Unix epoch and
* the current CLOCK_REALTIME.
*
- * @param[out] time_of_day is the pointer to a timeval structure variable.
- * When the directive call is successful, the seconds and microseconds
- * elapsed since the Unix epoch and the CLOCK_REALTIME at some point during
- * the directive call will be stored in this variable.
+ * @param[out] time_of_day is the pointer to a struct timeval object. When the
+ * directive call is successful, the seconds and microseconds elapsed since
+ * the Unix epoch and the CLOCK_REALTIME at some point during the directive
+ * call will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -210,10 +210,10 @@ rtems_status_code rtems_clock_get_tod_timeval( struct timeval *time_of_day );
* @brief Gets the seconds elapsed since the RTEMS epoch and the current
* CLOCK_REALTIME.
*
- * @param[out] seconds_since_rtems_epoch is the pointer to an interval
- * variable. When the directive call is successful, the seconds elapsed
- * since the RTEMS epoch and the CLOCK_REALTIME at some point during the
- * directive call will be stored in this variable.
+ * @param[out] seconds_since_rtems_epoch is the pointer to an ::rtems_interval
+ * object. When the directive call is successful, the seconds elapsed since
+ * the RTEMS epoch and the CLOCK_REALTIME at some point during the directive
+ * call will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -306,10 +306,10 @@ rtems_interval rtems_clock_get_ticks_since_boot( void );
* @brief Gets the seconds and nanoseconds elapsed since some time point during
* the system initialization using CLOCK_MONOTONIC.
*
- * @param[out] uptime is the pointer to a timeval structure variable. When the
+ * @param[out] uptime is the pointer to a struct timeval object. When the
* directive call is successful, the seconds and nanoseconds elapsed since
* some time point during the system initialization and some point during the
- * directive call using CLOCK_MONOTONIC will be stored in this variable.
+ * directive call using CLOCK_MONOTONIC will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -336,10 +336,10 @@ rtems_status_code rtems_clock_get_uptime( struct timespec *uptime );
* @brief Gets the seconds and microseconds elapsed since some time point
* during the system initialization using CLOCK_MONOTONIC.
*
- * @param[out] uptime is the pointer to a timeval structure variable. The
- * seconds and microseconds elapsed since some time point during the system
+ * @param[out] uptime is the pointer to a struct timeval object. The seconds
+ * and microseconds elapsed since some time point during the system
* initialization and some point during the directive call using
- * CLOCK_MONOTONIC will be stored in this variable. The pointer shall be
+ * CLOCK_MONOTONIC will be stored in this object. The pointer shall be
* valid, otherwise the behaviour is undefined.
*
* @par Constraints
diff --git a/cpukit/include/rtems/rtems/clockimpl.h b/cpukit/include/rtems/rtems/clockimpl.h
index c13c158410..c8334afaf3 100644
--- a/cpukit/include/rtems/rtems/clockimpl.h
+++ b/cpukit/include/rtems/rtems/clockimpl.h
@@ -35,19 +35,41 @@ extern "C" {
*/
/**
- * @brief TOD Validate
- *
- * This support function returns true if @a the_tod contains
- * a valid time of day, and false otherwise.
+ * @brief The enumerators of this type determine if the ticks member is
+ * validated in _TOD_Validate().
+ */
+typedef enum {
+ /**
+ * @brief Use this option to disable the validation of the ticks member in
+ * _TOD_Validate().
+ */
+ TOD_DISABLE_TICKS_VALIDATION = 0,
+
+ /**
+ * @brief Use this option to enable the validation of the ticks member in
+ * _TOD_Validate().
+ */
+ TOD_ENABLE_TICKS_VALIDATION = -1
+} TOD_Ticks_validation;
+
+/**
+ * @brief Validates the time of day.
*
- * @param[in] the_tod is the TOD structure to validate
+ * @param the_tod is the reference to the time of day structure to validate or
+ * NULL.
*
- * @retval This method returns true if the TOD is valid and false otherwise.
+ * @param ticks_validation indicates if the ticks member of the time of day
+ * should be validated. Use #TOD_ENABLE_TICKS_VALIDATION to validate the
+ * ticks member. Use #TOD_DISABLE_TICKS_VALIDATION to skip the validation of
+ * the ticks member.
*
- * @note This routine only works for leap-years through 2099.
+ * @retval RTEMS_SUCCESSFUL @a the_tod references a valid time of day.
+ * @retval RTEMS_INVALID_CLOCK @a the_tod references an invalid time of day.
+ * @retval RTEMS_INVALID_ADDRESS @a the_tod reference is @c NULL.
*/
-bool _TOD_Validate(
- const rtems_time_of_day *the_tod
+rtems_status_code _TOD_Validate(
+ const rtems_time_of_day *the_tod,
+ TOD_Ticks_validation ticks_validation
);
/**
diff --git a/cpukit/include/rtems/rtems/dpmem.h b/cpukit/include/rtems/rtems/dpmem.h
index 0dfa70a82e..9ecdf3a170 100644
--- a/cpukit/include/rtems/rtems/dpmem.h
+++ b/cpukit/include/rtems/rtems/dpmem.h
@@ -89,9 +89,9 @@ extern "C" {
*
* @param length is the length in bytes of the memory area.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created port will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created port will be stored in
+ * this object.
*
* This directive creates a port which resides on the local node. The port has
* the user-defined object name specified in ``name``. The assigned object
@@ -160,9 +160,9 @@ rtems_status_code rtems_port_create(
*
* @param name is the object name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a port identifier associated with the port name
* specified in ``name``.
@@ -251,9 +251,9 @@ rtems_status_code rtems_port_delete( rtems_id id );
*
* @param external is the external address to convert.
*
- * @param[out] internal is the pointer to a pointer variable. When the
+ * @param[out] internal is the pointer to a ``void`` pointer object. When the
* directive call is successful, the external address associated with the
- * internal address will be stored in this variable.
+ * internal address will be stored in this object.
*
* This directive converts a dual-ported memory address from external to
* internal representation for the specified port. If the given external
@@ -297,9 +297,9 @@ rtems_status_code rtems_port_external_to_internal(
*
* @param internal is the internal address to convert.
*
- * @param[out] external is the pointer to a pointer variable. When the
+ * @param[out] external is the pointer to a ``void`` pointer object. When the
* directive call is successful, the external address associated with the
- * internal address will be stored in this variable.
+ * internal address will be stored in this object.
*
* This directive converts a dual-ported memory address from internal to
* external representation so that it can be passed to owner of the DPMA
diff --git a/cpukit/include/rtems/rtems/intr.h b/cpukit/include/rtems/rtems/intr.h
index c9222fcc4d..178cf342df 100644
--- a/cpukit/include/rtems/rtems/intr.h
+++ b/cpukit/include/rtems/rtems/intr.h
@@ -155,10 +155,10 @@ typedef ISR_Vector_number rtems_vector_number;
*
* @param vector is the interrupt vector number.
*
- * @param[out] old_isr_handler is the pointer to an ::rtems_isr_entry variable.
+ * @param[out] old_isr_handler is the pointer to an ::rtems_isr_entry object.
* When the directive call is successful, the previous interrupt service
* routine established for this interrupt vector will be stored in this
- * variable.
+ * object.
*
* This directive establishes an interrupt service routine (ISR) for the
* interrupt specified by the ``vector`` number. The ``new_isr_handler``
diff --git a/cpukit/include/rtems/rtems/message.h b/cpukit/include/rtems/rtems/message.h
index 747daf4e1b..01173d4029 100644
--- a/cpukit/include/rtems/rtems/message.h
+++ b/cpukit/include/rtems/rtems/message.h
@@ -151,9 +151,9 @@ typedef struct {
*
* @param attribute_set is the attribute set of the message queue.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created message queue
- * will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created message queue will be
+ * stored in this object.
*
* This directive creates a message queue which resides on the local node. The
* message queue has the user-defined object name specified in ``name``.
@@ -288,9 +288,9 @@ rtems_status_code rtems_message_queue_create(
*
* @param config is the message queue configuration.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the constructed message
- * queue will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the constructed message queue will
+ * be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -388,9 +388,9 @@ rtems_status_code rtems_message_queue_construct(
*
* @param node is the node or node set to search for a matching object.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a message queue identifier associated with the
* message queue name specified in ``name``.
@@ -647,9 +647,9 @@ rtems_status_code rtems_message_queue_urgent(
*
* @param size is the size in bytes of the message buffer to broadcast.
*
- * @param[out] count is the pointer to an uint32_t variable. When the
- * directive call is successful, the number of unblocked tasks will be stored
- * in this variable.
+ * @param[out] count is the pointer to an uint32_t object. When the directive
+ * call is successful, the number of unblocked tasks will be stored in this
+ * object.
*
* This directive causes all tasks that are waiting at the queue specified by
* ``id`` to be unblocked and sent the message contained in ``buffer``. Before
@@ -713,9 +713,9 @@ rtems_status_code rtems_message_queue_broadcast(
* rtems_message_queue_construct(). The ``size`` parameter cannot be used to
* specify the size of the buffer.
*
- * @param size is the pointer to a size_t variable. When the directive call is
- * successful, the size in bytes of the received messages will be stored in
- * this variable. This parameter cannot be used to specify the size of the
+ * @param[out] size is the pointer to a size_t object. When the directive call
+ * is successful, the size in bytes of the received message will be stored
+ * in this object. This parameter cannot be used to specify the size of the
* buffer.
*
* @param option_set is the option set.
@@ -819,9 +819,9 @@ rtems_status_code rtems_message_queue_receive(
*
* @param id is the queue identifier.
*
- * @param[out] count is the pointer to an uint32_t variable. When the
- * directive call is successful, the number of pending messages will be
- * stored in this variable.
+ * @param[out] count is the pointer to an uint32_t object. When the directive
+ * call is successful, the number of pending messages will be stored in this
+ * object.
*
* This directive returns the number of messages pending on the queue specified
* by ``id`` in ``count``. If no messages are present on the queue, count is
@@ -861,9 +861,9 @@ rtems_status_code rtems_message_queue_get_number_pending(
*
* @param id is the queue identifier.
*
- * @param[out] count is the pointer to an uint32_t variable. When the
- * directive call is successful, the number of unblocked tasks will be stored
- * in this variable.
+ * @param[out] count is the pointer to an uint32_t object. When the directive
+ * call is successful, the number of unblocked tasks will be stored in this
+ * object.
*
* This directive removes all pending messages from the queue specified by
* ``id``. The number of messages removed is returned in ``count``. If no
diff --git a/cpukit/include/rtems/rtems/object.h b/cpukit/include/rtems/rtems/object.h
index 5f3e050b3c..e80303da28 100644
--- a/cpukit/include/rtems/rtems/object.h
+++ b/cpukit/include/rtems/rtems/object.h
@@ -304,9 +304,9 @@ rtems_name rtems_build_name( char c1, char c2, char c3, char c4 );
*
* @param id is the object identifier to get the name.
*
- * @param[out] name is the pointer to an object name variable. When the
+ * @param[out] name is the pointer to an ::rtems_name object. When the
* directive call is successful, the object name associated with the object
- * identifier will be stored in this variable.
+ * identifier will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -730,9 +730,9 @@ const char *rtems_object_get_api_class_name( int the_api, int the_class );
* @param the_class is the object class of the object API to get the class
* information.
*
- * @param info is the pointer to an object class information variable. When
- * the directive call is successful, the object class information of the
- * class of the API will be stored in this variable.
+ * @param[out] info is the pointer to an rtems_object_api_class_information
+ * object. When the directive call is successful, the object class
+ * information of the class of the API will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
diff --git a/cpukit/include/rtems/rtems/part.h b/cpukit/include/rtems/rtems/part.h
index 7a829d96af..10091b48f4 100644
--- a/cpukit/include/rtems/rtems/part.h
+++ b/cpukit/include/rtems/rtems/part.h
@@ -112,9 +112,9 @@ extern "C" {
*
* @param attribute_set is the attribute set of the partition.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created partition will
- * be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created partition will be stored
+ * in this object.
*
* This directive creates a partition of fixed size buffers from a physically
* contiguous memory space which starts at ``starting_address`` and is
@@ -252,9 +252,9 @@ rtems_status_code rtems_partition_create(
*
* @param node is the node or node set to search for a matching object.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a partition identifier associated with the partition
* name specified in ``name``.
@@ -388,9 +388,9 @@ rtems_status_code rtems_partition_delete( rtems_id id );
*
* @param id is the partition identifier.
*
- * @param[out] buffer is the pointer to a buffer pointer variable. When the
+ * @param[out] buffer is the pointer to a ``void`` pointer object. When the
* directive call is successful, the pointer to the allocated buffer will be
- * stored in this variable.
+ * stored in this object.
*
* This directive allows a buffer to be obtained from the partition specified
* by ``id``. The address of the allocated buffer is returned through the
diff --git a/cpukit/include/rtems/rtems/ratemon.h b/cpukit/include/rtems/rtems/ratemon.h
index eee4a37319..54c2709ab8 100644
--- a/cpukit/include/rtems/rtems/ratemon.h
+++ b/cpukit/include/rtems/rtems/ratemon.h
@@ -228,9 +228,9 @@ struct rtems_printer;
*
* @param name is the object name of the period.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created period will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created period will be stored in
+ * this object.
*
* This directive creates a period which resides on the local node. The period
* has the user-defined object name specified in ``name`` The assigned object
@@ -285,9 +285,9 @@ rtems_status_code rtems_rate_monotonic_create( rtems_name name, rtems_id *id );
*
* @param name is the object name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a period identifier associated with the period name
* specified in ``name``.
@@ -467,9 +467,9 @@ rtems_status_code rtems_rate_monotonic_period(
*
* @param id is the rate monotonic period identifier.
*
- * @param[out] status is the pointer to a rtems_rate_monotonic_period_status
- * variable. When the directive call is successful, the detailed period
- * status will be stored in this variable.
+ * @param[out] status is the pointer to an rtems_rate_monotonic_period_status
+ * object. When the directive call is successful, the detailed period status
+ * will be stored in this object.
*
* This directive returns the detailed status of the rate monotonic period
* specified by ``id``. The detailed status of the period will be returned in
@@ -528,9 +528,9 @@ rtems_status_code rtems_rate_monotonic_get_status(
*
* @param id is the rate monotonic period identifier.
*
- * @param[out] status is the pointer to a
- * rtems_rate_monotonic_period_statistics variable. When the directive call
- * is successful, the period statistics will be stored in this variable.
+ * @param[out] status is the pointer to an
+ * rtems_rate_monotonic_period_statistics object. When the directive call is
+ * successful, the period statistics will be stored in this object.
*
* This directive returns the statistics of the rate monotonic period specified
* by ``id``. The statistics of the period will be returned in the members of
diff --git a/cpukit/include/rtems/rtems/region.h b/cpukit/include/rtems/rtems/region.h
index d4921eec88..1e35344f7d 100644
--- a/cpukit/include/rtems/rtems/region.h
+++ b/cpukit/include/rtems/rtems/region.h
@@ -87,9 +87,9 @@ extern "C" {
*
* @param segment is the begin address of the segment.
*
- * @param[out] size is the pointer to a uintptr_t variable. When the directive
+ * @param[out] size is the pointer to a uintptr_t object. When the directive
* call is successful, the size of the segment in bytes will be stored in
- * this variable.
+ * this object.
*
* This directive obtains the size in bytes of the segment specified by
* ``segment`` of the region specified by ``id`` in ``size``.
@@ -148,9 +148,9 @@ rtems_status_code rtems_region_get_segment_size(
*
* @param attribute_set is the attribute set of the region.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created region will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created region will be stored in
+ * this object.
*
* This directive creates a region which resides on the local node. The region
* has the user-defined object name specified in ``name``. The assigned object
@@ -242,9 +242,9 @@ rtems_status_code rtems_region_create(
*
* @param name is the object name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a region identifier associated with the region name
* specified in ``name``.
@@ -400,9 +400,9 @@ rtems_status_code rtems_region_extend(
* @param timeout is the timeout in clock ticks if the #RTEMS_WAIT option is
* set. Use #RTEMS_NO_TIMEOUT to wait potentially forever.
*
- * @param segment is the pointer to a void pointer variable. When the
+ * @param[out] segment is the pointer to a ``void`` pointer object. When the
* directive call is successful, the begin address of the allocated segment
- * will be stored in this variable.
+ * will be stored in this object.
*
* This directive gets a segment from the region specified by ``id``.
*
@@ -558,9 +558,9 @@ rtems_status_code rtems_region_return_segment( rtems_id id, void *segment );
*
* @param size is the requested new size of the segment.
*
- * @param[out] old_size is the pointer to an uintptr_t variable. When the
+ * @param[out] old_size is the pointer to an uintptr_t object. When the
* directive call is successful, the old size of the segment will be stored
- * in this variable.
+ * in this object.
*
* This directive is used to increase or decrease the size of the ``segment``
* of the region specified by ``id``. When increasing the size of a segment,
@@ -613,9 +613,9 @@ rtems_status_code rtems_region_resize_segment(
*
* @param id is the region identifier.
*
- * @param[out] the_info is the pointer to a Heap_Information_block variable.
+ * @param[out] the_info is the pointer to a Heap_Information_block object.
* When the directive call is successful, the information of the region will
- * be stored in this variable.
+ * be stored in this object.
*
* This directive is used to obtain information about the used and free memory
* in the region specified by ``id``. This is a snapshot at the time of the
@@ -668,9 +668,9 @@ rtems_status_code rtems_region_get_information(
*
* @param id is the region identifier.
*
- * @param[out] the_info is the pointer to a Heap_Information_block variable.
+ * @param[out] the_info is the pointer to a Heap_Information_block object.
* When the directive call is successful, the free information of the region
- * will be stored in this variable.
+ * will be stored in this object.
*
* This directive is used to obtain information about the free memory in the
* region specified by ``id``. This is a snapshot at the time of the call. The
diff --git a/cpukit/include/rtems/rtems/sem.h b/cpukit/include/rtems/rtems/sem.h
index d079926720..2cf3ba232b 100644
--- a/cpukit/include/rtems/rtems/sem.h
+++ b/cpukit/include/rtems/rtems/sem.h
@@ -95,9 +95,9 @@ extern "C" {
* semaphore with the priority ceiling or MrsP locking protocol as defined by
* the attribute set.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created semaphore will
- * be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created semaphore will be stored
+ * in this object.
*
* This directive creates a semaphore which resides on the local node. The
* semaphore has the user-defined object name specified in ``name`` and the
@@ -272,9 +272,9 @@ rtems_status_code rtems_semaphore_create(
*
* @param node is the node or node set to search for a matching object.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a semaphore identifier associated with the semaphore
* name specified in ``name``.
@@ -737,9 +737,9 @@ rtems_status_code rtems_semaphore_flush( rtems_id id );
* @param new_priority is the new priority corresponding to the specified
* scheduler.
*
- * @param[out] old_priority is the pointer to a task priority variable. When
- * the directive call is successful, the old priority of the semaphore
- * corresponding to the specified scheduler will be stored in this variable.
+ * @param[out] old_priority is the pointer to an ::rtems_task_priority object.
+ * When the directive call is successful, the old priority of the semaphore
+ * corresponding to the specified scheduler will be stored in this object.
*
* This directive sets the priority of the semaphore specified by
* ``semaphore_id``. The priority corresponds to the scheduler specified by
diff --git a/cpukit/include/rtems/rtems/support.h b/cpukit/include/rtems/rtems/support.h
index 829548aae2..60e090ccec 100644
--- a/cpukit/include/rtems/rtems/support.h
+++ b/cpukit/include/rtems/rtems/support.h
@@ -228,9 +228,9 @@ static inline void rtems_name_to_characters(
*
* @param bytes is the number of bytes to allocated.
*
- * @param[out] pointer is the pointer to a pointer variable. When the
+ * @param[out] pointer is the pointer to a ``void`` pointer object. When the
* directive call is successful, the begin address of the allocated memory
- * area will be stored in this variable.
+ * area will be stored in this object.
*
* @return Returns true, if the allocation was successful, otherwise false.
*
@@ -286,9 +286,9 @@ bool rtems_workspace_free( void *pointer );
*
* @brief Gets information about the RTEMS Workspace.
*
- * @param the_info is the pointer to a heap information variable. When the
- * directive call is successful, the heap information will be stored in this
- * variable.
+ * @param[out] the_info is the pointer to a Heap_Information_block object.
+ * When the directive call is successful, the heap information will be stored
+ * in this object.
*
* @return Returns true, if getting the information was successful, otherwise
* false.
diff --git a/cpukit/include/rtems/rtems/tasks.h b/cpukit/include/rtems/rtems/tasks.h
index cb620a4c09..f0fb7365e3 100644
--- a/cpukit/include/rtems/rtems/tasks.h
+++ b/cpukit/include/rtems/rtems/tasks.h
@@ -449,9 +449,9 @@ rtems_task_priority _RTEMS_Maximum_priority( void );
*
* @param name is the scheduler name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the scheduler will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the scheduler will be stored in this
+ * object.
*
* This directive obtains a scheduler identifier associated with the scheduler
* name specified in ``name``.
@@ -491,9 +491,9 @@ rtems_status_code rtems_scheduler_ident( rtems_name name, rtems_id *id );
*
* @param cpu_index is the processor index to identify the scheduler.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the scheduler will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the scheduler will be stored in this
+ * object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -525,15 +525,15 @@ rtems_status_code rtems_scheduler_ident_by_processor(
*
* @brief Identifies a scheduler by the processor set.
*
- * @param cpusetsize is the size of the referenced processor set variable in
- * bytes. This value shall be positive.
+ * @param cpusetsize is the size of the processor set referenced by ``cpuset``
+ * in bytes. The size shall be positive.
*
- * @param cpuset is the pointer to a processor set variable. The referenced
- * processor set will be used to identify the scheduler.
+ * @param cpuset is the pointer to a cpu_set_t object. The referenced
+ * processor set will be used to identify the scheduler.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the scheduler will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the scheduler will be stored in this
+ * object.
*
* The scheduler is selected according to the highest numbered online processor
* in the specified processor set.
@@ -542,6 +542,8 @@ rtems_status_code rtems_scheduler_ident_by_processor(
*
* @retval ::RTEMS_INVALID_ADDRESS The ``id`` parameter was NULL.
*
+ * @retval ::RTEMS_INVALID_ADDRESS The ``cpuset`` parameter was NULL.
+ *
* @retval ::RTEMS_INVALID_SIZE The processor set size was invalid.
*
* @retval ::RTEMS_INVALID_NAME The processor set contained no online
@@ -575,9 +577,9 @@ rtems_status_code rtems_scheduler_ident_by_processor_set(
*
* @param scheduler_id is the scheduler identifier.
*
- * @param[out] priority is the pointer to a task priority variable. The
- * maximum priority of the scheduler will be stored in this variable, if the
- * operation is successful.
+ * @param[out] priority is the pointer to an ::rtems_task_priority object.
+ * When the directive call is successful, the maximum priority of the
+ * scheduler will be stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -612,10 +614,10 @@ rtems_status_code rtems_scheduler_get_maximum_priority(
*
* @param priority is the Classic API task priority to map.
*
- * @param[out] posix_priority is the pointer to a POSIX thread priority
- * variable. When the directive call is successful, the POSIX thread
- * priority value corresponding to the specified Classic API task priority
- * value will be stored in this variable.
+ * @param[out] posix_priority is the pointer to an ``int`` object. When the
+ * directive call is successful, the POSIX thread priority value
+ * corresponding to the specified Classic API task priority value will be
+ * stored in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -653,10 +655,10 @@ rtems_status_code rtems_scheduler_map_priority_to_posix(
*
* @param posix_priority is the POSIX thread priority to map.
*
- * @param[out] priority is the pointer to a Classic API task priority variable.
+ * @param[out] priority is the pointer to an ::rtems_task_priority object.
* When the directive call is successful, the Classic API task priority value
* corresponding to the specified POSIX thread priority value will be stored
- * in this variable.
+ * in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -762,14 +764,13 @@ uint32_t rtems_scheduler_get_processor_maximum( void );
*
* @param scheduler_id is the scheduler identifier.
*
- * @param cpusetsize is the size of the referenced processor set variable in
- * bytes.
+ * @param cpusetsize is the size of the processor set referenced by ``cpuset``
+ * in bytes.
*
- * @param[out] cpuset is the pointer to a processor set variable. When the
- * directive call is successful, the processor set of the scheduler will be
- * stored in this variable. A set bit in the processor set means that the
- * corresponding processor is owned by the scheduler, otherwise the bit is
- * cleared.
+ * @param[out] cpuset is the pointer to a cpu_set_t object. When the directive
+ * call is successful, the processor set of the scheduler will be stored in
+ * this object. A set bit in the processor set means that the corresponding
+ * processor is owned by the scheduler, otherwise the bit is cleared.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -778,7 +779,7 @@ uint32_t rtems_scheduler_get_processor_maximum( void );
* @retval ::RTEMS_INVALID_ID There was no scheduler associated with the
* identifier specified by ``scheduler_id``.
*
- * @retval ::RTEMS_INVALID_NUMBER The provided processor set was too small for
+ * @retval ::RTEMS_INVALID_SIZE The provided processor set was too small for
* the set of processors owned by the scheduler.
*
* @par Constraints
@@ -907,9 +908,9 @@ rtems_status_code rtems_scheduler_remove_processor(
*
* @param attribute_set is the attribute set of the task.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created task will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created task will be stored in
+ * this object.
*
* This directive creates a task which resides on the local node. The task has
* the user-defined object name specified in ``name``. The assigned object
@@ -1143,9 +1144,9 @@ rtems_status_code rtems_task_create(
*
* @param config is the task configuration.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the constructed task will
- * be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the constructed task will be stored
+ * in this object.
*
* @retval ::RTEMS_SUCCESSFUL The requested operation was successful.
*
@@ -1257,9 +1258,9 @@ rtems_status_code rtems_task_construct(
*
* @param node is the node or node set to search for a matching object.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a task identifier associated with the task name
* specified in ``name``.
@@ -1499,6 +1500,9 @@ rtems_status_code rtems_task_restart(
* @retval ::RTEMS_INVALID_ID There was no task associated with the identifier
* specified by ``id``.
*
+ * @retval ::RTEMS_CALLED_FROM_ISR The directive was called from within
+ * interrupt context.
+ *
* @retval ::RTEMS_ILLEGAL_ON_REMOTE_OBJECT The task resided on a remote node.
*
* @par Notes
@@ -1743,10 +1747,9 @@ rtems_status_code rtems_task_is_suspended( rtems_id id );
* @param new_priority is the new real priority or #RTEMS_CURRENT_PRIORITY to
* get the current priority.
*
- * @param[out] old_priority is the pointer to an ::rtems_task_priority
- * variable. When the directive call is successful, the current or previous
- * priority of the task with respect to its home scheduler will be stored in
- * this variable.
+ * @param[out] old_priority is the pointer to an ::rtems_task_priority object.
+ * When the directive call is successful, the current or previous priority of
+ * the task with respect to its home scheduler will be stored in this object.
*
* This directive manipulates the priority of the task specified by ``id``.
* When ``new_priority`` is not equal to #RTEMS_CURRENT_PRIORITY, the specified
@@ -1818,9 +1821,9 @@ rtems_status_code rtems_task_set_priority(
*
* @param scheduler_id is the scheduler identifier.
*
- * @param[out] priority is the pointer to an ::rtems_task_priority variable.
+ * @param[out] priority is the pointer to an ::rtems_task_priority object.
* When the directive call is successful, the current priority of the task
- * with respect to the specified scheduler will be stored in this variable.
+ * with respect to the specified scheduler will be stored in this object.
*
* This directive returns the current priority in ``priority`` of the task
* specified by ``task_id`` with respect to the scheduler specified by
@@ -1843,8 +1846,8 @@ rtems_status_code rtems_task_set_priority(
*
* @par Notes
* The current priority reflects temporary priority adjustments due to locking
- * protocols, the rate-monotonic period objects on some schedulers, and other
- * mechanisms.
+ * protocols, the rate-monotonic period objects on some schedulers such as EDF,
+ * and the POSIX sporadic server.
*
* @par Constraints
* @parblock
@@ -1881,9 +1884,9 @@ rtems_status_code rtems_task_get_priority(
* applied to the calling task. When the value is #RTEMS_CURRENT_MODE, the
* mode of the calling task is not changed.
*
- * @param previous_mode_set is the pointer to a mode variable. When the
+ * @param previous_mode_set is the pointer to an ::rtems_mode object. When the
* directive call is successful, the mode of the task before any mode changes
- * done by the directive call will be stored in this variable.
+ * done by the directive call will be stored in this object.
*
* This directive queries and optionally manipulates the execution mode of the
* calling task. A task's execution mode enables and disables preemption,
@@ -2069,7 +2072,7 @@ rtems_status_code rtems_task_wake_after( rtems_interval ticks );
* occur.
* @endparblock
*/
-rtems_status_code rtems_task_wake_when( rtems_time_of_day *time_buffer );
+rtems_status_code rtems_task_wake_when( const rtems_time_of_day *time_buffer );
/* Generated from spec:/rtems/task/if/get-scheduler */
@@ -2081,9 +2084,9 @@ rtems_status_code rtems_task_wake_when( rtems_time_of_day *time_buffer );
* @param task_id is the task identifier. The constant #RTEMS_SELF may be used
* to specify the calling task.
*
- * @param[out] scheduler_id is the pointer to an ::rtems_id variable. When the
+ * @param[out] scheduler_id is the pointer to an ::rtems_id object. When the
* directive call is successful, the identifier of the home scheduler of the
- * task will be stored in this variable.
+ * task will be stored in this object.
*
* This directive returns the identifier of the home scheduler of the task
* specified by ``task_id`` in ``scheduler_id``.
@@ -2140,13 +2143,30 @@ rtems_status_code rtems_task_get_scheduler(
* @retval ::RTEMS_INVALID_ID There was no scheduler associated with the
* identifier specified by ``scheduler_id``.
*
- * @retval ::RTEMS_INVALID_PRIORITY There task priority specified in
- * ``priority`` was invalid with respect to the scheduler specified by
- * ``scheduler_id``.
+ * @retval ::RTEMS_INVALID_PRIORITY The task priority specified by ``priority``
+ * was invalid with respect to the scheduler specified by ``scheduler_id``.
*
* @retval ::RTEMS_INVALID_ID There was no task associated with the identifier
* specified by ``task_id``.
*
+ * @retval ::RTEMS_RESOURCE_IN_USE The task specified by ``task_id`` was
+ * enqueued on a wait queue.
+ *
+ * @retval ::RTEMS_RESOURCE_IN_USE The task specified by ``task_id`` had a
+ * current priority which consisted of more than the real priority.
+ *
+ * @retval ::RTEMS_RESOURCE_IN_USE The task specified by ``task_id`` had a
+ * helping scheduler.
+ *
+ * @retval ::RTEMS_RESOURCE_IN_USE The task specified by ``task_id`` was
+ * pinned.
+ *
+ * @retval ::RTEMS_UNSATISFIED The scheduler specified by ``scheduler_id``
+ * owned no processor.
+ *
+ * @retval ::RTEMS_UNSATISFIED The scheduler specified by ``scheduler_id`` did
+ * not support the affinity set of the task specified by ``task_id``.
+ *
* @retval ::RTEMS_ILLEGAL_ON_REMOTE_OBJECT The task resided on a remote node.
*
* @par Constraints
@@ -2180,12 +2200,12 @@ rtems_status_code rtems_task_set_scheduler(
* @param id is the task identifier. The constant #RTEMS_SELF may be used to
* specify the calling task.
*
- * @param cpusetsize is the size of the referenced processor set variable in
- * bytes.
+ * @param cpusetsize is the size of the processor set referenced by ``cpuset``
+ * in bytes.
*
- * @param[out] cpuset is the pointer to a processor set variable. When the
- * directive call is successful, the processor affinity set of the task will
- * be stored in this variable. A set bit in the processor set means that the
+ * @param[out] cpuset is the pointer to a cpu_set_t object. When the directive
+ * call is successful, the processor affinity set of the task will be stored
+ * in this object. A set bit in the processor set means that the
* corresponding processor is in the processor affinity set of the task,
* otherwise the bit is cleared.
*
@@ -2199,8 +2219,8 @@ rtems_status_code rtems_task_set_scheduler(
* @retval ::RTEMS_INVALID_ID There was no task associated with the identifier
* specified by ``id``.
*
- * @retval ::RTEMS_INVALID_SIZE The provided processor set was too small for
- * the processor affinity set of the task.
+ * @retval ::RTEMS_INVALID_SIZE The size specified by ``cpusetsize`` of the
+ * processor set was too small for the processor affinity set of the task.
*
* @retval ::RTEMS_ILLEGAL_ON_REMOTE_OBJECT The task resided on a remote node.
*
@@ -2234,10 +2254,10 @@ rtems_status_code rtems_task_get_affinity(
* @param id is the task identifier. The constant #RTEMS_SELF may be used to
* specify the calling task.
*
- * @param cpusetsize is the size of the referenced processor set variable in
- * bytes.
+ * @param cpusetsize is the size of the processor set referenced by ``cpuset``
+ * in bytes.
*
- * @param cpuset is the pointer to a processor set variable. The processor set
+ * @param cpuset is the pointer to a cpu_set_t object. The processor set
* defines the new processor affinity set of the task. A set bit in the
* processor set means that the corresponding processor shall be in the
* processor affinity set of the task, otherwise the bit shall be cleared.
diff --git a/cpukit/include/rtems/rtems/timer.h b/cpukit/include/rtems/rtems/timer.h
index eb8bef2532..0f13c04bda 100644
--- a/cpukit/include/rtems/rtems/timer.h
+++ b/cpukit/include/rtems/rtems/timer.h
@@ -191,9 +191,9 @@ typedef struct {
*
* @param id is the timer identifier.
*
- * @param[out] the_info is the pointer to a timer information variable. When
- * the directive call is successful, the information about the timer will be
- * stored in this variable.
+ * @param[out] the_info is the pointer to an rtems_timer_information object.
+ * When the directive call is successful, the information about the timer
+ * will be stored in this object.
*
* This directive returns information about the timer.
*
@@ -267,9 +267,9 @@ typedef rtems_timer_service_routine ( *rtems_timer_service_routine_entry )( rtem
*
* @param name is the object name of the timer.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the identifier of the created timer will be
- * stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the identifier of the created timer will be stored in
+ * this object.
*
* This directive creates a timer which resides on the local node. The timer
* has the user-defined object name specified in ``name``. The assigned object
@@ -326,9 +326,9 @@ rtems_status_code rtems_timer_create( rtems_name name, rtems_id *id );
*
* @param name is the object name to look up.
*
- * @param[out] id is the pointer to an object identifier variable. When the
- * directive call is successful, the object identifier of an object with the
- * specified name will be stored in this variable.
+ * @param[out] id is the pointer to an ::rtems_id object. When the directive
+ * call is successful, the object identifier of an object with the specified
+ * name will be stored in this object.
*
* This directive obtains a timer identifier associated with the timer name
* specified in ``name``.
diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h
index 955a83cfb4..76d84fd787 100644
--- a/cpukit/include/rtems/scheduler.h
+++ b/cpukit/include/rtems/scheduler.h
@@ -251,22 +251,24 @@
#ifdef CONFIGURE_SCHEDULER_STRONG_APA
#include <rtems/score/schedulerstrongapa.h>
+ #ifndef CONFIGURE_MAXIMUM_PROCESSORS
+ #error "CONFIGURE_MAXIMUM_PROCESSORS must be defined to configure the Strong APA scheduler"
+ #endif
+
#define SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) \
SCHEDULER_CONTEXT_NAME( strong_APA_ ## name )
#define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \
static struct { \
Scheduler_strong_APA_Context Base; \
- Chain_Control Ready[ ( prio_count ) ]; \
+ Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; \
} SCHEDULER_STRONG_APA_CONTEXT_NAME( name )
#define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \
{ \
&SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \
SCHEDULER_STRONG_APA_ENTRY_POINTS, \
- RTEMS_ARRAY_SIZE( \
- SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \
- ) - 1, \
+ SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \
( obj_name ) \
SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \
}
diff --git a/cpukit/include/rtems/score/basedefs.h b/cpukit/include/rtems/score/basedefs.h
index 33fb272291..c682106c53 100644
--- a/cpukit/include/rtems/score/basedefs.h
+++ b/cpukit/include/rtems/score/basedefs.h
@@ -842,6 +842,8 @@ extern "C" {
/* Generated from spec:/rtems/basedefs/if/unreachable */
/**
+ * @ingroup RTEMSAPIBaseDefs
+ *
* @brief Tells the compiler that this program point is unreachable.
*/
#if defined(__GNUC__)
diff --git a/cpukit/include/rtems/score/isr.h b/cpukit/include/rtems/score/isr.h
index 3c6a9f1e2c..47c24f3a72 100644
--- a/cpukit/include/rtems/score/isr.h
+++ b/cpukit/include/rtems/score/isr.h
@@ -147,18 +147,6 @@ void _ISR_Handler_initialization ( void );
*/
void _ISR_Handler( void );
-/**
- * @brief Checks if an ISR in progress.
- *
- * This function returns true if the processor is currently servicing
- * and interrupt and false otherwise. A return value of true indicates
- * that the caller is an interrupt service routine, NOT a thread.
- *
- * @retval true Returns true when called from an ISR.
- * @retval false Returns false when not called from an ISR.
- */
-bool _ISR_Is_in_progress( void );
-
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/include/rtems/score/isrlevel.h b/cpukit/include/rtems/score/isrlevel.h
index 3981f2c688..d578a32c48 100644
--- a/cpukit/include/rtems/score/isrlevel.h
+++ b/cpukit/include/rtems/score/isrlevel.h
@@ -144,6 +144,17 @@ typedef uint32_t ISR_Level;
RTEMS_COMPILER_MEMORY_BARRIER(); \
} while (0)
+/**
+ * @brief Checks if an ISR is in progress.
+ *
+ * This function returns true, if the processor is currently servicing
+ * an interrupt, and false otherwise.  A return value of true indicates
+ * that the caller is an interrupt service routine, **not** a thread.
+ *
+ * @return Returns true, if called from within an ISR, otherwise false.
+ */
+bool _ISR_Is_in_progress( void );
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/objectimpl.h b/cpukit/include/rtems/score/objectimpl.h
index 54d6f0841b..0c9c85e062 100644
--- a/cpukit/include/rtems/score/objectimpl.h
+++ b/cpukit/include/rtems/score/objectimpl.h
@@ -954,6 +954,13 @@ RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Allocate_with_extend(
return the_object;
}
+/**
+ * @brief This function does nothing.
+ *
+ * @param ptr is not used.
+ */
+void _Objects_Free_nothing( void *ptr );
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 58a89ec7a9..3242383b9d 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -683,6 +683,16 @@ static inline struct _Thread_Control *_Per_CPU_Get_executing(
return cpu->executing;
}
+static inline bool _Per_CPU_Is_ISR_in_progress( const Per_CPU_Control *cpu )
+{
+#if CPU_PROVIDES_ISR_IS_IN_PROGRESS == TRUE
+ (void) cpu;
+ return _ISR_Is_in_progress();
+#else
+ return cpu->isr_nest_level != 0;
+#endif
+}
+
static inline bool _Per_CPU_Is_processor_online(
const Per_CPU_Control *cpu
)
@@ -765,13 +775,6 @@ RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
#if defined( RTEMS_SMP )
-/**
- * @brief Allocate and Initialize Per CPU Structures
- *
- * This method allocates and initialize the per CPU structure.
- */
-void _Per_CPU_Initialize(void);
-
void _Per_CPU_State_change(
Per_CPU_Control *cpu,
Per_CPU_State new_state
diff --git a/cpukit/include/rtems/score/percpudata.h b/cpukit/include/rtems/score/percpudata.h
index cae73a62b0..da454fd7bf 100644
--- a/cpukit/include/rtems/score/percpudata.h
+++ b/cpukit/include/rtems/score/percpudata.h
@@ -51,6 +51,20 @@ extern "C" {
RTEMS_LINKER_RWSET_DECLARE( _Per_CPU_Data, char );
/**
+ * @brief Translation units which define per-CPU items shall call this macro
+ * exactly once at file scope.
+ */
+#ifdef RTEMS_SMP
+#define PER_CPU_DATA_NEED_INITIALIZATION() \
+ static const char * const _Per_CPU_Data_reference \
+ RTEMS_SECTION( ".rtemsroset.reference" ) RTEMS_USED = \
+ RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data )
+#else
+#define PER_CPU_DATA_NEED_INITIALIZATION() \
+ RTEMS_LINKER_RWSET_DECLARE( _Per_CPU_Data, char )
+#endif
+
+/**
* @brief Declares a per-CPU item of the specified type.
*
* Items declared with this macro have external linkage.
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index 530eadc279..9ee922d46d 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -8,30 +10,44 @@
*/
/*
- * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2020 Richi Dubey
+ * Copyright (C) 2013, 2018 embedded brains GmbH (http://www.embedded-brains.de)
*
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
#define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
#include <rtems/score/scheduler.h>
-#include <rtems/score/schedulerpriority.h>
#include <rtems/score/schedulersmp.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
+/* Forward Declaration of Per_CPU_Control */
+struct Per_CPU_Control;
+
/**
* @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler
*
@@ -39,42 +55,95 @@ extern "C" {
*
* @brief This group contains the Strong APA Scheduler implementation.
*
- * This is an implementation of the global fixed priority scheduler (G-FP). It
- * uses one ready chain per priority to ensure constant time insert operations.
- * The scheduled chain uses linear insert operations and has at most processor
- * count entries. Since the processor and priority count are constants all
- * scheduler operations complete in a bounded execution time.
- *
- * The the_thread preempt mode will be ignored.
+ * This is an implementation of the Strong APA scheduler defined by
+ * Cerqueira et al. in Linux's Processor Affinity API, Refined:
+ * Shifting Real-Time Tasks Towards Higher Schedulability.
*
+ * The scheduled and ready nodes are accessed via the
+ * Scheduler_strong_APA_Context::Ready which helps in backtracking when a
+ * node which is executing on a CPU gets blocked. A new node is allocated
+ * to the CPU by checking all the executing nodes in the affinity set of
+ * the node and the subsequent nodes executing on the processors in its
+ * affinity set.
* @{
*/
/**
- * @brief Scheduler context specialization for Strong APA
- * schedulers.
+ * @brief Scheduler node specialization for Strong APA schedulers.
*/
typedef struct {
- Scheduler_SMP_Context Base;
- Priority_bit_map_Control Bit_map;
- Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
-} Scheduler_strong_APA_Context;
+ /**
+ * @brief SMP scheduler node.
+ */
+ Scheduler_SMP_Node Base;
+
+ /**
+ * @brief Chain node for Scheduler_strong_APA_Context::Ready.
+ */
+ Chain_Node Ready_node;
+
+ /**
+ * @brief CPU that this node would preempt in the backtracking part of
+ * _Scheduler_strong_APA_Get_highest_ready and
+ * _Scheduler_strong_APA_Do_Enqueue.
+ */
+ struct Per_CPU_Control *cpu_to_preempt;
+
+ /**
+ * @brief The associated affinity set of this node.
+ */
+ Processor_mask Affinity;
+} Scheduler_strong_APA_Node;
+
/**
- * @brief Scheduler node specialization for Strong APA
- * schedulers.
+ * @brief CPU-related variables and a Per_CPU_Control to implement BFS.
*/
typedef struct {
/**
- * @brief SMP scheduler node.
+ * @brief CPU in a queue.
*/
- Scheduler_SMP_Node Base;
+ struct Per_CPU_Control *cpu;
/**
- * @brief The associated ready queue of this node.
+ * @brief The node that would preempt this CPU.
*/
- Scheduler_priority_Ready_queue Ready_queue;
-} Scheduler_strong_APA_Node;
+ Scheduler_Node *preempting_node;
+
+ /**
+ * @brief Whether or not this cpu has been added to the queue
+ * (visited in BFS).
+ */
+ bool visited;
+
+ /**
+ * @brief The node currently executing on this cpu.
+ */
+ Scheduler_Node *executing;
+} Scheduler_strong_APA_CPU;
+
+/**
+ * @brief Scheduler context specialization for the Strong APA scheduler.
+ */
+typedef struct {
+ /**
+ * @brief @see Scheduler_SMP_Context.
+ */
+ Scheduler_SMP_Context Base;
+
+ /**
+ * @brief Chain of all the ready and scheduled nodes present in
+ * the Strong APA scheduler.
+ */
+ Chain_Control Ready;
+
+ /**
+ * @brief Stores cpu-specific variables.
+ */
+ Scheduler_strong_APA_CPU CPU[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_strong_APA_Context;
+
+#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255
/**
* @brief Entry points for the Strong APA Scheduler.
@@ -101,8 +170,8 @@ typedef struct {
_Scheduler_default_Release_job, \
_Scheduler_default_Cancel_job, \
_Scheduler_default_Tick, \
- _Scheduler_SMP_Start_idle \
- SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ _Scheduler_strong_APA_Start_idle, \
+ _Scheduler_strong_APA_Set_affinity \
}
/**
@@ -169,7 +238,7 @@ void _Scheduler_strong_APA_Update_priority(
/**
* @brief Asks for help.
*
- * @param scheduler The scheduler control instance.
+ * @param scheduler The scheduler control instance.
* @param the_thread The thread that asks for help.
* @param node The node of @a the_thread.
*
@@ -247,6 +316,33 @@ void _Scheduler_strong_APA_Yield(
Scheduler_Node *node
);
+/**
+ * @brief Starts an idle thread.
+ *
+ * @param scheduler The scheduler instance.
+ * @param[in, out] idle The idle thread to start.
+ * @param cpu The cpu for the operation.
+ */
+void _Scheduler_strong_APA_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ struct Per_CPU_Control *cpu
+);
+
+/**
+ * @brief Sets the processor affinity of the thread.
+ *
+ * @param scheduler The scheduler control instance.
+ * @param thread The thread to set the affinity of.
+ * @param[in, out] node_base The node of @a thread.
+ * @param affinity The new processor affinity set of the thread.
+ */
+Status_Control _Scheduler_strong_APA_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node_base,
+ const Processor_mask *affinity
+);
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/stackimpl.h b/cpukit/include/rtems/score/stackimpl.h
index c261f8bd4f..330fd32be7 100644
--- a/cpukit/include/rtems/score/stackimpl.h
+++ b/cpukit/include/rtems/score/stackimpl.h
@@ -194,13 +194,6 @@ void *_Stack_Allocate( size_t stack_size );
*/
void _Stack_Free( void *stack_area );
-/**
- * @brief This function does nothing.
- *
- * @param stack_area is not used.
- */
-void _Stack_Free_nothing( void *stack_area );
-
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/include/rtems/score/threadimpl.h b/cpukit/include/rtems/score/threadimpl.h
index c861e8b119..ecc8eee058 100644
--- a/cpukit/include/rtems/score/threadimpl.h
+++ b/cpukit/include/rtems/score/threadimpl.h
@@ -144,7 +144,7 @@ typedef struct {
/**
* @brief This member contains the handler to free the stack.
*
- * It shall not be NULL. Use _Stack_Free_nothing() if nothing is to free.
+ * It shall not be NULL. Use _Objects_Free_nothing() if nothing is to free.
*/
void ( *stack_free )( void * );
@@ -271,29 +271,19 @@ Status_Control _Thread_Start(
);
/**
- * @brief Restarts the currently executing thread.
- *
- * @param[in, out] executing The currently executing thread.
- * @param entry The start entry information for @a executing.
- * @param lock_context The lock context.
- */
-RTEMS_NO_RETURN void _Thread_Restart_self(
- Thread_Control *executing,
- const Thread_Entry_information *entry,
- ISR_lock_Context *lock_context
-);
-
-/**
* @brief Restarts the thread.
*
- * @param[in, out] the_thread The thread to restart.
- * @param entry The start entry information for @a the_thread.
- * @param lock_context The lock context.
+ * @param[in, out] the_thread is the thread to restart.
+ *
+ * @param entry is the new start entry information for the thread to restart.
+ *
+ * @param[in, out] lock_context is the lock context with interrupts disabled.
*
- * @retval true The operation was successful.
- * @retval false The operation failed.
+ * @retval STATUS_SUCCESSFUL The operation was successful.
+ *
+ * @retval STATUS_INCORRECT_STATE The thread was dormant.
*/
-bool _Thread_Restart_other(
+Status_Control _Thread_Restart(
Thread_Control *the_thread,
const Thread_Entry_information *entry,
ISR_lock_Context *lock_context
@@ -307,18 +297,20 @@ bool _Thread_Restart_other(
void _Thread_Yield( Thread_Control *executing );
/**
- * @brief Changes the currently executing thread to a new state with the sets.
+ * @brief Changes the life of currently executing thread.
*
- * @param clear States to clear.
- * @param set States to set.
- * @param ignore States to ignore.
+ * @param life_states_to_clear are the thread life states to clear.
*
- * @return The previous state the thread was in.
+ * @param life_states_to_set are the thread life states to set.
+ *
+ * @param ignored_life_states are the ignored thread life states.
+ *
+ * @return Returns the thread life state before the changes.
*/
Thread_Life_state _Thread_Change_life(
- Thread_Life_state clear,
- Thread_Life_state set,
- Thread_Life_state ignore
+ Thread_Life_state life_states_to_clear,
+ Thread_Life_state life_states_to_set,
+ Thread_Life_state ignored_life_states
);
/**
@@ -347,14 +339,13 @@ void _Thread_Kill_zombies( void );
/**
* @brief Exits the currently executing thread.
*
- * @param[in, out] executing The currently executing thread.
- * @param set The states to set.
- * @param[out] exit_value Contains the exit value of the thread.
+ * @param exit_value is the exit value of the thread.
+ *
+ * @param life_states_to_set are the thread life states to set.
*/
-void _Thread_Exit(
- Thread_Control *executing,
- Thread_Life_state set,
- void *exit_value
+RTEMS_NO_RETURN void _Thread_Exit(
+ void *exit_value,
+ Thread_Life_state life_states_to_set
);
/**
diff --git a/cpukit/include/rtems/score/threadq.h b/cpukit/include/rtems/score/threadq.h
index 5234019b81..10476888d4 100644
--- a/cpukit/include/rtems/score/threadq.h
+++ b/cpukit/include/rtems/score/threadq.h
@@ -214,7 +214,8 @@ struct Thread_queue_Context {
* callout must be used to install the thread watchdog for timeout handling.
*
* @see _Thread_queue_Enqueue_do_nothing_extra().
- * _Thread_queue_Add_timeout_ticks(), and
+ * _Thread_queue_Add_timeout_ticks(),
+ * _Thread_queue_Add_timeout_monotonic_timespec(), and
* _Thread_queue_Add_timeout_realtime_timespec().
*/
Thread_queue_Enqueue_callout enqueue_callout;
@@ -236,6 +237,12 @@ struct Thread_queue_Context {
const void *arg;
} Timeout;
+ /**
+ * @brief If this member is true, the timeout shall be absolute, otherwise it
+ * shall be relative to the current time of the clock.
+ */
+ bool timeout_absolute;
+
#if defined(RTEMS_SMP)
/**
* @brief Representation of a thread queue path from a start thread queue to
diff --git a/cpukit/include/rtems/score/threadqimpl.h b/cpukit/include/rtems/score/threadqimpl.h
index ca59de9e31..44efc1fcd0 100644
--- a/cpukit/include/rtems/score/threadqimpl.h
+++ b/cpukit/include/rtems/score/threadqimpl.h
@@ -201,18 +201,24 @@ _Thread_queue_Context_set_timeout_ticks(
/**
* @brief Sets the timeout argument in the thread queue context.
*
- * @param[out] queue_context The thread queue context.
- * @param arg The timeout argument.
+ * @param[out] queue_context is the thread queue context.
+ *
+ * @param arg is the timeout argument.
+ *
+ * @param absolute is true, if the timeout shall be absolute, otherwise it
+ * shall be relative to the current time of the clock.
*
* @see _Thread_queue_Enqueue().
*/
RTEMS_INLINE_ROUTINE void
_Thread_queue_Context_set_timeout_argument(
Thread_queue_Context *queue_context,
- const void *arg
+ const void *arg,
+ bool absolute
)
{
queue_context->Timeout.arg = arg;
+ queue_context->timeout_absolute = absolute;
}
/**
@@ -267,41 +273,53 @@ _Thread_queue_Context_set_enqueue_timeout_ticks(
}
/**
- * @brief Sets the enqueue callout to add an absolute monotonic timeout in
- * timespec format.
+ * @brief Sets the enqueue callout to add a timeout in timespec format using
+ * CLOCK_MONOTONIC.
*
- * @param[out] queue_context The thread queue context.
- * @param abstime The absolute monotonic timeout.
+ * @param[out] queue_context is the thread queue context.
+ *
+ * @param timeout is the absolute or relative timeout.
+ *
+ * @param absolute is true, if the timeout shall be absolute, otherwise it
+ * shall be relative to the current time of the clock.
*
* @see _Thread_queue_Enqueue().
*/
RTEMS_INLINE_ROUTINE void
_Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
Thread_queue_Context *queue_context,
- const struct timespec *abstime
+ const struct timespec *timeout,
+ bool absolute
)
{
- queue_context->Timeout.arg = abstime;
+ queue_context->Timeout.arg = timeout;
+ queue_context->timeout_absolute = absolute;
queue_context->enqueue_callout =
_Thread_queue_Add_timeout_monotonic_timespec;
}
/**
- * @brief Sets the enqueue callout to add an absolute realtime timeout in
- * timespec format.
+ * @brief Sets the enqueue callout to add a timeout in timespec format using
+ * CLOCK_REALTIME.
*
- * @param[out] queue_context The thread queue context.
- * @param abstime The absolute realtime timeout.
+ * @param[out] queue_context is the thread queue context.
+ *
+ * @param timeout is the absolute or relative timeout.
+ *
+ * @param absolute is true, if the timeout shall be absolute, otherwise it
+ * shall be relative to the current time of the clock.
*
* @see _Thread_queue_Enqueue().
*/
RTEMS_INLINE_ROUTINE void
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
Thread_queue_Context *queue_context,
- const struct timespec *abstime
+ const struct timespec *timeout,
+ bool absolute
)
{
- queue_context->Timeout.arg = abstime;
+ queue_context->Timeout.arg = timeout;
+ queue_context->timeout_absolute = absolute;
queue_context->enqueue_callout = _Thread_queue_Add_timeout_realtime_timespec;
}
diff --git a/cpukit/include/rtems/score/timespec.h b/cpukit/include/rtems/score/timespec.h
index 314d804f7d..2090f19b32 100644
--- a/cpukit/include/rtems/score/timespec.h
+++ b/cpukit/include/rtems/score/timespec.h
@@ -1,19 +1,37 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
* @ingroup Timespec
*
* @brief This header file provides the interfaces of the
- * @ref RTEMSScoreTimespec.
+ * @ref RTEMSScoreTimespec.
*/
/*
- * COPYRIGHT (c) 1989-2009.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTEMS_SCORE_TIMESPEC_H
@@ -106,6 +124,18 @@ uint64_t _Timespec_Get_as_nanoseconds(
);
/**
+ * @brief Checks if the values in @a time are non-negative.
+ *
+ * @param[in] time is the timespec instance to validate.
+ *
+ * @retval true If @a time is filled with non-negative values.
+ * @retval false If @a time is not filled with non-negative values.
+ */
+bool _Timespec_Is_non_negative(
+ const struct timespec *time
+);
+
+/**
* @brief Checks if timespec is valid.
*
* This method determines the validity of a timespec.
diff --git a/cpukit/include/rtems/score/watchdogimpl.h b/cpukit/include/rtems/score/watchdogimpl.h
index a8e6de4fbe..7b364b8828 100644
--- a/cpukit/include/rtems/score/watchdogimpl.h
+++ b/cpukit/include/rtems/score/watchdogimpl.h
@@ -535,6 +535,22 @@ RTEMS_INLINE_ROUTINE uint64_t _Watchdog_Ticks_from_timespec(
}
/**
+ * @brief Converts the ticks to timespec.
+ *
+ * @param ticks are the ticks to convert.
+ *
+ * @param[out] ts is the timespec in which to return the converted ticks.
+ */
+RTEMS_INLINE_ROUTINE void _Watchdog_Ticks_to_timespec(
+ uint64_t ticks,
+ struct timespec *ts
+)
+{
+ ts->tv_sec = ticks >> WATCHDOG_BITS_FOR_1E9_NANOSECONDS;
+ ts->tv_nsec = ticks & ( ( 1U << WATCHDOG_BITS_FOR_1E9_NANOSECONDS ) - 1 );
+}
+
+/**
* @brief Converts the sbintime in ticks.
*
* @param sbt The sbintime to convert to ticks.
diff --git a/cpukit/include/rtems/thread.h b/cpukit/include/rtems/thread.h
index feee612d22..d0cb03c284 100644
--- a/cpukit/include/rtems/thread.h
+++ b/cpukit/include/rtems/thread.h
@@ -54,6 +54,11 @@ static __inline void rtems_mutex_lock( rtems_mutex *mutex )
_Mutex_Acquire( mutex );
}
+static __inline int rtems_mutex_try_lock( rtems_mutex *mutex )
+{
+ return _Mutex_Try_acquire( mutex );
+}
+
static __inline void rtems_mutex_unlock( rtems_mutex *mutex )
{
_Mutex_Release( mutex );
@@ -97,6 +102,13 @@ static __inline void rtems_recursive_mutex_lock(
_Mutex_recursive_Acquire( mutex );
}
+static __inline int rtems_recursive_mutex_try_lock(
+ rtems_recursive_mutex *mutex
+)
+{
+ return _Mutex_recursive_Try_acquire( mutex );
+}
+
static __inline void rtems_recursive_mutex_unlock(
rtems_recursive_mutex *mutex
)
diff --git a/cpukit/include/rtems/version.h b/cpukit/include/rtems/version.h
index 87d5e1492c..cdd8905735 100644
--- a/cpukit/include/rtems/version.h
+++ b/cpukit/include/rtems/version.h
@@ -32,6 +32,27 @@ extern "C" {
* @brief The Version API provides functions to return the version or parts of
* the version of RTEMS you are using.
*
+ * A branch in the version control system will always fall back to a
+ * NOT-RELEASED version number with a minor number of 0. Only the release
+ * archives have a VERSION file with a final release number. That means for
+ * example that the 5 development branch will still show a version 5.0.0 even
+ * after the 5.1 release.
+ *
+ * The reasons for this are the following:
+ *
+ * 1. All pre-release tests are performed with a specific git hash. A committed
+ * VERSION file would need to be changed and committed afterwards to carry the
+ * final release number. The released version would then have a different git
+ * hash, so the test results could not be linked to the released
+ * version.
+ *
+ * 2. Users deploying RTEMS would need to commit a local change to a committed
+ * VERSION file and that would clash with the project changes. Deployment can
+ * use the project repos directly.
+ *
+ * 3. The VERSION file management and generation is the responsibility of the
+ * release manager and the release process.
+ *
* @{
*/
diff --git a/cpukit/libcsupport/src/__usrenv.c b/cpukit/libcsupport/src/__usrenv.c
index 5c25eaa8d6..ebafa9e8b0 100644
--- a/cpukit/libcsupport/src/__usrenv.c
+++ b/cpukit/libcsupport/src/__usrenv.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -6,15 +8,31 @@
*/
/*
- * COPYRIGHT (c) 1989-2008.
- * On-Line Applications Research Corporation (OAR).
+ * COPYRIGHT (C) 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Modifications to support reference counting in the file system are
+ * Copyright (c) 2012 embedded brains GmbH.
*
- * Modifications to support reference counting in the file system are
- * Copyright (c) 2012 embedded brains GmbH.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -145,10 +163,9 @@ static void null_op_fsunmount_me(
/* Do nothing */
}
-static int null_op_utime(
+static int null_op_utimens(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
)
{
return -1;
@@ -207,7 +224,7 @@ static const rtems_filesystem_operations_table null_ops = {
.mount_h = null_op_mount,
.unmount_h = null_op_unmount,
.fsunmount_me_h = null_op_fsunmount_me,
- .utime_h = null_op_utime,
+ .utimens_h = null_op_utimens,
.symlink_h = null_op_symlink,
.readlink_h = null_op_readlink,
.rename_h = null_op_rename,
diff --git a/cpukit/libcsupport/src/alignedalloc.c b/cpukit/libcsupport/src/alignedalloc.c
index b552fc2a0f..9c9ea83bd8 100644
--- a/cpukit/libcsupport/src/alignedalloc.c
+++ b/cpukit/libcsupport/src/alignedalloc.c
@@ -35,6 +35,10 @@
void *aligned_alloc( size_t alignment, size_t size )
{
+ if ( size == 0 ) {
+ return NULL;
+ }
+
return rtems_heap_allocate_aligned_with_boundary( size, alignment, 0 );
}
diff --git a/cpukit/libcsupport/src/calloc.c b/cpukit/libcsupport/src/calloc.c
index 693aa21453..d5cefb382a 100644
--- a/cpukit/libcsupport/src/calloc.c
+++ b/cpukit/libcsupport/src/calloc.c
@@ -35,14 +35,15 @@ void *calloc(
size_t length;
if ( nelem == 0 ) {
- length = 0;
- } else if ( elsize > SIZE_MAX / nelem ) {
+ return NULL;
+ }
+
+ if ( elsize > SIZE_MAX / nelem ) {
errno = ENOMEM;
return NULL;
- } else {
- length = nelem * elsize;
}
+ length = nelem * elsize;
cptr = malloc( length );
RTEMS_OBFUSCATE_VARIABLE( cptr );
if ( RTEMS_PREDICT_FALSE( cptr == NULL ) ) {
diff --git a/cpukit/libcsupport/src/futimens.c b/cpukit/libcsupport/src/futimens.c
new file mode 100644
index 0000000000..d4a9282e73
--- /dev/null
+++ b/cpukit/libcsupport/src/futimens.c
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup libcsupport
+ *
+ * @brief Set file access and modification times, with nanosecond resolution,
+ * based on a file descriptor.
+ */
+
+/*
+ * COPYRIGHT (C) 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/libio_.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+
+/**
+ * https://pubs.opengroup.org/onlinepubs/9699919799.2008edition/functions/futimens.html
+ *
+ * Set file access and modification times
+ */
+int futimens(
+ int fd,
+ const struct timespec times[2]
+)
+{
+ int rv;
+ rtems_libio_t *iop;
+ struct timespec new_times[2];
+ const rtems_filesystem_location_info_t *currentloc = NULL;
+
+ LIBIO_GET_IOP_WITH_ACCESS( fd, iop, LIBIO_FLAGS_READ, EBADF );
+
+ currentloc = &iop->pathinfo;
+
+ rv = rtems_filesystem_utime_update( times, new_times );
+ if ( rv != 0 ) {
+ rtems_libio_iop_drop( iop );
+ return rv;
+ }
+
+ rv = rtems_filesystem_utime_check_permissions( currentloc, times );
+ if ( rv != 0 ) {
+ rtems_libio_iop_drop( iop );
+ return rv;
+ }
+
+ rv = (*currentloc->mt_entry->ops->utimens_h)(
+ currentloc,
+ new_times
+ );
+
+ rtems_libio_iop_drop( iop );
+
+ return rv;
+}
diff --git a/cpukit/libcsupport/src/libio_init.c b/cpukit/libcsupport/src/libio_init.c
index d5814ca5a2..2bfc7664d7 100644
--- a/cpukit/libcsupport/src/libio_init.c
+++ b/cpukit/libcsupport/src/libio_init.c
@@ -65,9 +65,3 @@ RTEMS_SYSINIT_ITEM(
RTEMS_SYSINIT_LIBIO,
RTEMS_SYSINIT_ORDER_MIDDLE
);
-
-RTEMS_SYSINIT_ITEM(
- rtems_libio_post_driver,
- RTEMS_SYSINIT_STD_FILE_DESCRIPTORS,
- RTEMS_SYSINIT_ORDER_MIDDLE
-);
diff --git a/cpukit/libcsupport/src/malloc.c b/cpukit/libcsupport/src/malloc.c
index 795254fbab..3e55a94c83 100644
--- a/cpukit/libcsupport/src/malloc.c
+++ b/cpukit/libcsupport/src/malloc.c
@@ -30,6 +30,10 @@ void *malloc(
{
void *return_this;
+ if ( size == 0 ) {
+ return NULL;
+ }
+
return_this = rtems_heap_allocate_aligned_with_boundary( size, 0, 0 );
if ( !return_this ) {
errno = ENOMEM;
diff --git a/cpukit/libcsupport/src/malloc_deferred.c b/cpukit/libcsupport/src/malloc_deferred.c
index aab76406c7..b319d1213e 100644
--- a/cpukit/libcsupport/src/malloc_deferred.c
+++ b/cpukit/libcsupport/src/malloc_deferred.c
@@ -106,6 +106,10 @@ void *rtems_heap_allocate_aligned_with_boundary(
void *rtems_malloc( size_t size )
{
+ if ( size == 0 ) {
+ return NULL;
+ }
+
return rtems_heap_allocate_aligned_with_boundary( size, 0, 0 );
}
#endif
diff --git a/cpukit/libcsupport/src/posix_memalign.c b/cpukit/libcsupport/src/posix_memalign.c
index 316ed7315c..418de99275 100644
--- a/cpukit/libcsupport/src/posix_memalign.c
+++ b/cpukit/libcsupport/src/posix_memalign.c
@@ -45,6 +45,10 @@ int posix_memalign(
return EINVAL;
}
+ if ( size == 0 ) {
+ return 0;
+ }
+
*memptr = rtems_heap_allocate_aligned_with_boundary( size, alignment, 0 );
if ( *memptr == NULL ) {
diff --git a/cpukit/libcsupport/src/rtems_memalign.c b/cpukit/libcsupport/src/rtems_memalign.c
index aa938ac66f..aa67c74a29 100644
--- a/cpukit/libcsupport/src/rtems_memalign.c
+++ b/cpukit/libcsupport/src/rtems_memalign.c
@@ -40,6 +40,10 @@ int rtems_memalign(
*pointer = NULL;
+ if ( size == 0 ) {
+ return 0;
+ }
+
/*
* Perform the aligned allocation requested
*/
diff --git a/cpukit/libcsupport/src/rtemscalloc.c b/cpukit/libcsupport/src/rtemscalloc.c
index 836f1da64d..7e05a14bb1 100644
--- a/cpukit/libcsupport/src/rtemscalloc.c
+++ b/cpukit/libcsupport/src/rtemscalloc.c
@@ -47,13 +47,14 @@ void *rtems_calloc( size_t nelem, size_t elsize )
void *p;
if ( nelem == 0 ) {
- length = 0;
- } else if ( elsize > SIZE_MAX / nelem ) {
return NULL;
- } else {
- length = nelem * elsize;
}
+ if ( elsize > SIZE_MAX / nelem ) {
+ return NULL;
+ }
+
+ length = nelem * elsize;
p = rtems_malloc( length );
RTEMS_OBFUSCATE_VARIABLE( p );
if ( RTEMS_PREDICT_FALSE( p == NULL ) ) {
diff --git a/cpukit/libcsupport/src/utime.c b/cpukit/libcsupport/src/utime.c
index e2d8883592..b5b70348fa 100644
--- a/cpukit/libcsupport/src/utime.c
+++ b/cpukit/libcsupport/src/utime.c
@@ -1,58 +1,65 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
- * @brief Set File Access and Modification Times
* @ingroup libcsupport
+ *
+ * @brief Set file access and modification times in seconds.
*/
/*
- * COPYRIGHT (c) 1989-1999.
- * On-Line Applications Research Corporation (OAR).
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-/* FIXME: This include is a workaround for a broken <utime.h> in Newlib */
-#include <sys/types.h>
+#include <rtems/libio_.h>
+#include <fcntl.h>
#include <utime.h>
-#include <rtems/libio_.h>
-
/**
- * POSIX 1003.1b 5.5.6 - Set File Access and Modification Times
+ * https://pubs.opengroup.org/onlinepubs/009604599/functions/utime.html
+ *
+ * Set file access and modification times
*/
-int utime( const char *path, const struct utimbuf *times )
+int utime(
+ const char *path,
+ const struct utimbuf *times
+)
{
- int rv = 0;
- rtems_filesystem_eval_path_context_t ctx;
- int eval_flags = RTEMS_FS_FOLLOW_LINK;
- const rtems_filesystem_location_info_t *currentloc =
- rtems_filesystem_eval_path_start( &ctx, path, eval_flags );
- struct utimbuf now_times;
+ struct timespec new_times[2];
if ( times == NULL ) {
- time_t now = time( NULL );
-
- now_times.actime = now;
- now_times.modtime = now;
-
- times = &now_times;
+ return utimensat(AT_FDCWD, path, NULL, 0);
}
- rv = (*currentloc->mt_entry->ops->utime_h)(
- currentloc,
- times->actime,
- times->modtime
- );
-
- rtems_filesystem_eval_path_cleanup( &ctx );
+ _Timespec_Set(&new_times[0], times->actime, 0);
+ _Timespec_Set(&new_times[1], times->modtime, 0);
- return rv;
+ return utimensat(AT_FDCWD, path, new_times, 0);
}
diff --git a/cpukit/libcsupport/src/utimensat.c b/cpukit/libcsupport/src/utimensat.c
new file mode 100644
index 0000000000..c053218ad0
--- /dev/null
+++ b/cpukit/libcsupport/src/utimensat.c
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup libcsupport
+ *
+ * @brief Set file access and modification times in nanoseconds.
+ */
+
+/*
+ * COPYRIGHT (C) 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/libio_.h>
+#include <rtems/score/todimpl.h>
+
+#include <fcntl.h>
+#include <string.h>
+
+/*
+ * Make sure that tv_nsec is either UTIME_NOW, UTIME_OMIT, or a value
+ * that is non-negative and less than a billion.
+ *
+ * These guidelines come from the description of the EINVAL errors on
+ * https://pubs.opengroup.org/onlinepubs/9699919799/functions/futimens.html
+ */
+bool rtems_filesystem_utime_tv_nsec_valid(struct timespec time)
+{
+ if ( time.tv_nsec == UTIME_NOW ) {
+ return true;
+ }
+
+ if ( time.tv_nsec == UTIME_OMIT ) {
+ return true;
+ }
+
+ if ( time.tv_nsec < 0 ) {
+ return false;
+ }
+
+ if ( time.tv_nsec >= TOD_NANOSECONDS_PER_SECOND ) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Determine whether the access and modification timestamps can be updated */
+int rtems_filesystem_utime_check_permissions(
+ const rtems_filesystem_location_info_t * currentloc,
+ const struct timespec times[2]
+)
+{
+ struct stat st = {};
+ int rv;
+ bool write_access;
+
+ rv = (*currentloc->handlers->fstat_h)( currentloc, &st );
+ if ( rv != 0 ) {
+ rtems_set_errno_and_return_minus_one( ENOENT );
+ }
+
+ write_access = rtems_filesystem_check_access(
+ RTEMS_FS_PERMS_WRITE,
+ st.st_mode,
+ st.st_uid,
+ st.st_gid
+ );
+
+ /*
+ * The logic for the EACCES error is an inverted subset of the EPERM
+ * conditional according to the POSIX standard.
+ */
+ if ( (times == NULL) ||
+ ( (times[0].tv_nsec == UTIME_NOW) && (times[1].tv_nsec == UTIME_NOW) )) {
+ if ( !write_access ) {
+ rtems_set_errno_and_return_minus_one( EACCES );
+ }
+ } else {
+ if ( times[0].tv_nsec != UTIME_OMIT || times[1].tv_nsec != UTIME_OMIT ) {
+ if ( !write_access ) {
+ rtems_set_errno_and_return_minus_one( EPERM );
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Determine if the current time needs to be obtained, and then check
+ * whether the values in times are valid.
+ */
+int rtems_filesystem_utime_update(
+ const struct timespec times[2],
+ struct timespec new_times[2]
+)
+{
+ bool got_time = false;
+ struct timespec now;
+
+ /*
+ * If times is NULL, it's equivalent to adding UTIME_NOW in both time
+ * elements
+ */
+ if ( times == NULL ) {
+ _Timespec_Set( &new_times[0], 0, UTIME_NOW );
+ _Timespec_Set( &new_times[1], 0, UTIME_NOW );
+ } else {
+ new_times[0] = times[0];
+ new_times[1] = times[1];
+ }
+
+ if ( new_times[0].tv_nsec == UTIME_NOW ) {
+ clock_gettime( CLOCK_REALTIME, &now );
+ new_times[0] = now;
+ got_time = true;
+ }
+
+ if ( new_times[1].tv_nsec == UTIME_NOW ) {
+ if ( !got_time ) {
+ clock_gettime( CLOCK_REALTIME, &now );
+ }
+ new_times[1] = now;
+ }
+
+ if ( !_Timespec_Is_non_negative( &new_times[0] ) ) {
+ rtems_set_errno_and_return_minus_one( EINVAL );
+ }
+
+ if ( !_Timespec_Is_non_negative( &new_times[1] ) ) {
+ rtems_set_errno_and_return_minus_one( EINVAL );
+ }
+
+ if ( !rtems_filesystem_utime_tv_nsec_valid( new_times[0] ) ) {
+ rtems_set_errno_and_return_minus_one( EINVAL );
+ }
+
+ if ( !rtems_filesystem_utime_tv_nsec_valid( new_times[1] ) ) {
+ rtems_set_errno_and_return_minus_one( EINVAL );
+ }
+
+ return 0;
+}
+
+/**
+ * https://pubs.opengroup.org/onlinepubs/9699919799.2008edition/functions/futimens.html
+ *
+ * Set file access and modification times
+ */
+int utimensat(
+ int fd,
+ const char *path,
+ const struct timespec times[2],
+ int flag
+)
+{
+ int rv = 0;
+ rtems_filesystem_eval_path_context_t ctx;
+ int eval_flags = RTEMS_FS_FOLLOW_LINK;
+ const rtems_filesystem_location_info_t *currentloc = NULL;
+ struct timespec new_times[2];
+
+ /*
+ * RTEMS does not currently support operating on a real file descriptor
+ */
+ if ( fd != AT_FDCWD ) {
+ rtems_set_errno_and_return_minus_one( ENOSYS );
+ }
+
+ /*
+ * RTEMS does not currently support AT_SYMLINK_NOFOLLOW
+ */
+ if ( flag != 0 ) {
+ rtems_set_errno_and_return_minus_one( ENOSYS );
+ }
+
+ rv = rtems_filesystem_utime_update( times, new_times );
+ if ( rv != 0 ) {
+ return rv;
+ }
+
+ currentloc = rtems_filesystem_eval_path_start( &ctx, path, eval_flags );
+
+ rv = rtems_filesystem_utime_check_permissions( currentloc, times );
+ if ( rv != 0 ) {
+ rtems_filesystem_eval_path_cleanup( &ctx );
+ return rv;
+ }
+
+ rv = (*currentloc->mt_entry->ops->utimens_h)(
+ currentloc,
+ new_times
+ );
+
+ rtems_filesystem_eval_path_cleanup( &ctx );
+
+ return rv;
+}
diff --git a/cpukit/libcsupport/src/utimes.c b/cpukit/libcsupport/src/utimes.c
index 3dc47c0000..9748abb5fd 100644
--- a/cpukit/libcsupport/src/utimes.c
+++ b/cpukit/libcsupport/src/utimes.c
@@ -1,38 +1,73 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
- * @brief Change File Last Access and Modification Times
* @ingroup libcsupport
+ *
+ * @brief Set file access and modification times in microseconds.
*/
/*
- * Written by: Vinu Rajashekhar <vinutheraj@gmail.com>
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-#include <sys/types.h>
-#include <utime.h>
#include <sys/time.h>
+#include <rtems/score/todimpl.h>
+
+#include <fcntl.h>
+/**
+ * https://pubs.opengroup.org/onlinepubs/9699919799.2008edition/functions/futimens.html
+ *
+ * Set file access and modification times
+ */
int utimes(
const char *path,
const struct timeval times[2]
-)
+)
{
- struct utimbuf timeinsecs;
+ struct timespec new_times[2];
- if ( times == NULL )
- return utime( path, NULL );
+ if ( times == NULL ) {
+ return utimensat( AT_FDCWD, path, NULL , 0 );
+ }
- timeinsecs.actime = times[0].tv_sec;
- timeinsecs.modtime = times[1].tv_sec;
+ _Timespec_Set(
+ &new_times[0],
+ times[0].tv_sec,
+ times[0].tv_usec * TOD_NANOSECONDS_PER_MICROSECOND
+ );
+ _Timespec_Set(
+ &new_times[1],
+ times[1].tv_sec,
+ times[1].tv_usec * TOD_NANOSECONDS_PER_MICROSECOND
+ );
- return utime( path, &timeinsecs );
+ return utimensat( AT_FDCWD, path, new_times, 0 );
}
diff --git a/cpukit/libdebugger/rtems-debugger-remote-tcp.c b/cpukit/libdebugger/rtems-debugger-remote-tcp.c
index 696e2deb8c..440baa9b66 100644
--- a/cpukit/libdebugger/rtems-debugger-remote-tcp.c
+++ b/cpukit/libdebugger/rtems-debugger-remote-tcp.c
@@ -122,7 +122,7 @@ static int
tcp_remote_connect(rtems_debugger_remote* remote)
{
int ld;
- struct sockaddr_in addr;
+ struct sockaddr_in addr = {0};
socklen_t opt;
socklen_t len;
bool running;
diff --git a/cpukit/libfs/src/defaults/default_ops.c b/cpukit/libfs/src/defaults/default_ops.c
index 63ad2cefe1..45069e76d7 100644
--- a/cpukit/libfs/src/defaults/default_ops.c
+++ b/cpukit/libfs/src/defaults/default_ops.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -7,16 +9,28 @@
*/
/*
- * Copyright (c) 2010
- * embedded brains GmbH
- * Obere Lagerstr. 30
- * D-82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Copyright (C) 2010 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#include <rtems/libio.h>
@@ -36,7 +50,7 @@ const rtems_filesystem_operations_table rtems_filesystem_operations_default = {
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = rtems_filesystem_default_fsunmount,
- .utime_h = rtems_filesystem_default_utime,
+ .utimens_h = rtems_filesystem_default_utimens,
.symlink_h = rtems_filesystem_default_symlink,
.readlink_h = rtems_filesystem_default_readlink,
.rename_h = rtems_filesystem_default_rename,
diff --git a/cpukit/libfs/src/defaults/default_utime.c b/cpukit/libfs/src/defaults/default_utime.c
deleted file mode 100644
index aaf4e445b6..0000000000
--- a/cpukit/libfs/src/defaults/default_utime.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * @file
- *
- * @ingroup LibIOFSOps File System Operations
- *
- * @brief RTEMS Default File System sets file access and modification times
- */
-
-/*
- * COPYRIGHT (c) 2010.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <rtems/libio_.h>
-#include <rtems/seterr.h>
-
-int rtems_filesystem_default_utime(
- const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
-)
-{
- rtems_set_errno_and_return_minus_one( ENOTSUP );
-}
diff --git a/cpukit/libfs/src/defaults/default_utimens.c b/cpukit/libfs/src/defaults/default_utimens.c
new file mode 100644
index 0000000000..c2321dc214
--- /dev/null
+++ b/cpukit/libfs/src/defaults/default_utimens.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup LibIOFSOps File System Operations
+ *
+ * @brief RTEMS Default File System sets file access and modification times
+ */
+
+/*
+ * COPYRIGHT (C) 2010, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/libio_.h>
+#include <rtems/seterr.h>
+
+int rtems_filesystem_default_utimens(
+ const rtems_filesystem_location_info_t *loc,
+ struct timespec times[2]
+)
+{
+ rtems_set_errno_and_return_minus_one( ENOTSUP );
+}
diff --git a/cpukit/libfs/src/dosfs/msdos_init.c b/cpukit/libfs/src/dosfs/msdos_init.c
index 67b16b64ed..a96b973cba 100644
--- a/cpukit/libfs/src/dosfs/msdos_init.c
+++ b/cpukit/libfs/src/dosfs/msdos_init.c
@@ -36,18 +36,17 @@ static int msdos_clone_node_info(rtems_filesystem_location_info_t *loc)
return fat_file_reopen(fat_fd);
}
-static int msdos_utime(
+static int msdos_utimens(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
)
{
fat_file_fd_t *fat_fd = loc->node_access;
- if (actime != modtime)
+ if (times[0].tv_sec != times[1].tv_sec)
rtems_set_errno_and_return_minus_one( ENOTSUP );
- fat_file_set_mtime(fat_fd, modtime);
+ fat_file_set_mtime(fat_fd, times[1].tv_sec);
return RC_OK;
}
@@ -67,7 +66,7 @@ const rtems_filesystem_operations_table msdos_ops = {
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = msdos_shut_down,
- .utime_h = msdos_utime,
+ .utimens_h = msdos_utimens,
.symlink_h = rtems_filesystem_default_symlink,
.readlink_h = rtems_filesystem_default_readlink,
.rename_h = msdos_rename,
diff --git a/cpukit/libfs/src/ftpfs/ftpfs.c b/cpukit/libfs/src/ftpfs/ftpfs.c
index 5e0cb95dd3..06d06bc9cc 100644
--- a/cpukit/libfs/src/ftpfs/ftpfs.c
+++ b/cpukit/libfs/src/ftpfs/ftpfs.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -5,27 +7,29 @@
*/
/*
- * Copyright (c) 2009-2012 embedded brains GmbH.
- *
- * embedded brains GmbH
- * Obere Lagerstr. 30
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
- *
- * (c) Copyright 2002
- * Thomas Doerfler
- * IMD Ingenieurbuero fuer Microcomputertechnik
- * Herbststr. 8
- * 82178 Puchheim, Germany
- * <Thomas.Doerfler@imd-systems.de>
+ * COPYRIGHT (C) 2009-2012 embedded brains GmbH.
+ * COPYRIGHT (C) 2002 IMD Ingenieurbuero fuer Microcomputertechnik.
*
- * This code has been created after closly inspecting "tftpdriver.c" from Eric
- * Norum.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -1384,7 +1388,7 @@ static const rtems_filesystem_operations_table rtems_ftpfs_ops = {
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = rtems_ftpfs_unmount_me,
- .utime_h = rtems_filesystem_default_utime,
+ .utimens_h = rtems_filesystem_default_utimens,
.symlink_h = rtems_filesystem_default_symlink,
.readlink_h = rtems_filesystem_default_readlink,
.rename_h = rtems_filesystem_default_rename,
diff --git a/cpukit/libfs/src/ftpfs/tftpDriver.c b/cpukit/libfs/src/ftpfs/tftpDriver.c
index 7cbb402b63..bc0e74ad86 100644
--- a/cpukit/libfs/src/ftpfs/tftpDriver.c
+++ b/cpukit/libfs/src/ftpfs/tftpDriver.c
@@ -1039,7 +1039,7 @@ static const rtems_filesystem_operations_table rtems_tftp_ops = {
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = rtems_tftpfs_shutdown,
- .utime_h = rtems_filesystem_default_utime,
+ .utimens_h = rtems_filesystem_default_utimens,
.symlink_h = rtems_filesystem_default_symlink,
.readlink_h = rtems_filesystem_default_readlink,
.rename_h = rtems_filesystem_default_rename,
diff --git a/cpukit/libfs/src/imfs/imfs_init.c b/cpukit/libfs/src/imfs/imfs_init.c
index 1b9b76912a..8685caae68 100644
--- a/cpukit/libfs/src/imfs/imfs_init.c
+++ b/cpukit/libfs/src/imfs/imfs_init.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -7,12 +9,28 @@
*/
/*
- * COPYRIGHT (c) 1989-1999.
- * On-Line Applications Research Corporation (OAR).
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -40,7 +58,7 @@ static const rtems_filesystem_operations_table IMFS_ops = {
.mount_h = IMFS_mount,
.unmount_h = IMFS_unmount,
.fsunmount_me_h = IMFS_fsunmount,
- .utime_h = IMFS_utime,
+ .utimens_h = IMFS_utimens,
.symlink_h = IMFS_symlink,
.readlink_h = IMFS_readlink,
.rename_h = IMFS_rename,
diff --git a/cpukit/libfs/src/imfs/imfs_utime.c b/cpukit/libfs/src/imfs/imfs_utime.c
deleted file mode 100644
index 21e5139ce7..0000000000
--- a/cpukit/libfs/src/imfs/imfs_utime.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * @file
- *
- * @ingroup IMFS
- *
- * @brief Set IMFS File Access and Modification Times
- */
-
-/*
- * COPYRIGHT (c) 1989-1999.
- * On-Line Applications Research Corporation (OAR).
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <rtems/imfs.h>
-
-#include <sys/time.h>
-
-int IMFS_utime(
- const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
-)
-{
- IMFS_jnode_t *the_jnode;
-
- the_jnode = (IMFS_jnode_t *) loc->node_access;
-
- the_jnode->stat_atime = actime;
- the_jnode->stat_mtime = modtime;
- the_jnode->stat_ctime = time( NULL );
-
- return 0;
-}
diff --git a/cpukit/libfs/src/imfs/imfs_utimens.c b/cpukit/libfs/src/imfs/imfs_utimens.c
new file mode 100644
index 0000000000..78cc766ab0
--- /dev/null
+++ b/cpukit/libfs/src/imfs/imfs_utimens.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup IMFS
+ *
+ * @brief Set IMFS File Access and Modification Times
+ */
+
+/*
+ * COPYRIGHT (C) 1989, 2021 On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/imfs.h>
+
+#include <sys/time.h>
+
+int IMFS_utimens(
+ const rtems_filesystem_location_info_t *loc,
+ struct timespec times[2]
+)
+{
+ IMFS_jnode_t *the_jnode;
+
+ the_jnode = (IMFS_jnode_t *) loc->node_access;
+
+ the_jnode->stat_atime = times[0].tv_sec;
+ the_jnode->stat_mtime = times[1].tv_sec;
+ the_jnode->stat_ctime = time( NULL );
+
+ return 0;
+}
diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index aae208ccef..8bc3d85cc3 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -1118,10 +1118,9 @@ static int rtems_jffs2_statvfs(
return 0;
}
-static int rtems_jffs2_utime(
+static int rtems_jffs2_utimens(
const rtems_filesystem_location_info_t *loc,
- time_t actime,
- time_t modtime
+ struct timespec times[2]
)
{
struct _inode *inode = rtems_jffs2_get_inode_by_location(loc);
@@ -1129,8 +1128,8 @@ static int rtems_jffs2_utime(
int eno;
iattr.ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_CTIME;
- iattr.ia_atime = actime;
- iattr.ia_mtime = modtime;
+ iattr.ia_atime = times[0].tv_sec;
+ iattr.ia_mtime = times[1].tv_sec;
iattr.ia_ctime = get_seconds();
eno = -jffs2_do_setattr(inode, &iattr);
@@ -1186,7 +1185,7 @@ static const rtems_filesystem_operations_table rtems_jffs2_ops = {
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = rtems_jffs2_fsunmount,
- .utime_h = rtems_jffs2_utime,
+ .utimens_h = rtems_jffs2_utimens,
.symlink_h = rtems_jffs2_symlink,
.readlink_h = rtems_jffs2_readlink,
.rename_h = rtems_jffs2_rename,
diff --git a/cpukit/libfs/src/rfs/rtems-rfs-rtems.c b/cpukit/libfs/src/rfs/rtems-rfs-rtems.c
index 35db9eeca1..0efab2cca6 100644
--- a/cpukit/libfs/src/rfs/rtems-rfs-rtems.c
+++ b/cpukit/libfs/src/rfs/rtems-rfs-rtems.c
@@ -296,16 +296,14 @@ rtems_rfs_rtems_chown (const rtems_filesystem_location_info_t *pathloc,
* This routine is the implementation of the utime() system call for the
* RFS.
*
- * @param pathloc
- * @param atime
- * @param mtime
+ * @param pathloc The path to the file to be modified
+ * @param times The times to update the file to
* return int
*/
static int
-rtems_rfs_rtems_utime(const rtems_filesystem_location_info_t* pathloc,
- time_t atime,
- time_t mtime)
+rtems_rfs_rtems_utimens(const rtems_filesystem_location_info_t* pathloc,
+ struct timespec times[2])
{
rtems_rfs_file_system* fs = rtems_rfs_rtems_pathloc_dev (pathloc);
rtems_rfs_ino ino = rtems_rfs_rtems_get_pathloc_ino (pathloc);
@@ -318,8 +316,8 @@ rtems_rfs_rtems_utime(const rtems_filesystem_location_info_t* pathloc,
return rtems_rfs_rtems_error ("utime: read inode", rc);
}
- rtems_rfs_inode_set_atime (&inode, atime);
- rtems_rfs_inode_set_mtime (&inode, mtime);
+ rtems_rfs_inode_set_atime (&inode, times[0].tv_sec);
+ rtems_rfs_inode_set_mtime (&inode, times[1].tv_sec);
rc = rtems_rfs_inode_close (fs, &inode);
if (rc)
@@ -735,7 +733,7 @@ const rtems_filesystem_operations_table rtems_rfs_ops =
.mount_h = rtems_filesystem_default_mount,
.unmount_h = rtems_filesystem_default_unmount,
.fsunmount_me_h = rtems_rfs_rtems_shutdown,
- .utime_h = rtems_rfs_rtems_utime,
+ .utimens_h = rtems_rfs_rtems_utimens,
.symlink_h = rtems_rfs_rtems_symlink,
.readlink_h = rtems_rfs_rtems_readlink,
.rename_h = rtems_rfs_rtems_rename,
diff --git a/cpukit/libmisc/monitor/mon-editor.c b/cpukit/libmisc/monitor/mon-editor.c
index dcea9fcc69..6957fee9c8 100644
--- a/cpukit/libmisc/monitor/mon-editor.c
+++ b/cpukit/libmisc/monitor/mon-editor.c
@@ -360,7 +360,17 @@ rtems_monitor_line_editor (
{
int bs;
pos--;
- strcpy (buffer + pos, buffer + pos + 1);
+
+ /*
+ * Memory operation used here instead of string
+       * method due to the src and dest of the buffer overlapping.
+ */
+ memmove(
+ buffer + pos,
+ buffer + pos + 1,
+ RTEMS_COMMAND_BUFFER_SIZE - pos - 1
+ );
+ buffer[RTEMS_COMMAND_BUFFER_SIZE - 1] = '\0';
fprintf(stdout,"\b%s \b", buffer + pos);
for (bs = 0; bs < ((int) strlen (buffer) - pos); bs++)
putchar ('\b');
diff --git a/cpukit/libmisc/shell/main_cp.c b/cpukit/libmisc/shell/main_cp.c
index 913ece184f..bbadb514b4 100644
--- a/cpukit/libmisc/shell/main_cp.c
+++ b/cpukit/libmisc/shell/main_cp.c
@@ -255,8 +255,14 @@ main_cp(rtems_shell_cp_globals* cp_globals, int argc, char *argv[])
*/
if (r == -1) {
if (Rflag && (Lflag || Hflag))
+ #ifdef __rtems__
+ (void)
+ #endif
stat(*argv, &tmp_stat);
else
+ #ifdef __rtems__
+ (void)
+ #endif
lstat(*argv, &tmp_stat);
if (S_ISDIR(tmp_stat.st_mode) && Rflag)
diff --git a/cpukit/libmisc/shell/main_edit.c b/cpukit/libmisc/shell/main_edit.c
index 8ac7eeea5b..ed1371f7fa 100644
--- a/cpukit/libmisc/shell/main_edit.c
+++ b/cpukit/libmisc/shell/main_edit.c
@@ -684,7 +684,7 @@ static void moveto(struct editor *ed, int pos, int center) {
// Text selection
//
-static int get_selection(struct editor *ed, int *start, int *end) {
+static int get_selection(struct editor *ed, size_t *start, size_t *end) {
if (ed->anchor == -1) {
*start = *end = -1;
return 0;
@@ -705,7 +705,7 @@ static int get_selection(struct editor *ed, int *start, int *end) {
}
static int get_selected_text(struct editor *ed, char *buffer, int size) {
- int selstart, selend, len;
+ size_t selstart, selend, len;
if (!get_selection(ed, &selstart, &selend)) return 0;
len = selend - selstart;
@@ -726,7 +726,7 @@ static void update_selection(struct editor *ed, int select) {
}
static int erase_selection(struct editor *ed) {
- int selstart, selend;
+ size_t selstart, selend;
if (!get_selection(ed, &selstart, &selend)) return 0;
moveto(ed, selstart, 0);
@@ -1086,7 +1086,7 @@ static void display_line(struct editor *ed, int pos, int fullline) {
int maxcol = ed->env->cols + margin;
unsigned char *bufptr = ed->env->linebuf;
unsigned char *p = text_ptr(ed, pos);
- int selstart, selend, ch;
+ size_t selstart, selend, ch;
char *s;
(void) get_selection(ed, &selstart, &selend);
@@ -1545,9 +1545,9 @@ static void del(struct editor *ed) {
}
static void indent(struct editor *ed, unsigned char *indentation) {
- int start, end, i, lines, toplines, newline, ch;
+ size_t start, end, i, lines, toplines, newline, ch;
unsigned char *buffer, *p;
- int buflen;
+ size_t buflen;
int width = strlen((const char*) indentation);
int pos = ed->linepos + ed->col;
@@ -1602,7 +1602,7 @@ static void indent(struct editor *ed, unsigned char *indentation) {
}
static void unindent(struct editor *ed, unsigned char *indentation) {
- int start, end, i, newline, ch, shrinkage, topofs;
+ size_t start, end, i, newline, ch, shrinkage, topofs;
unsigned char *buffer, *p;
int width = strlen((const char*) indentation);
int pos = ed->linepos + ed->col;
@@ -1686,7 +1686,7 @@ static void redo(struct editor *ed) {
//
static void copy_selection(struct editor *ed) {
- int selstart, selend;
+ size_t selstart, selend;
if (!get_selection(ed, &selstart, &selend)) return;
ed->env->clipsize = selend - selstart;
diff --git a/cpukit/libmisc/shell/main_help.c b/cpukit/libmisc/shell/main_help.c
index 9f59e9df4b..564bc30a9c 100644
--- a/cpukit/libmisc/shell/main_help.c
+++ b/cpukit/libmisc/shell/main_help.c
@@ -148,7 +148,7 @@ static int rtems_shell_help(
line+= rtems_shell_help_cmd(shell_cmd);
if (lines && (line > lines)) {
printf("Press any key to continue...");
- getchar();
+ (void) getchar();
printf("\n");
line = 0;
}
diff --git a/cpukit/libmisc/uuid/gen_uuid.c b/cpukit/libmisc/uuid/gen_uuid.c
index 3ca75a08ce..71b8a569bb 100644
--- a/cpukit/libmisc/uuid/gen_uuid.c
+++ b/cpukit/libmisc/uuid/gen_uuid.c
@@ -165,6 +165,9 @@ static int get_random_fd(void)
if (fd >= 0) {
i = fcntl(fd, F_GETFD);
if (i >= 0)
+ #ifdef __rtems__
+ (void)
+ #endif
fcntl(fd, F_SETFD, i | FD_CLOEXEC);
}
#endif
@@ -426,6 +429,9 @@ try_again:
}
rewind(state_f);
fl.l_type = F_UNLCK;
+ #ifdef __rtems__
+ (void)
+ #endif
fcntl(state_fd, F_SETLK, &fl);
}
diff --git a/cpukit/libpci/pci_cfg_print_code.c b/cpukit/libpci/pci_cfg_print_code.c
index e758fa661a..e0979db74a 100644
--- a/cpukit/libpci/pci_cfg_print_code.c
+++ b/cpukit/libpci/pci_cfg_print_code.c
@@ -65,8 +65,8 @@ static void pci_cfg_print_device(struct pci_dev *dev, char *prefix)
char name[32];
char buf[8];
printf("%s.resources = {\n", prefix);
- strcpy(buf, prefix);
- strcat(buf, "\t");
+ strlcpy(buf, prefix, sizeof(buf));
+ strlcat(buf, "\t", sizeof(buf));
pci_cfg_print_resources(dev->resources, buf);
printf("%s},\n", prefix);
if (dev->next == NULL) {
diff --git a/cpukit/posix/src/cancel.c b/cpukit/posix/src/cancel.c
index 4756f10389..aa4a434037 100644
--- a/cpukit/posix/src/cancel.c
+++ b/cpukit/posix/src/cancel.c
@@ -38,31 +38,26 @@ int pthread_cancel( pthread_t thread )
Thread_Control *executing;
Per_CPU_Control *cpu_self;
- /*
- * Don't even think about deleting a resource from an ISR.
- */
-
- if ( _ISR_Is_in_progress() ) {
- return EPROTO;
- }
-
the_thread = _Thread_Get( thread, &lock_context );
if ( the_thread == NULL ) {
return ESRCH;
}
- cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
- _ISR_lock_ISR_enable( &lock_context );
-
+ cpu_self = _Per_CPU_Get();
executing = _Per_CPU_Get_executing( cpu_self );
- if ( the_thread == executing ) {
- _Thread_Exit( executing, THREAD_LIFE_TERMINATING, PTHREAD_CANCELED );
+ if (
+ the_thread == executing &&
+ !_Per_CPU_Is_ISR_in_progress( cpu_self )
+ ) {
+ _ISR_lock_ISR_enable( &lock_context );
+ _Thread_Exit( PTHREAD_CANCELED, THREAD_LIFE_TERMINATING );
} else {
+ _Thread_Dispatch_disable_with_CPU( cpu_self, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
_Thread_Cancel( the_thread, executing, PTHREAD_CANCELED );
+ _Thread_Dispatch_enable( cpu_self );
}
-
- _Thread_Dispatch_enable( cpu_self );
return 0;
}
diff --git a/cpukit/posix/src/clocknanosleep.c b/cpukit/posix/src/clocknanosleep.c
new file mode 100644
index 0000000000..bfb78466df
--- /dev/null
+++ b/cpukit/posix/src/clocknanosleep.c
@@ -0,0 +1,128 @@
+/**
+ * @file
+ *
+ * @ingroup POSIXAPI
+ *
+ * @brief Suspends execution of the calling thread until the requested time elapses
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2015.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2016. Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <time.h>
+
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/timespec.h>
+#include <rtems/score/timecounter.h>
+#include <rtems/score/watchdogimpl.h>
+#include <rtems/posix/posixapi.h>
+
+static Thread_queue_Control _Nanosleep_Pseudo_queue =
+ THREAD_QUEUE_INITIALIZER( "Nanosleep" );
+
+/*
+ * High Resolution Sleep with Specifiable Clock, IEEE Std 1003.1, 2001
+ */
+int clock_nanosleep(
+ clockid_t clock_id,
+ int flags,
+ const struct timespec *rqtp,
+ struct timespec *rmtp
+)
+{
+ Thread_queue_Context queue_context;
+ bool absolute;
+ Thread_Control *executing;
+ int eno;
+
+ if ( clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC ) {
+ return ENOTSUP;
+ }
+
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_thread_state(
+ &queue_context,
+ STATES_WAITING_FOR_TIME | STATES_INTERRUPTIBLE_BY_SIGNAL
+ );
+
+ if ( ( flags & TIMER_ABSTIME ) != 0 ) {
+ absolute = true;
+ rmtp = NULL;
+ } else {
+ absolute = false;
+ }
+
+ if ( clock_id == CLOCK_REALTIME ) {
+ _Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
+ &queue_context,
+ rqtp,
+ absolute
+ );
+ } else {
+ _Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
+ &queue_context,
+ rqtp,
+ absolute
+ );
+ }
+
+ _Thread_queue_Acquire( &_Nanosleep_Pseudo_queue, &queue_context );
+ executing = _Thread_Executing;
+ _Thread_queue_Enqueue(
+ &_Nanosleep_Pseudo_queue.Queue,
+ &_Thread_queue_Operations_FIFO,
+ executing,
+ &queue_context
+ );
+ eno = _POSIX_Get_error_after_wait( executing );
+
+ if ( eno == ETIMEDOUT ) {
+ eno = 0;
+ }
+
+ if ( rmtp != NULL ) {
+#if defined( RTEMS_POSIX_API )
+ if ( eno == EINTR ) {
+ struct timespec actual_end;
+ struct timespec planned_end;
+
+ if ( clock_id == CLOCK_REALTIME ) {
+ _Timecounter_Nanotime( &actual_end );
+ } else {
+ _Timecounter_Nanouptime( &actual_end );
+ }
+
+ _Watchdog_Ticks_to_timespec(
+ executing->Timer.Watchdog.expire,
+ &planned_end
+ );
+
+ if ( _Timespec_Less_than( &actual_end, &planned_end ) ) {
+ _Timespec_Subtract( &actual_end, &planned_end, rmtp );
+ } else {
+ _Timespec_Set_to_zero( rmtp );
+ }
+ } else {
+ _Timespec_Set_to_zero( rmtp );
+ }
+#else
+ _Assert( eno != EINTR );
+ _Timespec_Set_to_zero( rmtp );
+#endif
+ }
+
+ return eno;
+}
diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c
index 296c03d1c6..ee2f8a0787 100644
--- a/cpukit/posix/src/condwaitsupp.c
+++ b/cpukit/posix/src/condwaitsupp.c
@@ -109,7 +109,7 @@ int _POSIX_Condition_variables_Wait_support(
_Thread_queue_Context_initialize( &queue_context );
if ( abstime != NULL ) {
- _Thread_queue_Context_set_timeout_argument( &queue_context, abstime );
+ _Thread_queue_Context_set_timeout_argument( &queue_context, abstime, true );
if ( _POSIX_Condition_variables_Get_clock( flags ) == CLOCK_MONOTONIC ) {
_Thread_queue_Context_set_enqueue_callout(
diff --git a/cpukit/posix/src/mqueuerecvsupp.c b/cpukit/posix/src/mqueuerecvsupp.c
index 2adcb7b3b2..9c26bf100b 100644
--- a/cpukit/posix/src/mqueuerecvsupp.c
+++ b/cpukit/posix/src/mqueuerecvsupp.c
@@ -69,7 +69,7 @@ ssize_t _POSIX_Message_queue_Receive_support(
}
_Thread_queue_Context_set_enqueue_callout( &queue_context, enqueue_callout );
- _Thread_queue_Context_set_timeout_argument( &queue_context, abstime );
+ _Thread_queue_Context_set_timeout_argument( &queue_context, abstime, true );
/*
* Now if something goes wrong, we return a "length" of -1
diff --git a/cpukit/posix/src/mqueuesendsupp.c b/cpukit/posix/src/mqueuesendsupp.c
index 7be23fc595..328a01ece4 100644
--- a/cpukit/posix/src/mqueuesendsupp.c
+++ b/cpukit/posix/src/mqueuesendsupp.c
@@ -70,7 +70,7 @@ int _POSIX_Message_queue_Send_support(
}
_Thread_queue_Context_set_enqueue_callout( &queue_context, enqueue_callout );
- _Thread_queue_Context_set_timeout_argument( &queue_context, abstime );
+ _Thread_queue_Context_set_timeout_argument( &queue_context, abstime, true );
_CORE_message_queue_Acquire_critical(
&the_mq->Message_queue,
diff --git a/cpukit/posix/src/mutexlocksupp.c b/cpukit/posix/src/mutexlocksupp.c
index e5bd1784ea..983ee578ad 100644
--- a/cpukit/posix/src/mutexlocksupp.c
+++ b/cpukit/posix/src/mutexlocksupp.c
@@ -69,7 +69,7 @@ int _POSIX_Mutex_Lock_support(
executing = _POSIX_Mutex_Acquire( the_mutex, &queue_context );
_Thread_queue_Context_set_enqueue_callout( &queue_context, enqueue_callout);
- _Thread_queue_Context_set_timeout_argument( &queue_context, abstime );
+ _Thread_queue_Context_set_timeout_argument( &queue_context, abstime, true );
switch ( _POSIX_Mutex_Get_protocol( flags ) ) {
case POSIX_MUTEX_PRIORITY_CEILING:
diff --git a/cpukit/posix/src/nanosleep.c b/cpukit/posix/src/nanosleep.c
index 8d1a4b84e4..167dcbc787 100644
--- a/cpukit/posix/src/nanosleep.c
+++ b/cpukit/posix/src/nanosleep.c
@@ -23,17 +23,8 @@
#include <time.h>
-#include <rtems/score/threadimpl.h>
-#include <rtems/score/threadqimpl.h>
-#include <rtems/score/timespec.h>
-#include <rtems/score/timecounter.h>
-#include <rtems/score/watchdogimpl.h>
-#include <rtems/posix/posixapi.h>
#include <rtems/seterr.h>
-static Thread_queue_Control _Nanosleep_Pseudo_queue =
- THREAD_QUEUE_INITIALIZER( "Nanosleep" );
-
/*
* 14.2.5 High Resolution Sleep, P1003.1b-1993, p. 269
*/
@@ -52,85 +43,3 @@ int nanosleep(
return eno;
}
-
-/*
- * High Resolution Sleep with Specifiable Clock, IEEE Std 1003.1, 2001
- */
-int clock_nanosleep(
- clockid_t clock_id,
- int flags,
- const struct timespec *rqtp,
- struct timespec *rmtp
-)
-{
- Thread_queue_Context queue_context;
- struct timespec uptime;
- const struct timespec *end;
- Thread_Control *executing;
- int eno;
-
- if ( clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC ) {
- return ENOTSUP;
- }
-
- _Thread_queue_Context_initialize( &queue_context );
- _Thread_queue_Context_set_thread_state(
- &queue_context,
- STATES_WAITING_FOR_TIME | STATES_INTERRUPTIBLE_BY_SIGNAL
- );
-
- if ( ( flags & TIMER_ABSTIME ) != 0 ) {
- end = rqtp;
-
- if ( clock_id == CLOCK_REALTIME ) {
- _Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
- &queue_context,
- end
- );
- } else {
- _Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
- &queue_context,
- end
- );
- }
- } else {
- _Timecounter_Nanouptime( &uptime );
- end = _Watchdog_Future_timespec( &uptime, rqtp );
- _Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
- &queue_context,
- end
- );
- }
-
- _Thread_queue_Acquire( &_Nanosleep_Pseudo_queue, &queue_context );
- executing = _Thread_Executing;
- _Thread_queue_Enqueue(
- &_Nanosleep_Pseudo_queue.Queue,
- &_Thread_queue_Operations_FIFO,
- executing,
- &queue_context
- );
- eno = _POSIX_Get_error_after_wait( executing );
-
- if ( eno == ETIMEDOUT ) {
- eno = 0;
- }
-
- if ( rmtp != NULL && ( flags & TIMER_ABSTIME ) == 0 ) {
- if ( eno == EINTR ) {
- struct timespec actual_end;
-
- _Timecounter_Nanouptime( &actual_end );
-
- if ( _Timespec_Less_than( &actual_end, end ) ) {
- _Timespec_Subtract( &actual_end, end, rmtp );
- } else {
- _Timespec_Set_to_zero( rmtp );
- }
- } else {
- _Timespec_Set_to_zero( rmtp );
- }
- }
-
- return eno;
-}
diff --git a/cpukit/posix/src/prwlocktimedrdlock.c b/cpukit/posix/src/prwlocktimedrdlock.c
index 79059800bf..809f355359 100644
--- a/cpukit/posix/src/prwlocktimedrdlock.c
+++ b/cpukit/posix/src/prwlocktimedrdlock.c
@@ -37,7 +37,8 @@ int pthread_rwlock_timedrdlock(
_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
&queue_context,
- abstime
+ abstime,
+ true
);
status = _CORE_RWLock_Seize_for_reading(
&the_rwlock->RWLock,
diff --git a/cpukit/posix/src/prwlocktimedwrlock.c b/cpukit/posix/src/prwlocktimedwrlock.c
index 9fb9a880a0..614d230ba9 100644
--- a/cpukit/posix/src/prwlocktimedwrlock.c
+++ b/cpukit/posix/src/prwlocktimedwrlock.c
@@ -39,7 +39,8 @@ int pthread_rwlock_timedwrlock(
_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
&queue_context,
- abstime
+ abstime,
+ true
);
status = _CORE_RWLock_Seize_for_writing(
&the_rwlock->RWLock,
diff --git a/cpukit/posix/src/pthreadcreate.c b/cpukit/posix/src/pthreadcreate.c
index 055d304699..9474d07032 100644
--- a/cpukit/posix/src/pthreadcreate.c
+++ b/cpukit/posix/src/pthreadcreate.c
@@ -221,7 +221,7 @@ int pthread_create(
config.stack_free = _Stack_Free;
config.stack_area = _Stack_Allocate( config.stack_size );
} else {
- config.stack_free = _Stack_Free_nothing;
+ config.stack_free = _Objects_Free_nothing;
}
if ( config.stack_area == NULL ) {
diff --git a/cpukit/posix/src/pthreadexit.c b/cpukit/posix/src/pthreadexit.c
index 657497010b..d5b53bb45f 100644
--- a/cpukit/posix/src/pthreadexit.c
+++ b/cpukit/posix/src/pthreadexit.c
@@ -27,14 +27,5 @@
void pthread_exit( void *value_ptr )
{
- Thread_Control *executing;
- Per_CPU_Control *cpu_self;
-
- cpu_self = _Thread_Dispatch_disable();
- executing = _Per_CPU_Get_executing( cpu_self );
-
- _Thread_Exit( executing, THREAD_LIFE_TERMINATING, value_ptr );
-
- _Thread_Dispatch_direct_no_return( cpu_self );
- RTEMS_UNREACHABLE();
+ _Thread_Exit( value_ptr, THREAD_LIFE_TERMINATING );
}
diff --git a/cpukit/posix/src/semtimedwait.c b/cpukit/posix/src/semtimedwait.c
index 21a8320b50..ae83e90540 100644
--- a/cpukit/posix/src/semtimedwait.c
+++ b/cpukit/posix/src/semtimedwait.c
@@ -60,7 +60,8 @@ int sem_timedwait(
);
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
&queue_context,
- abstime
+ abstime,
+ true
);
_Thread_queue_Context_set_ISR_level( &queue_context, level );
_Thread_queue_Enqueue(
diff --git a/cpukit/posix/src/sigtimedwait.c b/cpukit/posix/src/sigtimedwait.c
index 4e2b6c2658..0bdb65fd45 100644
--- a/cpukit/posix/src/sigtimedwait.c
+++ b/cpukit/posix/src/sigtimedwait.c
@@ -76,7 +76,6 @@ int sigtimedwait(
siginfo_t signal_information;
siginfo_t *the_info;
int signo;
- struct timespec uptime;
Thread_queue_Context queue_context;
int error;
@@ -93,13 +92,10 @@ int sigtimedwait(
*/
if ( timeout != NULL ) {
- const struct timespec *end;
-
- _Timecounter_Nanouptime( &uptime );
- end = _Watchdog_Future_timespec( &uptime, timeout );
_Thread_queue_Context_set_enqueue_timeout_monotonic_timespec(
&queue_context,
- end
+ timeout,
+ false
);
} else {
_Thread_queue_Context_set_enqueue_do_nothing_extra( &queue_context );
diff --git a/cpukit/posix/src/sysconf.c b/cpukit/posix/src/sysconf.c
index 1696ec51bb..439a27e49f 100644
--- a/cpukit/posix/src/sysconf.c
+++ b/cpukit/posix/src/sysconf.c
@@ -54,10 +54,6 @@ long sysconf(
return (long) rtems_scheduler_get_processor_maximum();
case _SC_POSIX_26_VERSION:
return (long) _POSIX_26_VERSION;
-#if defined(__sparc__)
- case 515: /* Solaris _SC_STACK_PROT */
- return 0;
-#endif
default:
rtems_set_errno_and_return_minus_one( EINVAL );
}
diff --git a/cpukit/rtems/src/clockset.c b/cpukit/rtems/src/clockset.c
index 7a085ada69..07384290b8 100644
--- a/cpukit/rtems/src/clockset.c
+++ b/cpukit/rtems/src/clockset.c
@@ -29,26 +29,25 @@ rtems_status_code rtems_clock_set(
const rtems_time_of_day *tod
)
{
- Status_Control status;
+ rtems_status_code status;
+ Status_Control score_status;
+ struct timespec tod_as_timespec;
+ ISR_lock_Context lock_context;
- if ( !tod )
- return RTEMS_INVALID_ADDRESS;
+ status = _TOD_Validate( tod, TOD_ENABLE_TICKS_VALIDATION );
- if ( _TOD_Validate( tod ) ) {
- struct timespec tod_as_timespec;
- ISR_lock_Context lock_context;
-
- tod_as_timespec.tv_sec = _TOD_To_seconds( tod );
- tod_as_timespec.tv_nsec = tod->ticks
- * rtems_configuration_get_nanoseconds_per_tick();
+ if ( status != RTEMS_SUCCESSFUL ) {
+ return status;
+ }
- _TOD_Lock();
- _TOD_Acquire( &lock_context );
- status = _TOD_Set( &tod_as_timespec, &lock_context );
- _TOD_Unlock();
+ tod_as_timespec.tv_sec = _TOD_To_seconds( tod );
+ tod_as_timespec.tv_nsec = tod->ticks
+ * rtems_configuration_get_nanoseconds_per_tick();
- return _Status_Get( status );
- }
+ _TOD_Lock();
+ _TOD_Acquire( &lock_context );
+ score_status = _TOD_Set( &tod_as_timespec, &lock_context );
+ _TOD_Unlock();
- return RTEMS_INVALID_CLOCK;
+ return _Status_Get( score_status );
}
diff --git a/cpukit/rtems/src/clocktodvalidate.c b/cpukit/rtems/src/clocktodvalidate.c
index 2685bfd6e7..14b3f79d8e 100644
--- a/cpukit/rtems/src/clocktodvalidate.c
+++ b/cpukit/rtems/src/clocktodvalidate.c
@@ -35,17 +35,23 @@ const uint32_t _TOD_Days_per_month[ 2 ][ 13 ] = {
{ 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }
};
-bool _TOD_Validate(
- const rtems_time_of_day *the_tod
+rtems_status_code _TOD_Validate(
+ const rtems_time_of_day *the_tod,
+ TOD_Ticks_validation ticks_validation
)
{
- uint32_t days_in_month;
- uint32_t ticks_per_second;
+ uint32_t days_in_month;
+ uint32_t ticks_per_second;
+ uint32_t ticks_mask;
- ticks_per_second = TOD_MICROSECONDS_PER_SECOND /
- rtems_configuration_get_microseconds_per_tick();
- if ((!the_tod) ||
- (the_tod->ticks >= ticks_per_second) ||
+ if ( the_tod == NULL ) {
+ return RTEMS_INVALID_ADDRESS;
+ }
+
+ ticks_per_second = rtems_clock_get_ticks_per_second();
+ ticks_mask = (uint32_t) ticks_validation;
+
+ if ( ( ( the_tod->ticks & ticks_mask ) >= ticks_per_second ) ||
(the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
(the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
@@ -53,8 +59,9 @@ bool _TOD_Validate(
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
(the_tod->year < TOD_BASE_YEAR) ||
(the_tod->year > TOD_LATEST_YEAR) ||
- (the_tod->day == 0) )
- return false;
+ (the_tod->day == 0) ) {
+ return RTEMS_INVALID_CLOCK;
+ }
if (((the_tod->year % 4) == 0 && (the_tod->year % 100 != 0)) ||
(the_tod->year % 400 == 0))
@@ -62,8 +69,9 @@ bool _TOD_Validate(
else
days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
- if ( the_tod->day > days_in_month )
- return false;
+ if ( the_tod->day > days_in_month ) {
+ return RTEMS_INVALID_CLOCK;
+ }
- return true;
+ return RTEMS_SUCCESSFUL;
}
diff --git a/cpukit/rtems/src/msgqconstruct.c b/cpukit/rtems/src/msgqconstruct.c
index 6af57454cc..63ec75edff 100644
--- a/cpukit/rtems/src/msgqconstruct.c
+++ b/cpukit/rtems/src/msgqconstruct.c
@@ -41,7 +41,12 @@ static void *_Message_queue_Get_buffers(
return NULL;
}
- the_message_queue->free_message_buffers = config->storage_free;
+ if ( config->storage_free != NULL ) {
+ the_message_queue->free_message_buffers = config->storage_free;
+ } else {
+ the_message_queue->free_message_buffers = _Objects_Free_nothing;
+ }
+
return config->storage_area;
}
diff --git a/cpukit/rtems/src/taskconstruct.c b/cpukit/rtems/src/taskconstruct.c
index e267db2fc5..6e03440aed 100644
--- a/cpukit/rtems/src/taskconstruct.c
+++ b/cpukit/rtems/src/taskconstruct.c
@@ -92,7 +92,7 @@ static rtems_status_code _RTEMS_tasks_Prepare_user_stack(
if ( config->storage_free != NULL ) {
thread_config->stack_free = config->storage_free;
} else {
- thread_config->stack_free = _Stack_Free_nothing;
+ thread_config->stack_free = _Objects_Free_nothing;
}
return RTEMS_SUCCESSFUL;
diff --git a/cpukit/rtems/src/taskdelete.c b/cpukit/rtems/src/taskdelete.c
index 852cf3b4c1..05321934ff 100644
--- a/cpukit/rtems/src/taskdelete.c
+++ b/cpukit/rtems/src/taskdelete.c
@@ -29,6 +29,7 @@ rtems_status_code rtems_task_delete(
{
Thread_Control *the_thread;
Thread_Close_context context;
+ Per_CPU_Control *cpu_self;
Thread_Control *executing;
_Thread_queue_Context_initialize( &context.Base );
@@ -44,24 +45,23 @@ rtems_status_code rtems_task_delete(
return RTEMS_INVALID_ID;
}
- executing = _Thread_Executing;
+ cpu_self = _Per_CPU_Get();
- if ( the_thread == executing ) {
- Per_CPU_Control *cpu_self;
+ if ( _Per_CPU_Is_ISR_in_progress( cpu_self ) ) {
+ _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
+ return RTEMS_CALLED_FROM_ISR;
+ }
- cpu_self = _Thread_queue_Dispatch_disable( &context.Base );
+ executing = _Per_CPU_Get_executing( cpu_self );
+
+ if ( the_thread == executing ) {
_ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
/*
* The Classic tasks are neither detached nor joinable. In case of
* self deletion, they are detached, otherwise joinable by default.
*/
- _Thread_Exit(
- executing,
- THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED,
- NULL
- );
- _Thread_Dispatch_enable( cpu_self );
+ _Thread_Exit( NULL, THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED );
} else {
_Thread_Close( the_thread, executing, &context );
}
diff --git a/cpukit/rtems/src/taskexit.c b/cpukit/rtems/src/taskexit.c
index 4c8420d255..178e668581 100644
--- a/cpukit/rtems/src/taskexit.c
+++ b/cpukit/rtems/src/taskexit.c
@@ -30,18 +30,5 @@
void rtems_task_exit( void )
{
- Thread_Control *executing;
- Per_CPU_Control *cpu_self;
-
- cpu_self = _Thread_Dispatch_disable();
- executing = _Per_CPU_Get_executing( cpu_self );
-
- _Thread_Exit(
- executing,
- THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED,
- NULL
- );
-
- _Thread_Dispatch_direct_no_return( cpu_self );
- RTEMS_UNREACHABLE();
+ _Thread_Exit( NULL, THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED );
}
diff --git a/cpukit/rtems/src/taskrestart.c b/cpukit/rtems/src/taskrestart.c
index 6a56ff571b..6bf7358384 100644
--- a/cpukit/rtems/src/taskrestart.c
+++ b/cpukit/rtems/src/taskrestart.c
@@ -21,6 +21,7 @@
#endif
#include <rtems/rtems/tasks.h>
+#include <rtems/rtems/statusimpl.h>
#include <rtems/score/threadimpl.h>
rtems_status_code rtems_task_restart(
@@ -31,7 +32,7 @@ rtems_status_code rtems_task_restart(
Thread_Control *the_thread;
ISR_lock_Context lock_context;
Thread_Entry_information entry;
- bool ok;
+ Status_Control status;
the_thread = _Thread_Get( id, &lock_context );
@@ -47,13 +48,7 @@ rtems_status_code rtems_task_restart(
entry = the_thread->Start.Entry;
entry.Kinds.Numeric.argument = argument;
+ status = _Thread_Restart( the_thread, &entry, &lock_context );
- if ( the_thread == _Thread_Executing ) {
- _Thread_Restart_self( the_thread, &entry, &lock_context );
- RTEMS_UNREACHABLE();
- }
-
- ok = _Thread_Restart_other( the_thread, &entry, &lock_context );
-
- return ok ? RTEMS_SUCCESSFUL : RTEMS_INCORRECT_STATE;
+ return _Status_Get( status );
}
diff --git a/cpukit/rtems/src/taskstart.c b/cpukit/rtems/src/taskstart.c
index eca9b5795d..57dbfc83b9 100644
--- a/cpukit/rtems/src/taskstart.c
+++ b/cpukit/rtems/src/taskstart.c
@@ -43,6 +43,10 @@ rtems_status_code rtems_task_start(
ISR_lock_Context lock_context;
Status_Control status;
+ if ( entry_point == NULL ) {
+ return RTEMS_INVALID_ADDRESS;
+ }
+
the_thread = _Thread_Get( id, &lock_context );
if ( the_thread == NULL ) {
diff --git a/cpukit/rtems/src/taskwakewhen.c b/cpukit/rtems/src/taskwakewhen.c
index 5f6a5795fc..4dfa6dfef2 100644
--- a/cpukit/rtems/src/taskwakewhen.c
+++ b/cpukit/rtems/src/taskwakewhen.c
@@ -27,23 +27,22 @@
#include <rtems/score/watchdogimpl.h>
rtems_status_code rtems_task_wake_when(
- rtems_time_of_day *time_buffer
+ const rtems_time_of_day *time_buffer
)
{
- uint32_t seconds;
- Thread_Control *executing;
- Per_CPU_Control *cpu_self;
+ uint32_t seconds;
+ Thread_Control *executing;
+ Per_CPU_Control *cpu_self;
+ rtems_status_code status;
if ( !_TOD_Is_set() )
return RTEMS_NOT_DEFINED;
- if ( !time_buffer )
- return RTEMS_INVALID_ADDRESS;
+ status = _TOD_Validate( time_buffer, TOD_DISABLE_TICKS_VALIDATION );
- time_buffer->ticks = 0;
-
- if ( !_TOD_Validate( time_buffer ) )
- return RTEMS_INVALID_CLOCK;
+ if ( status != RTEMS_SUCCESSFUL ) {
+ return status;
+ }
seconds = _TOD_To_seconds( time_buffer );
diff --git a/cpukit/rtems/src/timercreate.c b/cpukit/rtems/src/timercreate.c
index a3ece5cc4d..59fa353b22 100644
--- a/cpukit/rtems/src/timercreate.c
+++ b/cpukit/rtems/src/timercreate.c
@@ -132,7 +132,8 @@ rtems_status_code _Timer_Fire_when(
Watchdog_Service_routine_entry adaptor
)
{
- rtems_interval seconds;
+ rtems_status_code status;
+ rtems_interval seconds;
if ( !_TOD_Is_set() )
return RTEMS_NOT_DEFINED;
@@ -140,8 +141,11 @@ rtems_status_code _Timer_Fire_when(
if ( !routine )
return RTEMS_INVALID_ADDRESS;
- if ( !_TOD_Validate( wall_time ) )
- return RTEMS_INVALID_CLOCK;
+ status = _TOD_Validate( wall_time, TOD_ENABLE_TICKS_VALIDATION );
+
+ if ( status != RTEMS_SUCCESSFUL ) {
+ return status;
+ }
seconds = _TOD_To_seconds( wall_time );
if ( seconds <= _TOD_Seconds_since_epoch() )
diff --git a/cpukit/sapi/src/exinit.c b/cpukit/sapi/src/exinit.c
index 9015a369a2..c98ed08f10 100644
--- a/cpukit/sapi/src/exinit.c
+++ b/cpukit/sapi/src/exinit.c
@@ -32,7 +32,6 @@
#include <rtems/score/heap.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
-#include <rtems/score/percpudata.h>
#include <rtems/score/priority.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/smpimpl.h>
@@ -58,18 +57,6 @@ _Objects_Information_table[ OBJECTS_APIS_LAST + 1 ] = {
&_POSIX_Objects[ 0 ]
};
-RTEMS_LINKER_RWSET(
- _Per_CPU_Data,
-#if defined(RTEMS_SMP)
- /*
- * In SMP configurations, prevent false cache line sharing of per-processor
- * data with a proper alignment.
- */
- RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
-#endif
- char
-);
-
static void rtems_initialize_data_structures(void)
{
/*
diff --git a/cpukit/score/cpu/aarch64/aarch64-context-validate.S b/cpukit/score/cpu/aarch64/aarch64-context-validate.S
index 57f634934b..1e71bc5b3a 100644
--- a/cpukit/score/cpu/aarch64/aarch64-context-validate.S
+++ b/cpukit/score/cpu/aarch64/aarch64-context-validate.S
@@ -42,7 +42,9 @@
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
+#include <rtems/score/basedefs.h>
+/* These must be 8 byte aligned to avoid misaligned accesses */
#define FRAME_OFFSET_X4 0x00
#define FRAME_OFFSET_X5 0x08
#define FRAME_OFFSET_X6 0x10
@@ -54,18 +56,23 @@
#define FRAME_OFFSET_LR 0x40
#ifdef AARCH64_MULTILIB_VFP
- #define FRAME_OFFSET_V8 0x48
- #define FRAME_OFFSET_V9 0x58
- #define FRAME_OFFSET_V10 0x68
- #define FRAME_OFFSET_V11 0x78
- #define FRAME_OFFSET_V12 0x88
- #define FRAME_OFFSET_V13 0x98
- #define FRAME_OFFSET_V14 0xA8
- #define FRAME_OFFSET_V15 0xB8
-
- #define FRAME_SIZE (FRAME_OFFSET_V15 + 0x10)
+ /* These must be 16 byte aligned to avoid misaligned accesses */
+ #define FRAME_OFFSET_V8 0x50
+ #define FRAME_OFFSET_V9 0x60
+ #define FRAME_OFFSET_V10 0x70
+ #define FRAME_OFFSET_V11 0x80
+ #define FRAME_OFFSET_V12 0x90
+ #define FRAME_OFFSET_V13 0xA0
+ #define FRAME_OFFSET_V14 0xB0
+ #define FRAME_OFFSET_V15 0xC0
+
+ /*
+ * Force 16 byte alignment of the frame size to avoid stack pointer alignment
+ * exceptions.
+ */
+ #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_V15, 16 )
#else
- #define FRAME_SIZE (FRAME_OFFSET_LR + 0x08)
+ #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_LR, 16 )
#endif
.section .text
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index d139fdc6a4..2a4ddbcc61 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -72,10 +72,6 @@
* * The exception returns to the previous execution state
*/
-/*
- * TODO(kmoore) The current implementation here assumes that SP is not
- * misaligned.
- */
.macro JUMP_HANDLER_SHORT
/* Mask to use in BIC, lower 7 bits */
mov x0, #0x7f
@@ -186,13 +182,50 @@ curr_el_sp0_serror_get_pc: /* The current PC is now in LR */
* the current SP.
*/
curr_el_spx_sync:
- msr SCTLR_EL1, XZR
- stp x0, lr, [sp, #-0x10]! /* Push x0,lr on to the stack */
- bl curr_el_spx_sync_get_pc /* Get current execution address */
-curr_el_spx_sync_get_pc: /* The current PC is now in LR */
-/* Use short jump handler since this has an extra instruction to clear SCTLR */
- JUMP_HANDLER_SHORT
- JUMP_TARGET_SPx
+ msr spsel, #0 /* switch to exception stack */
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* shove lr into CEF */
+ bl .push_exception_context_start /* bl to CEF store routine */
+/* Save original sp in x0 for .push_exception_context_finish */
+ msr spsel, #1
+ mov x0, sp
+ msr spsel, #0
+/* Push the remainder of the context */
+ bl .push_exception_context_finish
+/* get jump target and branch/link */
+ bl curr_el_spx_sync_get_pc /* Get current execution address */
+curr_el_spx_sync_get_pc: /* The current PC is now in LR */
+ mov x0, #0x7f /* Mask to use in BIC, lower 7 bits */
+ bic x0, lr, x0 /* Mask LR to base of current vector */
+ ldr x1, [x0, #0x78] /* Load target from last word in vector */
+ and lr, lr, #0x780 /* Mask off bits for vector number */
+ lsr lr, lr, #7 /* Shift the vector bits down */
+/* Store the vector */
+ str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
+ mov x0, sp
+ blr x1
+/* bl to CEF restore routine (doesn't restore lr) */
+ bl .pop_exception_context
+ ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* get lr from CEF */
+/* drop space reserved for CEF and clear exclusive */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+ msr spsel, #1 /* switch to thread stack */
+ eret /* exception return */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+/* Takes up the space of 2 instructions */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+ .word _AArch64_Exception_default
+ .word 0x0
+#else
+ .dword _AArch64_Exception_default
+#endif
.balign 0x80
/*
* The exception handler for IRQ exceptions from the current EL using the
@@ -446,7 +479,7 @@ twiddle:
/*
* Apply the exception frame to the current register status, SP points to the EF
*/
-.pop_exception_context_and_ret:
+.pop_exception_context:
/* Pop daif and spsr */
ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
/* Restore daif and spsr */
@@ -462,8 +495,6 @@ twiddle:
/* Restore fpcr and fpsr */
msr FPSR, x2
msr FPCR, x3
-/* Restore LR */
- ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
/* Pop VFP registers */
ldp q0, q1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
ldp q2, q3, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
@@ -496,12 +527,15 @@ twiddle:
ldp x24, x25, [sp, #0xc0]
ldp x26, x27, [sp, #0xd0]
ldp x28, x29, [sp, #0xe0]
-/* Pop sp (ignored since sp should be shortly restored anyway) and ELR */
+/* Pop sp and ELR */
ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
+/* Restore thread SP */
+ msr spsel, #1
+ mov sp, x0
+ msr spsel, #0
/* Restore exception LR */
msr ELR_EL1, x1
ldp x0, x1, [sp, #0x00]
- add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
/* We must clear reservations to ensure consistency with atomic operations */
clrex
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
new file mode 100755
index 0000000000..dc2afdeca8
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/aarch64-system-registers.h
@@ -0,0 +1,9985 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @brief This header file provides the API to read and write the AArch64
+ * system registers.
+ */
+
+/*
+ * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H
+#define _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ACTLR_EL1, Auxiliary Control Register (EL1) */
+
+static inline uint64_t _AArch64_Read_actlr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ACTLR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_actlr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ACTLR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ACTLR_EL2, Auxiliary Control Register (EL2) */
+
+static inline uint64_t _AArch64_Read_actlr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ACTLR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_actlr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ACTLR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ACTLR_EL3, Auxiliary Control Register (EL3) */
+
+static inline uint64_t _AArch64_Read_actlr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ACTLR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_actlr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ACTLR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR0_EL1, Auxiliary Fault Status Register 0 (EL1) */
+
+static inline uint64_t _AArch64_Read_afsr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR0_EL2, Auxiliary Fault Status Register 0 (EL2) */
+
+static inline uint64_t _AArch64_Read_afsr0_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR0_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr0_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR0_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR0_EL3, Auxiliary Fault Status Register 0 (EL3) */
+
+static inline uint64_t _AArch64_Read_afsr0_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR0_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr0_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR0_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR1_EL1, Auxiliary Fault Status Register 1 (EL1) */
+
+static inline uint64_t _AArch64_Read_afsr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR1_EL2, Auxiliary Fault Status Register 1 (EL2) */
+
+static inline uint64_t _AArch64_Read_afsr1_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR1_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr1_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR1_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AFSR1_EL3, Auxiliary Fault Status Register 1 (EL3) */
+
+static inline uint64_t _AArch64_Read_afsr1_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AFSR1_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_afsr1_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AFSR1_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AIDR_EL1, Auxiliary ID Register */
+
+static inline uint64_t _AArch64_Read_aidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* AMAIR_EL1, Auxiliary Memory Attribute Indirection Register (EL1) */
+
+static inline uint64_t _AArch64_Read_amair_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMAIR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amair_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMAIR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMAIR_EL2, Auxiliary Memory Attribute Indirection Register (EL2) */
+
+static inline uint64_t _AArch64_Read_amair_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMAIR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amair_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMAIR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMAIR_EL3, Auxiliary Memory Attribute Indirection Register (EL3) */
+
+static inline uint64_t _AArch64_Read_amair_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMAIR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amair_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMAIR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APDAKEYHI_EL1, Pointer Authentication Key A for Data (bits[127:64]) */
+
+static inline uint64_t _AArch64_Read_apdakeyhi_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APDAKEYHI_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apdakeyhi_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APDAKEYHI_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APDAKEYLO_EL1, Pointer Authentication Key A for Data (bits[63:0]) */
+
+static inline uint64_t _AArch64_Read_apdakeylo_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APDAKEYLO_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apdakeylo_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APDAKEYLO_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APDBKEYHI_EL1, Pointer Authentication Key B for Data (bits[127:64]) */
+
+static inline uint64_t _AArch64_Read_apdbkeyhi_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APDBKEYHI_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apdbkeyhi_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APDBKEYHI_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APDBKEYLO_EL1, Pointer Authentication Key B for Data (bits[63:0]) */
+
+static inline uint64_t _AArch64_Read_apdbkeylo_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APDBKEYLO_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apdbkeylo_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APDBKEYLO_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APGAKEYHI_EL1, Pointer Authentication Key A for Code (bits[127:64]) */
+
+static inline uint64_t _AArch64_Read_apgakeyhi_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APGAKEYHI_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apgakeyhi_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APGAKEYHI_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APGAKEYLO_EL1, Pointer Authentication Key A for Code (bits[63:0]) */
+
+static inline uint64_t _AArch64_Read_apgakeylo_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APGAKEYLO_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apgakeylo_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APGAKEYLO_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APIAKEYHI_EL1, Pointer Authentication Key A for Instruction (bits[127:64]) */
+
+static inline uint64_t _AArch64_Read_apiakeyhi_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APIAKEYHI_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apiakeyhi_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APIAKEYHI_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APIAKEYLO_EL1, Pointer Authentication Key A for Instruction (bits[63:0]) */
+
+static inline uint64_t _AArch64_Read_apiakeylo_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APIAKEYLO_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apiakeylo_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APIAKEYLO_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APIBKEYHI_EL1, Pointer Authentication Key B for Instruction (bits[127:64]) */
+
+static inline uint64_t _AArch64_Read_apibkeyhi_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APIBKEYHI_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apibkeyhi_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APIBKEYHI_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* APIBKEYLO_EL1, Pointer Authentication Key B for Instruction (bits[63:0]) */
+
+static inline uint64_t _AArch64_Read_apibkeylo_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, APIBKEYLO_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_apibkeylo_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr APIBKEYLO_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CCSIDR2_EL1, Current Cache Size ID Register 2 */
+
+#define AARCH64_CCSIDR2_EL1_NUMSETS( _val ) ( ( _val ) << 0 )
+#define AARCH64_CCSIDR2_EL1_NUMSETS_SHIFT 0
+#define AARCH64_CCSIDR2_EL1_NUMSETS_MASK 0xffffffU
+#define AARCH64_CCSIDR2_EL1_NUMSETS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffU )
+
+static inline uint64_t _AArch64_Read_ccsidr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CCSIDR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* CCSIDR_EL1, Current Cache Size ID Register */
+
+#define AARCH64_CCSIDR_EL1_LINESIZE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CCSIDR_EL1_LINESIZE_SHIFT 0
+#define AARCH64_CCSIDR_EL1_LINESIZE_MASK 0x7U
+#define AARCH64_CCSIDR_EL1_LINESIZE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x7U )
+
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_0( _val ) ( ( _val ) << 3 )
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_SHIFT_0 3
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_MASK_0 0x1ff8U
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_GET_0( _reg ) \
+ ( ( ( _reg ) >> 3 ) & 0x3ffU )
+
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_1( _val ) ( ( _val ) << 3 )
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_SHIFT_1 3
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_MASK_1 0xfffff8U
+#define AARCH64_CCSIDR_EL1_ASSOCIATIVITY_GET_1( _reg ) \
+ ( ( ( _reg ) >> 3 ) & 0x1fffffU )
+
+#define AARCH64_CCSIDR_EL1_NUMSETS_0( _val ) ( ( _val ) << 13 )
+#define AARCH64_CCSIDR_EL1_NUMSETS_SHIFT_0 13
+#define AARCH64_CCSIDR_EL1_NUMSETS_MASK_0 0xfffe000U
+#define AARCH64_CCSIDR_EL1_NUMSETS_GET_0( _reg ) \
+ ( ( ( _reg ) >> 13 ) & 0x7fffU )
+
+#define AARCH64_CCSIDR_EL1_NUMSETS_1( _val ) ( ( _val ) << 32 )
+#define AARCH64_CCSIDR_EL1_NUMSETS_SHIFT_1 32
+#define AARCH64_CCSIDR_EL1_NUMSETS_MASK_1 0xffffff00000000ULL
+#define AARCH64_CCSIDR_EL1_NUMSETS_GET_1( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0xffffffULL )
+
+static inline uint64_t _AArch64_Read_ccsidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CCSIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
/* CLIDR_EL1, Cache Level ID Register */

/* CTYPEn: cache type field for level n (3 bits each, starting at bit 0) */
#define AARCH64_CLIDR_EL1_CTYPE1( _val ) ( ( _val ) << 0 )
#define AARCH64_CLIDR_EL1_CTYPE1_SHIFT 0
#define AARCH64_CLIDR_EL1_CTYPE1_MASK ( 0x7U << 0 )
#define AARCH64_CLIDR_EL1_CTYPE1_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE2( _val ) ( ( _val ) << 3 )
#define AARCH64_CLIDR_EL1_CTYPE2_SHIFT 3
#define AARCH64_CLIDR_EL1_CTYPE2_MASK ( 0x7U << 3 )
#define AARCH64_CLIDR_EL1_CTYPE2_GET( _reg ) \
  ( ( ( _reg ) >> 3 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE3( _val ) ( ( _val ) << 6 )
#define AARCH64_CLIDR_EL1_CTYPE3_SHIFT 6
#define AARCH64_CLIDR_EL1_CTYPE3_MASK ( 0x7U << 6 )
#define AARCH64_CLIDR_EL1_CTYPE3_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE4( _val ) ( ( _val ) << 9 )
#define AARCH64_CLIDR_EL1_CTYPE4_SHIFT 9
#define AARCH64_CLIDR_EL1_CTYPE4_MASK ( 0x7U << 9 )
#define AARCH64_CLIDR_EL1_CTYPE4_GET( _reg ) \
  ( ( ( _reg ) >> 9 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE5( _val ) ( ( _val ) << 12 )
#define AARCH64_CLIDR_EL1_CTYPE5_SHIFT 12
#define AARCH64_CLIDR_EL1_CTYPE5_MASK ( 0x7U << 12 )
#define AARCH64_CLIDR_EL1_CTYPE5_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE6( _val ) ( ( _val ) << 15 )
#define AARCH64_CLIDR_EL1_CTYPE6_SHIFT 15
#define AARCH64_CLIDR_EL1_CTYPE6_MASK ( 0x7U << 15 )
#define AARCH64_CLIDR_EL1_CTYPE6_GET( _reg ) \
  ( ( ( _reg ) >> 15 ) & 0x7U )

#define AARCH64_CLIDR_EL1_CTYPE7( _val ) ( ( _val ) << 18 )
#define AARCH64_CLIDR_EL1_CTYPE7_SHIFT 18
#define AARCH64_CLIDR_EL1_CTYPE7_MASK ( 0x7U << 18 )
#define AARCH64_CLIDR_EL1_CTYPE7_GET( _reg ) \
  ( ( ( _reg ) >> 18 ) & 0x7U )

/* LOUIS: bits [23:21] */
#define AARCH64_CLIDR_EL1_LOUIS( _val ) ( ( _val ) << 21 )
#define AARCH64_CLIDR_EL1_LOUIS_SHIFT 21
#define AARCH64_CLIDR_EL1_LOUIS_MASK 0xe00000U
#define AARCH64_CLIDR_EL1_LOUIS_GET( _reg ) \
  ( ( ( _reg ) >> 21 ) & 0x7U )

/* LOC: bits [26:24] */
#define AARCH64_CLIDR_EL1_LOC( _val ) ( ( _val ) << 24 )
#define AARCH64_CLIDR_EL1_LOC_SHIFT 24
#define AARCH64_CLIDR_EL1_LOC_MASK 0x7000000U
#define AARCH64_CLIDR_EL1_LOC_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0x7U )

/* LOUU: bits [29:27] */
#define AARCH64_CLIDR_EL1_LOUU( _val ) ( ( _val ) << 27 )
#define AARCH64_CLIDR_EL1_LOUU_SHIFT 27
#define AARCH64_CLIDR_EL1_LOUU_MASK 0x38000000U
#define AARCH64_CLIDR_EL1_LOUU_GET( _reg ) \
  ( ( ( _reg ) >> 27 ) & 0x7U )

/* ICB occupies bits [32:30] (see the 64-bit mask), so the encode macro must
 * widen before shifting: a plain 32-bit shift by 30 would drop bit 32 of
 * the field (and left-shifting a signed 32-bit argument here is undefined
 * behavior). */
#define AARCH64_CLIDR_EL1_ICB( _val ) ( ( uint64_t ) ( _val ) << 30 )
#define AARCH64_CLIDR_EL1_ICB_SHIFT 30
#define AARCH64_CLIDR_EL1_ICB_MASK 0x1c0000000ULL
#define AARCH64_CLIDR_EL1_ICB_GET( _reg ) \
  ( ( ( _reg ) >> 30 ) & 0x7ULL )
+
/* Reads the CLIDR_EL1 system register via MRS. */
static inline uint64_t _AArch64_Read_clidr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CLIDR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* CONTEXTIDR_EL1, Context ID Register (EL1) */

/* PROCID: process identifier, bits [31:0] */
#define AARCH64_CONTEXTIDR_EL1_PROCID( _x ) ( ( _x ) << 0 )
#define AARCH64_CONTEXTIDR_EL1_PROCID_SHIFT 0
#define AARCH64_CONTEXTIDR_EL1_PROCID_MASK 0xffffffffU
#define AARCH64_CONTEXTIDR_EL1_PROCID_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffffffU )
+
/* Returns the current CONTEXTIDR_EL1 value (MRS). */
static inline uint64_t _AArch64_Read_contextidr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CONTEXTIDR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CONTEXTIDR_EL1 to the given value (MSR). */
static inline void _AArch64_Write_contextidr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr CONTEXTIDR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CONTEXTIDR_EL2, Context ID Register (EL2) */

/* PROCID: process identifier, bits [31:0] */
#define AARCH64_CONTEXTIDR_EL2_PROCID( _x ) ( ( _x ) << 0 )
#define AARCH64_CONTEXTIDR_EL2_PROCID_SHIFT 0
#define AARCH64_CONTEXTIDR_EL2_PROCID_MASK 0xffffffffU
#define AARCH64_CONTEXTIDR_EL2_PROCID_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffffffU )
+
/* Returns the current CONTEXTIDR_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_contextidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CONTEXTIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CONTEXTIDR_EL2 to the given value (MSR). */
static inline void _AArch64_Write_contextidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr CONTEXTIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CPACR_EL1, Architectural Feature Access Control Register */

/* ZEN: bits [17:16] */
#define AARCH64_CPACR_EL1_ZEN( _x ) ( ( _x ) << 16 )
#define AARCH64_CPACR_EL1_ZEN_SHIFT 16
#define AARCH64_CPACR_EL1_ZEN_MASK 0x30000U
#define AARCH64_CPACR_EL1_ZEN_GET( _r ) ( ( ( _r ) >> 16 ) & 0x3U )

/* FPEN: bits [21:20] */
#define AARCH64_CPACR_EL1_FPEN( _x ) ( ( _x ) << 20 )
#define AARCH64_CPACR_EL1_FPEN_SHIFT 20
#define AARCH64_CPACR_EL1_FPEN_MASK 0x300000U
#define AARCH64_CPACR_EL1_FPEN_GET( _r ) ( ( ( _r ) >> 20 ) & 0x3U )

/* TTA: bit 28 */
#define AARCH64_CPACR_EL1_TTA 0x10000000U
+
/* Returns the current CPACR_EL1 value (MRS). */
static inline uint64_t _AArch64_Read_cpacr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CPACR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CPACR_EL1 to the given value (MSR). */
static inline void _AArch64_Write_cpacr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr CPACR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CPTR_EL2, Architectural Feature Trap Register (EL2) */

/* TZ: bit 8 */
#define AARCH64_CPTR_EL2_TZ 0x100U

/* TFP: bit 10 */
#define AARCH64_CPTR_EL2_TFP 0x400U

/* ZEN: bits [17:16] */
#define AARCH64_CPTR_EL2_ZEN( _x ) ( ( _x ) << 16 )
#define AARCH64_CPTR_EL2_ZEN_SHIFT 16
#define AARCH64_CPTR_EL2_ZEN_MASK 0x30000U
#define AARCH64_CPTR_EL2_ZEN_GET( _r ) ( ( ( _r ) >> 16 ) & 0x3U )

/* TTA (layout variant 0): bit 20 */
#define AARCH64_CPTR_EL2_TTA_0 0x100000U

/* FPEN: bits [21:20] */
#define AARCH64_CPTR_EL2_FPEN( _x ) ( ( _x ) << 20 )
#define AARCH64_CPTR_EL2_FPEN_SHIFT 20
#define AARCH64_CPTR_EL2_FPEN_MASK 0x300000U
#define AARCH64_CPTR_EL2_FPEN_GET( _r ) ( ( ( _r ) >> 20 ) & 0x3U )

/* TTA (layout variant 1): bit 28 */
#define AARCH64_CPTR_EL2_TTA_1 0x10000000U

/* TAM: bit 30 */
#define AARCH64_CPTR_EL2_TAM 0x40000000U

/* TCPAC: bit 31 */
#define AARCH64_CPTR_EL2_TCPAC 0x80000000U
+
/* Returns the current CPTR_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_cptr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CPTR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CPTR_EL2 to the given value (MSR). */
static inline void _AArch64_Write_cptr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr CPTR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CPTR_EL3, Architectural Feature Trap Register (EL3) */

#define AARCH64_CPTR_EL3_EZ 0x100U /* bit 8 */

#define AARCH64_CPTR_EL3_TFP 0x400U /* bit 10 */

#define AARCH64_CPTR_EL3_TTA 0x100000U /* bit 20 */

#define AARCH64_CPTR_EL3_TAM 0x40000000U /* bit 30 */

#define AARCH64_CPTR_EL3_TCPAC 0x80000000U /* bit 31 */
+
/* Returns the current CPTR_EL3 value (MRS). */
static inline uint64_t _AArch64_Read_cptr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CPTR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CPTR_EL3 to the given value (MSR). */
static inline void _AArch64_Write_cptr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr CPTR_EL3, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CSSELR_EL1, Cache Size Selection Register */

/* IND: bit 0 */
#define AARCH64_CSSELR_EL1_IND 0x1U

/* LEVEL: bits [3:1] */
#define AARCH64_CSSELR_EL1_LEVEL( _x ) ( ( _x ) << 1 )
#define AARCH64_CSSELR_EL1_LEVEL_SHIFT 1
#define AARCH64_CSSELR_EL1_LEVEL_MASK 0xeU
#define AARCH64_CSSELR_EL1_LEVEL_GET( _r ) ( ( ( _r ) >> 1 ) & 0x7U )

/* TND: bit 4 */
#define AARCH64_CSSELR_EL1_TND 0x10U
+
/* Returns the current CSSELR_EL1 value (MRS). */
static inline uint64_t _AArch64_Read_csselr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CSSELR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets CSSELR_EL1 to the given value (MSR). */
static inline void _AArch64_Write_csselr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr CSSELR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* CTR_EL0, Cache Type Register */

/* IMINLINE: bits [3:0] */
#define AARCH64_CTR_EL0_IMINLINE( _val ) ( ( _val ) << 0 )
#define AARCH64_CTR_EL0_IMINLINE_SHIFT 0
#define AARCH64_CTR_EL0_IMINLINE_MASK 0xfU
#define AARCH64_CTR_EL0_IMINLINE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

/* L1IP: bits [15:14] */
#define AARCH64_CTR_EL0_L1IP( _val ) ( ( _val ) << 14 )
#define AARCH64_CTR_EL0_L1IP_SHIFT 14
#define AARCH64_CTR_EL0_L1IP_MASK 0xc000U
#define AARCH64_CTR_EL0_L1IP_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

/* DMINLINE: bits [19:16] */
#define AARCH64_CTR_EL0_DMINLINE( _val ) ( ( _val ) << 16 )
#define AARCH64_CTR_EL0_DMINLINE_SHIFT 16
#define AARCH64_CTR_EL0_DMINLINE_MASK 0xf0000U
#define AARCH64_CTR_EL0_DMINLINE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

/* ERG: bits [23:20] */
#define AARCH64_CTR_EL0_ERG( _val ) ( ( _val ) << 20 )
#define AARCH64_CTR_EL0_ERG_SHIFT 20
#define AARCH64_CTR_EL0_ERG_MASK 0xf00000U
#define AARCH64_CTR_EL0_ERG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

/* CWG: bits [27:24] */
#define AARCH64_CTR_EL0_CWG( _val ) ( ( _val ) << 24 )
#define AARCH64_CTR_EL0_CWG_SHIFT 24
#define AARCH64_CTR_EL0_CWG_MASK 0xf000000U
#define AARCH64_CTR_EL0_CWG_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

/* IDC: bit 28 */
#define AARCH64_CTR_EL0_IDC 0x10000000U

/* DIC: bit 29 */
#define AARCH64_CTR_EL0_DIC 0x20000000U

/* TMINLINE occupies bits [37:32] (see the 64-bit mask).  The uint64_t cast
 * makes the shift by 32 well-defined even when the argument has a 32-bit
 * type; without it the shift count equals the operand width, which is
 * undefined behavior in C. */
#define AARCH64_CTR_EL0_TMINLINE( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_CTR_EL0_TMINLINE_SHIFT 32
#define AARCH64_CTR_EL0_TMINLINE_MASK 0x3f00000000ULL
#define AARCH64_CTR_EL0_TMINLINE_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0x3fULL )
+
/* Reads the CTR_EL0 system register via MRS. */
static inline uint64_t _AArch64_Read_ctr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, CTR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* DACR32_EL2, Domain Access Control Register */

/* Returns the current DACR32_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_dacr32_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DACR32_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets DACR32_EL2 to the given value (MSR). */
static inline void _AArch64_Write_dacr32_el2( uint64_t value )
{
  __asm__ volatile (
    "msr DACR32_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DCZID_EL0, Data Cache Zero ID Register */

/* BS: bits [3:0] */
#define AARCH64_DCZID_EL0_BS( _x ) ( ( _x ) << 0 )
#define AARCH64_DCZID_EL0_BS_SHIFT 0
#define AARCH64_DCZID_EL0_BS_MASK 0xfU
#define AARCH64_DCZID_EL0_BS_GET( _r ) ( ( ( _r ) >> 0 ) & 0xfU )

/* DZP: bit 4 */
#define AARCH64_DCZID_EL0_DZP 0x10U
+
/* Reads the DCZID_EL0 system register via MRS. */
static inline uint64_t _AArch64_Read_dczid_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DCZID_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ESR_EL1, Exception Syndrome Register (EL1).  Several field names alias
 * the same bit positions (e.g. DIRECTION/ERETA/IOF/TI are all bit 0)
 * because the ISS layout depends on the reported exception class. */

#define AARCH64_ESR_EL1_DIRECTION 0x1U

#define AARCH64_ESR_EL1_ERETA 0x1U

#define AARCH64_ESR_EL1_IOF 0x1U

#define AARCH64_ESR_EL1_TI 0x1U

#define AARCH64_ESR_EL1_BTYPE( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_BTYPE_SHIFT 0
#define AARCH64_ESR_EL1_BTYPE_MASK 0x3U
#define AARCH64_ESR_EL1_BTYPE_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3U )

#define AARCH64_ESR_EL1_DFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_DFSC_SHIFT 0
#define AARCH64_ESR_EL1_DFSC_MASK 0x3fU
#define AARCH64_ESR_EL1_DFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL1_IFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_IFSC_SHIFT 0
#define AARCH64_ESR_EL1_IFSC_MASK 0x3fU
#define AARCH64_ESR_EL1_IFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL1_COMMENT( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_COMMENT_SHIFT 0
#define AARCH64_ESR_EL1_COMMENT_MASK 0xffffU
#define AARCH64_ESR_EL1_COMMENT_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL1_IMM16( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_IMM16_SHIFT 0
#define AARCH64_ESR_EL1_IMM16_MASK 0xffffU
#define AARCH64_ESR_EL1_IMM16_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL1_ISS( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL1_ISS_SHIFT 0
#define AARCH64_ESR_EL1_ISS_MASK 0x1ffffffU
#define AARCH64_ESR_EL1_ISS_GET( _r ) ( ( ( _r ) >> 0 ) & 0x1ffffffU )

#define AARCH64_ESR_EL1_DZF 0x2U

#define AARCH64_ESR_EL1_ERET 0x2U

#define AARCH64_ESR_EL1_AM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL1_AM_SHIFT 1
#define AARCH64_ESR_EL1_AM_MASK 0xeU
#define AARCH64_ESR_EL1_AM_GET( _r ) ( ( ( _r ) >> 1 ) & 0x7U )

#define AARCH64_ESR_EL1_CRM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL1_CRM_SHIFT 1
#define AARCH64_ESR_EL1_CRM_MASK 0x1eU
#define AARCH64_ESR_EL1_CRM_GET( _r ) ( ( ( _r ) >> 1 ) & 0xfU )

#define AARCH64_ESR_EL1_OFF 0x4U

#define AARCH64_ESR_EL1_UFF 0x8U

#define AARCH64_ESR_EL1_IXF 0x10U

#define AARCH64_ESR_EL1_OFFSET 0x10U

#define AARCH64_ESR_EL1_RN( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL1_RN_SHIFT 5
#define AARCH64_ESR_EL1_RN_MASK 0x3e0U
#define AARCH64_ESR_EL1_RN_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL1_RT( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL1_RT_SHIFT 5
#define AARCH64_ESR_EL1_RT_MASK 0x3e0U
#define AARCH64_ESR_EL1_RT_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL1_EX 0x40U

#define AARCH64_ESR_EL1_WNR 0x40U

#define AARCH64_ESR_EL1_IDF 0x80U

#define AARCH64_ESR_EL1_S1PTW 0x80U

#define AARCH64_ESR_EL1_CM 0x100U

#define AARCH64_ESR_EL1_VECITR( _x ) ( ( _x ) << 8 )
#define AARCH64_ESR_EL1_VECITR_SHIFT 8
#define AARCH64_ESR_EL1_VECITR_MASK 0x700U
#define AARCH64_ESR_EL1_VECITR_GET( _r ) ( ( ( _r ) >> 8 ) & 0x7U )

#define AARCH64_ESR_EL1_EA 0x200U

#define AARCH64_ESR_EL1_FNV 0x400U

#define AARCH64_ESR_EL1_AET( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL1_AET_SHIFT 10
#define AARCH64_ESR_EL1_AET_MASK 0x1c00U
#define AARCH64_ESR_EL1_AET_GET( _r ) ( ( ( _r ) >> 10 ) & 0x7U )

#define AARCH64_ESR_EL1_CRN( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL1_CRN_SHIFT 10
#define AARCH64_ESR_EL1_CRN_MASK 0x3c00U
#define AARCH64_ESR_EL1_CRN_GET( _r ) ( ( ( _r ) >> 10 ) & 0xfU )

#define AARCH64_ESR_EL1_RT2( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL1_RT2_SHIFT 10
#define AARCH64_ESR_EL1_RT2_MASK 0x7c00U
#define AARCH64_ESR_EL1_RT2_GET( _r ) ( ( ( _r ) >> 10 ) & 0x1fU )

#define AARCH64_ESR_EL1_SET( _x ) ( ( _x ) << 11 )
#define AARCH64_ESR_EL1_SET_SHIFT 11
#define AARCH64_ESR_EL1_SET_MASK 0x1800U
#define AARCH64_ESR_EL1_SET_GET( _r ) ( ( ( _r ) >> 11 ) & 0x3U )

#define AARCH64_ESR_EL1_IMM8( _x ) ( ( _x ) << 12 )
#define AARCH64_ESR_EL1_IMM8_SHIFT 12
#define AARCH64_ESR_EL1_IMM8_MASK 0xff000U
#define AARCH64_ESR_EL1_IMM8_GET( _r ) ( ( ( _r ) >> 12 ) & 0xffU )

#define AARCH64_ESR_EL1_IESB 0x2000U

#define AARCH64_ESR_EL1_VNCR 0x2000U

#define AARCH64_ESR_EL1_AR 0x4000U

#define AARCH64_ESR_EL1_OP1( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL1_OP1_SHIFT 14
#define AARCH64_ESR_EL1_OP1_MASK 0x1c000U
#define AARCH64_ESR_EL1_OP1_GET( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL1_OPC1_0( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL1_OPC1_SHIFT_0 14
#define AARCH64_ESR_EL1_OPC1_MASK_0 0x1c000U
#define AARCH64_ESR_EL1_OPC1_GET_0( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL1_SF 0x8000U

#define AARCH64_ESR_EL1_OPC1_1( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL1_OPC1_SHIFT_1 16
#define AARCH64_ESR_EL1_OPC1_MASK_1 0xf0000U
#define AARCH64_ESR_EL1_OPC1_GET_1( _r ) ( ( ( _r ) >> 16 ) & 0xfU )

#define AARCH64_ESR_EL1_SRT( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL1_SRT_SHIFT 16
#define AARCH64_ESR_EL1_SRT_MASK 0x1f0000U
#define AARCH64_ESR_EL1_SRT_GET( _r ) ( ( ( _r ) >> 16 ) & 0x1fU )

#define AARCH64_ESR_EL1_OP2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL1_OP2_SHIFT 17
#define AARCH64_ESR_EL1_OP2_MASK 0xe0000U
#define AARCH64_ESR_EL1_OP2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL1_OPC2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL1_OPC2_SHIFT 17
#define AARCH64_ESR_EL1_OPC2_MASK 0xe0000U
#define AARCH64_ESR_EL1_OPC2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL1_CCKNOWNPASS 0x80000U

#define AARCH64_ESR_EL1_OP0( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL1_OP0_SHIFT 20
#define AARCH64_ESR_EL1_OP0_MASK 0x300000U
#define AARCH64_ESR_EL1_OP0_GET( _r ) ( ( ( _r ) >> 20 ) & 0x3U )

#define AARCH64_ESR_EL1_COND( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL1_COND_SHIFT 20
#define AARCH64_ESR_EL1_COND_MASK 0xf00000U
#define AARCH64_ESR_EL1_COND_GET( _r ) ( ( ( _r ) >> 20 ) & 0xfU )

#define AARCH64_ESR_EL1_SSE 0x200000U

#define AARCH64_ESR_EL1_SAS( _x ) ( ( _x ) << 22 )
#define AARCH64_ESR_EL1_SAS_SHIFT 22
#define AARCH64_ESR_EL1_SAS_MASK 0xc00000U
#define AARCH64_ESR_EL1_SAS_GET( _r ) ( ( ( _r ) >> 22 ) & 0x3U )

#define AARCH64_ESR_EL1_TFV 0x800000U

#define AARCH64_ESR_EL1_CV 0x1000000U

#define AARCH64_ESR_EL1_IDS 0x1000000U

#define AARCH64_ESR_EL1_ISV 0x1000000U

#define AARCH64_ESR_EL1_IL 0x2000000U

#define AARCH64_ESR_EL1_EC( _x ) ( ( _x ) << 26 )
#define AARCH64_ESR_EL1_EC_SHIFT 26
#define AARCH64_ESR_EL1_EC_MASK 0xfc000000U
#define AARCH64_ESR_EL1_EC_GET( _r ) ( ( ( _r ) >> 26 ) & 0x3fU )
+
/* Returns the current ESR_EL1 value (MRS). */
static inline uint64_t _AArch64_Read_esr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ESR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets ESR_EL1 to the given value (MSR). */
static inline void _AArch64_Write_esr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr ESR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* ESR_EL2, Exception Syndrome Register (EL2).  Several field names alias
 * the same bit positions (e.g. DIRECTION/ERETA/IOF/TI are all bit 0)
 * because the ISS layout depends on the reported exception class. */

#define AARCH64_ESR_EL2_DIRECTION 0x1U

#define AARCH64_ESR_EL2_ERETA 0x1U

#define AARCH64_ESR_EL2_IOF 0x1U

#define AARCH64_ESR_EL2_TI 0x1U

#define AARCH64_ESR_EL2_BTYPE( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_BTYPE_SHIFT 0
#define AARCH64_ESR_EL2_BTYPE_MASK 0x3U
#define AARCH64_ESR_EL2_BTYPE_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3U )

#define AARCH64_ESR_EL2_DFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_DFSC_SHIFT 0
#define AARCH64_ESR_EL2_DFSC_MASK 0x3fU
#define AARCH64_ESR_EL2_DFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL2_IFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_IFSC_SHIFT 0
#define AARCH64_ESR_EL2_IFSC_MASK 0x3fU
#define AARCH64_ESR_EL2_IFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL2_COMMENT( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_COMMENT_SHIFT 0
#define AARCH64_ESR_EL2_COMMENT_MASK 0xffffU
#define AARCH64_ESR_EL2_COMMENT_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL2_IMM16( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_IMM16_SHIFT 0
#define AARCH64_ESR_EL2_IMM16_MASK 0xffffU
#define AARCH64_ESR_EL2_IMM16_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL2_ISS( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL2_ISS_SHIFT 0
#define AARCH64_ESR_EL2_ISS_MASK 0x1ffffffU
#define AARCH64_ESR_EL2_ISS_GET( _r ) ( ( ( _r ) >> 0 ) & 0x1ffffffU )

#define AARCH64_ESR_EL2_DZF 0x2U

#define AARCH64_ESR_EL2_ERET 0x2U

#define AARCH64_ESR_EL2_AM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL2_AM_SHIFT 1
#define AARCH64_ESR_EL2_AM_MASK 0xeU
#define AARCH64_ESR_EL2_AM_GET( _r ) ( ( ( _r ) >> 1 ) & 0x7U )

#define AARCH64_ESR_EL2_CRM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL2_CRM_SHIFT 1
#define AARCH64_ESR_EL2_CRM_MASK 0x1eU
#define AARCH64_ESR_EL2_CRM_GET( _r ) ( ( ( _r ) >> 1 ) & 0xfU )

#define AARCH64_ESR_EL2_OFF 0x4U

#define AARCH64_ESR_EL2_UFF 0x8U

#define AARCH64_ESR_EL2_IXF 0x10U

#define AARCH64_ESR_EL2_OFFSET 0x10U

#define AARCH64_ESR_EL2_RN( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL2_RN_SHIFT 5
#define AARCH64_ESR_EL2_RN_MASK 0x3e0U
#define AARCH64_ESR_EL2_RN_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL2_RT( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL2_RT_SHIFT 5
#define AARCH64_ESR_EL2_RT_MASK 0x3e0U
#define AARCH64_ESR_EL2_RT_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL2_EX 0x40U

#define AARCH64_ESR_EL2_WNR 0x40U

#define AARCH64_ESR_EL2_IDF 0x80U

#define AARCH64_ESR_EL2_S1PTW 0x80U

#define AARCH64_ESR_EL2_CM 0x100U

#define AARCH64_ESR_EL2_VECITR( _x ) ( ( _x ) << 8 )
#define AARCH64_ESR_EL2_VECITR_SHIFT 8
#define AARCH64_ESR_EL2_VECITR_MASK 0x700U
#define AARCH64_ESR_EL2_VECITR_GET( _r ) ( ( ( _r ) >> 8 ) & 0x7U )

#define AARCH64_ESR_EL2_EA 0x200U

#define AARCH64_ESR_EL2_FNV 0x400U

#define AARCH64_ESR_EL2_AET( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL2_AET_SHIFT 10
#define AARCH64_ESR_EL2_AET_MASK 0x1c00U
#define AARCH64_ESR_EL2_AET_GET( _r ) ( ( ( _r ) >> 10 ) & 0x7U )

#define AARCH64_ESR_EL2_CRN( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL2_CRN_SHIFT 10
#define AARCH64_ESR_EL2_CRN_MASK 0x3c00U
#define AARCH64_ESR_EL2_CRN_GET( _r ) ( ( ( _r ) >> 10 ) & 0xfU )

#define AARCH64_ESR_EL2_RT2( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL2_RT2_SHIFT 10
#define AARCH64_ESR_EL2_RT2_MASK 0x7c00U
#define AARCH64_ESR_EL2_RT2_GET( _r ) ( ( ( _r ) >> 10 ) & 0x1fU )

#define AARCH64_ESR_EL2_SET( _x ) ( ( _x ) << 11 )
#define AARCH64_ESR_EL2_SET_SHIFT 11
#define AARCH64_ESR_EL2_SET_MASK 0x1800U
#define AARCH64_ESR_EL2_SET_GET( _r ) ( ( ( _r ) >> 11 ) & 0x3U )

#define AARCH64_ESR_EL2_IMM8( _x ) ( ( _x ) << 12 )
#define AARCH64_ESR_EL2_IMM8_SHIFT 12
#define AARCH64_ESR_EL2_IMM8_MASK 0xff000U
#define AARCH64_ESR_EL2_IMM8_GET( _r ) ( ( ( _r ) >> 12 ) & 0xffU )

#define AARCH64_ESR_EL2_IESB 0x2000U

#define AARCH64_ESR_EL2_VNCR 0x2000U

#define AARCH64_ESR_EL2_AR 0x4000U

#define AARCH64_ESR_EL2_OP1( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL2_OP1_SHIFT 14
#define AARCH64_ESR_EL2_OP1_MASK 0x1c000U
#define AARCH64_ESR_EL2_OP1_GET( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL2_OPC1_0( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL2_OPC1_SHIFT_0 14
#define AARCH64_ESR_EL2_OPC1_MASK_0 0x1c000U
#define AARCH64_ESR_EL2_OPC1_GET_0( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL2_SF 0x8000U

#define AARCH64_ESR_EL2_OPC1_1( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL2_OPC1_SHIFT_1 16
#define AARCH64_ESR_EL2_OPC1_MASK_1 0xf0000U
#define AARCH64_ESR_EL2_OPC1_GET_1( _r ) ( ( ( _r ) >> 16 ) & 0xfU )

#define AARCH64_ESR_EL2_SRT( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL2_SRT_SHIFT 16
#define AARCH64_ESR_EL2_SRT_MASK 0x1f0000U
#define AARCH64_ESR_EL2_SRT_GET( _r ) ( ( ( _r ) >> 16 ) & 0x1fU )

#define AARCH64_ESR_EL2_OP2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL2_OP2_SHIFT 17
#define AARCH64_ESR_EL2_OP2_MASK 0xe0000U
#define AARCH64_ESR_EL2_OP2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL2_OPC2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL2_OPC2_SHIFT 17
#define AARCH64_ESR_EL2_OPC2_MASK 0xe0000U
#define AARCH64_ESR_EL2_OPC2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL2_CCKNOWNPASS 0x80000U

#define AARCH64_ESR_EL2_OP0( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL2_OP0_SHIFT 20
#define AARCH64_ESR_EL2_OP0_MASK 0x300000U
#define AARCH64_ESR_EL2_OP0_GET( _r ) ( ( ( _r ) >> 20 ) & 0x3U )

#define AARCH64_ESR_EL2_COND( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL2_COND_SHIFT 20
#define AARCH64_ESR_EL2_COND_MASK 0xf00000U
#define AARCH64_ESR_EL2_COND_GET( _r ) ( ( ( _r ) >> 20 ) & 0xfU )

#define AARCH64_ESR_EL2_SSE 0x200000U

#define AARCH64_ESR_EL2_SAS( _x ) ( ( _x ) << 22 )
#define AARCH64_ESR_EL2_SAS_SHIFT 22
#define AARCH64_ESR_EL2_SAS_MASK 0xc00000U
#define AARCH64_ESR_EL2_SAS_GET( _r ) ( ( ( _r ) >> 22 ) & 0x3U )

#define AARCH64_ESR_EL2_TFV 0x800000U

#define AARCH64_ESR_EL2_CV 0x1000000U

#define AARCH64_ESR_EL2_IDS 0x1000000U

#define AARCH64_ESR_EL2_ISV 0x1000000U

#define AARCH64_ESR_EL2_IL 0x2000000U

#define AARCH64_ESR_EL2_EC( _x ) ( ( _x ) << 26 )
#define AARCH64_ESR_EL2_EC_SHIFT 26
#define AARCH64_ESR_EL2_EC_MASK 0xfc000000U
#define AARCH64_ESR_EL2_EC_GET( _r ) ( ( ( _r ) >> 26 ) & 0x3fU )
+
/* Returns the current ESR_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_esr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ESR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets ESR_EL2 to the given value (MSR). */
static inline void _AArch64_Write_esr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr ESR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* ESR_EL3, Exception Syndrome Register (EL3).  Several field names alias
 * the same bit positions (e.g. DIRECTION/ERETA/IOF/TI are all bit 0)
 * because the ISS layout depends on the reported exception class. */

#define AARCH64_ESR_EL3_DIRECTION 0x1U

#define AARCH64_ESR_EL3_ERETA 0x1U

#define AARCH64_ESR_EL3_IOF 0x1U

#define AARCH64_ESR_EL3_TI 0x1U

#define AARCH64_ESR_EL3_BTYPE( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_BTYPE_SHIFT 0
#define AARCH64_ESR_EL3_BTYPE_MASK 0x3U
#define AARCH64_ESR_EL3_BTYPE_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3U )

#define AARCH64_ESR_EL3_DFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_DFSC_SHIFT 0
#define AARCH64_ESR_EL3_DFSC_MASK 0x3fU
#define AARCH64_ESR_EL3_DFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL3_IFSC( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_IFSC_SHIFT 0
#define AARCH64_ESR_EL3_IFSC_MASK 0x3fU
#define AARCH64_ESR_EL3_IFSC_GET( _r ) ( ( ( _r ) >> 0 ) & 0x3fU )

#define AARCH64_ESR_EL3_COMMENT( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_COMMENT_SHIFT 0
#define AARCH64_ESR_EL3_COMMENT_MASK 0xffffU
#define AARCH64_ESR_EL3_COMMENT_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL3_IMM16( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_IMM16_SHIFT 0
#define AARCH64_ESR_EL3_IMM16_MASK 0xffffU
#define AARCH64_ESR_EL3_IMM16_GET( _r ) ( ( ( _r ) >> 0 ) & 0xffffU )

#define AARCH64_ESR_EL3_ISS( _x ) ( ( _x ) << 0 )
#define AARCH64_ESR_EL3_ISS_SHIFT 0
#define AARCH64_ESR_EL3_ISS_MASK 0x1ffffffU
#define AARCH64_ESR_EL3_ISS_GET( _r ) ( ( ( _r ) >> 0 ) & 0x1ffffffU )

#define AARCH64_ESR_EL3_DZF 0x2U

#define AARCH64_ESR_EL3_ERET 0x2U

#define AARCH64_ESR_EL3_AM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL3_AM_SHIFT 1
#define AARCH64_ESR_EL3_AM_MASK 0xeU
#define AARCH64_ESR_EL3_AM_GET( _r ) ( ( ( _r ) >> 1 ) & 0x7U )

#define AARCH64_ESR_EL3_CRM( _x ) ( ( _x ) << 1 )
#define AARCH64_ESR_EL3_CRM_SHIFT 1
#define AARCH64_ESR_EL3_CRM_MASK 0x1eU
#define AARCH64_ESR_EL3_CRM_GET( _r ) ( ( ( _r ) >> 1 ) & 0xfU )

#define AARCH64_ESR_EL3_OFF 0x4U

#define AARCH64_ESR_EL3_UFF 0x8U

#define AARCH64_ESR_EL3_IXF 0x10U

#define AARCH64_ESR_EL3_OFFSET 0x10U

#define AARCH64_ESR_EL3_RN( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL3_RN_SHIFT 5
#define AARCH64_ESR_EL3_RN_MASK 0x3e0U
#define AARCH64_ESR_EL3_RN_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL3_RT( _x ) ( ( _x ) << 5 )
#define AARCH64_ESR_EL3_RT_SHIFT 5
#define AARCH64_ESR_EL3_RT_MASK 0x3e0U
#define AARCH64_ESR_EL3_RT_GET( _r ) ( ( ( _r ) >> 5 ) & 0x1fU )

#define AARCH64_ESR_EL3_EX 0x40U

#define AARCH64_ESR_EL3_WNR 0x40U

#define AARCH64_ESR_EL3_IDF 0x80U

#define AARCH64_ESR_EL3_S1PTW 0x80U

#define AARCH64_ESR_EL3_CM 0x100U

#define AARCH64_ESR_EL3_VECITR( _x ) ( ( _x ) << 8 )
#define AARCH64_ESR_EL3_VECITR_SHIFT 8
#define AARCH64_ESR_EL3_VECITR_MASK 0x700U
#define AARCH64_ESR_EL3_VECITR_GET( _r ) ( ( ( _r ) >> 8 ) & 0x7U )

#define AARCH64_ESR_EL3_EA 0x200U

#define AARCH64_ESR_EL3_FNV 0x400U

#define AARCH64_ESR_EL3_AET( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL3_AET_SHIFT 10
#define AARCH64_ESR_EL3_AET_MASK 0x1c00U
#define AARCH64_ESR_EL3_AET_GET( _r ) ( ( ( _r ) >> 10 ) & 0x7U )

#define AARCH64_ESR_EL3_CRN( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL3_CRN_SHIFT 10
#define AARCH64_ESR_EL3_CRN_MASK 0x3c00U
#define AARCH64_ESR_EL3_CRN_GET( _r ) ( ( ( _r ) >> 10 ) & 0xfU )

#define AARCH64_ESR_EL3_RT2( _x ) ( ( _x ) << 10 )
#define AARCH64_ESR_EL3_RT2_SHIFT 10
#define AARCH64_ESR_EL3_RT2_MASK 0x7c00U
#define AARCH64_ESR_EL3_RT2_GET( _r ) ( ( ( _r ) >> 10 ) & 0x1fU )

#define AARCH64_ESR_EL3_SET( _x ) ( ( _x ) << 11 )
#define AARCH64_ESR_EL3_SET_SHIFT 11
#define AARCH64_ESR_EL3_SET_MASK 0x1800U
#define AARCH64_ESR_EL3_SET_GET( _r ) ( ( ( _r ) >> 11 ) & 0x3U )

#define AARCH64_ESR_EL3_IMM8( _x ) ( ( _x ) << 12 )
#define AARCH64_ESR_EL3_IMM8_SHIFT 12
#define AARCH64_ESR_EL3_IMM8_MASK 0xff000U
#define AARCH64_ESR_EL3_IMM8_GET( _r ) ( ( ( _r ) >> 12 ) & 0xffU )

#define AARCH64_ESR_EL3_IESB 0x2000U

#define AARCH64_ESR_EL3_VNCR 0x2000U

#define AARCH64_ESR_EL3_AR 0x4000U

#define AARCH64_ESR_EL3_OP1( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL3_OP1_SHIFT 14
#define AARCH64_ESR_EL3_OP1_MASK 0x1c000U
#define AARCH64_ESR_EL3_OP1_GET( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL3_OPC1_0( _x ) ( ( _x ) << 14 )
#define AARCH64_ESR_EL3_OPC1_SHIFT_0 14
#define AARCH64_ESR_EL3_OPC1_MASK_0 0x1c000U
#define AARCH64_ESR_EL3_OPC1_GET_0( _r ) ( ( ( _r ) >> 14 ) & 0x7U )

#define AARCH64_ESR_EL3_SF 0x8000U

#define AARCH64_ESR_EL3_OPC1_1( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL3_OPC1_SHIFT_1 16
#define AARCH64_ESR_EL3_OPC1_MASK_1 0xf0000U
#define AARCH64_ESR_EL3_OPC1_GET_1( _r ) ( ( ( _r ) >> 16 ) & 0xfU )

#define AARCH64_ESR_EL3_SRT( _x ) ( ( _x ) << 16 )
#define AARCH64_ESR_EL3_SRT_SHIFT 16
#define AARCH64_ESR_EL3_SRT_MASK 0x1f0000U
#define AARCH64_ESR_EL3_SRT_GET( _r ) ( ( ( _r ) >> 16 ) & 0x1fU )

#define AARCH64_ESR_EL3_OP2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL3_OP2_SHIFT 17
#define AARCH64_ESR_EL3_OP2_MASK 0xe0000U
#define AARCH64_ESR_EL3_OP2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL3_OPC2( _x ) ( ( _x ) << 17 )
#define AARCH64_ESR_EL3_OPC2_SHIFT 17
#define AARCH64_ESR_EL3_OPC2_MASK 0xe0000U
#define AARCH64_ESR_EL3_OPC2_GET( _r ) ( ( ( _r ) >> 17 ) & 0x7U )

#define AARCH64_ESR_EL3_CCKNOWNPASS 0x80000U

#define AARCH64_ESR_EL3_OP0( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL3_OP0_SHIFT 20
#define AARCH64_ESR_EL3_OP0_MASK 0x300000U
#define AARCH64_ESR_EL3_OP0_GET( _r ) ( ( ( _r ) >> 20 ) & 0x3U )

#define AARCH64_ESR_EL3_COND( _x ) ( ( _x ) << 20 )
#define AARCH64_ESR_EL3_COND_SHIFT 20
#define AARCH64_ESR_EL3_COND_MASK 0xf00000U
#define AARCH64_ESR_EL3_COND_GET( _r ) ( ( ( _r ) >> 20 ) & 0xfU )

#define AARCH64_ESR_EL3_SSE 0x200000U

#define AARCH64_ESR_EL3_SAS( _x ) ( ( _x ) << 22 )
#define AARCH64_ESR_EL3_SAS_SHIFT 22
#define AARCH64_ESR_EL3_SAS_MASK 0xc00000U
#define AARCH64_ESR_EL3_SAS_GET( _r ) ( ( ( _r ) >> 22 ) & 0x3U )

#define AARCH64_ESR_EL3_TFV 0x800000U

#define AARCH64_ESR_EL3_CV 0x1000000U

#define AARCH64_ESR_EL3_IDS 0x1000000U

#define AARCH64_ESR_EL3_ISV 0x1000000U

#define AARCH64_ESR_EL3_IL 0x2000000U

#define AARCH64_ESR_EL3_EC( _x ) ( ( _x ) << 26 )
#define AARCH64_ESR_EL3_EC_SHIFT 26
#define AARCH64_ESR_EL3_EC_MASK 0xfc000000U
#define AARCH64_ESR_EL3_EC_GET( _r ) ( ( ( _r ) >> 26 ) & 0x3fU )
+
/* Returns the current ESR_EL3 value (MRS). */
static inline uint64_t _AArch64_Read_esr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ESR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets ESR_EL3 to the given value (MSR). */
static inline void _AArch64_Write_esr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr ESR_EL3, %0" : : "r" ( value ) : "memory"
  );
}
+
/* FAR_EL1, Fault Address Register (EL1) */

/* Returns the current FAR_EL1 value (MRS). */
static inline uint64_t _AArch64_Read_far_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, FAR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets FAR_EL1 to the given value (MSR). */
static inline void _AArch64_Write_far_el1( uint64_t value )
{
  __asm__ volatile (
    "msr FAR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* FAR_EL2, Fault Address Register (EL2) */

/* Returns the current FAR_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_far_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, FAR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets FAR_EL2 to the given value (MSR). */
static inline void _AArch64_Write_far_el2( uint64_t value )
{
  __asm__ volatile (
    "msr FAR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* FAR_EL3, Fault Address Register (EL3) */

/* Returns the current FAR_EL3 value (MRS). */
static inline uint64_t _AArch64_Read_far_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, FAR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets FAR_EL3 to the given value (MSR). */
static inline void _AArch64_Write_far_el3( uint64_t value )
{
  __asm__ volatile (
    "msr FAR_EL3, %0" : : "r" ( value ) : "memory"
  );
}
+
/* FPEXC32_EL2, Floating-Point Exception Control Register */

/* IOF: bit 0 */
#define AARCH64_FPEXC32_EL2_IOF 0x1U

/* DZF: bit 1 */
#define AARCH64_FPEXC32_EL2_DZF 0x2U

/* OFF: bit 2 */
#define AARCH64_FPEXC32_EL2_OFF 0x4U

/* UFF: bit 3 */
#define AARCH64_FPEXC32_EL2_UFF 0x8U

/* IXF: bit 4 */
#define AARCH64_FPEXC32_EL2_IXF 0x10U

/* IDF: bit 7 */
#define AARCH64_FPEXC32_EL2_IDF 0x80U

/* VECITR: bits [10:8] */
#define AARCH64_FPEXC32_EL2_VECITR( _x ) ( ( _x ) << 8 )
#define AARCH64_FPEXC32_EL2_VECITR_SHIFT 8
#define AARCH64_FPEXC32_EL2_VECITR_MASK 0x700U
#define AARCH64_FPEXC32_EL2_VECITR_GET( _r ) ( ( ( _r ) >> 8 ) & 0x7U )

/* TFV: bit 26 */
#define AARCH64_FPEXC32_EL2_TFV 0x4000000U

/* VV: bit 27 */
#define AARCH64_FPEXC32_EL2_VV 0x8000000U

/* FP2V: bit 28 */
#define AARCH64_FPEXC32_EL2_FP2V 0x10000000U

/* DEX: bit 29 */
#define AARCH64_FPEXC32_EL2_DEX 0x20000000U

/* EN: bit 30 */
#define AARCH64_FPEXC32_EL2_EN 0x40000000U

/* EX: bit 31 */
#define AARCH64_FPEXC32_EL2_EX 0x80000000U
+
/* Returns the current FPEXC32_EL2 value (MRS). */
static inline uint64_t _AArch64_Read_fpexc32_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, FPEXC32_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Sets FPEXC32_EL2 to the given value (MSR). */
static inline void _AArch64_Write_fpexc32_el2( uint64_t value )
{
  __asm__ volatile (
    "msr FPEXC32_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
+/* GCR_EL1, Tag Control Register. */
+/* NOTE(review): generated EXCLUDE field helpers (bits [15:0]) and RRND flag. */
+
+#define AARCH64_GCR_EL1_EXCLUDE( _val ) ( ( _val ) << 0 )
+#define AARCH64_GCR_EL1_EXCLUDE_SHIFT 0
+#define AARCH64_GCR_EL1_EXCLUDE_MASK 0xffffU
+#define AARCH64_GCR_EL1_EXCLUDE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffU )
+
+#define AARCH64_GCR_EL1_RRND 0x10000U
+
+static inline uint64_t _AArch64_Read_gcr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, GCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_gcr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr GCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* GMID_EL1, Multiple tag transfer ID Register */
+/* NOTE(review): read accessor only -- no MSR writer is generated here. */
+
+#define AARCH64_GMID_EL1_BS( _val ) ( ( _val ) << 0 )
+#define AARCH64_GMID_EL1_BS_SHIFT 0
+#define AARCH64_GMID_EL1_BS_MASK 0xfU
+#define AARCH64_GMID_EL1_BS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_gmid_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, GMID_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* HACR_EL2, Hypervisor Auxiliary Control Register */
+/* NOTE(review): generated MRS/MSR accessor pair; register contents are
+   IMPLEMENTATION DEFINED, hence no field macros -- verify per target CPU. */
+
+static inline uint64_t _AArch64_Read_hacr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HACR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hacr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HACR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HAFGRTR_EL2, Hypervisor Activity Monitors Fine-Grained Read Trap Register */
+/* NOTE(review): generated per-register trap bits; bit positions assume the
+   Arm ARM encoding -- verify against the manual before relying on them. */
+
+#define AARCH64_HAFGRTR_EL2_AMCNTEN0 0x1U
+
+#define AARCH64_HAFGRTR_EL2_AMCNTEN1 0x20000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR10_EL0 0x40000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER10_EL0 0x80000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR11_EL0 0x100000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER11_EL0 0x200000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR12_EL0 0x400000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER12_EL0 0x800000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR13_EL0 0x1000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER13_EL0 0x2000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR14_EL0 0x4000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER14_EL0 0x8000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR15_EL0 0x10000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER15_EL0 0x20000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR16_EL0 0x40000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER16_EL0 0x80000000U
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR17_EL0 0x100000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER17_EL0 0x200000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR18_EL0 0x400000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER18_EL0 0x800000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR19_EL0 0x1000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER19_EL0 0x2000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR110_EL0 0x4000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER110_EL0 0x8000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR111_EL0 0x10000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER111_EL0 0x20000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR112_EL0 0x40000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER112_EL0 0x80000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR113_EL0 0x100000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER113_EL0 0x200000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR114_EL0 0x400000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER114_EL0 0x800000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVCNTR115_EL0 0x1000000000000ULL
+
+#define AARCH64_HAFGRTR_EL2_AMEVTYPER115_EL0 0x2000000000000ULL
+
+static inline uint64_t _AArch64_Read_hafgrtr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HAFGRTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hafgrtr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HAFGRTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HCR_EL2, Hypervisor Configuration Register */
+/* NOTE(review): generated control bits plus BSU/TWEDEL field helpers; bit
+   positions assume the Arm ARM encoding -- verify against the manual. */
+
+#define AARCH64_HCR_EL2_VM 0x1U
+
+#define AARCH64_HCR_EL2_SWIO 0x2U
+
+#define AARCH64_HCR_EL2_PTW 0x4U
+
+#define AARCH64_HCR_EL2_FMO 0x8U
+
+#define AARCH64_HCR_EL2_IMO 0x10U
+
+#define AARCH64_HCR_EL2_AMO 0x20U
+
+#define AARCH64_HCR_EL2_VF 0x40U
+
+#define AARCH64_HCR_EL2_VI 0x80U
+
+#define AARCH64_HCR_EL2_VSE 0x100U
+
+#define AARCH64_HCR_EL2_FB 0x200U
+
+#define AARCH64_HCR_EL2_BSU( _val ) ( ( _val ) << 10 )
+#define AARCH64_HCR_EL2_BSU_SHIFT 10
+#define AARCH64_HCR_EL2_BSU_MASK 0xc00U
+#define AARCH64_HCR_EL2_BSU_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x3U )
+
+#define AARCH64_HCR_EL2_DC 0x1000U
+
+#define AARCH64_HCR_EL2_TWI 0x2000U
+
+#define AARCH64_HCR_EL2_TWE 0x4000U
+
+#define AARCH64_HCR_EL2_TID0 0x8000U
+
+#define AARCH64_HCR_EL2_TID1 0x10000U
+
+#define AARCH64_HCR_EL2_TID2 0x20000U
+
+#define AARCH64_HCR_EL2_TID3 0x40000U
+
+#define AARCH64_HCR_EL2_TSC 0x80000U
+
+#define AARCH64_HCR_EL2_TIDCP 0x100000U
+
+#define AARCH64_HCR_EL2_TACR 0x200000U
+
+#define AARCH64_HCR_EL2_TSW 0x400000U
+
+#define AARCH64_HCR_EL2_TPCP 0x800000U
+
+#define AARCH64_HCR_EL2_TPU 0x1000000U
+
+#define AARCH64_HCR_EL2_TTLB 0x2000000U
+
+#define AARCH64_HCR_EL2_TVM 0x4000000U
+
+#define AARCH64_HCR_EL2_TGE 0x8000000U
+
+#define AARCH64_HCR_EL2_TDZ 0x10000000U
+
+#define AARCH64_HCR_EL2_HCD 0x20000000U
+
+#define AARCH64_HCR_EL2_TRVM 0x40000000U
+
+#define AARCH64_HCR_EL2_RW 0x80000000U
+
+#define AARCH64_HCR_EL2_CD 0x100000000ULL
+
+#define AARCH64_HCR_EL2_ID 0x200000000ULL
+
+#define AARCH64_HCR_EL2_E2H 0x400000000ULL
+
+#define AARCH64_HCR_EL2_TLOR 0x800000000ULL
+
+#define AARCH64_HCR_EL2_TERR 0x1000000000ULL
+
+#define AARCH64_HCR_EL2_TEA 0x2000000000ULL
+
+#define AARCH64_HCR_EL2_MIOCNCE 0x4000000000ULL
+
+#define AARCH64_HCR_EL2_APK 0x10000000000ULL
+
+#define AARCH64_HCR_EL2_API 0x20000000000ULL
+
+#define AARCH64_HCR_EL2_NV 0x40000000000ULL
+
+#define AARCH64_HCR_EL2_NV1 0x80000000000ULL
+
+#define AARCH64_HCR_EL2_AT 0x100000000000ULL
+
+#define AARCH64_HCR_EL2_NV2 0x200000000000ULL
+
+#define AARCH64_HCR_EL2_FWB 0x400000000000ULL
+
+#define AARCH64_HCR_EL2_FIEN 0x800000000000ULL
+
+#define AARCH64_HCR_EL2_TID4 0x2000000000000ULL
+
+#define AARCH64_HCR_EL2_TICAB 0x4000000000000ULL
+
+#define AARCH64_HCR_EL2_AMVOFFEN 0x8000000000000ULL
+
+#define AARCH64_HCR_EL2_TOCU 0x10000000000000ULL
+
+#define AARCH64_HCR_EL2_ENSCXT 0x20000000000000ULL
+
+#define AARCH64_HCR_EL2_TTLBIS 0x40000000000000ULL
+
+#define AARCH64_HCR_EL2_TTLBOS 0x80000000000000ULL
+
+#define AARCH64_HCR_EL2_ATA 0x100000000000000ULL
+
+#define AARCH64_HCR_EL2_DCT 0x200000000000000ULL
+
+#define AARCH64_HCR_EL2_TID5 0x400000000000000ULL
+
+#define AARCH64_HCR_EL2_TWEDEN 0x800000000000000ULL
+
+#define AARCH64_HCR_EL2_TWEDEL( _val ) ( ( _val ) << 60 )
+#define AARCH64_HCR_EL2_TWEDEL_SHIFT 60
+#define AARCH64_HCR_EL2_TWEDEL_MASK 0xf000000000000000ULL
+#define AARCH64_HCR_EL2_TWEDEL_GET( _reg ) \
+ ( ( ( _reg ) >> 60 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_hcr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HCR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hcr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HCR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HDFGRTR_EL2, Hypervisor Debug Fine-Grained Read Trap Register */
+/* NOTE(review): generated per-register read-trap bits; bit positions assume
+   the Arm ARM encoding -- verify against the manual. */
+
+#define AARCH64_HDFGRTR_EL2_DBGBCRN_EL1 0x1U
+
+#define AARCH64_HDFGRTR_EL2_DBGBVRN_EL1 0x2U
+
+#define AARCH64_HDFGRTR_EL2_DBGWCRN_EL1 0x4U
+
+#define AARCH64_HDFGRTR_EL2_DBGWVRN_EL1 0x8U
+
+#define AARCH64_HDFGRTR_EL2_MDSCR_EL1 0x10U
+
+#define AARCH64_HDFGRTR_EL2_DBGCLAIM 0x20U
+
+#define AARCH64_HDFGRTR_EL2_DBGAUTHSTATUS_EL1 0x40U
+
+#define AARCH64_HDFGRTR_EL2_DBGPRCR_EL1 0x80U
+
+#define AARCH64_HDFGRTR_EL2_OSLSR_EL1 0x200U
+
+#define AARCH64_HDFGRTR_EL2_OSECCR_EL1 0x400U
+
+#define AARCH64_HDFGRTR_EL2_OSDLR_EL1 0x800U
+
+#define AARCH64_HDFGRTR_EL2_PMEVCNTRN_EL0 0x1000U
+
+#define AARCH64_HDFGRTR_EL2_PMEVTYPERN_EL0 0x2000U
+
+#define AARCH64_HDFGRTR_EL2_PMCCFILTR_EL0 0x4000U
+
+#define AARCH64_HDFGRTR_EL2_PMCCNTR_EL0 0x8000U
+
+#define AARCH64_HDFGRTR_EL2_PMCNTEN 0x10000U
+
+#define AARCH64_HDFGRTR_EL2_PMINTEN 0x20000U
+
+#define AARCH64_HDFGRTR_EL2_PMOVS 0x40000U
+
+#define AARCH64_HDFGRTR_EL2_PMSELR_EL0 0x80000U
+
+#define AARCH64_HDFGRTR_EL2_PMMIR_EL1 0x400000U
+
+#define AARCH64_HDFGRTR_EL2_PMBLIMITR_EL1 0x800000U
+
+#define AARCH64_HDFGRTR_EL2_PMBPTR_EL1 0x1000000U
+
+#define AARCH64_HDFGRTR_EL2_PMBSR_EL1 0x2000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSCR_EL1 0x4000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSEVFR_EL1 0x8000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSFCR_EL1 0x10000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSICR_EL1 0x20000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSIDR_EL1 0x40000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSIRR_EL1 0x80000000U
+
+#define AARCH64_HDFGRTR_EL2_PMSLATFR_EL1 0x100000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRC 0x200000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCAUTHSTATUS 0x400000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCAUXCTLR 0x800000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCCLAIM 0x1000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCCNTVRN 0x2000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCID 0x10000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCIMSPECN 0x20000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCOSLSR 0x80000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCPRGCTLR 0x100000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCSEQSTR 0x200000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCSSCSRN 0x400000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCSTATR 0x800000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_TRCVICTLR 0x1000000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_PMUSERENR_EL0 0x200000000000000ULL
+
+#define AARCH64_HDFGRTR_EL2_PMCEIDN_EL0 0x400000000000000ULL
+
+static inline uint64_t _AArch64_Read_hdfgrtr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HDFGRTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hdfgrtr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HDFGRTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HDFGWTR_EL2, Hypervisor Debug Fine-Grained Write Trap Register */
+/* NOTE(review): write-trap counterpart of HDFGRTR_EL2; read-only registers
+   (e.g. OSLSR, PMSIDR) intentionally have no write-trap bit here -- verify. */
+
+#define AARCH64_HDFGWTR_EL2_DBGBCRN_EL1 0x1U
+
+#define AARCH64_HDFGWTR_EL2_DBGBVRN_EL1 0x2U
+
+#define AARCH64_HDFGWTR_EL2_DBGWCRN_EL1 0x4U
+
+#define AARCH64_HDFGWTR_EL2_DBGWVRN_EL1 0x8U
+
+#define AARCH64_HDFGWTR_EL2_MDSCR_EL1 0x10U
+
+#define AARCH64_HDFGWTR_EL2_DBGCLAIM 0x20U
+
+#define AARCH64_HDFGWTR_EL2_DBGPRCR_EL1 0x80U
+
+#define AARCH64_HDFGWTR_EL2_OSLAR_EL1 0x100U
+
+#define AARCH64_HDFGWTR_EL2_OSECCR_EL1 0x400U
+
+#define AARCH64_HDFGWTR_EL2_OSDLR_EL1 0x800U
+
+#define AARCH64_HDFGWTR_EL2_PMEVCNTRN_EL0 0x1000U
+
+#define AARCH64_HDFGWTR_EL2_PMEVTYPERN_EL0 0x2000U
+
+#define AARCH64_HDFGWTR_EL2_PMCCFILTR_EL0 0x4000U
+
+#define AARCH64_HDFGWTR_EL2_PMCCNTR_EL0 0x8000U
+
+#define AARCH64_HDFGWTR_EL2_PMCNTEN 0x10000U
+
+#define AARCH64_HDFGWTR_EL2_PMINTEN 0x20000U
+
+#define AARCH64_HDFGWTR_EL2_PMOVS 0x40000U
+
+#define AARCH64_HDFGWTR_EL2_PMSELR_EL0 0x80000U
+
+#define AARCH64_HDFGWTR_EL2_PMSWINC_EL0 0x100000U
+
+#define AARCH64_HDFGWTR_EL2_PMCR_EL0 0x200000U
+
+#define AARCH64_HDFGWTR_EL2_PMBLIMITR_EL1 0x800000U
+
+#define AARCH64_HDFGWTR_EL2_PMBPTR_EL1 0x1000000U
+
+#define AARCH64_HDFGWTR_EL2_PMBSR_EL1 0x2000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSCR_EL1 0x4000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSEVFR_EL1 0x8000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSFCR_EL1 0x10000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSICR_EL1 0x20000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSIRR_EL1 0x80000000U
+
+#define AARCH64_HDFGWTR_EL2_PMSLATFR_EL1 0x100000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRC 0x200000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCAUXCTLR 0x800000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCCLAIM 0x1000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCCNTVRN 0x2000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCIMSPECN 0x20000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCOSLAR 0x40000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCPRGCTLR 0x100000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCSEQSTR 0x200000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCSSCSRN 0x400000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRCVICTLR 0x1000000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_TRFCR_EL1 0x2000000000000ULL
+
+#define AARCH64_HDFGWTR_EL2_PMUSERENR_EL0 0x200000000000000ULL
+
+static inline uint64_t _AArch64_Read_hdfgwtr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HDFGWTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hdfgwtr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HDFGWTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HFGITR_EL2, Hypervisor Fine-Grained Instruction Trap Register */
+/* NOTE(review): generated per-instruction trap bits (cache/TLB maintenance,
+   address translation, ERET, SVC); bit positions assume the Arm ARM encoding
+   -- verify against the manual. */
+
+#define AARCH64_HFGITR_EL2_ICIALLUIS 0x1U
+
+#define AARCH64_HFGITR_EL2_ICIALLU 0x2U
+
+#define AARCH64_HFGITR_EL2_ICIVAU 0x4U
+
+#define AARCH64_HFGITR_EL2_DCIVAC 0x8U
+
+#define AARCH64_HFGITR_EL2_DCISW 0x10U
+
+#define AARCH64_HFGITR_EL2_DCCSW 0x20U
+
+#define AARCH64_HFGITR_EL2_DCCISW 0x40U
+
+#define AARCH64_HFGITR_EL2_DCCVAU 0x80U
+
+#define AARCH64_HFGITR_EL2_DCCVAP 0x100U
+
+#define AARCH64_HFGITR_EL2_DCCVADP 0x200U
+
+#define AARCH64_HFGITR_EL2_DCCIVAC 0x400U
+
+#define AARCH64_HFGITR_EL2_DCZVA 0x800U
+
+#define AARCH64_HFGITR_EL2_ATS1E1R 0x1000U
+
+#define AARCH64_HFGITR_EL2_ATS1E1W 0x2000U
+
+#define AARCH64_HFGITR_EL2_ATS1E0R 0x4000U
+
+#define AARCH64_HFGITR_EL2_ATS1E0W 0x8000U
+
+#define AARCH64_HFGITR_EL2_ATS1E1RP 0x10000U
+
+#define AARCH64_HFGITR_EL2_ATS1E1WP 0x20000U
+
+#define AARCH64_HFGITR_EL2_TLBIVMALLE1OS 0x40000U
+
+#define AARCH64_HFGITR_EL2_TLBIVAE1OS 0x80000U
+
+#define AARCH64_HFGITR_EL2_TLBIASIDE1OS 0x100000U
+
+#define AARCH64_HFGITR_EL2_TLBIVAAE1OS 0x200000U
+
+#define AARCH64_HFGITR_EL2_TLBIVALE1OS 0x400000U
+
+#define AARCH64_HFGITR_EL2_TLBIVAALE1OS 0x800000U
+
+#define AARCH64_HFGITR_EL2_TLBIRVAE1OS 0x1000000U
+
+#define AARCH64_HFGITR_EL2_TLBIRVAAE1OS 0x2000000U
+
+#define AARCH64_HFGITR_EL2_TLBIRVALE1OS 0x4000000U
+
+#define AARCH64_HFGITR_EL2_TLBIRVAALE1OS 0x8000000U
+
+#define AARCH64_HFGITR_EL2_TLBIVMALLE1IS 0x10000000U
+
+#define AARCH64_HFGITR_EL2_TLBIVAE1IS 0x20000000U
+
+#define AARCH64_HFGITR_EL2_TLBIASIDE1IS 0x40000000U
+
+#define AARCH64_HFGITR_EL2_TLBIVAAE1IS 0x80000000U
+
+#define AARCH64_HFGITR_EL2_TLBIVALE1IS 0x100000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVAALE1IS 0x200000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAE1IS 0x400000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAAE1IS 0x800000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVALE1IS 0x1000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAALE1IS 0x2000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAE1 0x4000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAAE1 0x8000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVALE1 0x10000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIRVAALE1 0x20000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVMALLE1 0x40000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVAE1 0x80000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIASIDE1 0x100000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVAAE1 0x200000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVALE1 0x400000000000ULL
+
+#define AARCH64_HFGITR_EL2_TLBIVAALE1 0x800000000000ULL
+
+#define AARCH64_HFGITR_EL2_CFPRCTX 0x1000000000000ULL
+
+#define AARCH64_HFGITR_EL2_DVPRCTX 0x2000000000000ULL
+
+#define AARCH64_HFGITR_EL2_CPPRCTX 0x4000000000000ULL
+
+#define AARCH64_HFGITR_EL2_ERET 0x8000000000000ULL
+
+#define AARCH64_HFGITR_EL2_SVC_EL0 0x10000000000000ULL
+
+#define AARCH64_HFGITR_EL2_SVC_EL1 0x20000000000000ULL
+
+#define AARCH64_HFGITR_EL2_DCCVAC 0x40000000000000ULL
+
+static inline uint64_t _AArch64_Read_hfgitr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HFGITR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hfgitr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HFGITR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HFGRTR_EL2, Hypervisor Fine-Grained Read Trap Register */
+/* NOTE(review): generated per-register read-trap bits; bit positions assume
+   the Arm ARM encoding -- verify against the manual. */
+
+#define AARCH64_HFGRTR_EL2_AFSR0_EL1 0x1U
+
+#define AARCH64_HFGRTR_EL2_AFSR1_EL1 0x2U
+
+#define AARCH64_HFGRTR_EL2_AIDR_EL1 0x4U
+
+#define AARCH64_HFGRTR_EL2_AMAIR_EL1 0x8U
+
+#define AARCH64_HFGRTR_EL2_APDAKEY 0x10U
+
+#define AARCH64_HFGRTR_EL2_APDBKEY 0x20U
+
+#define AARCH64_HFGRTR_EL2_APGAKEY 0x40U
+
+#define AARCH64_HFGRTR_EL2_APIAKEY 0x80U
+
+#define AARCH64_HFGRTR_EL2_APIBKEY 0x100U
+
+#define AARCH64_HFGRTR_EL2_CCSIDR_EL1 0x200U
+
+#define AARCH64_HFGRTR_EL2_CLIDR_EL1 0x400U
+
+#define AARCH64_HFGRTR_EL2_CONTEXTIDR_EL1 0x800U
+
+#define AARCH64_HFGRTR_EL2_CPACR_EL1 0x1000U
+
+#define AARCH64_HFGRTR_EL2_CSSELR_EL1 0x2000U
+
+#define AARCH64_HFGRTR_EL2_CTR_EL0 0x4000U
+
+#define AARCH64_HFGRTR_EL2_DCZID_EL0 0x8000U
+
+#define AARCH64_HFGRTR_EL2_ESR_EL1 0x10000U
+
+#define AARCH64_HFGRTR_EL2_FAR_EL1 0x20000U
+
+#define AARCH64_HFGRTR_EL2_ISR_EL1 0x40000U
+
+#define AARCH64_HFGRTR_EL2_LORC_EL1 0x80000U
+
+#define AARCH64_HFGRTR_EL2_LOREA_EL1 0x100000U
+
+#define AARCH64_HFGRTR_EL2_LORID_EL1 0x200000U
+
+#define AARCH64_HFGRTR_EL2_LORN_EL1 0x400000U
+
+#define AARCH64_HFGRTR_EL2_LORSA_EL1 0x800000U
+
+#define AARCH64_HFGRTR_EL2_MAIR_EL1 0x1000000U
+
+#define AARCH64_HFGRTR_EL2_MIDR_EL1 0x2000000U
+
+#define AARCH64_HFGRTR_EL2_MPIDR_EL1 0x4000000U
+
+#define AARCH64_HFGRTR_EL2_PAR_EL1 0x8000000U
+
+#define AARCH64_HFGRTR_EL2_REVIDR_EL1 0x10000000U
+
+#define AARCH64_HFGRTR_EL2_SCTLR_EL1 0x20000000U
+
+#define AARCH64_HFGRTR_EL2_SCXTNUM_EL1 0x40000000U
+
+#define AARCH64_HFGRTR_EL2_SCXTNUM_EL0 0x80000000U
+
+#define AARCH64_HFGRTR_EL2_TCR_EL1 0x100000000ULL
+
+#define AARCH64_HFGRTR_EL2_TPIDR_EL1 0x200000000ULL
+
+#define AARCH64_HFGRTR_EL2_TPIDRRO_EL0 0x400000000ULL
+
+#define AARCH64_HFGRTR_EL2_TPIDR_EL0 0x800000000ULL
+
+#define AARCH64_HFGRTR_EL2_TTBR0_EL1 0x1000000000ULL
+
+#define AARCH64_HFGRTR_EL2_TTBR1_EL1 0x2000000000ULL
+
+#define AARCH64_HFGRTR_EL2_VBAR_EL1 0x4000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ICC_IGRPENN_EL1 0x8000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERRIDR_EL1 0x10000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERRSELR_EL1 0x20000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXFR_EL1 0x40000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXCTLR_EL1 0x80000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXSTATUS_EL1 0x100000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXMISCN_EL1 0x200000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXPFGF_EL1 0x400000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXPFGCTL_EL1 0x800000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXPFGCDN_EL1 0x1000000000000ULL
+
+#define AARCH64_HFGRTR_EL2_ERXADDR_EL1 0x2000000000000ULL
+
+static inline uint64_t _AArch64_Read_hfgrtr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HFGRTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hfgrtr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HFGRTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HFGWTR_EL2, Hypervisor Fine-Grained Write Trap Register */
+/* NOTE(review): write-trap counterpart of HFGRTR_EL2; read-only registers
+   (MIDR, CTR, CCSIDR, ...) intentionally have no write-trap bit -- verify. */
+
+#define AARCH64_HFGWTR_EL2_AFSR0_EL1 0x1U
+
+#define AARCH64_HFGWTR_EL2_AFSR1_EL1 0x2U
+
+#define AARCH64_HFGWTR_EL2_AMAIR_EL1 0x8U
+
+#define AARCH64_HFGWTR_EL2_APDAKEY 0x10U
+
+#define AARCH64_HFGWTR_EL2_APDBKEY 0x20U
+
+#define AARCH64_HFGWTR_EL2_APGAKEY 0x40U
+
+#define AARCH64_HFGWTR_EL2_APIAKEY 0x80U
+
+#define AARCH64_HFGWTR_EL2_APIBKEY 0x100U
+
+#define AARCH64_HFGWTR_EL2_CONTEXTIDR_EL1 0x800U
+
+#define AARCH64_HFGWTR_EL2_CPACR_EL1 0x1000U
+
+#define AARCH64_HFGWTR_EL2_CSSELR_EL1 0x2000U
+
+#define AARCH64_HFGWTR_EL2_ESR_EL1 0x10000U
+
+#define AARCH64_HFGWTR_EL2_FAR_EL1 0x20000U
+
+#define AARCH64_HFGWTR_EL2_LORC_EL1 0x80000U
+
+#define AARCH64_HFGWTR_EL2_LOREA_EL1 0x100000U
+
+#define AARCH64_HFGWTR_EL2_LORN_EL1 0x400000U
+
+#define AARCH64_HFGWTR_EL2_LORSA_EL1 0x800000U
+
+#define AARCH64_HFGWTR_EL2_MAIR_EL1 0x1000000U
+
+#define AARCH64_HFGWTR_EL2_PAR_EL1 0x8000000U
+
+#define AARCH64_HFGWTR_EL2_SCTLR_EL1 0x20000000U
+
+#define AARCH64_HFGWTR_EL2_SCXTNUM_EL1 0x40000000U
+
+#define AARCH64_HFGWTR_EL2_SCXTNUM_EL0 0x80000000U
+
+#define AARCH64_HFGWTR_EL2_TCR_EL1 0x100000000ULL
+
+#define AARCH64_HFGWTR_EL2_TPIDR_EL1 0x200000000ULL
+
+#define AARCH64_HFGWTR_EL2_TPIDRRO_EL0 0x400000000ULL
+
+#define AARCH64_HFGWTR_EL2_TPIDR_EL0 0x800000000ULL
+
+#define AARCH64_HFGWTR_EL2_TTBR0_EL1 0x1000000000ULL
+
+#define AARCH64_HFGWTR_EL2_TTBR1_EL1 0x2000000000ULL
+
+#define AARCH64_HFGWTR_EL2_VBAR_EL1 0x4000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ICC_IGRPENN_EL1 0x8000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERRSELR_EL1 0x20000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXCTLR_EL1 0x80000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXSTATUS_EL1 0x100000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXMISCN_EL1 0x200000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXPFGCTL_EL1 0x800000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXPFGCDN_EL1 0x1000000000000ULL
+
+#define AARCH64_HFGWTR_EL2_ERXADDR_EL1 0x2000000000000ULL
+
+static inline uint64_t _AArch64_Read_hfgwtr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HFGWTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hfgwtr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HFGWTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HPFAR_EL2, Hypervisor IPA Fault Address Register */
+/* NOTE(review): FIPA is split into [47:12] and [51:48] field helpers; the
+   GET macros return the field value, not the shifted address -- verify use. */
+
+#define AARCH64_HPFAR_EL2_FIPA_47_12( _val ) ( ( _val ) << 4 )
+#define AARCH64_HPFAR_EL2_FIPA_47_12_SHIFT 4
+#define AARCH64_HPFAR_EL2_FIPA_47_12_MASK 0xfffffffff0ULL
+#define AARCH64_HPFAR_EL2_FIPA_47_12_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfffffffffULL )
+
+#define AARCH64_HPFAR_EL2_FIPA_51_48( _val ) ( ( _val ) << 40 )
+#define AARCH64_HPFAR_EL2_FIPA_51_48_SHIFT 40
+#define AARCH64_HPFAR_EL2_FIPA_51_48_MASK 0xf0000000000ULL
+#define AARCH64_HPFAR_EL2_FIPA_51_48_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0xfULL )
+
+#define AARCH64_HPFAR_EL2_NS 0x8000000000000000ULL
+
+static inline uint64_t _AArch64_Read_hpfar_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HPFAR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hpfar_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HPFAR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* HSTR_EL2, Hypervisor System Trap Register */
+/* NOTE(review): generated MRS/MSR accessor pair; no field macros defined here. */
+
+static inline uint64_t _AArch64_Read_hstr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, HSTR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_hstr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr HSTR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ID_AA64AFR0_EL1, AArch64 Auxiliary Feature Register 0 */
+/* NOTE(review): ID register -- read accessor only, no writer generated. */
+
+static inline uint64_t _AArch64_Read_id_aa64afr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_AA64AFR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_AA64AFR1_EL1, AArch64 Auxiliary Feature Register 1 */
+/* NOTE(review): ID register -- read accessor only, no writer generated. */
+
+static inline uint64_t _AArch64_Read_id_aa64afr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_AA64AFR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_AA64DFR0_EL1, AArch64 Debug Feature Register 0 */
+/* NOTE(review): 4-bit feature fields; BRPS/WRPS/CTX_CMPS encode count minus
+   one per the Arm ARM -- verify before using the raw field value. */
+
+#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_SHIFT 0
+#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_MASK 0xfU
+#define AARCH64_ID_AA64DFR0_EL1_DEBUGVER_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_TRACEVER( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_SHIFT 4
+#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_MASK 0xf0U
+#define AARCH64_ID_AA64DFR0_EL1_TRACEVER_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_PMUVER( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_AA64DFR0_EL1_PMUVER_SHIFT 8
+#define AARCH64_ID_AA64DFR0_EL1_PMUVER_MASK 0xf00U
+#define AARCH64_ID_AA64DFR0_EL1_PMUVER_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_BRPS( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_AA64DFR0_EL1_BRPS_SHIFT 12
+#define AARCH64_ID_AA64DFR0_EL1_BRPS_MASK 0xf000U
+#define AARCH64_ID_AA64DFR0_EL1_BRPS_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_WRPS( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_AA64DFR0_EL1_WRPS_SHIFT 20
+#define AARCH64_ID_AA64DFR0_EL1_WRPS_MASK 0xf00000U
+#define AARCH64_ID_AA64DFR0_EL1_WRPS_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_SHIFT 28
+#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_MASK 0xf0000000U
+#define AARCH64_ID_AA64DFR0_EL1_CTX_CMPS_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+#define AARCH64_ID_AA64DFR0_EL1_PMSVER( _val ) ( ( _val ) << 32 )
+#define AARCH64_ID_AA64DFR0_EL1_PMSVER_SHIFT 32
+#define AARCH64_ID_AA64DFR0_EL1_PMSVER_MASK 0xf00000000ULL
+#define AARCH64_ID_AA64DFR0_EL1_PMSVER_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0xfULL )
+
+#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK( _val ) ( ( _val ) << 36 )
+#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_SHIFT 36
+#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_MASK 0xf000000000ULL
+#define AARCH64_ID_AA64DFR0_EL1_DOUBLELOCK_GET( _reg ) \
+ ( ( ( _reg ) >> 36 ) & 0xfULL )
+
+#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT( _val ) ( ( _val ) << 40 )
+#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_SHIFT 40
+#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_MASK 0xf0000000000ULL
+#define AARCH64_ID_AA64DFR0_EL1_TRACEFILT_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0xfULL )
+
+#define AARCH64_ID_AA64DFR0_EL1_MTPMU( _val ) ( ( _val ) << 48 )
+#define AARCH64_ID_AA64DFR0_EL1_MTPMU_SHIFT 48
+#define AARCH64_ID_AA64DFR0_EL1_MTPMU_MASK 0xf000000000000ULL
+#define AARCH64_ID_AA64DFR0_EL1_MTPMU_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_id_aa64dfr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_AA64DFR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_AA64DFR1_EL1, AArch64 Debug Feature Register 1 */
+/* NOTE(review): ID register -- read accessor only, no writer generated. */
+
+static inline uint64_t _AArch64_Read_id_aa64dfr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_AA64DFR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_AA64ISAR0_EL1, AArch64 Instruction Set Attribute Register 0 */
+/* NOTE(review): 4-bit instruction-set feature fields (AES, SHA, CRC32,
+   atomics, ...); field positions assume the Arm ARM encoding -- verify. */
+
+#define AARCH64_ID_AA64ISAR0_EL1_AES( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_AA64ISAR0_EL1_AES_SHIFT 4
+#define AARCH64_ID_AA64ISAR0_EL1_AES_MASK 0xf0U
+#define AARCH64_ID_AA64ISAR0_EL1_AES_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_SHA1( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_AA64ISAR0_EL1_SHA1_SHIFT 8
+#define AARCH64_ID_AA64ISAR0_EL1_SHA1_MASK 0xf00U
+#define AARCH64_ID_AA64ISAR0_EL1_SHA1_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_SHA2( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_AA64ISAR0_EL1_SHA2_SHIFT 12
+#define AARCH64_ID_AA64ISAR0_EL1_SHA2_MASK 0xf000U
+#define AARCH64_ID_AA64ISAR0_EL1_SHA2_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_CRC32( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_AA64ISAR0_EL1_CRC32_SHIFT 16
+#define AARCH64_ID_AA64ISAR0_EL1_CRC32_MASK 0xf0000U
+#define AARCH64_ID_AA64ISAR0_EL1_CRC32_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_SHIFT 20
+#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_MASK 0xf00000U
+#define AARCH64_ID_AA64ISAR0_EL1_ATOMIC_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_RDM( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_AA64ISAR0_EL1_RDM_SHIFT 28
+#define AARCH64_ID_AA64ISAR0_EL1_RDM_MASK 0xf0000000U
+#define AARCH64_ID_AA64ISAR0_EL1_RDM_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR0_EL1_SHA3( _val ) ( ( _val ) << 32 )
+#define AARCH64_ID_AA64ISAR0_EL1_SHA3_SHIFT 32
+#define AARCH64_ID_AA64ISAR0_EL1_SHA3_MASK 0xf00000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_SHA3_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_SM3( _val ) ( ( _val ) << 36 )
+#define AARCH64_ID_AA64ISAR0_EL1_SM3_SHIFT 36
+#define AARCH64_ID_AA64ISAR0_EL1_SM3_MASK 0xf000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_SM3_GET( _reg ) \
+ ( ( ( _reg ) >> 36 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_SM4( _val ) ( ( _val ) << 40 )
+#define AARCH64_ID_AA64ISAR0_EL1_SM4_SHIFT 40
+#define AARCH64_ID_AA64ISAR0_EL1_SM4_MASK 0xf0000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_SM4_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_DP( _val ) ( ( _val ) << 44 )
+#define AARCH64_ID_AA64ISAR0_EL1_DP_SHIFT 44
+#define AARCH64_ID_AA64ISAR0_EL1_DP_MASK 0xf00000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_DP_GET( _reg ) \
+ ( ( ( _reg ) >> 44 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_FHM( _val ) ( ( _val ) << 48 )
+#define AARCH64_ID_AA64ISAR0_EL1_FHM_SHIFT 48
+#define AARCH64_ID_AA64ISAR0_EL1_FHM_MASK 0xf000000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_FHM_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_TS( _val ) ( ( _val ) << 52 )
+#define AARCH64_ID_AA64ISAR0_EL1_TS_SHIFT 52
+#define AARCH64_ID_AA64ISAR0_EL1_TS_MASK 0xf0000000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_TS_GET( _reg ) \
+ ( ( ( _reg ) >> 52 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_TLB( _val ) ( ( _val ) << 56 )
+#define AARCH64_ID_AA64ISAR0_EL1_TLB_SHIFT 56
+#define AARCH64_ID_AA64ISAR0_EL1_TLB_MASK 0xf00000000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_TLB_GET( _reg ) \
+ ( ( ( _reg ) >> 56 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR0_EL1_RNDR( _val ) ( ( _val ) << 60 )
+#define AARCH64_ID_AA64ISAR0_EL1_RNDR_SHIFT 60
+#define AARCH64_ID_AA64ISAR0_EL1_RNDR_MASK 0xf000000000000000ULL
+#define AARCH64_ID_AA64ISAR0_EL1_RNDR_GET( _reg ) \
+ ( ( ( _reg ) >> 60 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_id_aa64isar0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_AA64ISAR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_AA64ISAR1_EL1, AArch64 Instruction Set Attribute Register 1 */
+/* NOTE(review): 4-bit instruction-set feature fields (pointer auth, JSCVT,
+   BF16, ...); field positions assume the Arm ARM encoding -- verify. */
+
+#define AARCH64_ID_AA64ISAR1_EL1_DPB( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_AA64ISAR1_EL1_DPB_SHIFT 0
+#define AARCH64_ID_AA64ISAR1_EL1_DPB_MASK 0xfU
+#define AARCH64_ID_AA64ISAR1_EL1_DPB_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_APA( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_AA64ISAR1_EL1_APA_SHIFT 4
+#define AARCH64_ID_AA64ISAR1_EL1_APA_MASK 0xf0U
+#define AARCH64_ID_AA64ISAR1_EL1_APA_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_API( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_AA64ISAR1_EL1_API_SHIFT 8
+#define AARCH64_ID_AA64ISAR1_EL1_API_MASK 0xf00U
+#define AARCH64_ID_AA64ISAR1_EL1_API_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_JSCVT( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_SHIFT 12
+#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_MASK 0xf000U
+#define AARCH64_ID_AA64ISAR1_EL1_JSCVT_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_FCMA( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_AA64ISAR1_EL1_FCMA_SHIFT 16
+#define AARCH64_ID_AA64ISAR1_EL1_FCMA_MASK 0xf0000U
+#define AARCH64_ID_AA64ISAR1_EL1_FCMA_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_LRCPC( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_SHIFT 20
+#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_MASK 0xf00000U
+#define AARCH64_ID_AA64ISAR1_EL1_LRCPC_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_GPA( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_AA64ISAR1_EL1_GPA_SHIFT 24
+#define AARCH64_ID_AA64ISAR1_EL1_GPA_MASK 0xf000000U
+#define AARCH64_ID_AA64ISAR1_EL1_GPA_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_GPI( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_AA64ISAR1_EL1_GPI_SHIFT 28
+#define AARCH64_ID_AA64ISAR1_EL1_GPI_MASK 0xf0000000U
+#define AARCH64_ID_AA64ISAR1_EL1_GPI_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS( _val ) ( ( _val ) << 32 )
+#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_SHIFT 32
+#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_MASK 0xf00000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_FRINTTS_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR1_EL1_SB( _val ) ( ( _val ) << 36 )
+#define AARCH64_ID_AA64ISAR1_EL1_SB_SHIFT 36
+#define AARCH64_ID_AA64ISAR1_EL1_SB_MASK 0xf000000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_SB_GET( _reg ) \
+ ( ( ( _reg ) >> 36 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR1_EL1_SPECRES( _val ) ( ( _val ) << 40 )
+#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_SHIFT 40
+#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_MASK 0xf0000000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_SPECRES_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR1_EL1_BF16( _val ) ( ( _val ) << 44 )
+#define AARCH64_ID_AA64ISAR1_EL1_BF16_SHIFT 44
+#define AARCH64_ID_AA64ISAR1_EL1_BF16_MASK 0xf00000000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_BF16_GET( _reg ) \
+ ( ( ( _reg ) >> 44 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR1_EL1_DGH( _val ) ( ( _val ) << 48 )
+#define AARCH64_ID_AA64ISAR1_EL1_DGH_SHIFT 48
+#define AARCH64_ID_AA64ISAR1_EL1_DGH_MASK 0xf000000000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_DGH_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+#define AARCH64_ID_AA64ISAR1_EL1_I8MM( _val ) ( ( _val ) << 52 )
+#define AARCH64_ID_AA64ISAR1_EL1_I8MM_SHIFT 52
+#define AARCH64_ID_AA64ISAR1_EL1_I8MM_MASK 0xf0000000000000ULL
+#define AARCH64_ID_AA64ISAR1_EL1_I8MM_GET( _reg ) \
+ ( ( ( _reg ) >> 52 ) & 0xfULL )
+
/*
 * Reads the ID_AA64ISAR1_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64isar1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64ISAR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_AA64MMFR0_EL1, AArch64 Memory Model Feature Register 0 */

/*
 * The field setters cast _val to uint64_t before shifting: an int
 * argument shifted by 28 or more bits is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_AA64MMFR0_EL1_PARANGE( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_SHIFT 0
#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_MASK 0xfU
#define AARCH64_ID_AA64MMFR0_EL1_PARANGE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_SHIFT 4
#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_MASK 0xf0U
#define AARCH64_ID_AA64MMFR0_EL1_ASIDBITS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_BIGEND( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_SHIFT 8
#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_MASK 0xf00U
#define AARCH64_ID_AA64MMFR0_EL1_BIGEND_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12
#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_MASK 0xf000U
#define AARCH64_ID_AA64MMFR0_EL1_SNSMEM_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16
#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_MASK 0xf0000U
#define AARCH64_ID_AA64MMFR0_EL1_BIGENDEL0_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_MASK 0xf00000U
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_SHIFT 24
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_MASK 0xf000000U
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_SHIFT 28
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_MASK 0xf0000000U
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT 32
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_MASK 0xf00000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN16_2_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT 36
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_MASK 0xf000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN64_2_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2( _val ) ( ( uint64_t ) ( _val ) << 40 )
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT 40
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_MASK 0xf0000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_TGRAN4_2_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_EXS( _val ) ( ( uint64_t ) ( _val ) << 44 )
#define AARCH64_ID_AA64MMFR0_EL1_EXS_SHIFT 44
#define AARCH64_ID_AA64MMFR0_EL1_EXS_MASK 0xf00000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_EXS_GET( _reg ) \
  ( ( ( _reg ) >> 44 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_FGT( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64MMFR0_EL1_FGT_SHIFT 56
#define AARCH64_ID_AA64MMFR0_EL1_FGT_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_FGT_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR0_EL1_ECV( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64MMFR0_EL1_ECV_SHIFT 60
#define AARCH64_ID_AA64MMFR0_EL1_ECV_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64MMFR0_EL1_ECV_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )
+
/*
 * Reads the ID_AA64MMFR0_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64mmfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_AA64MMFR1_EL1, AArch64 Memory Model Feature Register 1 */

/*
 * The field setters cast _val to uint64_t before shifting: an int
 * argument shifted by 28 or more bits is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_SHIFT 0
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_MASK 0xfU
#define AARCH64_ID_AA64MMFR1_EL1_HAFDBS_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_SHIFT 4
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_MASK 0xf0U
#define AARCH64_ID_AA64MMFR1_EL1_VMIDBITS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_VH( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64MMFR1_EL1_VH_SHIFT 8
#define AARCH64_ID_AA64MMFR1_EL1_VH_MASK 0xf00U
#define AARCH64_ID_AA64MMFR1_EL1_VH_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_HPDS( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_SHIFT 12
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_MASK 0xf000U
#define AARCH64_ID_AA64MMFR1_EL1_HPDS_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_LO( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64MMFR1_EL1_LO_SHIFT 16
#define AARCH64_ID_AA64MMFR1_EL1_LO_MASK 0xf0000U
#define AARCH64_ID_AA64MMFR1_EL1_LO_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_PAN( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64MMFR1_EL1_PAN_SHIFT 20
#define AARCH64_ID_AA64MMFR1_EL1_PAN_MASK 0xf00000U
#define AARCH64_ID_AA64MMFR1_EL1_PAN_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_SHIFT 24
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_MASK 0xf000000U
#define AARCH64_ID_AA64MMFR1_EL1_SPECSEI_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_XNX( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64MMFR1_EL1_XNX_SHIFT 28
#define AARCH64_ID_AA64MMFR1_EL1_XNX_MASK 0xf0000000U
#define AARCH64_ID_AA64MMFR1_EL1_XNX_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64MMFR1_EL1_TWED( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64MMFR1_EL1_TWED_SHIFT 32
#define AARCH64_ID_AA64MMFR1_EL1_TWED_MASK 0xf00000000ULL
#define AARCH64_ID_AA64MMFR1_EL1_TWED_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR1_EL1_ETS( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64MMFR1_EL1_ETS_SHIFT 36
#define AARCH64_ID_AA64MMFR1_EL1_ETS_MASK 0xf000000000ULL
#define AARCH64_ID_AA64MMFR1_EL1_ETS_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )
+
/*
 * Reads the ID_AA64MMFR1_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64mmfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_AA64MMFR2_EL1, AArch64 Memory Model Feature Register 2 */

/*
 * The field setters cast _val to uint64_t before shifting: an int
 * argument shifted by 28 or more bits is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_AA64MMFR2_EL1_CNP( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64MMFR2_EL1_CNP_SHIFT 0
#define AARCH64_ID_AA64MMFR2_EL1_CNP_MASK 0xfU
#define AARCH64_ID_AA64MMFR2_EL1_CNP_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_UAO( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64MMFR2_EL1_UAO_SHIFT 4
#define AARCH64_ID_AA64MMFR2_EL1_UAO_MASK 0xf0U
#define AARCH64_ID_AA64MMFR2_EL1_UAO_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_LSM( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64MMFR2_EL1_LSM_SHIFT 8
#define AARCH64_ID_AA64MMFR2_EL1_LSM_MASK 0xf00U
#define AARCH64_ID_AA64MMFR2_EL1_LSM_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_IESB( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64MMFR2_EL1_IESB_SHIFT 12
#define AARCH64_ID_AA64MMFR2_EL1_IESB_MASK 0xf000U
#define AARCH64_ID_AA64MMFR2_EL1_IESB_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_VARANGE( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_SHIFT 16
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_MASK 0xf0000U
#define AARCH64_ID_AA64MMFR2_EL1_VARANGE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_CCIDX( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_SHIFT 20
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_MASK 0xf00000U
#define AARCH64_ID_AA64MMFR2_EL1_CCIDX_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_NV( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64MMFR2_EL1_NV_SHIFT 24
#define AARCH64_ID_AA64MMFR2_EL1_NV_MASK 0xf000000U
#define AARCH64_ID_AA64MMFR2_EL1_NV_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_ST( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64MMFR2_EL1_ST_SHIFT 28
#define AARCH64_ID_AA64MMFR2_EL1_ST_MASK 0xf0000000U
#define AARCH64_ID_AA64MMFR2_EL1_ST_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64MMFR2_EL1_AT( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64MMFR2_EL1_AT_SHIFT 32
#define AARCH64_ID_AA64MMFR2_EL1_AT_MASK 0xf00000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_AT_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_IDS( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64MMFR2_EL1_IDS_SHIFT 36
#define AARCH64_ID_AA64MMFR2_EL1_IDS_MASK 0xf000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_IDS_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_FWB( _val ) ( ( uint64_t ) ( _val ) << 40 )
#define AARCH64_ID_AA64MMFR2_EL1_FWB_SHIFT 40
#define AARCH64_ID_AA64MMFR2_EL1_FWB_MASK 0xf0000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_FWB_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_TTL( _val ) ( ( uint64_t ) ( _val ) << 48 )
#define AARCH64_ID_AA64MMFR2_EL1_TTL_SHIFT 48
#define AARCH64_ID_AA64MMFR2_EL1_TTL_MASK 0xf000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_TTL_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_BBM( _val ) ( ( uint64_t ) ( _val ) << 52 )
#define AARCH64_ID_AA64MMFR2_EL1_BBM_SHIFT 52
#define AARCH64_ID_AA64MMFR2_EL1_BBM_MASK 0xf0000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_BBM_GET( _reg ) \
  ( ( ( _reg ) >> 52 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_EVT( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64MMFR2_EL1_EVT_SHIFT 56
#define AARCH64_ID_AA64MMFR2_EL1_EVT_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_EVT_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64MMFR2_EL1_E0PD( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_SHIFT 60
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64MMFR2_EL1_E0PD_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )
+
/*
 * Reads the ID_AA64MMFR2_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64mmfr2_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64MMFR2_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_AA64PFR0_EL1, AArch64 Processor Feature Register 0 */

/*
 * The field setters cast _val to uint64_t before shifting: an int
 * argument shifted by 28 or more bits is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_AA64PFR0_EL1_EL0( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64PFR0_EL1_EL0_SHIFT 0
#define AARCH64_ID_AA64PFR0_EL1_EL0_MASK 0xfU
#define AARCH64_ID_AA64PFR0_EL1_EL0_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL1( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64PFR0_EL1_EL1_SHIFT 4
#define AARCH64_ID_AA64PFR0_EL1_EL1_MASK 0xf0U
#define AARCH64_ID_AA64PFR0_EL1_EL1_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL2( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64PFR0_EL1_EL2_SHIFT 8
#define AARCH64_ID_AA64PFR0_EL1_EL2_MASK 0xf00U
#define AARCH64_ID_AA64PFR0_EL1_EL2_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_EL3( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64PFR0_EL1_EL3_SHIFT 12
#define AARCH64_ID_AA64PFR0_EL1_EL3_MASK 0xf000U
#define AARCH64_ID_AA64PFR0_EL1_EL3_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_FP( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64PFR0_EL1_FP_SHIFT 16
#define AARCH64_ID_AA64PFR0_EL1_FP_MASK 0xf0000U
#define AARCH64_ID_AA64PFR0_EL1_FP_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_SHIFT 20
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_MASK 0xf00000U
#define AARCH64_ID_AA64PFR0_EL1_ADVSIMD_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_GIC( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_AA64PFR0_EL1_GIC_SHIFT 24
#define AARCH64_ID_AA64PFR0_EL1_GIC_MASK 0xf000000U
#define AARCH64_ID_AA64PFR0_EL1_GIC_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_RAS( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_AA64PFR0_EL1_RAS_SHIFT 28
#define AARCH64_ID_AA64PFR0_EL1_RAS_MASK 0xf0000000U
#define AARCH64_ID_AA64PFR0_EL1_RAS_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )

#define AARCH64_ID_AA64PFR0_EL1_SVE( _val ) ( ( uint64_t ) ( _val ) << 32 )
#define AARCH64_ID_AA64PFR0_EL1_SVE_SHIFT 32
#define AARCH64_ID_AA64PFR0_EL1_SVE_MASK 0xf00000000ULL
#define AARCH64_ID_AA64PFR0_EL1_SVE_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_SEL2( _val ) ( ( uint64_t ) ( _val ) << 36 )
#define AARCH64_ID_AA64PFR0_EL1_SEL2_SHIFT 36
#define AARCH64_ID_AA64PFR0_EL1_SEL2_MASK 0xf000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_SEL2_GET( _reg ) \
  ( ( ( _reg ) >> 36 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_MPAM( _val ) ( ( uint64_t ) ( _val ) << 40 )
#define AARCH64_ID_AA64PFR0_EL1_MPAM_SHIFT 40
#define AARCH64_ID_AA64PFR0_EL1_MPAM_MASK 0xf0000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_MPAM_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_AMU( _val ) ( ( uint64_t ) ( _val ) << 44 )
#define AARCH64_ID_AA64PFR0_EL1_AMU_SHIFT 44
#define AARCH64_ID_AA64PFR0_EL1_AMU_MASK 0xf00000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_AMU_GET( _reg ) \
  ( ( ( _reg ) >> 44 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_DIT( _val ) ( ( uint64_t ) ( _val ) << 48 )
#define AARCH64_ID_AA64PFR0_EL1_DIT_SHIFT 48
#define AARCH64_ID_AA64PFR0_EL1_DIT_MASK 0xf000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_DIT_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_CSV2( _val ) ( ( uint64_t ) ( _val ) << 56 )
#define AARCH64_ID_AA64PFR0_EL1_CSV2_SHIFT 56
#define AARCH64_ID_AA64PFR0_EL1_CSV2_MASK 0xf00000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_CSV2_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xfULL )

#define AARCH64_ID_AA64PFR0_EL1_CSV3( _val ) ( ( uint64_t ) ( _val ) << 60 )
#define AARCH64_ID_AA64PFR0_EL1_CSV3_SHIFT 60
#define AARCH64_ID_AA64PFR0_EL1_CSV3_MASK 0xf000000000000000ULL
#define AARCH64_ID_AA64PFR0_EL1_CSV3_GET( _reg ) \
  ( ( ( _reg ) >> 60 ) & 0xfULL )
+
/*
 * Reads the ID_AA64PFR0_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64pfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64PFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_AA64PFR1_EL1, AArch64 Processor Feature Register 1 */

/*
 * The field setters cast _val to uint64_t for a uniform 64-bit result
 * type, matching the sibling register accessors and guarding against
 * signed-shift overflow (C11 6.5.7).
 */

#define AARCH64_ID_AA64PFR1_EL1_BT( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_AA64PFR1_EL1_BT_SHIFT 0
#define AARCH64_ID_AA64PFR1_EL1_BT_MASK 0xfU
#define AARCH64_ID_AA64PFR1_EL1_BT_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_SSBS( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_AA64PFR1_EL1_SSBS_SHIFT 4
#define AARCH64_ID_AA64PFR1_EL1_SSBS_MASK 0xf0U
#define AARCH64_ID_AA64PFR1_EL1_SSBS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_MTE( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_AA64PFR1_EL1_MTE_SHIFT 8
#define AARCH64_ID_AA64PFR1_EL1_MTE_MASK 0xf00U
#define AARCH64_ID_AA64PFR1_EL1_MTE_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_SHIFT 12
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_MASK 0xf000U
#define AARCH64_ID_AA64PFR1_EL1_RAS_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_SHIFT 16
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_MASK 0xf0000U
#define AARCH64_ID_AA64PFR1_EL1_MPAM_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )
+
/*
 * Reads the ID_AA64PFR1_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_aa64pfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AA64PFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
+/* ID_AFR0_EL1, AArch64 Auxiliary Feature Register 0 */
+
/*
 * Reads the ID_AFR0_EL1 system register via MRS.  The volatile asm with
 * a "memory" clobber keeps the read ordered with respect to surrounding
 * memory accesses.
 */
static inline uint64_t _AArch64_Read_id_afr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_AFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_DFR0_EL1, AArch64 Debug Feature Register 0 */

/*
 * The field setters cast _val to uint64_t before shifting: 0xf shifted
 * left by 28 as an int overflows and is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_DFR0_EL1_COPDBG( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_DFR0_EL1_COPDBG_SHIFT 0
#define AARCH64_ID_DFR0_EL1_COPDBG_MASK 0xfU
#define AARCH64_ID_DFR0_EL1_COPDBG_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_COPSDBG( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_DFR0_EL1_COPSDBG_SHIFT 4
#define AARCH64_ID_DFR0_EL1_COPSDBG_MASK 0xf0U
#define AARCH64_ID_DFR0_EL1_COPSDBG_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MMAPDBG( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_DFR0_EL1_MMAPDBG_SHIFT 8
#define AARCH64_ID_DFR0_EL1_MMAPDBG_MASK 0xf00U
#define AARCH64_ID_DFR0_EL1_MMAPDBG_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_COPTRC( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_DFR0_EL1_COPTRC_SHIFT 12
#define AARCH64_ID_DFR0_EL1_COPTRC_MASK 0xf000U
#define AARCH64_ID_DFR0_EL1_COPTRC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MMAPTRC( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_DFR0_EL1_MMAPTRC_SHIFT 16
#define AARCH64_ID_DFR0_EL1_MMAPTRC_MASK 0xf0000U
#define AARCH64_ID_DFR0_EL1_MMAPTRC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_MPROFDBG( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_DFR0_EL1_MPROFDBG_SHIFT 20
#define AARCH64_ID_DFR0_EL1_MPROFDBG_MASK 0xf00000U
#define AARCH64_ID_DFR0_EL1_MPROFDBG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_PERFMON( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_DFR0_EL1_PERFMON_SHIFT 24
#define AARCH64_ID_DFR0_EL1_PERFMON_MASK 0xf000000U
#define AARCH64_ID_DFR0_EL1_PERFMON_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_DFR0_EL1_TRACEFILT( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_DFR0_EL1_TRACEFILT_SHIFT 28
#define AARCH64_ID_DFR0_EL1_TRACEFILT_MASK 0xf0000000U
#define AARCH64_ID_DFR0_EL1_TRACEFILT_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )
+
/*
 * Reads the ID_DFR0_EL1 system register via MRS.  The volatile asm with
 * a "memory" clobber keeps the read ordered with respect to surrounding
 * memory accesses.
 */
static inline uint64_t _AArch64_Read_id_dfr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_DFR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_DFR1_EL1, Debug Feature Register 1 */

/*
 * The field setter casts _val to uint64_t for a uniform 64-bit result
 * type, matching the sibling register accessors.
 */

#define AARCH64_ID_DFR1_EL1_MTPMU( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_DFR1_EL1_MTPMU_SHIFT 0
#define AARCH64_ID_DFR1_EL1_MTPMU_MASK 0xfU
#define AARCH64_ID_DFR1_EL1_MTPMU_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )
+
/*
 * Reads the ID_DFR1_EL1 system register via MRS.  The volatile asm with
 * a "memory" clobber keeps the read ordered with respect to surrounding
 * memory accesses.
 */
static inline uint64_t _AArch64_Read_id_dfr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_DFR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_ISAR0_EL1, AArch64 Instruction Set Attribute Register 0 */

/*
 * The field setters cast _val to uint64_t before shifting to avoid
 * signed-shift overflow for large field values (C11 6.5.7).
 */

#define AARCH64_ID_ISAR0_EL1_SWAP( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR0_EL1_SWAP_SHIFT 0
#define AARCH64_ID_ISAR0_EL1_SWAP_MASK 0xfU
#define AARCH64_ID_ISAR0_EL1_SWAP_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_BITCOUNT( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_SHIFT 4
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_MASK 0xf0U
#define AARCH64_ID_ISAR0_EL1_BITCOUNT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_BITFIELD( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR0_EL1_BITFIELD_SHIFT 8
#define AARCH64_ID_ISAR0_EL1_BITFIELD_MASK 0xf00U
#define AARCH64_ID_ISAR0_EL1_BITFIELD_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_CMPBRANCH( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_SHIFT 12
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_MASK 0xf000U
#define AARCH64_ID_ISAR0_EL1_CMPBRANCH_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_COPROC( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR0_EL1_COPROC_SHIFT 16
#define AARCH64_ID_ISAR0_EL1_COPROC_MASK 0xf0000U
#define AARCH64_ID_ISAR0_EL1_COPROC_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_DEBUG( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR0_EL1_DEBUG_SHIFT 20
#define AARCH64_ID_ISAR0_EL1_DEBUG_MASK 0xf00000U
#define AARCH64_ID_ISAR0_EL1_DEBUG_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR0_EL1_DIVIDE( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR0_EL1_DIVIDE_SHIFT 24
#define AARCH64_ID_ISAR0_EL1_DIVIDE_MASK 0xf000000U
#define AARCH64_ID_ISAR0_EL1_DIVIDE_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )
+
/*
 * Reads the ID_ISAR0_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_isar0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_ISAR1_EL1, AArch64 Instruction Set Attribute Register 1 */

/*
 * The field setters cast _val to uint64_t before shifting: 0xf shifted
 * left by 28 as an int overflows and is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_ISAR1_EL1_ENDIAN( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR1_EL1_ENDIAN_SHIFT 0
#define AARCH64_ID_ISAR1_EL1_ENDIAN_MASK 0xfU
#define AARCH64_ID_ISAR1_EL1_ENDIAN_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXCEPT( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR1_EL1_EXCEPT_SHIFT 4
#define AARCH64_ID_ISAR1_EL1_EXCEPT_MASK 0xf0U
#define AARCH64_ID_ISAR1_EL1_EXCEPT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_SHIFT 8
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_MASK 0xf00U
#define AARCH64_ID_ISAR1_EL1_EXCEPT_AR_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_EXTEND( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR1_EL1_EXTEND_SHIFT 12
#define AARCH64_ID_ISAR1_EL1_EXTEND_MASK 0xf000U
#define AARCH64_ID_ISAR1_EL1_EXTEND_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_IFTHEN( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR1_EL1_IFTHEN_SHIFT 16
#define AARCH64_ID_ISAR1_EL1_IFTHEN_MASK 0xf0000U
#define AARCH64_ID_ISAR1_EL1_IFTHEN_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_IMMEDIATE( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_SHIFT 20
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_MASK 0xf00000U
#define AARCH64_ID_ISAR1_EL1_IMMEDIATE_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_INTERWORK( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR1_EL1_INTERWORK_SHIFT 24
#define AARCH64_ID_ISAR1_EL1_INTERWORK_MASK 0xf000000U
#define AARCH64_ID_ISAR1_EL1_INTERWORK_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR1_EL1_JAZELLE( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR1_EL1_JAZELLE_SHIFT 28
#define AARCH64_ID_ISAR1_EL1_JAZELLE_MASK 0xf0000000U
#define AARCH64_ID_ISAR1_EL1_JAZELLE_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )
+
/*
 * Reads the ID_ISAR1_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_isar1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_ISAR2_EL1, AArch64 Instruction Set Attribute Register 2 */

/*
 * The field setters cast _val to uint64_t before shifting: 0xf shifted
 * left by 28 as an int overflows and is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_ISAR2_EL1_LOADSTORE( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_SHIFT 0
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_MASK 0xfU
#define AARCH64_ID_ISAR2_EL1_LOADSTORE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MEMHINT( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR2_EL1_MEMHINT_SHIFT 4
#define AARCH64_ID_ISAR2_EL1_MEMHINT_MASK 0xf0U
#define AARCH64_ID_ISAR2_EL1_MEMHINT_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_SHIFT 8
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_MASK 0xf00U
#define AARCH64_ID_ISAR2_EL1_MULTIACCESSINT_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULT( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR2_EL1_MULT_SHIFT 12
#define AARCH64_ID_ISAR2_EL1_MULT_MASK 0xf000U
#define AARCH64_ID_ISAR2_EL1_MULT_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTS( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR2_EL1_MULTS_SHIFT 16
#define AARCH64_ID_ISAR2_EL1_MULTS_MASK 0xf0000U
#define AARCH64_ID_ISAR2_EL1_MULTS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_MULTU( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR2_EL1_MULTU_SHIFT 20
#define AARCH64_ID_ISAR2_EL1_MULTU_MASK 0xf00000U
#define AARCH64_ID_ISAR2_EL1_MULTU_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_PSR_AR( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR2_EL1_PSR_AR_SHIFT 24
#define AARCH64_ID_ISAR2_EL1_PSR_AR_MASK 0xf000000U
#define AARCH64_ID_ISAR2_EL1_PSR_AR_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR2_EL1_REVERSAL( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR2_EL1_REVERSAL_SHIFT 28
#define AARCH64_ID_ISAR2_EL1_REVERSAL_MASK 0xf0000000U
#define AARCH64_ID_ISAR2_EL1_REVERSAL_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )
+
/*
 * Reads the ID_ISAR2_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_isar2_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR2_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_ISAR3_EL1, AArch64 Instruction Set Attribute Register 3 */

/*
 * The field setters cast _val to uint64_t before shifting: 0xf shifted
 * left by 28 as an int overflows and is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_ISAR3_EL1_SATURATE( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR3_EL1_SATURATE_SHIFT 0
#define AARCH64_ID_ISAR3_EL1_SATURATE_MASK 0xfU
#define AARCH64_ID_ISAR3_EL1_SATURATE_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SIMD( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR3_EL1_SIMD_SHIFT 4
#define AARCH64_ID_ISAR3_EL1_SIMD_MASK 0xf0U
#define AARCH64_ID_ISAR3_EL1_SIMD_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SVC( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR3_EL1_SVC_SHIFT 8
#define AARCH64_ID_ISAR3_EL1_SVC_MASK 0xf00U
#define AARCH64_ID_ISAR3_EL1_SVC_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_SHIFT 12
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_MASK 0xf000U
#define AARCH64_ID_ISAR3_EL1_SYNCHPRIM_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_TABBRANCH( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_SHIFT 16
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_MASK 0xf0000U
#define AARCH64_ID_ISAR3_EL1_TABBRANCH_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_T32COPY( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR3_EL1_T32COPY_SHIFT 20
#define AARCH64_ID_ISAR3_EL1_T32COPY_MASK 0xf00000U
#define AARCH64_ID_ISAR3_EL1_T32COPY_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_TRUENOP( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR3_EL1_TRUENOP_SHIFT 24
#define AARCH64_ID_ISAR3_EL1_TRUENOP_MASK 0xf000000U
#define AARCH64_ID_ISAR3_EL1_TRUENOP_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR3_EL1_T32EE( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR3_EL1_T32EE_SHIFT 28
#define AARCH64_ID_ISAR3_EL1_T32EE_MASK 0xf0000000U
#define AARCH64_ID_ISAR3_EL1_T32EE_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )
+
/*
 * Reads the ID_ISAR3_EL1 system register via MRS.  The volatile asm
 * with a "memory" clobber keeps the read ordered with respect to
 * surrounding memory accesses.
 */
static inline uint64_t _AArch64_Read_id_isar3_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, ID_ISAR3_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* ID_ISAR4_EL1, AArch64 Instruction Set Attribute Register 4 */

/*
 * The field setters cast _val to uint64_t before shifting: 0xf shifted
 * left by 28 as an int overflows and is undefined behavior (C11 6.5.7).
 */

#define AARCH64_ID_ISAR4_EL1_UNPRIV( _val ) ( ( uint64_t ) ( _val ) << 0 )
#define AARCH64_ID_ISAR4_EL1_UNPRIV_SHIFT 0
#define AARCH64_ID_ISAR4_EL1_UNPRIV_MASK 0xfU
#define AARCH64_ID_ISAR4_EL1_UNPRIV_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS( _val ) ( ( uint64_t ) ( _val ) << 4 )
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_SHIFT 4
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_MASK 0xf0U
#define AARCH64_ID_ISAR4_EL1_WITHSHIFTS_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_WRITEBACK( _val ) ( ( uint64_t ) ( _val ) << 8 )
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_SHIFT 8
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_MASK 0xf00U
#define AARCH64_ID_ISAR4_EL1_WRITEBACK_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SMC( _val ) ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_ID_ISAR4_EL1_SMC_SHIFT 12
#define AARCH64_ID_ISAR4_EL1_SMC_MASK 0xf000U
#define AARCH64_ID_ISAR4_EL1_SMC_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_BARRIER( _val ) ( ( uint64_t ) ( _val ) << 16 )
#define AARCH64_ID_ISAR4_EL1_BARRIER_SHIFT 16
#define AARCH64_ID_ISAR4_EL1_BARRIER_MASK 0xf0000U
#define AARCH64_ID_ISAR4_EL1_BARRIER_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC( _val ) ( ( uint64_t ) ( _val ) << 20 )
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_SHIFT 20
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_MASK 0xf00000U
#define AARCH64_ID_ISAR4_EL1_SYNCHPRIM_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_PSR_M( _val ) ( ( uint64_t ) ( _val ) << 24 )
#define AARCH64_ID_ISAR4_EL1_PSR_M_SHIFT 24
#define AARCH64_ID_ISAR4_EL1_PSR_M_MASK 0xf000000U
#define AARCH64_ID_ISAR4_EL1_PSR_M_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xfU )

#define AARCH64_ID_ISAR4_EL1_SWP_FRAC( _val ) ( ( uint64_t ) ( _val ) << 28 )
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_SHIFT 28
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_MASK 0xf0000000U
#define AARCH64_ID_ISAR4_EL1_SWP_FRAC_GET( _reg ) \
  ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_isar4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_ISAR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_ISAR5_EL1, AArch64 Instruction Set Attribute Register 5 */
+
+#define AARCH64_ID_ISAR5_EL1_SEVL( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_ISAR5_EL1_SEVL_SHIFT 0
+#define AARCH64_ID_ISAR5_EL1_SEVL_MASK 0xfU
+#define AARCH64_ID_ISAR5_EL1_SEVL_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_AES( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_ISAR5_EL1_AES_SHIFT 4
+#define AARCH64_ID_ISAR5_EL1_AES_MASK 0xf0U
+#define AARCH64_ID_ISAR5_EL1_AES_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_SHA1( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_ISAR5_EL1_SHA1_SHIFT 8
+#define AARCH64_ID_ISAR5_EL1_SHA1_MASK 0xf00U
+#define AARCH64_ID_ISAR5_EL1_SHA1_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_SHA2( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_ISAR5_EL1_SHA2_SHIFT 12
+#define AARCH64_ID_ISAR5_EL1_SHA2_MASK 0xf000U
+#define AARCH64_ID_ISAR5_EL1_SHA2_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_CRC32( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_ISAR5_EL1_CRC32_SHIFT 16
+#define AARCH64_ID_ISAR5_EL1_CRC32_MASK 0xf0000U
+#define AARCH64_ID_ISAR5_EL1_CRC32_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_RDM( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_ISAR5_EL1_RDM_SHIFT 24
+#define AARCH64_ID_ISAR5_EL1_RDM_MASK 0xf000000U
+#define AARCH64_ID_ISAR5_EL1_RDM_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_ISAR5_EL1_VCMA( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_ISAR5_EL1_VCMA_SHIFT 28
+#define AARCH64_ID_ISAR5_EL1_VCMA_MASK 0xf0000000U
+#define AARCH64_ID_ISAR5_EL1_VCMA_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_isar5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_ISAR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_ISAR6_EL1, AArch64 Instruction Set Attribute Register 6 */
+
+#define AARCH64_ID_ISAR6_EL1_JSCVT( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_ISAR6_EL1_JSCVT_SHIFT 0
+#define AARCH64_ID_ISAR6_EL1_JSCVT_MASK 0xfU
+#define AARCH64_ID_ISAR6_EL1_JSCVT_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_DP( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_ISAR6_EL1_DP_SHIFT 4
+#define AARCH64_ID_ISAR6_EL1_DP_MASK 0xf0U
+#define AARCH64_ID_ISAR6_EL1_DP_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_FHM( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_ISAR6_EL1_FHM_SHIFT 8
+#define AARCH64_ID_ISAR6_EL1_FHM_MASK 0xf00U
+#define AARCH64_ID_ISAR6_EL1_FHM_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_SB( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_ISAR6_EL1_SB_SHIFT 12
+#define AARCH64_ID_ISAR6_EL1_SB_MASK 0xf000U
+#define AARCH64_ID_ISAR6_EL1_SB_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_SPECRES( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_ISAR6_EL1_SPECRES_SHIFT 16
+#define AARCH64_ID_ISAR6_EL1_SPECRES_MASK 0xf0000U
+#define AARCH64_ID_ISAR6_EL1_SPECRES_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_BF16( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_ISAR6_EL1_BF16_SHIFT 20
+#define AARCH64_ID_ISAR6_EL1_BF16_MASK 0xf00000U
+#define AARCH64_ID_ISAR6_EL1_BF16_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_ISAR6_EL1_I8MM( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_ISAR6_EL1_I8MM_SHIFT 24
+#define AARCH64_ID_ISAR6_EL1_I8MM_MASK 0xf000000U
+#define AARCH64_ID_ISAR6_EL1_I8MM_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_isar6_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_ISAR6_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR0_EL1, AArch64 Memory Model Feature Register 0 */
+
+#define AARCH64_ID_MMFR0_EL1_VMSA( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR0_EL1_VMSA_SHIFT 0
+#define AARCH64_ID_MMFR0_EL1_VMSA_MASK 0xfU
+#define AARCH64_ID_MMFR0_EL1_VMSA_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_PMSA( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_MMFR0_EL1_PMSA_SHIFT 4
+#define AARCH64_ID_MMFR0_EL1_PMSA_MASK 0xf0U
+#define AARCH64_ID_MMFR0_EL1_PMSA_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_OUTERSHR( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_MMFR0_EL1_OUTERSHR_SHIFT 8
+#define AARCH64_ID_MMFR0_EL1_OUTERSHR_MASK 0xf00U
+#define AARCH64_ID_MMFR0_EL1_OUTERSHR_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_SHARELVL( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_MMFR0_EL1_SHARELVL_SHIFT 12
+#define AARCH64_ID_MMFR0_EL1_SHARELVL_MASK 0xf000U
+#define AARCH64_ID_MMFR0_EL1_SHARELVL_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_TCM( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_MMFR0_EL1_TCM_SHIFT 16
+#define AARCH64_ID_MMFR0_EL1_TCM_MASK 0xf0000U
+#define AARCH64_ID_MMFR0_EL1_TCM_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_AUXREG( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_MMFR0_EL1_AUXREG_SHIFT 20
+#define AARCH64_ID_MMFR0_EL1_AUXREG_MASK 0xf00000U
+#define AARCH64_ID_MMFR0_EL1_AUXREG_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_FCSE( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_MMFR0_EL1_FCSE_SHIFT 24
+#define AARCH64_ID_MMFR0_EL1_FCSE_MASK 0xf000000U
+#define AARCH64_ID_MMFR0_EL1_FCSE_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_MMFR0_EL1_INNERSHR( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_MMFR0_EL1_INNERSHR_SHIFT 28
+#define AARCH64_ID_MMFR0_EL1_INNERSHR_MASK 0xf0000000U
+#define AARCH64_ID_MMFR0_EL1_INNERSHR_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR1_EL1, AArch64 Memory Model Feature Register 1 */
+
+#define AARCH64_ID_MMFR1_EL1_L1HVDVA( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR1_EL1_L1HVDVA_SHIFT 0
+#define AARCH64_ID_MMFR1_EL1_L1HVDVA_MASK 0xfU
+#define AARCH64_ID_MMFR1_EL1_L1HVDVA_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1UNIVA( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_MMFR1_EL1_L1UNIVA_SHIFT 4
+#define AARCH64_ID_MMFR1_EL1_L1UNIVA_MASK 0xf0U
+#define AARCH64_ID_MMFR1_EL1_L1UNIVA_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1HVDSW( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_MMFR1_EL1_L1HVDSW_SHIFT 8
+#define AARCH64_ID_MMFR1_EL1_L1HVDSW_MASK 0xf00U
+#define AARCH64_ID_MMFR1_EL1_L1HVDSW_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1UNISW( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_MMFR1_EL1_L1UNISW_SHIFT 12
+#define AARCH64_ID_MMFR1_EL1_L1UNISW_MASK 0xf000U
+#define AARCH64_ID_MMFR1_EL1_L1UNISW_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1HVD( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_MMFR1_EL1_L1HVD_SHIFT 16
+#define AARCH64_ID_MMFR1_EL1_L1HVD_MASK 0xf0000U
+#define AARCH64_ID_MMFR1_EL1_L1HVD_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1UNI( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_MMFR1_EL1_L1UNI_SHIFT 20
+#define AARCH64_ID_MMFR1_EL1_L1UNI_MASK 0xf00000U
+#define AARCH64_ID_MMFR1_EL1_L1UNI_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_L1TSTCLN( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_SHIFT 24
+#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_MASK 0xf000000U
+#define AARCH64_ID_MMFR1_EL1_L1TSTCLN_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_MMFR1_EL1_BPRED( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_MMFR1_EL1_BPRED_SHIFT 28
+#define AARCH64_ID_MMFR1_EL1_BPRED_MASK 0xf0000000U
+#define AARCH64_ID_MMFR1_EL1_BPRED_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR2_EL1, AArch64 Memory Model Feature Register 2 */
+
+#define AARCH64_ID_MMFR2_EL1_L1HVDFG( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR2_EL1_L1HVDFG_SHIFT 0
+#define AARCH64_ID_MMFR2_EL1_L1HVDFG_MASK 0xfU
+#define AARCH64_ID_MMFR2_EL1_L1HVDFG_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_L1HVDBG( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_MMFR2_EL1_L1HVDBG_SHIFT 4
+#define AARCH64_ID_MMFR2_EL1_L1HVDBG_MASK 0xf0U
+#define AARCH64_ID_MMFR2_EL1_L1HVDBG_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_L1HVDRNG( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_SHIFT 8
+#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_MASK 0xf00U
+#define AARCH64_ID_MMFR2_EL1_L1HVDRNG_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_HVDTLB( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_MMFR2_EL1_HVDTLB_SHIFT 12
+#define AARCH64_ID_MMFR2_EL1_HVDTLB_MASK 0xf000U
+#define AARCH64_ID_MMFR2_EL1_HVDTLB_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_UNITLB( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_MMFR2_EL1_UNITLB_SHIFT 16
+#define AARCH64_ID_MMFR2_EL1_UNITLB_MASK 0xf0000U
+#define AARCH64_ID_MMFR2_EL1_UNITLB_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_MEMBARR( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_MMFR2_EL1_MEMBARR_SHIFT 20
+#define AARCH64_ID_MMFR2_EL1_MEMBARR_MASK 0xf00000U
+#define AARCH64_ID_MMFR2_EL1_MEMBARR_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_WFISTALL( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_MMFR2_EL1_WFISTALL_SHIFT 24
+#define AARCH64_ID_MMFR2_EL1_WFISTALL_MASK 0xf000000U
+#define AARCH64_ID_MMFR2_EL1_WFISTALL_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_MMFR2_EL1_HWACCFLG( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_MMFR2_EL1_HWACCFLG_SHIFT 28
+#define AARCH64_ID_MMFR2_EL1_HWACCFLG_MASK 0xf0000000U
+#define AARCH64_ID_MMFR2_EL1_HWACCFLG_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR3_EL1, AArch64 Memory Model Feature Register 3 */
+
+#define AARCH64_ID_MMFR3_EL1_CMAINTVA( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR3_EL1_CMAINTVA_SHIFT 0
+#define AARCH64_ID_MMFR3_EL1_CMAINTVA_MASK 0xfU
+#define AARCH64_ID_MMFR3_EL1_CMAINTVA_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_CMAINTSW( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_MMFR3_EL1_CMAINTSW_SHIFT 4
+#define AARCH64_ID_MMFR3_EL1_CMAINTSW_MASK 0xf0U
+#define AARCH64_ID_MMFR3_EL1_CMAINTSW_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_BPMAINT( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_MMFR3_EL1_BPMAINT_SHIFT 8
+#define AARCH64_ID_MMFR3_EL1_BPMAINT_MASK 0xf00U
+#define AARCH64_ID_MMFR3_EL1_BPMAINT_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_MAINTBCST( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_MMFR3_EL1_MAINTBCST_SHIFT 12
+#define AARCH64_ID_MMFR3_EL1_MAINTBCST_MASK 0xf000U
+#define AARCH64_ID_MMFR3_EL1_MAINTBCST_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_PAN( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_MMFR3_EL1_PAN_SHIFT 16
+#define AARCH64_ID_MMFR3_EL1_PAN_MASK 0xf0000U
+#define AARCH64_ID_MMFR3_EL1_PAN_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_COHWALK( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_MMFR3_EL1_COHWALK_SHIFT 20
+#define AARCH64_ID_MMFR3_EL1_COHWALK_MASK 0xf00000U
+#define AARCH64_ID_MMFR3_EL1_COHWALK_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_CMEMSZ( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_MMFR3_EL1_CMEMSZ_SHIFT 24
+#define AARCH64_ID_MMFR3_EL1_CMEMSZ_MASK 0xf000000U
+#define AARCH64_ID_MMFR3_EL1_CMEMSZ_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_MMFR3_EL1_SUPERSEC( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_MMFR3_EL1_SUPERSEC_SHIFT 28
+#define AARCH64_ID_MMFR3_EL1_SUPERSEC_MASK 0xf0000000U
+#define AARCH64_ID_MMFR3_EL1_SUPERSEC_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR4_EL1, AArch64 Memory Model Feature Register 4 */
+
+#define AARCH64_ID_MMFR4_EL1_SPECSEI( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR4_EL1_SPECSEI_SHIFT 0
+#define AARCH64_ID_MMFR4_EL1_SPECSEI_MASK 0xfU
+#define AARCH64_ID_MMFR4_EL1_SPECSEI_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_AC2( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_MMFR4_EL1_AC2_SHIFT 4
+#define AARCH64_ID_MMFR4_EL1_AC2_MASK 0xf0U
+#define AARCH64_ID_MMFR4_EL1_AC2_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_XNX( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_MMFR4_EL1_XNX_SHIFT 8
+#define AARCH64_ID_MMFR4_EL1_XNX_MASK 0xf00U
+#define AARCH64_ID_MMFR4_EL1_XNX_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_CNP( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_MMFR4_EL1_CNP_SHIFT 12
+#define AARCH64_ID_MMFR4_EL1_CNP_MASK 0xf000U
+#define AARCH64_ID_MMFR4_EL1_CNP_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_HPDS( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_MMFR4_EL1_HPDS_SHIFT 16
+#define AARCH64_ID_MMFR4_EL1_HPDS_MASK 0xf0000U
+#define AARCH64_ID_MMFR4_EL1_HPDS_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_LSM( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_MMFR4_EL1_LSM_SHIFT 20
+#define AARCH64_ID_MMFR4_EL1_LSM_MASK 0xf00000U
+#define AARCH64_ID_MMFR4_EL1_LSM_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_CCIDX( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_MMFR4_EL1_CCIDX_SHIFT 24
+#define AARCH64_ID_MMFR4_EL1_CCIDX_MASK 0xf000000U
+#define AARCH64_ID_MMFR4_EL1_CCIDX_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_MMFR4_EL1_EVT( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_MMFR4_EL1_EVT_SHIFT 28
+#define AARCH64_ID_MMFR4_EL1_EVT_MASK 0xf0000000U
+#define AARCH64_ID_MMFR4_EL1_EVT_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr4_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR4_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_MMFR5_EL1, AArch64 Memory Model Feature Register 5 */
+
+#define AARCH64_ID_MMFR5_EL1_ETS( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_MMFR5_EL1_ETS_SHIFT 0
+#define AARCH64_ID_MMFR5_EL1_ETS_MASK 0xfU
+#define AARCH64_ID_MMFR5_EL1_ETS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_mmfr5_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_MMFR5_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_PFR0_EL1, AArch64 Processor Feature Register 0 */
+
+#define AARCH64_ID_PFR0_EL1_STATE0( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_PFR0_EL1_STATE0_SHIFT 0
+#define AARCH64_ID_PFR0_EL1_STATE0_MASK 0xfU
+#define AARCH64_ID_PFR0_EL1_STATE0_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_STATE1( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_PFR0_EL1_STATE1_SHIFT 4
+#define AARCH64_ID_PFR0_EL1_STATE1_MASK 0xf0U
+#define AARCH64_ID_PFR0_EL1_STATE1_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_STATE2( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_PFR0_EL1_STATE2_SHIFT 8
+#define AARCH64_ID_PFR0_EL1_STATE2_MASK 0xf00U
+#define AARCH64_ID_PFR0_EL1_STATE2_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_STATE3( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_PFR0_EL1_STATE3_SHIFT 12
+#define AARCH64_ID_PFR0_EL1_STATE3_MASK 0xf000U
+#define AARCH64_ID_PFR0_EL1_STATE3_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_CSV2( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_PFR0_EL1_CSV2_SHIFT 16
+#define AARCH64_ID_PFR0_EL1_CSV2_MASK 0xf0000U
+#define AARCH64_ID_PFR0_EL1_CSV2_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_AMU( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_PFR0_EL1_AMU_SHIFT 20
+#define AARCH64_ID_PFR0_EL1_AMU_MASK 0xf00000U
+#define AARCH64_ID_PFR0_EL1_AMU_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_DIT( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_PFR0_EL1_DIT_SHIFT 24
+#define AARCH64_ID_PFR0_EL1_DIT_MASK 0xf000000U
+#define AARCH64_ID_PFR0_EL1_DIT_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_PFR0_EL1_RAS( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_PFR0_EL1_RAS_SHIFT 28
+#define AARCH64_ID_PFR0_EL1_RAS_MASK 0xf0000000U
+#define AARCH64_ID_PFR0_EL1_RAS_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_pfr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_PFR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_PFR1_EL1, AArch64 Processor Feature Register 1 */
+
+#define AARCH64_ID_PFR1_EL1_PROGMOD( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_PFR1_EL1_PROGMOD_SHIFT 0
+#define AARCH64_ID_PFR1_EL1_PROGMOD_MASK 0xfU
+#define AARCH64_ID_PFR1_EL1_PROGMOD_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_SECURITY( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_PFR1_EL1_SECURITY_SHIFT 4
+#define AARCH64_ID_PFR1_EL1_SECURITY_MASK 0xf0U
+#define AARCH64_ID_PFR1_EL1_SECURITY_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_MPROGMOD( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_PFR1_EL1_MPROGMOD_SHIFT 8
+#define AARCH64_ID_PFR1_EL1_MPROGMOD_MASK 0xf00U
+#define AARCH64_ID_PFR1_EL1_MPROGMOD_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION( _val ) ( ( _val ) << 12 )
+#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_SHIFT 12
+#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_MASK 0xf000U
+#define AARCH64_ID_PFR1_EL1_VIRTUALIZATION_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_GENTIMER( _val ) ( ( _val ) << 16 )
+#define AARCH64_ID_PFR1_EL1_GENTIMER_SHIFT 16
+#define AARCH64_ID_PFR1_EL1_GENTIMER_MASK 0xf0000U
+#define AARCH64_ID_PFR1_EL1_GENTIMER_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_SEC_FRAC( _val ) ( ( _val ) << 20 )
+#define AARCH64_ID_PFR1_EL1_SEC_FRAC_SHIFT 20
+#define AARCH64_ID_PFR1_EL1_SEC_FRAC_MASK 0xf00000U
+#define AARCH64_ID_PFR1_EL1_SEC_FRAC_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_VIRT_FRAC( _val ) ( ( _val ) << 24 )
+#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_SHIFT 24
+#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_MASK 0xf000000U
+#define AARCH64_ID_PFR1_EL1_VIRT_FRAC_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_ID_PFR1_EL1_GIC( _val ) ( ( _val ) << 28 )
+#define AARCH64_ID_PFR1_EL1_GIC_SHIFT 28
+#define AARCH64_ID_PFR1_EL1_GIC_MASK 0xf0000000U
+#define AARCH64_ID_PFR1_EL1_GIC_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_pfr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_PFR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ID_PFR2_EL1, AArch64 Processor Feature Register 2 */
+
+#define AARCH64_ID_PFR2_EL1_CSV3( _val ) ( ( _val ) << 0 )
+#define AARCH64_ID_PFR2_EL1_CSV3_SHIFT 0
+#define AARCH64_ID_PFR2_EL1_CSV3_MASK 0xfU
+#define AARCH64_ID_PFR2_EL1_CSV3_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_ID_PFR2_EL1_SSBS( _val ) ( ( _val ) << 4 )
+#define AARCH64_ID_PFR2_EL1_SSBS_SHIFT 4
+#define AARCH64_ID_PFR2_EL1_SSBS_MASK 0xf0U
+#define AARCH64_ID_PFR2_EL1_SSBS_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_ID_PFR2_EL1_RAS_FRAC( _val ) ( ( _val ) << 8 )
+#define AARCH64_ID_PFR2_EL1_RAS_FRAC_SHIFT 8
+#define AARCH64_ID_PFR2_EL1_RAS_FRAC_MASK 0xf00U
+#define AARCH64_ID_PFR2_EL1_RAS_FRAC_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_id_pfr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ID_PFR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* IFSR32_EL2, Instruction Fault Status Register (EL2) */
+
+#define AARCH64_IFSR32_EL2_FS_3_0( _val ) ( ( _val ) << 0 )
+#define AARCH64_IFSR32_EL2_FS_3_0_SHIFT 0
+#define AARCH64_IFSR32_EL2_FS_3_0_MASK 0xfU
+#define AARCH64_IFSR32_EL2_FS_3_0_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_IFSR32_EL2_STATUS( _val ) ( ( _val ) << 0 )
+#define AARCH64_IFSR32_EL2_STATUS_SHIFT 0
+#define AARCH64_IFSR32_EL2_STATUS_MASK 0x3fU
+#define AARCH64_IFSR32_EL2_STATUS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_IFSR32_EL2_LPAE 0x200U
+
+#define AARCH64_IFSR32_EL2_FS_4 0x400U
+
+#define AARCH64_IFSR32_EL2_EXT 0x1000U
+
+#define AARCH64_IFSR32_EL2_FNV 0x10000U
+
+static inline uint64_t _AArch64_Read_ifsr32_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, IFSR32_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_ifsr32_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr IFSR32_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ISR_EL1, Interrupt Status Register */
+
+#define AARCH64_ISR_EL1_F 0x40U
+
+#define AARCH64_ISR_EL1_I 0x80U
+
+#define AARCH64_ISR_EL1_A 0x100U
+
+static inline uint64_t _AArch64_Read_isr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ISR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* LORC_EL1, LORegion Control (EL1) */
+
+#define AARCH64_LORC_EL1_EN 0x1U
+
+#define AARCH64_LORC_EL1_DS( _val ) ( ( _val ) << 2 )
+#define AARCH64_LORC_EL1_DS_SHIFT 2
+#define AARCH64_LORC_EL1_DS_MASK 0x3fcU
+#define AARCH64_LORC_EL1_DS_GET( _reg ) \
+ ( ( ( _reg ) >> 2 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_lorc_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, LORC_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_lorc_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr LORC_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* LOREA_EL1, LORegion End Address (EL1) */
+
+#define AARCH64_LOREA_EL1_EA_47_16( _val ) ( ( _val ) << 16 )
+#define AARCH64_LOREA_EL1_EA_47_16_SHIFT 16
+#define AARCH64_LOREA_EL1_EA_47_16_MASK 0xffffffff0000ULL
+#define AARCH64_LOREA_EL1_EA_47_16_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xffffffffULL )
+
+#define AARCH64_LOREA_EL1_EA_51_48( _val ) ( ( _val ) << 48 )
+#define AARCH64_LOREA_EL1_EA_51_48_SHIFT 48
+#define AARCH64_LOREA_EL1_EA_51_48_MASK 0xf000000000000ULL
+#define AARCH64_LOREA_EL1_EA_51_48_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_lorea_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, LOREA_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_lorea_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr LOREA_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* LORID_EL1, LORegionID (EL1) */
+
+#define AARCH64_LORID_EL1_LR( _val ) ( ( _val ) << 0 )
+#define AARCH64_LORID_EL1_LR_SHIFT 0
+#define AARCH64_LORID_EL1_LR_MASK 0xffU
+#define AARCH64_LORID_EL1_LR_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+#define AARCH64_LORID_EL1_LD( _val ) ( ( _val ) << 16 )
+#define AARCH64_LORID_EL1_LD_SHIFT 16
+#define AARCH64_LORID_EL1_LD_MASK 0xff0000U
+#define AARCH64_LORID_EL1_LD_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_lorid_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, LORID_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* LORN_EL1, LORegion Number (EL1) */
+
+#define AARCH64_LORN_EL1_NUM( _val ) ( ( _val ) << 0 )
+#define AARCH64_LORN_EL1_NUM_SHIFT 0
+#define AARCH64_LORN_EL1_NUM_MASK 0xffU
+#define AARCH64_LORN_EL1_NUM_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_lorn_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, LORN_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_lorn_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr LORN_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* LORSA_EL1, LORegion Start Address (EL1) */
+
+#define AARCH64_LORSA_EL1_VALID 0x1U
+
+#define AARCH64_LORSA_EL1_SA_47_16( _val ) ( ( _val ) << 16 )
+#define AARCH64_LORSA_EL1_SA_47_16_SHIFT 16
+#define AARCH64_LORSA_EL1_SA_47_16_MASK 0xffffffff0000ULL
+#define AARCH64_LORSA_EL1_SA_47_16_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xffffffffULL )
+
+#define AARCH64_LORSA_EL1_SA_51_48( _val ) ( ( _val ) << 48 )
+#define AARCH64_LORSA_EL1_SA_51_48_SHIFT 48
+#define AARCH64_LORSA_EL1_SA_51_48_MASK 0xf000000000000ULL
+#define AARCH64_LORSA_EL1_SA_51_48_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_lorsa_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, LORSA_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_lorsa_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr LORSA_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* MAIR_EL1, Memory Attribute Indirection Register (EL1) */
+
+#define AARCH64_MAIR_EL1_ATTR0( _val ) ( ( _val ) << 0 )
+#define AARCH64_MAIR_EL1_ATTR1( _val ) ( ( _val ) << 8 )
+#define AARCH64_MAIR_EL1_ATTR2( _val ) ( ( _val ) << 16 )
+#define AARCH64_MAIR_EL1_ATTR3( _val ) ( ( _val ) << 24 )
+#define AARCH64_MAIR_EL1_ATTR4( _val ) ( ( _val ) << 32 )
+#define AARCH64_MAIR_EL1_ATTR5( _val ) ( ( _val ) << 40 )
+#define AARCH64_MAIR_EL1_ATTR6( _val ) ( ( _val ) << 48 )
+#define AARCH64_MAIR_EL1_ATTR7( _val ) ( ( _val ) << 56 )
+
+static inline uint64_t _AArch64_Read_mair_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MAIR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_mair_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr MAIR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* MAIR_EL2, Memory Attribute Indirection Register (EL2) */
+
+static inline uint64_t _AArch64_Read_mair_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MAIR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_mair_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr MAIR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* MAIR_EL3, Memory Attribute Indirection Register (EL3) */
+
+static inline uint64_t _AArch64_Read_mair_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MAIR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_mair_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr MAIR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* MIDR_EL1, Main ID Register */
+
+#define AARCH64_MIDR_EL1_REVISION( _val ) ( ( _val ) << 0 )
+#define AARCH64_MIDR_EL1_REVISION_SHIFT 0
+#define AARCH64_MIDR_EL1_REVISION_MASK 0xfU
+#define AARCH64_MIDR_EL1_REVISION_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_MIDR_EL1_PARTNUM( _val ) ( ( _val ) << 4 )
+#define AARCH64_MIDR_EL1_PARTNUM_SHIFT 4
+#define AARCH64_MIDR_EL1_PARTNUM_MASK 0xfff0U
+#define AARCH64_MIDR_EL1_PARTNUM_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfffU )
+
+#define AARCH64_MIDR_EL1_ARCHITECTURE( _val ) ( ( _val ) << 16 )
+#define AARCH64_MIDR_EL1_ARCHITECTURE_SHIFT 16
+#define AARCH64_MIDR_EL1_ARCHITECTURE_MASK 0xf0000U
+#define AARCH64_MIDR_EL1_ARCHITECTURE_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_MIDR_EL1_VARIANT( _val ) ( ( _val ) << 20 )
+#define AARCH64_MIDR_EL1_VARIANT_SHIFT 20
+#define AARCH64_MIDR_EL1_VARIANT_MASK 0xf00000U
+#define AARCH64_MIDR_EL1_VARIANT_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_MIDR_EL1_IMPLEMENTER( _val ) ( ( _val ) << 24 )
+#define AARCH64_MIDR_EL1_IMPLEMENTER_SHIFT 24
+#define AARCH64_MIDR_EL1_IMPLEMENTER_MASK 0xff000000U
+#define AARCH64_MIDR_EL1_IMPLEMENTER_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_midr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* MPIDR_EL1, Multiprocessor Affinity Register */
+
+#define AARCH64_MPIDR_EL1_AFF0( _val ) ( ( _val ) << 0 )
+#define AARCH64_MPIDR_EL1_AFF0_SHIFT 0
+#define AARCH64_MPIDR_EL1_AFF0_MASK 0xffU
+#define AARCH64_MPIDR_EL1_AFF0_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+#define AARCH64_MPIDR_EL1_AFF1( _val ) ( ( _val ) << 8 )
+#define AARCH64_MPIDR_EL1_AFF1_SHIFT 8
+#define AARCH64_MPIDR_EL1_AFF1_MASK 0xff00U
+#define AARCH64_MPIDR_EL1_AFF1_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xffU )
+
+#define AARCH64_MPIDR_EL1_AFF2( _val ) ( ( _val ) << 16 )
+#define AARCH64_MPIDR_EL1_AFF2_SHIFT 16
+#define AARCH64_MPIDR_EL1_AFF2_MASK 0xff0000U
+#define AARCH64_MPIDR_EL1_AFF2_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xffU )
+
+#define AARCH64_MPIDR_EL1_MT 0x1000000U
+
+#define AARCH64_MPIDR_EL1_U 0x40000000U
+
+#define AARCH64_MPIDR_EL1_AFF3( _val ) ( ( _val ) << 32 )
+#define AARCH64_MPIDR_EL1_AFF3_SHIFT 32
+#define AARCH64_MPIDR_EL1_AFF3_MASK 0xff00000000ULL
+#define AARCH64_MPIDR_EL1_AFF3_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0xffULL )
+
+static inline uint64_t _AArch64_Read_mpidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MPIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* MVFR0_EL1, AArch64 Media and VFP Feature Register 0 */
+
+#define AARCH64_MVFR0_EL1_SIMDREG( _val ) ( ( _val ) << 0 )
+#define AARCH64_MVFR0_EL1_SIMDREG_SHIFT 0
+#define AARCH64_MVFR0_EL1_SIMDREG_MASK 0xfU
+#define AARCH64_MVFR0_EL1_SIMDREG_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPSP( _val ) ( ( _val ) << 4 )
+#define AARCH64_MVFR0_EL1_FPSP_SHIFT 4
+#define AARCH64_MVFR0_EL1_FPSP_MASK 0xf0U
+#define AARCH64_MVFR0_EL1_FPSP_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPDP( _val ) ( ( _val ) << 8 )
+#define AARCH64_MVFR0_EL1_FPDP_SHIFT 8
+#define AARCH64_MVFR0_EL1_FPDP_MASK 0xf00U
+#define AARCH64_MVFR0_EL1_FPDP_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPTRAP( _val ) ( ( _val ) << 12 )
+#define AARCH64_MVFR0_EL1_FPTRAP_SHIFT 12
+#define AARCH64_MVFR0_EL1_FPTRAP_MASK 0xf000U
+#define AARCH64_MVFR0_EL1_FPTRAP_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPDIVIDE( _val ) ( ( _val ) << 16 )
+#define AARCH64_MVFR0_EL1_FPDIVIDE_SHIFT 16
+#define AARCH64_MVFR0_EL1_FPDIVIDE_MASK 0xf0000U
+#define AARCH64_MVFR0_EL1_FPDIVIDE_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPSQRT( _val ) ( ( _val ) << 20 )
+#define AARCH64_MVFR0_EL1_FPSQRT_SHIFT 20
+#define AARCH64_MVFR0_EL1_FPSQRT_MASK 0xf00000U
+#define AARCH64_MVFR0_EL1_FPSQRT_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPSHVEC( _val ) ( ( _val ) << 24 )
+#define AARCH64_MVFR0_EL1_FPSHVEC_SHIFT 24
+#define AARCH64_MVFR0_EL1_FPSHVEC_MASK 0xf000000U
+#define AARCH64_MVFR0_EL1_FPSHVEC_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_MVFR0_EL1_FPROUND( _val ) ( ( _val ) << 28 )
+#define AARCH64_MVFR0_EL1_FPROUND_SHIFT 28
+#define AARCH64_MVFR0_EL1_FPROUND_MASK 0xf0000000U
+#define AARCH64_MVFR0_EL1_FPROUND_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_mvfr0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MVFR0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* MVFR1_EL1, AArch64 Media and VFP Feature Register 1 */
+
+#define AARCH64_MVFR1_EL1_FPFTZ( _val ) ( ( _val ) << 0 )
+#define AARCH64_MVFR1_EL1_FPFTZ_SHIFT 0
+#define AARCH64_MVFR1_EL1_FPFTZ_MASK 0xfU
+#define AARCH64_MVFR1_EL1_FPFTZ_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_FPDNAN( _val ) ( ( _val ) << 4 )
+#define AARCH64_MVFR1_EL1_FPDNAN_SHIFT 4
+#define AARCH64_MVFR1_EL1_FPDNAN_MASK 0xf0U
+#define AARCH64_MVFR1_EL1_FPDNAN_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_SIMDLS( _val ) ( ( _val ) << 8 )
+#define AARCH64_MVFR1_EL1_SIMDLS_SHIFT 8
+#define AARCH64_MVFR1_EL1_SIMDLS_MASK 0xf00U
+#define AARCH64_MVFR1_EL1_SIMDLS_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_SIMDINT( _val ) ( ( _val ) << 12 )
+#define AARCH64_MVFR1_EL1_SIMDINT_SHIFT 12
+#define AARCH64_MVFR1_EL1_SIMDINT_MASK 0xf000U
+#define AARCH64_MVFR1_EL1_SIMDINT_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_SIMDSP( _val ) ( ( _val ) << 16 )
+#define AARCH64_MVFR1_EL1_SIMDSP_SHIFT 16
+#define AARCH64_MVFR1_EL1_SIMDSP_MASK 0xf0000U
+#define AARCH64_MVFR1_EL1_SIMDSP_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_SIMDHP( _val ) ( ( _val ) << 20 )
+#define AARCH64_MVFR1_EL1_SIMDHP_SHIFT 20
+#define AARCH64_MVFR1_EL1_SIMDHP_MASK 0xf00000U
+#define AARCH64_MVFR1_EL1_SIMDHP_GET( _reg ) \
+ ( ( ( _reg ) >> 20 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_FPHP( _val ) ( ( _val ) << 24 )
+#define AARCH64_MVFR1_EL1_FPHP_SHIFT 24
+#define AARCH64_MVFR1_EL1_FPHP_MASK 0xf000000U
+#define AARCH64_MVFR1_EL1_FPHP_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xfU )
+
+#define AARCH64_MVFR1_EL1_SIMDFMAC( _val ) ( ( _val ) << 28 )
+#define AARCH64_MVFR1_EL1_SIMDFMAC_SHIFT 28
+#define AARCH64_MVFR1_EL1_SIMDFMAC_MASK 0xf0000000U
+#define AARCH64_MVFR1_EL1_SIMDFMAC_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_mvfr1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MVFR1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* MVFR2_EL1, AArch64 Media and VFP Feature Register 2 */
+
+#define AARCH64_MVFR2_EL1_SIMDMISC( _val ) ( ( _val ) << 0 )
+#define AARCH64_MVFR2_EL1_SIMDMISC_SHIFT 0
+#define AARCH64_MVFR2_EL1_SIMDMISC_MASK 0xfU
+#define AARCH64_MVFR2_EL1_SIMDMISC_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_MVFR2_EL1_FPMISC( _val ) ( ( _val ) << 4 )
+#define AARCH64_MVFR2_EL1_FPMISC_SHIFT 4
+#define AARCH64_MVFR2_EL1_FPMISC_MASK 0xf0U
+#define AARCH64_MVFR2_EL1_FPMISC_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_mvfr2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MVFR2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* PAR_EL1, Physical Address Register */
+
+#define AARCH64_PAR_EL1_F 0x1U
+
+#define AARCH64_PAR_EL1_FST( _val ) ( ( _val ) << 1 )
+#define AARCH64_PAR_EL1_FST_SHIFT 1
+#define AARCH64_PAR_EL1_FST_MASK 0x7eU
+#define AARCH64_PAR_EL1_FST_GET( _reg ) \
+ ( ( ( _reg ) >> 1 ) & 0x3fU )
+
+#define AARCH64_PAR_EL1_SH( _val ) ( ( _val ) << 7 )
+#define AARCH64_PAR_EL1_SH_SHIFT 7
+#define AARCH64_PAR_EL1_SH_MASK 0x180U
+#define AARCH64_PAR_EL1_SH_GET( _reg ) \
+ ( ( ( _reg ) >> 7 ) & 0x3U )
+
+#define AARCH64_PAR_EL1_PTW 0x100U
+
+#define AARCH64_PAR_EL1_NS 0x200U
+
+#define AARCH64_PAR_EL1_S 0x200U
+
+#define AARCH64_PAR_EL1_PA_47_12( _val ) ( ( _val ) << 12 )
+#define AARCH64_PAR_EL1_PA_47_12_SHIFT 12
+#define AARCH64_PAR_EL1_PA_47_12_MASK 0xfffffffff000ULL
+#define AARCH64_PAR_EL1_PA_47_12_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfffffffffULL )
+
+#define AARCH64_PAR_EL1_PA_51_48( _val ) ( ( _val ) << 48 )
+#define AARCH64_PAR_EL1_PA_51_48_SHIFT 48
+#define AARCH64_PAR_EL1_PA_51_48_MASK 0xf000000000000ULL
+#define AARCH64_PAR_EL1_PA_51_48_GET( _reg ) \
+ ( ( ( _reg ) >> 48 ) & 0xfULL )
+
+#define AARCH64_PAR_EL1_ATTR( _val ) ( ( _val ) << 56 )
+#define AARCH64_PAR_EL1_ATTR_SHIFT 56
+#define AARCH64_PAR_EL1_ATTR_MASK 0xff00000000000000ULL
+#define AARCH64_PAR_EL1_ATTR_GET( _reg ) \
+ ( ( ( _reg ) >> 56 ) & 0xffULL )
+
+static inline uint64_t _AArch64_Read_par_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PAR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_par_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PAR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* REVIDR_EL1, Revision ID Register */
+
+static inline uint64_t _AArch64_Read_revidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, REVIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* RGSR_EL1, Random Allocation Tag Seed Register. */
+
+#define AARCH64_RGSR_EL1_TAG( _val ) ( ( _val ) << 0 )
+#define AARCH64_RGSR_EL1_TAG_SHIFT 0
+#define AARCH64_RGSR_EL1_TAG_MASK 0xfU
+#define AARCH64_RGSR_EL1_TAG_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_RGSR_EL1_SEED( _val ) ( ( _val ) << 8 )
+#define AARCH64_RGSR_EL1_SEED_SHIFT 8
+#define AARCH64_RGSR_EL1_SEED_MASK 0xffff00U
+#define AARCH64_RGSR_EL1_SEED_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xffffU )
+
+static inline uint64_t _AArch64_Read_rgsr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RGSR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_rgsr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr RGSR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* RMR_EL1, Reset Management Register (EL1) */
+
+#define AARCH64_RMR_EL1_AA64 0x1U
+
+#define AARCH64_RMR_EL1_RR 0x2U
+
+static inline uint64_t _AArch64_Read_rmr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RMR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_rmr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr RMR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* RMR_EL2, Reset Management Register (EL2) */
+
+#define AARCH64_RMR_EL2_AA64 0x1U
+
+#define AARCH64_RMR_EL2_RR 0x2U
+
+static inline uint64_t _AArch64_Read_rmr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RMR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_rmr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr RMR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* RMR_EL3, Reset Management Register (EL3) */
+
+#define AARCH64_RMR_EL3_AA64 0x1U
+
+#define AARCH64_RMR_EL3_RR 0x2U
+
+static inline uint64_t _AArch64_Read_rmr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RMR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_rmr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr RMR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* RNDR, Random Number */
+
+static inline uint64_t _AArch64_Read_rndr( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RNDR" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* RNDRRS, Reseeded Random Number */
+
+static inline uint64_t _AArch64_Read_rndrrs( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RNDRRS" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* RVBAR_EL1, Reset Vector Base Address Register (if EL2 and EL3 not implemented) */
+
+static inline uint64_t _AArch64_Read_rvbar_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RVBAR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* RVBAR_EL2, Reset Vector Base Address Register (if EL3 not implemented) */
+
+static inline uint64_t _AArch64_Read_rvbar_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RVBAR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* RVBAR_EL3, Reset Vector Base Address Register (if EL3 implemented) */
+
+static inline uint64_t _AArch64_Read_rvbar_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, RVBAR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* SCR_EL3, Secure Configuration Register */
+
+#define AARCH64_SCR_EL3_NS 0x1U
+
+#define AARCH64_SCR_EL3_IRQ 0x2U
+
+#define AARCH64_SCR_EL3_FIQ 0x4U
+
+#define AARCH64_SCR_EL3_EA 0x8U
+
+#define AARCH64_SCR_EL3_SMD 0x80U
+
+#define AARCH64_SCR_EL3_HCE 0x100U
+
+#define AARCH64_SCR_EL3_SIF 0x200U
+
+#define AARCH64_SCR_EL3_RW 0x400U
+
+#define AARCH64_SCR_EL3_ST 0x800U
+
+#define AARCH64_SCR_EL3_TWI 0x1000U
+
+#define AARCH64_SCR_EL3_TWE 0x2000U
+
+#define AARCH64_SCR_EL3_TLOR 0x4000U
+
+#define AARCH64_SCR_EL3_TERR 0x8000U
+
+#define AARCH64_SCR_EL3_APK 0x10000U
+
+#define AARCH64_SCR_EL3_API 0x20000U
+
+#define AARCH64_SCR_EL3_EEL2 0x40000U
+
+#define AARCH64_SCR_EL3_EASE 0x80000U
+
+#define AARCH64_SCR_EL3_NMEA 0x100000U
+
+#define AARCH64_SCR_EL3_FIEN 0x200000U
+
+#define AARCH64_SCR_EL3_ENSCXT 0x2000000U
+
+#define AARCH64_SCR_EL3_ATA 0x4000000U
+
+#define AARCH64_SCR_EL3_FGTEN 0x8000000U
+
+#define AARCH64_SCR_EL3_ECVEN 0x10000000U
+
+#define AARCH64_SCR_EL3_TWEDEN 0x20000000U
+
+#define AARCH64_SCR_EL3_TWEDEL( _val ) ( ( _val ) << 30 )
+#define AARCH64_SCR_EL3_TWEDEL_SHIFT 30
+#define AARCH64_SCR_EL3_TWEDEL_MASK 0x3c0000000ULL
+#define AARCH64_SCR_EL3_TWEDEL_GET( _reg ) \
+ ( ( ( _reg ) >> 30 ) & 0xfULL )
+
+#define AARCH64_SCR_EL3_AMVOFFEN 0x800000000ULL
+
+static inline uint64_t _AArch64_Read_scr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_scr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCTLR_EL1, System Control Register (EL1) */
+
+#define AARCH64_SCTLR_EL1_M 0x1U
+
+#define AARCH64_SCTLR_EL1_A 0x2U
+
+#define AARCH64_SCTLR_EL1_C 0x4U
+
+#define AARCH64_SCTLR_EL1_SA 0x8U
+
+#define AARCH64_SCTLR_EL1_SA0 0x10U
+
+#define AARCH64_SCTLR_EL1_CP15BEN 0x20U
+
+#define AARCH64_SCTLR_EL1_NAA 0x40U
+
+#define AARCH64_SCTLR_EL1_ITD 0x80U
+
+#define AARCH64_SCTLR_EL1_SED 0x100U
+
+#define AARCH64_SCTLR_EL1_UMA 0x200U
+
+#define AARCH64_SCTLR_EL1_ENRCTX 0x400U
+
+#define AARCH64_SCTLR_EL1_EOS 0x800U
+
+#define AARCH64_SCTLR_EL1_I 0x1000U
+
+#define AARCH64_SCTLR_EL1_ENDB 0x2000U
+
+#define AARCH64_SCTLR_EL1_DZE 0x4000U
+
+#define AARCH64_SCTLR_EL1_UCT 0x8000U
+
+#define AARCH64_SCTLR_EL1_NTWI 0x10000U
+
+#define AARCH64_SCTLR_EL1_NTWE 0x40000U
+
+#define AARCH64_SCTLR_EL1_WXN 0x80000U
+
+#define AARCH64_SCTLR_EL1_TSCXT 0x100000U
+
+#define AARCH64_SCTLR_EL1_IESB 0x200000U
+
+#define AARCH64_SCTLR_EL1_EIS 0x400000U
+
+#define AARCH64_SCTLR_EL1_SPAN 0x800000U
+
+#define AARCH64_SCTLR_EL1_E0E 0x1000000U
+
+#define AARCH64_SCTLR_EL1_EE 0x2000000U
+
+#define AARCH64_SCTLR_EL1_UCI 0x4000000U
+
+#define AARCH64_SCTLR_EL1_ENDA 0x8000000U
+
+#define AARCH64_SCTLR_EL1_NTLSMD 0x10000000U
+
+#define AARCH64_SCTLR_EL1_LSMAOE 0x20000000U
+
+#define AARCH64_SCTLR_EL1_ENIB 0x40000000U
+
+#define AARCH64_SCTLR_EL1_ENIA 0x80000000U
+
+#define AARCH64_SCTLR_EL1_BT0 0x800000000ULL
+
+#define AARCH64_SCTLR_EL1_BT1 0x1000000000ULL
+
+#define AARCH64_SCTLR_EL1_ITFSB 0x2000000000ULL
+
+#define AARCH64_SCTLR_EL1_TCF0( _val ) ( ( _val ) << 38 )
+#define AARCH64_SCTLR_EL1_TCF0_SHIFT 38
+#define AARCH64_SCTLR_EL1_TCF0_MASK 0xc000000000ULL
+#define AARCH64_SCTLR_EL1_TCF0_GET( _reg ) \
+ ( ( ( _reg ) >> 38 ) & 0x3ULL )
+
+#define AARCH64_SCTLR_EL1_TCF( _val ) ( ( _val ) << 40 )
+#define AARCH64_SCTLR_EL1_TCF_SHIFT 40
+#define AARCH64_SCTLR_EL1_TCF_MASK 0x30000000000ULL
+#define AARCH64_SCTLR_EL1_TCF_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0x3ULL )
+
+#define AARCH64_SCTLR_EL1_ATA0 0x40000000000ULL
+
+#define AARCH64_SCTLR_EL1_ATA 0x80000000000ULL
+
+#define AARCH64_SCTLR_EL1_DSSBS 0x100000000000ULL
+
+#define AARCH64_SCTLR_EL1_TWEDEN 0x200000000000ULL
+
+#define AARCH64_SCTLR_EL1_TWEDEL( _val ) ( ( _val ) << 46 )
+#define AARCH64_SCTLR_EL1_TWEDEL_SHIFT 46
+#define AARCH64_SCTLR_EL1_TWEDEL_MASK 0x3c00000000000ULL
+#define AARCH64_SCTLR_EL1_TWEDEL_GET( _reg ) \
+ ( ( ( _reg ) >> 46 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_sctlr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCTLR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_sctlr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCTLR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCTLR_EL2, System Control Register (EL2) */
+
+#define AARCH64_SCTLR_EL2_M 0x1U
+
+#define AARCH64_SCTLR_EL2_A 0x2U
+
+#define AARCH64_SCTLR_EL2_C 0x4U
+
+#define AARCH64_SCTLR_EL2_SA 0x8U
+
+#define AARCH64_SCTLR_EL2_SA0 0x10U
+
+#define AARCH64_SCTLR_EL2_CP15BEN 0x20U
+
+#define AARCH64_SCTLR_EL2_NAA 0x40U
+
+#define AARCH64_SCTLR_EL2_ITD 0x80U
+
+#define AARCH64_SCTLR_EL2_SED 0x100U
+
+#define AARCH64_SCTLR_EL2_ENRCTX 0x400U
+
+#define AARCH64_SCTLR_EL2_EOS 0x800U
+
+#define AARCH64_SCTLR_EL2_I 0x1000U
+
+#define AARCH64_SCTLR_EL2_ENDB 0x2000U
+
+#define AARCH64_SCTLR_EL2_DZE 0x4000U
+
+#define AARCH64_SCTLR_EL2_UCT 0x8000U
+
+#define AARCH64_SCTLR_EL2_NTWI 0x10000U
+
+#define AARCH64_SCTLR_EL2_NTWE 0x40000U
+
+#define AARCH64_SCTLR_EL2_WXN 0x80000U
+
+#define AARCH64_SCTLR_EL2_TSCXT 0x100000U
+
+#define AARCH64_SCTLR_EL2_IESB 0x200000U
+
+#define AARCH64_SCTLR_EL2_EIS 0x400000U
+
+#define AARCH64_SCTLR_EL2_SPAN 0x800000U
+
+#define AARCH64_SCTLR_EL2_E0E 0x1000000U
+
+#define AARCH64_SCTLR_EL2_EE 0x2000000U
+
+#define AARCH64_SCTLR_EL2_UCI 0x4000000U
+
+#define AARCH64_SCTLR_EL2_ENDA 0x8000000U
+
+#define AARCH64_SCTLR_EL2_NTLSMD 0x10000000U
+
+#define AARCH64_SCTLR_EL2_LSMAOE 0x20000000U
+
+#define AARCH64_SCTLR_EL2_ENIB 0x40000000U
+
+#define AARCH64_SCTLR_EL2_ENIA 0x80000000U
+
+#define AARCH64_SCTLR_EL2_BT0 0x800000000ULL
+
+#define AARCH64_SCTLR_EL2_BT 0x1000000000ULL
+
+#define AARCH64_SCTLR_EL2_BT1 0x1000000000ULL
+
+#define AARCH64_SCTLR_EL2_ITFSB 0x2000000000ULL
+
+#define AARCH64_SCTLR_EL2_TCF0( _val ) ( ( _val ) << 38 )
+#define AARCH64_SCTLR_EL2_TCF0_SHIFT 38
+#define AARCH64_SCTLR_EL2_TCF0_MASK 0xc000000000ULL
+#define AARCH64_SCTLR_EL2_TCF0_GET( _reg ) \
+ ( ( ( _reg ) >> 38 ) & 0x3ULL )
+
+#define AARCH64_SCTLR_EL2_TCF( _val ) ( ( _val ) << 40 )
+#define AARCH64_SCTLR_EL2_TCF_SHIFT 40
+#define AARCH64_SCTLR_EL2_TCF_MASK 0x30000000000ULL
+#define AARCH64_SCTLR_EL2_TCF_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0x3ULL )
+
+#define AARCH64_SCTLR_EL2_ATA0 0x40000000000ULL
+
+#define AARCH64_SCTLR_EL2_ATA 0x80000000000ULL
+
+#define AARCH64_SCTLR_EL2_DSSBS 0x100000000000ULL
+
+#define AARCH64_SCTLR_EL2_TWEDEN 0x200000000000ULL
+
+#define AARCH64_SCTLR_EL2_TWEDEL( _val ) ( ( _val ) << 46 )
+#define AARCH64_SCTLR_EL2_TWEDEL_SHIFT 46
+#define AARCH64_SCTLR_EL2_TWEDEL_MASK 0x3c00000000000ULL
+#define AARCH64_SCTLR_EL2_TWEDEL_GET( _reg ) \
+ ( ( ( _reg ) >> 46 ) & 0xfULL )
+
+static inline uint64_t _AArch64_Read_sctlr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCTLR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_sctlr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCTLR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCTLR_EL3, System Control Register (EL3) */
+
+#define AARCH64_SCTLR_EL3_M 0x1U
+
+#define AARCH64_SCTLR_EL3_A 0x2U
+
+#define AARCH64_SCTLR_EL3_C 0x4U
+
+#define AARCH64_SCTLR_EL3_SA 0x8U
+
+#define AARCH64_SCTLR_EL3_NAA 0x40U
+
+#define AARCH64_SCTLR_EL3_EOS 0x800U
+
+#define AARCH64_SCTLR_EL3_I 0x1000U
+
+#define AARCH64_SCTLR_EL3_ENDB 0x2000U
+
+#define AARCH64_SCTLR_EL3_WXN 0x80000U
+
+#define AARCH64_SCTLR_EL3_IESB 0x200000U
+
+#define AARCH64_SCTLR_EL3_EIS 0x400000U
+
+#define AARCH64_SCTLR_EL3_EE 0x2000000U
+
+#define AARCH64_SCTLR_EL3_ENDA 0x8000000U
+
+#define AARCH64_SCTLR_EL3_ENIB 0x40000000U
+
+#define AARCH64_SCTLR_EL3_ENIA 0x80000000U
+
+#define AARCH64_SCTLR_EL3_BT 0x1000000000ULL
+
+#define AARCH64_SCTLR_EL3_ITFSB 0x2000000000ULL
+
+#define AARCH64_SCTLR_EL3_TCF( _val ) ( ( _val ) << 40 )
+#define AARCH64_SCTLR_EL3_TCF_SHIFT 40
+#define AARCH64_SCTLR_EL3_TCF_MASK 0x30000000000ULL
+#define AARCH64_SCTLR_EL3_TCF_GET( _reg ) \
+ ( ( ( _reg ) >> 40 ) & 0x3ULL )
+
+#define AARCH64_SCTLR_EL3_ATA 0x80000000000ULL
+
+#define AARCH64_SCTLR_EL3_DSSBS 0x100000000000ULL
+
+static inline uint64_t _AArch64_Read_sctlr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCTLR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_sctlr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCTLR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCXTNUM_EL0, EL0 Read/Write Software Context Number */
+
+static inline uint64_t _AArch64_Read_scxtnum_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCXTNUM_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_scxtnum_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCXTNUM_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCXTNUM_EL1, EL1 Read/Write Software Context Number */
+
+static inline uint64_t _AArch64_Read_scxtnum_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCXTNUM_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_scxtnum_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCXTNUM_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCXTNUM_EL2, EL2 Read/Write Software Context Number */
+
+static inline uint64_t _AArch64_Read_scxtnum_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCXTNUM_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_scxtnum_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCXTNUM_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SCXTNUM_EL3, EL3 Read/Write Software Context Number */
+
+static inline uint64_t _AArch64_Read_scxtnum_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SCXTNUM_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_scxtnum_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SCXTNUM_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TCR_EL1, Translation Control Register (EL1) */
+
+#define AARCH64_TCR_EL1_T0SZ( _val ) ( ( _val ) << 0 )
+#define AARCH64_TCR_EL1_T0SZ_SHIFT 0
+#define AARCH64_TCR_EL1_T0SZ_MASK 0x3fU
+#define AARCH64_TCR_EL1_T0SZ_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_TCR_EL1_EPD0 0x80U
+
+#define AARCH64_TCR_EL1_IRGN0( _val ) ( ( _val ) << 8 )
+#define AARCH64_TCR_EL1_IRGN0_SHIFT 8
+#define AARCH64_TCR_EL1_IRGN0_MASK 0x300U
+#define AARCH64_TCR_EL1_IRGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_ORGN0( _val ) ( ( _val ) << 10 )
+#define AARCH64_TCR_EL1_ORGN0_SHIFT 10
+#define AARCH64_TCR_EL1_ORGN0_MASK 0xc00U
+#define AARCH64_TCR_EL1_ORGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_SH0( _val ) ( ( _val ) << 12 )
+#define AARCH64_TCR_EL1_SH0_SHIFT 12
+#define AARCH64_TCR_EL1_SH0_MASK 0x3000U
+#define AARCH64_TCR_EL1_SH0_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_TG0( _val ) ( ( _val ) << 14 )
+#define AARCH64_TCR_EL1_TG0_SHIFT 14
+#define AARCH64_TCR_EL1_TG0_MASK 0xc000U
+#define AARCH64_TCR_EL1_TG0_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_T1SZ( _val ) ( ( _val ) << 16 )
+#define AARCH64_TCR_EL1_T1SZ_SHIFT 16
+#define AARCH64_TCR_EL1_T1SZ_MASK 0x3f0000U
+#define AARCH64_TCR_EL1_T1SZ_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0x3fU )
+
+#define AARCH64_TCR_EL1_A1 0x400000U
+
+#define AARCH64_TCR_EL1_EPD1 0x800000U
+
+#define AARCH64_TCR_EL1_IRGN1( _val ) ( ( _val ) << 24 )
+#define AARCH64_TCR_EL1_IRGN1_SHIFT 24
+#define AARCH64_TCR_EL1_IRGN1_MASK 0x3000000U
+#define AARCH64_TCR_EL1_IRGN1_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_ORGN1( _val ) ( ( _val ) << 26 )
+#define AARCH64_TCR_EL1_ORGN1_SHIFT 26
+#define AARCH64_TCR_EL1_ORGN1_MASK 0xc000000U
+#define AARCH64_TCR_EL1_ORGN1_GET( _reg ) \
+ ( ( ( _reg ) >> 26 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_SH1( _val ) ( ( _val ) << 28 )
+#define AARCH64_TCR_EL1_SH1_SHIFT 28
+#define AARCH64_TCR_EL1_SH1_MASK 0x30000000U
+#define AARCH64_TCR_EL1_SH1_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_TG1( _val ) ( ( _val ) << 30 )
+#define AARCH64_TCR_EL1_TG1_SHIFT 30
+#define AARCH64_TCR_EL1_TG1_MASK 0xc0000000U
+#define AARCH64_TCR_EL1_TG1_GET( _reg ) \
+ ( ( ( _reg ) >> 30 ) & 0x3U )
+
+#define AARCH64_TCR_EL1_IPS( _val ) ( ( _val ) << 32 )
+#define AARCH64_TCR_EL1_IPS_SHIFT 32
+#define AARCH64_TCR_EL1_IPS_MASK 0x700000000ULL
+#define AARCH64_TCR_EL1_IPS_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0x7ULL )
+
+#define AARCH64_TCR_EL1_AS 0x1000000000ULL
+
+#define AARCH64_TCR_EL1_TBI0 0x2000000000ULL
+
+#define AARCH64_TCR_EL1_TBI1 0x4000000000ULL
+
+#define AARCH64_TCR_EL1_HA 0x8000000000ULL
+
+#define AARCH64_TCR_EL1_HD 0x10000000000ULL
+
+#define AARCH64_TCR_EL1_HPD0 0x20000000000ULL
+
+#define AARCH64_TCR_EL1_HPD1 0x40000000000ULL
+
+#define AARCH64_TCR_EL1_HWU059 0x80000000000ULL
+
+#define AARCH64_TCR_EL1_HWU060 0x100000000000ULL
+
+#define AARCH64_TCR_EL1_HWU061 0x200000000000ULL
+
+#define AARCH64_TCR_EL1_HWU062 0x400000000000ULL
+
+#define AARCH64_TCR_EL1_HWU159 0x800000000000ULL
+
+#define AARCH64_TCR_EL1_HWU160 0x1000000000000ULL
+
+#define AARCH64_TCR_EL1_HWU161 0x2000000000000ULL
+
+#define AARCH64_TCR_EL1_HWU162 0x4000000000000ULL
+
+#define AARCH64_TCR_EL1_TBID0 0x8000000000000ULL
+
+#define AARCH64_TCR_EL1_TBID1 0x10000000000000ULL
+
+#define AARCH64_TCR_EL1_NFD0 0x20000000000000ULL
+
+#define AARCH64_TCR_EL1_NFD1 0x40000000000000ULL
+
+#define AARCH64_TCR_EL1_E0PD0 0x80000000000000ULL
+
+#define AARCH64_TCR_EL1_E0PD1 0x100000000000000ULL
+
+#define AARCH64_TCR_EL1_TCMA0 0x200000000000000ULL
+
+#define AARCH64_TCR_EL1_TCMA1 0x400000000000000ULL
+
+static inline uint64_t _AArch64_Read_tcr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tcr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TCR_EL2, Translation Control Register (EL2) */
+
+#define AARCH64_TCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
+#define AARCH64_TCR_EL2_T0SZ_SHIFT 0
+#define AARCH64_TCR_EL2_T0SZ_MASK 0x3fU
+#define AARCH64_TCR_EL2_T0SZ_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_TCR_EL2_EPD0 0x80U
+
+#define AARCH64_TCR_EL2_IRGN0( _val ) ( ( _val ) << 8 )
+#define AARCH64_TCR_EL2_IRGN0_SHIFT 8
+#define AARCH64_TCR_EL2_IRGN0_MASK 0x300U
+#define AARCH64_TCR_EL2_IRGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_ORGN0( _val ) ( ( _val ) << 10 )
+#define AARCH64_TCR_EL2_ORGN0_SHIFT 10
+#define AARCH64_TCR_EL2_ORGN0_MASK 0xc00U
+#define AARCH64_TCR_EL2_ORGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_SH0( _val ) ( ( _val ) << 12 )
+#define AARCH64_TCR_EL2_SH0_SHIFT 12
+#define AARCH64_TCR_EL2_SH0_MASK 0x3000U
+#define AARCH64_TCR_EL2_SH0_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_TG0( _val ) ( ( _val ) << 14 )
+#define AARCH64_TCR_EL2_TG0_SHIFT 14
+#define AARCH64_TCR_EL2_TG0_MASK 0xc000U
+#define AARCH64_TCR_EL2_TG0_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_PS( _val ) ( ( _val ) << 16 )
+#define AARCH64_TCR_EL2_PS_SHIFT 16
+#define AARCH64_TCR_EL2_PS_MASK 0x70000U
+#define AARCH64_TCR_EL2_PS_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0x7U )
+
+#define AARCH64_TCR_EL2_T1SZ( _val ) ( ( _val ) << 16 )
+#define AARCH64_TCR_EL2_T1SZ_SHIFT 16
+#define AARCH64_TCR_EL2_T1SZ_MASK 0x3f0000U
+#define AARCH64_TCR_EL2_T1SZ_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0x3fU )
+
+#define AARCH64_TCR_EL2_TBI 0x100000U
+
+#define AARCH64_TCR_EL2_HA_0 0x200000U
+
+#define AARCH64_TCR_EL2_A1 0x400000U
+
+#define AARCH64_TCR_EL2_HD_0 0x400000U
+
+#define AARCH64_TCR_EL2_EPD1 0x800000U
+
+#define AARCH64_TCR_EL2_HPD 0x1000000U
+
+#define AARCH64_TCR_EL2_IRGN1( _val ) ( ( _val ) << 24 )
+#define AARCH64_TCR_EL2_IRGN1_SHIFT 24
+#define AARCH64_TCR_EL2_IRGN1_MASK 0x3000000U
+#define AARCH64_TCR_EL2_IRGN1_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_HWU59 0x2000000U
+
+#define AARCH64_TCR_EL2_HWU60 0x4000000U
+
+#define AARCH64_TCR_EL2_ORGN1( _val ) ( ( _val ) << 26 )
+#define AARCH64_TCR_EL2_ORGN1_SHIFT 26
+#define AARCH64_TCR_EL2_ORGN1_MASK 0xc000000U
+#define AARCH64_TCR_EL2_ORGN1_GET( _reg ) \
+ ( ( ( _reg ) >> 26 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_HWU61 0x8000000U
+
+#define AARCH64_TCR_EL2_HWU62 0x10000000U
+
+#define AARCH64_TCR_EL2_SH1( _val ) ( ( _val ) << 28 )
+#define AARCH64_TCR_EL2_SH1_SHIFT 28
+#define AARCH64_TCR_EL2_SH1_MASK 0x30000000U
+#define AARCH64_TCR_EL2_SH1_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_TBID 0x20000000U
+
+#define AARCH64_TCR_EL2_TCMA 0x40000000U
+
+#define AARCH64_TCR_EL2_TG1( _val ) ( ( _val ) << 30 )
+#define AARCH64_TCR_EL2_TG1_SHIFT 30
+#define AARCH64_TCR_EL2_TG1_MASK 0xc0000000U
+#define AARCH64_TCR_EL2_TG1_GET( _reg ) \
+ ( ( ( _reg ) >> 30 ) & 0x3U )
+
+#define AARCH64_TCR_EL2_IPS( _val ) ( ( _val ) << 32 )
+#define AARCH64_TCR_EL2_IPS_SHIFT 32
+#define AARCH64_TCR_EL2_IPS_MASK 0x700000000ULL
+#define AARCH64_TCR_EL2_IPS_GET( _reg ) \
+ ( ( ( _reg ) >> 32 ) & 0x7ULL )
+
+#define AARCH64_TCR_EL2_AS 0x1000000000ULL
+
+#define AARCH64_TCR_EL2_TBI0 0x2000000000ULL
+
+#define AARCH64_TCR_EL2_TBI1 0x4000000000ULL
+
+#define AARCH64_TCR_EL2_HA_1 0x8000000000ULL
+
+#define AARCH64_TCR_EL2_HD_1 0x10000000000ULL
+
+#define AARCH64_TCR_EL2_HPD0 0x20000000000ULL
+
+#define AARCH64_TCR_EL2_HPD1 0x40000000000ULL
+
+#define AARCH64_TCR_EL2_HWU059 0x80000000000ULL
+
+#define AARCH64_TCR_EL2_HWU060 0x100000000000ULL
+
+#define AARCH64_TCR_EL2_HWU061 0x200000000000ULL
+
+#define AARCH64_TCR_EL2_HWU062 0x400000000000ULL
+
+#define AARCH64_TCR_EL2_HWU159 0x800000000000ULL
+
+#define AARCH64_TCR_EL2_HWU160 0x1000000000000ULL
+
+#define AARCH64_TCR_EL2_HWU161 0x2000000000000ULL
+
+#define AARCH64_TCR_EL2_HWU162 0x4000000000000ULL
+
+#define AARCH64_TCR_EL2_TBID0 0x8000000000000ULL
+
+#define AARCH64_TCR_EL2_TBID1 0x10000000000000ULL
+
+#define AARCH64_TCR_EL2_NFD0 0x20000000000000ULL
+
+#define AARCH64_TCR_EL2_NFD1 0x40000000000000ULL
+
+#define AARCH64_TCR_EL2_E0PD0 0x80000000000000ULL
+
+#define AARCH64_TCR_EL2_E0PD1 0x100000000000000ULL
+
+#define AARCH64_TCR_EL2_TCMA0 0x200000000000000ULL
+
+#define AARCH64_TCR_EL2_TCMA1 0x400000000000000ULL
+
+static inline uint64_t _AArch64_Read_tcr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TCR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tcr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TCR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TCR_EL3, Translation Control Register (EL3) */
+
+#define AARCH64_TCR_EL3_T0SZ( _val ) ( ( _val ) << 0 )
+#define AARCH64_TCR_EL3_T0SZ_SHIFT 0
+#define AARCH64_TCR_EL3_T0SZ_MASK 0x3fU
+#define AARCH64_TCR_EL3_T0SZ_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_TCR_EL3_IRGN0( _val ) ( ( _val ) << 8 )
+#define AARCH64_TCR_EL3_IRGN0_SHIFT 8
+#define AARCH64_TCR_EL3_IRGN0_MASK 0x300U
+#define AARCH64_TCR_EL3_IRGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0x3U )
+
+#define AARCH64_TCR_EL3_ORGN0( _val ) ( ( _val ) << 10 )
+#define AARCH64_TCR_EL3_ORGN0_SHIFT 10
+#define AARCH64_TCR_EL3_ORGN0_MASK 0xc00U
+#define AARCH64_TCR_EL3_ORGN0_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x3U )
+
+#define AARCH64_TCR_EL3_SH0( _val ) ( ( _val ) << 12 )
+#define AARCH64_TCR_EL3_SH0_SHIFT 12
+#define AARCH64_TCR_EL3_SH0_MASK 0x3000U
+#define AARCH64_TCR_EL3_SH0_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0x3U )
+
+#define AARCH64_TCR_EL3_TG0( _val ) ( ( _val ) << 14 )
+#define AARCH64_TCR_EL3_TG0_SHIFT 14
+#define AARCH64_TCR_EL3_TG0_MASK 0xc000U
+#define AARCH64_TCR_EL3_TG0_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_TCR_EL3_PS( _val ) ( ( _val ) << 16 )
+#define AARCH64_TCR_EL3_PS_SHIFT 16
+#define AARCH64_TCR_EL3_PS_MASK 0x70000U
+#define AARCH64_TCR_EL3_PS_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0x7U )
+
+#define AARCH64_TCR_EL3_TBI 0x100000U
+
+#define AARCH64_TCR_EL3_HA 0x200000U
+
+#define AARCH64_TCR_EL3_HD 0x400000U
+
+#define AARCH64_TCR_EL3_HPD 0x1000000U
+
+#define AARCH64_TCR_EL3_HWU59 0x2000000U
+
+#define AARCH64_TCR_EL3_HWU60 0x4000000U
+
+#define AARCH64_TCR_EL3_HWU61 0x8000000U
+
+#define AARCH64_TCR_EL3_HWU62 0x10000000U
+
+#define AARCH64_TCR_EL3_TBID 0x20000000U
+
+#define AARCH64_TCR_EL3_TCMA 0x40000000U
+
+static inline uint64_t _AArch64_Read_tcr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TCR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tcr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TCR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TFSRE0_EL1, Tag Fault Status Register (EL0). */
+
+#define AARCH64_TFSRE0_EL1_TF0 0x1U
+
+#define AARCH64_TFSRE0_EL1_TF1 0x2U
+
+static inline uint64_t _AArch64_Read_tfsre0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TFSRE0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tfsre0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TFSRE0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TFSR_EL1, Tag Fault Status Register (EL1) */
+
+#define AARCH64_TFSR_EL1_TF0 0x1U
+
+#define AARCH64_TFSR_EL1_TF1 0x2U
+
+static inline uint64_t _AArch64_Read_tfsr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TFSR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tfsr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TFSR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TFSR_EL2, Tag Fault Status Register (EL2) */
+
+#define AARCH64_TFSR_EL2_TF0 0x1U
+
+#define AARCH64_TFSR_EL2_TF1 0x2U
+
+static inline uint64_t _AArch64_Read_tfsr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TFSR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tfsr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TFSR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TFSR_EL3, Tag Fault Status Register (EL3) */
+
+#define AARCH64_TFSR_EL3_TF0 0x1U
+
+static inline uint64_t _AArch64_Read_tfsr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TFSR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_tfsr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TFSR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
/* TPIDR_EL0, EL0 Read/Write Software Thread ID Register */

/* Read TPIDR_EL0 (software-defined thread ID value, no fixed fields). */
static inline uint64_t _AArch64_Read_tpidr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TPIDR_EL0. */
static inline void _AArch64_Write_tpidr_el0( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL0, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL1, EL1 Software Thread ID Register */

/* Read TPIDR_EL1. */
static inline uint64_t _AArch64_Read_tpidr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TPIDR_EL1. */
static inline void _AArch64_Write_tpidr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL2, EL2 Software Thread ID Register */

/* Read TPIDR_EL2. */
static inline uint64_t _AArch64_Read_tpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TPIDR_EL2. */
static inline void _AArch64_Write_tpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDR_EL3, EL3 Software Thread ID Register */

/* Read TPIDR_EL3. */
static inline uint64_t _AArch64_Read_tpidr_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TPIDR_EL3. */
static inline void _AArch64_Write_tpidr_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDR_EL3, %0" : : "r" ( value ) : "memory"
  );
}

/* TPIDRRO_EL0, EL0 Read-Only Software Thread ID Register */

/* Read TPIDRRO_EL0 ("read-only" refers to EL0 accesses, per the name). */
static inline uint64_t _AArch64_Read_tpidrro_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TPIDRRO_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TPIDRRO_EL0; presumably requires a higher exception level — see the
   Arm ARM for the access rules. */
static inline void _AArch64_Write_tpidrro_el0( uint64_t value )
{
  __asm__ volatile (
    "msr TPIDRRO_EL0, %0" : : "r" ( value ) : "memory"
  );
}
+
/* TTBR0_EL1, Translation Table Base Register 0 (EL1) */

#define AARCH64_TTBR0_EL1_CNP 0x1U

/* BADDR occupies bits [47:1] (translation table base address). */
#define AARCH64_TTBR0_EL1_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL1_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL1_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL1_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

/* ASID occupies bits [63:48]. */
#define AARCH64_TTBR0_EL1_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR0_EL1_ASID_SHIFT 48
#define AARCH64_TTBR0_EL1_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR0_EL1_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

/* Read TTBR0_EL1. */
static inline uint64_t _AArch64_Read_ttbr0_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TTBR0_EL1. */
static inline void _AArch64_Write_ttbr0_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR0_EL2, Translation Table Base Register 0 (EL2) */

#define AARCH64_TTBR0_EL2_CNP 0x1U

#define AARCH64_TTBR0_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL2_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR0_EL2_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR0_EL2_ASID_SHIFT 48
#define AARCH64_TTBR0_EL2_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR0_EL2_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

/* Read TTBR0_EL2. */
static inline uint64_t _AArch64_Read_ttbr0_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TTBR0_EL2. */
static inline void _AArch64_Write_ttbr0_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR0_EL3, Translation Table Base Register 0 (EL3) */

#define AARCH64_TTBR0_EL3_CNP 0x1U

/* Note: TTBR0_EL3 has no ASID field (no EL0 at EL3 translation regime). */
#define AARCH64_TTBR0_EL3_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR0_EL3_BADDR_SHIFT 1
#define AARCH64_TTBR0_EL3_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR0_EL3_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

/* Read TTBR0_EL3. */
static inline uint64_t _AArch64_Read_ttbr0_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR0_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TTBR0_EL3. */
static inline void _AArch64_Write_ttbr0_el3( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR0_EL3, %0" : : "r" ( value ) : "memory"
  );
}
+
/* TTBR1_EL1, Translation Table Base Register 1 (EL1) */

#define AARCH64_TTBR1_EL1_CNP 0x1U

/* BADDR bits [47:1], ASID bits [63:48] — same layout as TTBR0_EL1. */
#define AARCH64_TTBR1_EL1_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR1_EL1_BADDR_SHIFT 1
#define AARCH64_TTBR1_EL1_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR1_EL1_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR1_EL1_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR1_EL1_ASID_SHIFT 48
#define AARCH64_TTBR1_EL1_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR1_EL1_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

/* Read TTBR1_EL1. */
static inline uint64_t _AArch64_Read_ttbr1_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR1_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TTBR1_EL1. */
static inline void _AArch64_Write_ttbr1_el1( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR1_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* TTBR1_EL2, Translation Table Base Register 1 (EL2) */

#define AARCH64_TTBR1_EL2_CNP 0x1U

#define AARCH64_TTBR1_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_TTBR1_EL2_BADDR_SHIFT 1
#define AARCH64_TTBR1_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_TTBR1_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

#define AARCH64_TTBR1_EL2_ASID( _val ) ( ( _val ) << 48 )
#define AARCH64_TTBR1_EL2_ASID_SHIFT 48
#define AARCH64_TTBR1_EL2_ASID_MASK 0xffff000000000000ULL
#define AARCH64_TTBR1_EL2_ASID_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffffULL )

/* Read TTBR1_EL2. */
static inline uint64_t _AArch64_Read_ttbr1_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, TTBR1_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write TTBR1_EL2. */
static inline void _AArch64_Write_ttbr1_el2( uint64_t value )
{
  __asm__ volatile (
    "msr TTBR1_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VBAR_EL1, Vector Base Address Register (EL1) */

/* Read VBAR_EL1 (exception vector table base for EL1). */
static inline uint64_t _AArch64_Read_vbar_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VBAR_EL1. */
static inline void _AArch64_Write_vbar_el1( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* VBAR_EL2, Vector Base Address Register (EL2) */

/* Read VBAR_EL2. */
static inline uint64_t _AArch64_Read_vbar_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VBAR_EL2. */
static inline void _AArch64_Write_vbar_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VBAR_EL3, Vector Base Address Register (EL3) */

/* Read VBAR_EL3. */
static inline uint64_t _AArch64_Read_vbar_el3( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VBAR_EL3" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VBAR_EL3. */
static inline void _AArch64_Write_vbar_el3( uint64_t value )
{
  __asm__ volatile (
    "msr VBAR_EL3, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VMPIDR_EL2, Virtualization Multiprocessor ID Register */

/* Affinity level fields: AFF0 [7:0], AFF1 [15:8], AFF2 [23:16], AFF3 [39:32]. */
#define AARCH64_VMPIDR_EL2_AFF0( _val ) ( ( _val ) << 0 )
#define AARCH64_VMPIDR_EL2_AFF0_SHIFT 0
#define AARCH64_VMPIDR_EL2_AFF0_MASK 0xffU
#define AARCH64_VMPIDR_EL2_AFF0_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_AFF1( _val ) ( ( _val ) << 8 )
#define AARCH64_VMPIDR_EL2_AFF1_SHIFT 8
#define AARCH64_VMPIDR_EL2_AFF1_MASK 0xff00U
#define AARCH64_VMPIDR_EL2_AFF1_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_AFF2( _val ) ( ( _val ) << 16 )
#define AARCH64_VMPIDR_EL2_AFF2_SHIFT 16
#define AARCH64_VMPIDR_EL2_AFF2_MASK 0xff0000U
#define AARCH64_VMPIDR_EL2_AFF2_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xffU )

#define AARCH64_VMPIDR_EL2_MT 0x1000000U

#define AARCH64_VMPIDR_EL2_U 0x40000000U

#define AARCH64_VMPIDR_EL2_AFF3( _val ) ( ( _val ) << 32 )
#define AARCH64_VMPIDR_EL2_AFF3_SHIFT 32
#define AARCH64_VMPIDR_EL2_AFF3_MASK 0xff00000000ULL
#define AARCH64_VMPIDR_EL2_AFF3_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xffULL )

/* Read VMPIDR_EL2. */
static inline uint64_t _AArch64_Read_vmpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VMPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VMPIDR_EL2. */
static inline void _AArch64_Write_vmpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VMPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VNCR_EL2, Virtual Nested Control Register */

/* BADDR bits [53:12]; RESS bits [63:53] overlap bit 53 by design of the
   generated masks — NOTE(review): confirm against the Arm ARM if both fields
   are used together. */
#define AARCH64_VNCR_EL2_BADDR( _val ) ( ( _val ) << 12 )
#define AARCH64_VNCR_EL2_BADDR_SHIFT 12
#define AARCH64_VNCR_EL2_BADDR_MASK 0x1ffffffffff000ULL
#define AARCH64_VNCR_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x1ffffffffffULL )

#define AARCH64_VNCR_EL2_RESS( _val ) ( ( _val ) << 53 )
#define AARCH64_VNCR_EL2_RESS_SHIFT 53
#define AARCH64_VNCR_EL2_RESS_MASK 0xffe0000000000000ULL
#define AARCH64_VNCR_EL2_RESS_GET( _reg ) \
  ( ( ( _reg ) >> 53 ) & 0x7ffULL )

/* Read VNCR_EL2. */
static inline uint64_t _AArch64_Read_vncr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VNCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VNCR_EL2. */
static inline void _AArch64_Write_vncr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VNCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VPIDR_EL2, Virtualization Processor ID Register */

/* Field layout mirrors MIDR: Revision [3:0], PartNum [15:4],
   Architecture [19:16], Variant [23:20], Implementer [31:24]. */
#define AARCH64_VPIDR_EL2_REVISION( _val ) ( ( _val ) << 0 )
#define AARCH64_VPIDR_EL2_REVISION_SHIFT 0
#define AARCH64_VPIDR_EL2_REVISION_MASK 0xfU
#define AARCH64_VPIDR_EL2_REVISION_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_VPIDR_EL2_PARTNUM( _val ) ( ( _val ) << 4 )
#define AARCH64_VPIDR_EL2_PARTNUM_SHIFT 4
#define AARCH64_VPIDR_EL2_PARTNUM_MASK 0xfff0U
#define AARCH64_VPIDR_EL2_PARTNUM_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0xfffU )

#define AARCH64_VPIDR_EL2_ARCHITECTURE( _val ) ( ( _val ) << 16 )
#define AARCH64_VPIDR_EL2_ARCHITECTURE_SHIFT 16
#define AARCH64_VPIDR_EL2_ARCHITECTURE_MASK 0xf0000U
#define AARCH64_VPIDR_EL2_ARCHITECTURE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_VPIDR_EL2_VARIANT( _val ) ( ( _val ) << 20 )
#define AARCH64_VPIDR_EL2_VARIANT_SHIFT 20
#define AARCH64_VPIDR_EL2_VARIANT_MASK 0xf00000U
#define AARCH64_VPIDR_EL2_VARIANT_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

#define AARCH64_VPIDR_EL2_IMPLEMENTER( _val ) ( ( _val ) << 24 )
#define AARCH64_VPIDR_EL2_IMPLEMENTER_SHIFT 24
#define AARCH64_VPIDR_EL2_IMPLEMENTER_MASK 0xff000000U
#define AARCH64_VPIDR_EL2_IMPLEMENTER_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0xffU )

/* Read VPIDR_EL2. */
static inline uint64_t _AArch64_Read_vpidr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VPIDR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VPIDR_EL2. */
static inline void _AArch64_Write_vpidr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VPIDR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VSTCR_EL2, Virtualization Secure Translation Control Register */

#define AARCH64_VSTCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_VSTCR_EL2_T0SZ_SHIFT 0
#define AARCH64_VSTCR_EL2_T0SZ_MASK 0x3fU
#define AARCH64_VSTCR_EL2_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_VSTCR_EL2_SL0( _val ) ( ( _val ) << 6 )
#define AARCH64_VSTCR_EL2_SL0_SHIFT 6
#define AARCH64_VSTCR_EL2_SL0_MASK 0xc0U
#define AARCH64_VSTCR_EL2_SL0_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x3U )

#define AARCH64_VSTCR_EL2_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_VSTCR_EL2_TG0_SHIFT 14
#define AARCH64_VSTCR_EL2_TG0_MASK 0xc000U
#define AARCH64_VSTCR_EL2_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_VSTCR_EL2_SW 0x20000000U

#define AARCH64_VSTCR_EL2_SA 0x40000000U

/* Read VSTCR_EL2. */
static inline uint64_t _AArch64_Read_vstcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VSTCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VSTCR_EL2. */
static inline void _AArch64_Write_vstcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VSTCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}

/* VSTTBR_EL2, Virtualization Secure Translation Table Base Register */

#define AARCH64_VSTTBR_EL2_CNP 0x1U

#define AARCH64_VSTTBR_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_VSTTBR_EL2_BADDR_SHIFT 1
#define AARCH64_VSTTBR_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_VSTTBR_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

/* Read VSTTBR_EL2. */
static inline uint64_t _AArch64_Read_vsttbr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VSTTBR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VSTTBR_EL2. */
static inline void _AArch64_Write_vsttbr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VSTTBR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VTCR_EL2, Virtualization Translation Control Register */

/* Stage-2 translation control fields (layout parallels TCR_EL1 bits [18:0]). */
#define AARCH64_VTCR_EL2_T0SZ( _val ) ( ( _val ) << 0 )
#define AARCH64_VTCR_EL2_T0SZ_SHIFT 0
#define AARCH64_VTCR_EL2_T0SZ_MASK 0x3fU
#define AARCH64_VTCR_EL2_T0SZ_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_VTCR_EL2_SL0( _val ) ( ( _val ) << 6 )
#define AARCH64_VTCR_EL2_SL0_SHIFT 6
#define AARCH64_VTCR_EL2_SL0_MASK 0xc0U
#define AARCH64_VTCR_EL2_SL0_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x3U )

#define AARCH64_VTCR_EL2_IRGN0( _val ) ( ( _val ) << 8 )
#define AARCH64_VTCR_EL2_IRGN0_SHIFT 8
#define AARCH64_VTCR_EL2_IRGN0_MASK 0x300U
#define AARCH64_VTCR_EL2_IRGN0_GET( _reg ) \
  ( ( ( _reg ) >> 8 ) & 0x3U )

#define AARCH64_VTCR_EL2_ORGN0( _val ) ( ( _val ) << 10 )
#define AARCH64_VTCR_EL2_ORGN0_SHIFT 10
#define AARCH64_VTCR_EL2_ORGN0_MASK 0xc00U
#define AARCH64_VTCR_EL2_ORGN0_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_VTCR_EL2_SH0( _val ) ( ( _val ) << 12 )
#define AARCH64_VTCR_EL2_SH0_SHIFT 12
#define AARCH64_VTCR_EL2_SH0_MASK 0x3000U
#define AARCH64_VTCR_EL2_SH0_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_VTCR_EL2_TG0( _val ) ( ( _val ) << 14 )
#define AARCH64_VTCR_EL2_TG0_SHIFT 14
#define AARCH64_VTCR_EL2_TG0_MASK 0xc000U
#define AARCH64_VTCR_EL2_TG0_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_VTCR_EL2_PS( _val ) ( ( _val ) << 16 )
#define AARCH64_VTCR_EL2_PS_SHIFT 16
#define AARCH64_VTCR_EL2_PS_MASK 0x70000U
#define AARCH64_VTCR_EL2_PS_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0x7U )

#define AARCH64_VTCR_EL2_VS 0x80000U

#define AARCH64_VTCR_EL2_HA 0x200000U

#define AARCH64_VTCR_EL2_HD 0x400000U

#define AARCH64_VTCR_EL2_HWU59 0x2000000U

#define AARCH64_VTCR_EL2_HWU60 0x4000000U

#define AARCH64_VTCR_EL2_HWU61 0x8000000U

#define AARCH64_VTCR_EL2_HWU62 0x10000000U

#define AARCH64_VTCR_EL2_NSW 0x20000000U

#define AARCH64_VTCR_EL2_NSA 0x40000000U

/* Read VTCR_EL2. */
static inline uint64_t _AArch64_Read_vtcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VTCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VTCR_EL2. */
static inline void _AArch64_Write_vtcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VTCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* VTTBR_EL2, Virtualization Translation Table Base Register */

#define AARCH64_VTTBR_EL2_CNP 0x1U

#define AARCH64_VTTBR_EL2_BADDR( _val ) ( ( _val ) << 1 )
#define AARCH64_VTTBR_EL2_BADDR_SHIFT 1
#define AARCH64_VTTBR_EL2_BADDR_MASK 0xfffffffffffeULL
#define AARCH64_VTTBR_EL2_BADDR_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x7fffffffffffULL )

/* VMID is split into two byte-wide halves: [55:48] and [63:56]. */
#define AARCH64_VTTBR_EL2_VMID_7_0( _val ) ( ( _val ) << 48 )
#define AARCH64_VTTBR_EL2_VMID_7_0_SHIFT 48
#define AARCH64_VTTBR_EL2_VMID_7_0_MASK 0xff000000000000ULL
#define AARCH64_VTTBR_EL2_VMID_7_0_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xffULL )

#define AARCH64_VTTBR_EL2_VMID_15_8( _val ) ( ( _val ) << 56 )
#define AARCH64_VTTBR_EL2_VMID_15_8_SHIFT 56
#define AARCH64_VTTBR_EL2_VMID_15_8_MASK 0xff00000000000000ULL
#define AARCH64_VTTBR_EL2_VMID_15_8_GET( _reg ) \
  ( ( ( _reg ) >> 56 ) & 0xffULL )

/* Read VTTBR_EL2. */
static inline uint64_t _AArch64_Read_vttbr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, VTTBR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write VTTBR_EL2. */
static inline void _AArch64_Write_vttbr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr VTTBR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGAUTHSTATUS_EL1, Debug Authentication Status Register */

#define AARCH64_DBGAUTHSTATUS_EL1_NSID( _val ) ( ( _val ) << 0 )
#define AARCH64_DBGAUTHSTATUS_EL1_NSID_SHIFT 0
#define AARCH64_DBGAUTHSTATUS_EL1_NSID_MASK 0x3U
#define AARCH64_DBGAUTHSTATUS_EL1_NSID_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3U )

#define AARCH64_DBGAUTHSTATUS_EL1_NSNID( _val ) ( ( _val ) << 2 )
#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_SHIFT 2
#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_MASK 0xcU
#define AARCH64_DBGAUTHSTATUS_EL1_NSNID_GET( _reg ) \
  ( ( ( _reg ) >> 2 ) & 0x3U )

#define AARCH64_DBGAUTHSTATUS_EL1_SID( _val ) ( ( _val ) << 4 )
#define AARCH64_DBGAUTHSTATUS_EL1_SID_SHIFT 4
#define AARCH64_DBGAUTHSTATUS_EL1_SID_MASK 0x30U
#define AARCH64_DBGAUTHSTATUS_EL1_SID_GET( _reg ) \
  ( ( ( _reg ) >> 4 ) & 0x3U )

#define AARCH64_DBGAUTHSTATUS_EL1_SNID( _val ) ( ( _val ) << 6 )
#define AARCH64_DBGAUTHSTATUS_EL1_SNID_SHIFT 6
#define AARCH64_DBGAUTHSTATUS_EL1_SNID_MASK 0xc0U
#define AARCH64_DBGAUTHSTATUS_EL1_SNID_GET( _reg ) \
  ( ( ( _reg ) >> 6 ) & 0x3U )

/* Read DBGAUTHSTATUS_EL1 (read-only: no write accessor is generated). */
static inline uint64_t _AArch64_Read_dbgauthstatus_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGAUTHSTATUS_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* DBGBCR_N_EL1, Debug Breakpoint Control Registers, n = 0 - 15 */

#define AARCH64_DBGBCR_N_EL1_E 0x1U

#define AARCH64_DBGBCR_N_EL1_PMC( _val ) ( ( _val ) << 1 )
#define AARCH64_DBGBCR_N_EL1_PMC_SHIFT 1
#define AARCH64_DBGBCR_N_EL1_PMC_MASK 0x6U
#define AARCH64_DBGBCR_N_EL1_PMC_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x3U )

#define AARCH64_DBGBCR_N_EL1_BAS( _val ) ( ( _val ) << 5 )
#define AARCH64_DBGBCR_N_EL1_BAS_SHIFT 5
#define AARCH64_DBGBCR_N_EL1_BAS_MASK 0x1e0U
#define AARCH64_DBGBCR_N_EL1_BAS_GET( _reg ) \
  ( ( ( _reg ) >> 5 ) & 0xfU )

#define AARCH64_DBGBCR_N_EL1_HMC 0x2000U

#define AARCH64_DBGBCR_N_EL1_SSC( _val ) ( ( _val ) << 14 )
#define AARCH64_DBGBCR_N_EL1_SSC_SHIFT 14
#define AARCH64_DBGBCR_N_EL1_SSC_MASK 0xc000U
#define AARCH64_DBGBCR_N_EL1_SSC_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_DBGBCR_N_EL1_LBN( _val ) ( ( _val ) << 16 )
#define AARCH64_DBGBCR_N_EL1_LBN_SHIFT 16
#define AARCH64_DBGBCR_N_EL1_LBN_MASK 0xf0000U
#define AARCH64_DBGBCR_N_EL1_LBN_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_DBGBCR_N_EL1_BT( _val ) ( ( _val ) << 20 )
#define AARCH64_DBGBCR_N_EL1_BT_SHIFT 20
#define AARCH64_DBGBCR_N_EL1_BT_MASK 0xf00000U
#define AARCH64_DBGBCR_N_EL1_BT_GET( _reg ) \
  ( ( ( _reg ) >> 20 ) & 0xfU )

/* NOTE(review): "DBGBCR_N_EL1" is a generator placeholder for the numbered
   registers DBGBCR<n>_EL1 (n = 0 - 15); confirm the toolchain/intended use
   before calling these accessors. */
static inline uint64_t _AArch64_Read_dbgbcr_n_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGBCR_N_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGBCR_N_EL1 (see placeholder note above the read accessor). */
static inline void _AArch64_Write_dbgbcr_n_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGBCR_N_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGBVR_N_EL1, Debug Breakpoint Value Registers, n = 0 - 15 */

/* Overlapping field views: the register holds either a virtual address
   (VA_48_2 / VA_52_49 / RESS_14_4) or context ID / VMID values, so several
   masks below cover the same bits. */
#define AARCH64_DBGBVR_N_EL1_CONTEXTID( _val ) ( ( _val ) << 0 )
#define AARCH64_DBGBVR_N_EL1_CONTEXTID_SHIFT 0
#define AARCH64_DBGBVR_N_EL1_CONTEXTID_MASK 0xffffffffU
#define AARCH64_DBGBVR_N_EL1_CONTEXTID_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffffffffU )

#define AARCH64_DBGBVR_N_EL1_VA_48_2( _val ) ( ( _val ) << 2 )
#define AARCH64_DBGBVR_N_EL1_VA_48_2_SHIFT 2
#define AARCH64_DBGBVR_N_EL1_VA_48_2_MASK 0x1fffffffffffcULL
#define AARCH64_DBGBVR_N_EL1_VA_48_2_GET( _reg ) \
  ( ( ( _reg ) >> 2 ) & 0x7fffffffffffULL )

#define AARCH64_DBGBVR_N_EL1_VMID_7_0( _val ) ( ( _val ) << 32 )
#define AARCH64_DBGBVR_N_EL1_VMID_7_0_SHIFT 32
#define AARCH64_DBGBVR_N_EL1_VMID_7_0_MASK 0xff00000000ULL
#define AARCH64_DBGBVR_N_EL1_VMID_7_0_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xffULL )

#define AARCH64_DBGBVR_N_EL1_CONTEXTID2( _val ) ( ( _val ) << 32 )
#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_SHIFT 32
#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_MASK 0xffffffff00000000ULL
#define AARCH64_DBGBVR_N_EL1_CONTEXTID2_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xffffffffULL )

#define AARCH64_DBGBVR_N_EL1_VMID_15_8( _val ) ( ( _val ) << 40 )
#define AARCH64_DBGBVR_N_EL1_VMID_15_8_SHIFT 40
#define AARCH64_DBGBVR_N_EL1_VMID_15_8_MASK 0xff0000000000ULL
#define AARCH64_DBGBVR_N_EL1_VMID_15_8_GET( _reg ) \
  ( ( ( _reg ) >> 40 ) & 0xffULL )

#define AARCH64_DBGBVR_N_EL1_VA_52_49( _val ) ( ( _val ) << 49 )
#define AARCH64_DBGBVR_N_EL1_VA_52_49_SHIFT 49
#define AARCH64_DBGBVR_N_EL1_VA_52_49_MASK 0x1e000000000000ULL
#define AARCH64_DBGBVR_N_EL1_VA_52_49_GET( _reg ) \
  ( ( ( _reg ) >> 49 ) & 0xfULL )

#define AARCH64_DBGBVR_N_EL1_RESS_14_4( _val ) ( ( _val ) << 53 )
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_SHIFT 53
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_MASK 0xffe0000000000000ULL
#define AARCH64_DBGBVR_N_EL1_RESS_14_4_GET( _reg ) \
  ( ( ( _reg ) >> 53 ) & 0x7ffULL )

/* Read DBGBVR_N_EL1 ("N" is a generator placeholder for n = 0 - 15). */
static inline uint64_t _AArch64_Read_dbgbvr_n_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGBVR_N_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGBVR_N_EL1. */
static inline void _AArch64_Write_dbgbvr_n_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGBVR_N_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGCLAIMCLR_EL1, Debug CLAIM Tag Clear Register */

#define AARCH64_DBGCLAIMCLR_EL1_CLAIM( _val ) ( ( _val ) << 0 )
#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_SHIFT 0
#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_MASK 0xffU
#define AARCH64_DBGCLAIMCLR_EL1_CLAIM_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffU )

/* Read DBGCLAIMCLR_EL1 (current CLAIM tag bits). */
static inline uint64_t _AArch64_Read_dbgclaimclr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGCLAIMCLR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGCLAIMCLR_EL1 (per the name, clears CLAIM tag bits). */
static inline void _AArch64_Write_dbgclaimclr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGCLAIMCLR_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* DBGCLAIMSET_EL1, Debug CLAIM Tag Set Register */

#define AARCH64_DBGCLAIMSET_EL1_CLAIM( _val ) ( ( _val ) << 0 )
#define AARCH64_DBGCLAIMSET_EL1_CLAIM_SHIFT 0
#define AARCH64_DBGCLAIMSET_EL1_CLAIM_MASK 0xffU
#define AARCH64_DBGCLAIMSET_EL1_CLAIM_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffU )

/* Read DBGCLAIMSET_EL1. */
static inline uint64_t _AArch64_Read_dbgclaimset_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGCLAIMSET_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGCLAIMSET_EL1 (per the name, sets CLAIM tag bits). */
static inline void _AArch64_Write_dbgclaimset_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGCLAIMSET_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGDTR_EL0, Debug Data Transfer Register, half-duplex */

#define AARCH64_DBGDTR_EL0_LOWWORD( _val ) ( ( _val ) << 0 )
#define AARCH64_DBGDTR_EL0_LOWWORD_SHIFT 0
#define AARCH64_DBGDTR_EL0_LOWWORD_MASK 0xffffffffU
#define AARCH64_DBGDTR_EL0_LOWWORD_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffffffffU )

#define AARCH64_DBGDTR_EL0_HIGHWORD( _val ) ( ( _val ) << 32 )
#define AARCH64_DBGDTR_EL0_HIGHWORD_SHIFT 32
#define AARCH64_DBGDTR_EL0_HIGHWORD_MASK 0xffffffff00000000ULL
#define AARCH64_DBGDTR_EL0_HIGHWORD_GET( _reg ) \
  ( ( ( _reg ) >> 32 ) & 0xffffffffULL )

/* Read DBGDTR_EL0. */
static inline uint64_t _AArch64_Read_dbgdtr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGDTR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGDTR_EL0. */
static inline void _AArch64_Write_dbgdtr_el0( uint64_t value )
{
  __asm__ volatile (
    "msr DBGDTR_EL0, %0" : : "r" ( value ) : "memory"
  );
}

/* DBGDTRRX_EL0, Debug Data Transfer Register, Receive */

/* Read DBGDTRRX_EL0 (receive side is read-only, hence no write accessor). */
static inline uint64_t _AArch64_Read_dbgdtrrx_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGDTRRX_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* DBGDTRTX_EL0, Debug Data Transfer Register, Transmit */

/* Write DBGDTRTX_EL0 (transmit side is write-only, hence no read accessor). */
static inline void _AArch64_Write_dbgdtrtx_el0( uint64_t value )
{
  __asm__ volatile (
    "msr DBGDTRTX_EL0, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGPRCR_EL1, Debug Power Control Register */

#define AARCH64_DBGPRCR_EL1_CORENPDRQ 0x1U

/* Read DBGPRCR_EL1. */
static inline uint64_t _AArch64_Read_dbgprcr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGPRCR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGPRCR_EL1. */
static inline void _AArch64_Write_dbgprcr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGPRCR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGVCR32_EL2, Debug Vector Catch Register */

/* Several bits have two generated names (e.g. SU/U, S/SS): the field name
   depends on the Security state configuration, but the bit position is the
   same, so the duplicate defines share a value. */
#define AARCH64_DBGVCR32_EL2_SU 0x2U

#define AARCH64_DBGVCR32_EL2_U 0x2U

#define AARCH64_DBGVCR32_EL2_S 0x4U

#define AARCH64_DBGVCR32_EL2_SS 0x4U

#define AARCH64_DBGVCR32_EL2_P 0x8U

#define AARCH64_DBGVCR32_EL2_SP 0x8U

#define AARCH64_DBGVCR32_EL2_D 0x10U

#define AARCH64_DBGVCR32_EL2_SD 0x10U

#define AARCH64_DBGVCR32_EL2_I 0x40U

#define AARCH64_DBGVCR32_EL2_SI 0x40U

#define AARCH64_DBGVCR32_EL2_F 0x80U

#define AARCH64_DBGVCR32_EL2_SF 0x80U

#define AARCH64_DBGVCR32_EL2_NSU 0x2000000U

#define AARCH64_DBGVCR32_EL2_NSS 0x4000000U

#define AARCH64_DBGVCR32_EL2_NSP 0x8000000U

#define AARCH64_DBGVCR32_EL2_NSD 0x10000000U

#define AARCH64_DBGVCR32_EL2_NSI 0x40000000U

#define AARCH64_DBGVCR32_EL2_NSF 0x80000000U

/* Read DBGVCR32_EL2. */
static inline uint64_t _AArch64_Read_dbgvcr32_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGVCR32_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGVCR32_EL2. */
static inline void _AArch64_Write_dbgvcr32_el2( uint64_t value )
{
  __asm__ volatile (
    "msr DBGVCR32_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGWCR_N_EL1, Debug Watchpoint Control Registers, n = 0 - 15 */

#define AARCH64_DBGWCR_N_EL1_E 0x1U

#define AARCH64_DBGWCR_N_EL1_PAC( _val ) ( ( _val ) << 1 )
#define AARCH64_DBGWCR_N_EL1_PAC_SHIFT 1
#define AARCH64_DBGWCR_N_EL1_PAC_MASK 0x6U
#define AARCH64_DBGWCR_N_EL1_PAC_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x3U )

#define AARCH64_DBGWCR_N_EL1_LSC( _val ) ( ( _val ) << 3 )
#define AARCH64_DBGWCR_N_EL1_LSC_SHIFT 3
#define AARCH64_DBGWCR_N_EL1_LSC_MASK 0x18U
#define AARCH64_DBGWCR_N_EL1_LSC_GET( _reg ) \
  ( ( ( _reg ) >> 3 ) & 0x3U )

/* Watchpoint BAS is 8 bits [12:5] (byte-address-select), wider than the
   4-bit breakpoint BAS. */
#define AARCH64_DBGWCR_N_EL1_BAS( _val ) ( ( _val ) << 5 )
#define AARCH64_DBGWCR_N_EL1_BAS_SHIFT 5
#define AARCH64_DBGWCR_N_EL1_BAS_MASK 0x1fe0U
#define AARCH64_DBGWCR_N_EL1_BAS_GET( _reg ) \
  ( ( ( _reg ) >> 5 ) & 0xffU )

#define AARCH64_DBGWCR_N_EL1_HMC 0x2000U

#define AARCH64_DBGWCR_N_EL1_SSC( _val ) ( ( _val ) << 14 )
#define AARCH64_DBGWCR_N_EL1_SSC_SHIFT 14
#define AARCH64_DBGWCR_N_EL1_SSC_MASK 0xc000U
#define AARCH64_DBGWCR_N_EL1_SSC_GET( _reg ) \
  ( ( ( _reg ) >> 14 ) & 0x3U )

#define AARCH64_DBGWCR_N_EL1_LBN( _val ) ( ( _val ) << 16 )
#define AARCH64_DBGWCR_N_EL1_LBN_SHIFT 16
#define AARCH64_DBGWCR_N_EL1_LBN_MASK 0xf0000U
#define AARCH64_DBGWCR_N_EL1_LBN_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_DBGWCR_N_EL1_WT 0x100000U

#define AARCH64_DBGWCR_N_EL1_MASK( _val ) ( ( _val ) << 24 )
#define AARCH64_DBGWCR_N_EL1_MASK_SHIFT 24
#define AARCH64_DBGWCR_N_EL1_MASK_MASK 0x1f000000U
#define AARCH64_DBGWCR_N_EL1_MASK_GET( _reg ) \
  ( ( ( _reg ) >> 24 ) & 0x1fU )

/* Read DBGWCR_N_EL1 ("N" is a generator placeholder for n = 0 - 15). */
static inline uint64_t _AArch64_Read_dbgwcr_n_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGWCR_N_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGWCR_N_EL1. */
static inline void _AArch64_Write_dbgwcr_n_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGWCR_N_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DBGWVR_N_EL1, Debug Watchpoint Value Registers, n = 0 - 15 */

#define AARCH64_DBGWVR_N_EL1_VA_48_2( _val ) ( ( _val ) << 2 )
#define AARCH64_DBGWVR_N_EL1_VA_48_2_SHIFT 2
#define AARCH64_DBGWVR_N_EL1_VA_48_2_MASK 0x1fffffffffffcULL
#define AARCH64_DBGWVR_N_EL1_VA_48_2_GET( _reg ) \
  ( ( ( _reg ) >> 2 ) & 0x7fffffffffffULL )

#define AARCH64_DBGWVR_N_EL1_VA_52_49( _val ) ( ( _val ) << 49 )
#define AARCH64_DBGWVR_N_EL1_VA_52_49_SHIFT 49
#define AARCH64_DBGWVR_N_EL1_VA_52_49_MASK 0x1e000000000000ULL
#define AARCH64_DBGWVR_N_EL1_VA_52_49_GET( _reg ) \
  ( ( ( _reg ) >> 49 ) & 0xfULL )

#define AARCH64_DBGWVR_N_EL1_RESS_14_4( _val ) ( ( _val ) << 53 )
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_SHIFT 53
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_MASK 0xffe0000000000000ULL
#define AARCH64_DBGWVR_N_EL1_RESS_14_4_GET( _reg ) \
  ( ( ( _reg ) >> 53 ) & 0x7ffULL )

/* Read DBGWVR_N_EL1 ("N" is a generator placeholder for n = 0 - 15). */
static inline uint64_t _AArch64_Read_dbgwvr_n_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DBGWVR_N_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DBGWVR_N_EL1. */
static inline void _AArch64_Write_dbgwvr_n_el1( uint64_t value )
{
  __asm__ volatile (
    "msr DBGWVR_N_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DLR_EL0, Debug Link Register */

/* Read DLR_EL0. */
static inline uint64_t _AArch64_Read_dlr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DLR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DLR_EL0. */
static inline void _AArch64_Write_dlr_el0( uint64_t value )
{
  __asm__ volatile (
    "msr DLR_EL0, %0" : : "r" ( value ) : "memory"
  );
}
+
/* DSPSR_EL0, Debug Saved Program Status Register */

/* As with other saved program status registers, some bit positions carry
   different field names for the AArch64 and AArch32 views (e.g. D/E at bit 9,
   BTYPE/IT_7_2 at bit 10, SSBS_1/UAO at bit 23), so duplicate defines share
   a value. */
#define AARCH64_DSPSR_EL0_M_3_0( _val ) ( ( _val ) << 0 )
#define AARCH64_DSPSR_EL0_M_3_0_SHIFT 0
#define AARCH64_DSPSR_EL0_M_3_0_MASK 0xfU
#define AARCH64_DSPSR_EL0_M_3_0_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xfU )

#define AARCH64_DSPSR_EL0_M_4 0x10U

#define AARCH64_DSPSR_EL0_T 0x20U

#define AARCH64_DSPSR_EL0_F 0x40U

#define AARCH64_DSPSR_EL0_I 0x80U

#define AARCH64_DSPSR_EL0_A 0x100U

#define AARCH64_DSPSR_EL0_D 0x200U

#define AARCH64_DSPSR_EL0_E 0x200U

#define AARCH64_DSPSR_EL0_BTYPE( _val ) ( ( _val ) << 10 )
#define AARCH64_DSPSR_EL0_BTYPE_SHIFT 10
#define AARCH64_DSPSR_EL0_BTYPE_MASK 0xc00U
#define AARCH64_DSPSR_EL0_BTYPE_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3U )

#define AARCH64_DSPSR_EL0_IT_7_2( _val ) ( ( _val ) << 10 )
#define AARCH64_DSPSR_EL0_IT_7_2_SHIFT 10
#define AARCH64_DSPSR_EL0_IT_7_2_MASK 0xfc00U
#define AARCH64_DSPSR_EL0_IT_7_2_GET( _reg ) \
  ( ( ( _reg ) >> 10 ) & 0x3fU )

#define AARCH64_DSPSR_EL0_SSBS_0 0x1000U

#define AARCH64_DSPSR_EL0_GE( _val ) ( ( _val ) << 16 )
#define AARCH64_DSPSR_EL0_GE_SHIFT 16
#define AARCH64_DSPSR_EL0_GE_MASK 0xf0000U
#define AARCH64_DSPSR_EL0_GE_GET( _reg ) \
  ( ( ( _reg ) >> 16 ) & 0xfU )

#define AARCH64_DSPSR_EL0_IL 0x100000U

#define AARCH64_DSPSR_EL0_SS 0x200000U

#define AARCH64_DSPSR_EL0_PAN 0x400000U

#define AARCH64_DSPSR_EL0_SSBS_1 0x800000U

#define AARCH64_DSPSR_EL0_UAO 0x800000U

#define AARCH64_DSPSR_EL0_DIT 0x1000000U

#define AARCH64_DSPSR_EL0_TCO 0x2000000U

#define AARCH64_DSPSR_EL0_IT_1_0( _val ) ( ( _val ) << 25 )
#define AARCH64_DSPSR_EL0_IT_1_0_SHIFT 25
#define AARCH64_DSPSR_EL0_IT_1_0_MASK 0x6000000U
#define AARCH64_DSPSR_EL0_IT_1_0_GET( _reg ) \
  ( ( ( _reg ) >> 25 ) & 0x3U )

#define AARCH64_DSPSR_EL0_Q 0x8000000U

#define AARCH64_DSPSR_EL0_V 0x10000000U

#define AARCH64_DSPSR_EL0_C 0x20000000U

#define AARCH64_DSPSR_EL0_Z 0x40000000U

#define AARCH64_DSPSR_EL0_N 0x80000000U

/* Read DSPSR_EL0. */
static inline uint64_t _AArch64_Read_dspsr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, DSPSR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write DSPSR_EL0. */
static inline void _AArch64_Write_dspsr_el0( uint64_t value )
{
  __asm__ volatile (
    "msr DSPSR_EL0, %0" : : "r" ( value ) : "memory"
  );
}
+
/* MDCCINT_EL1, Monitor DCC Interrupt Enable Register */

#define AARCH64_MDCCINT_EL1_TX 0x20000000U

#define AARCH64_MDCCINT_EL1_RX 0x40000000U

/* Read MDCCINT_EL1. */
static inline uint64_t _AArch64_Read_mdccint_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, MDCCINT_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write MDCCINT_EL1. */
static inline void _AArch64_Write_mdccint_el1( uint64_t value )
{
  __asm__ volatile (
    "msr MDCCINT_EL1, %0" : : "r" ( value ) : "memory"
  );
}

/* MDCCSR_EL0, Monitor DCC Status Register */

#define AARCH64_MDCCSR_EL0_TXFULL 0x20000000U

#define AARCH64_MDCCSR_EL0_RXFULL 0x40000000U

/* Read MDCCSR_EL0 (status register is read-only: no write accessor). */
static inline uint64_t _AArch64_Read_mdccsr_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, MDCCSR_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
/* MDCR_EL2, Monitor Debug Configuration Register (EL2) */

#define AARCH64_MDCR_EL2_HPMN( _val ) ( ( _val ) << 0 )
#define AARCH64_MDCR_EL2_HPMN_SHIFT 0
#define AARCH64_MDCR_EL2_HPMN_MASK 0x1fU
#define AARCH64_MDCR_EL2_HPMN_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x1fU )

#define AARCH64_MDCR_EL2_TPMCR 0x20U

#define AARCH64_MDCR_EL2_TPM 0x40U

#define AARCH64_MDCR_EL2_HPME 0x80U

#define AARCH64_MDCR_EL2_TDE 0x100U

#define AARCH64_MDCR_EL2_TDA 0x200U

#define AARCH64_MDCR_EL2_TDOSA 0x400U

#define AARCH64_MDCR_EL2_TDRA 0x800U

#define AARCH64_MDCR_EL2_E2PB( _val ) ( ( _val ) << 12 )
#define AARCH64_MDCR_EL2_E2PB_SHIFT 12
#define AARCH64_MDCR_EL2_E2PB_MASK 0x3000U
#define AARCH64_MDCR_EL2_E2PB_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0x3U )

#define AARCH64_MDCR_EL2_TPMS 0x4000U

#define AARCH64_MDCR_EL2_HPMD 0x20000U

#define AARCH64_MDCR_EL2_TTRF 0x80000U

#define AARCH64_MDCR_EL2_HCCD 0x800000U

#define AARCH64_MDCR_EL2_HLP 0x4000000U

#define AARCH64_MDCR_EL2_TDCC 0x8000000U

#define AARCH64_MDCR_EL2_MTPME 0x10000000U

/* Read MDCR_EL2. */
static inline uint64_t _AArch64_Read_mdcr_el2( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, MDCR_EL2" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Write MDCR_EL2. */
static inline void _AArch64_Write_mdcr_el2( uint64_t value )
{
  __asm__ volatile (
    "msr MDCR_EL2, %0" : : "r" ( value ) : "memory"
  );
}
+
+/* MDCR_EL3, Monitor Debug Configuration Register (EL3) */
+
+#define AARCH64_MDCR_EL3_TPM 0x40U
+
+#define AARCH64_MDCR_EL3_TDA 0x200U
+
+#define AARCH64_MDCR_EL3_TDOSA 0x400U
+
+#define AARCH64_MDCR_EL3_NSPB( _val ) ( ( _val ) << 12 )
+#define AARCH64_MDCR_EL3_NSPB_SHIFT 12
+#define AARCH64_MDCR_EL3_NSPB_MASK 0x3000U
+#define AARCH64_MDCR_EL3_NSPB_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0x3U )
+
+#define AARCH64_MDCR_EL3_SPD32( _val ) ( ( _val ) << 14 )
+#define AARCH64_MDCR_EL3_SPD32_SHIFT 14
+#define AARCH64_MDCR_EL3_SPD32_MASK 0xc000U
+#define AARCH64_MDCR_EL3_SPD32_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_MDCR_EL3_SDD 0x10000U
+
+#define AARCH64_MDCR_EL3_SPME 0x20000U
+
+#define AARCH64_MDCR_EL3_STE 0x40000U
+
+#define AARCH64_MDCR_EL3_TTRF 0x80000U
+
+#define AARCH64_MDCR_EL3_EDAD 0x100000U
+
+#define AARCH64_MDCR_EL3_EPMAD 0x200000U
+
+#define AARCH64_MDCR_EL3_SCCD 0x800000U
+
+#define AARCH64_MDCR_EL3_TDCC 0x8000000U
+
+#define AARCH64_MDCR_EL3_MTPME 0x10000000U
+
+static inline uint64_t _AArch64_Read_mdcr_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MDCR_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_mdcr_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr MDCR_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
/* MDRAR_EL1, Monitor Debug ROM Address Register (read-only) */

#define AARCH64_MDRAR_EL1_VALID( _val ) ( ( _val ) << 0 )
#define AARCH64_MDRAR_EL1_VALID_SHIFT 0
#define AARCH64_MDRAR_EL1_VALID_MASK 0x3U
#define AARCH64_MDRAR_EL1_VALID_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3U )

/* The ROMADDR fields live above bit 31, so the encode helpers cast the
 * argument to uint64_t before shifting; without the cast a 32-bit
 * argument would be shifted past the width of unsigned int, which is
 * undefined behavior (C11 6.5.7) and truncates the result. */
#define AARCH64_MDRAR_EL1_ROMADDR_47_12( _val ) \
  ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_MDRAR_EL1_ROMADDR_47_12_SHIFT 12
#define AARCH64_MDRAR_EL1_ROMADDR_47_12_MASK 0xfffffffff000ULL
#define AARCH64_MDRAR_EL1_ROMADDR_47_12_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfffffffffULL )

#define AARCH64_MDRAR_EL1_ROMADDR_51_48( _val ) \
  ( ( uint64_t ) ( _val ) << 48 )
#define AARCH64_MDRAR_EL1_ROMADDR_51_48_SHIFT 48
#define AARCH64_MDRAR_EL1_ROMADDR_51_48_MASK 0xf000000000000ULL
#define AARCH64_MDRAR_EL1_ROMADDR_51_48_GET( _reg ) \
  ( ( ( _reg ) >> 48 ) & 0xfULL )

/* Reads MDRAR_EL1; the register is read-only, hence no write accessor. */
static inline uint64_t _AArch64_Read_mdrar_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, MDRAR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}
+
+/* MDSCR_EL1, Monitor Debug System Control Register */
+
+#define AARCH64_MDSCR_EL1_SS 0x1U
+
+#define AARCH64_MDSCR_EL1_ERR 0x40U
+
+#define AARCH64_MDSCR_EL1_TDCC 0x1000U
+
+#define AARCH64_MDSCR_EL1_KDE 0x2000U
+
+#define AARCH64_MDSCR_EL1_HDE 0x4000U
+
+#define AARCH64_MDSCR_EL1_MDE 0x8000U
+
+#define AARCH64_MDSCR_EL1_SC2 0x80000U
+
+#define AARCH64_MDSCR_EL1_TDA 0x200000U
+
+#define AARCH64_MDSCR_EL1_INTDIS( _val ) ( ( _val ) << 22 )
+#define AARCH64_MDSCR_EL1_INTDIS_SHIFT 22
+#define AARCH64_MDSCR_EL1_INTDIS_MASK 0xc00000U
+#define AARCH64_MDSCR_EL1_INTDIS_GET( _reg ) \
+ ( ( ( _reg ) >> 22 ) & 0x3U )
+
+#define AARCH64_MDSCR_EL1_TXU 0x4000000U
+
+#define AARCH64_MDSCR_EL1_RXO 0x8000000U
+
+#define AARCH64_MDSCR_EL1_TXFULL 0x20000000U
+
+#define AARCH64_MDSCR_EL1_RXFULL 0x40000000U
+
+#define AARCH64_MDSCR_EL1_TFO 0x80000000U
+
+static inline uint64_t _AArch64_Read_mdscr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, MDSCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_mdscr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr MDSCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSDLR_EL1, OS Double Lock Register */
+
+#define AARCH64_OSDLR_EL1_DLK 0x1U
+
+static inline uint64_t _AArch64_Read_osdlr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, OSDLR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_osdlr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr OSDLR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSDTRRX_EL1, OS Lock Data Transfer Register, Receive */
+
+static inline uint64_t _AArch64_Read_osdtrrx_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, OSDTRRX_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_osdtrrx_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr OSDTRRX_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSDTRTX_EL1, OS Lock Data Transfer Register, Transmit */
+
+static inline uint64_t _AArch64_Read_osdtrtx_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, OSDTRTX_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_osdtrtx_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr OSDTRTX_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSECCR_EL1, OS Lock Exception Catch Control Register */
+
+#define AARCH64_OSECCR_EL1_EDECCR( _val ) ( ( _val ) << 0 )
+#define AARCH64_OSECCR_EL1_EDECCR_SHIFT 0
+#define AARCH64_OSECCR_EL1_EDECCR_MASK 0xffffffffU
+#define AARCH64_OSECCR_EL1_EDECCR_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_oseccr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, OSECCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_oseccr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr OSECCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSLAR_EL1, OS Lock Access Register */
+
+#define AARCH64_OSLAR_EL1_OSLK 0x1U
+
+static inline void _AArch64_Write_oslar_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr OSLAR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* OSLSR_EL1, OS Lock Status Register */
+
+#define AARCH64_OSLSR_EL1_OSLM_0 0x1U
+
+#define AARCH64_OSLSR_EL1_OSLK 0x2U
+
+#define AARCH64_OSLSR_EL1_NTT 0x4U
+
+#define AARCH64_OSLSR_EL1_OSLM_1 0x8U
+
/* Returns the current contents of OSLSR_EL1 (OS Lock status). */
static inline uint64_t _AArch64_Read_oslsr_el1( void )
{
  uint64_t oslsr;

  __asm__ volatile ( "mrs %0, OSLSR_EL1" : "=&r" ( oslsr ) : : "memory" );

  return oslsr;
}
+
+/* SDER32_EL2, AArch64 Secure Debug Enable Register */
+
+#define AARCH64_SDER32_EL2_SUIDEN 0x1U
+
+#define AARCH64_SDER32_EL2_SUNIDEN 0x2U
+
+static inline uint64_t _AArch64_Read_sder32_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SDER32_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_sder32_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SDER32_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* SDER32_EL3, AArch64 Secure Debug Enable Register */
+
+#define AARCH64_SDER32_EL3_SUIDEN 0x1U
+
+#define AARCH64_SDER32_EL3_SUNIDEN 0x2U
+
+static inline uint64_t _AArch64_Read_sder32_el3( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, SDER32_EL3" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_sder32_el3( uint64_t value )
+{
+ __asm__ volatile (
+ "msr SDER32_EL3, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TRFCR_EL1, Trace Filter Control Register (EL1) */
+
+#define AARCH64_TRFCR_EL1_E0TRE 0x1U
+
+#define AARCH64_TRFCR_EL1_E1TRE 0x2U
+
+#define AARCH64_TRFCR_EL1_TS( _val ) ( ( _val ) << 5 )
+#define AARCH64_TRFCR_EL1_TS_SHIFT 5
+#define AARCH64_TRFCR_EL1_TS_MASK 0x60U
+#define AARCH64_TRFCR_EL1_TS_GET( _reg ) \
+ ( ( ( _reg ) >> 5 ) & 0x3U )
+
+static inline uint64_t _AArch64_Read_trfcr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TRFCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_trfcr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TRFCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* TRFCR_EL2, Trace Filter Control Register (EL2) */
+
+#define AARCH64_TRFCR_EL2_E0HTRE 0x1U
+
+#define AARCH64_TRFCR_EL2_E2TRE 0x2U
+
+#define AARCH64_TRFCR_EL2_CX 0x8U
+
+#define AARCH64_TRFCR_EL2_TS( _val ) ( ( _val ) << 5 )
+#define AARCH64_TRFCR_EL2_TS_SHIFT 5
+#define AARCH64_TRFCR_EL2_TS_MASK 0x60U
+#define AARCH64_TRFCR_EL2_TS_GET( _reg ) \
+ ( ( ( _reg ) >> 5 ) & 0x3U )
+
+static inline uint64_t _AArch64_Read_trfcr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, TRFCR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_trfcr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr TRFCR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMCCFILTR_EL0, Performance Monitors Cycle Count Filter Register */
+
+#define AARCH64_PMCCFILTR_EL0_SH 0x1000000U
+
+#define AARCH64_PMCCFILTR_EL0_M 0x4000000U
+
+#define AARCH64_PMCCFILTR_EL0_NSH 0x8000000U
+
+#define AARCH64_PMCCFILTR_EL0_NSU 0x10000000U
+
+#define AARCH64_PMCCFILTR_EL0_NSK 0x20000000U
+
+#define AARCH64_PMCCFILTR_EL0_U 0x40000000U
+
+#define AARCH64_PMCCFILTR_EL0_P 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmccfiltr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCCFILTR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmccfiltr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMCCFILTR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMCCNTR_EL0, Performance Monitors Cycle Count Register */
+
+static inline uint64_t _AArch64_Read_pmccntr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCCNTR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmccntr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMCCNTR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMCEID0_EL0, Performance Monitors Common Event Identification Register 0 */
+
+static inline uint64_t _AArch64_Read_pmceid0_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCEID0_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* PMCEID1_EL0, Performance Monitors Common Event Identification Register 1 */
+
+static inline uint64_t _AArch64_Read_pmceid1_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCEID1_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* PMCNTENCLR_EL0, Performance Monitors Count Enable Clear Register */
+
+#define AARCH64_PMCNTENCLR_EL0_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmcntenclr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCNTENCLR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmcntenclr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMCNTENCLR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMCNTENSET_EL0, Performance Monitors Count Enable Set Register */
+
+#define AARCH64_PMCNTENSET_EL0_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmcntenset_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCNTENSET_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmcntenset_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMCNTENSET_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMCR_EL0, Performance Monitors Control Register */
+
+#define AARCH64_PMCR_EL0_E 0x1U
+
+#define AARCH64_PMCR_EL0_P 0x2U
+
+#define AARCH64_PMCR_EL0_C 0x4U
+
+#define AARCH64_PMCR_EL0_D 0x8U
+
+#define AARCH64_PMCR_EL0_X 0x10U
+
+#define AARCH64_PMCR_EL0_DP 0x20U
+
+#define AARCH64_PMCR_EL0_LC 0x40U
+
+#define AARCH64_PMCR_EL0_LP 0x80U
+
+#define AARCH64_PMCR_EL0_N( _val ) ( ( _val ) << 11 )
+#define AARCH64_PMCR_EL0_N_SHIFT 11
+#define AARCH64_PMCR_EL0_N_MASK 0xf800U
+#define AARCH64_PMCR_EL0_N_GET( _reg ) \
+ ( ( ( _reg ) >> 11 ) & 0x1fU )
+
+#define AARCH64_PMCR_EL0_IDCODE( _val ) ( ( _val ) << 16 )
+#define AARCH64_PMCR_EL0_IDCODE_SHIFT 16
+#define AARCH64_PMCR_EL0_IDCODE_MASK 0xff0000U
+#define AARCH64_PMCR_EL0_IDCODE_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xffU )
+
+#define AARCH64_PMCR_EL0_IMP( _val ) ( ( _val ) << 24 )
+#define AARCH64_PMCR_EL0_IMP_SHIFT 24
+#define AARCH64_PMCR_EL0_IMP_MASK 0xff000000U
+#define AARCH64_PMCR_EL0_IMP_GET( _reg ) \
+ ( ( ( _reg ) >> 24 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_pmcr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMCR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmcr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMCR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
/* PMEVCNTR_N_EL0, Performance Monitors Event Count Registers, n = 0 - 30 */

/* NOTE(review): "PMEVCNTR_N_EL0" is the generator's placeholder for the
 * PMEVCNTR<n>_EL0 family, not an architectural register name, so the mrs
 * below cannot assemble as written.  Because the function is static
 * inline and apparently unused it is never emitted; confirm no caller
 * actually relies on these two accessors. */
static inline uint64_t _AArch64_Read_pmevcntr_n_el0( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, PMEVCNTR_N_EL0" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Same placeholder-name caveat as the read accessor above. */
static inline void _AArch64_Write_pmevcntr_n_el0( uint64_t value )
{
  __asm__ volatile (
    "msr PMEVCNTR_N_EL0, %0" : : "r" ( value ) : "memory"
  );
}
+
+/* PMEVTYPER_N_EL0, Performance Monitors Event Type Registers, n = 0 - 30 */
+
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_SHIFT 0
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_MASK 0x3ffU
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_9_0_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3ffU )
+
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10( _val ) ( ( _val ) << 10 )
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_SHIFT 10
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_MASK 0xfc00U
+#define AARCH64_PMEVTYPER_N_EL0_EVTCOUNT_15_10_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x3fU )
+
+#define AARCH64_PMEVTYPER_N_EL0_SH 0x1000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_MT 0x2000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_M 0x4000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_NSH 0x8000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_NSU 0x10000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_NSK 0x20000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_U 0x40000000U
+
+#define AARCH64_PMEVTYPER_N_EL0_P 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmevtyper_n_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMEVTYPER_N_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmevtyper_n_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMEVTYPER_N_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMINTENCLR_EL1, Performance Monitors Interrupt Enable Clear Register */
+
+#define AARCH64_PMINTENCLR_EL1_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmintenclr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMINTENCLR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmintenclr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMINTENCLR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMINTENSET_EL1, Performance Monitors Interrupt Enable Set Register */
+
+#define AARCH64_PMINTENSET_EL1_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmintenset_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMINTENSET_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmintenset_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMINTENSET_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMMIR_EL1, Performance Monitors Machine Identification Register */
+
+#define AARCH64_PMMIR_EL1_SLOTS( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMMIR_EL1_SLOTS_SHIFT 0
+#define AARCH64_PMMIR_EL1_SLOTS_MASK 0xffU
+#define AARCH64_PMMIR_EL1_SLOTS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_pmmir_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMMIR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* PMOVSCLR_EL0, Performance Monitors Overflow Flag Status Clear Register */
+
+#define AARCH64_PMOVSCLR_EL0_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmovsclr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMOVSCLR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmovsclr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMOVSCLR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMOVSSET_EL0, Performance Monitors Overflow Flag Status Set Register */
+
+#define AARCH64_PMOVSSET_EL0_C 0x80000000U
+
+static inline uint64_t _AArch64_Read_pmovsset_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMOVSSET_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmovsset_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMOVSSET_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSELR_EL0, Performance Monitors Event Counter Selection Register */
+
+#define AARCH64_PMSELR_EL0_SEL( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMSELR_EL0_SEL_SHIFT 0
+#define AARCH64_PMSELR_EL0_SEL_MASK 0x1fU
+#define AARCH64_PMSELR_EL0_SEL_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x1fU )
+
+static inline uint64_t _AArch64_Read_pmselr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSELR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmselr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSELR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSWINC_EL0, Performance Monitors Software Increment Register */
+
+static inline void _AArch64_Write_pmswinc_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSWINC_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMUSERENR_EL0, Performance Monitors User Enable Register */
+
+#define AARCH64_PMUSERENR_EL0_EN 0x1U
+
+#define AARCH64_PMUSERENR_EL0_SW 0x2U
+
+#define AARCH64_PMUSERENR_EL0_CR 0x4U
+
+#define AARCH64_PMUSERENR_EL0_ER 0x8U
+
+static inline uint64_t _AArch64_Read_pmuserenr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMUSERENR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmuserenr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMUSERENR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMXEVCNTR_EL0, Performance Monitors Selected Event Count Register */
+
+#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_SHIFT 0
+#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_MASK 0xffffffffU
+#define AARCH64_PMXEVCNTR_EL0_PMEVCNTR_N_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_pmxevcntr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMXEVCNTR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmxevcntr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMXEVCNTR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMXEVTYPER_EL0, Performance Monitors Selected Event Type Register */
+
+static inline uint64_t _AArch64_Read_pmxevtyper_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMXEVTYPER_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmxevtyper_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMXEVTYPER_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMCFGR_EL0, Activity Monitors Configuration Register */
+
+#define AARCH64_AMCFGR_EL0_N( _val ) ( ( _val ) << 0 )
+#define AARCH64_AMCFGR_EL0_N_SHIFT 0
+#define AARCH64_AMCFGR_EL0_N_MASK 0xffU
+#define AARCH64_AMCFGR_EL0_N_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+#define AARCH64_AMCFGR_EL0_SIZE( _val ) ( ( _val ) << 8 )
+#define AARCH64_AMCFGR_EL0_SIZE_SHIFT 8
+#define AARCH64_AMCFGR_EL0_SIZE_MASK 0x3f00U
+#define AARCH64_AMCFGR_EL0_SIZE_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0x3fU )
+
+#define AARCH64_AMCFGR_EL0_HDBG 0x1000000U
+
+#define AARCH64_AMCFGR_EL0_NCG( _val ) ( ( _val ) << 28 )
+#define AARCH64_AMCFGR_EL0_NCG_SHIFT 28
+#define AARCH64_AMCFGR_EL0_NCG_MASK 0xf0000000U
+#define AARCH64_AMCFGR_EL0_NCG_GET( _reg ) \
+ ( ( ( _reg ) >> 28 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_amcfgr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCFGR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* AMCG1IDR_EL0, Activity Monitors Counter Group 1 Identification Register */
+
+static inline uint64_t _AArch64_Read_amcg1idr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCG1IDR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* AMCGCR_EL0, Activity Monitors Counter Group Configuration Register */
+
+#define AARCH64_AMCGCR_EL0_CG0NC( _val ) ( ( _val ) << 0 )
+#define AARCH64_AMCGCR_EL0_CG0NC_SHIFT 0
+#define AARCH64_AMCGCR_EL0_CG0NC_MASK 0xffU
+#define AARCH64_AMCGCR_EL0_CG0NC_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffU )
+
+#define AARCH64_AMCGCR_EL0_CG1NC( _val ) ( ( _val ) << 8 )
+#define AARCH64_AMCGCR_EL0_CG1NC_SHIFT 8
+#define AARCH64_AMCGCR_EL0_CG1NC_MASK 0xff00U
+#define AARCH64_AMCGCR_EL0_CG1NC_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xffU )
+
+static inline uint64_t _AArch64_Read_amcgcr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCGCR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* AMCNTENCLR0_EL0, Activity Monitors Count Enable Clear Register 0 */
+
+static inline uint64_t _AArch64_Read_amcntenclr0_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCNTENCLR0_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amcntenclr0_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMCNTENCLR0_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMCNTENCLR1_EL0, Activity Monitors Count Enable Clear Register 1 */
+
+static inline uint64_t _AArch64_Read_amcntenclr1_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCNTENCLR1_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amcntenclr1_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMCNTENCLR1_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMCNTENSET0_EL0, Activity Monitors Count Enable Set Register 0 */
+
+static inline uint64_t _AArch64_Read_amcntenset0_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCNTENSET0_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amcntenset0_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMCNTENSET0_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMCNTENSET1_EL0, Activity Monitors Count Enable Set Register 1 */
+
+static inline uint64_t _AArch64_Read_amcntenset1_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCNTENSET1_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amcntenset1_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMCNTENSET1_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMCR_EL0, Activity Monitors Control Register */
+
+#define AARCH64_AMCR_EL0_HDBG 0x400U
+
+#define AARCH64_AMCR_EL0_CG1RZ 0x20000U
+
+static inline uint64_t _AArch64_Read_amcr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMCR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amcr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMCR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMEVCNTR0_N_EL0, Activity Monitors Event Counter Registers 0, n = 0 - 15 */
+
+static inline uint64_t _AArch64_Read_amevcntr0_n_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVCNTR0_N_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amevcntr0_n_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMEVCNTR0_N_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMEVCNTR1_N_EL0, Activity Monitors Event Counter Registers 1, n = 0 - 15 */
+
+static inline uint64_t _AArch64_Read_amevcntr1_n_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVCNTR1_N_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amevcntr1_n_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMEVCNTR1_N_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMEVCNTVOFF0_N_EL2, Activity Monitors Event Counter Virtual Offset Registers 0, n = 0 - 15 */
+
+static inline uint64_t _AArch64_Read_amevcntvoff0_n_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVCNTVOFF0_N_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amevcntvoff0_n_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMEVCNTVOFF0_N_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMEVCNTVOFF1_N_EL2, Activity Monitors Event Counter Virtual Offset Registers 1, n = 0 - 15 */
+
+static inline uint64_t _AArch64_Read_amevcntvoff1_n_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVCNTVOFF1_N_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amevcntvoff1_n_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMEVCNTVOFF1_N_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMEVTYPER0_N_EL0, Activity Monitors Event Type Registers 0, n = 0 - 15 */
+
+#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT( _val ) ( ( _val ) << 0 )
+#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_SHIFT 0
+#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_MASK 0xffffU
+#define AARCH64_AMEVTYPER0_N_EL0_EVTCOUNT_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffU )
+
+static inline uint64_t _AArch64_Read_amevtyper0_n_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVTYPER0_N_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* AMEVTYPER1_N_EL0, Activity Monitors Event Type Registers 1, n = 0 - 15 */
+
+#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT( _val ) ( ( _val ) << 0 )
+#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_SHIFT 0
+#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_MASK 0xffffU
+#define AARCH64_AMEVTYPER1_N_EL0_EVTCOUNT_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffU )
+
+static inline uint64_t _AArch64_Read_amevtyper1_n_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMEVTYPER1_N_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amevtyper1_n_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMEVTYPER1_N_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* AMUSERENR_EL0, Activity Monitors User Enable Register */
+
+#define AARCH64_AMUSERENR_EL0_EN 0x1U
+
+static inline uint64_t _AArch64_Read_amuserenr_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, AMUSERENR_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_amuserenr_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr AMUSERENR_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMBIDR_EL1, Profiling Buffer ID Register */
+
+#define AARCH64_PMBIDR_EL1_ALIGN( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMBIDR_EL1_ALIGN_SHIFT 0
+#define AARCH64_PMBIDR_EL1_ALIGN_MASK 0xfU
+#define AARCH64_PMBIDR_EL1_ALIGN_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_PMBIDR_EL1_P 0x10U
+
+#define AARCH64_PMBIDR_EL1_F 0x20U
+
+static inline uint64_t _AArch64_Read_pmbidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMBIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
/* PMBLIMITR_EL1, Profiling Buffer Limit Address Register */

#define AARCH64_PMBLIMITR_EL1_E 0x1U

#define AARCH64_PMBLIMITR_EL1_FM( _val ) ( ( _val ) << 1 )
#define AARCH64_PMBLIMITR_EL1_FM_SHIFT 1
#define AARCH64_PMBLIMITR_EL1_FM_MASK 0x6U
#define AARCH64_PMBLIMITR_EL1_FM_GET( _reg ) \
  ( ( ( _reg ) >> 1 ) & 0x3U )

/* LIMIT covers bits [63:12]; cast the argument to uint64_t before
 * shifting so a 32-bit argument is not truncated by a 32-bit shift. */
#define AARCH64_PMBLIMITR_EL1_LIMIT( _val ) \
  ( ( uint64_t ) ( _val ) << 12 )
#define AARCH64_PMBLIMITR_EL1_LIMIT_SHIFT 12
#define AARCH64_PMBLIMITR_EL1_LIMIT_MASK 0xfffffffffffff000ULL
#define AARCH64_PMBLIMITR_EL1_LIMIT_GET( _reg ) \
  ( ( ( _reg ) >> 12 ) & 0xfffffffffffffULL )

/* Reads PMBLIMITR_EL1 via MRS. */
static inline uint64_t _AArch64_Read_pmblimitr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, PMBLIMITR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Writes PMBLIMITR_EL1 via MSR. */
static inline void _AArch64_Write_pmblimitr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr PMBLIMITR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
+/* PMBPTR_EL1, Profiling Buffer Write Pointer Register */
+
+static inline uint64_t _AArch64_Read_pmbptr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMBPTR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmbptr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMBPTR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
/* PMBSR_EL1, Profiling Buffer Status/syndrome Register */

/* NOTE(review): BSC and FSC are alternative decodings of the same bits
 * [5:0], and MSS [15:0] overlaps both.  Presumably which decoding applies
 * depends on the EC field below — verify against the Arm ARM before using
 * these field helpers together. */
#define AARCH64_PMBSR_EL1_BSC( _val ) ( ( _val ) << 0 )
#define AARCH64_PMBSR_EL1_BSC_SHIFT 0
#define AARCH64_PMBSR_EL1_BSC_MASK 0x3fU
#define AARCH64_PMBSR_EL1_BSC_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_PMBSR_EL1_FSC( _val ) ( ( _val ) << 0 )
#define AARCH64_PMBSR_EL1_FSC_SHIFT 0
#define AARCH64_PMBSR_EL1_FSC_MASK 0x3fU
#define AARCH64_PMBSR_EL1_FSC_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0x3fU )

#define AARCH64_PMBSR_EL1_MSS( _val ) ( ( _val ) << 0 )
#define AARCH64_PMBSR_EL1_MSS_SHIFT 0
#define AARCH64_PMBSR_EL1_MSS_MASK 0xffffU
#define AARCH64_PMBSR_EL1_MSS_GET( _reg ) \
  ( ( ( _reg ) >> 0 ) & 0xffffU )

#define AARCH64_PMBSR_EL1_COLL 0x10000U

#define AARCH64_PMBSR_EL1_S 0x20000U

#define AARCH64_PMBSR_EL1_EA 0x40000U

#define AARCH64_PMBSR_EL1_DL 0x80000U

#define AARCH64_PMBSR_EL1_EC( _val ) ( ( _val ) << 26 )
#define AARCH64_PMBSR_EL1_EC_SHIFT 26
#define AARCH64_PMBSR_EL1_EC_MASK 0xfc000000U
#define AARCH64_PMBSR_EL1_EC_GET( _reg ) \
  ( ( ( _reg ) >> 26 ) & 0x3fU )

/* Reads PMBSR_EL1 via MRS. */
static inline uint64_t _AArch64_Read_pmbsr_el1( void )
{
  uint64_t value;

  __asm__ volatile (
    "mrs %0, PMBSR_EL1" : "=&r" ( value ) : : "memory"
  );

  return value;
}

/* Writes PMBSR_EL1 via MSR. */
static inline void _AArch64_Write_pmbsr_el1( uint64_t value )
{
  __asm__ volatile (
    "msr PMBSR_EL1, %0" : : "r" ( value ) : "memory"
  );
}
+
+/* PMSCR_EL1, Statistical Profiling Control Register (EL1) */
+
+#define AARCH64_PMSCR_EL1_E0SPE 0x1U
+
+#define AARCH64_PMSCR_EL1_E1SPE 0x2U
+
+#define AARCH64_PMSCR_EL1_CX 0x8U
+
+#define AARCH64_PMSCR_EL1_PA 0x10U
+
+#define AARCH64_PMSCR_EL1_TS 0x20U
+
+#define AARCH64_PMSCR_EL1_PCT( _val ) ( ( _val ) << 6 )
+#define AARCH64_PMSCR_EL1_PCT_SHIFT 6
+#define AARCH64_PMSCR_EL1_PCT_MASK 0xc0U
+#define AARCH64_PMSCR_EL1_PCT_GET( _reg ) \
+ ( ( ( _reg ) >> 6 ) & 0x3U )
+
+static inline uint64_t _AArch64_Read_pmscr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmscr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSCR_EL2, Statistical Profiling Control Register (EL2) */
+
+#define AARCH64_PMSCR_EL2_E0HSPE 0x1U
+
+#define AARCH64_PMSCR_EL2_E2SPE 0x2U
+
+#define AARCH64_PMSCR_EL2_CX 0x8U
+
+#define AARCH64_PMSCR_EL2_PA 0x10U
+
+#define AARCH64_PMSCR_EL2_TS 0x20U
+
+#define AARCH64_PMSCR_EL2_PCT( _val ) ( ( _val ) << 6 )
+#define AARCH64_PMSCR_EL2_PCT_SHIFT 6
+#define AARCH64_PMSCR_EL2_PCT_MASK 0xc0U
+#define AARCH64_PMSCR_EL2_PCT_GET( _reg ) \
+ ( ( ( _reg ) >> 6 ) & 0x3U )
+
+static inline uint64_t _AArch64_Read_pmscr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSCR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmscr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSCR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSEVFR_EL1, Sampling Event Filter Register */
+
+#define AARCH64_PMSEVFR_EL1_E_1 0x2U
+
+#define AARCH64_PMSEVFR_EL1_E_3 0x8U
+
+#define AARCH64_PMSEVFR_EL1_E_5 0x20U
+
+#define AARCH64_PMSEVFR_EL1_E_7 0x80U
+
+#define AARCH64_PMSEVFR_EL1_E_11 0x800U
+
+#define AARCH64_PMSEVFR_EL1_E_12 0x1000U
+
+#define AARCH64_PMSEVFR_EL1_E_13 0x2000U
+
+#define AARCH64_PMSEVFR_EL1_E_14 0x4000U
+
+#define AARCH64_PMSEVFR_EL1_E_15 0x8000U
+
+#define AARCH64_PMSEVFR_EL1_E_17 0x20000U
+
+#define AARCH64_PMSEVFR_EL1_E_18 0x40000U
+
+#define AARCH64_PMSEVFR_EL1_E_24 0x1000000U
+
+#define AARCH64_PMSEVFR_EL1_E_25 0x2000000U
+
+#define AARCH64_PMSEVFR_EL1_E_26 0x4000000U
+
+#define AARCH64_PMSEVFR_EL1_E_27 0x8000000U
+
+#define AARCH64_PMSEVFR_EL1_E_28 0x10000000U
+
+#define AARCH64_PMSEVFR_EL1_E_29 0x20000000U
+
+#define AARCH64_PMSEVFR_EL1_E_30 0x40000000U
+
+#define AARCH64_PMSEVFR_EL1_E_31 0x80000000U
+
+#define AARCH64_PMSEVFR_EL1_E_48 0x1000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_49 0x2000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_50 0x4000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_51 0x8000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_52 0x10000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_53 0x20000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_54 0x40000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_55 0x80000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_56 0x100000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_57 0x200000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_58 0x400000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_59 0x800000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_60 0x1000000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_61 0x2000000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_62 0x4000000000000000ULL
+
+#define AARCH64_PMSEVFR_EL1_E_63 0x8000000000000000ULL
+
+static inline uint64_t _AArch64_Read_pmsevfr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSEVFR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmsevfr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSEVFR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSFCR_EL1, Sampling Filter Control Register */
+
+#define AARCH64_PMSFCR_EL1_FE 0x1U
+
+#define AARCH64_PMSFCR_EL1_FT 0x2U
+
+#define AARCH64_PMSFCR_EL1_FL 0x4U
+
+#define AARCH64_PMSFCR_EL1_B 0x10000U
+
+#define AARCH64_PMSFCR_EL1_LD 0x20000U
+
+#define AARCH64_PMSFCR_EL1_ST 0x40000U
+
+static inline uint64_t _AArch64_Read_pmsfcr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSFCR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmsfcr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSFCR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSICR_EL1, Sampling Interval Counter Register */
+
+#define AARCH64_PMSICR_EL1_COUNT( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMSICR_EL1_COUNT_SHIFT 0
+#define AARCH64_PMSICR_EL1_COUNT_MASK 0xffffffffU
+#define AARCH64_PMSICR_EL1_COUNT_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+#define AARCH64_PMSICR_EL1_ECOUNT( _val ) ( ( _val ) << 56 )
+#define AARCH64_PMSICR_EL1_ECOUNT_SHIFT 56
+#define AARCH64_PMSICR_EL1_ECOUNT_MASK 0xff00000000000000ULL
+#define AARCH64_PMSICR_EL1_ECOUNT_GET( _reg ) \
+ ( ( ( _reg ) >> 56 ) & 0xffULL )
+
+static inline uint64_t _AArch64_Read_pmsicr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSICR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmsicr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSICR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSIDR_EL1, Sampling Profiling ID Register */
+
+#define AARCH64_PMSIDR_EL1_FE 0x1U
+
+#define AARCH64_PMSIDR_EL1_FT 0x2U
+
+#define AARCH64_PMSIDR_EL1_FL 0x4U
+
+#define AARCH64_PMSIDR_EL1_ARCHINST 0x8U
+
+#define AARCH64_PMSIDR_EL1_LDS 0x10U
+
+#define AARCH64_PMSIDR_EL1_ERND 0x20U
+
+#define AARCH64_PMSIDR_EL1_INTERVAL( _val ) ( ( _val ) << 8 )
+#define AARCH64_PMSIDR_EL1_INTERVAL_SHIFT 8
+#define AARCH64_PMSIDR_EL1_INTERVAL_MASK 0xf00U
+#define AARCH64_PMSIDR_EL1_INTERVAL_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xfU )
+
+#define AARCH64_PMSIDR_EL1_MAXSIZE( _val ) ( ( _val ) << 12 )
+#define AARCH64_PMSIDR_EL1_MAXSIZE_SHIFT 12
+#define AARCH64_PMSIDR_EL1_MAXSIZE_MASK 0xf000U
+#define AARCH64_PMSIDR_EL1_MAXSIZE_GET( _reg ) \
+ ( ( ( _reg ) >> 12 ) & 0xfU )
+
+#define AARCH64_PMSIDR_EL1_COUNTSIZE( _val ) ( ( _val ) << 16 )
+#define AARCH64_PMSIDR_EL1_COUNTSIZE_SHIFT 16
+#define AARCH64_PMSIDR_EL1_COUNTSIZE_MASK 0xf0000U
+#define AARCH64_PMSIDR_EL1_COUNTSIZE_GET( _reg ) \
+ ( ( ( _reg ) >> 16 ) & 0xfU )
+
+static inline uint64_t _AArch64_Read_pmsidr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* PMSIRR_EL1, Sampling Interval Reload Register */
+
+#define AARCH64_PMSIRR_EL1_RND 0x1U
+
+#define AARCH64_PMSIRR_EL1_INTERVAL( _val ) ( ( _val ) << 8 )
+#define AARCH64_PMSIRR_EL1_INTERVAL_SHIFT 8
+#define AARCH64_PMSIRR_EL1_INTERVAL_MASK 0xffffff00U
+#define AARCH64_PMSIRR_EL1_INTERVAL_GET( _reg ) \
+ ( ( ( _reg ) >> 8 ) & 0xffffffU )
+
+static inline uint64_t _AArch64_Read_pmsirr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSIRR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmsirr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSIRR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* PMSLATFR_EL1, Sampling Latency Filter Register */
+
+#define AARCH64_PMSLATFR_EL1_MINLAT( _val ) ( ( _val ) << 0 )
+#define AARCH64_PMSLATFR_EL1_MINLAT_SHIFT 0
+#define AARCH64_PMSLATFR_EL1_MINLAT_MASK 0xfffU
+#define AARCH64_PMSLATFR_EL1_MINLAT_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfffU )
+
+static inline uint64_t _AArch64_Read_pmslatfr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, PMSLATFR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_pmslatfr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr PMSLATFR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* DISR_EL1, Deferred Interrupt Status Register */
+
+#define AARCH64_DISR_EL1_DFSC( _val ) ( ( _val ) << 0 )
+#define AARCH64_DISR_EL1_DFSC_SHIFT 0
+#define AARCH64_DISR_EL1_DFSC_MASK 0x3fU
+#define AARCH64_DISR_EL1_DFSC_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_DISR_EL1_ISS( _val ) ( ( _val ) << 0 )
+#define AARCH64_DISR_EL1_ISS_SHIFT 0
+#define AARCH64_DISR_EL1_ISS_MASK 0xffffffU
+#define AARCH64_DISR_EL1_ISS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffU )
+
+#define AARCH64_DISR_EL1_EA 0x200U
+
+#define AARCH64_DISR_EL1_AET( _val ) ( ( _val ) << 10 )
+#define AARCH64_DISR_EL1_AET_SHIFT 10
+#define AARCH64_DISR_EL1_AET_MASK 0x1c00U
+#define AARCH64_DISR_EL1_AET_GET( _reg ) \
+ ( ( ( _reg ) >> 10 ) & 0x7U )
+
+#define AARCH64_DISR_EL1_IDS 0x1000000U
+
+#define AARCH64_DISR_EL1_A 0x80000000U
+
+static inline uint64_t _AArch64_Read_disr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, DISR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_disr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr DISR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERRIDR_EL1, Error Record ID Register */
+
+#define AARCH64_ERRIDR_EL1_NUM( _val ) ( ( _val ) << 0 )
+#define AARCH64_ERRIDR_EL1_NUM_SHIFT 0
+#define AARCH64_ERRIDR_EL1_NUM_MASK 0xffffU
+#define AARCH64_ERRIDR_EL1_NUM_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffU )
+
+static inline uint64_t _AArch64_Read_erridr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERRIDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ERRSELR_EL1, Error Record Select Register */
+
+#define AARCH64_ERRSELR_EL1_SEL( _val ) ( ( _val ) << 0 )
+#define AARCH64_ERRSELR_EL1_SEL_SHIFT 0
+#define AARCH64_ERRSELR_EL1_SEL_MASK 0xffffU
+#define AARCH64_ERRSELR_EL1_SEL_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffU )
+
+static inline uint64_t _AArch64_Read_errselr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERRSELR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_errselr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERRSELR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXADDR_EL1, Selected Error Record Address Register */
+
+static inline uint64_t _AArch64_Read_erxaddr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXADDR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxaddr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXADDR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXCTLR_EL1, Selected Error Record Control Register */
+
+static inline uint64_t _AArch64_Read_erxctlr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXCTLR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxctlr_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXCTLR_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXFR_EL1, Selected Error Record Feature Register */
+
+static inline uint64_t _AArch64_Read_erxfr_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXFR_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ERXMISC0_EL1, Selected Error Record Miscellaneous Register 0 */
+
+static inline uint64_t _AArch64_Read_erxmisc0_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXMISC0_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxmisc0_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXMISC0_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXMISC1_EL1, Selected Error Record Miscellaneous Register 1 */
+
+static inline uint64_t _AArch64_Read_erxmisc1_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXMISC1_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxmisc1_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXMISC1_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXMISC2_EL1, Selected Error Record Miscellaneous Register 2 */
+
+static inline uint64_t _AArch64_Read_erxmisc2_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXMISC2_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxmisc2_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXMISC2_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXMISC3_EL1, Selected Error Record Miscellaneous Register 3 */
+
+static inline uint64_t _AArch64_Read_erxmisc3_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXMISC3_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxmisc3_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXMISC3_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXPFGCDN_EL1, Selected Pseudo-fault Generation Countdown Register */
+
+static inline uint64_t _AArch64_Read_erxpfgcdn_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXPFGCDN_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxpfgcdn_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXPFGCDN_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXPFGCTL_EL1, Selected Pseudo-fault Generation Control Register */
+
+static inline uint64_t _AArch64_Read_erxpfgctl_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXPFGCTL_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxpfgctl_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXPFGCTL_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* ERXPFGF_EL1, Selected Pseudo-fault Generation Feature Register */
+
+static inline uint64_t _AArch64_Read_erxpfgf_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXPFGF_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* ERXSTATUS_EL1, Selected Error Record Primary Status Register */
+
+static inline uint64_t _AArch64_Read_erxstatus_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, ERXSTATUS_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_erxstatus_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr ERXSTATUS_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* VDISR_EL2, Virtual Deferred Interrupt Status Register */
+
+#define AARCH64_VDISR_EL2_FS_3_0( _val ) ( ( _val ) << 0 )
+#define AARCH64_VDISR_EL2_FS_3_0_SHIFT 0
+#define AARCH64_VDISR_EL2_FS_3_0_MASK 0xfU
+#define AARCH64_VDISR_EL2_FS_3_0_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xfU )
+
+#define AARCH64_VDISR_EL2_STATUS( _val ) ( ( _val ) << 0 )
+#define AARCH64_VDISR_EL2_STATUS_SHIFT 0
+#define AARCH64_VDISR_EL2_STATUS_MASK 0x3fU
+#define AARCH64_VDISR_EL2_STATUS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0x3fU )
+
+#define AARCH64_VDISR_EL2_ISS( _val ) ( ( _val ) << 0 )
+#define AARCH64_VDISR_EL2_ISS_SHIFT 0
+#define AARCH64_VDISR_EL2_ISS_MASK 0xffffffU
+#define AARCH64_VDISR_EL2_ISS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffU )
+
+#define AARCH64_VDISR_EL2_LPAE 0x200U
+
+#define AARCH64_VDISR_EL2_FS_4 0x400U
+
+#define AARCH64_VDISR_EL2_EXT 0x1000U
+
+#define AARCH64_VDISR_EL2_AET( _val ) ( ( _val ) << 14 )
+#define AARCH64_VDISR_EL2_AET_SHIFT 14
+#define AARCH64_VDISR_EL2_AET_MASK 0xc000U
+#define AARCH64_VDISR_EL2_AET_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_VDISR_EL2_IDS 0x1000000U
+
+#define AARCH64_VDISR_EL2_A 0x80000000U
+
+static inline uint64_t _AArch64_Read_vdisr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, VDISR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_vdisr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr VDISR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* VSESR_EL2, Virtual SError Exception Syndrome Register */
+
+#define AARCH64_VSESR_EL2_ISS( _val ) ( ( _val ) << 0 )
+#define AARCH64_VSESR_EL2_ISS_SHIFT 0
+#define AARCH64_VSESR_EL2_ISS_MASK 0xffffffU
+#define AARCH64_VSESR_EL2_ISS_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffU )
+
+#define AARCH64_VSESR_EL2_EXT 0x1000U
+
+#define AARCH64_VSESR_EL2_AET( _val ) ( ( _val ) << 14 )
+#define AARCH64_VSESR_EL2_AET_SHIFT 14
+#define AARCH64_VSESR_EL2_AET_MASK 0xc000U
+#define AARCH64_VSESR_EL2_AET_GET( _reg ) \
+ ( ( ( _reg ) >> 14 ) & 0x3U )
+
+#define AARCH64_VSESR_EL2_IDS 0x1000000U
+
+static inline uint64_t _AArch64_Read_vsesr_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, VSESR_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_vsesr_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr VSESR_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTFRQ_EL0, Counter-timer Frequency Register */
+
+static inline uint64_t _AArch64_Read_cntfrq_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTFRQ_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntfrq_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTFRQ_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHCTL_EL2, Counter-timer Hypervisor Control Register */
+
+#define AARCH64_CNTHCTL_EL2_EL0PCTEN 0x1U
+
+#define AARCH64_CNTHCTL_EL2_EL1PCTEN_0 0x1U
+
+#define AARCH64_CNTHCTL_EL2_EL0VCTEN 0x2U
+
+#define AARCH64_CNTHCTL_EL2_EL1PCEN 0x2U
+
+#define AARCH64_CNTHCTL_EL2_EVNTEN 0x4U
+
+#define AARCH64_CNTHCTL_EL2_EVNTDIR 0x8U
+
+#define AARCH64_CNTHCTL_EL2_EVNTI( _val ) ( ( _val ) << 4 )
+#define AARCH64_CNTHCTL_EL2_EVNTI_SHIFT 4
+#define AARCH64_CNTHCTL_EL2_EVNTI_MASK 0xf0U
+#define AARCH64_CNTHCTL_EL2_EVNTI_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_CNTHCTL_EL2_EL0VTEN 0x100U
+
+#define AARCH64_CNTHCTL_EL2_EL0PTEN 0x200U
+
+#define AARCH64_CNTHCTL_EL2_EL1PCTEN_1 0x400U
+
+#define AARCH64_CNTHCTL_EL2_EL1PTEN 0x800U
+
+#define AARCH64_CNTHCTL_EL2_ECV 0x1000U
+
+#define AARCH64_CNTHCTL_EL2_EL1TVT 0x2000U
+
+#define AARCH64_CNTHCTL_EL2_EL1TVCT 0x4000U
+
+#define AARCH64_CNTHCTL_EL2_EL1NVPCT 0x8000U
+
+#define AARCH64_CNTHCTL_EL2_EL1NVVCT 0x10000U
+
+#define AARCH64_CNTHCTL_EL2_EVNTIS 0x20000U
+
+static inline uint64_t _AArch64_Read_cnthctl_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHCTL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthctl_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHCTL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHP_CTL_EL2, Counter-timer Hypervisor Physical Timer Control Register */
+
+#define AARCH64_CNTHP_CTL_EL2_ENABLE 0x1U
+
+#define AARCH64_CNTHP_CTL_EL2_IMASK 0x2U
+
+#define AARCH64_CNTHP_CTL_EL2_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cnthp_ctl_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHP_CTL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthp_ctl_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHP_CTL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHP_CVAL_EL2, Counter-timer Physical Timer CompareValue Register (EL2) */
+
+static inline uint64_t _AArch64_Read_cnthp_cval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHP_CVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthp_cval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHP_CVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHP_TVAL_EL2, Counter-timer Physical Timer TimerValue Register (EL2) */
+
+#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTHP_TVAL_EL2_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cnthp_tval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHP_TVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthp_tval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHP_TVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHPS_CTL_EL2, Counter-timer Secure Physical Timer Control Register (EL2) */
+
+#define AARCH64_CNTHPS_CTL_EL2_ENABLE 0x1U
+
+#define AARCH64_CNTHPS_CTL_EL2_IMASK 0x2U
+
+#define AARCH64_CNTHPS_CTL_EL2_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cnthps_ctl_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHPS_CTL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthps_ctl_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHPS_CTL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHPS_CVAL_EL2, Counter-timer Secure Physical Timer CompareValue Register (EL2) */
+
+static inline uint64_t _AArch64_Read_cnthps_cval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHPS_CVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthps_cval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHPS_CVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHPS_TVAL_EL2, Counter-timer Secure Physical Timer TimerValue Register (EL2) */
+
+#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTHPS_TVAL_EL2_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cnthps_tval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHPS_TVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthps_tval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHPS_TVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHV_CTL_EL2, Counter-timer Virtual Timer Control Register (EL2) */
+
+#define AARCH64_CNTHV_CTL_EL2_ENABLE 0x1U
+
+#define AARCH64_CNTHV_CTL_EL2_IMASK 0x2U
+
+#define AARCH64_CNTHV_CTL_EL2_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cnthv_ctl_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHV_CTL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthv_ctl_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHV_CTL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHV_CVAL_EL2, Counter-timer Virtual Timer CompareValue Register (EL2) */
+
+static inline uint64_t _AArch64_Read_cnthv_cval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHV_CVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthv_cval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHV_CVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHV_TVAL_EL2, Counter-timer Virtual Timer TimerValue Register (EL2) */
+
+#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTHV_TVAL_EL2_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cnthv_tval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHV_TVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthv_tval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHV_TVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHVS_CTL_EL2, Counter-timer Secure Virtual Timer Control Register (EL2) */
+
+#define AARCH64_CNTHVS_CTL_EL2_ENABLE 0x1U
+
+#define AARCH64_CNTHVS_CTL_EL2_IMASK 0x2U
+
+#define AARCH64_CNTHVS_CTL_EL2_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cnthvs_ctl_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHVS_CTL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthvs_ctl_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHVS_CTL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHVS_CVAL_EL2, Counter-timer Secure Virtual Timer CompareValue Register (EL2) */
+
+static inline uint64_t _AArch64_Read_cnthvs_cval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHVS_CVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthvs_cval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHVS_CVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTHVS_TVAL_EL2, Counter-timer Secure Virtual Timer TimerValue Register (EL2) */
+
+#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTHVS_TVAL_EL2_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cnthvs_tval_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTHVS_TVAL_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cnthvs_tval_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTHVS_TVAL_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTKCTL_EL1, Counter-timer Kernel Control Register */
+
+#define AARCH64_CNTKCTL_EL1_EL0PCTEN 0x1U
+
+#define AARCH64_CNTKCTL_EL1_EL0VCTEN 0x2U
+
+#define AARCH64_CNTKCTL_EL1_EVNTEN 0x4U
+
+#define AARCH64_CNTKCTL_EL1_EVNTDIR 0x8U
+
+#define AARCH64_CNTKCTL_EL1_EVNTI( _val ) ( ( _val ) << 4 )
+#define AARCH64_CNTKCTL_EL1_EVNTI_SHIFT 4
+#define AARCH64_CNTKCTL_EL1_EVNTI_MASK 0xf0U
+#define AARCH64_CNTKCTL_EL1_EVNTI_GET( _reg ) \
+ ( ( ( _reg ) >> 4 ) & 0xfU )
+
+#define AARCH64_CNTKCTL_EL1_EL0VTEN 0x100U
+
+#define AARCH64_CNTKCTL_EL1_EL0PTEN 0x200U
+
+#define AARCH64_CNTKCTL_EL1_EVNTIS 0x20000U
+
+static inline uint64_t _AArch64_Read_cntkctl_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTKCTL_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntkctl_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTKCTL_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTP_CTL_EL0, Counter-timer Physical Timer Control Register */
+
+#define AARCH64_CNTP_CTL_EL0_ENABLE 0x1U
+
+#define AARCH64_CNTP_CTL_EL0_IMASK 0x2U
+
+#define AARCH64_CNTP_CTL_EL0_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cntp_ctl_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTP_CTL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntp_ctl_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTP_CTL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTP_CVAL_EL0, Counter-timer Physical Timer CompareValue Register */
+
+static inline uint64_t _AArch64_Read_cntp_cval_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTP_CVAL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntp_cval_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTP_CVAL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTP_TVAL_EL0, Counter-timer Physical Timer TimerValue Register */
+
+#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTP_TVAL_EL0_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cntp_tval_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTP_TVAL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntp_tval_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTP_TVAL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTPCTSS_EL0, Counter-timer Self-Synchronized Physical Count Register */
+
+static inline uint64_t _AArch64_Read_cntpctss_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPCTSS_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* CNTPCT_EL0, Counter-timer Physical Count Register */
+
+static inline uint64_t _AArch64_Read_cntpct_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPCT_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* CNTPS_CTL_EL1, Counter-timer Physical Secure Timer Control Register */
+
+#define AARCH64_CNTPS_CTL_EL1_ENABLE 0x1U
+
+#define AARCH64_CNTPS_CTL_EL1_IMASK 0x2U
+
+#define AARCH64_CNTPS_CTL_EL1_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cntps_ctl_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPS_CTL_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntps_ctl_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTPS_CTL_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTPOFF_EL2, Counter-timer Physical Offset Register */
+
+static inline uint64_t _AArch64_Read_cntpoff_el2( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPOFF_EL2" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntpoff_el2( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTPOFF_EL2, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTPS_CVAL_EL1, Counter-timer Physical Secure Timer CompareValue Register */
+
+static inline uint64_t _AArch64_Read_cntps_cval_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPS_CVAL_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntps_cval_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTPS_CVAL_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTPS_TVAL_EL1, Counter-timer Physical Secure Timer TimerValue Register */
+
+#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTPS_TVAL_EL1_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cntps_tval_el1( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTPS_TVAL_EL1" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntps_tval_el1( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTPS_TVAL_EL1, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTV_CTL_EL0, Counter-timer Virtual Timer Control Register */
+
+#define AARCH64_CNTV_CTL_EL0_ENABLE 0x1U
+
+#define AARCH64_CNTV_CTL_EL0_IMASK 0x2U
+
+#define AARCH64_CNTV_CTL_EL0_ISTATUS 0x4U
+
+static inline uint64_t _AArch64_Read_cntv_ctl_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTV_CTL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntv_ctl_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTV_CTL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTV_CVAL_EL0, Counter-timer Virtual Timer CompareValue Register */
+
+static inline uint64_t _AArch64_Read_cntv_cval_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTV_CVAL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntv_cval_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTV_CVAL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTV_TVAL_EL0, Counter-timer Virtual Timer TimerValue Register */
+
+#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE( _val ) ( ( _val ) << 0 )
+#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_SHIFT 0
+#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_MASK 0xffffffffU
+#define AARCH64_CNTV_TVAL_EL0_TIMERVALUE_GET( _reg ) \
+ ( ( ( _reg ) >> 0 ) & 0xffffffffU )
+
+static inline uint64_t _AArch64_Read_cntv_tval_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTV_TVAL_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+static inline void _AArch64_Write_cntv_tval_el0( uint64_t value )
+{
+ __asm__ volatile (
+ "msr CNTV_TVAL_EL0, %0" : : "r" ( value ) : "memory"
+ );
+}
+
+/* CNTVCTSS_EL0, Counter-timer Self-Synchronized Virtual Count Register */
+
+static inline uint64_t _AArch64_Read_cntvctss_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTVCTSS_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+/* CNTVCT_EL0, Counter-timer Virtual Count Register */
+
+static inline uint64_t _AArch64_Read_cntvct_el0( void )
+{
+ uint64_t value;
+
+ __asm__ volatile (
+ "mrs %0, CNTVCT_EL0" : "=&r" ( value ) : : "memory"
+ );
+
+ return value;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_AARCH64_SYSTEM_REGISTERS_H */
diff --git a/cpukit/score/cpu/arm/cpu_asm.S b/cpukit/score/cpu/arm/cpu_asm.S
index 66f8ba6032..46eb46b914 100644
--- a/cpukit/score/cpu/arm/cpu_asm.S
+++ b/cpukit/score/cpu/arm/cpu_asm.S
@@ -54,6 +54,9 @@
*/
DEFINE_FUNCTION_ARM(_CPU_Context_switch)
+ .globl _CPU_Context_switch_no_return
+ .set _CPU_Context_switch_no_return, _CPU_Context_switch
+
/* Start saving context */
GET_SELF_CPU_CONTROL r2
ldr r3, [r2, #PER_CPU_ISR_DISPATCH_DISABLE]
diff --git a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
index ea25828bb6..2bfe672480 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/aarch32-pmsa.h
@@ -108,7 +108,7 @@ extern "C" {
#define AARCH32_PMSA_MEM_INNER_WA 0x01U
#define AARCH32_PMSA_MEM_ATTR( _ma0, _ma1, _ma2, _ma3 ) \
- ( ( _ma0 ) | ( ( _ma1 ) << 8 ) | ( ( _ma1 ) << 16 ) | ( ( _ma1 ) << 24 ) )
+ ( ( _ma0 ) | ( ( _ma1 ) << 8 ) | ( ( _ma2 ) << 16 ) | ( ( _ma3 ) << 24 ) )
#define AARCH32_PMSA_MEM_ATTR_DEFAULT_CACHED \
( AARCH32_PMSA_MEM_OUTER_WBNT | \
diff --git a/cpukit/score/cpu/arm/include/rtems/score/armv7m.h b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h
index 8f926e826a..1803c8d8ca 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/armv7m.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h
@@ -656,11 +656,11 @@ static inline void _ARMV7M_MPU_Set_region(
RTEMS_OBFUSCATE_VARIABLE(end);
size = (uintptr_t) end - (uintptr_t) begin;
- if ( size > 0 ) {
+ if ( (uintptr_t) end > (uintptr_t) begin ) {
rbar = (uintptr_t) begin | region | ARMV7M_MPU_RBAR_VALID;
rasr |= _ARMV7M_MPU_Get_region_size(size);
} else {
- rbar = region;
+ rbar = ARMV7M_MPU_RBAR_VALID | region;
rasr = 0;
}
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
index e5b23e7100..dcda4d525c 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
@@ -465,6 +465,11 @@ void _CPU_ISR_install_vector(
*/
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
#if defined(ARM_MULTILIB_ARCH_V7M)
diff --git a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
index d17dfbd30a..b1063c9eee 100644
--- a/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/bfin/include/rtems/score/cpu.h
@@ -308,15 +308,6 @@ typedef struct {
/**@{**/
/**
- * Support routine to initialize the RTEMS vector table after it is allocated.
- *
- * Port Specific Information:
- *
- * XXX document implementation including references if appropriate
- */
-#define _CPU_Initialize_vectors()
-
-/**
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in @a _isr_cookie.
*
diff --git a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
index 5c890de53e..29af53a53b 100644
--- a/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/lm32/include/rtems/score/cpu.h
@@ -415,15 +415,6 @@ extern Context_Control_fp _CPU_Null_fp_context;
/**@{**/
/**
- * Support routine to initialize the RTEMS vector table after it is allocated.
- *
- * Port Specific Information:
- *
- * XXX document implementation including references if appropriate
- */
-#define _CPU_Initialize_vectors()
-
-/**
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in @a _isr_cookie.
*
diff --git a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
index de7f8215bb..65ba0ef496 100644
--- a/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/m68k/include/rtems/score/cpu.h
@@ -350,15 +350,12 @@ extern void* _VBR;
* ISR handler macros
*
* These macros perform the following functions:
- * + initialize the RTEMS vector table
* + disable all maskable CPU interrupts
* + restore previous interrupt level (enable)
* + temporarily restore interrupts (flash)
* + set a particular level
*/
-#define _CPU_Initialize_vectors()
-
#define _CPU_ISR_Disable( _level ) \
m68k_disable_interrupts( _level )
diff --git a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
index c857734a2e..9959892689 100644
--- a/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/moxie/include/rtems/score/cpu.h
@@ -256,11 +256,6 @@ typedef struct {
*/
/*
- * Support routine to initialize the RTEMS vector table after it is allocated.
- */
-#define _CPU_Initialize_vectors()
-
-/*
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in _level.
*
diff --git a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
index 8caec19b5b..69c96280f4 100644
--- a/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/nios2/include/rtems/score/cpu.h
@@ -172,8 +172,6 @@ typedef struct {
uint32_t ipending;
} CPU_Exception_frame;
-#define _CPU_Initialize_vectors()
-
/**
* @brief Macro to disable interrupts.
*
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
index c067501502..120b51b633 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
@@ -618,17 +618,6 @@ extern Context_Control_fp _CPU_Null_fp_context;
/**
* @addtogroup RTEMSScoreCPUExampleInterrupt
*
- * Support routine to initialize the RTEMS vector table after it is allocated.
- *
- * Port Specific Information:
- *
- * XXX document implementation including references if appropriate
- */
-#define _CPU_Initialize_vectors()
-
-/**
- * @addtogroup RTEMSScoreCPUExampleInterrupt
- *
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in @a _isr_cookie.
*
@@ -1054,23 +1043,47 @@ void _CPU_ISR_install_vector(
void *_CPU_Thread_Idle_body( uintptr_t ignored );
/**
+ * @brief Performs a context switch from the executing thread to the heir
+ * thread.
+ *
* @addtogroup RTEMSScoreCPUExampleContext
*
- * This routine switches from the run context to the heir context.
+ * This routine switches from the executing context to the heir context.
*
- * @param[in] run points to the context of the currently executing task
- * @param[in] heir points to the context of the heir task
+ * @param[out] executing points to the context of the currently executing task.
+ *
+ * @param[in, out] heir points to the context of the heir task.
*
* Port Specific Information:
*
* XXX document implementation including references if appropriate
*/
void _CPU_Context_switch(
- Context_Control *run,
+ Context_Control *executing,
Context_Control *heir
);
/**
+ * @brief Performs a context switch from the executing thread to the heir
+ * thread and does not return.
+ *
+ * @addtogroup RTEMSScoreCPUExampleContext
+ *
+ * This routine shall be a strong alias to _CPU_Context_switch(). It shall be
+ * provided for all target architectures which support an SMP build
+ * configuration (RTEMS_SMP). The purpose is to help the compiler avoid
+ * generation of dead code in _Thread_Start_multitasking().
+ *
+ * @param[out] executing points to the context of the currently executing task.
+ *
+ * @param[in, out] heir points to the context of the heir task.
+ */
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
+/**
* @addtogroup RTEMSScoreCPUExampleContext
*
* This routine is generally used only to restart self in an
diff --git a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
index 22acfd590e..5cd59df3f4 100644
--- a/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/or1k/include/rtems/score/cpu.h
@@ -244,16 +244,6 @@ typedef Context_Control CPU_Interrupt_frame;
/* ISR handler macros */
/*
- * Support routine to initialize the RTEMS vector table after it is allocated.
- *
- * NO_CPU Specific Information:
- *
- * XXX document implementation including references if appropriate
- */
-
-#define _CPU_Initialize_vectors()
-
-/*
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in _level.
*
diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
index 996b6f8e60..f22e1cd7ec 100644
--- a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
@@ -914,6 +914,11 @@ void _CPU_Context_switch(
Context_Control *heir
);
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
/*
* _CPU_Context_restore
*
diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
index 38eb92394d..d9056d0ad1 100644
--- a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
@@ -147,8 +147,6 @@ typedef struct {
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
-#define _CPU_Initialize_vectors()
-
static inline uint32_t riscv_interrupt_disable( void )
{
unsigned long mstatus;
@@ -383,6 +381,11 @@ void _CPU_Context_switch(
Context_Control *heir
);
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
/*
* _CPU_Context_restore
*
diff --git a/cpukit/score/cpu/riscv/riscv-context-switch.S b/cpukit/score/cpu/riscv/riscv-context-switch.S
index 96c117b3de..830f8629a4 100644
--- a/cpukit/score/cpu/riscv/riscv-context-switch.S
+++ b/cpukit/score/cpu/riscv/riscv-context-switch.S
@@ -37,9 +37,11 @@
.align 2
PUBLIC(_CPU_Context_switch)
+PUBLIC(_CPU_Context_switch_no_return)
PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_switch):
+SYM(_CPU_Context_switch_no_return):
GET_SELF_CPU_CONTROL a2
lw a3, PER_CPU_ISR_DISPATCH_DISABLE(a2)
diff --git a/cpukit/score/cpu/sh/include/rtems/score/cpu.h b/cpukit/score/cpu/sh/include/rtems/score/cpu.h
index 0df6aa3f83..364a2bc310 100644
--- a/cpukit/score/cpu/sh/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sh/include/rtems/score/cpu.h
@@ -367,14 +367,6 @@ void CPU_delay( uint32_t microseconds );
*/
/*
- * Support routine to initialize the RTEMS vector table after it is allocated.
- *
- * SH Specific Information: NONE
- */
-
-#define _CPU_Initialize_vectors()
-
-/*
* Disable all interrupts for an RTEMS critical section. The previous
* level is returned in _level.
*/
diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c
index 9f84c28fc5..10ad44049a 100644
--- a/cpukit/score/cpu/sparc/cpu.c
+++ b/cpukit/score/cpu/sparc/cpu.c
@@ -1,7 +1,12 @@
/**
- * @file
+ * @file
*
- * @brief SPARC CPU Dependent Source
+ * @ingroup RTEMSScoreCPUSPARC
+ *
+ * @brief This source file contains static assertions to ensure the consistency
+ * of interfaces used in C and assembler and it contains the SPARC-specific
+ * implementation of _CPU_Initialize(), _CPU_ISR_Get_level(), and
+ * _CPU_Context_Initialize().
*/
/*
@@ -19,11 +24,9 @@
#include "config.h"
#endif
-#include <rtems/score/isr.h>
#include <rtems/score/percpu.h>
#include <rtems/score/tls.h>
#include <rtems/score/thread.h>
-#include <rtems/rtems/cache.h>
#if SPARC_HAS_FPU == 1
RTEMS_STATIC_ASSERT(
@@ -144,21 +147,56 @@ RTEMS_STATIC_ASSERT(
CPU_Interrupt_frame_alignment
);
-/*
- * This initializes the set of opcodes placed in each trap
- * table entry. The routine which installs a handler is responsible
- * for filling in the fields for the _handler address and the _vector
- * trap type.
- *
- * The constants following this structure are masks for the fields which
- * must be filled in when the handler is installed.
- */
-const CPU_Trap_table_entry _CPU_Trap_slot_template = {
- 0xa1480000, /* mov %psr, %l0 */
- 0x29000000, /* sethi %hi(_handler), %l4 */
- 0x81c52000, /* jmp %l4 + %lo(_handler) */
- 0xa6102000 /* mov _vector, %l3 */
-};
+#define SPARC_ASSERT_REGISTER_WINDOW_OFFSET( member, off ) \
+ RTEMS_STATIC_ASSERT( \
+ offsetof( SPARC_Register_window, member ) == \
+ RTEMS_XCONCAT( SPARC_REGISTER_WINDOW_OFFSET_, off ), \
+ SPARC_Register_window ## member \
+ )
+
+SPARC_ASSERT_REGISTER_WINDOW_OFFSET( local[ 0 ], LOCAL( 0 ) );
+SPARC_ASSERT_REGISTER_WINDOW_OFFSET( local[ 1 ], LOCAL( 1 ) );
+SPARC_ASSERT_REGISTER_WINDOW_OFFSET( input[ 0 ], INPUT( 0 ) );
+SPARC_ASSERT_REGISTER_WINDOW_OFFSET( input[ 1 ], INPUT( 1 ) );
+
+RTEMS_STATIC_ASSERT(
+ sizeof( SPARC_Register_window ) == SPARC_REGISTER_WINDOW_SIZE,
+ SPARC_REGISTER_WINDOW_SIZE
+);
+
+#define SPARC_ASSERT_EXCEPTION_OFFSET( member, off ) \
+ RTEMS_STATIC_ASSERT( \
+ offsetof( CPU_Exception_frame, member ) == \
+ RTEMS_XCONCAT( SPARC_EXCEPTION_OFFSET_, off ), \
+ CPU_Exception_frame_offset_ ## member \
+ )
+
+SPARC_ASSERT_EXCEPTION_OFFSET( psr, PSR );
+SPARC_ASSERT_EXCEPTION_OFFSET( pc, PC );
+SPARC_ASSERT_EXCEPTION_OFFSET( npc, NPC );
+SPARC_ASSERT_EXCEPTION_OFFSET( trap, TRAP );
+SPARC_ASSERT_EXCEPTION_OFFSET( wim, WIM );
+SPARC_ASSERT_EXCEPTION_OFFSET( y, Y );
+SPARC_ASSERT_EXCEPTION_OFFSET( global[ 0 ], GLOBAL( 0 ) );
+SPARC_ASSERT_EXCEPTION_OFFSET( global[ 1 ], GLOBAL( 1 ) );
+SPARC_ASSERT_EXCEPTION_OFFSET( output[ 0 ], OUTPUT( 0 ) );
+SPARC_ASSERT_EXCEPTION_OFFSET( output[ 1 ], OUTPUT( 1 ) );
+
+#if SPARC_HAS_FPU == 1
+SPARC_ASSERT_EXCEPTION_OFFSET( fsr, FSR );
+SPARC_ASSERT_EXCEPTION_OFFSET( fp[ 0 ], FP( 0 ) );
+SPARC_ASSERT_EXCEPTION_OFFSET( fp[ 1 ], FP( 1 ) );
+#endif
+
+RTEMS_STATIC_ASSERT(
+ sizeof( CPU_Exception_frame ) == SPARC_EXCEPTION_FRAME_SIZE,
+ SPARC_EXCEPTION_FRAME_SIZE
+);
+
+RTEMS_STATIC_ASSERT(
+ sizeof( CPU_Exception_frame ) % CPU_ALIGNMENT == 0,
+ CPU_Exception_frame_alignment
+);
/*
* _CPU_Initialize
@@ -197,160 +235,6 @@ uint32_t _CPU_ISR_Get_level( void )
return level;
}
-/*
- * _CPU_ISR_install_raw_handler
- *
- * This routine installs the specified handler as a "raw" non-executive
- * supported trap handler (a.k.a. interrupt service routine).
- *
- * Input Parameters:
- * vector - trap table entry number plus synchronous
- * vs. asynchronous information
- * new_handler - address of the handler to be installed
- * old_handler - pointer to an address of the handler previously installed
- *
- * Output Parameters: NONE
- * *new_handler - address of the handler previously installed
- *
- * NOTE:
- *
- * On the SPARC, there are really only 256 vectors. However, the executive
- * has no easy, fast, reliable way to determine which traps are synchronous
- * and which are asynchronous. By default, synchronous traps return to the
- * instruction which caused the interrupt. So if you install a software
- * trap handler as an executive interrupt handler (which is desirable since
- * RTEMS takes care of window and register issues), then the executive needs
- * to know that the return address is to the trap rather than the instruction
- * following the trap.
- *
- * So vectors 0 through 255 are treated as regular asynchronous traps which
- * provide the "correct" return address. Vectors 256 through 512 are assumed
- * by the executive to be synchronous and to require that the return address
- * be fudged.
- *
- * If you use this mechanism to install a trap handler which must reexecute
- * the instruction which caused the trap, then it should be installed as
- * an asynchronous trap. This will avoid the executive changing the return
- * address.
- */
-
-void _CPU_ISR_install_raw_handler(
- uint32_t vector,
- CPU_ISR_raw_handler new_handler,
- CPU_ISR_raw_handler *old_handler
-)
-{
- uint32_t real_vector;
- CPU_Trap_table_entry *tbr;
- CPU_Trap_table_entry *slot;
- uint32_t u32_tbr;
- uint32_t u32_handler;
-
- /*
- * Get the "real" trap number for this vector ignoring the synchronous
- * versus asynchronous indicator included with our vector numbers.
- */
-
- real_vector = SPARC_REAL_TRAP_NUMBER( vector );
-
- /*
- * Get the current base address of the trap table and calculate a pointer
- * to the slot we are interested in.
- */
-
- sparc_get_tbr( u32_tbr );
-
- u32_tbr &= 0xfffff000;
-
- tbr = (CPU_Trap_table_entry *) u32_tbr;
-
- slot = &tbr[ real_vector ];
-
- /*
- * Get the address of the old_handler from the trap table.
- *
- * NOTE: The old_handler returned will be bogus if it does not follow
- * the RTEMS model.
- */
-
-#define HIGH_BITS_MASK 0xFFFFFC00
-#define HIGH_BITS_SHIFT 10
-#define LOW_BITS_MASK 0x000003FF
-
- if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
- u32_handler =
- (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
- (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
- *old_handler = (CPU_ISR_raw_handler) u32_handler;
- } else
- *old_handler = 0;
-
- /*
- * Copy the template to the slot and then fix it.
- */
-
- *slot = _CPU_Trap_slot_template;
-
- u32_handler = (uint32_t) new_handler;
-
- slot->mov_vector_l3 |= vector;
- slot->sethi_of_handler_to_l4 |=
- (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
- slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
-
- /*
- * There is no instruction cache snooping, so we need to invalidate
- * the instruction cache to make sure that the processor sees the
- * changes to the trap table. This step is required on both single-
- * and multiprocessor systems.
- *
- * In a SMP configuration a change to the trap table might be
- * missed by other cores. If the system state is up, the other
- * cores can be notified using SMP messages that they need to
- * flush their icache. If the up state has not been reached
- * there is no need to notify other cores. They will do an
- * automatic flush of the icache just after entering the up
- * state, but before enabling interrupts.
- */
- rtems_cache_invalidate_entire_instruction();
-}
-
-void _CPU_ISR_install_vector(
- uint32_t vector,
- CPU_ISR_handler new_handler,
- CPU_ISR_handler *old_handler
-)
-{
- uint32_t real_vector;
- CPU_ISR_raw_handler ignored;
-
- /*
- * Get the "real" trap number for this vector ignoring the synchronous
- * versus asynchronous indicator included with our vector numbers.
- */
-
- real_vector = SPARC_REAL_TRAP_NUMBER( vector );
-
- /*
- * Return the previous ISR handler.
- */
-
- *old_handler = _ISR_Vector_table[ real_vector ];
-
- /*
- * Install the wrapper so this ISR can be invoked properly.
- */
-
- _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
-
- /*
- * We put the actual user ISR address in '_ISR_vector_table'. This will
- * be used by the _ISR_Handler so the user gets control.
- */
-
- _ISR_Vector_table[ real_vector ] = new_handler;
-}
-
void _CPU_Context_Initialize(
Context_Control *the_context,
uint32_t *stack_base,
diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S
index e884fb2f9e..45d1495af7 100644
--- a/cpukit/score/cpu/sparc/cpu_asm.S
+++ b/cpukit/score/cpu/sparc/cpu_asm.S
@@ -25,27 +25,6 @@
#include <rtems/score/percpu.h>
#include <libcpu/grlib-tn-0018.h>
-#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
- #define FP_FRAME_OFFSET_FO_F1 (SPARC_MINIMUM_STACK_FRAME_SIZE + 0)
- #define FP_FRAME_OFFSET_F2_F3 (FP_FRAME_OFFSET_FO_F1 + 8)
- #define FP_FRAME_OFFSET_F4_F5 (FP_FRAME_OFFSET_F2_F3 + 8)
- #define FP_FRAME_OFFSET_F6_F7 (FP_FRAME_OFFSET_F4_F5 + 8)
- #define FP_FRAME_OFFSET_F8_F9 (FP_FRAME_OFFSET_F6_F7 + 8)
- #define FP_FRAME_OFFSET_F1O_F11 (FP_FRAME_OFFSET_F8_F9 + 8)
- #define FP_FRAME_OFFSET_F12_F13 (FP_FRAME_OFFSET_F1O_F11 + 8)
- #define FP_FRAME_OFFSET_F14_F15 (FP_FRAME_OFFSET_F12_F13 + 8)
- #define FP_FRAME_OFFSET_F16_F17 (FP_FRAME_OFFSET_F14_F15 + 8)
- #define FP_FRAME_OFFSET_F18_F19 (FP_FRAME_OFFSET_F16_F17 + 8)
- #define FP_FRAME_OFFSET_F2O_F21 (FP_FRAME_OFFSET_F18_F19 + 8)
- #define FP_FRAME_OFFSET_F22_F23 (FP_FRAME_OFFSET_F2O_F21 + 8)
- #define FP_FRAME_OFFSET_F24_F25 (FP_FRAME_OFFSET_F22_F23 + 8)
- #define FP_FRAME_OFFSET_F26_F27 (FP_FRAME_OFFSET_F24_F25 + 8)
- #define FP_FRAME_OFFSET_F28_F29 (FP_FRAME_OFFSET_F26_F27 + 8)
- #define FP_FRAME_OFFSET_F3O_F31 (FP_FRAME_OFFSET_F28_F29 + 8)
- #define FP_FRAME_OFFSET_FSR (FP_FRAME_OFFSET_F3O_F31 + 8)
- #define FP_FRAME_SIZE (FP_FRAME_OFFSET_FSR + 8)
-#endif
-
/*
* void _CPU_Context_switch(
* Context_Control *run,
@@ -57,7 +36,9 @@
.align 4
PUBLIC(_CPU_Context_switch)
+ PUBLIC(_CPU_Context_switch_no_return)
SYM(_CPU_Context_switch):
+SYM(_CPU_Context_switch_no_return):
st %g5, [%o0 + G5_OFFSET] ! save the global registers
/*
@@ -314,7 +295,7 @@ SYM(_CPU_Context_restore):
mov %i0, %o1 ! in the delay slot
/*
- * void _ISR_Handler()
+ * void _SPARC_Interrupt_trap()
*
* This routine provides the RTEMS interrupt management.
*
@@ -324,28 +305,14 @@ SYM(_CPU_Context_restore):
* l0 = PSR
* l1 = PC
* l2 = nPC
- * l3 = trap type
+ * l3 = interrupt vector number (this is not the trap type)
*
- * NOTE: By an executive defined convention, trap type is between 0 and 255 if
- * it is an asynchonous trap and 256 and 511 if it is synchronous.
+ * NOTE: This trap handler is intended to service external interrupts.
*/
.align 4
- PUBLIC(_ISR_Handler)
-SYM(_ISR_Handler):
- /*
- * Fix the return address for synchronous traps.
- */
-
- andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
- ! Is this a synchronous trap?
- be,a win_ovflow ! No, then skip the adjustment
- nop ! DELAY
- mov %l1, %l6 ! save trapped pc for debug info
- mov %l2, %l1 ! do not return to the instruction
- add %l2, 4, %l2 ! indicated
-
-win_ovflow:
+ PUBLIC(_SPARC_Interrupt_trap)
+SYM(_SPARC_Interrupt_trap):
/*
* Save the globals this block uses.
*
@@ -432,7 +399,7 @@ dont_do_the_window:
* includes a regular minimum stack frame which will be used if
* needed by register window overflow and underflow handlers.
*
- * REGISTERS SAME AS AT _ISR_Handler
+ * REGISTERS SAME AS AT _SPARC_Interrupt_trap()
*/
sub %fp, CPU_INTERRUPT_FRAME_SIZE, %sp
@@ -458,9 +425,6 @@ dont_do_the_window:
rd %y, %g1
st %g1, [%sp + ISF_Y_OFFSET] ! save y
- st %l6, [%sp + ISF_TPC_OFFSET] ! save real trapped pc
-
- mov %sp, %o1 ! 2nd arg to ISR Handler
/*
* Increment ISR nest level and Thread dispatch disable level.
@@ -539,26 +503,16 @@ dont_switch_stacks:
sub %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
/*
- * Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
- * set the PIL in the %psr to mask off interrupts with lower priority.
+ * Set the PIL in the %psr to mask off interrupts with lower priority.
* The original %psr in %l0 is not modified since it will be restored
* when the interrupt handler returns.
*/
mov %l0, %g5
- and %l3, 0x0ff, %g4
- subcc %g4, 0x11, %g0
- bl dont_fix_pil
- subcc %g4, 0x1f, %g0
- bg dont_fix_pil
- sll %g4, 8, %g4
+ sll %l3, 8, %g4
and %g4, SPARC_PSR_PIL_MASK, %g4
andn %l0, SPARC_PSR_PIL_MASK, %g5
- ba pil_fixed
or %g4, %g5, %g5
-dont_fix_pil:
- or %g5, SPARC_PSR_PIL_MASK, %g5
-pil_fixed:
#if SPARC_HAS_FPU == 1
/*
@@ -571,23 +525,10 @@ pil_fixed:
wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
/*
- * Vector to user's handler.
- *
- * NOTE: TBR may no longer have vector number in it since
- * we just enabled traps. It is definitely in l3.
+ * Call _SPARC_Interrupt_dispatch( %l3 )
*/
-
- sethi %hi(SYM(_ISR_Vector_table)), %g4
- or %g4, %lo(SYM(_ISR_Vector_table)), %g4
- and %l3, 0xFF, %g5 ! remove synchronous trap indicator
- sll %g5, 2, %g5 ! g5 = offset into table
- ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ]
-
-
- ! o1 = 2nd arg = address of the ISF
- ! WAS LOADED WHEN ISF WAS SAVED!!!
mov %l3, %o0 ! o0 = 1st arg = vector number
- call %g4
+ call SYM(_SPARC_Interrupt_dispatch)
#if defined(RTEMS_PROFILING)
mov %o5, %l3 ! save interrupt entry instant
#else
@@ -705,40 +646,40 @@ isr_dispatch:
* Post-switch actions (e.g. signal handlers) and context switch
* extensions may safely use the floating point unit.
*/
- sub %sp, FP_FRAME_SIZE, %sp
- std %f0, [%sp + FP_FRAME_OFFSET_FO_F1]
+ sub %sp, SPARC_FP_FRAME_SIZE, %sp
+ std %f0, [%sp + SPARC_FP_FRAME_OFFSET_FO_F1]
SPARC_LEON3FT_B2BST_NOP
- std %f2, [%sp + FP_FRAME_OFFSET_F2_F3]
+ std %f2, [%sp + SPARC_FP_FRAME_OFFSET_F2_F3]
SPARC_LEON3FT_B2BST_NOP
- std %f4, [%sp + FP_FRAME_OFFSET_F4_F5]
+ std %f4, [%sp + SPARC_FP_FRAME_OFFSET_F4_F5]
SPARC_LEON3FT_B2BST_NOP
- std %f6, [%sp + FP_FRAME_OFFSET_F6_F7]
+ std %f6, [%sp + SPARC_FP_FRAME_OFFSET_F6_F7]
SPARC_LEON3FT_B2BST_NOP
- std %f8, [%sp + FP_FRAME_OFFSET_F8_F9]
+ std %f8, [%sp + SPARC_FP_FRAME_OFFSET_F8_F9]
SPARC_LEON3FT_B2BST_NOP
- std %f10, [%sp + FP_FRAME_OFFSET_F1O_F11]
+ std %f10, [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11]
SPARC_LEON3FT_B2BST_NOP
- std %f12, [%sp + FP_FRAME_OFFSET_F12_F13]
+ std %f12, [%sp + SPARC_FP_FRAME_OFFSET_F12_F13]
SPARC_LEON3FT_B2BST_NOP
- std %f14, [%sp + FP_FRAME_OFFSET_F14_F15]
+ std %f14, [%sp + SPARC_FP_FRAME_OFFSET_F14_F15]
SPARC_LEON3FT_B2BST_NOP
- std %f16, [%sp + FP_FRAME_OFFSET_F16_F17]
+ std %f16, [%sp + SPARC_FP_FRAME_OFFSET_F16_F17]
SPARC_LEON3FT_B2BST_NOP
- std %f18, [%sp + FP_FRAME_OFFSET_F18_F19]
+ std %f18, [%sp + SPARC_FP_FRAME_OFFSET_F18_F19]
SPARC_LEON3FT_B2BST_NOP
- std %f20, [%sp + FP_FRAME_OFFSET_F2O_F21]
+ std %f20, [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21]
SPARC_LEON3FT_B2BST_NOP
- std %f22, [%sp + FP_FRAME_OFFSET_F22_F23]
+ std %f22, [%sp + SPARC_FP_FRAME_OFFSET_F22_F23]
SPARC_LEON3FT_B2BST_NOP
- std %f24, [%sp + FP_FRAME_OFFSET_F24_F25]
+ std %f24, [%sp + SPARC_FP_FRAME_OFFSET_F24_F25]
SPARC_LEON3FT_B2BST_NOP
- std %f26, [%sp + FP_FRAME_OFFSET_F26_F27]
+ std %f26, [%sp + SPARC_FP_FRAME_OFFSET_F26_F27]
SPARC_LEON3FT_B2BST_NOP
- std %f28, [%sp + FP_FRAME_OFFSET_F28_F29]
+ std %f28, [%sp + SPARC_FP_FRAME_OFFSET_F28_F29]
SPARC_LEON3FT_B2BST_NOP
- std %f30, [%sp + FP_FRAME_OFFSET_F3O_F31]
+ std %f30, [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31]
SPARC_LEON3FT_B2BST_NOP
- st %fsr, [%sp + FP_FRAME_OFFSET_FSR]
+ st %fsr, [%sp + SPARC_FP_FRAME_OFFSET_FSR]
call SYM(_Thread_Do_dispatch)
mov %g6, %o0
@@ -746,25 +687,25 @@ isr_dispatch:
* Restore the floating point context from stack frame and release the
* stack frame.
*/
- ldd [%sp + FP_FRAME_OFFSET_FO_F1], %f0
- ldd [%sp + FP_FRAME_OFFSET_F2_F3], %f2
- ldd [%sp + FP_FRAME_OFFSET_F4_F5], %f4
- ldd [%sp + FP_FRAME_OFFSET_F6_F7], %f6
- ldd [%sp + FP_FRAME_OFFSET_F8_F9], %f8
- ldd [%sp + FP_FRAME_OFFSET_F1O_F11], %f10
- ldd [%sp + FP_FRAME_OFFSET_F12_F13], %f12
- ldd [%sp + FP_FRAME_OFFSET_F14_F15], %f14
- ldd [%sp + FP_FRAME_OFFSET_F16_F17], %f16
- ldd [%sp + FP_FRAME_OFFSET_F18_F19], %f18
- ldd [%sp + FP_FRAME_OFFSET_F2O_F21], %f20
- ldd [%sp + FP_FRAME_OFFSET_F22_F23], %f22
- ldd [%sp + FP_FRAME_OFFSET_F24_F25], %f24
- ldd [%sp + FP_FRAME_OFFSET_F26_F27], %f26
- ldd [%sp + FP_FRAME_OFFSET_F28_F29], %f28
- ldd [%sp + FP_FRAME_OFFSET_F3O_F31], %f30
- ld [%sp + FP_FRAME_OFFSET_FSR], %fsr
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_FO_F1], %f0
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F2_F3], %f2
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F4_F5], %f4
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F6_F7], %f6
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F8_F9], %f8
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11], %f10
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F12_F13], %f12
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F14_F15], %f14
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F16_F17], %f16
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F18_F19], %f18
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21], %f20
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F22_F23], %f22
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F24_F25], %f24
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F26_F27], %f26
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F28_F29], %f28
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31], %f30
+ ld [%sp + SPARC_FP_FRAME_OFFSET_FSR], %fsr
ba .Lthread_dispatch_done
- add %sp, FP_FRAME_SIZE, %sp
+ add %sp, SPARC_FP_FRAME_SIZE, %sp
.Lnon_fp_thread_dispatch:
#endif
@@ -799,8 +740,8 @@ isr_dispatch:
* The CWP in place at this point may be different from
* that which was in effect at the beginning of the ISR if we
* have been context switched between the beginning of this invocation
- * of _ISR_Handler and this point. Thus the CWP and WIM should
- * not be changed back to their values at ISR entry time. Any
+ * of _SPARC_Interrupt_trap() and this point. Thus the CWP and WIM
+ * should not be changed back to their values at ISR entry time. Any
* changes to the PSR must preserve the CWP.
*/
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
index 8c5330b8ce..0abc929c54 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
@@ -743,14 +743,13 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
#ifndef ASM
-/*
- * ISR handler macros
- */
-
/**
- * Support routine to initialize the RTEMS vector table after it is allocated.
+ * @brief Dispatches the installed interrupt handlers.
+ *
+ * @param irq is the interrupt vector number of the external interrupt ranging
+ * from 0 to 15. This is not a trap number.
*/
-#define _CPU_Initialize_vectors()
+void _SPARC_Interrupt_dispatch( uint32_t irq );
/**
* Disable all interrupts for a critical section. The previous
@@ -971,6 +970,11 @@ void _CPU_Context_switch(
Context_Control *heir
);
+RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
+ Context_Control *executing,
+ Context_Control *heir
+);
+
/**
* @brief SPARC specific context restore.
*
@@ -1023,9 +1027,60 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
} while ( 0 )
#endif
+/**
+ * @brief This structure contains the local and input registers of a register
+ * window.
+ */
+typedef struct {
+ /** @brief This member contains the local 0..7 register values. */
+ uint32_t local[ 8 ];
+
+ /** @brief This member contains the input 0..7 register values. */
+ uint32_t input[ 8 ];
+} SPARC_Register_window;
+
+/**
+ * @brief This structure contains the register set of a context which caused an
+ * unexpected trap.
+ */
typedef struct {
+ /** @brief This member contains the PSR register value. */
+ uint32_t psr;
+
+ /** @brief This member contains the PC value. */
+ uint32_t pc;
+
+ /** @brief This member contains the nPC value. */
+ uint32_t npc;
+
+ /** @brief This member contains the trap number. */
uint32_t trap;
- CPU_Interrupt_frame *isf;
+
+ /** @brief This member contains the WIM register value. */
+ uint32_t wim;
+
+ /** @brief This member contains the Y register value. */
+ uint32_t y;
+
+ /** @brief This member contains the global 0..7 register values. */
+ uint32_t global[ 8 ];
+
+ /** @brief This member contains the output 0..7 register values. */
+ uint32_t output[ 8 ] ;
+
+ /**
+ * @brief This member contains the additional register windows according to
+ * the saved WIM.
+ */
+ SPARC_Register_window windows[ SPARC_NUMBER_OF_REGISTER_WINDOWS - 1 ];
+
+#if SPARC_HAS_FPU == 1
+ /** @brief This member contains the FSR register value. */
+ uint32_t fsr;
+
+ /** @brief This member contains the floating point 0..31 register values. */
+ uint64_t fp[ 16 ];
+#endif
} CPU_Exception_frame;
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
index a99da74fa9..8aa4030a45 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpuimpl.h
@@ -117,6 +117,49 @@
#endif
#endif
+#define SPARC_REGISTER_WINDOW_OFFSET_LOCAL( i ) ( ( i ) * 4 )
+#define SPARC_REGISTER_WINDOW_OFFSET_INPUT( i ) ( ( i ) * 4 + 32 )
+#define SPARC_REGISTER_WINDOW_SIZE 64
+
+#define SPARC_EXCEPTION_OFFSET_PSR 0
+#define SPARC_EXCEPTION_OFFSET_PC 4
+#define SPARC_EXCEPTION_OFFSET_NPC 8
+#define SPARC_EXCEPTION_OFFSET_TRAP 12
+#define SPARC_EXCEPTION_OFFSET_WIM 16
+#define SPARC_EXCEPTION_OFFSET_Y 20
+#define SPARC_EXCEPTION_OFFSET_GLOBAL( i ) ( ( i ) * 4 + 24 )
+#define SPARC_EXCEPTION_OFFSET_OUTPUT( i ) ( ( i ) * 4 + 56 )
+#define SPARC_EXCEPTION_OFFSET_WINDOWS( i ) ( ( i ) * 64 + 88 )
+
+#if SPARC_HAS_FPU == 1
+#define SPARC_EXCEPTION_OFFSET_FSR 536
+#define SPARC_EXCEPTION_OFFSET_FP( i ) ( ( i ) * 8 + 544 )
+#define SPARC_EXCEPTION_FRAME_SIZE 672
+#else
+#define SPARC_EXCEPTION_FRAME_SIZE 536
+#endif
+
+#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
+#define SPARC_FP_FRAME_OFFSET_FO_F1 (SPARC_MINIMUM_STACK_FRAME_SIZE + 0)
+#define SPARC_FP_FRAME_OFFSET_F2_F3 (SPARC_FP_FRAME_OFFSET_FO_F1 + 8)
+#define SPARC_FP_FRAME_OFFSET_F4_F5 (SPARC_FP_FRAME_OFFSET_F2_F3 + 8)
+#define SPARC_FP_FRAME_OFFSET_F6_F7 (SPARC_FP_FRAME_OFFSET_F4_F5 + 8)
+#define SPARC_FP_FRAME_OFFSET_F8_F9 (SPARC_FP_FRAME_OFFSET_F6_F7 + 8)
+#define SPARC_FP_FRAME_OFFSET_F1O_F11 (SPARC_FP_FRAME_OFFSET_F8_F9 + 8)
+#define SPARC_FP_FRAME_OFFSET_F12_F13 (SPARC_FP_FRAME_OFFSET_F1O_F11 + 8)
+#define SPARC_FP_FRAME_OFFSET_F14_F15 (SPARC_FP_FRAME_OFFSET_F12_F13 + 8)
+#define SPARC_FP_FRAME_OFFSET_F16_F17 (SPARC_FP_FRAME_OFFSET_F14_F15 + 8)
+#define SPARC_FP_FRAME_OFFSET_F18_F19 (SPARC_FP_FRAME_OFFSET_F16_F17 + 8)
+#define SPARC_FP_FRAME_OFFSET_F2O_F21 (SPARC_FP_FRAME_OFFSET_F18_F19 + 8)
+#define SPARC_FP_FRAME_OFFSET_F22_F23 (SPARC_FP_FRAME_OFFSET_F2O_F21 + 8)
+#define SPARC_FP_FRAME_OFFSET_F24_F25 (SPARC_FP_FRAME_OFFSET_F22_F23 + 8)
+#define SPARC_FP_FRAME_OFFSET_F26_F27 (SPARC_FP_FRAME_OFFSET_F24_F25 + 8)
+#define SPARC_FP_FRAME_OFFSET_F28_F29 (SPARC_FP_FRAME_OFFSET_F26_F27 + 8)
+#define SPARC_FP_FRAME_OFFSET_F3O_F31 (SPARC_FP_FRAME_OFFSET_F28_F29 + 8)
+#define SPARC_FP_FRAME_OFFSET_FSR (SPARC_FP_FRAME_OFFSET_F3O_F31 + 8)
+#define SPARC_FP_FRAME_SIZE (SPARC_FP_FRAME_OFFSET_FSR + 8)
+#endif
+
#ifndef ASM
#ifdef __cplusplus
diff --git a/cpukit/score/cpu/sparc/sparc-bad-trap.S b/cpukit/score/cpu/sparc/sparc-bad-trap.S
new file mode 100644
index 0000000000..2e73a4a7da
--- /dev/null
+++ b/cpukit/score/cpu/sparc/sparc-bad-trap.S
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUSPARC
+ *
+ * @brief This source file contains the implementation of _SPARC_Bad_trap().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/asm.h>
+#include <rtems/score/percpu.h>
+
+ /*
+ * The trap handler entry was set up by TRAP().
+ */
+ PUBLIC(_SPARC_Bad_trap)
+SYM(_SPARC_Bad_trap):
+
+ /*
+ * Do not use the existing stack since it may be invalid. Use the ISR
+ * stack for this processor. If the trap was caused from within
+ * interrupt context, then a return to the context which caused the
+ * trap would be unreliable.
+ */
+ set SYM(_ISR_Stack_size), %l5
+
+#if defined(RTEMS_SMP) && defined(__leon__)
+ rd %asr17, %l6
+ srl %l6, LEON3_ASR17_PROCESSOR_INDEX_SHIFT, %l6
+ add %l6, 1, %l4
+ smul %l4, %l5, %l5
+#endif
+ set SYM(_ISR_Stack_area_begin), %l7
+ add %l7, %l5, %l7
+ andn %l7, CPU_STACK_ALIGNMENT - 1, %l7
+
+ /*
+ * Establish an area on the stack for a CPU_Exception_frame.
+ */
+ sub %l7, SPARC_EXCEPTION_FRAME_SIZE, %l7
+
+ /*
+ * Start saving the context which caused the trap.
+ */
+ mov %wim, %l4
+ rd %y, %l5
+ std %l0, [%l7 + SPARC_EXCEPTION_OFFSET_PSR]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l2, [%l7 + SPARC_EXCEPTION_OFFSET_NPC]
+ SPARC_LEON3FT_B2BST_NOP
+ st %l4, [%l7 + SPARC_EXCEPTION_OFFSET_WIM]
+ st %l5, [%l7 + SPARC_EXCEPTION_OFFSET_Y]
+ std %g0, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(0)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %g2, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(2)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %g4, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(4)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %g6, [%l7 + SPARC_EXCEPTION_OFFSET_GLOBAL(6)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i0, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(0)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i2, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(2)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i4, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(4)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i6, [%l7 + SPARC_EXCEPTION_OFFSET_OUTPUT(6)]
+
+ /*
+ * Initialize %g6 since it may be corrupt.
+ */
+ set SYM(_Per_CPU_Information), %g6
+#if defined(RTEMS_SMP) && defined(__leon__)
+ sll %l6, PER_CPU_CONTROL_SIZE_LOG2, %l4
+ add %g6, %l4, %g6
+#endif
+
+ /*
+ * Disable WIM traps.
+ */
+ mov %g0, %wim
+ nop
+ nop
+ nop
+
+ /*
+ * Save the remaining register windows.
+ */
+ set SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g2
+ add %l7, SPARC_EXCEPTION_OFFSET_WINDOWS(0), %g3
+
+.Lsave_register_windows:
+
+ restore
+ std %l0, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(0)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l2, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(2)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l4, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(4)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l6, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_LOCAL(6)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i0, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(0)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i2, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(2)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i4, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(4)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i6, [%g3 + SPARC_REGISTER_WINDOW_OFFSET_INPUT(6)]
+ add %g3, SPARC_REGISTER_WINDOW_SIZE, %g3
+ subcc %g2, 1, %g2
+ bne .Lsave_register_windows
+ nop
+
+ /*
+ * Go back to register window at trap entry.
+ */
+ restore
+
+ /*
+ * Initialize the WIM based on the PSR[CWP] to have all register
+ * windows available for the fatal error procedure.
+ */
+ and %l0, SPARC_PSR_CWP_MASK, %l4
+ set 1, %l5
+ sll %l5, %l4, %l5
+ mov %l5, %wim
+
+#if SPARC_HAS_FPU == 1
+ /*
+ * Enable the FPU in the new PSR (PSR[EF] == 1).
+ */
+ sethi %hi(SPARC_PSR_EF_MASK), %l4
+ or %l0, %l4, %l0
+#endif
+
+ /*
+ * Enable traps and disable interrupts.
+ */
+ or %l0, 0xf20, %l0
+ wr %l0, %psr
+ nop
+ nop
+ nop
+
+#if SPARC_HAS_FPU == 1
+ st %fsr, [%l7 + SPARC_EXCEPTION_OFFSET_FSR]
+ std %f0, [%l7 + SPARC_EXCEPTION_OFFSET_FP(0)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f2, [%l7 + SPARC_EXCEPTION_OFFSET_FP(1)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f4, [%l7 + SPARC_EXCEPTION_OFFSET_FP(2)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f6, [%l7 + SPARC_EXCEPTION_OFFSET_FP(3)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f8, [%l7 + SPARC_EXCEPTION_OFFSET_FP(4)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f10, [%l7 + SPARC_EXCEPTION_OFFSET_FP(5)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f12, [%l7 + SPARC_EXCEPTION_OFFSET_FP(6)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f14, [%l7 + SPARC_EXCEPTION_OFFSET_FP(7)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f16, [%l7 + SPARC_EXCEPTION_OFFSET_FP(8)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f18, [%l7 + SPARC_EXCEPTION_OFFSET_FP(9)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f20, [%l7 + SPARC_EXCEPTION_OFFSET_FP(10)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f22, [%l7 + SPARC_EXCEPTION_OFFSET_FP(11)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f24, [%l7 + SPARC_EXCEPTION_OFFSET_FP(12)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f26, [%l7 + SPARC_EXCEPTION_OFFSET_FP(13)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f28, [%l7 + SPARC_EXCEPTION_OFFSET_FP(14)]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f30, [%l7 + SPARC_EXCEPTION_OFFSET_FP(15)]
+#endif
+
+#if !defined(SPARC_USE_LAZY_FP_SWITCH)
+ /*
+ * Call
+ * _Internal_error( INTERNAL_ERROR_ILLEGAL_USE_OF_FLOATING_POINT_UNIT )
+ * if necessary.
+ */
+ cmp %l3, 4
+ bne .Lno_fp_disable_trap
+ nop
+ call SYM(_Internal_error)
+ set 38, %o0
+.Lno_fp_disable_trap:
+#endif
+
+ /*
+ * Call _Terminate( RTEMS_FATAL_SOURCE_EXCEPTION, %l0 ).
+ */
+ sub %l7, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
+ set 9, %o0
+ call SYM(_Terminate)
+ mov %l7, %o1
diff --git a/cpukit/score/cpu/sparc/sparc-exception-frame-print.c b/cpukit/score/cpu/sparc/sparc-exception-frame-print.c
new file mode 100644
index 0000000000..f216c1dc63
--- /dev/null
+++ b/cpukit/score/cpu/sparc/sparc-exception-frame-print.c
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUSPARC
+ *
+ * @brief This source file contains the SPARC-specific implementation of
+ * _CPU_Exception_frame_print().
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/cpu.h>
+#include <rtems/bspIo.h>
+#include <inttypes.h>
+
+void _CPU_Exception_frame_print( const CPU_Exception_frame *frame )
+{
+ size_t i;
+ size_t j;
+ const char *desc;
+
+ switch ( SPARC_REAL_TRAP_NUMBER( frame->trap ) ) {
+ case 0x01:
+ desc = " (instruction access exception)";
+ break;
+ case 0x02:
+ desc = " (illegal instruction)";
+ break;
+ case 0x03:
+ desc = " (privileged instruction)";
+ break;
+ case 0x04:
+ desc = " (fp disabled)";
+ break;
+ case 0x05:
+ desc = " (window overflow)";
+ break;
+ case 0x06:
+ desc = " (window underflow)";
+ break;
+ case 0x07:
+ desc = " (memory address not aligned)";
+ break;
+ case 0x08:
+ desc = " (fp exception)";
+ break;
+ case 0x09:
+ desc = " (data access exception)";
+ break;
+ case 0x0A:
+ desc = " (tag overflow)";
+ break;
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1A:
+ case 0x1B:
+ case 0x1C:
+ case 0x1D:
+ case 0x1E:
+ case 0x1F:
+ desc = " (external interrupt)";
+ break;
+ case 0x24:
+ desc = " (cp disabled)";
+ break;
+ case 0x28:
+ desc = " (cp exception)";
+ break;
+ default:
+ desc = "";
+ break;
+ }
+
+ printk(
+ "\n"
+ "unexpected trap %" PRIu32 "%s\n"
+ "PSR = 0x%08" PRIx32 "\n"
+ "PC = 0x%08" PRIx32 "\n"
+ "nPC = 0x%08" PRIx32 "\n"
+ "WIM = 0x%08" PRIx32 "\n"
+ "Y = 0x%08" PRIx32 "\n",
+ frame->trap,
+ desc,
+ frame->psr,
+ frame->pc,
+ frame->npc,
+ frame->wim,
+ frame->y
+ );
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->global ); ++i ) {
+ printk( "g%zu = 0x%08" PRIx32 "\n", i, frame->global[ i ] );
+ }
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->output ); ++i ) {
+ printk( "o%zu[CWP - 0] = 0x%08" PRIx32 "\n", i, frame->output[ i ] );
+ }
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->windows ); ++i ) {
+ const SPARC_Register_window *win;
+
+ win = &frame->windows[ i ];
+
+ for ( j = 0; j < RTEMS_ARRAY_SIZE( win->local ); ++j ) {
+ printk( "l%zu[CWP - %zu] = 0x%08" PRIx32 "\n", j, i, win->local[ j ] );
+ }
+
+ for ( j = 0; j < RTEMS_ARRAY_SIZE( win->input ); ++j ) {
+ printk( "i%zu[CWP - %zu] = 0x%08" PRIx32 "\n", j, i, win->input[ j ] );
+ }
+ }
+
+#if SPARC_HAS_FPU == 1
+ printk( "FSR = 0x%08" PRIx32 "\n", frame->fsr );
+
+ for ( i = 0; i < RTEMS_ARRAY_SIZE( frame->fp ); ++i ) {
+ j = i * 2;
+ printk( "fp%zu:fp%zu = 0x%016" PRIx64 "\n", j, j + 1, frame->fp[ i ] );
+ }
+#endif
+}
diff --git a/cpukit/score/cpu/sparc/sparc-isr-handler.S b/cpukit/score/cpu/sparc/sparc-isr-handler.S
new file mode 100644
index 0000000000..068fad1e84
--- /dev/null
+++ b/cpukit/score/cpu/sparc/sparc-isr-handler.S
@@ -0,0 +1,620 @@
+/*
+ * This file contains the basic algorithms for all assembly code used
+ * in a specific CPU port of RTEMS. These algorithms must be implemented
+ * in assembly language.
+ *
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2014, 2017 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * Ported to ERC32 implementation of the SPARC by On-Line Applications
+ * Research Corporation (OAR) under contract to the European Space
+ * Agency (ESA).
+ *
+ * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
+ * European Space Agency.
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/percpu.h>
+#include <libcpu/grlib-tn-0018.h>
+
+/*
+ * void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * We enter this handler from the 4 instructions in the trap table with
+ * the following registers assumed to be set as shown:
+ *
+ * l0 = PSR
+ * l1 = PC
+ * l2 = nPC
+ * l3 = trap type
+ *
+ * NOTE: By an executive defined convention, trap type is between 0 and 255 if
+ * it is an asynchronous trap and 256 and 511 if it is synchronous.
+ */
+
+ .align 4
+ PUBLIC(_ISR_Handler)
+SYM(_ISR_Handler):
+ /*
+ * Fix the return address for synchronous traps.
+ */
+
+ andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
+ ! Is this a synchronous trap?
+ be,a win_ovflow ! No, then skip the adjustment
+ nop ! DELAY
+ mov %l1, %l6 ! save trapped pc for debug info
+ mov %l2, %l1 ! do not return to the instruction
+ add %l2, 4, %l2 ! indicated
+
+win_ovflow:
+ /*
+ * Save the globals this block uses.
+ *
+ * These registers are not restored from the locals. Their contents
+ * are saved directly from the locals into the ISF below.
+ */
+
+ mov %g4, %l4 ! save the globals this block uses
+ mov %g5, %l5
+
+ /*
+ * When at a "window overflow" trap, (wim == (1 << cwp)).
+ * If we get here like that, then process a window overflow.
+ */
+
+ rd %wim, %g4
+ srl %g4, %l0, %g5 ! g5 = win >> cwp ; shift count and CWP
+ ! are LS 5 bits ; how convenient :)
+ cmp %g5, 1 ! Is this an invalid window?
+ bne dont_do_the_window ! No, then skip all this stuff
+ ! we are using the delay slot
+
+ /*
+ * The following is same as a 1 position right rotate of WIM
+ */
+
+ srl %g4, 1, %g5 ! g5 = WIM >> 1
+ sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
+ ! g4 = WIM << (Number Windows - 1)
+ or %g4, %g5, %g4 ! g4 = (WIM >> 1) |
+ ! (WIM << (Number Windows - 1))
+
+ /*
+ * At this point:
+ *
+ * g4 = the new WIM
+ * g5 is free
+ */
+
+ /*
+ * Since we are tinkering with the register windows, we need to
+ * make sure that all the required information is in global registers.
+ */
+
+ save ! Save into the window
+ wr %g4, 0, %wim ! WIM = new WIM
+ nop ! delay slots
+ nop
+ nop
+
+ /*
+ * Now save the window just as if we overflowed to it.
+ */
+
+ std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+
+ std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
+ SPARC_LEON3FT_B2BST_NOP
+ std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
+
+ restore
+ nop
+
+dont_do_the_window:
+ /*
+ * Global registers %g4 and %g5 are saved directly from %l4 and
+ * %l5 directly into the ISF below.
+ */
+
+ /*
+ * Save the state of the interrupted task -- especially the global
+ * registers -- in the Interrupt Stack Frame. Note that the ISF
+ * includes a regular minimum stack frame which will be used if
+ * needed by register window overflow and underflow handlers.
+ *
+ * REGISTERS SAME AS AT _ISR_Handler
+ */
+
+ sub %fp, CPU_INTERRUPT_FRAME_SIZE, %sp
+ ! make space for ISF
+
+ std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC
+ SPARC_LEON3FT_B2BST_NOP
+ st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC
+ st %g1, [%sp + ISF_G1_OFFSET] ! save g1
+ std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3
+ SPARC_LEON3FT_B2BST_NOP
+ std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above
+ SPARC_LEON3FT_B2BST_NOP
+ st %g7, [%sp + ISF_G7_OFFSET] ! save g7
+
+ std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1
+ SPARC_LEON3FT_B2BST_NOP
+ std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3
+ SPARC_LEON3FT_B2BST_NOP
+ std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5
+ SPARC_LEON3FT_B2BST_NOP
+ std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7
+
+ rd %y, %g1
+ st %g1, [%sp + ISF_Y_OFFSET] ! save y
+ st %l6, [%sp + ISF_TPC_OFFSET] ! save real trapped pc
+
+ mov %sp, %o1 ! 2nd arg to ISR Handler
+
+ /*
+ * Increment ISR nest level and Thread dispatch disable level.
+ *
+ * Register usage for this section:
+ *
+ * l6 = _Thread_Dispatch_disable_level value
+ * l7 = _ISR_Nest_level value
+ *
+ * NOTE: It is assumed that l6 - l7 will be preserved until the ISR
+ * nest and thread dispatch disable levels are unnested.
+ */
+
+ ld [%g6 + PER_CPU_ISR_NEST_LEVEL], %l7
+ ld [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6
+
+ add %l7, 1, %l7
+ st %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
+ SPARC_LEON3FT_B2BST_NOP
+
+ add %l6, 1, %l6
+ st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+
+#if SPARC_HAS_FPU == 1
+ /*
+ * We cannot use an intermediate value for operations with the PSR[EF]
+ * bit since they use a 13-bit sign extension and PSR[EF] is bit 12.
+ */
+ sethi %hi(SPARC_PSR_EF_MASK), %l5
+#endif
+
+ /*
+ * If ISR nest level was zero (now 1), then switch stack.
+ */
+
+ mov %sp, %fp
+ subcc %l7, 1, %l7 ! outermost interrupt handler?
+ bnz dont_switch_stacks ! No, then do not switch stacks
+
+#if defined(RTEMS_PROFILING)
+ sethi %hi(_SPARC_Counter), %o5
+ ld [%o5 + %lo(_SPARC_Counter)], %l4
+ call %l4
+ nop
+ mov %o0, %o5
+#else
+ nop
+#endif
+
+ ld [%g6 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
+
+#if SPARC_HAS_FPU == 1
+ /*
+ * Test if the interrupted thread uses the floating point unit
+ * (PSR[EF] == 1). In case it uses the floating point unit, then store
+ * the floating point status register. This has the side-effect that
+ * all pending floating point operations complete before the store
+ * completes. The PSR[EF] bit is restored after the call to the
+ * interrupt handler. Thus post-switch actions (e.g. signal handlers)
+ * and context switch extensions may still corrupt the floating point
+ * context.
+ */
+ andcc %l0, %l5, %g0
+ beq dont_switch_stacks
+ nop
+ st %fsr, [%g6 + SPARC_PER_CPU_FSR_OFFSET]
+#endif
+
+dont_switch_stacks:
+ /*
+ * Make sure we have a place on the stack for the window overflow
+ * trap handler to write into. At this point it is safe to
+ * enable traps again.
+ */
+
+ sub %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
+
+ /*
+ * Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
+ * set the PIL in the %psr to mask off interrupts with lower priority.
+ * The original %psr in %l0 is not modified since it will be restored
+ * when the interrupt handler returns.
+ */
+
+ mov %l0, %g5
+ and %l3, 0x0ff, %g4
+ subcc %g4, 0x11, %g0
+ bl dont_fix_pil
+ subcc %g4, 0x1f, %g0
+ bg dont_fix_pil
+ sll %g4, 8, %g4
+ and %g4, SPARC_PSR_PIL_MASK, %g4
+ andn %l0, SPARC_PSR_PIL_MASK, %g5
+ ba pil_fixed
+ or %g4, %g5, %g5
+dont_fix_pil:
+ or %g5, SPARC_PSR_PIL_MASK, %g5
+pil_fixed:
+
+#if SPARC_HAS_FPU == 1
+ /*
+ * Clear the PSR[EF] bit of the interrupted context to ensure that
+ * interrupt service routines cannot corrupt the floating point context.
+ */
+ andn %g5, %l5, %g5
+#endif
+
+ wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
+
+ /*
+ * Vector to user's handler.
+ *
+ * NOTE: TBR may no longer have vector number in it since
+ * we just enabled traps. It is definitely in l3.
+ */
+
+ sethi %hi(SYM(_ISR_Vector_table)), %g4
+ or %g4, %lo(SYM(_ISR_Vector_table)), %g4
+ and %l3, 0xFF, %g5 ! remove synchronous trap indicator
+ sll %g5, 2, %g5 ! g5 = offset into table
+ ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ]
+
+
+ ! o1 = 2nd arg = address of the ISF
+ ! WAS LOADED WHEN ISF WAS SAVED!!!
+ mov %l3, %o0 ! o0 = 1st arg = vector number
+ call %g4
+#if defined(RTEMS_PROFILING)
+ mov %o5, %l3 ! save interrupt entry instant
+#else
+ nop ! delay slot
+#endif
+
+#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
+ mov %l0, %g1 ! PSR[EF] value of interrupted context
+ ta SPARC_SWTRAP_IRQDIS_FP ! **** DISABLE INTERRUPTS ****
+#else
+ ta SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS ****
+#endif
+
+#if defined(RTEMS_PROFILING)
+ cmp %l7, 0
+ bne profiling_not_outer_most_exit
+ nop
+ call %l4 ! Call _SPARC_Counter.counter_read
+ mov %g1, %l4 ! Save previous interrupt status
+ mov %o0, %o2 ! o2 = 3rd arg = interrupt exit instant
+ mov %l3, %o1 ! o1 = 2nd arg = interrupt entry instant
+ call SYM(_Profiling_Outer_most_interrupt_entry_and_exit)
+ mov %g6, %o0 ! o0 = 1st arg = per-CPU control
+profiling_not_outer_most_exit:
+#endif
+
+ /*
+ * Decrement ISR nest level and Thread dispatch disable level.
+ *
+ * Register usage for this section:
+ *
+ * o2 = g6->dispatch_necessary value
+ * o3 = g6->isr_dispatch_disable value
+ * l6 = g6->thread_dispatch_disable_level value
+ * l7 = g6->isr_nest_level value
+ */
+
+ ldub [%g6 + PER_CPU_DISPATCH_NEEDED], %o2
+ ld [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o3
+ st %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
+ SPARC_LEON3FT_B2BST_NOP
+ sub %l6, 1, %l6
+ st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+
+ /*
+ * Thread dispatching is necessary and allowed if and only if
+ * g6->dispatch_necessary == 1 and
+ * g6->isr_dispatch_disable == 0 and
+ * g6->thread_dispatch_disable_level == 0.
+ *
+ * Otherwise, continue with the simple return.
+ */
+ xor %o2, 1, %o2
+ or %o2, %l6, %o2
+ orcc %o2, %o3, %o2
+ bnz simple_return
+
+ /*
+ * Switch back on the interrupted tasks stack and add enough room to
+ * invoke the dispatcher. Doing this in the delay slot causes no harm,
+ * since the stack pointer (%sp) is not used in the simple return path.
+ */
+ sub %fp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
+
+isr_dispatch:
+
+ /* Set ISR dispatch disable and thread dispatch disable level to one */
+ mov 1, %l6
+ st %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+ st %l6, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]
+
+ /* Call _Thread_Do_dispatch(), this function will enable interrupts */
+
+ mov 0, %o1 ! ISR level for _Thread_Do_dispatch()
+
+#if defined(SPARC_USE_LAZY_FP_SWITCH)
+ /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
+ andcc %l0, %l5, %g0
+ be .Lnon_fp_thread_dispatch
+ ld [%g6 + PER_CPU_OFFSET_EXECUTING], %l6
+
+ /* Set new floating point unit owner to executing thread */
+ st %l6, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET]
+
+ call SYM(_Thread_Do_dispatch)
+ mov %g6, %o0
+
+ /*
+ * If we are still the floating point unit owner, then reset the
+ * floating point unit owner to NULL, otherwise clear PSR[EF] in the
+ * interrupt frame and let the FP disabled system call do the floating
+ * point context save/restore.
+ */
+ ld [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET], %l7
+ cmp %l6, %l7
+ bne,a .Ldisable_fp
+ andn %l0, %l5, %l0
+ st %g0, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET]
+ ba .Lthread_dispatch_done
+ nop
+.Ldisable_fp:
+ st %l0, [%fp + ISF_PSR_OFFSET]
+ ba .Lthread_dispatch_done
+ nop
+.Lnon_fp_thread_dispatch:
+#elif defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
+ /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
+ andcc %l0, %l5, %g0
+ be .Lnon_fp_thread_dispatch
+ nop
+
+ /*
+ * Yes, this is a floating point thread, then save the floating point
+ * context to a new stack frame. Then do the thread dispatch.
+ * Post-switch actions (e.g. signal handlers) and context switch
+ * extensions may safely use the floating point unit.
+ */
+ sub %sp, SPARC_FP_FRAME_SIZE, %sp
+ std %f0, [%sp + SPARC_FP_FRAME_OFFSET_FO_F1]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f2, [%sp + SPARC_FP_FRAME_OFFSET_F2_F3]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f4, [%sp + SPARC_FP_FRAME_OFFSET_F4_F5]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f6, [%sp + SPARC_FP_FRAME_OFFSET_F6_F7]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f8, [%sp + SPARC_FP_FRAME_OFFSET_F8_F9]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f10, [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f12, [%sp + SPARC_FP_FRAME_OFFSET_F12_F13]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f14, [%sp + SPARC_FP_FRAME_OFFSET_F14_F15]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f16, [%sp + SPARC_FP_FRAME_OFFSET_F16_F17]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f18, [%sp + SPARC_FP_FRAME_OFFSET_F18_F19]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f20, [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f22, [%sp + SPARC_FP_FRAME_OFFSET_F22_F23]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f24, [%sp + SPARC_FP_FRAME_OFFSET_F24_F25]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f26, [%sp + SPARC_FP_FRAME_OFFSET_F26_F27]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f28, [%sp + SPARC_FP_FRAME_OFFSET_F28_F29]
+ SPARC_LEON3FT_B2BST_NOP
+ std %f30, [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31]
+ SPARC_LEON3FT_B2BST_NOP
+ st %fsr, [%sp + SPARC_FP_FRAME_OFFSET_FSR]
+ call SYM(_Thread_Do_dispatch)
+ mov %g6, %o0
+
+ /*
+ * Restore the floating point context from stack frame and release the
+ * stack frame.
+ */
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_FO_F1], %f0
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F2_F3], %f2
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F4_F5], %f4
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F6_F7], %f6
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F8_F9], %f8
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11], %f10
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F12_F13], %f12
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F14_F15], %f14
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F16_F17], %f16
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F18_F19], %f18
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21], %f20
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F22_F23], %f22
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F24_F25], %f24
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F26_F27], %f26
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F28_F29], %f28
+ ldd [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31], %f30
+ ld [%sp + SPARC_FP_FRAME_OFFSET_FSR], %fsr
+ ba .Lthread_dispatch_done
+ add %sp, SPARC_FP_FRAME_SIZE, %sp
+
+.Lnon_fp_thread_dispatch:
+#endif
+
+ call SYM(_Thread_Do_dispatch)
+ mov %g6, %o0
+
+#if SPARC_HAS_FPU == 1
+.Lthread_dispatch_done:
+#endif
+
+ ta SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS ****
+
+ /*
+ * While we had ISR dispatching disabled in this thread,
+ * did we miss anything? If so, then we need to do another
+ * _Thread_Do_dispatch() before leaving this ISR dispatch context.
+ */
+ ldub [%g6 + PER_CPU_DISPATCH_NEEDED], %l7
+
+ orcc %l7, %g0, %g0 ! Is a thread dispatch necessary?
+ bne isr_dispatch ! Yes, then invoke the dispatcher again.
+ mov 0, %o1 ! ISR level for _Thread_Do_dispatch()
+
+ /*
+ * No, then set the ISR dispatch disable flag to zero and continue with
+ * the simple return.
+ */
+ st %g0, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]
+
+ /*
+ * The CWP in place at this point may be different from
+ * that which was in effect at the beginning of the ISR if we
+ * have been context switched between the beginning of this invocation
+ * of _ISR_Handler and this point. Thus the CWP and WIM should
+ * not be changed back to their values at ISR entry time. Any
+ * changes to the PSR must preserve the CWP.
+ */
+
+simple_return:
+ ld [%fp + ISF_Y_OFFSET], %l5 ! restore y
+ wr %l5, 0, %y
+
+ ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC
+ ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC
+ rd %psr, %l3
+ and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP
+ andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task
+ or %l3, %l0, %l0 ! install it later...
+ andn %l0, SPARC_PSR_ET_MASK, %l0
+
+ /*
+ * Restore tasks global and out registers
+ */
+
+ mov %fp, %g1
+
+ ! g1 is restored later
+ ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3
+ ldd [%fp + ISF_G4_OFFSET], %g4 ! restore g4, g5
+ ld [%fp + ISF_G7_OFFSET], %g7 ! restore g7
+
+ ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1
+ ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3
+ ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5
+ ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7
+
+ /*
+ * Registers:
+ *
+ * ALL global registers EXCEPT G1 and the input registers have
+ * already been restored and thus are off limits.
+ *
+ * The following is the contents of the local registers:
+ *
+ * l0 = original psr
+ * l1 = return address (i.e. PC)
+ * l2 = nPC
+ * l3 = CWP
+ */
+
+ /*
+ * if (CWP + 1) is an invalid window then we need to reload it.
+ *
+ * WARNING: Traps should now be disabled
+ */
+
+ mov %l0, %psr ! **** DISABLE TRAPS ****
+ nop
+ nop
+ nop
+ rd %wim, %l4
+ add %l0, 1, %l6 ! l6 = cwp + 1
+ and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
+ srl %l4, %l6, %l5 ! l5 = win >> cwp + 1 ; shift count
+ ! and CWP are conveniently LS 5 bits
+ cmp %l5, 1 ! Is tasks window invalid?
+ bne good_task_window
+
+ /*
+ * The following code is the same as a 1 position left rotate of WIM.
+ */
+
+ sll %l4, 1, %l5 ! l5 = WIM << 1
+ srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
+ ! l4 = WIM >> (Number Windows - 1)
+ or %l4, %l5, %l4 ! l4 = (WIM << 1) |
+ ! (WIM >> (Number Windows - 1))
+
+ /*
+ * Now restore the window just as if we underflowed to it.
+ */
+
+ wr %l4, 0, %wim ! WIM = new WIM
+ nop ! must delay after writing WIM
+ nop
+ nop
+ restore ! now into the tasks window
+
+ ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
+ ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
+ ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
+ ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
+ ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
+ ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
+ ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
+ ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
+ ! reload of sp clobbers ISF
+ save ! Back to ISR dispatch window
+
+good_task_window:
+ TN0018_WAIT_IFLUSH %l3,%l4 ! GRLIB-TN-0018 work around macro
+
+ mov %l0, %psr ! **** DISABLE TRAPS ****
+ nop; nop; nop
+ ! and restore condition codes.
+ ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
+ TN0018_FIX %l3,%l4 ! GRLIB-TN-0018 work around macro
+ jmp %l1 ! transfer control and
+ rett %l2 ! go back to tasks window
+
+/* end of file */
diff --git a/cpukit/score/cpu/sparc/sparc-isr-install.c b/cpukit/score/cpu/sparc/sparc-isr-install.c
new file mode 100644
index 0000000000..017173c489
--- /dev/null
+++ b/cpukit/score/cpu/sparc/sparc-isr-install.c
@@ -0,0 +1,194 @@
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUSPARC
+ *
+ * @brief This source file contains the SPARC-specific implementation of
+ * _CPU_ISR_install_raw_handler() and _CPU_ISR_install_vector().
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/isr.h>
+#include <rtems/rtems/cache.h>
+
+/*
+ * This initializes the set of opcodes placed in each trap
+ * table entry. The routine which installs a handler is responsible
+ * for filling in the fields for the _handler address and the _vector
+ * trap type.
+ *
+ * The constants following this structure are masks for the fields which
+ * must be filled in when the handler is installed.
+ */
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+ 0xa1480000, /* mov %psr, %l0 */
+ 0x29000000, /* sethi %hi(_handler), %l4 */
+ 0x81c52000, /* jmp %l4 + %lo(_handler) */
+ 0xa6102000 /* mov _vector, %l3 */
+};
+
+/*
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs the specified handler as a "raw" non-executive
+ * supported trap handler (a.k.a. interrupt service routine).
+ *
+ * Input Parameters:
+ * vector - trap table entry number plus synchronous
+ * vs. asynchronous information
+ * new_handler - address of the handler to be installed
+ * old_handler - pointer to an address of the handler previously installed
+ *
+ * Output Parameters: NONE
+ * *new_handler - address of the handler previously installed
+ *
+ * NOTE:
+ *
+ * On the SPARC, there are really only 256 vectors. However, the executive
+ * has no easy, fast, reliable way to determine which traps are synchronous
+ * and which are asynchronous. By default, synchronous traps return to the
+ * instruction which caused the interrupt. So if you install a software
+ * trap handler as an executive interrupt handler (which is desirable since
+ * RTEMS takes care of window and register issues), then the executive needs
+ * to know that the return address is to the trap rather than the instruction
+ * following the trap.
+ *
+ * So vectors 0 through 255 are treated as regular asynchronous traps which
+ * provide the "correct" return address. Vectors 256 through 512 are assumed
+ * by the executive to be synchronous and to require that the return address
+ * be fudged.
+ *
+ * If you use this mechanism to install a trap handler which must reexecute
+ * the instruction which caused the trap, then it should be installed as
+ * an asynchronous trap. This will avoid the executive changing the return
+ * address.
+ */
+
+void _CPU_ISR_install_raw_handler(
+ uint32_t vector,
+ CPU_ISR_raw_handler new_handler,
+ CPU_ISR_raw_handler *old_handler
+)
+{
+ uint32_t real_vector;
+ CPU_Trap_table_entry *tbr;
+ CPU_Trap_table_entry *slot;
+ uint32_t u32_tbr;
+ uint32_t u32_handler;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = SPARC_REAL_TRAP_NUMBER( vector );
+
+ /*
+ * Get the current base address of the trap table and calculate a pointer
+ * to the slot we are interested in.
+ */
+
+ sparc_get_tbr( u32_tbr );
+
+ u32_tbr &= 0xfffff000;
+
+ tbr = (CPU_Trap_table_entry *) u32_tbr;
+
+ slot = &tbr[ real_vector ];
+
+ /*
+ * Get the address of the old_handler from the trap table.
+ *
+ * NOTE: The old_handler returned will be bogus if it does not follow
+ * the RTEMS model.
+ */
+
+#define HIGH_BITS_MASK 0xFFFFFC00
+#define HIGH_BITS_SHIFT 10
+#define LOW_BITS_MASK 0x000003FF
+
+ if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
+ u32_handler =
+ (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
+ (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
+ *old_handler = (CPU_ISR_raw_handler) u32_handler;
+ } else
+ *old_handler = 0;
+
+ /*
+ * Copy the template to the slot and then fix it.
+ */
+
+ *slot = _CPU_Trap_slot_template;
+
+ u32_handler = (uint32_t) new_handler;
+
+ slot->mov_vector_l3 |= vector;
+ slot->sethi_of_handler_to_l4 |=
+ (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
+ slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
+
+ /*
+ * There is no instruction cache snooping, so we need to invalidate
+ * the instruction cache to make sure that the processor sees the
+ * changes to the trap table. This step is required on both single-
+ * and multiprocessor systems.
+ *
+ * In a SMP configuration a change to the trap table might be
+ * missed by other cores. If the system state is up, the other
+ * cores can be notified using SMP messages that they need to
+ * flush their icache. If the up state has not been reached
+ * there is no need to notify other cores. They will do an
+ * automatic flush of the icache just after entering the up
+ * state, but before enabling interrupts.
+ */
+ rtems_cache_invalidate_entire_instruction();
+}
+
+void _CPU_ISR_install_vector(
+ uint32_t vector,
+ CPU_ISR_handler new_handler,
+ CPU_ISR_handler *old_handler
+)
+{
+ uint32_t real_vector;
+ CPU_ISR_raw_handler ignored;
+
+ /*
+ * Get the "real" trap number for this vector ignoring the synchronous
+ * versus asynchronous indicator included with our vector numbers.
+ */
+
+ real_vector = SPARC_REAL_TRAP_NUMBER( vector );
+
+ /*
+ * Return the previous ISR handler.
+ */
+
+ *old_handler = _ISR_Vector_table[ real_vector ];
+
+ /*
+ * Install the wrapper so this ISR can be invoked properly.
+ */
+
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
+
+ /*
+ * We put the actual user ISR address in '_ISR_vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ real_vector ] = new_handler;
+}
diff --git a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
index e0f56b8e89..3c41a922e7 100644
--- a/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h
@@ -652,12 +652,6 @@ extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
*/
/*
- * Support routine to initialize the RTEMS vector table after it is allocated.
- */
-
-#define _CPU_Initialize_vectors()
-
-/*
* Disable all interrupts for a critical section. The previous
* level is returned in _level.
*/
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
index 80dbc25214..1e97250188 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
@@ -155,8 +155,6 @@ typedef struct {
#ifndef ASM
-#define _CPU_Initialize_vectors()
-
#define _CPU_ISR_Enable(_level) \
{ \
amd64_enable_interrupts(); \
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index acc72fc1a6..f4dc372d33 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -188,7 +188,7 @@ int _Condition_Wait_timed(
&context.Base,
_Condition_Enqueue_with_timeout
);
- _Thread_queue_Context_set_timeout_argument( &context.Base, abstime );
+ _Thread_queue_Context_set_timeout_argument( &context.Base, abstime, true );
executing = _Condition_Do_wait( _condition, _mutex, &context );
eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
_Mutex_Acquire( _mutex );
@@ -243,7 +243,7 @@ int _Condition_Wait_recursive_timed(
&context.Base,
_Condition_Enqueue_with_timeout
);
- _Thread_queue_Context_set_timeout_argument( &context.Base, abstime );
+ _Thread_queue_Context_set_timeout_argument( &context.Base, abstime, true );
nest_level = _Condition_Unnest_mutex( _mutex );
executing = _Condition_Do_wait( _condition, &_mutex->_Mutex, &context );
eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
diff --git a/cpukit/score/src/coremsgclose.c b/cpukit/score/src/coremsgclose.c
index 1610d8166b..aae3d5ae82 100644
--- a/cpukit/score/src/coremsgclose.c
+++ b/cpukit/score/src/coremsgclose.c
@@ -51,11 +51,9 @@ void _CORE_message_queue_Close(
queue_context
);
- if ( the_message_queue->free_message_buffers != NULL ) {
- ( *the_message_queue->free_message_buffers )(
- the_message_queue->message_buffers
- );
- }
+ ( *the_message_queue->free_message_buffers )(
+ the_message_queue->message_buffers
+ );
_Thread_queue_Destroy( &the_message_queue->Wait_queue );
}
diff --git a/cpukit/score/src/isr.c b/cpukit/score/src/isr.c
index 519532b283..b77f682663 100644
--- a/cpukit/score/src/isr.c
+++ b/cpukit/score/src/isr.c
@@ -22,19 +22,9 @@
#include <rtems/score/isr.h>
#include <rtems/score/address.h>
-#include <rtems/score/interr.h>
#include <rtems/score/percpu.h>
-#include <rtems/score/stackimpl.h>
#include <rtems/config.h>
-#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
- ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ];
-#elif defined(CPU_INTERRUPT_NUMBER_OF_VECTORS)
- #error "CPU_INTERRUPT_NUMBER_OF_VECTORS is defined for non-simple vectored interrupts"
-#elif defined(CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER)
- #error "CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER is defined for non-simple vectored interrupts"
-#endif
-
void _ISR_Handler_initialization( void )
{
uint32_t cpu_max;
@@ -42,12 +32,6 @@ void _ISR_Handler_initialization( void )
size_t stack_size;
char *stack_low;
- _ISR_Nest_level = 0;
-
-#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
- _CPU_Initialize_vectors();
-#endif
-
stack_size = rtems_configuration_get_interrupt_stack_size();
cpu_max = rtems_configuration_get_maximum_processors();
stack_low = _ISR_Stack_area_begin;
diff --git a/cpukit/score/src/isrvectortable.c b/cpukit/score/src/isrvectortable.c
new file mode 100644
index 0000000000..4bd892757e
--- /dev/null
+++ b/cpukit/score/src/isrvectortable.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreISR
+ *
+ * @brief This source file contains the definition of ::_ISR_Vector_table.
+ */
+
+/*
+ * Copyright (C) 2014 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/isr.h>
+
+#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
+ ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ];
+#elif defined(CPU_INTERRUPT_NUMBER_OF_VECTORS)
+ #error "CPU_INTERRUPT_NUMBER_OF_VECTORS is defined for non-simple vectored interrupts"
+#elif defined(CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER)
+ #error "CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER is defined for non-simple vectored interrupts"
+#endif
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 88a390f323..f7e35093b2 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -206,7 +206,8 @@ int _Mutex_Acquire_timed(
} else {
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
&queue_context,
- abstime
+ abstime,
+ true
);
_Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
@@ -327,7 +328,8 @@ int _Mutex_recursive_Acquire_timed(
} else {
_Thread_queue_Context_set_enqueue_timeout_realtime_timespec(
&queue_context,
- abstime
+ abstime,
+ true
);
_Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
diff --git a/cpukit/score/src/stackallocatorfreenothing.c b/cpukit/score/src/objectfreenothing.c
index e341814b0c..0845d4c140 100644
--- a/cpukit/score/src/stackallocatorfreenothing.c
+++ b/cpukit/score/src/objectfreenothing.c
@@ -3,10 +3,10 @@
/**
* @file
*
- * @ingroup RTEMSScoreStack
+ * @ingroup RTEMSScoreObject
*
* @brief This source file contains the implementation of
- * _Stack_Free_nothing().
+ * _Objects_Free_nothing().
*/
/*
@@ -38,9 +38,9 @@
#include "config.h"
#endif
-#include <rtems/score/stackimpl.h>
+#include <rtems/score/objectimpl.h>
-void _Stack_Free_nothing( void *stack_area )
+void _Objects_Free_nothing( void *ptr )
{
- (void) stack_area;
+ (void) ptr;
}
diff --git a/cpukit/score/src/percpudata.c b/cpukit/score/src/percpudata.c
new file mode 100644
index 0000000000..c81c1b6a16
--- /dev/null
+++ b/cpukit/score/src/percpudata.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScorePerCPUData
+ *
+ * @brief This source file contains the per-CPU data linker set and its system
+ * initialization handler.
+ */
+
+/*
+ * Copyright (C) 2019, 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/percpudata.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/memory.h>
+#include <rtems/config.h>
+#include <rtems/sysinit.h>
+
+#include <string.h>
+
+RTEMS_LINKER_RWSET(
+ _Per_CPU_Data,
+#if defined(RTEMS_SMP)
+ /*
+ * In SMP configurations, prevent false cache line sharing of per-processor
+ * data with a proper alignment.
+ */
+ RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
+#endif
+ char
+);
+
+#if defined(RTEMS_SMP)
+static void _Per_CPU_Data_initialize( void )
+{
+ uintptr_t size;
+
+ size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+
+ if ( size > 0 ) {
+ const Memory_Information *mem;
+ Per_CPU_Control *cpu;
+ uint32_t cpu_index;
+ uint32_t cpu_max;
+
+ mem = _Memory_Get();
+ cpu = _Per_CPU_Get_by_index( 0 );
+ cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+
+ cpu_max = rtems_configuration_get_maximum_processors();
+
+ for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ cpu->data = _Memory_Allocate( mem, size, CPU_CACHE_LINE_BYTES );
+
+ if( cpu->data == NULL ) {
+ _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
+ }
+
+ memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
+ }
+ }
+}
+
+RTEMS_SYSINIT_ITEM(
+ _Per_CPU_Data_initialize,
+ RTEMS_SYSINIT_PER_CPU_DATA,
+ RTEMS_SYSINIT_ORDER_MIDDLE
+);
+#endif
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index dcfb55601a..845d19d1a8 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -5,27 +7,57 @@
*
* @brief This source file contains the implementation of
* _Scheduler_strong_APA_Add_processor(),
+ * _Scheduler_strong_APA_Allocate_processor(),
* _Scheduler_strong_APA_Ask_for_help(), _Scheduler_strong_APA_Block(),
- * _Scheduler_strong_APA_Initialize(),
+ * _Scheduler_strong_APA_Do_ask_for_help(),
+ * _Scheduler_strong_APA_Do_enqueue(),
+ * _Scheduler_strong_APA_Do_set_affinity(),
+ * _Scheduler_strong_APA_Do_update(), _Scheduler_strong_APA_Enqueue(),
+ * _Scheduler_strong_APA_Enqueue_scheduled(),
+ * _Scheduler_strong_APA_Extract_from_ready(),
+ * _Scheduler_strong_APA_Extract_from_scheduled(),
+ * _Scheduler_strong_APA_Find_highest_ready(),
+ * _Scheduler_strong_APA_Get_highest_ready(),
+ * _Scheduler_strong_APA_Get_lowest_reachable(),
+ * _Scheduler_strong_APA_Get_lowest_scheduled(),
+ * _Scheduler_strong_APA_Has_ready(),
+ * _Scheduler_strong_APA_Initialize(), _Scheduler_strong_APA_Insert_ready(),
+ * _Scheduler_strong_APA_Move_from_ready_to_scheduled(),
+ * _Scheduler_strong_APA_Move_from_scheduled_to_ready(),
* _Scheduler_strong_APA_Node_initialize(),
* _Scheduler_strong_APA_Reconsider_help_request(),
- * _Scheduler_strong_APA_Remove_processor(), _Scheduler_strong_APA_Unblock(),
- * _Scheduler_strong_APA_Update_priority(),
+ * _Scheduler_strong_APA_Register_idle(),
+ * _Scheduler_strong_APA_Remove_processor(),
+ * _Scheduler_strong_APA_Set_affinity(),
+ * _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(),
+ * _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(),
* _Scheduler_strong_APA_Withdraw_node(), and _Scheduler_strong_APA_Yield().
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2020 Richi Dubey
+ * Copyright (C) 2013, 2016 embedded brains GmbH (http://www.embedded-brains.de)
*
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -33,68 +65,220 @@
#endif
#include <rtems/score/schedulerstrongapa.h>
-#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
+#include <rtems/score/assert.h>
-static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self(
- Scheduler_Context *context
-)
+#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node )
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+{
+ return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+}
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
{
return (Scheduler_strong_APA_Context *) context;
}
-static Scheduler_strong_APA_Node *
+static inline Scheduler_strong_APA_Node *
_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
{
return (Scheduler_strong_APA_Node *) node;
}
-static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+static inline void _Scheduler_strong_APA_Do_update(
Scheduler_Context *context,
- Scheduler_Node *scheduled_to_ready
+ Scheduler_Node *node,
+ Priority_Control new_priority
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
-
- _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
- _Scheduler_priority_Ready_queue_enqueue_first(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
+ Scheduler_SMP_Node *smp_node;
+ (void) context;
+
+ smp_node = _Scheduler_SMP_Node_downcast( node );
+ _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}
-static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
- Scheduler_Context *context,
- Scheduler_Node *ready_to_scheduled
+/*
+ * Returns true if the Strong APA scheduler has ready nodes
+ * available for scheduling.
+ */
+static inline bool _Scheduler_strong_APA_Has_ready(
+ Scheduler_Context *context
)
{
Scheduler_strong_APA_Context *self;
+ const Chain_Node *tail;
+ Chain_Node *next;
Scheduler_strong_APA_Node *node;
- Priority_Control insert_priority;
self = _Scheduler_strong_APA_Get_self( context );
- node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
+ tail = _Chain_Immutable_tail( &self->Ready );
+ next = _Chain_First( &self->Ready );
+
+ while ( next != tail ) {
+ node = (Scheduler_strong_APA_Node *)STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+
+ if (
+ _Scheduler_SMP_Node_state( &node->Base.Base ) ==
+ SCHEDULER_SMP_NODE_READY
+ ) {
+ return true;
+ }
+
+ next = _Chain_Next( next );
+ }
+
+ return false;
+}
+
+static inline void _Scheduler_strong_APA_Set_scheduled(
+ Scheduler_strong_APA_Context *self,
+ Scheduler_Node *executing,
+ const Per_CPU_Control *cpu
+)
+{
+ self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
+}
+
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
+ const Scheduler_strong_APA_Context *self,
+ const Per_CPU_Control *cpu
+)
+{
+ return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
+}
+
+static inline void _Scheduler_strong_APA_Allocate_processor(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled_base,
+ Scheduler_Node *victim_base,
+ Per_CPU_Control *victim_cpu
+)
+{
+ Scheduler_strong_APA_Node *scheduled;
+ Scheduler_strong_APA_Context *self;
+
+ (void) victim_base;
+
+ scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
+ self = _Scheduler_strong_APA_Get_self( context );
- _Scheduler_priority_Ready_queue_extract(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
+ _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );
+
+ _Scheduler_SMP_Allocate_processor_exact(
+ context,
+ &( scheduled->Base.Base ),
+ NULL,
+ victim_cpu
);
- insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
+}
+
+/*
+ * Finds and returns the highest ready node present by accessing the
+ * _Strong_APA_Context->CPU with front and rear values.
+ */
+static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready(
+ Scheduler_strong_APA_Context *self,
+ uint32_t front,
+ uint32_t rear
+)
+{
+ Scheduler_Node *highest_ready;
+ Scheduler_strong_APA_CPU *CPU;
+ const Chain_Node *tail;
+ Chain_Node *next;
+ Scheduler_strong_APA_Node *node;
+ Priority_Control min_priority_num;
+ Priority_Control curr_priority;
+ Per_CPU_Control *assigned_cpu;
+ Scheduler_SMP_Node_state curr_state;
+ Per_CPU_Control *curr_CPU;
+
+ CPU = self->CPU;
+ /*
+ * When the first task accessed has nothing to compare its priority against.
+ * So, it is the task with the highest priority witnessed so far.
+ */
+ min_priority_num = UINT64_MAX;
+
+ while ( front <= rear ) {
+ curr_CPU = CPU[ front++ ].cpu;
+
+ tail = _Chain_Immutable_tail( &self->Ready );
+ next = _Chain_First( &self->Ready );
+
+ while ( next != tail ) {
+ node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+ /*
+ * Check if the curr_CPU is in the affinity set of the node.
+ */
+ if (
+ _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
+ ) {
+ curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
+
+ if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
+
+ if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
+ CPU[ ++rear ].cpu = assigned_cpu;
+ CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
+ /*
+ * The curr CPU of the queue invoked this node to add its CPU
+ * that it is executing on to the queue. So this node might get
+ * preempted because of the invoker curr_CPU and this curr_CPU
+ * is the CPU that node should preempt in case this node
+ * gets preempted.
+ */
+ node->cpu_to_preempt = curr_CPU;
+ }
+ } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
+ curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
+ curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+ if (
+ min_priority_num == UINT64_MAX ||
+ curr_priority < min_priority_num
+ ) {
+ min_priority_num = curr_priority;
+ highest_ready = &node->Base.Base;
+ /*
+ * In case curr_CPU is filter_CPU, we need to store the
+ * cpu_to_preempt value so that we go back to SMP_*
+ * function, rather than preempting the node ourselves.
+ */
+ node->cpu_to_preempt = curr_CPU;
+ }
+ }
+ }
+ next = _Chain_Next( next );
+ }
+ }
+
+ return highest_ready;
+}
+
+static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *ready_to_scheduled
+)
+{
+ Priority_Control insert_priority;
+
+ insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- _Chain_Insert_ordered_unprotected(
- &self->Base.Scheduled,
- &node->Base.Base.Node.Chain,
- &insert_priority,
- _Scheduler_SMP_Priority_less_equal
+ _Scheduler_SMP_Insert_scheduled(
+ context,
+ ready_to_scheduled,
+ insert_priority
);
}
-static void _Scheduler_strong_APA_Insert_ready(
+static inline void _Scheduler_strong_APA_Insert_ready(
Scheduler_Context *context,
Scheduler_Node *node_base,
Priority_Control insert_priority
@@ -106,228 +290,563 @@ static void _Scheduler_strong_APA_Insert_ready(
self = _Scheduler_strong_APA_Get_self( context );
node = _Scheduler_strong_APA_Node_downcast( node_base );
- if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
- _Scheduler_priority_Ready_queue_enqueue(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
- } else {
- _Scheduler_priority_Ready_queue_enqueue_first(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
+ if( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
+ _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
+ } else {
+ _Chain_Extract_unprotected( &node->Ready_node );
+ _Chain_Set_off_chain( &node->Ready_node );
+ _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
}
}
-static void _Scheduler_strong_APA_Extract_from_ready(
+static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
Scheduler_Context *context,
- Scheduler_Node *the_thread
+ Scheduler_Node *scheduled_to_ready
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( the_thread );
-
- _Scheduler_priority_Ready_queue_extract(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
+ Priority_Control insert_priority;
+
+ if( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
+ _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+ }
+
+ insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
+
+ _Scheduler_strong_APA_Insert_ready(
+ context,
+ scheduled_to_ready,
+ insert_priority
);
}
-static void _Scheduler_strong_APA_Do_update(
+/*
+ * Implement the BFS Algorithm for task departure to get the highest ready task
+ * for a particular CPU, returns the highest ready Scheduler_Node
+ * Scheduler_Node filter here points to the victim node that is blocked
+ * resulting which this function is called.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
Scheduler_Context *context,
- Scheduler_Node *node_to_update,
- Priority_Control new_priority
+ Scheduler_Node *filter
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( node_to_update );
-
- _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
- _Scheduler_priority_Ready_queue_update(
- &node->Ready_queue,
- SCHEDULER_PRIORITY_UNMAP( new_priority ),
- &self->Bit_map,
- &self->Ready[ 0 ]
- );
+ Scheduler_strong_APA_Context *self;
+ Per_CPU_Control *filter_cpu;
+ Scheduler_strong_APA_Node *node;
+ Scheduler_Node *highest_ready;
+ Scheduler_Node *curr_node;
+ Scheduler_Node *next_node;
+ Scheduler_strong_APA_CPU *CPU;
+ uint32_t front;
+ uint32_t rear;
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+
+ self = _Scheduler_strong_APA_Get_self( context );
+ /*
+ * Denotes front and rear of the queue
+ */
+ front = 0;
+ rear = -1;
+
+ filter_cpu = _Thread_Get_CPU( filter->user );
+ CPU = self->CPU;
+ cpu_max = _SMP_Get_processor_maximum();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ CPU[ cpu_index ].visited = false;
+ }
+
+ CPU[ ++rear ].cpu = filter_cpu;
+ CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
+
+ highest_ready = _Scheduler_strong_APA_Find_highest_ready(
+ self,
+ front,
+ rear
+ );
+
+ if ( highest_ready != filter ) {
+ /*
+ * Backtrack on the path from