summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@oarcorp.com>2012-07-13 11:38:40 -0500
committerJoel Sherrill <joel.sherrill@oarcorp.com>2012-07-13 11:38:40 -0500
commita8030171cc89f2478126792c577956ef3bdc11e7 (patch)
tree483634b67fdea3220c0ca465352360b43be47f01
parentReadd MIPS cpu specific .h files (diff)
downloadrtems-libbsd-a8030171cc89f2478126792c577956ef3bdc11e7.tar.bz2
Add ARM, PowerPC, and SPARC64 cpufunc.h files from FreeBSD
-rwxr-xr-xfreebsd-to-rtems.py9
-rw-r--r--freebsd/arm/include/freebsd/machine/cpufunc.h618
-rw-r--r--freebsd/powerpc/include/freebsd/machine/cpufunc.h199
-rw-r--r--freebsd/powerpc/include/freebsd/machine/psl.h120
-rw-r--r--freebsd/powerpc/include/freebsd/machine/spr.h698
-rw-r--r--freebsd/sparc64/include/freebsd/machine/asi.h260
-rw-r--r--freebsd/sparc64/include/freebsd/machine/cpufunc.h268
-rw-r--r--freebsd/sparc64/include/freebsd/machine/pstate.h63
8 files changed, 2235 insertions, 0 deletions
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index 9f2b935c..adc014af 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -1223,12 +1223,21 @@ devNic.addHeaderFiles(
devNic.addCPUDependentHeaderFiles(
[
+ 'arm/include/cpufunc.h',
'i386/include/specialreg.h',
'i386/include/md_var.h',
'i386/include/intr_machdep.h',
'i386/include/legacyvar.h',
'i386/include/pci_cfgreg.h',
'i386/include/cpufunc.h',
+ 'mips/include/cpufunc.h',
+ 'mips/include/cpuregs.h',
+ 'powerpc/include/cpufunc.h',
+ 'powerpc/include/psl.h',
+ 'powerpc/include/spr.h',
+ 'sparc64/include/cpufunc.h',
+ 'sparc64/include/asi.h',
+ 'sparc64/include/pstate.h',
]
)
diff --git a/freebsd/arm/include/freebsd/machine/cpufunc.h b/freebsd/arm/include/freebsd/machine/cpufunc.h
new file mode 100644
index 00000000..822fadb1
--- /dev/null
+++ b/freebsd/arm/include/freebsd/machine/cpufunc.h
@@ -0,0 +1,618 @@
+/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */
+
+/*-
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.h
+ *
+ * Prototypes for cpu, mmu and tlb related functions.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_HH_
+#define _MACHINE_CPUFUNC_HH_
+
+#ifdef _KERNEL
+
+#include <freebsd/sys/types.h>
+#include <freebsd/machine/cpuconf.h>
+#include <freebsd/machine/katelib.h> /* For in[bwl] and out[bwl] */
+
+static __inline void
+breakpoint(void)
+{
+ __asm(".word 0xe7ffffff");
+}
+
+struct cpu_functions {
+
+ /* CPU functions */
+
+ u_int (*cf_id) (void);
+ void (*cf_cpwait) (void);
+
+ /* MMU functions */
+
+ u_int (*cf_control) (u_int bic, u_int eor);
+ void (*cf_domains) (u_int domains);
+ void (*cf_setttb) (u_int ttb);
+ u_int (*cf_faultstatus) (void);
+ u_int (*cf_faultaddress) (void);
+
+ /* TLB functions */
+
+ void (*cf_tlb_flushID) (void);
+ void (*cf_tlb_flushID_SE) (u_int va);
+ void (*cf_tlb_flushI) (void);
+ void (*cf_tlb_flushI_SE) (u_int va);
+ void (*cf_tlb_flushD) (void);
+ void (*cf_tlb_flushD_SE) (u_int va);
+
+ /*
+ * Cache operations:
+ *
+ * We define the following primitives:
+ *
+ * icache_sync_all Synchronize I-cache
+ * icache_sync_range Synchronize I-cache range
+ *
+ * dcache_wbinv_all Write-back and Invalidate D-cache
+ * dcache_wbinv_range Write-back and Invalidate D-cache range
+ * dcache_inv_range Invalidate D-cache range
+ * dcache_wb_range Write-back D-cache range
+ *
+ * idcache_wbinv_all Write-back and Invalidate D-cache,
+ * Invalidate I-cache
+ * idcache_wbinv_range Write-back and Invalidate D-cache,
+ * Invalidate I-cache range
+ *
+ * Note that the ARM term for "write-back" is "clean". We use
+ * the term "write-back" since it's a more common way to describe
+ * the operation.
+ *
+ * There are some rules that must be followed:
+ *
+ * I-cache Synch (all or range):
+ * The goal is to synchronize the instruction stream,
+ *	so you may need to write-back dirty D-cache blocks
+ * first. If a range is requested, and you can't
+ * synchronize just a range, you have to hit the whole
+ * thing.
+ *
+ * D-cache Write-Back and Invalidate range:
+ * If you can't WB-Inv a range, you must WB-Inv the
+ * entire D-cache.
+ *
+ * D-cache Invalidate:
+ * If you can't Inv the D-cache, you must Write-Back
+ * and Invalidate. Code that uses this operation
+ * MUST NOT assume that the D-cache will not be written
+ * back to memory.
+ *
+ * D-cache Write-Back:
+ * If you can't Write-back without doing an Inv,
+ * that's fine. Then treat this as a WB-Inv.
+ * Skipping the invalidate is merely an optimization.
+ *
+ * All operations:
+ * Valid virtual addresses must be passed to each
+ * cache operation.
+ */
+ void (*cf_icache_sync_all) (void);
+ void (*cf_icache_sync_range) (vm_offset_t, vm_size_t);
+
+ void (*cf_dcache_wbinv_all) (void);
+ void (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
+ void (*cf_dcache_inv_range) (vm_offset_t, vm_size_t);
+ void (*cf_dcache_wb_range) (vm_offset_t, vm_size_t);
+
+ void (*cf_idcache_wbinv_all) (void);
+ void (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
+ void (*cf_l2cache_wbinv_all) (void);
+ void (*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
+ void (*cf_l2cache_inv_range) (vm_offset_t, vm_size_t);
+ void (*cf_l2cache_wb_range) (vm_offset_t, vm_size_t);
+
+ /* Other functions */
+
+ void (*cf_flush_prefetchbuf) (void);
+ void (*cf_drain_writebuf) (void);
+ void (*cf_flush_brnchtgt_C) (void);
+ void (*cf_flush_brnchtgt_E) (u_int va);
+
+ void (*cf_sleep) (int mode);
+
+ /* Soft functions */
+
+ int (*cf_dataabt_fixup) (void *arg);
+ int (*cf_prefetchabt_fixup) (void *arg);
+
+ void (*cf_context_switch) (void);
+
+ void (*cf_setup) (char *string);
+};
+
+extern struct cpu_functions cpufuncs;
+extern u_int cputype;
+
+#define cpu_id() cpufuncs.cf_id()
+#define cpu_cpwait() cpufuncs.cf_cpwait()
+
+#define cpu_control(c, e) cpufuncs.cf_control(c, e)
+#define cpu_domains(d) cpufuncs.cf_domains(d)
+#define cpu_setttb(t) cpufuncs.cf_setttb(t)
+#define cpu_faultstatus() cpufuncs.cf_faultstatus()
+#define cpu_faultaddress() cpufuncs.cf_faultaddress()
+
+#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
+#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
+#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
+#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
+#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
+#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
+
+#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
+#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
+
+#define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
+#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
+
+#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
+#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
+#define cpu_l2cache_wbinv_all() cpufuncs.cf_l2cache_wbinv_all()
+#define cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
+#define cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
+#define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
+
+#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
+#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
+#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
+#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
+
+#define cpu_sleep(m) cpufuncs.cf_sleep(m)
+
+#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
+#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
+#define ABORT_FIXUP_OK 0 /* fixup succeeded */
+#define ABORT_FIXUP_FAILED 1 /* fixup failed */
+#define ABORT_FIXUP_RETURN 2 /* abort handler should return */
+
+#define cpu_setup(a) cpufuncs.cf_setup(a)
+
+int set_cpufuncs (void);
+#define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
+#define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
+
+void cpufunc_nullop (void);
+int cpufunc_null_fixup (void *);
+int early_abort_fixup (void *);
+int late_abort_fixup (void *);
+u_int cpufunc_id (void);
+u_int cpufunc_control (u_int clear, u_int bic);
+void cpufunc_domains (u_int domains);
+u_int cpufunc_faultstatus (void);
+u_int cpufunc_faultaddress (void);
+
+#ifdef CPU_ARM3
+u_int arm3_control (u_int clear, u_int bic);
+void arm3_cache_flush (void);
+#endif /* CPU_ARM3 */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7)
+void arm67_setttb (u_int ttb);
+void arm67_tlb_flush (void);
+void arm67_tlb_purge (u_int va);
+void arm67_cache_flush (void);
+void arm67_context_switch (void);
+#endif /* CPU_ARM6 || CPU_ARM7 */
+
+#ifdef CPU_ARM6
+void arm6_setup (char *string);
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+void arm7_setup (char *string);
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+int arm7_dataabt_fixup (void *arg);
+void arm7tdmi_setup (char *string);
+void arm7tdmi_setttb (u_int ttb);
+void arm7tdmi_tlb_flushID (void);
+void arm7tdmi_tlb_flushID_SE (u_int va);
+void arm7tdmi_cache_flushID (void);
+void arm7tdmi_context_switch (void);
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+void arm8_setttb (u_int ttb);
+void arm8_tlb_flushID (void);
+void arm8_tlb_flushID_SE (u_int va);
+void arm8_cache_flushID (void);
+void arm8_cache_flushID_E (u_int entry);
+void arm8_cache_cleanID (void);
+void arm8_cache_cleanID_E (u_int entry);
+void arm8_cache_purgeID (void);
+void arm8_cache_purgeID_E (u_int entry);
+
+void arm8_cache_syncI (void);
+void arm8_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void arm8_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+
+void arm8_context_switch (void);
+
+void arm8_setup (char *string);
+
+u_int arm8_clock_config (u_int, u_int);
+#endif
+
+#ifdef CPU_SA110
+void sa110_setup (char *string);
+void sa110_context_switch (void);
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa11x0_drain_readbuf (void);
+
+void sa11x0_context_switch (void);
+void sa11x0_cpu_sleep (int mode);
+
+void sa11x0_setup (char *string);
+#endif
+
+#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa1_setttb (u_int ttb);
+
+void sa1_tlb_flushID_SE (u_int va);
+
+void sa1_cache_flushID (void);
+void sa1_cache_flushI (void);
+void sa1_cache_flushD (void);
+void sa1_cache_flushD_SE (u_int entry);
+
+void sa1_cache_cleanID (void);
+void sa1_cache_cleanD (void);
+void sa1_cache_cleanD_E (u_int entry);
+
+void sa1_cache_purgeID (void);
+void sa1_cache_purgeID_E (u_int entry);
+void sa1_cache_purgeD (void);
+void sa1_cache_purgeD_E (u_int entry);
+
+void sa1_cache_syncI (void);
+void sa1_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void sa1_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+
+#endif
+
+#ifdef CPU_ARM9
+void arm9_setttb (u_int);
+
+void arm9_tlb_flushID_SE (u_int va);
+
+void arm9_icache_sync_all (void);
+void arm9_icache_sync_range (vm_offset_t, vm_size_t);
+
+void arm9_dcache_wbinv_all (void);
+void arm9_dcache_wbinv_range (vm_offset_t, vm_size_t);
+void arm9_dcache_inv_range (vm_offset_t, vm_size_t);
+void arm9_dcache_wb_range (vm_offset_t, vm_size_t);
+
+void arm9_idcache_wbinv_all (void);
+void arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);
+
+void arm9_context_switch (void);
+
+void arm9_setup (char *string);
+
+extern unsigned arm9_dcache_sets_max;
+extern unsigned arm9_dcache_sets_inc;
+extern unsigned arm9_dcache_index_max;
+extern unsigned arm9_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9E) || defined(CPU_ARM10)
+void arm10_setttb (u_int);
+
+void arm10_tlb_flushID_SE (u_int);
+void arm10_tlb_flushI_SE (u_int);
+
+void arm10_icache_sync_all (void);
+void arm10_icache_sync_range (vm_offset_t, vm_size_t);
+
+void arm10_dcache_wbinv_all (void);
+void arm10_dcache_wbinv_range (vm_offset_t, vm_size_t);
+void arm10_dcache_inv_range (vm_offset_t, vm_size_t);
+void arm10_dcache_wb_range (vm_offset_t, vm_size_t);
+
+void arm10_idcache_wbinv_all (void);
+void arm10_idcache_wbinv_range (vm_offset_t, vm_size_t);
+
+void arm10_context_switch (void);
+
+void arm10_setup (char *string);
+
+extern unsigned arm10_dcache_sets_max;
+extern unsigned arm10_dcache_sets_inc;
+extern unsigned arm10_dcache_index_max;
+extern unsigned arm10_dcache_index_inc;
+
+u_int sheeva_control_ext (u_int, u_int);
+void sheeva_setttb (u_int);
+void sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t);
+void sheeva_dcache_inv_range (vm_offset_t, vm_size_t);
+void sheeva_dcache_wb_range (vm_offset_t, vm_size_t);
+void sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t);
+
+void sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t);
+void sheeva_l2cache_inv_range (vm_offset_t, vm_size_t);
+void sheeva_l2cache_wb_range (vm_offset_t, vm_size_t);
+void sheeva_l2cache_wbinv_all (void);
+#endif
+
+#ifdef CPU_ARM11
+void arm11_setttb (u_int);
+
+void arm11_tlb_flushID_SE (u_int);
+void arm11_tlb_flushI_SE (u_int);
+
+void arm11_context_switch (void);
+
+void arm11_setup (char *string);
+void arm11_tlb_flushID (void);
+void arm11_tlb_flushI (void);
+void arm11_tlb_flushD (void);
+void arm11_tlb_flushD_SE (u_int va);
+
+void arm11_drain_writebuf (void);
+#endif
+
+#if defined(CPU_ARM9E) || defined (CPU_ARM10)
+void armv5_ec_setttb(u_int);
+
+void armv5_ec_icache_sync_all(void);
+void armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);
+
+void armv5_ec_dcache_wbinv_all(void);
+void armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
+void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);
+
+void armv5_ec_idcache_wbinv_all(void);
+void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
+#endif
+
+#if defined (CPU_ARM10) || defined (CPU_ARM11)
+void armv5_setttb(u_int);
+
+void armv5_icache_sync_all(void);
+void armv5_icache_sync_range(vm_offset_t, vm_size_t);
+
+void armv5_dcache_wbinv_all(void);
+void armv5_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void armv5_dcache_inv_range(vm_offset_t, vm_size_t);
+void armv5_dcache_wb_range(vm_offset_t, vm_size_t);
+
+void armv5_idcache_wbinv_all(void);
+void armv5_idcache_wbinv_range(vm_offset_t, vm_size_t);
+
+extern unsigned armv5_dcache_sets_max;
+extern unsigned armv5_dcache_sets_inc;
+extern unsigned armv5_dcache_index_max;
+extern unsigned armv5_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+ defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
+ defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
+
+void armv4_tlb_flushID (void);
+void armv4_tlb_flushI (void);
+void armv4_tlb_flushD (void);
+void armv4_tlb_flushD_SE (u_int va);
+
+void armv4_drain_writebuf (void);
+#endif
+
+#if defined(CPU_IXP12X0)
+void ixp12x0_drain_readbuf (void);
+void ixp12x0_context_switch (void);
+void ixp12x0_setup (char *string);
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
+ defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
+void xscale_cpwait (void);
+
+void xscale_cpu_sleep (int mode);
+
+u_int xscale_control (u_int clear, u_int bic);
+
+void xscale_setttb (u_int ttb);
+
+void xscale_tlb_flushID_SE (u_int va);
+
+void xscale_cache_flushID (void);
+void xscale_cache_flushI (void);
+void xscale_cache_flushD (void);
+void xscale_cache_flushD_SE (u_int entry);
+
+void xscale_cache_cleanID (void);
+void xscale_cache_cleanD (void);
+void xscale_cache_cleanD_E (u_int entry);
+
+void xscale_cache_clean_minidata (void);
+
+void xscale_cache_purgeID (void);
+void xscale_cache_purgeID_E (u_int entry);
+void xscale_cache_purgeD (void);
+void xscale_cache_purgeD_E (u_int entry);
+
+void xscale_cache_syncI (void);
+void xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+void xscale_cache_flushD_rng (vm_offset_t start, vm_size_t end);
+
+void xscale_context_switch (void);
+
+void xscale_setup (char *string);
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
+ CPU_XSCALE_80219 */
+
+#ifdef CPU_XSCALE_81342
+
+void xscalec3_l2cache_purge (void);
+void xscalec3_cache_purgeID (void);
+void xscalec3_cache_purgeD (void);
+void xscalec3_cache_cleanID (void);
+void xscalec3_cache_cleanD (void);
+void xscalec3_cache_syncI (void);
+
+void xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
+void xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
+void xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
+void xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
+void xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end);
+
+void xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t);
+void xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end);
+void xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end);
+
+
+void xscalec3_setttb (u_int ttb);
+void xscalec3_context_switch (void);
+
+#endif /* CPU_XSCALE_81342 */
+
+#define tlb_flush cpu_tlb_flushID
+#define setttb cpu_setttb
+#define drain_writebuf cpu_drain_writebuf
+
+/*
+ * Macros for manipulating CPU interrupts
+ */
+static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));
+
+static __inline u_int32_t
+__set_cpsr_c(u_int bic, u_int eor)
+{
+ u_int32_t tmp, ret;
+
+ __asm __volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "bic %1, %0, %2\n" /* Clear bits */
+ "eor %1, %1, %3\n" /* XOR bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (bic), "r" (eor) : "memory");
+
+ return ret;
+}
+
+#define disable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), \
+ (mask) & (I32_bit | F32_bit)))
+
+#define enable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))
+
+#define restore_interrupts(old_cpsr) \
+ (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
+
+#define intr_disable() \
+ disable_interrupts(I32_bit | F32_bit)
+#define intr_restore(s) \
+ restore_interrupts(s)
+/* Functions to manipulate the CPSR. */
+u_int SetCPSR(u_int bic, u_int eor);
+u_int GetCPSR(void);
+
+/*
+ * Functions to manipulate cpu r13
+ * (in arm/arm32/setstack.S)
+ */
+
+void set_stackptr (u_int mode, u_int address);
+u_int get_stackptr (u_int mode);
+
+/*
+ * Miscellany
+ */
+
+int get_pc_str_offset (void);
+
+/*
+ * CPU functions from locore.S
+ */
+
+void cpu_reset (void) __attribute__((__noreturn__));
+
+/*
+ * Cache info variables.
+ */
+
+/* PRIMARY CACHE VARIABLES */
+extern int arm_picache_size;
+extern int arm_picache_line_size;
+extern int arm_picache_ways;
+
+extern int arm_pdcache_size; /* and unified */
+extern int arm_pdcache_line_size;
+extern int arm_pdcache_ways;
+
+extern int arm_pcache_type;
+extern int arm_pcache_unified;
+
+extern int arm_dcache_align;
+extern int arm_dcache_align_mask;
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_HH_ */
+
+/* End of cpufunc.h */
diff --git a/freebsd/powerpc/include/freebsd/machine/cpufunc.h b/freebsd/powerpc/include/freebsd/machine/cpufunc.h
new file mode 100644
index 00000000..7c240e2e
--- /dev/null
+++ b/freebsd/powerpc/include/freebsd/machine/cpufunc.h
@@ -0,0 +1,199 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_HH_
+#define _MACHINE_CPUFUNC_HH_
+
+/*
+ * Required for user-space atomic.h includes
+ */
+static __inline void
+powerpc_mb(void)
+{
+
+ __asm __volatile("eieio; sync" : : : "memory");
+}
+
+#ifdef _KERNEL
+
+#include <freebsd/sys/types.h>
+
+#include <freebsd/machine/psl.h>
+#include <freebsd/machine/spr.h>
+
+struct thread;
+
+#ifdef KDB
+void breakpoint(void);
+#else
+static __inline void
+breakpoint(void)
+{
+
+ return;
+}
+#endif
+
+/* CPU register mangling inlines */
+
+static __inline void
+mtmsr(register_t value)
+{
+
+ __asm __volatile ("mtmsr %0; isync" :: "r"(value));
+}
+
+static __inline register_t
+mfmsr(void)
+{
+ register_t value;
+
+ __asm __volatile ("mfmsr %0" : "=r"(value));
+
+ return (value);
+}
+
+static __inline void
+mtsrin(vm_offset_t va, register_t value)
+{
+
+ __asm __volatile ("mtsrin %0,%1" :: "r"(value), "r"(va));
+}
+
+static __inline register_t
+mfsrin(vm_offset_t va)
+{
+ register_t value;
+
+ __asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));
+
+ return (value);
+}
+
+static __inline void
+mtdec(register_t value)
+{
+
+ __asm __volatile ("mtdec %0" :: "r"(value));
+}
+
+static __inline register_t
+mfdec(void)
+{
+ register_t value;
+
+ __asm __volatile ("mfdec %0" : "=r"(value));
+
+ return (value);
+}
+
+static __inline register_t
+mfpvr(void)
+{
+ register_t value;
+
+ __asm __volatile ("mfpvr %0" : "=r"(value));
+
+ return (value);
+}
+
+static __inline u_quad_t
+mftb(void)
+{
+ u_quad_t tb;
+ uint32_t *tbup = (uint32_t *)&tb;
+ uint32_t *tblp = tbup + 1;
+
+ do {
+ *tbup = mfspr(TBR_TBU);
+ *tblp = mfspr(TBR_TBL);
+ } while (*tbup != mfspr(TBR_TBU));
+
+ return (tb);
+}
+
+static __inline void
+mttb(u_quad_t time)
+{
+
+ mtspr(TBR_TBWL, 0);
+ mtspr(TBR_TBWU, (uint32_t)(time >> 32));
+ mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
+}
+
+static __inline void
+eieio(void)
+{
+
+ __asm __volatile ("eieio");
+}
+
+static __inline void
+isync(void)
+{
+
+ __asm __volatile ("isync");
+}
+
+static __inline void
+powerpc_sync(void)
+{
+
+ __asm __volatile ("sync");
+}
+
+static __inline register_t
+intr_disable(void)
+{
+ register_t msr;
+
+ msr = mfmsr();
+ mtmsr(msr & ~PSL_EE);
+ return (msr);
+}
+
+static __inline void
+intr_restore(register_t msr)
+{
+
+ mtmsr(msr);
+}
+
+static __inline struct pcpu *
+powerpc_get_pcpup(void)
+{
+ struct pcpu *ret;
+
+ __asm __volatile("mfsprg %0, 0" : "=r"(ret));
+
+ return (ret);
+}
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_CPUFUNC_HH_ */
diff --git a/freebsd/powerpc/include/freebsd/machine/psl.h b/freebsd/powerpc/include/freebsd/machine/psl.h
new file mode 100644
index 00000000..4764c626
--- /dev/null
+++ b/freebsd/powerpc/include/freebsd/machine/psl.h
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: psl.h,v 1.5 2000/11/19 19:52:37 matt Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSL_HH_
+#define _MACHINE_PSL_HH_
+
+#if defined(E500)
+/*
+ * Machine State Register (MSR) - e500 core
+ *
+ * The PowerPC e500 does not implement the following bits:
+ *
+ * FP, FE0, FE1 - reserved, always cleared, setting has no effect.
+ *
+ */
+#define PSL_UCLE 0x04000000 /* User mode cache lock enable */
+#define PSL_SPE 0x02000000 /* SPE enable */
+#define PSL_WE 0x00040000 /* Wait state enable */
+#define PSL_CE 0x00020000 /* Critical interrupt enable */
+#define PSL_EE 0x00008000 /* External interrupt enable */
+#define PSL_PR 0x00004000 /* User mode */
+#define PSL_FP 0x00002000 /* Floating point available */
+#define PSL_ME 0x00001000 /* Machine check interrupt enable */
+#define PSL_FE0 0x00000800 /* Floating point exception mode 0 */
+#define PSL_UBLE 0x00000400 /* BTB lock enable */
+#define PSL_DE 0x00000200 /* Debug interrupt enable */
+#define PSL_FE1 0x00000100 /* Floating point exception mode 1 */
+#define PSL_IS 0x00000020 /* Instruction address space */
+#define PSL_DS 0x00000010 /* Data address space */
+#define PSL_PMM 0x00000004 /* Performance monitor mark */
+
+#define PSL_FE_DFLT 0x00000004 /* default: no FP */
+
+/* Initial kernel MSR, use IS=1 and DS=1. */
+#define PSL_KERNSET_INIT (PSL_IS | PSL_DS)
+#define PSL_KERNSET (PSL_CE | PSL_ME | PSL_EE)
+#define PSL_USERSET (PSL_KERNSET | PSL_PR)
+
+#else /* if defined(E500) */
+/*
+ * Machine State Register (MSR)
+ *
+ * The PowerPC 601 does not implement the following bits:
+ *
+ * VEC, POW, ILE, BE, RI, LE[*]
+ *
+ * [*] Little-endian mode on the 601 is implemented in the HID0 register.
+ */
+#define PSL_VEC 0x02000000 /* AltiVec vector unit available */
+#define PSL_POW 0x00040000 /* power management */
+#define PSL_ILE 0x00010000 /* interrupt endian mode (1 == le) */
+#define PSL_EE 0x00008000 /* external interrupt enable */
+#define PSL_PR 0x00004000 /* privilege mode (1 == user) */
+#define PSL_FP 0x00002000 /* floating point enable */
+#define PSL_ME 0x00001000 /* machine check enable */
+#define PSL_FE0 0x00000800 /* floating point interrupt mode 0 */
+#define PSL_SE 0x00000400 /* single-step trace enable */
+#define PSL_BE 0x00000200 /* branch trace enable */
+#define PSL_FE1 0x00000100 /* floating point interrupt mode 1 */
+#define PSL_IP 0x00000040 /* interrupt prefix */
+#define PSL_IR 0x00000020 /* instruction address relocation */
+#define PSL_DR 0x00000010 /* data address relocation */
+#define PSL_RI 0x00000002 /* recoverable interrupt */
+#define PSL_LE 0x00000001 /* endian mode (1 == le) */
+
+#define PSL_601_MASK ~(PSL_POW|PSL_ILE|PSL_BE|PSL_RI|PSL_LE)
+
+/*
+ * Floating-point exception modes:
+ */
+#define PSL_FE_DIS 0 /* none */
+#define PSL_FE_NONREC PSL_FE1 /* imprecise non-recoverable */
+#define PSL_FE_REC PSL_FE0 /* imprecise recoverable */
+#define PSL_FE_PREC (PSL_FE0 | PSL_FE1) /* precise */
+#define PSL_FE_DFLT PSL_FE_DIS /* default == none */
+
+/*
+ * Note that PSL_POW and PSL_ILE are not in the saved copy of the MSR
+ */
+#define PSL_MBO 0
+#define PSL_MBZ 0
+
+#define PSL_KERNSET (PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI)
+#define PSL_USERSET (PSL_KERNSET | PSL_PR)
+
+#define PSL_USERSTATIC (PSL_USERSET | PSL_IP | 0x87c0008c)
+
+#endif /* if defined(E500) */
+#endif /* _MACHINE_PSL_HH_ */
diff --git a/freebsd/powerpc/include/freebsd/machine/spr.h b/freebsd/powerpc/include/freebsd/machine/spr.h
new file mode 100644
index 00000000..b24a6ade
--- /dev/null
+++ b/freebsd/powerpc/include/freebsd/machine/spr.h
@@ -0,0 +1,698 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: spr.h,v 1.25 2002/08/14 15:38:40 matt Exp $
+ * $FreeBSD$
+ */
+#ifndef _POWERPC_SPR_HH_
+#define _POWERPC_SPR_HH_
+
+#ifndef _LOCORE
+#define mtspr(reg, val) \
+ __asm __volatile("mtspr %0,%1" : : "K"(reg), "r"(val))
+#define mfspr(reg) \
+ ( { register_t val; \
+ __asm __volatile("mfspr %0,%1" : "=r"(val) : "K"(reg)); \
+ val; } )
+
+/* The following routines allow manipulation of the full 64-bit width
+ * of SPRs on 64 bit CPUs in bridge mode */
+
+#define mtspr64(reg,valhi,vallo,scratch) \
+ __asm __volatile(" \
+ mfmsr %0; \
+ insrdi %0,%5,1,0; \
+ mtmsrd %0; \
+ isync; \
+ \
+ sld %1,%1,%4; \
+ or %1,%1,%2; \
+ mtspr %3,%1; \
+ srd %1,%1,%4; \
+ \
+ clrldi %0,%0,1; \
+ mtmsrd %0; \
+ isync;" \
+ : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32), "r"(1))
+
+#define mfspr64upper(reg,scratch) \
+ ( { register_t val; \
+ __asm __volatile(" \
+ mfmsr %0; \
+ insrdi %0,%4,1,0; \
+ mtmsrd %0; \
+ isync; \
+ \
+ mfspr %1,%2; \
+ srd %1,%1,%3; \
+ \
+ clrldi %0,%0,1; \
+ mtmsrd %0; \
+ isync;" \
+ : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1)); \
+ val; } )
+
+#endif /* _LOCORE */
+
+/*
+ * Special Purpose Register declarations.
+ *
+ * The first column in the comments indicates which PowerPC
+ * architectures the SPR is valid on - 4 for 4xx series,
+ * 6 for 6xx/7xx series and 8 for 8xx and 8xxx series.
+ */
+
+#define SPR_MQ 0x000 /* .6. 601 MQ register */
+#define SPR_XER 0x001 /* 468 Fixed Point Exception Register */
+#define SPR_RTCU_R 0x004 /* .6. 601 RTC Upper - Read */
+#define SPR_RTCL_R 0x005 /* .6. 601 RTC Lower - Read */
+#define SPR_LR 0x008 /* 468 Link Register */
+#define SPR_CTR 0x009 /* 468 Count Register */
+#define SPR_DSISR 0x012 /* .68 DSI exception source */
+#define DSISR_DIRECT 0x80000000 /* Direct-store error exception */
+#define DSISR_NOTFOUND 0x40000000 /* Translation not found */
+#define DSISR_PROTECT 0x08000000 /* Memory access not permitted */
+#define DSISR_INVRX 0x04000000 /* Reserve-indexed insn direct-store access */
+#define DSISR_STORE 0x02000000 /* Store operation */
+#define DSISR_DABR 0x00400000 /* DABR match */
+#define DSISR_SEGMENT 0x00200000 /* XXX; not in 6xx PEM */
+#define DSISR_EAR 0x00100000 /* eciwx/ecowx && EAR[E] == 0 */
+#define SPR_DAR 0x013 /* .68 Data Address Register */
+#define SPR_RTCU_W 0x014 /* .6. 601 RTC Upper - Write */
+#define SPR_RTCL_W 0x015 /* .6. 601 RTC Lower - Write */
+#define SPR_DEC 0x016 /* .68 DECrementer register */
+#define SPR_SDR1 0x019 /* .68 Page table base address register */
+#define SPR_SRR0 0x01a /* 468 Save/Restore Register 0 */
+#define SPR_SRR1 0x01b /* 468 Save/Restore Register 1 */
+#define SPR_DECAR 0x036 /* ..8 Decrementer auto reload */
+#define SPR_EIE 0x050 /* ..8 Exception Interrupt ??? */
+#define SPR_EID 0x051 /* ..8 Exception Interrupt ??? */
+#define SPR_NRI 0x052 /* ..8 Exception Interrupt ??? */
+#define SPR_USPRG0 0x100 /* 4.. User SPR General 0 */
+#define SPR_VRSAVE 0x100 /* .6. AltiVec VRSAVE */
+#define SPR_SPRG0 0x110 /* 468 SPR General 0 */
+#define SPR_SPRG1 0x111 /* 468 SPR General 1 */
+#define SPR_SPRG2 0x112 /* 468 SPR General 2 */
+#define SPR_SPRG3 0x113 /* 468 SPR General 3 */
+#define SPR_SPRG4 0x114 /* 4.. SPR General 4 */
+#define SPR_SPRG5 0x115 /* 4.. SPR General 5 */
+#define SPR_SPRG6 0x116 /* 4.. SPR General 6 */
+#define SPR_SPRG7 0x117 /* 4.. SPR General 7 */
+#define SPR_SCOMC 0x114 /* ... SCOM Address Register (970) */
+#define SPR_SCOMD 0x115 /* ... SCOM Data Register (970) */
+#define SPR_ASR 0x118 /* ... Address Space Register (PPC64) */
+#define SPR_EAR 0x11a /* .68 External Access Register */
+#define SPR_PVR 0x11f /* 468 Processor Version Register */
+#define MPC601 0x0001
+#define MPC603 0x0003
+#define MPC604 0x0004
+#define MPC602 0x0005
+#define MPC603e 0x0006
+#define MPC603ev 0x0007
+#define MPC750 0x0008
+#define MPC604ev 0x0009
+#define MPC7400 0x000c
+#define MPC620 0x0014
+#define IBM403 0x0020
+#define IBM401A1 0x0021
+#define IBM401B2 0x0022
+#define IBM401C2 0x0023
+#define IBM401D2 0x0024
+#define IBM401E2 0x0025
+#define IBM401F2 0x0026
+#define IBM401G2 0x0027
+#define IBM970 0x0039
+#define IBM970FX 0x003c
+#define IBMPOWER3 0x0041
+#define IBM970MP 0x0044
+#define IBM970GX 0x0045
+#define MPC860 0x0050
+#define MPC8240 0x0081
+#define IBM405GP 0x4011
+#define IBM405L 0x4161
+#define IBM750FX 0x7000
+#define MPC745X_P(v) ((v & 0xFFF8) == 0x8000)
+#define MPC7450 0x8000
+#define MPC7455 0x8001
+#define MPC7457 0x8002
+#define MPC7447A 0x8003
+#define MPC7448 0x8004
+#define MPC7410 0x800c
+#define MPC8245 0x8081
+#define FSL_E500v1 0x8020
+#define FSL_E500v2 0x8021
+
+#define SPR_IBAT0U 0x210 /* .68 Instruction BAT Reg 0 Upper */
+#define	SPR_IBAT0U		0x210	/* .6. Instruction BAT Reg 0 Upper (XXX: duplicate of the define on the previous line) */
+#define SPR_IBAT0L 0x211 /* .6. Instruction BAT Reg 0 Lower */
+#define SPR_IBAT1U 0x212 /* .6. Instruction BAT Reg 1 Upper */
+#define SPR_IBAT1L 0x213 /* .6. Instruction BAT Reg 1 Lower */
+#define SPR_IBAT2U 0x214 /* .6. Instruction BAT Reg 2 Upper */
+#define SPR_IBAT2L 0x215 /* .6. Instruction BAT Reg 2 Lower */
+#define SPR_IBAT3U 0x216 /* .6. Instruction BAT Reg 3 Upper */
+#define SPR_IBAT3L 0x217 /* .6. Instruction BAT Reg 3 Lower */
+#define SPR_DBAT0U 0x218 /* .6. Data BAT Reg 0 Upper */
+#define SPR_DBAT0L 0x219 /* .6. Data BAT Reg 0 Lower */
+#define SPR_DBAT1U 0x21a /* .6. Data BAT Reg 1 Upper */
+#define SPR_DBAT1L 0x21b /* .6. Data BAT Reg 1 Lower */
+#define SPR_DBAT2U 0x21c /* .6. Data BAT Reg 2 Upper */
+#define SPR_DBAT2L 0x21d /* .6. Data BAT Reg 2 Lower */
+#define SPR_DBAT3U 0x21e /* .6. Data BAT Reg 3 Upper */
+#define SPR_DBAT3L 0x21f /* .6. Data BAT Reg 3 Lower */
+#define SPR_IC_CST 0x230 /* ..8 Instruction Cache CSR */
+#define IC_CST_IEN 0x80000000 /* I cache is ENabled (RO) */
+#define IC_CST_CMD_INVALL 0x0c000000 /* I cache invalidate all */
+#define IC_CST_CMD_UNLOCKALL 0x0a000000 /* I cache unlock all */
+#define IC_CST_CMD_UNLOCK 0x08000000 /* I cache unlock block */
+#define IC_CST_CMD_LOADLOCK 0x06000000 /* I cache load & lock block */
+#define IC_CST_CMD_DISABLE 0x04000000 /* I cache disable */
+#define IC_CST_CMD_ENABLE 0x02000000 /* I cache enable */
+#define IC_CST_CCER1 0x00200000 /* I cache error type 1 (RO) */
+#define IC_CST_CCER2 0x00100000 /* I cache error type 2 (RO) */
+#define IC_CST_CCER3 0x00080000 /* I cache error type 3 (RO) */
+#define SPR_IBAT4U 0x230 /* .6. Instruction BAT Reg 4 Upper */
+#define SPR_IC_ADR 0x231 /* ..8 Instruction Cache Address */
+#define SPR_IBAT4L 0x231 /* .6. Instruction BAT Reg 4 Lower */
+#define SPR_IC_DAT 0x232 /* ..8 Instruction Cache Data */
+#define SPR_IBAT5U 0x232 /* .6. Instruction BAT Reg 5 Upper */
+#define SPR_IBAT5L 0x233 /* .6. Instruction BAT Reg 5 Lower */
+#define SPR_IBAT6U 0x234 /* .6. Instruction BAT Reg 6 Upper */
+#define SPR_IBAT6L 0x235 /* .6. Instruction BAT Reg 6 Lower */
+#define SPR_IBAT7U 0x236 /* .6. Instruction BAT Reg 7 Upper */
+#define SPR_IBAT7L 0x237 /* .6. Instruction BAT Reg 7 Lower */
+#define	SPR_DC_CST		0x230	/* ..8 Data Cache CSR (XXX(review): same SPR number as SPR_IC_CST; MPC8xx DC_CST is SPR 0x238 -- verify against upstream) */
+#define DC_CST_DEN 0x80000000 /* D cache ENabled (RO) */
+#define DC_CST_DFWT 0x40000000 /* D cache Force Write-Thru (RO) */
+#define DC_CST_LES 0x20000000 /* D cache Little Endian Swap (RO) */
+#define	  DC_CST_CMD_FLUSH	0x0e000000 /* D cache flush all */
+#define DC_CST_CMD_INVALL 0x0c000000 /* D cache invalidate all */
+#define DC_CST_CMD_UNLOCKALL 0x0a000000 /* D cache unlock all */
+#define DC_CST_CMD_UNLOCK 0x08000000 /* D cache unlock block */
+#define DC_CST_CMD_CLRLESWAP 0x07000000 /* D cache clr little-endian swap */
+#define DC_CST_CMD_LOADLOCK 0x06000000 /* D cache load & lock block */
+#define DC_CST_CMD_SETLESWAP 0x05000000 /* D cache set little-endian swap */
+#define DC_CST_CMD_DISABLE 0x04000000 /* D cache disable */
+#define DC_CST_CMD_CLRFWT 0x03000000 /* D cache clear forced write-thru */
+#define DC_CST_CMD_ENABLE 0x02000000 /* D cache enable */
+#define DC_CST_CMD_SETFWT 0x01000000 /* D cache set forced write-thru */
+#define DC_CST_CCER1 0x00200000 /* D cache error type 1 (RO) */
+#define DC_CST_CCER2 0x00100000 /* D cache error type 2 (RO) */
+#define DC_CST_CCER3 0x00080000 /* D cache error type 3 (RO) */
+#define SPR_DBAT4U 0x238 /* .6. Data BAT Reg 4 Upper */
+#define SPR_DC_ADR 0x231 /* ..8 Data Cache Address */
+#define SPR_DBAT4L 0x239 /* .6. Data BAT Reg 4 Lower */
+#define SPR_DC_DAT 0x232 /* ..8 Data Cache Data */
+#define SPR_DBAT5U 0x23a /* .6. Data BAT Reg 5 Upper */
+#define SPR_DBAT5L 0x23b /* .6. Data BAT Reg 5 Lower */
+#define SPR_DBAT6U 0x23c /* .6. Data BAT Reg 6 Upper */
+#define SPR_DBAT6L 0x23d /* .6. Data BAT Reg 6 Lower */
+#define SPR_DBAT7U 0x23e /* .6. Data BAT Reg 7 Upper */
+#define SPR_DBAT7L 0x23f /* .6. Data BAT Reg 7 Lower */
+#define SPR_MI_CTR 0x310 /* ..8 IMMU control */
+#define Mx_CTR_GPM 0x80000000 /* Group Protection Mode */
+#define Mx_CTR_PPM 0x40000000 /* Page Protection Mode */
+#define Mx_CTR_CIDEF 0x20000000 /* Cache-Inhibit DEFault */
+#define MD_CTR_WTDEF 0x20000000 /* Write-Through DEFault */
+#define Mx_CTR_RSV4 0x08000000 /* Reserve 4 TLB entries */
+#define MD_CTR_TWAM 0x04000000 /* TableWalk Assist Mode */
+#define Mx_CTR_PPCS 0x02000000 /* Priv/user state compare mode */
+#define Mx_CTR_TLB_INDX 0x000001f0 /* TLB index mask */
+#define Mx_CTR_TLB_INDX_BITPOS 8 /* TLB index shift */
+#define SPR_MI_AP 0x312 /* ..8 IMMU access protection */
+#define Mx_GP_SUPER(n) (0 << (2*(15-(n)))) /* access is supervisor */
+#define Mx_GP_PAGE (1 << (2*(15-(n)))) /* access is page protect */
+#define Mx_GP_SWAPPED (2 << (2*(15-(n)))) /* access is swapped */
+#define Mx_GP_USER (3 << (2*(15-(n)))) /* access is user */
+#define SPR_MI_EPN 0x313 /* ..8 IMMU effective number */
+#define Mx_EPN_EPN 0xfffff000 /* Effective Page Number mask */
+#define Mx_EPN_EV 0x00000020 /* Entry Valid */
+#define Mx_EPN_ASID 0x0000000f /* Address Space ID */
+#define SPR_MI_TWC 0x315 /* ..8 IMMU tablewalk control */
+#define MD_TWC_L2TB 0xfffff000 /* Level-2 Tablewalk Base */
+#define Mx_TWC_APG 0x000001e0 /* Access Protection Group */
+#define Mx_TWC_G 0x00000010 /* Guarded memory */
+#define Mx_TWC_PS 0x0000000c /* Page Size (L1) */
+#define MD_TWC_WT 0x00000002 /* Write-Through */
+#define Mx_TWC_V 0x00000001 /* Entry Valid */
+#define SPR_MI_RPN 0x316 /* ..8 IMMU real (phys) page number */
+#define Mx_RPN_RPN 0xfffff000 /* Real Page Number */
+#define Mx_RPN_PP 0x00000ff0 /* Page Protection */
+#define Mx_RPN_SPS 0x00000008 /* Small Page Size */
+#define Mx_RPN_SH 0x00000004 /* SHared page */
+#define Mx_RPN_CI 0x00000002 /* Cache Inhibit */
+#define Mx_RPN_V 0x00000001 /* Valid */
+#define SPR_MD_CTR 0x318 /* ..8 DMMU control */
+#define SPR_M_CASID 0x319 /* ..8 CASID */
+#define M_CASID 0x0000000f /* Current AS Id */
+#define SPR_MD_AP 0x31a /* ..8 DMMU access protection */
+#define SPR_MD_EPN 0x31b /* ..8 DMMU effective number */
+#define SPR_M_TWB 0x31c /* ..8 MMU tablewalk base */
+#define M_TWB_L1TB 0xfffff000 /* level-1 translation base */
+#define M_TWB_L1INDX 0x00000ffc /* level-1 index */
+#define SPR_MD_TWC 0x31d /* ..8 DMMU tablewalk control */
+#define SPR_MD_RPN 0x31e /* ..8 DMMU real (phys) page number */
+#define SPR_MD_TW 0x31f /* ..8 MMU tablewalk scratch */
+#define SPR_MI_CAM 0x330 /* ..8 IMMU CAM entry read */
+#define SPR_MI_RAM0 0x331 /* ..8 IMMU RAM entry read reg 0 */
+#define SPR_MI_RAM1 0x332 /* ..8 IMMU RAM entry read reg 1 */
+#define	SPR_MD_CAM		0x338	/* ..8 DMMU CAM entry read */
+#define	SPR_MD_RAM0		0x339	/* ..8 DMMU RAM entry read reg 0 */
+#define	SPR_MD_RAM1		0x33a	/* ..8 DMMU RAM entry read reg 1 */
+#define SPR_UMMCR2 0x3a0 /* .6. User Monitor Mode Control Register 2 */
+#define SPR_UMMCR0 0x3a8 /* .6. User Monitor Mode Control Register 0 */
+#define SPR_USIA 0x3ab /* .6. User Sampled Instruction Address */
+#define SPR_UMMCR1 0x3ac /* .6. User Monitor Mode Control Register 1 */
+#define SPR_ZPR 0x3b0 /* 4.. Zone Protection Register */
+#define SPR_MMCR2 0x3b0 /* .6. Monitor Mode Control Register 2 */
+#define SPR_MMCR2_THRESHMULT_32 0x80000000 /* Multiply MMCR0 threshold by 32 */
+#define SPR_MMCR2_THRESHMULT_2 0x00000000 /* Multiply MMCR0 threshold by 2 */
+#define SPR_PID 0x3b1 /* 4.. Process ID */
+#define SPR_PMC5 0x3b1 /* .6. Performance Counter Register 5 */
+#define SPR_PMC6 0x3b2 /* .6. Performance Counter Register 6 */
+#define SPR_CCR0 0x3b3 /* 4.. Core Configuration Register 0 */
+#define SPR_IAC3 0x3b4 /* 4.. Instruction Address Compare 3 */
+#define SPR_IAC4 0x3b5 /* 4.. Instruction Address Compare 4 */
+#define SPR_DVC1 0x3b6 /* 4.. Data Value Compare 1 */
+#define SPR_DVC2 0x3b7 /* 4.. Data Value Compare 2 */
+#define SPR_MMCR0 0x3b8 /* .6. Monitor Mode Control Register 0 */
+
+#define SPR_970MMCR0 0x31b /* ... Monitor Mode Control Register 0 (PPC 970) */
+#define SPR_970MMCR1 0x31e /* ... Monitor Mode Control Register 1 (PPC 970) */
+#define SPR_970MMCRA 0x312 /* ... Monitor Mode Control Register 2 (PPC 970) */
+#define	SPR_970MMCR0		0x31b	/* ... Monitor Mode Control Register 0 (PPC 970) (XXX: duplicate of the SPR_970MMCR0 define above) */
+#define SPR_970PMC1 0x313 /* ... PMC 1 */
+#define SPR_970PMC2 0x314 /* ... PMC 2 */
+#define SPR_970PMC3 0x315 /* ... PMC 3 */
+#define SPR_970PMC4 0x316 /* ... PMC 4 */
+#define SPR_970PMC5 0x317 /* ... PMC 5 */
+#define SPR_970PMC6 0x318 /* ... PMC 6 */
+#define SPR_970PMC7 0x319 /* ... PMC 7 */
+#define SPR_970PMC8 0x31a /* ... PMC 8 */
+
+#define SPR_MMCR0_FC 0x80000000 /* Freeze counters */
+#define SPR_MMCR0_FCS 0x40000000 /* Freeze counters in supervisor mode */
+#define SPR_MMCR0_FCP 0x20000000 /* Freeze counters in user mode */
+#define SPR_MMCR0_FCM1 0x10000000 /* Freeze counters when mark=1 */
+#define SPR_MMCR0_FCM0 0x08000000 /* Freeze counters when mark=0 */
+#define SPR_MMCR0_PMXE 0x04000000 /* Enable PM interrupt */
+#define SPR_MMCR0_FCECE 0x02000000 /* Freeze counters after event */
+#define SPR_MMCR0_TBSEL_15 0x01800000 /* Count bit 15 of TBL */
+#define SPR_MMCR0_TBSEL_19 0x01000000 /* Count bit 19 of TBL */
+#define SPR_MMCR0_TBSEL_23 0x00800000 /* Count bit 23 of TBL */
+#define SPR_MMCR0_TBSEL_31 0x00000000 /* Count bit 31 of TBL */
+#define SPR_MMCR0_TBEE 0x00400000 /* Time-base event enable */
+#define SPR_MMCRO_THRESHOLD(x) ((x) << 16) /* Threshold value */
+#define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */
+#define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */
+#define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */
+#define SPR_MMCR0_PMC1SEL(x) ((x) << 6) /* PMC1 selector */
+#define SPR_MMCR0_PMC2SEL(x) ((x) << 0) /* PMC2 selector */
+#define SPR_970MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */
+#define SPR_970MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */
+#define SPR_SGR 0x3b9 /* 4.. Storage Guarded Register */
+#define SPR_PMC1 0x3b9 /* .6. Performance Counter Register 1 */
+#define SPR_DCWR 0x3ba /* 4.. Data Cache Write-through Register */
+#define SPR_PMC2 0x3ba /* .6. Performance Counter Register 2 */
+#define SPR_SLER 0x3bb /* 4.. Storage Little Endian Register */
+#define SPR_SIA 0x3bb /* .6. Sampled Instruction Address */
+#define	SPR_MMCR1		0x3bc	/* .6. Monitor Mode Control Register 1 */
+#define SPR_MMCR1_PMC3SEL(x) ((x) << 27) /* PMC 3 selector */
+#define SPR_MMCR1_PMC4SEL(x) ((x) << 22) /* PMC 4 selector */
+#define SPR_MMCR1_PMC5SEL(x) ((x) << 17) /* PMC 5 selector */
+#define SPR_MMCR1_PMC6SEL(x) ((x) << 11) /* PMC 6 selector */
+
+#define SPR_SU0R 0x3bc /* 4.. Storage User-defined 0 Register */
+#define SPR_PMC3 0x3bd /* .6. Performance Counter Register 3 */
+#define SPR_PMC4 0x3be /* .6. Performance Counter Register 4 */
+#define SPR_DMISS 0x3d0 /* .68 Data TLB Miss Address Register */
+#define SPR_DCMP 0x3d1 /* .68 Data TLB Compare Register */
+#define SPR_HASH1 0x3d2 /* .68 Primary Hash Address Register */
+#define SPR_ICDBDR 0x3d3 /* 4.. Instruction Cache Debug Data Register */
+#define SPR_HASH2 0x3d3 /* .68 Secondary Hash Address Register */
+#define SPR_IMISS 0x3d4 /* .68 Instruction TLB Miss Address Register */
+#define SPR_TLBMISS 0x3d4 /* .6. TLB Miss Address Register */
+#define SPR_DEAR 0x3d5 /* 4.. Data Error Address Register */
+#define SPR_ICMP 0x3d5 /* .68 Instruction TLB Compare Register */
+#define SPR_PTEHI 0x3d5 /* .6. Instruction TLB Compare Register */
+#define SPR_EVPR 0x3d6 /* 4.. Exception Vector Prefix Register */
+#define SPR_RPA 0x3d6 /* .68 Required Physical Address Register */
+#define SPR_PTELO 0x3d6 /* .6. Required Physical Address Register */
+
+#define SPR_TSR 0x150 /* ..8 Timer Status Register */
+#define SPR_TCR 0x154 /* ..8 Timer Control Register */
+
+#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
+#define TSR_WIS 0x40000000 /* Watchdog Interrupt Status */
+#define TSR_WRS_MASK 0x30000000 /* Watchdog Reset Status */
+#define TSR_WRS_NONE 0x00000000 /* No watchdog reset has occurred */
+#define TSR_WRS_CORE 0x10000000 /* Core reset was forced by the watchdog */
+#define TSR_WRS_CHIP 0x20000000 /* Chip reset was forced by the watchdog */
+#define TSR_WRS_SYSTEM 0x30000000 /* System reset was forced by the watchdog */
+#define TSR_PIS 0x08000000 /* PIT Interrupt Status */
+#define TSR_DIS 0x08000000 /* Decrementer Interrupt Status */
+#define TSR_FIS 0x04000000 /* FIT Interrupt Status */
+
+#define TCR_WP_MASK 0xc0000000 /* Watchdog Period mask */
+#define TCR_WP_2_17 0x00000000 /* 2**17 clocks */
+#define TCR_WP_2_21 0x40000000 /* 2**21 clocks */
+#define TCR_WP_2_25 0x80000000 /* 2**25 clocks */
+#define TCR_WP_2_29 0xc0000000 /* 2**29 clocks */
+#define TCR_WRC_MASK 0x30000000 /* Watchdog Reset Control mask */
+#define TCR_WRC_NONE 0x00000000 /* No watchdog reset */
+#define TCR_WRC_CORE 0x10000000 /* Core reset */
+#define TCR_WRC_CHIP 0x20000000 /* Chip reset */
+#define TCR_WRC_SYSTEM 0x30000000 /* System reset */
+#define TCR_WIE 0x08000000 /* Watchdog Interrupt Enable */
+#define TCR_PIE 0x04000000 /* PIT Interrupt Enable */
+#define	  TCR_DIE		0x04000000 /* Decrementer Interrupt Enable */
+#define TCR_FP_MASK 0x03000000 /* FIT Period */
+#define TCR_FP_2_9 0x00000000 /* 2**9 clocks */
+#define TCR_FP_2_13 0x01000000 /* 2**13 clocks */
+#define TCR_FP_2_17 0x02000000 /* 2**17 clocks */
+#define TCR_FP_2_21 0x03000000 /* 2**21 clocks */
+#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
+#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+
+#define SPR_PIT 0x3db /* 4.. Programmable Interval Timer */
+#define SPR_SRR2 0x3de /* 4.. Save/Restore Register 2 */
+#define SPR_SRR3 0x3df /* 4.. Save/Restore Register 3 */
+#define SPR_HID0 0x3f0 /* ..8 Hardware Implementation Register 0 */
+#define SPR_HID1 0x3f1 /* ..8 Hardware Implementation Register 1 */
+#define SPR_HID4 0x3f4 /* ..8 Hardware Implementation Register 4 */
+#define SPR_HID5 0x3f6 /* ..8 Hardware Implementation Register 5 */
+
+#if defined(AIM)
+#define SPR_DBSR 0x3f0 /* 4.. Debug Status Register */
+#define DBSR_IC 0x80000000 /* Instruction completion debug event */
+#define DBSR_BT 0x40000000 /* Branch Taken debug event */
+#define DBSR_EDE 0x20000000 /* Exception debug event */
+#define DBSR_TIE 0x10000000 /* Trap Instruction debug event */
+#define DBSR_UDE 0x08000000 /* Unconditional debug event */
+#define DBSR_IA1 0x04000000 /* IAC1 debug event */
+#define DBSR_IA2 0x02000000 /* IAC2 debug event */
+#define DBSR_DR1 0x01000000 /* DAC1 Read debug event */
+#define DBSR_DW1 0x00800000 /* DAC1 Write debug event */
+#define DBSR_DR2 0x00400000 /* DAC2 Read debug event */
+#define DBSR_DW2 0x00200000 /* DAC2 Write debug event */
+#define DBSR_IDE 0x00100000 /* Imprecise debug event */
+#define DBSR_IA3 0x00080000 /* IAC3 debug event */
+#define DBSR_IA4 0x00040000 /* IAC4 debug event */
+#define DBSR_MRR 0x00000300 /* Most recent reset */
+#define SPR_DBCR0 0x3f2 /* 4.. Debug Control Register 0 */
+#define SPR_DBCR1 0x3bd /* 4.. Debug Control Register 1 */
+#define SPR_IAC1 0x3f4 /* 4.. Instruction Address Compare 1 */
+#define SPR_IAC2 0x3f5 /* 4.. Instruction Address Compare 2 */
+#define SPR_DAC1 0x3f6 /* 4.. Data Address Compare 1 */
+#define SPR_DAC2 0x3f7 /* 4.. Data Address Compare 2 */
+#define SPR_PIR 0x3ff /* .6. Processor Identification Register */
+#elif defined(E500)
+#define SPR_PIR 0x11e /* ..8 Processor Identification Register */
+#define SPR_DBSR 0x130 /* ..8 Debug Status Register */
+#define DBSR_IDE 0x80000000 /* Imprecise debug event. */
+#define DBSR_UDE 0x40000000 /* Unconditional debug event. */
+#define DBSR_MRR 0x30000000 /* Most recent Reset (mask). */
+#define DBSR_ICMP 0x08000000 /* Instr. complete debug event. */
+#define DBSR_BRT 0x04000000 /* Branch taken debug event. */
+#define DBSR_IRPT 0x02000000 /* Interrupt taken debug event. */
+#define DBSR_TRAP 0x01000000 /* Trap instr. debug event. */
+#define DBSR_IAC1 0x00800000 /* Instr. address compare #1. */
+#define DBSR_IAC2 0x00400000 /* Instr. address compare #2. */
+#define DBSR_IAC3 0x00200000 /* Instr. address compare #3. */
+#define DBSR_IAC4 0x00100000 /* Instr. address compare #4. */
+#define DBSR_DAC1R 0x00080000 /* Data addr. read compare #1. */
+#define DBSR_DAC1W 0x00040000 /* Data addr. write compare #1. */
+#define DBSR_DAC2R 0x00020000 /* Data addr. read compare #2. */
+#define DBSR_DAC2W 0x00010000 /* Data addr. write compare #2. */
+#define DBSR_RET 0x00008000 /* Return debug event. */
+#define SPR_DBCR0 0x134 /* ..8 Debug Control Register 0 */
+#define SPR_DBCR1 0x135 /* ..8 Debug Control Register 1 */
+#define SPR_IAC1 0x138 /* ..8 Instruction Address Compare 1 */
+#define SPR_IAC2 0x139 /* ..8 Instruction Address Compare 2 */
+#define SPR_DAC1 0x13c /* ..8 Data Address Compare 1 */
+#define SPR_DAC2 0x13d /* ..8 Data Address Compare 2 */
+#endif
+
+#define DBCR0_EDM 0x80000000 /* External Debug Mode */
+#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */
+#define DBCR0_RST_MASK 0x30000000 /* ReSeT */
+#define DBCR0_RST_NONE 0x00000000 /* No action */
+#define DBCR0_RST_CORE 0x10000000 /* Core reset */
+#define DBCR0_RST_CHIP 0x20000000 /* Chip reset */
+#define DBCR0_RST_SYSTEM 0x30000000 /* System reset */
+#define DBCR0_IC 0x08000000 /* Instruction Completion debug event */
+#define DBCR0_BT 0x04000000 /* Branch Taken debug event */
+#define DBCR0_EDE 0x02000000 /* Exception Debug Event */
+#define DBCR0_TDE 0x01000000 /* Trap Debug Event */
+#define DBCR0_IA1 0x00800000 /* IAC (Instruction Address Compare) 1 debug event */
+#define DBCR0_IA2 0x00400000 /* IAC 2 debug event */
+#define DBCR0_IA12 0x00200000 /* Instruction Address Range Compare 1-2 */
+#define DBCR0_IA12X 0x00100000 /* IA12 eXclusive */
+#define DBCR0_IA3 0x00080000 /* IAC 3 debug event */
+#define DBCR0_IA4 0x00040000 /* IAC 4 debug event */
+#define DBCR0_IA34 0x00020000 /* Instruction Address Range Compare 3-4 */
+#define DBCR0_IA34X 0x00010000 /* IA34 eXclusive */
+#define DBCR0_IA12T 0x00008000 /* Instruction Address Range Compare 1-2 range Toggle */
+#define DBCR0_IA34T 0x00004000 /* Instruction Address Range Compare 3-4 range Toggle */
+#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
+
+#define SPR_IABR 0x3f2 /* ..8 Instruction Address Breakpoint Register 0 */
+#define SPR_DABR 0x3f5 /* .6. Data Address Breakpoint Register */
+#define SPR_MSSCR0 0x3f6 /* .6. Memory SubSystem Control Register */
+#define MSSCR0_SHDEN 0x80000000 /* 0: Shared-state enable */
+#define MSSCR0_SHDPEN3 0x40000000 /* 1: ~SHD[01] signal enable in MEI mode */
+#define MSSCR0_L1INTVEN 0x38000000 /* 2-4: L1 data cache ~HIT intervention enable */
+#define MSSCR0_L2INTVEN 0x07000000 /* 5-7: L2 data cache ~HIT intervention enable*/
+#define MSSCR0_DL1HWF 0x00800000 /* 8: L1 data cache hardware flush */
+#define MSSCR0_MBO 0x00400000 /* 9: must be one */
+#define MSSCR0_EMODE 0x00200000 /* 10: MPX bus mode (read-only) */
+#define MSSCR0_ABD 0x00100000 /* 11: address bus driven (read-only) */
+#define MSSCR0_MBZ 0x000fffff /* 12-31: must be zero */
+#define SPR_L2PM 0x3f8 /* .6. L2 Private Memory Control Register */
+#define SPR_L2CR 0x3f9 /* .6. L2 Control Register */
+#define L2CR_L2E 0x80000000 /* 0: L2 enable */
+#define L2CR_L2PE 0x40000000 /* 1: L2 data parity enable */
+#define L2CR_L2SIZ 0x30000000 /* 2-3: L2 size */
+#define L2SIZ_2M 0x00000000
+#define L2SIZ_256K 0x10000000
+#define L2SIZ_512K 0x20000000
+#define L2SIZ_1M 0x30000000
+#define L2CR_L2CLK 0x0e000000 /* 4-6: L2 clock ratio */
+#define L2CLK_DIS 0x00000000 /* disable L2 clock */
+#define L2CLK_10 0x02000000 /* core clock / 1 */
+#define L2CLK_15 0x04000000 /* / 1.5 */
+#define L2CLK_20 0x08000000 /* / 2 */
+#define L2CLK_25 0x0a000000 /* / 2.5 */
+#define L2CLK_30 0x0c000000 /* / 3 */
+#define L2CR_L2RAM 0x01800000 /* 7-8: L2 RAM type */
+#define L2RAM_FLOWTHRU_BURST 0x00000000
+#define L2RAM_PIPELINE_BURST 0x01000000
+#define L2RAM_PIPELINE_LATE 0x01800000
+#define L2CR_L2DO 0x00400000 /* 9: L2 data-only.
+ Setting this bit disables instruction
+ caching. */
+#define L2CR_L2I 0x00200000 /* 10: L2 global invalidate. */
+#define L2CR_L2CTL 0x00100000 /* 11: L2 RAM control (ZZ enable).
+ Enables automatic operation of the
+ L2ZZ (low-power mode) signal. */
+#define L2CR_L2WT 0x00080000 /* 12: L2 write-through. */
+#define L2CR_L2TS 0x00040000 /* 13: L2 test support. */
+#define L2CR_L2OH 0x00030000 /* 14-15: L2 output hold. */
+#define L2CR_L2SL 0x00008000 /* 16: L2 DLL slow. */
+#define L2CR_L2DF 0x00004000 /* 17: L2 differential clock. */
+#define L2CR_L2BYP 0x00002000 /* 18: L2 DLL bypass. */
+#define L2CR_L2FA 0x00001000 /* 19: L2 flush assist (for software flush). */
+#define L2CR_L2HWF 0x00000800 /* 20: L2 hardware flush. */
+#define L2CR_L2IO 0x00000400 /* 21: L2 instruction-only. */
+#define L2CR_L2CLKSTP 0x00000200 /* 22: L2 clock stop. */
+#define L2CR_L2DRO 0x00000100 /* 23: L2DLL rollover checkstop enable. */
+#define L2CR_L2IP 0x00000001 /* 31: L2 global invalidate in */
+ /* progress (read only). */
+
+#define SPR_L3CR 0x3fa /* .6. L3 Control Register */
+#define L3CR_L3E 0x80000000 /* 0: L3 enable */
+#define L3CR_L3PE 0x40000000 /* 1: L3 data parity enable */
+#define L3CR_L3APE 0x20000000
+#define L3CR_L3SIZ 0x10000000 /* 3: L3 size (0=1MB, 1=2MB) */
+#define L3CR_L3CLKEN 0x08000000 /* 4: Enables L3_CLK[0:1] */
+#define L3CR_L3CLK 0x03800000
+#define L3CR_L3IO 0x00400000
+#define L3CR_L3CLKEXT 0x00200000
+#define L3CR_L3CKSPEXT 0x00100000
+#define L3CR_L3OH1 0x00080000
+#define L3CR_L3SPO 0x00040000
+#define L3CR_L3CKSP 0x00030000
+#define L3CR_L3PSP 0x0000e000
+#define L3CR_L3REP 0x00001000
+#define L3CR_L3HWF 0x00000800
+#define L3CR_L3I 0x00000400 /* 21: L3 global invalidate */
+#define L3CR_L3RT 0x00000300
+#define L3CR_L3NIRCA 0x00000080
+#define L3CR_L3DO 0x00000040
+#define L3CR_PMEN 0x00000004
+#define L3CR_PMSIZ 0x00000003
+
+#define SPR_DCCR 0x3fa /* 4.. Data Cache Cachability Register */
+#define SPR_ICCR 0x3fb /* 4.. Instruction Cache Cachability Register */
+#define SPR_THRM1 0x3fc /* .6. Thermal Management Register */
+#define SPR_THRM2 0x3fd /* .6. Thermal Management Register */
+#define SPR_THRM_TIN 0x80000000 /* Thermal interrupt bit (RO) */
+#define SPR_THRM_TIV 0x40000000 /* Thermal interrupt valid (RO) */
+#define SPR_THRM_THRESHOLD(x) ((x) << 23) /* Thermal sensor threshold */
+#define SPR_THRM_TID 0x00000004 /* Thermal interrupt direction */
+#define SPR_THRM_TIE 0x00000002 /* Thermal interrupt enable */
+#define SPR_THRM_VALID 0x00000001 /* Valid bit */
+#define SPR_THRM3 0x3fe /* .6. Thermal Management Register */
+#define SPR_THRM_TIMER(x) ((x) << 1) /* Sampling interval timer */
+#define SPR_THRM_ENABLE 0x00000001 /* TAU Enable */
+#define SPR_FPECR 0x3fe /* .6. Floating-Point Exception Cause Register */
+
+/* Time Base Register declarations */
+#define TBR_TBL 0x10c /* 468 Time Base Lower - read */
+#define TBR_TBU 0x10d /* 468 Time Base Upper - read */
+#define TBR_TBWL 0x11c /* 468 Time Base Lower - supervisor, write */
+#define TBR_TBWU 0x11d /* 468 Time Base Upper - supervisor, write */
+
+/* Performance counter declarations */
+#define PMC_OVERFLOW 0x80000000 /* Counter has overflowed */
+
+/* The first five countable [non-]events are common to many PMC's */
+#define PMCN_NONE 0 /* Count nothing */
+#define PMCN_CYCLES 1 /* Processor cycles */
+#define PMCN_ICOMP 2 /* Instructions completed */
+#define PMCN_TBLTRANS 3 /* TBL bit transitions */
+#define PCMN_IDISPATCH 4 /* Instructions dispatched */
+
+/* Similar things for the 970 PMC direct counters */
+#define PMC970N_NONE 0x8 /* Count nothing */
+#define PMC970N_CYCLES 0xf /* Processor cycles */
+#define PMC970N_ICOMP 0x9 /* Instructions completed */
+
+#if defined(AIM)
+
+#define SPR_ESR 0x3d4 /* 4.. Exception Syndrome Register */
+#define ESR_MCI 0x80000000 /* Machine check - instruction */
+#define ESR_PIL 0x08000000 /* Program interrupt - illegal */
+#define ESR_PPR 0x04000000 /* Program interrupt - privileged */
+#define ESR_PTR 0x02000000 /* Program interrupt - trap */
+#define ESR_ST 0x01000000 /* Store operation */
+#define ESR_DST 0x00800000 /* Data storage interrupt - store fault */
+#define ESR_DIZ 0x00800000 /* Data/instruction storage interrupt - zone fault */
+#define ESR_U0F 0x00008000 /* Data storage interrupt - U0 fault */
+
+#elif defined(E500)
+
+#define SPR_ESR 0x003e /* ..8 Exception Syndrome Register */
+#define ESR_PIL 0x08000000 /* Program interrupt - illegal */
+#define ESR_PPR 0x04000000 /* Program interrupt - privileged */
+#define ESR_PTR 0x02000000 /* Program interrupt - trap */
+#define ESR_ST 0x00800000 /* Store operation */
+#define ESR_DLK 0x00200000 /* Data storage, D cache locking */
+#define ESR_ILK 0x00100000 /* Data storage, I cache locking */
+#define ESR_BO 0x00020000 /* Data/instruction storage, byte ordering */
+#define ESR_SPE 0x00000080 /* SPE exception bit */
+
+#define SPR_CSRR0 0x03a /* ..8 58 Critical SRR0 */
+#define SPR_CSRR1 0x03b /* ..8 59 Critical SRR1 */
+#define SPR_MCSRR0 0x23a /* ..8 570 Machine check SRR0 */
+#define SPR_MCSRR1 0x23b /* ..8 571 Machine check SRR1 */
+
+#define SPR_SVR 0x3ff /* ..8 1023 System Version Register */
+#define SVR_MPC8533 0x803c
+#define SVR_MPC8533E 0x8034
+#define SVR_MPC8541 0x8072
+#define SVR_MPC8541E 0x807a
+#define SVR_MPC8548 0x8031
+#define SVR_MPC8548E 0x8039
+#define SVR_MPC8555 0x8071
+#define SVR_MPC8555E 0x8079
+#define SVR_MPC8572 0x80e0
+#define SVR_MPC8572E 0x80e8
+#define SVR_VER(svr) (((svr) >> 16) & 0xffff)
+
+#define SPR_PID0 0x030 /* ..8 Process ID Register 0 */
+#define SPR_PID1 0x279 /* ..8 Process ID Register 1 */
+#define SPR_PID2 0x27a /* ..8 Process ID Register 2 */
+
+#define SPR_TLB0CFG 0x2B0 /* ..8 TLB 0 Config Register */
+#define SPR_TLB1CFG 0x2B1 /* ..8 TLB 1 Config Register */
+#define TLBCFG_ASSOC_MASK 0xff000000 /* Associativity of TLB */
+#define TLBCFG_ASSOC_SHIFT 24
+#define TLBCFG_NENTRY_MASK 0x00000fff /* Number of entries in TLB */
+
+#define SPR_IVPR 0x03f /* ..8 Interrupt Vector Prefix Register */
+#define SPR_IVOR0 0x190 /* ..8 Critical input */
+#define SPR_IVOR1 0x191 /* ..8 Machine check */
+#define SPR_IVOR2 0x192
+#define SPR_IVOR3 0x193
+#define SPR_IVOR4 0x194
+#define SPR_IVOR5 0x195
+#define SPR_IVOR6 0x196
+#define SPR_IVOR7 0x197
+#define SPR_IVOR8 0x198
+#define SPR_IVOR9 0x199
+#define SPR_IVOR10 0x19a
+#define SPR_IVOR11 0x19b
+#define SPR_IVOR12 0x19c
+#define SPR_IVOR13 0x19d
+#define SPR_IVOR14 0x19e
+#define SPR_IVOR15 0x19f
+#define SPR_IVOR32 0x210
+#define SPR_IVOR33 0x211
+#define SPR_IVOR34 0x212
+#define SPR_IVOR35 0x213
+
+#define SPR_MAS0 0x270 /* ..8 MMU Assist Register 0 Book-E/e500 */
+#define SPR_MAS1 0x271 /* ..8 MMU Assist Register 1 Book-E/e500 */
+#define SPR_MAS2 0x272 /* ..8 MMU Assist Register 2 Book-E/e500 */
+#define SPR_MAS3 0x273 /* ..8 MMU Assist Register 3 Book-E/e500 */
+#define SPR_MAS4 0x274 /* ..8 MMU Assist Register 4 Book-E/e500 */
+#define SPR_MAS5 0x275 /* ..8 MMU Assist Register 5 Book-E */
+#define SPR_MAS6 0x276 /* ..8 MMU Assist Register 6 Book-E/e500 */
+#define SPR_MAS7 0x3B0 /* ..8 MMU Assist Register 7 Book-E/e500 */
+
+#define SPR_L1CSR0 0x3F2 /* ..8 L1 Cache Control and Status Register 0 */
+#define L1CSR0_DCPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_DCLFR 0x00000100 /* Data Cache Lock Bits Flash Reset */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+#define SPR_L1CSR1 0x3F3 /* ..8 L1 Cache Control and Status Register 1 */
+#define L1CSR1_ICPE 0x00010000 /* Instruction Cache Parity Enable */
+#define L1CSR1_ICLFR 0x00000100 /* Instruction Cache Lock Bits Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
+
+#define SPR_BUCSR 0x3F5 /* ..8 Branch Unit Control and Status Register */
+#define BUCSR_BPEN 0x00000001 /* Branch Prediction Enable */
+
+#endif /* #elif defined(E500) */
+
+#endif /* !_POWERPC_SPR_HH_ */
diff --git a/freebsd/sparc64/include/freebsd/machine/asi.h b/freebsd/sparc64/include/freebsd/machine/asi.h
new file mode 100644
index 00000000..83a42ff3
--- /dev/null
+++ b/freebsd/sparc64/include/freebsd/machine/asi.h
@@ -0,0 +1,260 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: BSDI: asi.h,v 1.3 1997/08/08 14:31:42 torek
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASI_HH_
+#define _MACHINE_ASI_HH_
+
+/*
+ * Standard v9 ASIs
+ */
+#define ASI_N 0x4
+#define ASI_NL 0xc
+#define ASI_AIUP 0x10
+#define ASI_AIUS 0x11
+#define ASI_AIUPL 0x18
+#define ASI_AIUSL 0x19
+#define ASI_P 0x80
+#define ASI_S 0x81
+#define ASI_PNF 0x82
+#define ASI_SNF 0x83
+#define ASI_PL 0x88
+#define ASI_SL 0x89
+#define ASI_PNFL 0x8a
+#define ASI_SNFL 0x8b
+
+/*
+ * UltraSPARC extensions - ASIs limited to a certain family are annotated.
+ */
+#define ASI_PHYS_USE_EC 0x14
+#define ASI_PHYS_BYPASS_EC_WITH_EBIT 0x15
+#define ASI_PHYS_USE_EC_L 0x1c
+#define ASI_PHYS_BYPASS_EC_WITH_EBIT_L 0x1d
+
+#define ASI_NUCLEUS_QUAD_LDD 0x24
+#define ASI_NUCLEUS_QUAD_LDD_L 0x2c
+
+#define ASI_PCACHE_STATUS_DATA 0x30 /* US-III Cu */
+#define ASI_PCACHE_DATA 0x31 /* US-III Cu */
+#define ASI_PCACHE_TAG 0x32 /* US-III Cu */
+#define ASI_PCACHE_SNOOP_TAG 0x33 /* US-III Cu */
+
+#define ASI_ATOMIC_QUAD_LDD_PHYS 0x34 /* US-III Cu */
+
+#define ASI_WCACHE_VALID_BITS 0x38 /* US-III Cu */
+#define ASI_WCACHE_DATA 0x39 /* US-III Cu */
+#define ASI_WCACHE_TAG 0x3a /* US-III Cu */
+#define ASI_WCACHE_SNOOP_TAG 0x3b /* US-III Cu */
+
+#define ASI_ATOMIC_QUAD_LDD_PHYS_L 0x3c /* US-III Cu */
+
+#define ASI_SRAM_FAST_INIT 0x40 /* US-III Cu */
+
+#define ASI_DCACHE_INVALIDATE 0x42 /* US-III Cu */
+#define ASI_DCACHE_UTAG 0x43 /* US-III Cu */
+#define ASI_DCACHE_SNOOP_TAG 0x44 /* US-III Cu */
+
+/* Named ASI_DCUCR on US-III, but is mostly identical except for added bits. */
+#define ASI_LSU_CTL_REG 0x45 /* US only */
+
+#define ASI_MCNTL 0x45 /* SPARC64 only */
+#define AA_MCNTL 0x08
+
+#define ASI_DCACHE_DATA 0x46
+#define ASI_DCACHE_TAG 0x47
+
+#define ASI_INTR_DISPATCH_STATUS 0x48
+#define ASI_INTR_RECEIVE 0x49
+#define ASI_UPA_CONFIG_REG 0x4a /* US-I, II */
+
+#define ASI_FIREPLANE_CONFIG_REG 0x4a /* US-III{,+}, IV{,+} */
+#define AA_FIREPLANE_CONFIG 0x0 /* US-III{,+}, IV{,+} */
+#define AA_FIREPLANE_ADDRESS 0x8 /* US-III{,+}, IV{,+} */
+#define AA_FIREPLANE_CONFIG_2 0x10 /* US-IV{,+} */
+
+#define ASI_JBUS_CONFIG_REG 0x4a /* US-IIIi{,+} */
+
+#define ASI_ESTATE_ERROR_EN_REG 0x4b
+#define AA_ESTATE_CEEN 0x1
+#define AA_ESTATE_NCEEN 0x2
+#define AA_ESTATE_ISAPEN 0x4
+
+#define ASI_AFSR 0x4c
+#define ASI_AFAR 0x4d
+
+#define ASI_ECACHE_TAG_DATA 0x4e
+
+#define ASI_IMMU_TAG_TARGET_REG 0x50
+#define ASI_IMMU 0x50
+#define AA_IMMU_TTR 0x0
+#define AA_IMMU_SFSR 0x18
+#define AA_IMMU_TSB 0x28
+#define AA_IMMU_TAR 0x30
+#define AA_IMMU_TSB_PEXT_REG 0x48 /* US-III family */
+#define AA_IMMU_TSB_SEXT_REG 0x50 /* US-III family */
+#define AA_IMMU_TSB_NEXT_REG 0x58 /* US-III family */
+
+#define ASI_IMMU_TSB_8KB_PTR_REG 0x51
+#define ASI_IMMU_TSB_64KB_PTR_REG 0x52
+
+#define ASI_SERIAL_ID 0x53 /* US-III family */
+
+#define ASI_ITLB_DATA_IN_REG 0x54
+/* US-III Cu: also ASI_ITLB_CAM_ADDRESS_REG */
+#define ASI_ITLB_DATA_ACCESS_REG 0x55
+#define ASI_ITLB_TAG_READ_REG 0x56
+#define ASI_IMMU_DEMAP 0x57
+
+#define ASI_DMMU_TAG_TARGET_REG 0x58
+#define ASI_DMMU 0x58
+#define AA_DMMU_TTR 0x0
+#define AA_DMMU_PCXR 0x8
+#define AA_DMMU_SCXR 0x10
+#define AA_DMMU_SFSR 0x18
+#define AA_DMMU_SFAR 0x20
+#define AA_DMMU_TSB 0x28
+#define AA_DMMU_TAR 0x30
+#define AA_DMMU_VWPR 0x38
+#define AA_DMMU_PWPR 0x40
+#define AA_DMMU_TSB_PEXT_REG 0x48
+#define AA_DMMU_TSB_SEXT_REG 0x50
+#define AA_DMMU_TSB_NEXT_REG 0x58
+#define AA_DMMU_TAG_ACCESS_EXT 0x60 /* US-III family */
+
+#define ASI_DMMU_TSB_8KB_PTR_REG 0x59
+#define ASI_DMMU_TSB_64KB_PTR_REG 0x5a
+#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b
+#define ASI_DTLB_DATA_IN_REG 0x5c
+/* US-III Cu: also ASI_DTLB_CAM_ADDRESS_REG */
+#define ASI_DTLB_DATA_ACCESS_REG 0x5d
+#define ASI_DTLB_TAG_READ_REG 0x5e
+#define ASI_DMMU_DEMAP 0x5f
+
+#define ASI_IIU_INST_TRAP 0x60 /* US-III family */
+
+#define ASI_INTR_ID 0x63 /* US-IV{,+} */
+#define AA_INTR_ID 0x0 /* US-IV{,+} */
+#define AA_CORE_ID 0x10 /* US-IV{,+} */
+#define AA_CESR_ID 0x40 /* US-IV{,+} */
+
+#define ASI_ICACHE_INSTR 0x66
+#define ASI_ICACHE_TAG 0x67
+#define ASI_ICACHE_SNOOP_TAG 0x68 /* US-III family */
+#define ASI_ICACHE_PRE_DECODE 0x6e /* US-I, II */
+#define ASI_ICACHE_PRE_NEXT_FIELD 0x6f /* US-I, II */
+
+#define ASI_FLUSH_L1I 0x67 /* SPARC64 only */
+
+#define ASI_BLK_AUIP 0x70
+#define ASI_BLK_AIUS 0x71
+
+#define ASI_MCU_CONFIG_REG 0x72 /* US-III Cu */
+#define AA_MCU_TIMING1_REG 0x0 /* US-III Cu */
+#define AA_MCU_TIMING2_REG 0x8 /* US-III Cu */
+#define AA_MCU_TIMING3_REG 0x10 /* US-III Cu */
+#define AA_MCU_TIMING4_REG 0x18 /* US-III Cu */
+#define AA_MCU_DEC1_REG 0x20 /* US-III Cu */
+#define AA_MCU_DEC2_REG 0x28 /* US-III Cu */
+#define AA_MCU_DEC3_REG 0x30 /* US-III Cu */
+#define AA_MCU_DEC4_REG 0x38 /* US-III Cu */
+#define AA_MCU_ADDR_CNTL_REG 0x40 /* US-III Cu */
+
+#define ASI_ECACHE_DATA 0x74 /* US-III Cu */
+#define ASI_ECACHE_CONTROL 0x75 /* US-III Cu */
+#define ASI_ECACHE_W 0x76
+
+/*
+ * With the advent of the US-III, the numbering has changed, as additional
+ * registers were inserted in between. We retain the original ordering for
+ * now, and append an A to the inserted registers.
+ * Exceptions are AA_SDB_INTR_D6 and AA_SDB_INTR_D7, which were appended
+ * at the end.
+ */
+#define ASI_SDB_ERROR_W 0x77
+#define ASI_SDB_CONTROL_W 0x77
+#define ASI_SDB_INTR_W 0x77
+#define AA_SDB_ERR_HIGH 0x0
+#define AA_SDB_ERR_LOW 0x18
+#define AA_SDB_CNTL_HIGH 0x20
+#define AA_SDB_CNTL_LOW 0x38
+#define AA_SDB_INTR_D0 0x40
+#define AA_SDB_INTR_D0A 0x48 /* US-III family */
+#define AA_SDB_INTR_D1 0x50
+#define AA_SDB_INTR_D1A 0x5A /* US-III family */
+#define AA_SDB_INTR_D2 0x60
+#define AA_SDB_INTR_D2A 0x68 /* US-III family */
+#define AA_INTR_SEND 0x70
+#define AA_SDB_INTR_D6 0x80 /* US-III family */
+#define AA_SDB_INTR_D7 0x88 /* US-III family */
+
+#define ASI_BLK_AIUPL 0x78
+#define ASI_BLK_AIUSL 0x79
+
+#define ASI_ECACHE_R 0x7e
+
+/*
+ * These have the same registers as their corresponding write versions
+ * except for AA_INTR_SEND.
+ */
+#define ASI_SDB_ERROR_R 0x7f
+#define ASI_SDB_CONTROL_R 0x7f
+#define ASI_SDB_INTR_R 0x7f
+
+#define ASI_PST8_P 0xc0
+#define ASI_PST8_S 0xc1
+#define ASI_PST16_P 0xc2
+#define ASI_PST16_S 0xc3
+#define ASI_PST32_P 0xc4
+#define ASI_PST32_S 0xc5
+
+#define ASI_PST8_PL 0xc8
+#define ASI_PST8_SL 0xc9
+#define ASI_PST16_PL 0xca
+#define ASI_PST16_SL 0xcb
+#define ASI_PST32_PL 0xcc
+#define ASI_PST32_SL 0xcd
+
+#define ASI_FL8_P 0xd0
+#define ASI_FL8_S 0xd1
+#define ASI_FL16_P 0xd2
+#define ASI_FL16_S 0xd3
+#define ASI_FL8_PL 0xd8
+#define ASI_FL8_SL 0xd9
+#define ASI_FL16_PL 0xda
+#define ASI_FL16_SL 0xdb
+
+#define ASI_BLK_COMMIT_P 0xe0
+#define ASI_BLK_COMMIT_S 0xe1
+#define ASI_BLK_P 0xf0
+#define ASI_BLK_S 0xf1
+#define ASI_BLK_PL 0xf8
+#define ASI_BLK_SL 0xf9
+
+#endif /* !_MACHINE_ASI_HH_ */
diff --git a/freebsd/sparc64/include/freebsd/machine/cpufunc.h b/freebsd/sparc64/include/freebsd/machine/cpufunc.h
new file mode 100644
index 00000000..7805abc4
--- /dev/null
+++ b/freebsd/sparc64/include/freebsd/machine/cpufunc.h
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_HH_
+#define _MACHINE_CPUFUNC_HH_
+
+#include <freebsd/machine/asi.h>
+#include <freebsd/machine/pstate.h>
+
+struct thread;
+
+/*
+ * Membar operand macros for use in other macros when # is a special
+ * character. Keep these in sync with what the hardware expects.
+ */
+#define C_Lookaside (0)
+#define C_MemIssue (1)
+#define C_Sync (2)
+#define M_LoadLoad (0)
+#define M_StoreLoad (1)
+#define M_LoadStore (2)
+#define M_StoreStore (3)
+
+#define CMASK_SHIFT (4)
+#define MMASK_SHIFT (0)
+
+#define CMASK_GEN(bit) ((1 << (bit)) << CMASK_SHIFT)
+#define MMASK_GEN(bit) ((1 << (bit)) << MMASK_SHIFT)
+
+#define Lookaside CMASK_GEN(C_Lookaside)
+#define MemIssue CMASK_GEN(C_MemIssue)
+#define Sync CMASK_GEN(C_Sync)
+#define LoadLoad MMASK_GEN(M_LoadLoad)
+#define StoreLoad MMASK_GEN(M_StoreLoad)
+#define LoadStore MMASK_GEN(M_LoadStore)
+#define StoreStore MMASK_GEN(M_StoreStore)
+
+#define casa(rs1, rs2, rd, asi) ({ \
+ u_int __rd = (uint32_t)(rd); \
+ __asm __volatile("casa [%2] %3, %4, %0" \
+ : "+r" (__rd), "=m" (*rs1) \
+ : "r" (rs1), "n" (asi), "r" (rs2), "m" (*rs1)); \
+ __rd; \
+})
+
+#define casxa(rs1, rs2, rd, asi) ({ \
+ u_long __rd = (uint64_t)(rd); \
+ __asm __volatile("casxa [%2] %3, %4, %0" \
+ : "+r" (__rd), "=m" (*rs1) \
+ : "r" (rs1), "n" (asi), "r" (rs2), "m" (*rs1)); \
+ __rd; \
+})
+
+#define flush(va) do { \
+ __asm __volatile("flush %0" : : "r" (va)); \
+} while (0)
+
+#define flushw() do { \
+ __asm __volatile("flushw" : :); \
+} while (0)
+
+#define mov(val, reg) do { \
+ __asm __volatile("mov %0, %" __XSTRING(reg) : : "r" (val)); \
+} while (0)
+
+/* Generate ld*a/st*a functions for non-constant ASIs. */
+#define LDNC_GEN(tp, o) \
+ static __inline tp \
+ o ## _nc(caddr_t va, int asi) \
+ { \
+ tp r; \
+ __asm __volatile("wr %2, 0, %%asi;" #o " [%1] %%asi, %0"\
+ : "=r" (r) : "r" (va), "r" (asi)); \
+ return (r); \
+ }
+
+LDNC_GEN(u_char, lduba);
+LDNC_GEN(u_short, lduha);
+LDNC_GEN(u_int, lduwa);
+LDNC_GEN(u_long, ldxa);
+
+#define LD_GENERIC(va, asi, op, type) ({ \
+ type __r; \
+ __asm __volatile(#op " [%1] %2, %0" \
+ : "=r" (__r) : "r" (va), "n" (asi)); \
+ __r; \
+})
+
+#define lduba(va, asi) LD_GENERIC(va, asi, lduba, u_char)
+#define lduha(va, asi) LD_GENERIC(va, asi, lduha, u_short)
+#define lduwa(va, asi) LD_GENERIC(va, asi, lduwa, u_int)
+#define ldxa(va, asi) LD_GENERIC(va, asi, ldxa, u_long)
+
+#define STNC_GEN(tp, o) \
+ static __inline void \
+ o ## _nc(caddr_t va, int asi, tp val) \
+ { \
+ __asm __volatile("wr %2, 0, %%asi;" #o " %0, [%1] %%asi"\
+ : : "r" (val), "r" (va), "r" (asi)); \
+ }
+
+STNC_GEN(u_char, stba);
+STNC_GEN(u_short, stha);
+STNC_GEN(u_int, stwa);
+STNC_GEN(u_long, stxa);
+
+#define ST_GENERIC(va, asi, val, op) \
+ __asm __volatile(#op " %0, [%1] %2" \
+ : : "r" (val), "r" (va), "n" (asi)); \
+
+#define stba(va, asi, val) ST_GENERIC(va, asi, val, stba)
+#define stha(va, asi, val) ST_GENERIC(va, asi, val, stha)
+#define stwa(va, asi, val) ST_GENERIC(va, asi, val, stwa)
+#define stxa(va, asi, val) ST_GENERIC(va, asi, val, stxa)
+
+/*
+ * Attempt to read from addr, val. If a Data Access Error trap happens,
+ * they return -1 and the contents of val is undefined. A return of 0
+ * means no trap happened, and the contents of val is valid.
+ */
+int fasword8(u_long asi, void *addr, uint8_t *val);
+int fasword16(u_long asi, void *addr, uint16_t *val);
+int fasword32(u_long asi, void *addr, uint32_t *val);
+
+#define membar(mask) do { \
+ __asm __volatile("membar %0" : : "n" (mask) : "memory"); \
+} while (0)
+
+#define rd(name) ({ \
+ uint64_t __sr; \
+ __asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :); \
+ __sr; \
+})
+
+#define wr(name, val, xor) do { \
+ __asm __volatile("wr %0, %1, %%" #name \
+ : : "r" (val), "rI" (xor)); \
+} while (0)
+
+#define rdpr(name) ({ \
+ uint64_t __pr; \
+ __asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :); \
+ __pr; \
+})
+
+#define wrpr(name, val, xor) do { \
+ __asm __volatile("wrpr %0, %1, %%" #name \
+ : : "r" (val), "rI" (xor)); \
+} while (0)
+
+/*
+ * Trick GAS/GCC into compiling access to STICK/STICK_COMPARE independently
+ * of the selected instruction set.
+ */
+#define rdstick() rd(asr24)
+#define rdstickcmpr() rd(asr25)
+#define wrstick(val, xor) wr(asr24, (val), (xor))
+#define wrstickcmpr(val, xor) wr(asr25, (val), (xor))
+
+/*
+ * Macro intended to be used instead of wr(asr23, val, xor) for writing to
+ * the TICK_COMPARE register in order to avoid a bug in BlackBird CPUs that
+ * can cause these writes to fail under certain conditions which in turn
+ * causes the hardclock to stop. The workaround is to read the TICK_COMPARE
+ * register back immediately after writing to it with these two instructions
+ * aligned to a quadword boundary in order to ensure that I$ misses won't
+ * split them up.
+ */
+#define wrtickcmpr(val, xor) ({ \
+ __asm __volatile( \
+ " ba,pt %%xcc, 1f ; " \
+ " nop ; " \
+ " .align 128 ; " \
+ "1: wr %0, %1, %%asr23 ; " \
+ " rd %%asr23, %%g0 ; " \
+ : : "r" (val), "rI" (xor)); \
+})
+
+static __inline void
+breakpoint(void)
+{
+
+ __asm __volatile("ta %%xcc, 1" : :);
+}
+
+static __inline register_t
+intr_disable(void)
+{
+ register_t s;
+
+ s = rdpr(pstate);
+ wrpr(pstate, s & ~PSTATE_IE, 0);
+ return (s);
+}
+#define intr_restore(s) wrpr(pstate, (s), 0)
+
+/*
+ * In some places, it is required that the store is directly followed by a
+ * membar #Sync. Don't trust the compiler to not insert instructions in
+ * between. We also need to disable interrupts completely.
+ */
+#define stxa_sync(va, asi, val) do { \
+ register_t s; \
+ s = intr_disable(); \
+ __asm __volatile("stxa %0, [%1] %2; membar #Sync" \
+ : : "r" (val), "r" (va), "n" (asi)); \
+ intr_restore(s); \
+} while (0)
+
+void ascopy(u_long asi, vm_offset_t src, vm_offset_t dst, size_t len);
+void ascopyfrom(u_long sasi, vm_offset_t src, caddr_t dst, size_t len);
+void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len);
+void aszero(u_long asi, vm_offset_t dst, size_t len);
+
+/*
+ * Ultrasparc II doesn't implement popc in hardware.
+ */
+#if 0
+#define HAVE_INLINE_FFS
+/*
+ * See page 202 of the SPARC v9 Architecture Manual.
+ */
+static __inline int
+ffs(int mask)
+{
+ int result;
+ int neg;
+ int tmp;
+
+ __asm __volatile(
+ " neg %3, %1 ; "
+ " xnor %3, %1, %2 ; "
+ " popc %2, %0 ; "
+ " movrz %3, %%g0, %0 ; "
+ : "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
+ return (result);
+}
+#endif
+
+#undef LDNC_GEN
+#undef STNC_GEN
+
+#endif /* !_MACHINE_CPUFUNC_HH_ */
diff --git a/freebsd/sparc64/include/freebsd/machine/pstate.h b/freebsd/sparc64/include/freebsd/machine/pstate.h
new file mode 100644
index 00000000..19d98be5
--- /dev/null
+++ b/freebsd/sparc64/include/freebsd/machine/pstate.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PSTATE_HH_
+#define _MACHINE_PSTATE_HH_
+
+#define PSTATE_AG (1<<0)
+#define PSTATE_IE (1<<1)
+#define PSTATE_PRIV (1<<2)
+#define PSTATE_AM (1<<3)
+#define PSTATE_PEF (1<<4)
+#define PSTATE_RED (1<<5)
+
+#define PSTATE_MM_SHIFT (6)
+#define PSTATE_MM_SIZE (2)
+#define PSTATE_MM_MASK (((1<<PSTATE_MM_SIZE)-1)<<PSTATE_MM_SHIFT)
+#define PSTATE_MM_TSO (0<<PSTATE_MM_SHIFT)
+#define PSTATE_MM_PSO (1<<PSTATE_MM_SHIFT)
+#define PSTATE_MM_RMO (2<<PSTATE_MM_SHIFT)
+
+#define PSTATE_TLE (1<<8)
+#define PSTATE_CLE (1<<9)
+#define PSTATE_MG (1<<10)
+#define PSTATE_IG (1<<11)
+
+#define PSTATE_MM PSTATE_MM_TSO
+
+#define PSTATE_NORMAL (PSTATE_MM | PSTATE_PEF | PSTATE_PRIV)
+#define PSTATE_ALT (PSTATE_NORMAL | PSTATE_AG)
+#define PSTATE_INTR (PSTATE_NORMAL | PSTATE_IG)
+#define PSTATE_MMU (PSTATE_NORMAL | PSTATE_MG)
+
+#define PSTATE_KERNEL (PSTATE_NORMAL | PSTATE_IE)
+
+#define PSTATE_SECURE(pstate) \
+ (((pstate) & ~(PSTATE_AM|PSTATE_MM_MASK)) == (PSTATE_IE|PSTATE_PEF))
+
+#endif /* !_MACHINE_PSTATE_HH_ */