Diffstat (limited to 'cpukit/score/cpu/sparc64')
-rw-r--r--   cpukit/score/cpu/sparc64/.cvsignore                2
-rw-r--r--   cpukit/score/cpu/sparc64/ChangeLog                71
-rw-r--r--   cpukit/score/cpu/sparc64/Makefile.am              22
-rw-r--r--   cpukit/score/cpu/sparc64/README                   15
-rw-r--r--   cpukit/score/cpu/sparc64/context.S               322
-rw-r--r--   cpukit/score/cpu/sparc64/cpu.c                   116
-rw-r--r--   cpukit/score/cpu/sparc64/preinstall.am            41
-rw-r--r--   cpukit/score/cpu/sparc64/rtems/asm.h             102
-rw-r--r--   cpukit/score/cpu/sparc64/rtems/score/cpu.h      1067
-rw-r--r--   cpukit/score/cpu/sparc64/rtems/score/sparc64.h   341
-rw-r--r--   cpukit/score/cpu/sparc64/rtems/score/types.h      44
11 files changed, 2143 insertions(+), 0 deletions(-)
diff --git a/cpukit/score/cpu/sparc64/.cvsignore b/cpukit/score/cpu/sparc64/.cvsignore
new file mode 100644
index 0000000000..282522db03
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/.cvsignore
@@ -0,0 +1,2 @@
+Makefile
+Makefile.in
diff --git a/cpukit/score/cpu/sparc64/ChangeLog b/cpukit/score/cpu/sparc64/ChangeLog
new file mode 100644
index 0000000000..ce31be0d58
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/ChangeLog
@@ -0,0 +1,71 @@
+2011-02-11 Ralf Corsépius <ralf.corsepius@rtems.org>
+
+ * rtems/score/cpu.h, rtems/score/sparc64.h:
+ Use "__asm__" instead of "asm" for improved c99-compliance.
+
+2010-11-16 Gedare Bloom <giddyup44@yahoo.com>
+
+ * rtems/score/sparc64.h: Fix typo.
+
+2010-10-21 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ * rtems/score/cpu.h: Add RTEMS_COMPILER_NO_RETURN_ATTRIBUTE to
+ _CPU_Context_restore() because it does not return. Telling GCC this
+ avoids generation of dead code.
+
+2010-08-19 Gedare Bloom <giddyup44@yahoo.com>
+
+ PR 1681/cpukit
+ * rtems/score/cpu.h: With the percpu patch, ASM uses
+ INTERRUPT_STACK_HIGH instead of _CPU_Interrupt_stack_high. The
+ sparc64 was still using the old variable, which was declared in its
+ cpu.h file. This patch comments out the declaration and switches to
+ using INTERRUPT_STACK_HIGH.
+
+2010-07-29 Gedare Bloom <giddyup44@yahoo.com>
+
+ PR 1635/cpukit
+ * rtems/score/types.h: Refactoring of priority handling, to isolate the
+ bitmap implementation of priorities in the supercore so that priority
+ management is a little more modular. This change is in anticipation
+ of scheduler implementations that can select how they manage tracking
+ priority levels / finding the highest priority ready task. Note that
+ most of the changes here are simple renaming, to clarify the use of
+ the bitmap-based priority management.
+
+2010-07-16 Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+ * rtems/score/cpu.h: Include <rtems/score/types.h> first.
+ * rtems/score/types.h: Use <rtems/score/basedefs.h> header file.
+
+2010-06-15 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ PR 1561/cpukit
+ * .cvsignore, ChangeLog, Makefile.am, Makefile.in, README, context.S,
+ cpu.c, preinstall.am, rtems/asm.h, rtems/score/cpu.h,
+ rtems/score/sparc64.h, rtems/score/types.h: New files.
+
+2010-05-10 Gedare Bloom <gedare@gwmail.gwu.edu>
+
+ * README: Explain separation of score between here and libcpu.
+
+2010-05-03 Gedare Bloom <gedare@gwmail.gwu.edu>
+
+ * rtems/score/cpu.h, context.S, cpu.c: Remove privileged
+ registers from context.
+
+2010-05-03 Gedare Bloom <gedare@gwmail.gwu.edu>
+
+ * cpu.c: Remove interrupt handling code. Moved to libcpu.
+
+2010-05-03 Gedare Bloom <gedare@gwmail.gwu.edu>
+
+ * context.S: Renamed cpu_asm.S to context.S.
+
+2010-05-02 Gedare Bloom <gedare@gwmail.gwu.edu>
+
+ * cpu_asm.S: Remove interrupt handling code. Moved to libcpu.
+
+2009-10-19 Eugen Leontie <eugen@gwu.edu>
+
+ Added sparc64 architecture
diff --git a/cpukit/score/cpu/sparc64/Makefile.am b/cpukit/score/cpu/sparc64/Makefile.am
new file mode 100644
index 0000000000..f04c81014b
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/Makefile.am
@@ -0,0 +1,22 @@
+##
+## $Id$
+##
+
+include $(top_srcdir)/automake/compile.am
+
+include_rtemsdir = $(includedir)/rtems
+include_rtems_HEADERS = rtems/asm.h
+
+#include_rtems_sparc64dir = $(includedir)/rtems/sparc64
+#include_rtems_sparc64_HEADERS =
+
+include_rtems_scoredir = $(includedir)/rtems/score
+include_rtems_score_HEADERS = rtems/score/sparc64.h rtems/score/cpu.h \
+ rtems/score/types.h
+
+noinst_LIBRARIES = libscorecpu.a
+libscorecpu_a_SOURCES = context.S cpu.c
+libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS)
+
+include $(srcdir)/preinstall.am
+include $(top_srcdir)/automake/local.am
diff --git a/cpukit/score/cpu/sparc64/README b/cpukit/score/cpu/sparc64/README
new file mode 100644
index 0000000000..9da0c5e0ef
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/README
@@ -0,0 +1,15 @@
+#
+# $Id$
+#
+
+A lot of explanation is still needed here; it will be added when more is
+known.
+
+The score/cpu/sparc64 directory contains only code that can execute without
+accessing privileged registers or using privileged instructions. This was done
+because the privileged registers differ between the sun4u and sun4v models.
+
+The model-specific and privileged code that would normally be found in
+score/cpu/sparc64 resides instead in the libcpu/sparc64/@RTEMS_CPU_MODEL@/score
+or libcpu/sparc64/shared/score directory. This is primarily the interrupt
+handling code.
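+
+A rough sketch of the intended split (illustrative only; the set of model
+directories depends on which CPU models end up being supported):
+
+  score/cpu/sparc64/                       - unprivileged context and FPU code
+  libcpu/sparc64/shared/score/             - privileged code common to all models
+  libcpu/sparc64/@RTEMS_CPU_MODEL@/score/  - model-specific code (e.g. sun4u, sun4v)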
+
diff --git a/cpukit/score/cpu/sparc64/context.S b/cpukit/score/cpu/sparc64/context.S
new file mode 100644
index 0000000000..bbf4d4a76e
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/context.S
@@ -0,0 +1,322 @@
+/* context.S
+ *
+ * This file contains the basic algorithms for all assembly code used
+ * in a specific CPU port of RTEMS. These algorithms must be implemented
+ * in assembly language.
+ *
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * Ported to ERC32 implementation of the SPARC by On-Line Applications
+ * Research Corporation (OAR) under contract to the European Space
+ * Agency (ESA).
+ *
+ * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
+ * European Space Agency.
+ *
+ * Ported to UltraSPARC T1 Niagara implementation of the SPARC-v9.
+ * Niagara modifications of respective RTEMS file:
+ * COPYRIGHT (c) 2010. Gedare Bloom.
+ *
+ * $Id$
+ */
+
+#include <rtems/asm.h>
+
+
+/*
+ * The assembler needs to be told that we know what to do with
+ * the global registers.
+ */
+.register %g2, #scratch
+.register %g3, #scratch
+.register %g6, #scratch
+.register %g7, #scratch
+
+#if (SPARC_HAS_FPU == 1)
+
+/*
+ * void _CPU_Context_save_fp(
+ * void **fp_context_ptr
+ * )
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed, then the pointer is modified by this routine.
+ *
+ */
+
+ .align 4
+PUBLIC(_CPU_Context_save_fp)
+ SYM(_CPU_Context_save_fp):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
+ /*
+ * The following enables the floating point unit.
+ */
+
+ sparc64_enable_FPU(%l0)
+
+ /*
+ * Although sun4v supports alternate register names for double-
+ * and quad-word floating point, SPARC v9 only uses f[#]
+ *
+ * Because quad-word fp is not supported by the hardware in
+ * many situations, we stick with double-word fp operations
+ */
+ ldx [%i0], %l0
+ std %f0, [%l0]
+ std %f2, [%l0 + F2_OFFSET]
+ std %f4, [%l0 + F4_OFFSET]
+ std %f6, [%l0 + F6_OFFSET]
+ std %f8, [%l0 + F8_OFFSET]
+ std %f10, [%l0 + F1O_OFFSET]
+ std %f12, [%l0 + F12_OFFSET]
+ std %f14, [%l0 + F14_OFFSET]
+ std %f16, [%l0 + F16_OFFSET]
+ std %f18, [%l0 + F18_OFFSET]
+ std %f20, [%l0 + F2O_OFFSET]
+ std %f22, [%l0 + F22_OFFSET]
+ std %f24, [%l0 + F24_OFFSET]
+ std %f26, [%l0 + F26_OFFSET]
+ std %f28, [%l0 + F28_OFFSET]
+ std %f30, [%l0 + F3O_OFFSET]
+ std %f32, [%l0 + F32_OFFSET]
+ std %f34, [%l0 + F34_OFFSET]
+ std %f36, [%l0 + F36_OFFSET]
+ std %f38, [%l0 + F38_OFFSET]
+ std %f40, [%l0 + F4O_OFFSET]
+ std %f42, [%l0 + F42_OFFSET]
+ std %f44, [%l0 + F44_OFFSET]
+ std %f46, [%l0 + F46_OFFSET]
+ std %f48, [%l0 + F48_OFFSET]
+ std %f50, [%l0 + F5O_OFFSET]
+ std %f52, [%l0 + F52_OFFSET]
+ std %f54, [%l0 + F54_OFFSET]
+ std %f56, [%l0 + F56_OFFSET]
+ std %f58, [%l0 + F58_OFFSET]
+ std %f60, [%l0 + F6O_OFFSET]
+ std %f62, [%l0 + F62_OFFSET]
+ stx %fsr, [%l0 + FSR_OFFSET]
+ ret
+ restore
+
+ /*
+ * void _CPU_Context_restore_fp(
+ * void **fp_context_ptr
+ * )
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed, then the pointer is modified by this routine.
+ *
+ */
+
+ .align 4
+PUBLIC(_CPU_Context_restore_fp)
+ SYM(_CPU_Context_restore_fp):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp
+
+ /*
+ * The following enables the floating point unit.
+ */
+
+ sparc64_enable_FPU(%l0)
+
+ ldx [%i0], %l0
+ ldd [%l0 + FO_OFFSET], %f0
+ ldd [%l0 + F2_OFFSET], %f2
+ ldd [%l0 + F4_OFFSET], %f4
+ ldd [%l0 + F6_OFFSET], %f6
+ ldd [%l0 + F8_OFFSET], %f8
+ ldd [%l0 + F1O_OFFSET], %f10
+ ldd [%l0 + F12_OFFSET], %f12
+ ldd [%l0 + F14_OFFSET], %f14
+ ldd [%l0 + F16_OFFSET], %f16
+ ldd [%l0 + F18_OFFSET], %f18
+ ldd [%l0 + F2O_OFFSET], %f20
+ ldd [%l0 + F22_OFFSET], %f22
+ ldd [%l0 + F24_OFFSET], %f24
+ ldd [%l0 + F26_OFFSET], %f26
+ ldd [%l0 + F28_OFFSET], %f28
+ ldd [%l0 + F3O_OFFSET], %f30
+ ldd [%l0 + F32_OFFSET], %f32
+ ldd [%l0 + F34_OFFSET], %f34
+ ldd [%l0 + F36_OFFSET], %f36
+ ldd [%l0 + F38_OFFSET], %f38
+ ldd [%l0 + F4O_OFFSET], %f40
+ ldd [%l0 + F42_OFFSET], %f42
+ ldd [%l0 + F44_OFFSET], %f44
+ ldd [%l0 + F46_OFFSET], %f46
+ ldd [%l0 + F48_OFFSET], %f48
+ ldd [%l0 + F5O_OFFSET], %f50
+ ldd [%l0 + F52_OFFSET], %f52
+ ldd [%l0 + F54_OFFSET], %f54
+ ldd [%l0 + F56_OFFSET], %f56
+ ldd [%l0 + F58_OFFSET], %f58
+ ldd [%l0 + F6O_OFFSET], %f60
+ ldd [%l0 + F62_OFFSET], %f62
+ ldx [%l0 + FSR_OFFSET], %fsr
+ ret
+ restore
+
+#endif /* SPARC_HAS_FPU */
+
+ /*
+ * void _CPU_Context_switch(
+ * Context_Control *run,
+ * Context_Control *heir
+ * )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 4
+PUBLIC(_CPU_Context_switch)
+ SYM(_CPU_Context_switch):
+ ! skip g0
+ stx %g1, [%o0 + G1_OFFSET] ! save the global registers
+ stx %g2, [%o0 + G2_OFFSET]
+ stx %g3, [%o0 + G3_OFFSET]
+ stx %g4, [%o0 + G4_OFFSET]
+ stx %g5, [%o0 + G5_OFFSET]
+ stx %g6, [%o0 + G6_OFFSET]
+ stx %g7, [%o0 + G7_OFFSET]
+
+ ! load the address of the ISR stack nesting prevention flag
+ setx SYM(_CPU_ISR_Dispatch_disable), %g1, %g2
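+ ! (setx is a synthetic instruction; the assembler expands it into a
+ !  multi-instruction sethi/or/sllx sequence, using %g1 as scratch here)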
+ lduw [%g2], %g2
+
+ ! save it a bit later so we do not waste a couple of cycles
+
+ stx %l0, [%o0 + L0_OFFSET] ! save the local registers
+ stx %l1, [%o0 + L1_OFFSET]
+ stx %l2, [%o0 + L2_OFFSET]
+ stx %l3, [%o0 + L3_OFFSET]
+ stx %l4, [%o0 + L4_OFFSET]
+ stx %l5, [%o0 + L5_OFFSET]
+ stx %l6, [%o0 + L6_OFFSET]
+ stx %l7, [%o0 + L7_OFFSET]
+
+ ! Now actually save ISR stack nesting prevention flag
+ stuw %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]
+
+ stx %i0, [%o0 + I0_OFFSET] ! save the input registers
+ stx %i1, [%o0 + I1_OFFSET]
+ stx %i2, [%o0 + I2_OFFSET]
+ stx %i3, [%o0 + I3_OFFSET]
+ stx %i4, [%o0 + I4_OFFSET]
+ stx %i5, [%o0 + I5_OFFSET]
+ stx %i6, [%o0 + I6_FP_OFFSET]
+ stx %i7, [%o0 + I7_OFFSET]
+
+ stx %o0, [%o0 + O0_OFFSET] ! save the output registers
+ stx %o1, [%o0 + O1_OFFSET]
+ stx %o2, [%o0 + O2_OFFSET]
+ stx %o3, [%o0 + O3_OFFSET]
+ stx %o4, [%o0 + O4_OFFSET]
+ stx %o5, [%o0 + O5_OFFSET]
+ stx %o6, [%o0 + O6_SP_OFFSET]
+ stx %o7, [%o0 + O7_OFFSET] ! o7 is the PC
+
+! rdpr %pil, %o2
+! stuw %o2, [%o0 + PIL_OFFSET] ! save pil
+
+! rdpr %pstate, %o2
+! stx %o2, [%o0 + PSTATE_OFFSET] ! save status register
+
+ /*
+ * This is entered from _CPU_Context_restore with:
+ * o1 = context to restore
+! * o2 = pstate
+ *
+ * NOTE: Flushing the register windows is necessary, but it adds
+ * an unpredictable (but bounded) overhead to context switching.
+ */
+
+PUBLIC(_CPU_Context_restore_heir)
+ SYM(_CPU_Context_restore_heir):
+
+ flushw
+
+
+
+ ! skip g0
+ ldx [%o1 + G1_OFFSET], %g1 ! restore the global registers
+ ldx [%o1 + G2_OFFSET], %g2
+ ldx [%o1 + G3_OFFSET], %g3
+ ldx [%o1 + G4_OFFSET], %g4
+ ldx [%o1 + G5_OFFSET], %g5
+ ldx [%o1 + G6_OFFSET], %g6
+ ldx [%o1 + G7_OFFSET], %g7
+
+ ! Load thread specific ISR dispatch prevention flag
+ ldx [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
+ setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o3
+ ! Store it to memory later to use the cycles
+
+ ldx [%o1 + L0_OFFSET], %l0 ! restore the local registers
+ ldx [%o1 + L1_OFFSET], %l1
+ ldx [%o1 + L2_OFFSET], %l2
+ ldx [%o1 + L3_OFFSET], %l3
+ ldx [%o1 + L4_OFFSET], %l4
+ ldx [%o1 + L5_OFFSET], %l5
+ ldx [%o1 + L6_OFFSET], %l6
+ ldx [%o1 + L7_OFFSET], %l7
+
+ ! Now restore thread specific ISR dispatch prevention flag
+ stuw %o2, [%o3]
+
+ ldx [%o1 + I0_OFFSET], %i0 ! restore the input registers
+ ldx [%o1 + I1_OFFSET], %i1
+ ldx [%o1 + I2_OFFSET], %i2
+ ldx [%o1 + I3_OFFSET], %i3
+ ldx [%o1 + I4_OFFSET], %i4
+ ldx [%o1 + I5_OFFSET], %i5
+ ldx [%o1 + I6_FP_OFFSET], %i6
+ ldx [%o1 + I7_OFFSET], %i7
+
+ ldx [%o1 + O0_OFFSET], %o0
+ ldx [%o1 + O2_OFFSET], %o2 ! restore the output registers
+ ldx [%o1 + O3_OFFSET], %o3
+ ldx [%o1 + O4_OFFSET], %o4
+ ldx [%o1 + O5_OFFSET], %o5
+ ldx [%o1 + O6_SP_OFFSET], %o6
+ ldx [%o1 + O7_OFFSET], %o7 ! PC
+
+ ! on a hunch... we should be able to use some of the %o regs
+! lduw [%o1 + PIL_OFFSET], %o2
+! wrpr %g0, %o2, %pil
+
+! ldx [%o1 + PSTATE_OFFSET], %o2
+
+ ! do o1 last to avoid destroying heir context pointer
+ ldx [%o1 + O1_OFFSET], %o1 ! overwrite heir pointer
+! wrpr %g0, %o2, %pstate
+
+ retl
+ nop
+
+ /*
+ * void _CPU_Context_restore(
+ * Context_Control *new_context
+ * )
+ *
+ * This routine is generally used only to perform restart self.
+ *
+ * NOTE: It is unnecessary to reload some registers.
+ */
+ /* if _CPU_Context_restore_heir does not flushw, then do it here */
+ .align 4
+PUBLIC(_CPU_Context_restore)
+ SYM(_CPU_Context_restore):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+! rdpr %pstate, %o2
+ ba SYM(_CPU_Context_restore_heir)
+ mov %i0, %o1 ! in the delay slot
+
+/* end of file */
diff --git a/cpukit/score/cpu/sparc64/cpu.c b/cpukit/score/cpu/sparc64/cpu.c
new file mode 100644
index 0000000000..3a415229cc
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/cpu.c
@@ -0,0 +1,116 @@
+/*
+ * SPARC-v9 Dependent Source
+ *
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This file is based on the SPARC cpu.c file. Modifications are made to
+ * provide support for the SPARC-v9.
+ * COPYRIGHT (c) 2010. Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/asm.h>
+#include <rtems/score/isr.h>
+#include <rtems/rtems/cache.h>
+
+/*PAGE
+ *
+ * _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS: NONE
+ *
+ * Output Parameters: NONE
+ *
+ * NOTE: There is no need to save the pointer to the thread dispatch routine.
+ * The SPARC's assembly code can reference it directly with no problems.
+ */
+
+void _CPU_Initialize(void)
+{
+#if (SPARC_HAS_FPU == 1)
+ Context_Control_fp *pointer;
+
+ /*
+ * This seems to be the most appropriate way to obtain an initial
+ * FP context on the SPARC. The NULL fp context is copied into
+ * the task's FP context during Context_Initialize_fp.
+ */
+
+ pointer = &_CPU_Null_fp_context;
+ _CPU_Context_save_fp( &pointer );
+
+#endif
+
+ /*
+ * Since no tasks have been created yet and no interrupts have occurred,
+ * there is no way that the currently executing thread can have an
+ * _ISR_Dispatch stack frame on its stack.
+ */
+ _CPU_ISR_Dispatch_disable = 0;
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ *
+ * This kernel routine initializes the basic non-FP context area associated
+ * with each thread.
+ *
+ * Input parameters:
+ * the_context - pointer to the context area
+ * stack_base - address of the memory area allocated for the stack
+ * size - size in bytes of the stack area
+ * new_level - interrupt level for this context area
+ * entry_point - the starting execution point for this context
+ * is_fp - TRUE if this context is associated with an FP thread
+ *
+ * Output parameters: NONE
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ void *stack_base,
+ uint32_t size,
+ uint32_t new_level,
+ void *entry_point,
+ bool is_fp
+)
+{
+ uint64_t stack_high; /* highest "stack aligned" address */
+ uint32_t the_size;
+
+ /*
+ * On CPUs with stacks which grow down (such as the SPARC), we build the stack
+ * based on the stack_high address.
+ */
+
+ stack_high = ((uint64_t)(stack_base) + size);
+ stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
+
+ the_size = size & ~(CPU_STACK_ALIGNMENT - 1);
+
+ /*
+ * See the README in this directory for a diagram of the stack.
+ */
+
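+  /*
+   * The SPARC "ret" synthetic instruction returns to %i7 + 8, so seeding
+   * %o7 with (entry_point - 8) makes the first "return" land exactly on
+   * entry_point.  The initial stack pointer is biased by STACK_BIAS (2047)
+   * as required by the 64-bit SPARC-v9 ABI.
+   */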
+ the_context->o7 = ((uint64_t) entry_point) - 8;
+ the_context->o6_sp = stack_high - CPU_MINIMUM_STACK_FRAME_SIZE - STACK_BIAS;
+ the_context->i6_fp = 0;
+
+ /* PSTATE used to be built here, but is no longer included in context */
+
+ /*
+ * Since THIS thread is being created, there is no way that THIS
+ * thread can have an _ISR_Dispatch stack frame on its stack.
+ */
+ the_context->isr_dispatch_disable = 0;
+}
diff --git a/cpukit/score/cpu/sparc64/preinstall.am b/cpukit/score/cpu/sparc64/preinstall.am
new file mode 100644
index 0000000000..9c6d5e4296
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/preinstall.am
@@ -0,0 +1,41 @@
+## Automatically generated by ampolish3 - Do not edit
+
+if AMPOLISH3
+$(srcdir)/preinstall.am: Makefile.am
+ $(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am
+endif
+
+PREINSTALL_DIRS =
+DISTCLEANFILES = $(PREINSTALL_DIRS)
+
+all-am: $(PREINSTALL_FILES)
+
+PREINSTALL_FILES =
+CLEANFILES = $(PREINSTALL_FILES)
+
+$(PROJECT_INCLUDE)/rtems/$(dirstamp):
+ @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems
+ @: > $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+
+$(PROJECT_INCLUDE)/rtems/asm.h: rtems/asm.h $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/asm.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/asm.h
+
+$(PROJECT_INCLUDE)/rtems/score/$(dirstamp):
+ @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems/score
+ @: > $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+
+$(PROJECT_INCLUDE)/rtems/score/sparc64.h: rtems/score/sparc64.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/sparc64.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/sparc64.h
+
+$(PROJECT_INCLUDE)/rtems/score/cpu.h: rtems/score/cpu.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpu.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpu.h
+
+$(PROJECT_INCLUDE)/rtems/score/types.h: rtems/score/types.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/types.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/types.h
+
diff --git a/cpukit/score/cpu/sparc64/rtems/asm.h b/cpukit/score/cpu/sparc64/rtems/asm.h
new file mode 100644
index 0000000000..29847ea1d5
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/rtems/asm.h
@@ -0,0 +1,102 @@
+/**
+ * @file rtems/asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ */
+
+/*
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted.
+ *
+ * $Id$
+ */
+
+#ifndef _RTEMS_ASM_H
+#define _RTEMS_ASM_H
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#ifndef ASM
+#define ASM
+#endif
+#ifndef __ASM__
+#define __ASM__
+#endif
+
+#include <rtems/score/cpuopts.h>
+#include <rtems/score/cpu.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+/* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */
+/* XXX The following ifdef magic fixes the problem but results in a warning */
+/* XXX when compiling assembly code. */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+#include <rtems/concat.h>
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/*
+ * define macros for all of the registers on this CPU
+ *
+ * EXAMPLE: #define d0 REG (d0)
+ */
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * The following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
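+
+/*
+ *  Typical use in an assembly source file (illustrative sketch only):
+ *
+ *          .align 4
+ *      PUBLIC(_CPU_Context_switch)
+ *      SYM(_CPU_Context_switch):
+ *          ...
+ *
+ *  PUBLIC() exports the symbol and SYM() applies the user label prefix so
+ *  the same source assembles regardless of the toolchain's prefix convention.
+ */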
+
+#endif
diff --git a/cpukit/score/cpu/sparc64/rtems/score/cpu.h b/cpukit/score/cpu/sparc64/rtems/score/cpu.h
new file mode 100644
index 0000000000..3b424f8b74
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/rtems/score/cpu.h
@@ -0,0 +1,1067 @@
+/**
+ * @file rtems/score/cpu.h
+ */
+
+/*
+ * This include file contains information pertaining to the port of
+ * the executive to the SPARC64 processor.
+ *
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This file is based on the SPARC cpu.h file. Modifications are made
+ * to support the SPARC64 processor.
+ * COPYRIGHT (c) 2010. Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#ifndef _RTEMS_SCORE_CPU_H
+#define _RTEMS_SCORE_CPU_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/types.h>
+#include <rtems/score/sparc64.h>
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH TRUE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * This parameter could go either way on the SPARC. The interrupt flash
+ * code is relatively lengthy given the requirements for nops following
+ * writes to the psr. But if the clock speed were high enough, this would
+ * not represent a great deal of time.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * Does the executive manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _ISR_Handler_initialization.
+ * If FALSE, nothing is done.
+ *
+ * The SPARC does not have a dedicated HW interrupt stack and one has
+ * been implemented in SW.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+
+/*
+ * Does the CPU follow the simple vectored interrupt model?
+ *
+ * If TRUE, then RTEMS allocates the vector table it internally manages.
+ * If FALSE, then the BSP is assumed to allocate and manage the vector
+ * table
+ *
+ * SPARC Specific Information:
+ *
+ * XXX document implementation including references if appropriate
+ */
+#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * The SPARC does not have a dedicated HW interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+/*
+ * Do we allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then nothing is done.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does RTEMS invoke the user's ISR with the vector number and
+ * a pointer to the saved interrupt frame (1) or just the vector
+ * number (0)?
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 0
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the FLOATING_POINT task attribute is supported.
+ * If FALSE, then the FLOATING_POINT task attribute is ignored.
+ */
+
+#if ( SPARC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+#define CPU_SOFTWARE_FP FALSE
+
+/*
+ * Are all tasks FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the FLOATING_POINT task attribute is followed.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Thread_Idle_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Thread_Idle_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ *
+ * The stack grows to lower addresses on the SPARC.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical data structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The SPARC does not appear to have particularly strict alignment
+ * requirements. This value (16) was chosen to take advantage of caches.
+ *
+ * The SPARC64 requirement on floating point alignment is at least 8,
+ * and is 16 if quad-word fp instructions are available (e.g. LDQF).
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
+
+/*
+ * Define what is required to specify how the network to host conversion
+ * routines are handled.
+ */
+
+#define CPU_BIG_ENDIAN TRUE
+#define CPU_LITTLE_ENDIAN FALSE
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ * The SPARC v9 has 16 interrupt levels in the PIL register.
+ */
+
+#define CPU_MODES_INTERRUPT_MASK 0x0000000F
+
+/*
+ * This structure represents the organization of the minimum stack frame
+ * for the SPARC. More framing information is required in certain situations
+ * such as when there are a large number of out parameters or when the callee
+ * must save floating point registers.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ uint64_t l0;
+ uint64_t l1;
+ uint64_t l2;
+ uint64_t l3;
+ uint64_t l4;
+ uint64_t l5;
+ uint64_t l6;
+ uint64_t l7;
+ uint64_t i0;
+ uint64_t i1;
+ uint64_t i2;
+ uint64_t i3;
+ uint64_t i4;
+ uint64_t i5;
+ uint64_t i6_fp;
+ uint64_t i7;
+ void *structure_return_address;
+ /*
+ * The following are for the callee to save the register arguments in
+ * should this be necessary.
+ */
+ uint64_t saved_arg0;
+ uint64_t saved_arg1;
+ uint64_t saved_arg2;
+ uint64_t saved_arg3;
+ uint64_t saved_arg4;
+ uint64_t saved_arg5;
+ uint64_t pad0;
+} CPU_Minimum_stack_frame;
+
+#endif /* !ASM */
+
+#define CPU_STACK_FRAME_L0_OFFSET 0x00
+#define CPU_STACK_FRAME_L1_OFFSET 0x08
+#define CPU_STACK_FRAME_L2_OFFSET 0x10
+#define CPU_STACK_FRAME_L3_OFFSET 0x18
+#define CPU_STACK_FRAME_L4_OFFSET 0x20
+#define CPU_STACK_FRAME_L5_OFFSET 0x28
+#define CPU_STACK_FRAME_L6_OFFSET 0x30
+#define CPU_STACK_FRAME_L7_OFFSET 0x38
+#define CPU_STACK_FRAME_I0_OFFSET 0x40
+#define CPU_STACK_FRAME_I1_OFFSET 0x48
+#define CPU_STACK_FRAME_I2_OFFSET 0x50
+#define CPU_STACK_FRAME_I3_OFFSET 0x58
+#define CPU_STACK_FRAME_I4_OFFSET 0x60
+#define CPU_STACK_FRAME_I5_OFFSET 0x68
+#define CPU_STACK_FRAME_I6_FP_OFFSET 0x70
+#define CPU_STACK_FRAME_I7_OFFSET 0x78
+#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET 0x80
+#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET 0x88
+#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET 0x90
+#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET 0x98
+#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET 0xA0
+#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET 0xA8
+#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET 0xB0
+#define CPU_STACK_FRAME_PAD0_OFFSET 0xB8
+
+#define CPU_MINIMUM_STACK_FRAME_SIZE 0xC0
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On the SPARC, we are relatively conservative in that we save most
+ * of the CPU state in the context area. The ET (enable trap) bit and
+ * the CWP (current window pointer) fields of the PSR are considered
+ * system wide resources and are not maintained on a per-thread basis.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ uint64_t g1;
+ uint64_t g2;
+ uint64_t g3;
+ uint64_t g4;
+ uint64_t g5;
+ uint64_t g6;
+ uint64_t g7;
+
+ uint64_t l0;
+ uint64_t l1;
+ uint64_t l2;
+ uint64_t l3;
+ uint64_t l4;
+ uint64_t l5;
+ uint64_t l6;
+ uint64_t l7;
+
+ uint64_t i0;
+ uint64_t i1;
+ uint64_t i2;
+ uint64_t i3;
+ uint64_t i4;
+ uint64_t i5;
+ uint64_t i6_fp;
+ uint64_t i7;
+
+ uint64_t o0;
+ uint64_t o1;
+ uint64_t o2;
+ uint64_t o3;
+ uint64_t o4;
+ uint64_t o5;
+ uint64_t o6_sp;
+ uint64_t o7;
+
+ uint32_t isr_dispatch_disable;
+ uint32_t pad;
+} Context_Control;
+
+#define _CPU_Context_Get_SP( _context ) \
+ (_context)->o6_sp
+
+#endif /* ASM */
+
+/*
+ * Offsets of fields within Context_Control for assembly routines.
+ */
+
+#define G1_OFFSET 0x00
+#define G2_OFFSET 0x08
+#define G3_OFFSET 0x10
+#define G4_OFFSET 0x18
+#define G5_OFFSET 0x20
+#define G6_OFFSET 0x28
+#define G7_OFFSET 0x30
+
+#define L0_OFFSET 0x38
+#define L1_OFFSET 0x40
+#define L2_OFFSET 0x48
+#define L3_OFFSET 0x50
+#define L4_OFFSET 0x58
+#define L5_OFFSET 0x60
+#define L6_OFFSET 0x68
+#define L7_OFFSET 0x70
+
+#define I0_OFFSET 0x78
+#define I1_OFFSET 0x80
+#define I2_OFFSET 0x88
+#define I3_OFFSET 0x90
+#define I4_OFFSET 0x98
+#define I5_OFFSET 0xA0
+#define I6_FP_OFFSET 0xA8
+#define I7_OFFSET 0xB0
+
+#define O0_OFFSET 0xB8
+#define O1_OFFSET 0xC0
+#define O2_OFFSET 0xC8
+#define O3_OFFSET 0xD0
+#define O4_OFFSET 0xD8
+#define O5_OFFSET 0xE0
+#define O6_SP_OFFSET 0xE8
+#define O7_OFFSET 0xF0
+
+#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
+#define ISR_PAD_OFFSET 0xFC
+
+#define CONTEXT_CONTROL_SIZE 0x100
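+
+/*
+ *  NOTE: These offsets and CONTEXT_CONTROL_SIZE must be kept consistent
+ *  with the layout of Context_Control above; the assembly code in
+ *  context.S addresses the structure through them.
+ */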
+
+/*
+ * The floating point context area.
+ */
+
+#ifndef ASM
+
+typedef struct {
+ double f0; /* f0-f1 */
+ double f2; /* f2-f3 */
+ double f4; /* f4-f5 */
+ double f6; /* f6-f7 */
+ double f8; /* f8-f9 */
+ double f10; /* f10-f11 */
+ double f12; /* f12-f13 */
+ double f14; /* f14-f15 */
+ double f16; /* f16-f17 */
+ double f18; /* f18-f19 */
+ double f20; /* f20-f21 */
+ double f22; /* f22-f23 */
+ double f24; /* f24-f25 */
+ double f26; /* f26-f27 */
+ double f28; /* f28-f29 */
+ double f30; /* f30-f31 */
+ double f32;
+ double f34;
+ double f36;
+ double f38;
+ double f40;
+ double f42;
+ double f44;
+ double f46;
+ double f48;
+ double f50;
+ double f52;
+ double f54;
+ double f56;
+ double f58;
+ double f60;
+ double f62;
+ uint64_t fsr;
+} Context_Control_fp;
+
+#endif /* !ASM */
+
+/*
+ * Offsets of fields within Context_Control_fp for assembly routines.
+ */
+
+#define FO_OFFSET 0x00
+#define F2_OFFSET 0x08
+#define F4_OFFSET 0x10
+#define F6_OFFSET 0x18
+#define F8_OFFSET 0x20
+#define F1O_OFFSET 0x28
+#define F12_OFFSET 0x30
+#define F14_OFFSET 0x38
+#define F16_OFFSET 0x40
+#define F18_OFFSET 0x48
+#define F2O_OFFSET 0x50
+#define F22_OFFSET 0x58
+#define F24_OFFSET 0x60
+#define F26_OFFSET 0x68
+#define F28_OFFSET 0x70
+#define F3O_OFFSET 0x78
+#define F32_OFFSET 0x80
+#define F34_OFFSET 0x88
+#define F36_OFFSET 0x90
+#define F38_OFFSET 0x98
+#define F4O_OFFSET 0xA0
+#define F42_OFFSET 0xA8
+#define F44_OFFSET 0xB0
+#define F46_OFFSET 0xB8
+#define F48_OFFSET 0xC0
+#define F5O_OFFSET 0xC8
+#define F52_OFFSET 0xD0
+#define F54_OFFSET 0xD8
+#define F56_OFFSET 0xE0
+#define F58_OFFSET 0xE8
+#define F6O_OFFSET 0xF0
+#define F62_OFFSET 0xF8
+#define FSR_OFFSET 0x100
+
+#define CONTEXT_CONTROL_FP_SIZE 0x108
+
+#ifndef ASM
+
+/*
+ * Context saved on stack for an interrupt.
+ *
+ * NOTE: The tstate, tpc, and tnpc are saved in this structure
+ * to allow resetting the TL while still being able to return
+ * from a trap later. The PIL is saved because
+ * if this is an external interrupt, we will mask lower
+ * priority interrupts until finishing. Even though the y register
+ * is deprecated, gcc still uses it.
+ */
+
+typedef struct {
+ CPU_Minimum_stack_frame Stack_frame;
+ uint64_t tstate;
+ uint64_t tpc;
+ uint64_t tnpc;
+ uint64_t pil;
+ uint64_t y;
+ uint64_t g1;
+ uint64_t g2;
+ uint64_t g3;
+ uint64_t g4;
+ uint64_t g5;
+ uint64_t g6;
+ uint64_t g7;
+ uint64_t o0;
+ uint64_t o1;
+ uint64_t o2;
+ uint64_t o3;
+ uint64_t o4;
+ uint64_t o5;
+ uint64_t o6_sp;
+ uint64_t o7;
+} CPU_Interrupt_frame;
+
+#endif /* ASM */
+
+/*
+ * Offsets of fields within CPU_Interrupt_frame for assembly routines.
+ */
+
+#define ISF_STACK_FRAME_OFFSET 0x00
+#define ISF_TSTATE_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
+#define ISF_TPC_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
+#define ISF_TNPC_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
+#define ISF_PIL_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
+#define ISF_Y_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
+#define ISF_G1_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
+#define ISF_G2_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
+#define ISF_G3_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
+#define ISF_G4_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
+#define ISF_G5_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
+#define ISF_G6_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
+#define ISF_G7_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x58
+#define ISF_O0_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x60
+#define ISF_O1_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x68
+#define ISF_O2_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x70
+#define ISF_O3_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x78
+#define ISF_O4_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x80
+#define ISF_O5_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x88
+#define ISF_O6_SP_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x90
+#define ISF_O7_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x98
+#define ISF_TVEC_NUM CPU_MINIMUM_STACK_FRAME_SIZE + 0xA0
+
+#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0xA8
+#ifndef ASM
+/*
+ * This variable contains the initial context for the FP unit. It is
+ * filled in by _CPU_Initialize and copied into the task's FP
+ * context area during _CPU_Context_Initialize_fp.
+ */
+
+SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use. Thus
+ * both must be present if either is.
+ *
+ * The SPARC supports a software based interrupt stack and these
+ * are required.
+ */
+/*
+SCORE_EXTERN void *_CPU_Interrupt_stack_low;
+SCORE_EXTERN void *_CPU_Interrupt_stack_high;
+*/
+/*
+ * This flag is context switched with each thread. It indicates
+ * that THIS thread has an _ISR_Dispatch stack frame on its stack.
+ * By using this flag, we can avoid nesting more interrupt dispatching
+ * attempts on a previously interrupted thread's stack.
+ */
+
+SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;
+
+/*
+ * The following type defines an entry in the SPARC's trap table.
+ *
+ * NOTE: The instructions chosen are RTEMS dependent although one is
+ * obligated to use two of the four instructions to perform a
+ * long jump. The other instructions load one register with the
+ * trap type (a.k.a. vector) and another with the psr.
+ */
+/* For SPARC V9, we must use 6 of these instructions to perform a long
+ * jump, because the _handler value is now 64-bits. We also need to store
+ * temporary values in the global register set at this trap level. Because
+ * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
+ * to pass parameters to ISR_Handler.
+ *
+ * The instruction sequence is now more like:
+ * rdpr %tstate, %g4
+ * setx _handler, %g2, %g3
+ * jmp %g3+0
+ * mov _vector, %g2
+ */
+typedef struct {
+ uint32_t rdpr_tstate_g4; /* rdpr %tstate, %g4 */
+ uint32_t sethi_of_hh_handler_to_g2; /* sethi %hh(_handler), %g2 */
+ uint32_t or_g2_hm_handler_to_g2; /* or %g2, %hm(_handler), %g2 */
+ uint32_t sllx_g2_by_32_to_g2; /* sllx %g2, 32, %g2 */
+ uint32_t sethi_of_handler_to_g3; /* sethi %hi(_handler), %g3 */
+ uint32_t or_g3_g2_to_g3; /* or %g3, %g2, %g3 */
+ uint32_t jmp_to_low_of_handler_plus_g3; /* jmp %g3 + %lo(_handler) */
+ uint32_t mov_vector_g2; /* mov _vector, %g2 */
+} CPU_Trap_table_entry;
+
+/*
+ * This is the set of opcodes for the instructions loaded into a trap
+ * table entry. The routine which installs a handler is responsible
+ * for filling in the fields for the _handler address and the _vector
+ * trap type.
+ *
+ * The constants following this structure are masks for the fields which
+ * must be filled in when the handler is installed.
+ */
+
+extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
+
+/*
+ * The size of the floating point context area.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+#endif
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * MPCI receive server thread. Remember that in a multiprocessor
+ * system this thread must exist and be able to process all directives.
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by the executive.
+ *
+ * On the SPARC, there are really only 256 vectors. However, the executive
+ * has no easy, fast, reliable way to determine which traps are synchronous
+ * and which are asynchronous. By default, synchronous traps return to the
+ * instruction which caused the interrupt. So if you install a software
+ * trap handler as an executive interrupt handler (which is desirable since
+ * RTEMS takes care of window and register issues), then the executive needs
+ * to know that the return address is to the trap rather than the instruction
+ * following the trap.
+ *
+ * So vectors 0 through 255 are treated as regular asynchronous traps which
+ * provide the "correct" return address. Vectors 256 through 512 are assumed
+ * by the executive to be synchronous and to require that the return address
+ * be fudged.
+ *
+ * If you use this mechanism to install a trap handler which must reexecute
+ * the instruction which caused the trap, then it should be installed as
+ * an asynchronous trap. This will avoid the executive changing the return
+ * address.
+ */
+/* On SPARC v9, there are 512 vectors. The same philosophy applies to
+ * vector installation and use, we just provide a larger table.
+ */
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 512
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023
+
+#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK 0x200
+#define SPARC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
+#define SPARC_SYNCHRONOUS_TRAP( _trap ) ((_trap) + 512 )
+
+#define SPARC_REAL_TRAP_NUMBER( _trap ) ((_trap) % 512)
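+
+/*
+ *  For example (the values follow directly from the macros above):
+ *    SPARC_ASYNCHRONOUS_TRAP( 0x41 ) == 0x041
+ *    SPARC_SYNCHRONOUS_TRAP( 0x01 )  == 0x201
+ *    SPARC_REAL_TRAP_NUMBER( 0x201 ) == 0x001
+ */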
+
+/*
+ * This is defined if the port has a special way to report the ISR nesting
+ * level. Most ports maintain the variable _ISR_Nest_level.
+ */
+
+#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
+
+/*
+ * Should be large enough to run all tests. This ensures
+ * that a "reasonable" small application should not have any problems.
+ *
+ * This appears to be a fairly generous number for the SPARC since it
+ * represents a call depth of about 20 routines based on the minimum
+ * stack frame.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*8)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ *
+ * On the SPARC, this is required for double word loads and stores.
+ *
+ * Note: quad-word loads/stores need alignment of 16, but currently supported
+ * architectures do not provide HW implemented quad-word operations.
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
+ *
+ * The alignment restrictions for the SPARC are not that strict but this
+ * should ensure that the stack is always sufficiently aligned so that the
+ * window overflow, underflow, and flush routines can use double word loads
+ * and stores.
+ */
+
+#define CPU_STACK_ALIGNMENT 16
+
+#ifndef ASM
+
+/*
+ * ISR handler macros
+ */
+
+/*
+ * Support routine to initialize the RTEMS vector table after it is allocated.
+ */
+
+#define _CPU_Initialize_vectors()
+
+/*
+ * Disable all interrupts for a critical section. The previous
+ * level is returned in _level.
+ */
+
+#define _CPU_ISR_Disable( _level ) \
+  (_level) = sparc_disable_interrupts()
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of a critical section. The parameter
+ * _level is not modified.
+ */
+
+#define _CPU_ISR_Enable( _level ) \
+ sparc_enable_interrupts( _level )
+
+/*
+ * This temporarily restores the interrupt to _level before immediately
+ * disabling them again. This is used to divide long critical
+ * sections into two or more parts. The parameter _level is not
+ * modified.
+ */
+
+#define _CPU_ISR_Flash( _level ) \
+ sparc_flash_interrupts( _level )
+
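+/*
+ *  An illustrative sketch (not part of the port itself) of a typical
+ *  critical section built from the macros above; "shared_counter" is a
+ *  hypothetical variable used only for this example.
+ *
+ *    uint32_t level;
+ *
+ *    _CPU_ISR_Disable( level );
+ *      shared_counter++;            -- data shared with an ISR
+ *    _CPU_ISR_Enable( level );
+ *
+ *  For a long critical section, _CPU_ISR_Flash( level ) may be used in the
+ *  middle to briefly allow pending interrupts to be serviced.
+ */
+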
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a straight fashion are undefined.
+ */
+
+#define _CPU_ISR_Set_level( _newlevel ) \
+ sparc_enable_interrupts( _newlevel)
+
+uint32_t _CPU_ISR_Get_level( void );
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ void *stack_base,
+ uint32_t size,
+ uint32_t new_level,
+ void *entry_point,
+ bool is_fp
+);
+
+/*
+ * This macro is invoked from _Thread_Handler to do whatever CPU
+ * specific magic is required that must be done in the context of
+ * the thread when it starts.
+ *
+ * On the SPARC, this means setting the frame pointer so GDB is happy.
+ * To make GDB stop unwinding at _Thread_Handler, the previous register
+ * window's frame pointer must be 0 and the calling address must be a
+ * function starting with a SAVE instruction. If the return address is in
+ * a leaf function (no SAVE), GDB will not look at the previous register
+ * window's frame pointer.
+ *
+ * _Thread_Handler is known to start with SAVE.
+ */
+
+#define _CPU_Context_Initialization_at_thread_begin() \
+ do { \
+ __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
+ } while (0)
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task.
+ *
+ * On the SPARC, this is relatively painless but requires a small
+ * amount of wrapper code before using the regular restore code
+ * of the context switch.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The FP context area for the SPARC is a simple structure and nothing
+ * special is required to find the "starting load point".
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ *
+ * The SPARC allows us to use the simple initialization model
+ * in which an "initial" FP context was saved into _CPU_Null_fp_context
+ * at CPU initialization and it is simply copied into the destination
+ * context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ do { \
+ *(*(_destination)) = _CPU_Null_fp_context; \
+ } while (0)
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ do { \
+ uint32_t level; \
+ \
+ level = sparc_disable_interrupts(); \
+ __asm__ volatile ( "mov %0, %%g1 " : "=r" (level) : "0" (level) ); \
+ while (1); /* loop forever */ \
+ } while (0)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * The SPARC port uses the generic C algorithm for bitfield scan if the
+ * CPU model does not have a scan instruction.
+ */
+
+#if ( SPARC_HAS_BITSCAN == 0 )
+#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
+#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
+#else
+#error "scan instruction not currently supported by RTEMS!!"
+#endif
+
+/* end of Bitfield handler macros */
+
+/* Priority handler macros */
+
+/*
+ * The SPARC port uses the generic C algorithm for bitfield scan if the
+ * CPU model does not have a scan instruction.
+ */
+
+#if ( SPARC_HAS_BITSCAN == 1 )
+#error "scan instruction not currently supported by RTEMS!!"
+#endif
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(void);
+
+/*
+ * _CPU_ISR_install_raw_handler
+ *
+ * This routine installs new_handler to be directly called from the trap
+ * table.
+ */
+
+void _CPU_ISR_install_raw_handler(
+ uint32_t vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ uint64_t vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
+
+/*
+ * _CPU_Thread_Idle_body
+ *
+ * Some SPARC implementations have low power, sleep, or idle modes. This
+ * tries to take advantage of those modes.
+ */
+
+void *_CPU_Thread_Idle_body( uintptr_t ignored );
+
+#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ Context_Control_fp **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ Context_Control_fp **fp_context_ptr
+);
+
+/*
+ * CPU_swap_u32
+ *
+ * The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if you come across a better
+ * way for the SPARC PLEASE use it. The most common way to swap a 32-bit
+ * entity as shown below is not any more efficient on the SPARC.
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * It is not obvious how the SPARC can do significantly better than the
+ * generic code. gcc 2.7.0 only generates about 12 instructions for the
+ * following code at optimization level four (i.e. -O4).
+ */
+
+static inline uint32_t CPU_swap_u32(
+ uint32_t value
+)
+{
+ uint32_t byte1, byte2, byte3, byte4, swapped;
+
+ byte4 = (value >> 24) & 0xff;
+ byte3 = (value >> 16) & 0xff;
+ byte2 = (value >> 8) & 0xff;
+ byte1 = value & 0xff;
+
+ swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ return( swapped );
+}
+
+#define CPU_swap_u16( value ) \
+ (((value&0xff) << 8) | ((value >> 8)&0xff))
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/cpukit/score/cpu/sparc64/rtems/score/sparc64.h b/cpukit/score/cpu/sparc64/rtems/score/sparc64.h
new file mode 100644
index 0000000000..41862c2362
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/rtems/score/sparc64.h
@@ -0,0 +1,341 @@
+/**
+ * @file rtems/score/sparc64.h
+ */
+
+/*
+ * This include file contains information pertaining to the SPARC
+ * processor family.
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * This file is based on the SPARC sparc.h file. Modifications are made
+ * to support the SPARC64 processor.
+ * COPYRIGHT (c) 2010. Gedare Bloom.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#ifndef _RTEMS_SCORE_SPARC_H
+#define _RTEMS_SCORE_SPARC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the "sparc" family. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ *
+ * Currently recognized feature flags:
+ *
+ * + SPARC_HAS_FPU
+ * 0 - no HW FPU
+ * 1 - has HW FPU (assumed to be compatible w/90C602)
+ *
+ * + SPARC_HAS_BITSCAN
+ * 0 - does not have scan instructions
+ * 1 - has scan instruction (not currently implemented)
+ *
+ * + SPARC_NUMBER_OF_REGISTER_WINDOWS
+ * 8 is the most common number supported by SPARC implementations.
+ * SPARC_PSR_CWP_MASK is derived from this value.
+ */
+
+/*
+ * Some higher end SPARCs have bitscan instructions. It would
+ * be nice to take advantage of them. Right now, there is no
+ * port to a CPU model with this feature and no (untested) code
+ * that is based on this feature flag.
+ */
+
+#define SPARC_HAS_BITSCAN 0
+
+/*
+ * This should be OK until a port to a higher end SPARC processor
+ * is made that has more than 8 register windows. If this cannot
+ * be determined based on multilib settings (v7/v8/v9), then the
+ * cpu_asm.S code that depends on this will have to move to libcpu.
+ *
+ * SPARC v9 supports from 3 to 32 register windows.
+ * N_REG_WINDOWS = 8 on UltraSPARC T1 (impl. dep. #2-V8).
+ */
+
+#define SPARC_NUMBER_OF_REGISTER_WINDOWS 8
+
+/*
+ * This should be determined based on some soft float derived
+ * cpp predefine but gcc does not currently give us that information.
+ */
+
+
+#if defined(_SOFT_FLOAT)
+#define SPARC_HAS_FPU 0
+#else
+#define SPARC_HAS_FPU 1
+#endif
+
+#if SPARC_HAS_FPU
+#define CPU_MODEL_NAME "w/FPU"
+#else
+#define CPU_MODEL_NAME "w/soft-float"
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "SPARC"
+
+/*
+ * Miscellaneous constants
+ */
+
+/*
+ * The PSR is deprecated and deleted.
+ *
+ * The following registers represent fields of the PSR:
+ * PIL - Processor Interrupt Level register
+ * CWP - Current Window Pointer register
+ * VER - Version register
+ * CCR - Condition Codes Register
+ * PSTATE - Processor State register
+ */
+
+/*
+ * PSTATE masks and starting bit positions
+ *
+ * NOTE: Reserved bits are ignored.
+ */
+
+#define SPARC_PSTATE_AG_MASK 0x00000001 /* bit 0 */
+#define SPARC_PSTATE_IE_MASK 0x00000002 /* bit 1 */
+#define SPARC_PSTATE_PRIV_MASK 0x00000004 /* bit 2 */
+#define SPARC_PSTATE_AM_MASK 0x00000008 /* bit 3 */
+#define SPARC_PSTATE_PEF_MASK 0x00000010 /* bit 4 */
+#define SPARC_PSTATE_MM_MASK 0x00000040 /* bit 6 */
+#define SPARC_PSTATE_TLE_MASK 0x00000100 /* bit 8 */
+#define SPARC_PSTATE_CLE_MASK 0x00000200 /* bit 9 */
+
+#define SPARC_PSTATE_AG_BIT_POSITION 0 /* bit 0 */
+#define SPARC_PSTATE_IE_BIT_POSITION 1 /* bit 1 */
+#define SPARC_PSTATE_PRIV_BIT_POSITION 2 /* bit 2 */
+#define SPARC_PSTATE_AM_BIT_POSITION 3 /* bit 3 */
+#define SPARC_PSTATE_PEF_BIT_POSITION 4 /* bit 4 */
+#define SPARC_PSTATE_MM_BIT_POSITION 6 /* bit 6 */
+#define SPARC_PSTATE_TLE_BIT_POSITION 8 /* bit 8 */
+#define SPARC_PSTATE_CLE_BIT_POSITION 9 /* bit 9 */
+
+#define SPARC_FPRS_FEF_MASK 0x0100 /* bit 2 */
+#define SPARC_FPRS_FEF_BIT_POSITION 2 /* bit 2 */
+
+#define SPARC_TSTATE_IE_MASK 0x00000200 /* bit 9 */
+
+#define SPARC_SOFTINT_TM_MASK 0x00000001 /* bit 0 */
+#define SPARC_SOFTINT_SM_MASK 0x00010000 /* bit 16 */
+#define SPARC_SOFTINT_TM_BIT_POSITION 1 /* bit 0 */
+#define SPARC_SOFTINT_SM_BIT_POSITION 17 /* bit 16 */
+
+#define STACK_BIAS (2047)
+
+#ifdef ASM
+
+/*
+ * To enable the FPU we need to set both PSTATE.pef and FPRS.fef
+ */
+
+#define sparc64_enable_FPU(rtmp1) \
+ rdpr %pstate, rtmp1; \
+ or rtmp1, SPARC_PSTATE_PEF_MASK, rtmp1; \
+ wrpr %g0, rtmp1, %pstate; \
+ rd %fprs, rtmp1; \
+ or rtmp1, SPARC_FPRS_FEF_MASK, rtmp1; \
+ wr %g0, rtmp1, %fprs
+
+
+#endif
+
+#ifndef ASM
+
+/*
+ * Standard nop
+ */
+
+#define nop() \
+ do { \
+ __asm__ volatile ( "nop" ); \
+ } while ( 0 )
+
+/*
+ * Get and set the pstate
+ */
+
+#define sparc64_get_pstate( _pstate ) \
+ do { \
+ (_pstate) = 0; \
+ __asm__ volatile( "rdpr %%pstate, %0" : "=r" (_pstate) : "0" (_pstate) ); \
+ } while ( 0 )
+
+#define sparc64_set_pstate( _pstate ) \
+ do { \
+ __asm__ volatile ( \
+ "wrpr %%g0, %0, %%pstate " : "=r" ((_pstate)) : "0" ((_pstate)) ); \
+ } while ( 0 )
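+
+/*
+ * Illustrative sketch only: masking interrupts by clearing PSTATE.IE with
+ * the accessors above.  Production code should normally use
+ * sparc_disable_interrupts()/sparc_enable_interrupts() instead.
+ *
+ *   uint64_t pstate;
+ *
+ *   sparc64_get_pstate( pstate );
+ *   pstate &= ~SPARC_PSTATE_IE_MASK;   -- clear PSTATE.ie, interrupts masked
+ *   sparc64_set_pstate( pstate );
+ *   ...
+ *   pstate |= SPARC_PSTATE_IE_MASK;    -- set PSTATE.ie, interrupts allowed
+ *   sparc64_set_pstate( pstate );
+ */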
+
+/*
+ * Get and set the PIL
+ */
+
+#define sparc64_get_pil( _pil ) \
+ do { \
+ (_pil) = 0; \
+ __asm__ volatile( "rdpr %%pil, %0" : "=r" (_pil) : "0" (_pil) ); \
+ } while ( 0 )
+
+#define sparc64_set_pil( _pil ) \
+ do { \
+ __asm__ volatile ( "wrpr %%g0, %0, %%pil " : "=r" ((_pil)) : "0" ((_pil)) ); \
+ } while ( 0 )
+
+
+/*
+ * Get and set the TBA
+ */
+
+#define sparc64_get_tba( _tba ) \
+ do { \
+ (_tba) = 0; /* to avoid uninitialized warnings */ \
+ __asm__ volatile( "rdpr %%tba, %0" : "=r" (_tba) : "0" (_tba) ); \
+ } while ( 0 )
+
+#define sparc64_set_tba( _tba ) \
+ do { \
+ __asm__ volatile( "wrpr %%g0, %0, %%tba" : "=r" (_tba) : "0" (_tba) ); \
+ } while ( 0 )
+
+/*
+ * Get and set the TL (trap level)
+ */
+
+#define sparc64_get_tl( _tl ) \
+ do { \
+ (_tl) = 0; /* to avoid uninitialized warnings */ \
+ __asm__ volatile( "rdpr %%tl, %0" : "=r" (_tl) : "0" (_tl) ); \
+ } while ( 0 )
+
+#define sparc64_set_tl( _tl ) \
+ do { \
+ __asm__ volatile( "wrpr %%g0, %0, %%tl" : "=r" (_tl) : "0" (_tl) ); \
+ } while ( 0 )
+
+
+/*
+ * read the stick register
+ *
+ * Note:
+ * stick asr=24, mnemonic=stick
+ * Note: stick does not appear to be a valid ASR for US3, although it is
+ * implemented in US3i.
+ */
+#define sparc64_read_stick( _stick ) \
+ do { \
+ (_stick) = 0; \
+ __asm__ volatile( "rd %%stick, %0" : "=r" (_stick) : "0" (_stick) ); \
+ } while ( 0 )
+
+/*
+ * write the stick_cmpr register
+ *
+ * Note:
+ * stick_cmpr asr=25, mnemonic=stick_cmpr
+ * Note: stick_cmpr does not appear to be a valid ASR for US3, although it is
+ * implemented in US3i.
+ */
+#define sparc64_write_stick_cmpr( _stick_cmpr ) \
+ do { \
+ __asm__ volatile( "wr %%g0, %0, %%stick_cmpr" : "=r" (_stick_cmpr) \
+ : "0" (_stick_cmpr) ); \
+ } while ( 0 )
+
+/*
+ * read the Tick register
+ */
+#define sparc64_read_tick( _tick ) \
+ do { \
+ (_tick) = 0; \
+ __asm__ volatile( "rd %%tick, %0" : "=r" (_tick) : "0" (_tick) ); \
+ } while ( 0 )
+
+/*
+ * write the tick_cmpr register
+ */
+#define sparc64_write_tick_cmpr( _tick_cmpr ) \
+ do { \
+ __asm__ volatile( "wr %%g0, %0, %%tick_cmpr" : "=r" (_tick_cmpr) \
+ : "0" (_tick_cmpr) ); \
+ } while ( 0 )
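+
+/*
+ * Illustrative sketch only (not used by this port): arming a one-shot
+ * compare interrupt "delta" cycles in the future, where "delta" is a
+ * hypothetical cycle count.  Handling of the tick_cmpr INT_DIS bit
+ * (bit 63) varies by CPU model, so a real driver must consult the
+ * model's manual.
+ *
+ *   uint64_t now, cmpr;
+ *
+ *   sparc64_read_tick( now );
+ *   cmpr = (now + delta) & ~(1ULL << 63);
+ *   sparc64_write_tick_cmpr( cmpr );
+ */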
+
+/*
+ * Clear the softint register.
+ *
+ * sun4u and sun4v: softint_clr asr = 21, with mnemonic clear_softint
+ */
+#define sparc64_clear_interrupt_bits( _bit_mask ) \
+ do { \
+ __asm__ volatile( "wr %%g0, %0, %%clear_softint" : "=r" (_bit_mask) \
+ : "0" (_bit_mask)); \
+ } while ( 0 )
+
+/************* DEPRECATED ****************/
+/* Note: Although the y register is deprecated, gcc still uses it */
+/*
+ * Get and set the Y
+ */
+
+#define sparc_get_y( _y ) \
+ do { \
+ __asm__ volatile( "rd %%y, %0" : "=r" (_y) : "0" (_y) ); \
+ } while ( 0 )
+
+#define sparc_set_y( _y ) \
+ do { \
+ __asm__ volatile( "wr %0, %%y" : "=r" (_y) : "0" (_y) ); \
+ } while ( 0 )
+
+/************* /DEPRECATED ****************/
+
+/*
+ * Manipulate the interrupt level in the pstate
+ */
+
+uint32_t sparc_disable_interrupts(void);
+void sparc_enable_interrupts(uint32_t);
+
+#define sparc_flash_interrupts( _level ) \
+ do { \
+ register uint32_t _ignored = 0; \
+ \
+ sparc_enable_interrupts( (_level) ); \
+ _ignored = sparc_disable_interrupts(); \
+ } while ( 0 )
+
+#define sparc64_get_interrupt_level( _level ) \
+ do { \
+ _level = 0; \
+ sparc64_get_pil( _level ); \
+ } while ( 0 )
+
+#endif /* !ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_SPARC_H */
diff --git a/cpukit/score/cpu/sparc64/rtems/score/types.h b/cpukit/score/cpu/sparc64/rtems/score/types.h
new file mode 100644
index 0000000000..92c67cdf61
--- /dev/null
+++ b/cpukit/score/cpu/sparc64/rtems/score/types.h
@@ -0,0 +1,44 @@
+/**
+ * @file rtems/score/types.h
+ */
+
+/*
+ * This include file contains type definitions pertaining to the
+ * SPARC-v9 processor family.
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#ifndef _RTEMS_SCORE_TYPES_H
+#define _RTEMS_SCORE_TYPES_H
+
+#include <rtems/score/basedefs.h>
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef uint16_t Priority_bit_map_Control;
+typedef void sparc_isr;
+typedef void ( *sparc_isr_entry )( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif