author    Joel Sherrill <joel.sherrill@OARcorp.com>  1995-10-06 20:01:20 +0000
committer Joel Sherrill <joel.sherrill@OARcorp.com>  1995-10-06 20:01:20 +0000
commit    c62d36feed16c25aefa8e85de0944f1bff0e7bdf (patch)
tree      71bbb213b1bd9b1e6acedbb00cb7da221d76afab /c
parent    moved to new development machine and went to gcc 2.7.0 (diff)
download  rtems-c62d36feed16c25aefa8e85de0944f1bff0e7bdf.tar.bz2
SPARC merged and successfully tested w/o interrupt support
Diffstat (limited to 'c')
-rw-r--r--  c/ACKNOWLEDGEMENTS                                6
-rw-r--r--  c/build-tools/cklength.c                          4
-rw-r--r--  c/build-tools/eolstrip.c                          4
-rw-r--r--  c/build-tools/src/cklength.c                      4
-rw-r--r--  c/build-tools/src/eolstrip.c                      4
-rw-r--r--  c/src/exec/score/cpu/i386/cpu.c                   1
-rw-r--r--  c/src/exec/score/cpu/sparc/asm.h                 97
-rw-r--r--  c/src/exec/score/cpu/sparc/cpu.c                197
-rw-r--r--  c/src/exec/score/cpu/sparc/cpu.h                986
-rw-r--r--  c/src/exec/score/cpu/sparc/cpu_asm.s            328
-rw-r--r--  c/src/exec/score/cpu/sparc/rtems.s               33
-rw-r--r--  c/src/exec/score/cpu/sparc/sparc.h              176
-rw-r--r--  c/src/exec/score/cpu/sparc/sparctypes.h          49
-rw-r--r--  c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c      1
-rw-r--r--  c/src/lib/libbsp/m68k/mvme136/timer/timer.c       1
15 files changed, 1888 insertions, 3 deletions
diff --git a/c/ACKNOWLEDGEMENTS b/c/ACKNOWLEDGEMENTS
index 6b8939aa48..bee6d1d526 100644
--- a/c/ACKNOWLEDGEMENTS
+++ b/c/ACKNOWLEDGEMENTS
@@ -64,6 +64,12 @@ The following persons/organizations have made contributions:
be considered general purpose controllers when used without the EFI
companion board(s). See the README in each BSP for more information.
++ The European Space Agency for sponsoring On-Line Applications Research
+ to port RTEMS to the SPARC V7 architecture for use with their ERC32
+ radiation-hardened CPU. Jiri Gaisler (jgais@wd.estec.esa.nl) deserves
+ special thanks for championing this port within the ESA as well as
+ for developing the SPARC Instruction Simulator used to test this port.
+
Finally, the RTEMS project would like to thank those who have contributed
to the other free software efforts which RTEMS utilizes. The primary RTEMS
development environment is from the Free Software Foundation (the GNU
diff --git a/c/build-tools/cklength.c b/c/build-tools/cklength.c
index 3786b3985d..b2a8cdf006 100644
--- a/c/build-tools/cklength.c
+++ b/c/build-tools/cklength.c
@@ -11,6 +11,9 @@
*
* $Id$
* $Log$
+ * Revision 1.3 1995/10/06 16:32:20 joel
+ * moved to new development machine and went to gcc 2.7.0
+ *
* Revision 1.2 1995/05/31 16:44:26 joel
* fixing revision
*
@@ -33,6 +36,7 @@ maximum line length. The default maximum line length is 80.\n\
";
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <ctype.h>
#include <stdlib.h>
diff --git a/c/build-tools/eolstrip.c b/c/build-tools/eolstrip.c
index 178bcd224c..bf39bd4972 100644
--- a/c/build-tools/eolstrip.c
+++ b/c/build-tools/eolstrip.c
@@ -9,6 +9,9 @@
*
* $Id$
* $Log$
+ * Revision 1.3 1995/10/06 16:32:21 joel
+ * moved to new development machine and went to gcc 2.7.0
+ *
* Revision 1.2 1995/05/31 16:44:28 joel
* fixing revision
*
@@ -28,6 +31,7 @@ Strip the white space from the end of every line on the list of files.\n\
";
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <ctype.h>
#include <stdlib.h>
diff --git a/c/build-tools/src/cklength.c b/c/build-tools/src/cklength.c
index 3786b3985d..b2a8cdf006 100644
--- a/c/build-tools/src/cklength.c
+++ b/c/build-tools/src/cklength.c
@@ -11,6 +11,9 @@
*
* $Id$
* $Log$
+ * Revision 1.3 1995/10/06 16:32:20 joel
+ * moved to new development machine and went to gcc 2.7.0
+ *
* Revision 1.2 1995/05/31 16:44:26 joel
* fixing revision
*
@@ -33,6 +36,7 @@ maximum line length. The default maximum line length is 80.\n\
";
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <ctype.h>
#include <stdlib.h>
diff --git a/c/build-tools/src/eolstrip.c b/c/build-tools/src/eolstrip.c
index 178bcd224c..bf39bd4972 100644
--- a/c/build-tools/src/eolstrip.c
+++ b/c/build-tools/src/eolstrip.c
@@ -9,6 +9,9 @@
*
* $Id$
* $Log$
+ * Revision 1.3 1995/10/06 16:32:21 joel
+ * moved to new development machine and went to gcc 2.7.0
+ *
* Revision 1.2 1995/05/31 16:44:28 joel
* fixing revision
*
@@ -28,6 +31,7 @@ Strip the white space from the end of every line on the list of files.\n\
";
#include <stdio.h>
+#include <stdlib.h>
#include <fcntl.h>
#include <ctype.h>
#include <stdlib.h>
diff --git a/c/src/exec/score/cpu/i386/cpu.c b/c/src/exec/score/cpu/i386/cpu.c
index 917cf042a9..ad9c56e20a 100644
--- a/c/src/exec/score/cpu/i386/cpu.c
+++ b/c/src/exec/score/cpu/i386/cpu.c
@@ -15,7 +15,6 @@
#include <rtems/system.h>
#include <rtems/score/isr.h>
-#include <rtems/score/wkspace.h>
/* _CPU_Initialize
*
diff --git a/c/src/exec/score/cpu/sparc/asm.h b/c/src/exec/score/cpu/sparc/asm.h
new file mode 100644
index 0000000000..10157171c5
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/asm.h
@@ -0,0 +1,97 @@
+/* asm.h
+ *
+ * This include file attempts to address the problems
+ * caused by incompatible flavors of assemblers and
+ * toolsets. It primarily addresses variations in the
+ * use of leading underscores on symbols and the requirement
+ * that register names be preceded by a %.
+ *
+ *
+ * NOTE: The spacing in the use of these macros
+ * is critical to them working as advertised.
+ *
+ * COPYRIGHT:
+ *
+ * This file is based on similar code found in newlib available
+ * from ftp.cygnus.com. The file which was used had no copyright
+ * notice. This file is freely distributable as long as the source
+ * of the file is noted.
+ *
+ * $Id$
+ */
+
+#ifndef __SPARC_ASM_h
+#define __SPARC_ASM_h
+
+/*
+ * Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#define ASM
+#include <rtems/score/sparc.h>
+
+/*
+ * Recent versions of GNU cpp define variables which indicate the
+ * need for underscores and percents. If not using GNU cpp or
+ * the version does not support this, then you will obviously
+ * have to define these as appropriate.
+ */
+
+/* XXX This does not appear to work on gcc 2.7.0 on the sparc */
+#undef __USER_LABEL_PREFIX__
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers. */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/*
+ * define macros for all of the registers on this CPU
+ *
+ * EXAMPLE: #define d0 REG (d0)
+ */
+
+/*
+ * Define macros to handle section beginning and ends.
+ */
+
+
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ * The following must be tailored for a particular flavor of the C compiler.
+ * They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym) .globl SYM (sym)
+#define EXTERN(sym) .globl SYM (sym)
+
+#endif
+/* end of include file */
+
+
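To make the prefixing scheme above concrete, here is a hypothetical, self-contained C illustration; the MY_* names are stand-ins for the asm.h macros and the symbol name is only an example:

    #include <stdio.h>

    #define MY_PREFIX _                        /* plays the role of __USER_LABEL_PREFIX__ */
    #define MY_CONCAT1(a, b) MY_CONCAT2(a, b)  /* extra level so the prefix macro expands */
    #define MY_CONCAT2(a, b) a ## b
    #define MY_SYM(x) MY_CONCAT1(MY_PREFIX, x)

    #define STR(x)  #x
    #define XSTR(x) STR(x)

    int main(void)
    {
      /* Prints "_CPU_Context_switch": the leading underscore comes from
       * MY_PREFIX, which is what a.out-style toolchains expect for the C
       * symbol CPU_Context_switch.
       */
      printf("%s\n", XSTR(MY_SYM(CPU_Context_switch)));
      return 0;
    }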
diff --git a/c/src/exec/score/cpu/sparc/cpu.c b/c/src/exec/score/cpu/sparc/cpu.c
new file mode 100644
index 0000000000..cf70913d5e
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/cpu.c
@@ -0,0 +1,197 @@
+/*
+ * SPARC Dependent Source
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ *
+ * INPUT PARAMETERS:
+ * cpu_table - CPU table to initialize
+ * thread_dispatch - address of dispatching routine
+ */
+
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch) /* ignored on this CPU */
+)
+{
+ void *pointer;
+
+ /*
+ * The thread_dispatch argument is the address of the entry point
+ * for the routine called at the end of an ISR once it has been
+ * decided a context switch is necessary. On some compilation
+ * systems it is difficult to call a high-level language routine
+ * from assembly. This allows us to trick these systems.
+ *
+ * If you encounter this problem save the entry point in a CPU
+ * dependent variable.
+ */
+
+ _CPU_Thread_dispatch_pointer = thread_dispatch;
+
+ /*
+ * If there is not an easy way to initialize the FP context
+ * during Context_Initialize, then it is usually easier to
+ * save an "uninitialized" FP context here and copy it to
+ * the task's FP context area during Context_Initialize.
+ */
+
+ pointer = &_CPU_Null_fp_context;
+ _CPU_Context_save_fp( &pointer );
+
+ _CPU_Table = *cpu_table;
+}
+
+/*PAGE
+ *
+ * _CPU_ISR_Get_level
+ */
+
+unsigned32 _CPU_ISR_Get_level( void )
+{
+ unsigned32 level;
+
+ sparc_get_interrupt_level( level );
+
+ return level;
+}
+
+/* _CPU_ISR_install_vector
+ *
+ * This kernel routine installs the RTEMS handler for the
+ * specified vector.
+ *
+ * Input parameters:
+ * vector - interrupt vector number
+ * old_handler - former ISR for this vector number
+ * new_handler - replacement ISR for this vector number
+ *
+ * Output parameters: NONE
+ *
+ */
+
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+)
+{
+ *old_handler = _ISR_Vector_table[ vector ];
+
+ /*
+ * If the interrupt vector table is a table of pointers to ISR entry
+ * points, then we need to install the appropriate RTEMS interrupt
+ * handler for this vector number.
+ */
+
+ /*
+ * We put the actual user ISR address in '_ISR_Vector_table'. This will
+ * be used by the _ISR_Handler so the user gets control.
+ */
+
+ _ISR_Vector_table[ vector ] = new_handler;
+}
+
+/*PAGE
+ *
+ * _CPU_Install_interrupt_stack
+ */
+
+void _CPU_Install_interrupt_stack( void )
+{
+}
+
+/*PAGE
+ *
+ * _CPU_Context_Initialize
+ */
+
+/*
+ * The following constants assist in building a thread's initial context.
+ */
+
+#define CPU_FRAME_SIZE (112) /* based on disassembled test code */
+#define ADDR_ADJ_OFFSET -8
+
+void _CPU_Context_Initialize(
+ Context_Control *_the_context,
+ unsigned32 *_stack_base,
+ unsigned32 _size,
+ unsigned32 _new_level,
+ void *_entry_point
+)
+{
+ unsigned32 jmp_addr;
+ unsigned32 _stack_high; /* highest "stack aligned" address */
+ unsigned32 _the_size;
+ unsigned32 tmp_psr;
+
+ jmp_addr = (unsigned32) _entry_point;
+
+ /*
+ * On CPUs with stacks which grow down (i.e. SPARC), we build the stack
+ * based on the _stack_high address.
+ */
+
+ _stack_high = ((unsigned32)(_stack_base) + _size);
+ _stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
+
+ _the_size = _size & ~(CPU_STACK_ALIGNMENT - 1);
+
+/* XXX following code is based on unix port */
+ /*
+ * XXX SPARC port needs a diagram like this one...
+ * See /usr/include/sys/stack.h in Solaris 2.3 for a nice
+ * diagram of the stack.
+ */
+
+ _the_context->o7 = jmp_addr + ADDR_ADJ_OFFSET;
+ _the_context->o6 = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
+ _the_context->i6 = (unsigned32)(_stack_high);
+#if 0
+ _the_context->rp = jmp_addr + ADDR_ADJ_OFFSET;
+ _the_context->sp = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
+ _the_context->fp = (unsigned32)(_stack_high);
+#endif
+
+ _the_context->wim = 0x01;
+
+ sparc_get_psr( tmp_psr );
+ tmp_psr &= ~SPARC_PIL_MASK;
+ tmp_psr |= (((_new_level) << 8) & SPARC_PIL_MASK);
+ tmp_psr = (tmp_psr & ~0x07) | 0x07; /* XXX should use num windows */
+ _the_context->psr = tmp_psr;
+}
+
+/*PAGE
+ *
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * NOTES:
+ *
+ * 1. This is the same as the regular CPU independent algorithm.
+ *
+ * 2. If you implement this using a "halt", "idle", or "shutdown"
+ * instruction, then don't forget to put it in an infinite loop.
+ *
+ * 3. Be warned. Some processors with onboard DMA have been known
+ * to stop the DMA if the CPU were put in IDLE mode. This might
+ * also be a problem with other on-chip peripherals. So use this
+ * hook with caution.
+ */
+
+void _CPU_Internal_threads_Idle_thread_body( void )
+{
+
+ for( ; ; )
+ /* insert your "halt" instruction here */ ;
+}
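The stack arithmetic in _CPU_Context_Initialize above is easier to follow with numbers plugged in; this hypothetical trace uses made-up addresses and the port's CPU_STACK_ALIGNMENT of 16 and CPU_FRAME_SIZE of 112:

    #include <stdio.h>

    int main( void )
    {
      unsigned int stack_base = 0x40001008;   /* deliberately unaligned example */
      unsigned int size       = 0x1000;
      unsigned int stack_high = (stack_base + size) & ~(16u - 1);

      printf( "stack_high = 0x%x\n", stack_high );        /* 0x40002000   */
      printf( "initial sp = 0x%x\n", stack_high - 112 );  /* goes into o6 */
      printf( "initial fp = 0x%x\n", stack_high );        /* goes into i6 */
      return 0;
    }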
diff --git a/c/src/exec/score/cpu/sparc/cpu.h b/c/src/exec/score/cpu/sparc/cpu.h
new file mode 100644
index 0000000000..6b9890d132
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/cpu.h
@@ -0,0 +1,986 @@
+/* cpu.h
+ *
+ * This include file contains information pertaining to the XXX
+ * processor.
+ *
+ * $Id$
+ */
+
+#ifndef __CPU_h
+#define __CPU_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/sparc.h> /* pick up machine definitions */
+#ifndef ASM
+#include <rtems/score/sparctypes.h>
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ * Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ * If TRUE, then they are inlined.
+ * If FALSE, then a subroutine call is made.
+ *
+ * Basically this is an example of the classic trade-off of size
+ * versus speed. Inlining the call (TRUE) typically increases the
+ * size of the executive while speeding up the enabling of dispatching.
+ * [NOTE: In general, the _Thread_Dispatch_disable_level will
+ * only be 0 or 1 unless you are in an interrupt handler and that
+ * interrupt handler invokes the executive.] When not inlined
+ * something calls _Thread_Enable_dispatch which in turn calls
+ * _Thread_Dispatch. If the enable dispatch is inlined, then
+ * one subroutine call is avoided entirely.
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH TRUE
+
+/*
+ * Should the body of the search loops in _Thread_queue_Enqueue_priority
+ * be unrolled one time? If unrolled, each iteration of the loop examines
+ * two "nodes" on the chain being searched. Otherwise, only one node
+ * is examined per iteration.
+ *
+ * If TRUE, then the loops are unrolled.
+ * If FALSE, then the loops are not unrolled.
+ *
+ * The primary factor in making this decision is the cost of disabling
+ * and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
+ * body of the loop. On some CPUs, the flash is more expensive than
+ * one iteration of the loop body. In this case, it might be desirable
+ * to unroll the loop. It is important to note that on some CPUs, this
+ * code is the longest interrupt disable period in the executive. So it is
+ * necessary to strike a balance when setting this parameter.
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+/*
+ * Does the executive manage a dedicated interrupt stack in software?
+ *
+ * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
+ * If FALSE, nothing is done.
+ *
+ * If the CPU supports a dedicated interrupt stack in hardware,
+ * then it is generally the responsibility of the BSP to allocate it
+ * and set it up.
+ *
+ * If the CPU does not support a dedicated interrupt stack, then
+ * the porter has two options: (1) execute interrupts on the stack of
+ * the interrupted task, and (2) have the executive manage a dedicated
+ * interrupt stack.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE /* XXX */
+
+/*
+ * Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ * If TRUE, then it must be installed during initialization.
+ * If FALSE, then no installation is performed.
+ *
+ * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is
+ * possible that both are FALSE for a particular CPU. Although it
+ * is unclear what that would imply about the interrupt processing
+ * procedure on that CPU.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE /* XXX */
+
+/*
+ * Do we allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ * If TRUE, then the memory is allocated during initialization.
+ * If FALSE, then the memory is not allocated during initialization.
+ *
+ * This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ * Does the CPU have hardware floating point?
+ *
+ * If TRUE, then the FLOATING_POINT task attribute is supported.
+ * If FALSE, then the FLOATING_POINT task attribute is ignored.
+ *
+ * If there is a FP coprocessor such as the i387 or mc68881, then
+ * the answer is TRUE.
+ *
+ * The macro name "SPARC_HAS_FPU" should be made CPU specific.
+ * It indicates whether or not this CPU model has FP support. For
+ * example, it would be possible to have an i386_nofp CPU model
+ * which set this to false to indicate that you have an i386 without
+ * an i387 and wish to leave floating point support out.
+ */
+
+#if ( SPARC_HAS_FPU == 1 )
+#define CPU_HARDWARE_FP TRUE
+#else
+#define CPU_HARDWARE_FP FALSE
+#endif
+
+/*
+ * Are all tasks FLOATING_POINT tasks implicitly?
+ *
+ * If TRUE, then the FLOATING_POINT task attribute is assumed.
+ * If FALSE, then the FLOATING_POINT task attribute is followed.
+ *
+ * So far, the only CPU in which this option has been used is the
+ * HP PA-RISC. The HP C compiler and gcc both implicitly use the
+ * floating point registers to perform integer multiplies. If
+ * a function which you would not expect to utilize the FP unit DOES,
+ * then one can not easily predict which tasks will use the FP hardware.
+ * In this case, this option should be TRUE.
+ *
+ * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ */
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+/*
+ * Should the IDLE task have a floating point context?
+ *
+ * If TRUE, then the IDLE task is created as a FLOATING_POINT task
+ * and it has a floating point context which is switched in and out.
+ * If FALSE, then the IDLE task does not have a floating point context.
+ *
+ * Setting this to TRUE negatively impacts the time required to preempt
+ * the IDLE task from an interrupt because the floating point context
+ * must be saved as part of the preemption.
+ */
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+/*
+ * Should the saving of the floating point registers be deferred
+ * until a context switch is made to another different floating point
+ * task?
+ *
+ * If TRUE, then the floating point context will not be stored until
+ * necessary. It will remain in the floating point registers and not
+ * disturbed until another floating point task is switched to.
+ *
+ * If FALSE, then the floating point context is saved when a floating
+ * point task is switched out and restored when the next floating point
+ * task is restored. The state of the floating point registers between
+ * those two operations is not specified.
+ *
+ * If the floating point context does NOT have to be saved as part of
+ * interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ * Setting this flag to TRUE results in using a different algorithm
+ * for deciding when to save and restore the floating point context.
+ * The deferred FP switch algorithm minimizes the number of times
+ * the FP context is saved and restored. The FP context is not saved
+ * until a context switch is made to another, different FP task.
+ * Thus in a system with only one FP task, the FP context will never
+ * be saved or restored.
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+
+/*
+ * Does this port provide a CPU dependent IDLE task implementation?
+ *
+ * If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
+ * must be provided and is the default IDLE thread body instead of
+ * _Internal_threads_Idle_thread_body.
+ *
+ * If FALSE, then use the generic IDLE thread body if the BSP does
+ * not provide one.
+ *
+ * This is intended to allow for supporting processors which have
+ * a low power or idle mode. When the IDLE thread is executed, then
+ * the CPU can be powered down.
+ *
+ * The order of precedence for selecting the IDLE thread body is:
+ *
+ * 1. BSP provided
+ * 2. CPU dependent (if provided)
+ * 3. generic (if no BSP and no CPU dependent)
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+/*
+ * Does the stack grow up (toward higher addresses) or down
+ * (toward lower addresses)?
+ *
+ * If TRUE, then the stack grows upward.
+ * If FALSE, then the stack grows toward smaller addresses.
+ */
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/*
+ * The following is the variable attribute used to force alignment
+ * of critical data structures. On some processors it may make
+ * sense to have these aligned on tighter boundaries than
+ * the minimum requirements of the compiler in order to have as
+ * much of the critical data area as possible in a cache line.
+ *
+ * The placement of this macro in the declaration of the variables
+ * is based on the syntactic requirements of the GNU C
+ * "__attribute__" extension. For example with GNU C, use
+ * the following to force a structure to a 32 byte boundary.
+ *
+ * __attribute__ ((aligned (32)))
+ *
+ * NOTE: Currently only the Priority Bit Map table uses this feature.
+ * To benefit from using this, the data must be heavily
+ * used so it will stay in the cache and used frequently enough
+ * in the executive to justify turning this on.
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
+
+/*
+ * The following defines the number of bits actually used in the
+ * interrupt field of the task mode. How those bits map to the
+ * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ */
+
+#define CPU_MODES_INTERRUPT_MASK 0x0000000F
+
+/*
+ * Processor defined structures
+ *
+ * Example structures include the descriptor tables from the i386
+ * and the processor control structure on the i960ca.
+ */
+
+/* XXX may need to put some structures here. */
+
+/*
+ * Contexts
+ *
+ * Generally there are 2 types of context to save.
+ * 1. Interrupt registers to save
+ * 2. Task level registers to save
+ *
+ * This means we have the following 3 context items:
+ * 1. task level context stuff:: Context_Control
+ * 2. floating point task stuff:: Context_Control_fp
+ * 3. special interrupt level context :: Context_Control_interrupt
+ *
+ * On some processors, it is cost-effective to save only the callee
+ * preserved registers during a task context switch. This means
+ * that the ISR code needs to save those registers which do not
+ * persist across function calls. It is not mandatory to make this
+ * distinctions between the caller/callee saves registers for the
+ * purpose of minimizing context saved during task switch and on interrupts.
+ * If the cost of saving extra registers is minimal, simplicity is the
+ * choice. Save the same context on interrupt entry as for tasks in
+ * this case.
+ *
+ * Additionally, if gdb is to be made aware of tasks for this CPU, then
+ * care should be used in designing the context area.
+ *
+ * On some CPUs with hardware floating point support, the Context_Control_fp
+ * structure will not be used or it simply consists of an array of a
+ * fixed number of bytes. This is done when the floating point context
+ * is dumped by a "FP save context" type instruction and the format
+ * is not really defined by the CPU. In this case, there is no need
+ * to figure out the exact format -- only the size. Of course, although
+ * this is enough information for context switches, it is probably not
+ * enough for a debugger such as gdb. But that is another problem.
+ */
+
+#ifndef ASM
+
+/* XXX */
+typedef struct {
+ unsigned32 g0;
+ unsigned32 g1;
+ unsigned32 g2;
+ unsigned32 g3;
+ unsigned32 g4;
+ unsigned32 g5;
+ unsigned32 g6;
+ unsigned32 g7;
+
+ unsigned32 l0;
+ unsigned32 l1;
+ unsigned32 l2;
+ unsigned32 l3;
+ unsigned32 l4;
+ unsigned32 l5;
+ unsigned32 l6;
+ unsigned32 l7;
+
+ unsigned32 i0;
+ unsigned32 i1;
+ unsigned32 i2;
+ unsigned32 i3;
+ unsigned32 i4;
+ unsigned32 i5;
+ unsigned32 i6;
+ unsigned32 i7;
+
+ unsigned32 o0;
+ unsigned32 o1;
+ unsigned32 o2;
+ unsigned32 o3;
+ unsigned32 o4;
+ unsigned32 o5;
+ unsigned32 o6;
+ unsigned32 o7;
+
+ unsigned32 wim;
+ unsigned32 psr;
+} Context_Control;
+
+#endif /* ASM */
+
+/*
+ * Offsets of fields within Context_Control for assembly routines.
+ */
+
+#define G0_OFFSET 0x00
+#define G1_OFFSET 0x04
+#define G2_OFFSET 0x08
+#define G3_OFFSET 0x0C
+#define G4_OFFSET 0x10
+#define G5_OFFSET 0x14
+#define G6_OFFSET 0x18
+#define G7_OFFSET 0x1C
+
+#define L0_OFFSET 0x20
+#define L1_OFFSET 0x24
+#define L2_OFFSET 0x28
+#define L3_OFFSET 0x2C
+#define L4_OFFSET 0x30
+#define L5_OFFSET 0x34
+#define L6_OFFSET 0x38
+#define L7_OFFSET 0x3C
+
+#define I0_OFFSET 0x40
+#define I1_OFFSET 0x44
+#define I2_OFFSET 0x48
+#define I3_OFFSET 0x4C
+#define I4_OFFSET 0x50
+#define I5_OFFSET 0x54
+#define I6_OFFSET 0x58
+#define I7_OFFSET 0x5C
+
+#define O0_OFFSET 0x60
+#define O1_OFFSET 0x64
+#define O2_OFFSET 0x68
+#define O3_OFFSET 0x6C
+#define O4_OFFSET 0x70
+#define O5_OFFSET 0x74
+#define O6_OFFSET 0x78
+#define O7_OFFSET 0x7C
+
+#define WIM_OFFSET 0x80
+#define PSR_OFFSET 0x84
+
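As a hypothetical cross-check, the offsets above can be compared against the structure layout with offsetof; the helper below is illustrative only and assumes unsigned32 is four bytes wide:

    #include <assert.h>
    #include <stddef.h>

    void check_context_offsets( void )
    {
      /* Each assembly offset must equal the field's position in Context_Control. */
      assert( offsetof( Context_Control, g1 )  == G1_OFFSET  );   /* 0x04 */
      assert( offsetof( Context_Control, o6 )  == O6_OFFSET  );   /* 0x78 */
      assert( offsetof( Context_Control, psr ) == PSR_OFFSET );   /* 0x84 */
    }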
+#ifndef ASM
+
+/* XXX */
+typedef struct {
+ double f0_f1;
+ double f2_f3;
+ double f4_f5;
+ double f6_f7;
+ double f8_f9;
+ double f10_f11;
+ double f12_f13;
+ double f14_f15;
+ double f16_f17;
+ double f18_f19;
+ double f20_f21;
+ double f22_f23;
+ double f24_f25;
+ double f26_f27;
+ double f28_f29;
+ double f30_f31;
+ unsigned32 fsr;
+} Context_Control_fp;
+
+#endif /* ASM */
+
+/*
+ * Offsets of fields within Context_Control_fp for assembly routines.
+ */
+
+#define FO_F1_OFFSET 0x00
+#define F2_F3_OFFSET 0x08
+#define F4_F5_OFFSET 0x10
+#define F6_F7_OFFSET 0x18
+#define F8_F9_OFFSET 0x20
+#define F1O_F11_OFFSET 0x28
+#define F12_F13_OFFSET 0x30
+#define F14_F15_OFFSET 0x38
+#define F16_F17_OFFSET 0x40
+#define F18_F19_OFFSET 0x48
+#define F2O_F21_OFFSET 0x50
+#define F22_F23_OFFSET 0x58
+#define F24_F25_OFFSET 0x60
+#define F26_F27_OFFSET 0x68
+#define F28_F29_OFFSET 0x70
+#define F3O_F31_OFFSET 0x78
+#define FSR_OFFSET 0x80
+
+#ifndef ASM
+
+typedef struct {
+ unsigned32 special_interrupt_register_XXX;
+} CPU_Interrupt_frame;
+
+#endif /* ASM */
+
+/*
+ * Offsets of fields within CPU_Interrupt_frame for assembly routines.
+ */
+
+#ifndef ASM
+
+/*
+ * The following table contains the information required to configure
+ * the XXX processor specific parameters.
+ *
+ * NOTE: The interrupt_stack_size field is required if
+ * CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
+ *
+ * The pretasking_hook, predriver_hook, postdriver_hook,
+ * and do_zero_of_workspace fields are required on ALL CPUs.
+ */
+
+typedef struct {
+ void (*pretasking_hook)( void );
+ void (*predriver_hook)( void );
+ void (*postdriver_hook)( void );
+ void (*idle_task)( void );
+ boolean do_zero_of_workspace;
+ unsigned32 interrupt_stack_size;
+ unsigned32 extra_system_initialization_stack;
+ unsigned32 some_other_cpu_dependent_info_XXX;
+} rtems_cpu_table;
+
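A hypothetical example of a BSP filling in this table; every hook name and size below is a placeholder, not something defined by this commit:

    extern void bsp_pretasking_hook( void );   /* illustrative BSP hooks */
    extern void bsp_predriver_hook( void );
    extern void bsp_postdriver_hook( void );

    rtems_cpu_table Cpu_table = {
      bsp_pretasking_hook,    /* pretasking_hook */
      bsp_predriver_hook,     /* predriver_hook */
      bsp_postdriver_hook,    /* postdriver_hook */
      0,                      /* idle_task: 0 selects the default IDLE body */
      TRUE,                   /* do_zero_of_workspace */
      4 * 1024,               /* interrupt_stack_size */
      0,                      /* extra_system_initialization_stack */
      0                       /* some_other_cpu_dependent_info_XXX */
    };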
+/*
+ * This variable is optional. It is used on CPUs on which it is difficult
+ * to generate an "uninitialized" FP context. It is filled in by
+ * _CPU_Initialize and copied into the task's FP context area during
+ * _CPU_Context_Initialize.
+ */
+
+EXTERN Context_Control_fp _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
+
+/*
+ * On some CPUs, software managed interrupt stack is supported.
+ * This stack is allocated by the Interrupt Manager and the switch
+ * is performed in _ISR_Handler. These variables contain pointers
+ * to the lowest and highest addresses in the chunk of memory allocated
+ * for the interrupt stack. Since it is unknown whether the stack
+ * grows up or down (in general), this gives the CPU dependent
+ * code the option of picking the version it wants to use.
+ *
+ * NOTE: These two variables are required if the macro
+ * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
+ */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+/*
+ * With some compilation systems, it is difficult if not impossible to
+ * call a high-level language routine from assembly language. This
+ * is especially true of commercial Ada compilers and name mangling
+ * C++ ones. This variable can be optionally defined by the CPU porter
+ * and contains the address of the routine _Thread_Dispatch. This
+ * can make it easier to invoke that routine at the end of the interrupt
+ * sequence (if a dispatch is necessary).
+ */
+
+EXTERN void (*_CPU_Thread_dispatch_pointer)();
+
+/*
+ * Nothing prevents the porter from declaring more CPU specific variables.
+ */
+
+/* XXX: if needed, put more variables here */
+
+/*
+ * The size of the floating point context area. On some CPUs this
+ * will not be a "sizeof" because the format of the floating point
+ * area is not defined -- only the size is. This is usually on
+ * CPUs with a "floating point save context" instruction.
+ */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+/*
+ * Amount of extra stack (above minimum stack size) required by
+ * system initialization thread. Remember that in a multiprocessor
+ * system the system initialization thread becomes the MP server thread.
+ */
+
+#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 1024
+
+/*
+ * This defines the number of entries in the ISR_Vector_table managed
+ * by the executive.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 255
+
+/*
+ * Should be large enough to run all tests. This ensures
+ * that a "reasonably" small application should not have any problems.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE (1024*2)
+
+/*
+ * CPU's worst alignment requirement for data types on a byte boundary. This
+ * alignment does not take into account the requirements for the stack.
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * heap handler. This alignment requirement may be stricter than that
+ * for the data types alignment specified by CPU_ALIGNMENT. It is
+ * common for the heap to follow the same alignment requirement as
+ * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
+ * then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for memory
+ * buffers allocated by the partition manager. This alignment requirement
+ * may be stricter than that for the data types alignment specified by
+ * CPU_ALIGNMENT. It is common for the partition to follow the same
+ * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict
+ * enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ * NOTE: This does not have to be a power of 2. It does have to
+ * be greater than or equal to CPU_ALIGNMENT.
+ */
+
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+/*
+ * This number corresponds to the byte alignment requirement for the
+ * stack. This alignment requirement may be stricter than that for the
+ * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
+ * is strict enough for the stack, then this should be set to 0.
+ *
+ * NOTE: This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ */
+
+#define CPU_STACK_ALIGNMENT 16
+
+#endif /* ASM */
+
+#ifndef ASM
+
+/* ISR handler macros */
+
+/*
+ * Disable all interrupts for a critical section. The previous
+ * level is returned in _level.
+ */
+
+#define _CPU_ISR_Disable( _level ) \
+ sparc_disable_interrupts( _level )
+
+/*
+ * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ * This indicates the end of a critical section. The parameter
+ * _level is not modified.
+ */
+
+#define _CPU_ISR_Enable( _level ) \
+ sparc_enable_interrupts( _level )
+
+/*
+ * This temporarily restores the interrupt level to _level before
+ * immediately disabling interrupts again. This is used to divide long critical
+ * sections into two or more parts. The parameter _level is not
+ * modified.
+ */
+
+#define _CPU_ISR_Flash( _level ) \
+ sparc_flash_interrupts( _level )
+
+/*
+ * Map interrupt level in task mode onto the hardware that the CPU
+ * actually provides. Currently, interrupt levels which do not
+ * map onto the CPU in a generic fashion are undefined. Someday,
+ * it would be nice if these were "mapped" by the application
+ * via a callout. For example, the m68k has 8 levels (0 - 7); levels
+ * 8 - 255 would be available for bsp/application specific meaning.
+ * This could be used to manage a programmable interrupt controller
+ * via the rtems_task_mode directive.
+ */
+
+#define _CPU_ISR_Set_level( _newlevel ) \
+ sparc_set_interrupt_level( _newlevel )
+
+unsigned32 _CPU_ISR_Get_level( void );
+
+/* end of ISR handler macros */
+
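A hypothetical usage sketch of the three macros above; the function and the work done inside it are illustrative only:

    void hypothetical_critical_section( void )
    {
      unsigned32 level;

      _CPU_ISR_Disable( level );    /* raise the PIL, remember the old level  */
      /* ... touch executive data structures ... */
      _CPU_ISR_Flash( level );      /* briefly let pending interrupts through */
      /* ... continue the critical section ... */
      _CPU_ISR_Enable( level );     /* restore the previous interrupt level   */
    }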
+/* Context handler macros */
+
+/*
+ * Initialize the context to a state suitable for starting a
+ * task after a context restore operation. Generally, this
+ * involves:
+ *
+ * - setting a starting address
+ * - preparing the stack
+ * - preparing the stack and frame pointers
+ * - setting the proper interrupt level in the context
+ * - initializing the floating point context
+ *
+ * This routine generally does not set any unnecessary register
+ * in the context. The state of the "general data" registers is
+ * undefined at task start time.
+ *
+ * NOTE: Implemented as a subroutine for the SPARC port.
+ */
+
+void _CPU_Context_Initialize(
+ Context_Control *_the_context,
+ unsigned32 *_stack_base,
+ unsigned32 _size,
+ unsigned32 _new_level,
+ void *_entry_point
+);
+
+/*
+ * This routine is responsible for somehow restarting the currently
+ * executing task. If you are lucky, then all that is necessary
+ * is restoring the context. Otherwise, there will need to be
+ * a special assembly routine which does something special in this
+ * case. Context_Restore should work most of the time. It will
+ * not work if restarting self conflicts with the stack frame
+ * assumptions of restoring a context.
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+/*
+ * The purpose of this macro is to allow the initial pointer into
+ * a floating point context area (used to save the floating point
+ * context) to be at an arbitrary place in the floating point
+ * context area.
+ *
+ * This is necessary because some FP units are designed to have
+ * their context saved as a stack which grows into lower addresses.
+ * Other FP units can be saved by simply moving registers into offsets
+ * from the base of the context area. Finally some FP units provide
+ * a "dump context" instruction which could fill in from high to low
+ * or low to high based on the whim of the CPU designers.
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+ ( (void *) (_base) + (_offset) )
+
+/*
+ * This routine initializes the FP context area passed to it.
+ * There are a few standard ways in which to initialize the
+ * floating point context. The code included for this macro assumes
+ * that this is a CPU in which an "initial" FP context was saved into
+ * _CPU_Null_fp_context and it simply copies it to the destination
+ * context passed to it.
+ *
+ * Other models include (1) not doing anything, and (2) putting
+ * a "null FP status word" in the correct place in the FP context.
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ { \
+ *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
+ }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ * This routine copies _error into a known place -- typically a stack
+ * location or a register, optionally disables interrupts, and
+ * halts/stops the CPU.
+ */
+
+#define _CPU_Fatal_halt( _error ) \
+ { \
+ }
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ * This routine sets _output to the bit number of the first bit
+ * set in _value. _value is of CPU dependent type Priority_Bit_map_control.
+ * This type may be either 16 or 32 bits wide although only the 16
+ * least significant bits will be used.
+ *
+ * There are a number of considerations in using a "find first bit" type
+ * instruction.
+ *
+ * (1) What happens when run on a value of zero?
+ * (2) Bits may be numbered from MSB to LSB or vice-versa.
+ * (3) The numbering may be zero or one based.
+ * (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ * The executive guarantees that (1) will never happen so it is not a concern.
+ * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ * _CPU_Priority_Bits_index(). These three form a set of routines
+ * which must logically operate together. Bits in the _value are
+ * set and cleared based on masks built by _CPU_Priority_mask().
+ * The basic major and minor values calculated by _Priority_Major()
+ * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
+ * to properly range between the values returned by the "find first bit"
+ * instruction. This makes it possible for _Priority_Get_highest() to
+ * calculate the major and directly index into the minor table.
+ * This mapping is necessary to ensure that 0 (a high priority major/minor)
+ * is the first bit found.
+ *
+ * This entire "find first bit" and mapping process depends heavily
+ * on the manner in which a priority is broken into a major and minor
+ * components with the major being the 4 MSB of a priority and minor
+ * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next
+ * to the lowest priority.
+ *
+ * If your CPU does not have a "find first bit" instruction, then
+ * there are ways to make do without it. Here are a handful of ways
+ * to implement this in software:
+ *
+ * - a series of 16 bit test instructions
+ * - a "binary search using if's"
+ * - _number = 0
+ * if _value > 0x00ff
+ * _value >>=8
+ * _number = 8;
+ *
+ * if _value > 0x000f
+ * _value >>= 4
+ * _number += 4
+ *
+ * _number += bit_set_table[ _value ]
+ *
+ * where bit_set_table[ 16 ] has values which indicate the first
+ * bit set
+ */
+
+#ifndef INIT
+ extern const unsigned char __log2table[256];
+#else
+const unsigned char __log2table[256] = {
+ 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+};
+#endif
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+ { \
+ register __value = (_value); \
+ \
+ if ( !(__value & 0xff00) ) \
+ (_output) = __log2table[ __value ]; \
+ else \
+ (_output) = __log2table[ __value >> 8 ] + 8; \
+ }
+
+
+/* end of Bitfield handler macros */
+
+/*
+ * This routine builds the mask which corresponds to the bit fields
+ * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Mask( _bit_number ) \
+ ( 0x8000 >> (_bit_number) )
+
+/*
+ * This routine translates the bit numbers returned by
+ * _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ * a major or minor component of a priority. See the discussion
+ * for that routine.
+ */
+
+#define _CPU_Priority_Bits_index( _priority ) \
+ (15 - (_priority))
+
+/* end of Priority handler macros */
+
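To make the major/minor mapping concrete, here is a hypothetical trace of a single ready task at priority 0x42; log2_msb() stands in for the __log2table lookup and the whole program is illustrative only:

    #include <stdio.h>

    /* Stand-in for __log2table[v]: index of the most significant set bit. */
    static unsigned int log2_msb( unsigned int v )
    {
      unsigned int bit = 0;
      while ( v >>= 1 ) bit++;
      return bit;
    }

    int main( void )
    {
      unsigned int priority = 0x42;
      unsigned int major    = priority >> 4;     /* 4                               */
      unsigned int bit_map  = 0x8000 >> major;   /* _CPU_Priority_Mask(4) == 0x0800 */
      unsigned int found;

      if ( !( bit_map & 0xff00 ) )
        found = log2_msb( bit_map );
      else
        found = log2_msb( bit_map >> 8 ) + 8;    /* 3 + 8 == 11                     */

      printf( "major recovered = %u\n", 15 - found );  /* _CPU_Priority_Bits_index(11) == 4 */
      return 0;
    }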
+/* functions */
+
+/*
+ * _CPU_Initialize
+ *
+ * This routine performs CPU dependent initialization.
+ */
+
+void _CPU_Initialize(
+ rtems_cpu_table *cpu_table,
+ void (*thread_dispatch)
+);
+
+/*
+ * _CPU_ISR_install_vector
+ *
+ * This routine installs an interrupt vector.
+ */
+
+void _CPU_ISR_install_vector(
+ unsigned32 vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/*
+ * _CPU_Install_interrupt_stack
+ *
+ * This routine installs the hardware interrupt stack pointer.
+ *
+ * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ * is TRUE.
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ * _CPU_Internal_threads_Idle_thread_body
+ *
+ * This routine is the CPU dependent IDLE thread body.
+ *
+ * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
+ * is TRUE.
+ */
+
+void _CPU_Internal_threads_Idle_thread_body( void );
+
+/*
+ * _CPU_Context_switch
+ *
+ * This routine switches from the run context to the heir context.
+ */
+
+void _CPU_Context_switch(
+ Context_Control *run,
+ Context_Control *heir
+);
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+void _CPU_Context_restore(
+ Context_Control *new_context
+);
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine saves the floating point context passed to it.
+ */
+
+void _CPU_Context_save_fp(
+ void **fp_context_ptr
+);
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine restores the floating point context passed to it.
+ */
+
+void _CPU_Context_restore_fp(
+ void **fp_context_ptr
+);
+
+/* The following routine swaps the endian format of an unsigned int.
+ * It must be static because it is referenced indirectly.
+ *
+ * This version will work on any processor, but if there is a better
+ * way for your CPU PLEASE use it. The most common way to do this is to:
+ *
+ * swap least significant two bytes with 16-bit rotate
+ * swap upper and lower 16-bits
+ * swap most significant two bytes with 16-bit rotate
+ *
+ * Some CPUs have special instructions which swap a 32-bit quantity in
+ * a single instruction (e.g. i486). It is probably best to avoid
+ * an "endian swapping control bit" in the CPU. One good reason is
+ * that interrupts would probably have to be disabled to insure that
+ * an interrupt does not try to access the same "chunk" with the wrong
+ * endian. Another good reason is that on some CPUs, the endian bit
+ * controls the endianness for ALL fetches -- both code and data -- so the code
+ * will be fetched incorrectly.
+ */
+
+static inline unsigned int CPU_swap_u32(
+ unsigned int value
+)
+{
+ unsigned32 byte1, byte2, byte3, byte4, swapped;
+
+ byte4 = (value >> 24) & 0xff;
+ byte3 = (value >> 16) & 0xff;
+ byte2 = (value >> 8) & 0xff;
+ byte1 = value & 0xff;
+
+ swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ return( swapped );
+}
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/c/src/exec/score/cpu/sparc/cpu_asm.s b/c/src/exec/score/cpu/sparc/cpu_asm.s
new file mode 100644
index 0000000000..d1e275ca3f
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/cpu_asm.s
@@ -0,0 +1,328 @@
+/* cpu_asm.s
+ *
+ * This file contains the basic algorithms for all assembly code used
+ * in a specific CPU port of RTEMS. These algorithms must be implemented
+ * in assembly language.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+#include <rtems/score/cpu.h>
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the point to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ *
+ * void _CPU_Context_save_fp(
+ * void **fp_context_ptr
+ * )
+ * {
+ * }
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_save_fp)
+SYM(_CPU_Context_save_fp):
+ save %sp,-104,%sp
+ ld [%i0],%l0
+ std %f0,[%l0+FO_F1_OFFSET]
+ std %f2,[%l0+F2_F3_OFFSET]
+ std %f4,[%l0+F4_F5_OFFSET]
+ std %f6,[%l0+F6_F7_OFFSET]
+ std %f8,[%l0+F8_F9_OFFSET]
+ std %f10,[%l0+F1O_F11_OFFSET]
+ std %f12,[%l0+F12_F13_OFFSET]
+ std %f14,[%l0+F14_F15_OFFSET]
+ std %f16,[%l0+F16_F17_OFFSET]
+ std %f18,[%l0+F18_F19_OFFSET]
+ std %f20,[%l0+F2O_F21_OFFSET]
+ std %f22,[%l0+F22_F23_OFFSET]
+ std %f24,[%l0+F24_F25_OFFSET]
+ std %f26,[%l0+F26_F27_OFFSET]
+ std %f28,[%l0+F28_F29_OFFSET]
+ std %f30,[%l0+F3O_F31_OFFSET]
+ st %fsr,[%l0+FSR_OFFSET]
+ ret
+ restore
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the pointer to load the FP context
+ * from is changed then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *). The general rule on making this decision
+ * is to avoid writing assembly language.
+ *
+ * void _CPU_Context_restore_fp(
+ * void **fp_context_ptr
+ * )
+ * {
+ * }
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_restore_fp)
+SYM(_CPU_Context_restore_fp):
+ save %sp,-104,%sp
+ ld [%i0],%l0
+ ldd [%l0+FO_F1_OFFSET],%f0
+ ldd [%l0+F2_F3_OFFSET],%f2
+ ldd [%l0+F4_F5_OFFSET],%f4
+ ldd [%l0+F6_F7_OFFSET],%f6
+ ldd [%l0+F8_F9_OFFSET],%f8
+ ldd [%l0+F1O_F11_OFFSET],%f10
+ ldd [%l0+F12_F13_OFFSET],%f12
+ ldd [%l0+F14_F15_OFFSET],%f14
+ ldd [%l0+F16_F17_OFFSET],%f16
+ ldd [%l0+F18_F19_OFFSET],%f18
+ ldd [%l0+F2O_F21_OFFSET],%f20
+ ldd [%l0+F22_F23_OFFSET],%f22
+ ldd [%l0+F24_F25_OFFSET],%f24
+ ldd [%l0+F26_F27_OFFSET],%f26
+ ldd [%l0+F28_F29_OFFSET],%f28
+ ldd [%l0+F3O_F31_OFFSET],%f30
+ ld [%l0+FSR_OFFSET],%fsr
+ ret
+ restore
+
+/* _CPU_Context_switch
+ *
+ * This routine performs a normal non-FP context switch.
+ *
+ * void _CPU_Context_switch(
+ * Context_Control *run,
+ * Context_Control *heir
+ * )
+ * {
+ * }
+ */
+
+/* from gcc-2.7.0/config/sparc/sparc.h on register usage */
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ g0 is used for the condition code and not to represent %g0, which is
+ hardwired to 0, so reg 0 is *not* fixed.
+ On non-v9 systems:
+ g1 is free to use as temporary.
+ g2-g4 are reserved for applications. Gcc normally uses them as
+ temporaries, but this can be disabled via the -mno-app-regs option.
+ g5 through g7 are reserved for the operating system.
+ On v9 systems:
+ g1 and g5 are free to use as temporaries.
+ g2-g4 are reserved for applications (the compiler will not normally use
+ them, but they can be used as temporaries with -mapp-regs).
+ g6-g7 are reserved for the operating system.
+ ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
+ currently be a fixed register until this pattern is rewritten.
+ Register 1 is also used when restoring call-preserved registers in large
+ stack frames. */
+
+
+ .align 4
+ PUBLIC(_CPU_Context_switch)
+SYM(_CPU_Context_switch):
+ ta 0x03 /* flush registers */
+
+ /* skip g0 */
+ st %g1,[%o0+G1_OFFSET] /* globals */
+ st %g2,[%o0+G2_OFFSET]
+ st %g3,[%o0+G3_OFFSET]
+ st %g4,[%o0+G4_OFFSET]
+ st %g5,[%o0+G5_OFFSET]
+ st %g6,[%o0+G6_OFFSET]
+ st %g7,[%o0+G7_OFFSET]
+
+ st %l0,[%o0+L0_OFFSET]
+ st %l1,[%o0+L1_OFFSET]
+ st %l2,[%o0+L2_OFFSET]
+ st %l3,[%o0+L3_OFFSET]
+ st %l4,[%o0+L4_OFFSET]
+ st %l5,[%o0+L5_OFFSET]
+ st %l6,[%o0+L6_OFFSET]
+ st %l7,[%o0+L7_OFFSET]
+
+ st %i0,[%o0+I0_OFFSET]
+ st %i1,[%o0+I1_OFFSET]
+ st %i2,[%o0+I2_OFFSET]
+ st %i3,[%o0+I3_OFFSET]
+ st %i4,[%o0+I4_OFFSET]
+ st %i5,[%o0+I5_OFFSET]
+ st %i6,[%o0+I6_OFFSET]
+ st %i7,[%o0+I7_OFFSET]
+
+ st %o0,[%o0+O0_OFFSET]
+ st %o1,[%o0+O1_OFFSET]
+ st %o2,[%o0+O2_OFFSET]
+ st %o3,[%o0+O3_OFFSET]
+ st %o4,[%o0+O4_OFFSET]
+ st %o5,[%o0+O5_OFFSET]
+ st %o6,[%o0+O6_OFFSET]
+ st %o7,[%o0+O7_OFFSET]
+
+ rd %psr,%o2
+ st %o2,[%o0+PSR_OFFSET] /* save status register */
+
+ /* enter here with o1 = context to restore */
+ /* o2 = psr */
+restore:
+
+ ld [%o1+PSR_OFFSET],%o0
+ and %o2,31,%o2 /* o2 = cwp */
+ and %o0,-32,%o0 /* o0 = psr w/o cwp */
+ or %o0,%o2,%o2 /* o2 = new psr */
+ wr %o2,0,%psr /* restore status register */
+
+ /* skip g0 */
+ ld [%o1+G1_OFFSET],%g1
+ ld [%o1+G2_OFFSET],%g2
+ ld [%o1+G3_OFFSET],%g3
+ ld [%o1+G4_OFFSET],%g4
+ ld [%o1+G5_OFFSET],%g5
+ ld [%o1+G6_OFFSET],%g6
+ ld [%o1+G7_OFFSET],%g7
+
+ ld [%o1+L0_OFFSET],%l0
+ ld [%o1+L1_OFFSET],%l1
+ ld [%o1+L2_OFFSET],%l2
+ ld [%o1+L3_OFFSET],%l3
+ ld [%o1+L4_OFFSET],%l4
+ ld [%o1+L5_OFFSET],%l5
+ ld [%o1+L6_OFFSET],%l6
+ ld [%o1+L7_OFFSET],%l7
+
+ ld [%o1+I0_OFFSET],%i0
+ ld [%o1+I1_OFFSET],%i1
+ ld [%o1+I2_OFFSET],%i2
+ ld [%o1+I3_OFFSET],%i3
+ ld [%o1+I4_OFFSET],%i4
+ ld [%o1+I5_OFFSET],%i5
+ ld [%o1+I6_OFFSET],%i6
+ ld [%o1+I7_OFFSET],%i7
+
+ ld [%o1+O0_OFFSET],%o0
+ /* do o1 last to avoid destroying heir context pointer */
+ ld [%o1+O2_OFFSET],%o2
+ ld [%o1+O3_OFFSET],%o3
+ ld [%o1+O4_OFFSET],%o4
+ ld [%o1+O5_OFFSET],%o5
+ ld [%o1+O6_OFFSET],%o6
+ ld [%o1+O7_OFFSET],%o7
+
+ ld [%o1+O1_OFFSET],%o1 /* overwrite heir pointer */
+
+ jmp %o7 + 8 /* return */
+ nop /* delay slot */
+
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ *
+ * void _CPU_Context_restore(
+ * Context_Control *new_context
+ * )
+ * {
+ * }
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_restore)
+SYM(_CPU_Context_restore):
+ save %sp, -104, %sp /* save a stack frame */
+ ta 0x03 /* flush registers */
+ rd %psr,%o2
+ ba restore
+ mov %i0,%o1 /* in the delay slot */
+
+/* void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * void _ISR_Handler()
+ * {
+ * }
+ */
+
+ .align 4
+ PUBLIC(_ISR_Handler)
+SYM(_ISR_Handler):
+ ret
+
+ /*
+ * This discussion ignores a lot of the ugly details in a real
+ * implementation such as saving enough registers/state to be
+ * able to do something real. Keep in mind that the goal is
+ * to invoke a user's ISR handler which is written in C and
+ * uses a certain set of registers.
+ *
+ * Also note that the exact order is to a large extent flexible.
+ * Hardware will dictate a sequence for a certain subset of
+ * _ISR_Handler while requirements for setting
+ */
+
+ /*
+ * At entry to "common" _ISR_Handler, the vector number must be
+ * available. On some CPUs the hardware puts either the vector
+ * number or the offset into the vector table for this ISR in a
+ * known place. If the hardware does not give us this information,
+ * then the assembly portion of RTEMS for this port will contain
+ * a set of distinct interrupt entry points which somehow place
+ * the vector number in a known place (which is safe if another
+ * interrupt nests this one) and branches to _ISR_Handler.
+ *
+ * save some or all context on stack
+ * may need to save some special interrupt information for exit
+ *
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * if ( _ISR_Nest_level == 0 )
+ * switch to software interrupt stack
+ * #endif
+ *
+ * _ISR_Nest_level++;
+ *
+ * _Thread_Dispatch_disable_level++;
+ *
+ * (*_ISR_Vector_table[ vector ])( vector );
+ *
+ * --_ISR_Nest_level;
+ *
+ * if ( _ISR_Nest_level )
+ * goto the label "exit interrupt (simple case)"
+ *
+ * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+ * restore stack
+ * #endif
+ *
+ * if ( !_Context_Switch_necessary )
+ * goto the label "exit interrupt (simple case)"
+ *
+ * if ( !_ISR_Signals_to_thread_executing )
+ * goto the label "exit interrupt (simple case)"
+ *
+ * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+ *
+ * prepare to get out of interrupt
+ * return from interrupt (maybe to _ISR_Dispatch)
+ *
+ * LABEL "exit interrupt (simple case):
+ * prepare to get out of interrupt
+ * return from interrupt
+ */
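A hypothetical C rendering of the pseudo-code above; the real handler must be written in assembly, the external names follow the executive's conventions, and the context save, stack switch, and disable-level bookkeeping on exit are elided, as in the comment:

    void hypothetical_ISR_Handler( unsigned32 vector )
    {
      /* context save and any interrupt stack switch are omitted here */
      _ISR_Nest_level++;
      _Thread_Dispatch_disable_level++;

      (*_ISR_Vector_table[ vector ])( vector );

      if ( --_ISR_Nest_level )
        return;                       /* exit interrupt (simple case)          */

      if ( !_Context_Switch_necessary )
        return;                       /* exit interrupt (simple case)          */

      if ( !_ISR_Signals_to_thread_executing )
        return;                       /* exit interrupt (simple case)          */

      _Thread_Dispatch();             /* or prepare to return to _ISR_Dispatch */
    }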
diff --git a/c/src/exec/score/cpu/sparc/rtems.s b/c/src/exec/score/cpu/sparc/rtems.s
new file mode 100644
index 0000000000..2b9bd10454
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/rtems.s
@@ -0,0 +1,33 @@
+/* rtems.s
+ *
+ * This file contains the single entry point code for
+ * the SPARC port of RTEMS.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * RTEMS
+ *
+ * This routine jumps to the directive indicated in the
+ * CPU defined register. This routine is used when RTEMS is
+ * linked by itself and placed in ROM. This routine is the
+ * first address in the ROM space for RTEMS. The user "calls"
+ * this address with the directive arguments in the normal place.
+ * This routine then jumps indirectly to the correct directive
+ * preserving the arguments. The directive should not realize
+ * it has been "wrapped" in this way. The table "_Entry_points"
+ * is used to look up the directive.
+ *
+ * void RTEMS()
+ * {
+ * }
+ */
+
+ .align 4
+ PUBLIC(RTEMS)
+SYM(RTEMS):
+ ret
+
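The single entry point described above boils down to an indirect jump through a table; this hypothetical C sketch shows the idea, with the element type of _Entry_points and the way the directive number arrives both being assumptions for illustration:

    typedef void ( *rtems_directive_entry )( void );

    extern rtems_directive_entry _Entry_points[];   /* table named in the comment */

    void hypothetical_RTEMS_dispatch( unsigned32 directive_number )
    {
      /* Jump to the requested directive, leaving its arguments untouched. */
      (*_Entry_points[ directive_number ])();
    }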
diff --git a/c/src/exec/score/cpu/sparc/sparc.h b/c/src/exec/score/cpu/sparc/sparc.h
new file mode 100644
index 0000000000..3c5f0574f5
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/sparc.h
@@ -0,0 +1,176 @@
+/* sparc.h
+ *
+ * This include file contains information pertaining to the SPARC
+ * processor family.
+ *
+ * $Id$
+ */
+
+#ifndef _INCLUDE_SPARC_h
+#define _INCLUDE_SPARC_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following define the CPU Family and Model within the family
+ *
+ * NOTE: The string "REPLACE_THIS_WITH_THE_CPU_MODEL" is replaced
+ * with the name of the appropriate macro for this target CPU.
+ */
+
+#ifdef sparc
+#undef sparc
+#endif
+#define sparc
+
+#ifdef REPLACE_THIS_WITH_THE_CPU_MODEL
+#undef REPLACE_THIS_WITH_THE_CPU_MODEL
+#endif
+#define REPLACE_THIS_WITH_THE_CPU_MODEL
+
+#ifdef REPLACE_THIS_WITH_THE_BSP
+#undef REPLACE_THIS_WITH_THE_BSP
+#endif
+#define REPLACE_THIS_WITH_THE_BSP
+
+/*
+ * This file contains the information required to build
+ * RTEMS for a particular member of the "sparc"
+ * family. It does
+ * this by setting variables to indicate which implementation
+ * dependent features are present in a particular member
+ * of the family.
+ *
+ * Currently recognized feature flags:
+ *
+ * + SPARC_HAS_FPU
+ * 0 - no HW FPU
+ * 1 - has HW FPU (assumed to be compatible w/90C602)
+ *
+ * + SPARC_HAS_BITSCAN
+ * 0 - does not have scan instructions
+ * 1 - has scan instruction (no support implemented)
+ *
+ */
+
+#if defined(erc32)
+
+#define CPU_MODEL_NAME "erc32"
+#define SPARC_HAS_FPU 1
+#define SPARC_HAS_BITSCAN 0
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "SPARC"
+
+/*
+ * Standard nop
+ */
+
+#define nop() \
+ do { \
+ asm volatile ( "nop" ); \
+ } while ( 0 )
+
+/*
+ * Some macros to aid in accessing special registers.
+ */
+
+#define sparc_get_psr( _psr ) \
+ do { \
+ (_psr) = 0; \
+ asm volatile( "rd %%psr, %0" : "=r" (_psr) : "0" (_psr) ); \
+ } while ( 0 )
+
+#define sparc_set_psr( _psr ) \
+ do { \
+ asm volatile ( "wr %%g0,%0,%%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \
+ nop(); nop(); nop(); \
+ } while ( 0 )
+
+#define sparc_get_tbr( _tbr ) \
+ do { \
+ asm volatile( "rd %%tbr, %0" : "=r" (_tbr) : "0" (_tbr) ); \
+ } while ( 0 )
+
+#define sparc_set_tbr( _tbr ) \
+ do { \
+ } while ( 0 )
+
+#define sparc_get_wim( _wim ) \
+ do { \
+ asm volatile( "rd %%wim, %0" : "=r" (_wim) : "0" (_wim) ); \
+ } while ( 0 )
+
+#define sparc_set_wim( _wim ) \
+ do { \
+ } while ( 0 )
+
+/*
+ * Manipulate the interrupt level in the psr
+ *
+ */
+
+#define SPARC_PIL_MASK 0x00000F00
+
+#define sparc_disable_interrupts( _level ) \
+ do { register unsigned int _mask = SPARC_PIL_MASK; \
+ (_level) = 0; \
+ \
+ asm volatile ( "rd %%psr,%0 ; \
+ wr %0,%1,%%psr " \
+ : "=r" ((_level)), "=r" (_mask) \
+ : "0" ((_level)), "1" (_mask) \
+ ); \
+ nop(); nop(); nop(); \
+ } while ( 0 )
+
+#define sparc_enable_interrupts( _level ) \
+ do { unsigned int _tmp; \
+ sparc_get_psr( _tmp ); \
+ _tmp &= ~SPARC_PIL_MASK; \
+ _tmp |= (_level) & SPARC_PIL_MASK; \
+ sparc_set_psr( _tmp ); \
+ } while ( 0 )
+
+
+#define sparc_flash_interrupts( _level ) \
+ do { \
+ register unsigned32 _ignored = 0; \
+ sparc_enable_interrupts( (_level) ); \
+ sparc_disable_interrupts( _ignored ); \
+ } while ( 0 )
+
+#define sparc_set_interrupt_level( _new_level ) \
+ do { register unsigned32 _new_psr_level = 0; \
+ \
+ sparc_get_psr( _new_psr_level ); \
+ _new_psr_level &= ~SPARC_PIL_MASK; \
+ _new_psr_level |= (((_new_level) << 8) & SPARC_PIL_MASK); \
+ sparc_set_psr( _new_psr_level ); \
+ } while ( 0 )
+
+#define sparc_get_interrupt_level( _level ) \
+ do { \
+ register unsigned32 _psr_level = 0; \
+ \
+ sparc_get_psr( _psr_level ); \
+ (_level) = (_psr_level & SPARC_PIL_MASK) >> 8; \
+ } while ( 0 )
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _INCLUDE_SPARC_h */
+/* end of include file */
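The PIL field occupies bits 8 through 11 of the PSR, so every macro above is a shift-and-mask; a hypothetical worked example with a made-up PSR image:

    #include <stdio.h>

    int main( void )
    {
      unsigned int pil_mask = 0x00000F00;             /* SPARC_PIL_MASK    */
      unsigned int psr      = 0x00000A80;             /* example PSR image */
      unsigned int level    = (psr & pil_mask) >> 8;  /* current level: 10 */
      unsigned int new_psr  = (psr & ~pil_mask)
                            | ((7u << 8) & pil_mask); /* insert level 7    */

      printf( "level = %u, new psr = 0x%08x\n", level, new_psr );  /* 10, 0x00000780 */
      return 0;
    }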
diff --git a/c/src/exec/score/cpu/sparc/sparctypes.h b/c/src/exec/score/cpu/sparc/sparctypes.h
new file mode 100644
index 0000000000..d07501f483
--- /dev/null
+++ b/c/src/exec/score/cpu/sparc/sparctypes.h
@@ -0,0 +1,49 @@
+/* sparctypes.h
+ *
+ * This include file contains type definitions pertaining to the SPARC
+ * processor family.
+ *
+ * $Id$
+ */
+
+#ifndef __SPARC_TYPES_h
+#define __SPARC_TYPES_h
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+typedef unsigned char unsigned8; /* unsigned 8-bit integer */
+typedef unsigned short unsigned16; /* unsigned 16-bit integer */
+typedef unsigned int unsigned32; /* unsigned 32-bit integer */
+typedef unsigned long long unsigned64; /* unsigned 64-bit integer */
+
+typedef unsigned16 Priority_Bit_map_control;
+
+typedef signed char signed8; /* 8-bit signed integer */
+typedef signed short signed16; /* 16-bit signed integer */
+typedef signed int signed32; /* 32-bit signed integer */
+typedef signed long long signed64; /* 64 bit signed integer */
+
+typedef unsigned32 boolean; /* Boolean value */
+
+typedef float single_precision; /* single precision float */
+typedef double double_precision; /* double precision float */
+
+typedef void sparc_isr;
+typedef void ( *sparc_isr_entry )( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif
+/* end of include file */
diff --git a/c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c b/c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c
index 195d4a11af..61069a8627 100644
--- a/c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c
+++ b/c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c
@@ -179,4 +179,3 @@ rtems_device_driver Clock_control(
done:
return RTEMS_SUCCESSFUL;
}
-
diff --git a/c/src/lib/libbsp/m68k/mvme136/timer/timer.c b/c/src/lib/libbsp/m68k/mvme136/timer/timer.c
index 8c3ecd45f2..23cbc46ffd 100644
--- a/c/src/lib/libbsp/m68k/mvme136/timer/timer.c
+++ b/c/src/lib/libbsp/m68k/mvme136/timer/timer.c
@@ -25,7 +25,6 @@
*/
-#include <rtems.h>
#include <bsp.h>
#include <z8036.h>