author     Joel Sherrill <joel.sherrill@OARcorp.com>  1998-12-14 23:15:38 +0000
committer  Joel Sherrill <joel.sherrill@OARcorp.com>  1998-12-14 23:15:38 +0000
commit     01629105c2817a59a4f1f05039593f211cf5ddaa (patch)
tree       76f6bb8f9ca6ddbd015e3b81964a8dacffaf5cf9 /cpukit
parent     Patch from Ralf Corsepius <corsepiu@faw.uni-ulm.de> to rename all (diff)
Patch from Ralf Corsepius <corsepiu@faw.uni-ulm.de> to rename all
.s files to .S in conformance with GNU conventions. This is a minor step along the way to supporting automake.
Diffstat (limited to 'cpukit')
-rw-r--r--  cpukit/score/cpu/a29k/cpu_asm.S    | 491
-rw-r--r--  cpukit/score/cpu/a29k/sig.S        | 197
-rw-r--r--  cpukit/score/cpu/hppa1.1/cpu_asm.S | 778
-rw-r--r--  cpukit/score/cpu/i386/cpu_asm.S    | 282
-rw-r--r--  cpukit/score/cpu/i960/cpu_asm.S    | 199
-rw-r--r--  cpukit/score/cpu/m68k/cpu_asm.S    | 291
-rw-r--r--  cpukit/score/cpu/sparc/cpu_asm.S   | 726
7 files changed, 2964 insertions(+), 0 deletions(-)
diff --git a/cpukit/score/cpu/a29k/cpu_asm.S b/cpukit/score/cpu/a29k/cpu_asm.S
new file mode 100644
index 0000000000..a3ed2c59c3
--- /dev/null
+++ b/cpukit/score/cpu/a29k/cpu_asm.S
@@ -0,0 +1,491 @@
+;/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s
+; *
+; * Author: Craig Lebakken <craigl@transition.com>
+; *
+; * COPYRIGHT (c) 1996 by Transition Networks Inc.
+; *
+; * To anyone who acknowledges that this file is provided "AS IS"
+; * without any express or implied warranty:
+; * permission to use, copy, modify, and distribute this file
+; * for any purpose is hereby granted without fee, provided that
+; * the above copyright notice and this notice appears in all
+; * copies, and that the name of Transition Networks not be used in
+; * advertising or publicity pertaining to distribution of the
+; * software without specific, written prior permission.
+; * Transition Networks makes no representations about the suitability
+; * of this software for any purpose.
+; *
+; *
+; * This file contains the basic algorithms for all assembly code used
+; * in a specific CPU port of RTEMS. These algorithms must be implemented
+; * in assembly language.
+; *
+; * NOTE: This is supposed to be a .S or .s file NOT a C file.
+; *
+; * COPYRIGHT (c) 1989-1998.
+; * On-Line Applications Research Corporation (OAR).
+; * Copyright assigned to U.S. Government, 1994.
+; *
+; * The license and distribution terms for this file may be
+; * found in the file LICENSE in this distribution or at
+; * http://www.OARcorp.com/rtems/license.html.
+; *
+; * $Id$
+; */
+
+;/*
+; * This is supposed to be an assembly file. This means that system.h
+; * and cpu.h should not be included in a "real" cpu_asm file. An
+; * implementation in assembly should include <cpu_asm.h>.
+; */
+
+;#include <cpu_asm.h>
+ .include "register.ah"
+ .include "amd.ah"
+ .include "pswmacro.ah"
+; .extern _bsp_exit
+;
+; push a register onto the struct
+ .macro spush, sp, reg
+ store 0, 0, reg, sp ; push register
+ add sp, sp, 4 ; adjust stack pointer
+ .endm
+; push a special register onto the struct
+ .macro spushsr, sp, reg, sr
+ mfsr reg, sr
+ store 0, 0, reg, sp ; push register
+ add sp, sp, 4 ; adjust stack pointer
+ .endm
+; pop a register from the struct
+ .macro spop, reg, sp
+ load 0, 0, reg, sp
+ add sp,sp,4
+ .endm
+; pop a special register from the struct
+ .macro spopsr, sreg, reg, sp
+ load 0, 0, reg, sp
+ mtsr sreg, reg
+ add sp,sp,4
+ .endm
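+;
+;/* For example, "spush pcb,it0" expands, per the macro above, to:
+; *
+; *      store 0, 0, it0, pcb    ; *pcb = it0
+; *      add   pcb, pcb, 4       ; advance the save pointer
+; *
+; * i.e. the context block is filled from low toward high addresses.
+; */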
+;
+;/*
+; * _CPU_Context_save_fp
+; *
+; * This routine is responsible for saving the FP context
+; * at *fp_context_ptr. If the location where the FP context is
+; * saved is changed then the pointer is modified by this routine.
+; *
+; * Sometimes a macro implementation of this is in cpu.h which dereferences
+; * the ** and a similarly named routine in this file is passed something
+; * like a (Context_Control_fp *). The general rule on making this decision
+; * is to avoid writing assembly language.
+; */
+
+;#if 0
+;void _CPU_Context_save_fp(
+; void **fp_context_ptr
+;)
+;{
+;}
+;#endif
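+;
+;/* Illustration only, not part of this port: the cpu.h macro variant
+; * mentioned above could simply dereference the ** and hand the
+; * underlying area to a routine taking a (Context_Control_fp *),
+; * along the lines of the sketch below. The names are hypothetical
+; * for the a29k (which keeps no FP context); the hppa1.1 port later
+; * in this patch does exactly this in assembly.
+; *
+; * #define _CPU_Context_save_fp( _fp_context_ptr ) \
+; *   _CPU_Save_float_context( *(_fp_context_ptr) )
+; */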
+ .global _CPU_Context_save_fp
+_CPU_Context_save_fp:
+ jmpi lr0
+ nop
+
+;/*
+; * _CPU_Context_restore_fp
+; *
+; * This routine is responsible for restoring the FP context
+; * at *fp_context_ptr. If the location from which the FP context
+; * is loaded is changed then the pointer is modified by this routine.
+; *
+; * Sometimes a macro implementation of this is in cpu.h which dereferences
+; * the ** and a similarly named routine in this file is passed something
+; * like a (Context_Control_fp *). The general rule on making this decision
+; * is to avoid writing assembly language.
+; */
+
+;#if 0
+;void _CPU_Context_restore_fp(
+; void **fp_context_ptr
+;)
+;{
+;}
+;#endif
+ .global __CPU_Context_restore_fp
+__CPU_Context_restore_fp:
+ jmpi lr0
+ nop
+
+;/* _CPU_Context_switch
+; *
+; * This routine performs a normal non-FP context switch.
+; */
+;#if 0
+;void _CPU_Context_switch(
+; Context_Control *run,
+; Context_Control *heir
+;)
+;{
+;}
+;#endif
+ .global __CPU_Context_switch
+__CPU_Context_switch:
+ asneq 106, gr1, gr1 ; syscall
+ jmpi lr0 ;
+ nop ;
+
+
+
+ .global _a29k_context_switch_sup
+_a29k_context_switch_sup:
+ add pcb,lr2,0
+ add kt1,lr3,0 ;move heir pointer to safe location
+ constn it0,SIG_SYNC
+ spush pcb,it0
+ spush pcb,gr1
+ spush pcb,rab ;push rab
+ spushsr pcb,it0,pc0 ;push specials
+ spushsr pcb,it0,pc1
+ add pcb,pcb,1*4 ;space pc2
+ spushsr pcb,it0,CHA ;push CHA
+ spushsr pcb,it0,CHD ;push CHD
+ spushsr pcb,it0,CHC ;push CHC
+ add pcb,pcb,1*4 ;space for alu
+ spushsr pcb,it0,ops ;push OPS
+ mfsr kt0,cps ;current status
+ const it1,FZ ;FZ constant
+ andn it1,kt0,it1 ;clear FZ bit
+ mtsr cps,it1 ;cps without FZ
+ add pcb,pcb,1*4 ;space for tav
+ mtsrim chc,0 ;possible DERR
+;
+ spush pcb,lr1 ;push R-stack
+ spush pcb,rfb ; support
+ spush pcb,msp ;push M-stack pnt.
+;
+ add pcb,pcb,3*4 ;space for floating point
+; spush pcb,FPStat0 ;floating point
+; spush pcb,FPStat1
+; spush pcb,FPStat2
+;
+ add pcb,pcb,4*4 ;space for IPA..Q
+;
+ mtsrim cr,29-1
+ storem 0,0,gr96,pcb ;push gr96-124, optional
+ add pcb,pcb,29*4 ;space for gr96-124
+;
+ sub it0,rfb,gr1 ;get bytes in cache
+ srl it0,it0,2 ;adjust to words
+ sub it0,it0,1
+ spush pcb,it0
+ mtsr cr,it0
+ storem 0,0,lr0,pcb ;save lr0-rfb
+;
+context_restore:
+ add pcb,kt1,0 ;pcb=heir
+ add pcb,pcb,4 ;space for signal num
+ spop gr1,pcb ;restore freeze registers
+ add gr1,gr1,0 ;alu op
+ add pcb,pcb,9*4 ;move past freeze registers
+ add pcb,pcb,1*4 ;space for tav
+ spop lr1,pcb
+ spop rfb,pcb
+ spop msp,pcb
+; spop FPStat0,pcb
+; spop FPStat1,pcb
+; spop FPStat2,pcb
+ add pcb,pcb,3*4 ;space for floating point
+ add pcb,pcb,4*4 ;space for IPA..Q
+ mtsrim cr,29-1
+ loadm 0,0,gr96,pcb ;pop gr96-gr124
+ add pcb,pcb,29*4 ;space for gr96-124
+
+ spop it1,pcb ;pop locals count
+ mtsr cr,it1
+ loadm 0,0,lr0,pcb ;load locals
+
+ add pcb,kt1,0 ;pcb=heir
+ mtsr cps,kt0 ;cps with FZ
+ nop
+ add pcb,pcb,4 ;space for signal num
+ spop gr1,pcb ;restore freeze registers
+ add gr1,gr1,0 ;alu op
+ spop rab,pcb
+ spopsr pc0,it1,pcb
+ spopsr pc1,it1,pcb
+ add pcb,pcb,4 ;space for pc2
+ spopsr CHA,it1,pcb
+ spopsr CHD,it1,pcb
+ spopsr CHC,it1,pcb
+ add pcb,pcb,4 ;space for alu
+ spopsr ops,it1,pcb
+ nop
+ iret
+
+
+;/*
+; * _CPU_Context_restore
+; *
+; * This routine is generally used only to restart self in an
+; * efficient manner. It may simply be a label in _CPU_Context_switch.
+; *
+; * NOTE: May be unnecessary to reload some registers.
+; */
+;#if 0
+;void _CPU_Context_restore(
+; Context_Control *new_context
+;)
+;{
+;}
+;#endif
+
+ .global __CPU_Context_restore
+__CPU_Context_restore:
+ asneq 107, gr1, gr1 ; syscall
+ jmpi lr0 ;
+ nop ;
+
+ .global _a29k_context_restore_sup
+_a29k_context_restore_sup:
+ add kt1,lr2,0 ;kt1 = restore context
+ mfsr kt0,cps ;current status
+ const it1,FZ ;FZ constant
+ andn it1,kt0,it1 ;clear FZ bit
+ mtsr cps,it1 ;cps without FZ
+ jmp context_restore
+ nop
+
+ .global _a29k_context_save_sup
+_a29k_context_save_sup:
+ add pcb,lr2,0
+ constn it0,SIG_SYNC
+ spush pcb,it0
+ spush pcb,gr1
+ spush pcb,rab ;push rab
+ spushsr pcb,it0,pc0 ;push specials
+ spushsr pcb,it0,pc1
+ add pcb,pcb,1*4 ;space pc2
+ spushsr pcb,it0,CHA ;push CHA
+ spushsr pcb,it0,CHD ;push CHD
+ spushsr pcb,it0,CHC ;push CHC
+ add pcb,pcb,1*4 ;space for alu
+ spushsr pcb,it0,ops ;push OPS
+ mfsr it0,cps ;current status
+	SaveFZState it1,it2
+ add pcb,pcb,1*4 ;space for tav
+ mtsrim chc,0 ;possible DERR
+;
+ spush pcb,lr1 ;push R-stack
+ spush pcb,rfb ; support
+ spush pcb,msp ;push M-stack pnt.
+;
+ spush pcb,FPStat0 ;floating point
+ spush pcb,FPStat1
+ spush pcb,FPStat2
+;
+ add pcb,pcb,4*4 ;space for IPA..Q
+;
+ mtsrim cr,29-1
+ storem 0,0,gr96,pcb ;push gr96-124, optional
+ add pcb,pcb,29*4 ;space for gr96-124
+;
+ sub kt0,rfb,gr1 ;get bytes in cache
+ srl kt0,kt0,2 ;adjust to words
+ sub kt0,kt0,1
+ spush pcb,kt0 ;push number of words
+ mtsr cr,kt0
+ storem 0,0,lr0,pcb ;save lr0-rfb
+;
+ mtsr cps,it0 ;cps with FZ
+	RestoreFZState it1,it2
+
+ nop
+ nop
+ nop
+;
+ iret
+;
+
+ .global __CPU_Context_save
+__CPU_Context_save:
+ asneq 108, gr1, gr1 ; syscall
+ jmpi lr0 ;
+ nop ;
+
+
+;/* void __ISR_Handler()
+; *
+; * This routine provides the RTEMS interrupt management.
+; *
+; */
+
+;#if 0
+;void _ISR_Handler()
+;{
+; /*
+; * This discussion ignores a lot of the ugly details in a real
+; * implementation such as saving enough registers/state to be
+; * able to do something real. Keep in mind that the goal is
+; * to invoke a user's ISR handler which is written in C and
+; * uses a certain set of registers.
+; *
+; * Also note that the exact order is to a large extent flexible.
+; * Hardware will dictate a sequence for a certain subset of
+; * _ISR_Handler while requirements for setting
+; */
+
+; /*
+; * At entry to "common" _ISR_Handler, the vector number must be
+; * available. On some CPUs the hardware puts either the vector
+; * number or the offset into the vector table for this ISR in a
+; * known place. If the hardware does not give us this information,
+; * then the assembly portion of RTEMS for this port will contain
+; * a set of distinct interrupt entry points which somehow place
+; * the vector number in a known place (which is safe if another
+; * interrupt nests this one) and branches to _ISR_Handler.
+; *
+; * save some or all context on stack
+; * may need to save some special interrupt information for exit
+; *
+; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+; * if ( _ISR_Nest_level == 0 )
+; * switch to software interrupt stack
+; * #endif
+; *
+; * _ISR_Nest_level++;
+; *
+; * _Thread_Dispatch_disable_level++;
+; *
+; * (*_ISR_Vector_table[ vector ])( vector );
+; *
+; * --_ISR_Nest_level;
+; *
+; * if ( _ISR_Nest_level )
+; * goto the label "exit interrupt (simple case)"
+; *
+; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
+; * restore stack
+; * #endif
+; *
+; * if ( !_Context_Switch_necessary )
+; * goto the label "exit interrupt (simple case)"
+; *
+; * if ( !_ISR_Signals_to_thread_executing )
+; * goto the label "exit interrupt (simple case)"
+; *
+; * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
+; *
+; * prepare to get out of interrupt
+; * return from interrupt (maybe to _ISR_Dispatch)
+; *
+; * LABEL "exit interrupt (simple case)":
+; * prepare to get out of interrupt
+; * return from interrupt
+; */
+;}
+;#endif
+; .global __ISR_Handler
+;__ISR_Handler:
+; jmpi lr0
+; nop
+
+ .global _a29k_getops
+_a29k_getops:
+ asneq 113, gr96, gr96
+ jmpi lr0
+ nop
+
+ .global _a29k_getops_sup
+_a29k_getops_sup:
+ mfsr gr96, ops ; caller wants ops
+ iret
+ nop
+
+ .global _a29k_disable
+_a29k_disable:
+ asneq 110, gr96, gr96
+ jmpi lr0
+ nop
+
+ .global _a29k_disable_sup
+_a29k_disable_sup:
+ mfsr kt0, ops
+ add gr96, kt0, 0 ; return ops to caller
+ const kt1, (DI | TD)
+ consth kt1, (DI | TD)
+ or kt1, kt0, kt1
+ mtsr ops, kt1
+ iret
+ nop
+
+ .global _a29k_disable_all
+_a29k_disable_all:
+ asneq 112, gr96, gr96
+ jmpi lr0
+ nop
+
+ .global _a29k_disable_all_sup
+_a29k_disable_all_sup:
+ mfsr kt0, ops
+ const kt1, (DI | TD)
+ consth kt1, (DI | TD)
+ or kt1, kt0, kt1
+ mtsr ops, kt1
+ iret
+ nop
+
+ .global _a29k_enable_all
+_a29k_enable_all:
+ asneq 111, gr96, gr96
+ jmpi lr0
+ nop
+
+ .global _a29k_enable_all_sup
+_a29k_enable_all_sup:
+ mfsr kt0, ops
+ const kt1, (DI | TD)
+ consth kt1, (DI | TD)
+ andn kt1, kt0, kt1
+ mtsr ops, kt1
+ iret
+ nop
+
+ .global _a29k_enable
+_a29k_enable:
+ asneq 109, gr96, gr96
+ jmpi lr0
+ nop
+
+ .global _a29k_enable_sup
+_a29k_enable_sup:
+ mfsr kt0, ops
+ const kt1, (DI | TD)
+ consth kt1, (DI | TD)
+ and kt3, lr2, kt1
+ andn kt0, kt0, kt1
+ or kt1, kt0, kt3
+ mtsr ops, kt1
+ iret
+ nop
+
+ .global _a29k_halt
+_a29k_halt:
+ halt
+ jmp _a29k_halt
+ nop
+
+ .global _a29k_super_mode
+_a29k_super_mode:
+ mfsr gr96, ops
+ or gr96, gr96, 0x10
+ mtsr ops, gr96
+ iret
+ nop
+
+ .global _a29k_as70
+_a29k_as70:
+ asneq 70,gr96,gr96
+ jmpi lr0
+ nop
diff --git a/cpukit/score/cpu/a29k/sig.S b/cpukit/score/cpu/a29k/sig.S
new file mode 100644
index 0000000000..245570ffcb
--- /dev/null
+++ b/cpukit/score/cpu/a29k/sig.S
@@ -0,0 +1,197 @@
+;/*
+; * $Id$
+; */
+
+; .include "register.ah"
+ .include "amd.ah"
+ .include "pswmacro.ah"
+ .comm WindowSize,4
+ .text
+ .reg it0,gr64
+ .reg it1,gr65
+ .reg it2,gr66
+ .reg it3,gr67
+ .reg v0,gr96
+ .reg v1,gr97
+ .reg v2,gr98
+ .reg v3,gr99
+ .reg trapreg,it0
+ .reg FPStat0,gr79
+ .reg FPStat1,gr79
+ .reg FPStat2,gr79
+ .reg FPStat3,gr79
+
+ .global _intr14
+_intr14:
+ const it3,14
+ sup_sv
+ jmp interrupt
+ nop
+
+ .global _intr18
+_intr18:
+ const it3,18
+ sup_sv
+ jmp interrupt
+ nop
+
+ .global _intr19
+_intr19:
+ const it3,19
+ sup_sv
+ jmp interrupt
+ nop
+
+interrupt:
+ push msp,it3
+ push msp,gr1
+ push msp,rab
+ const it0,512
+ sub rab,rfb,it0 ;set rab = rfb-512
+ pushsr msp,it0,pc0
+ pushsr msp,it0,pc1
+ pushsr msp,it0,pc2
+ pushsr msp,it0,cha
+ pushsr msp,it0,chd
+ pushsr msp,it0,chc
+ pushsr msp,it0,alu
+ pushsr msp,it0,ops
+ push msp,tav
+;
+;now come off freeze, and go to user-mode code.
+;ensure load/store does not restart
+;
+ mtsrim chc,0
+
+ mfsr it0, cps
+ const it1, FZ
+ consth it1, FZ
+ andn it0, it0, it1
+ const it1,(DI|TD)
+ consth it1,(DI|TD)
+ or it0,it1,it0
+ mtsr cps, it0
+; fall through to _sigcode
+
+ .extern _a29k_ISR_Handler
+ .global _sigcode
+_sigcode:
+
+ push msp, lr1 ; R stack support
+ push msp, rfb ; support
+ push msp, msp ; M stack support
+
+; push msp, FPStat0 ; Floating point 0
+; push msp, FPStat1 ; Floating point 1
+; push msp, FPStat2 ; Floating point 2
+; push msp, FPStat3 ; Floating point 3
+ sub msp,msp,4*4
+
+ pushsr msp, tav, IPA ; save user mode special
+ pushsr msp, tav, IPB ; save user mode special
+ pushsr msp, tav, IPC ; save user mode special
+ pushsr msp, tav, Q ; save user mode special
+
+ sub msp, msp, 29*4 ; gr96-gr124
+ mtsrim cr, 29-1 ;
+ storem 0, 0, gr96, msp ;
+
+
+ const v0, WindowSize ; Window Size value
+ consth v0, WindowSize ; Window Size value
+ load 0, 0, v0, v0 ; load Window size
+ add v2, msp, SIGCTX_RAB ; intr RAB value
+
+ load 0, 0, v2, v2 ; rab value
+
+ sub v1, rfb, v2 ;
+ cpgeu v1, v1, v0 ;
+ jmpt v1, nfill ; jmp if spill
+ add v1, gr1, 8 ;
+
+ cpgtu v1, v1, rfb ; longjump test
+ jmpt v1, nfill ;
+ nop ;
+
+ifill:
+ add v0, msp, SIGCTX_RAB+4 ;
+ push v0, rab ;
+ const v2, fill+4 ;
+ consth v2, fill+4 ;
+
+ push v0, v2 ; resave PC0
+ sub v2, v2, 4 ;
+ push v0, v2 ; resave PC1
+ const v2, 0 ;
+
+ sub v0, v0, 3*4 ;
+ push v0, v2 ;
+
+nfill:
+ cpgtu v0, gr1, rfb ; if gr1>rfb -> gr1=rfb
+ jmpt v0, lower ;
+ cpltu v0, gr1, rab ;
+ jmpt v0, raise ; gr1<rab then gr1=rab
+ nop ;
+
+sendsig:
+ sub gr1, gr1, RALLOC ;
+ asgeu V_SPILL, gr1, rab ;
+ add lr1, rfb, 0 ;
+ add v1, msp, SIGCTX_SIG ;
+
+cont:
+ add lr2,it3,0 ; signal #
+ call lr0, _a29k_ISR_Handler ; call the handler
+ nop
+
+ nop ; WASTE
+ jmp _a29k_sigdfl ; return code
+ nop ; WASTE
+ nop ; ALIGN
+
+lower:
+ jmp sendsig ;
+ add gr1, rfb, 0 ;
+raise:
+ jmp sendsig ;
+ add gr1, rab, 0 ;
+
+
+ .global _a29k_sigdfl_sup
+_a29k_sigdfl_sup:
+ repair_R_stack ;
+ repair_regs ;
+ sig_return ; return
+ halt ; never executes
+
+
+ .global _sigret
+_sigret:
+;assume msp points to tav
+ mfsr it0,cps
+ const it1,FZ
+ or it1,it0,it1
+ mtsr cps,it1
+ nop
+ nop
+_sigret1:
+ pop tav,msp
+ popsr ops,it0,msp
+ popsr alu,it0,msp
+ popsr chc,it0,msp
+ popsr chd,it0,msp
+ popsr cha,it0,msp
+ popsr pc2,it0,msp
+ popsr pc1,it0,msp
+ popsr pc0,it0,msp
+ pop rab,msp
+ pop it0,msp
+ add gr1,it0,0
+ add msp,msp,4 ;discount signal
+ iret
+
+_a29k_sigdfl:
+ asneq SIGDFL,gr1,gr1
+ jmpi lr0
+ nop
diff --git a/cpukit/score/cpu/hppa1.1/cpu_asm.S b/cpukit/score/cpu/hppa1.1/cpu_asm.S
new file mode 100644
index 0000000000..e6d9fd08d8
--- /dev/null
+++ b/cpukit/score/cpu/hppa1.1/cpu_asm.S
@@ -0,0 +1,778 @@
+/*
+ * TODO:
+ * Context_switch needs to only save callee save registers
+ * I think this means can skip: r1, r2, r19-29, r31
+ * Ref: p 3-2 of Procedure Calling Conventions Manual
+ * This should be #ifndef DEBUG so that debugger has
+ * accurate visibility into all registers
+ *
+ * This file contains the assembly code for the HPPA implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1994,95 by Division Incorporated
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <rtems/score/hppa.h>
+#include <rtems/score/cpu_asm.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/offsets.h>
+
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+/*
+ * Special register usage for context switch and interrupts
+ * Stay away from %cr28 which is used for TLB misses on 72000
+ */
+
+isr_arg0 .reg %cr24
+isr_r9 .reg %cr25
+isr_r8 .reg %cr26
+
+/*
+ * Interrupt stack frame looks like this
+ *
+ * offset item
+ * -----------------------------------------------------------------
+ * INTEGER_CONTEXT_OFFSET Context_Control
+ * FP_CONTEXT_OFFSET Context_Control_fp
+ *
+ * It is padded out to a multiple of 64 bytes
+ */
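+
+/*
+ * As a sketch (struct and field names here are illustrative, not
+ * taken from offsets.h), the frame described above corresponds to:
+ *
+ *   struct interrupt_frame {
+ *     Context_Control    integer;         -- at INTEGER_CONTEXT_OFFSET
+ *     Context_Control_fp floating_point;  -- at FP_CONTEXT_OFFSET
+ *     -- plus padding so the size is a multiple of 64 bytes
+ *     -- (CPU_INTERRUPT_FRAME_SIZE)
+ *   };
+ */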
+
+
+/*PAGE
+ * void _Generic_ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * We jump here from the interrupt vector.
+ * The HPPA hardware has done some stuff for us:
+ * PSW saved in IPSW
+ * PSW set to 0
+ * PSW[E] set to default (0)
+ * PSW[M] set to 1 iff this is HPMC
+ *
+ * IIA queue is frozen (since PSW[Q] is now 0)
+ * privilege level promoted to 0
+ * IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap
+ * registers GR 1,8,9,16,17,24,25 copied to shadow regs
+ * SHR 0 1 2 3 4 5 6
+ *
+ * Our vector stub (in the BSP) MUST have done the following:
+ *
+ * a) Saved the original %r9 into %isr_r9 (%cr25)
+ * b) Placed the vector number in %r9
+ * c) Was allowed to also destroy %isr_r8 (%cr26),
+ * but the stub was NOT allowed to destroy any other registers.
+ *
+ * The typical stub sequence (in the BSP) should look like this:
+ *
+ * a) mtctl %r9,isr_r9 ; (save r9 in cr25)
+ * b) ldi vector,%r9 ; (load constant vector number in r9)
+ * c) mtctl %r8,isr_r8 ; (save r8 in cr26)
+ * d) ldil L%MY_BSP_first_level_interrupt_handler,%r8
+ * e) ldo R%MY_BSP_first_level_interrupt_handler(%r8),%r8
+ * ; (point to BSP raw handler table)
+ * f) ldwx,s %r9(%r8),%r8 ; (load value from raw handler table)
+ * g) bv 0(%r8) ; (call raw handler: _Generic_ISR_Handler)
+ * h) mfctl isr_r8,%r8 ; (restore r8 from cr26 in delay slot)
+ *
+ * Optionally, steps (c) thru (h) _could_ be replaced with a single
+ * bl,n _Generic_ISR_Handler,%r0
+ *
+ *
+ */
+ .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0
+_Generic_ISR_Handler:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+ mtctl arg0, isr_arg0
+
+/*
+ * save interrupt state
+ */
+ mfctl ipsw, arg0
+ stw arg0, IPSW_OFFSET(sp)
+
+ mfctl iir, arg0
+ stw arg0, IIR_OFFSET(sp)
+
+ mfctl ior, arg0
+ stw arg0, IOR_OFFSET(sp)
+
+ mfctl pcoq, arg0
+ stw arg0, PCOQFRONT_OFFSET(sp)
+
+ mtctl %r0, pcoq
+ mfctl pcoq, arg0
+ stw arg0, PCOQBACK_OFFSET(sp)
+
+ mfctl %sar, arg0
+ stw arg0, SAR_OFFSET(sp)
+
+/*
+ * Build an interrupt frame to hold the contexts we will need.
+ * We have already saved the interrupt items on the stack
+ *
+ * At this point the following registers are damaged wrt the interrupt
+ * reg current value saved value
+ * ------------------------------------------------
+ * arg0 scratch isr_arg0 (cr24)
+ * r9 vector number isr_r9 (cr25)
+ *
+ * Point to beginning of integer context and
+ * save the integer context
+ */
+ stw %r1,R1_OFFSET(sp)
+ stw %r2,R2_OFFSET(sp)
+ stw %r3,R3_OFFSET(sp)
+ stw %r4,R4_OFFSET(sp)
+ stw %r5,R5_OFFSET(sp)
+ stw %r6,R6_OFFSET(sp)
+ stw %r7,R7_OFFSET(sp)
+ stw %r8,R8_OFFSET(sp)
+/*
+ * skip r9
+ */
+ stw %r10,R10_OFFSET(sp)
+ stw %r11,R11_OFFSET(sp)
+ stw %r12,R12_OFFSET(sp)
+ stw %r13,R13_OFFSET(sp)
+ stw %r14,R14_OFFSET(sp)
+ stw %r15,R15_OFFSET(sp)
+ stw %r16,R16_OFFSET(sp)
+ stw %r17,R17_OFFSET(sp)
+ stw %r18,R18_OFFSET(sp)
+ stw %r19,R19_OFFSET(sp)
+ stw %r20,R20_OFFSET(sp)
+ stw %r21,R21_OFFSET(sp)
+ stw %r22,R22_OFFSET(sp)
+ stw %r23,R23_OFFSET(sp)
+ stw %r24,R24_OFFSET(sp)
+ stw %r25,R25_OFFSET(sp)
+/*
+ * skip arg0
+ */
+ stw %r27,R27_OFFSET(sp)
+ stw %r28,R28_OFFSET(sp)
+ stw %r29,R29_OFFSET(sp)
+ stw %r30,R30_OFFSET(sp)
+ stw %r31,R31_OFFSET(sp)
+
+/* Now most registers are available since they have been saved
+ *
+ * The following items are currently wrong in the integer context
+ * reg current value saved value
+ * ------------------------------------------------
+ * arg0 scratch isr_arg0 (cr24)
+ * r9 vector number isr_r9 (cr25)
+ *
+ * Fix them
+ */
+
+ mfctl isr_arg0,%r3
+ stw %r3,ARG0_OFFSET(sp)
+
+ mfctl isr_r9,%r3
+ stw %r3,R9_OFFSET(sp)
+
+/*
+ * At this point we are done with isr_arg0, and isr_r9 control registers
+ *
+ * Prepare to re-enter virtual mode
+ * We need Q in case the interrupt handler enables interrupts
+ */
+
+ ldil L%CPU_PSW_DEFAULT, arg0
+ ldo R%CPU_PSW_DEFAULT(arg0), arg0
+ mtctl arg0, ipsw
+
+/*
+ * Now jump to "rest_of_isr_handler" with the rfi
+ * We are assuming the space queues are all correct already
+ */
+
+ ldil L%rest_of_isr_handler, arg0
+ ldo R%rest_of_isr_handler(arg0), arg0
+ mtctl arg0, pcoq
+ ldo 4(arg0), arg0
+ mtctl arg0, pcoq
+
+ rfi
+ nop
+
+/*
+ * At this point we are back in virtual mode and all our
+ * normal addressing is once again ok.
+ *
+ * It is now ok to take an exception or trap
+ */
+
+rest_of_isr_handler:
+
+/*
+ * Point to beginning of float context and
+ * save the floating point context -- doing whatever patches are necessary
+ */
+
+ .call ARGW0=GR
+ bl _CPU_Save_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(sp),arg0
+
+/*
+ * save the ptr to interrupt frame as an argument for the interrupt handler
+ */
+
+ copy sp, arg1
+
+/*
+ * Advance the frame to point beyond all interrupt contexts (integer & float)
+ * this also includes the pad to align to a 64-byte stack boundary
+ */
+ ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp
+
+/*
+ * r3 -- &_ISR_Nest_level
+ * r5 -- value _ISR_Nest_level
+ * r4 -- &_Thread_Dispatch_disable_level
+ * r6 -- value _Thread_Dispatch_disable_level
+ * r9 -- vector number
+ */
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldo R%_ISR_Nest_level(%r3),%r3
+ ldw 0(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldo R%_Thread_Dispatch_disable_level(%r4),%r4
+ ldw 0(%r4),%r6
+
+/*
+ * increment interrupt nest level counter. If outermost interrupt
+ * switch the stack and squirrel away the previous sp.
+ */
+ addi 1,%r5,%r5
+ stw %r5, 0(%r3)
+
+/*
+ * compute and save new stack (with frame)
+ * just in case we are nested -- simpler this way
+ */
+ comibf,= 1,%r5,stack_done
+ ldo 128(sp),%r7
+
+/*
+ * Switch to interrupt stack allocated by the interrupt manager (intr.c)
+ */
+ .import _CPU_Interrupt_stack_low,data
+ ldil L%_CPU_Interrupt_stack_low,%r7
+ ldw R%_CPU_Interrupt_stack_low(%r7),%r7
+ ldo 128(%r7),%r7
+
+stack_done:
+/*
+ * save our current stack pointer where the "old sp" is supposed to be
+ */
+ stw sp, -4(%r7)
+/*
+ * and switch stacks (or advance old stack in nested case)
+ */
+ copy %r7, sp
+
+/*
+ * increment the dispatch disable level counter.
+ */
+ addi 1,%r6,%r6
+ stw %r6, 0(%r4)
+
+/*
+ * load address of user handler
+ * Note: No error checking is done, it is assumed that the
+ * vector table contains a valid address or a stub
+ * spurious handler.
+ */
+ .import _ISR_Vector_table,data
+ ldil L%_ISR_Vector_table,%r8
+ ldo R%_ISR_Vector_table(%r8),%r8
+ ldwx,s %r9(%r8),%r8
+
+/*
+ * invoke user interrupt handler
+ * Interrupts are currently disabled, as per RTEMS convention
+ * The handler has the option of re-enabling interrupts
+ * NOTE: can not use 'bl' since it uses "pc-relative" addressing
+ * and we are using a hard coded address from a table
+ * So... we fudge r2 ourselves (ala dynacall)
+ * arg0 = vector number, arg1 = ptr to rtems_interrupt_frame
+ */
+ copy %r9, %r26
+ .call ARGW0=GR, ARGW1=GR
+ blr %r0, rp
+ bv,n 0(%r8)
+
+post_user_interrupt_handler:
+
+/*
+ * Back from user handler(s)
+ * Disable external interrupts (since the interrupt handler could
+ * have turned them on) and return to the interrupted task stack
+ * (assuming _ISR_Nest_level == 0)
+ */
+
+ rsm HPPA_PSW_I + HPPA_PSW_R, %r0
+ ldw -4(sp), sp
+
+/*
+ * r3 -- (most of) &_ISR_Nest_level
+ * r5 -- value _ISR_Nest_level
+ * r4 -- (most of) &_Thread_Dispatch_disable_level
+ * r6 -- value _Thread_Dispatch_disable_level
+ * r7 -- (most of) &_ISR_Signals_to_thread_executing
+ * r8 -- value _ISR_Signals_to_thread_executing
+ */
+
+ .import _ISR_Nest_level,data
+ ldil L%_ISR_Nest_level,%r3
+ ldw R%_ISR_Nest_level(%r3),%r5
+
+ .import _Thread_Dispatch_disable_level,data
+ ldil L%_Thread_Dispatch_disable_level,%r4
+ ldw R%_Thread_Dispatch_disable_level(%r4),%r6
+
+ .import _ISR_Signals_to_thread_executing,data
+ ldil L%_ISR_Signals_to_thread_executing,%r7
+
+/*
+ * decrement isr nest level
+ */
+ addi -1, %r5, %r5
+ stw %r5, R%_ISR_Nest_level(%r3)
+
+/*
+ * decrement dispatch disable level counter and, if not 0, go on
+ */
+ addi -1,%r6,%r6
+ comibf,= 0,%r6,isr_restore
+ stw %r6, R%_Thread_Dispatch_disable_level(%r4)
+
+/*
+ * check whether or not a context switch is necessary
+ */
+ .import _Context_Switch_necessary,data
+ ldil L%_Context_Switch_necessary,%r8
+ ldw R%_Context_Switch_necessary(%r8),%r8
+ comibf,=,n 0,%r8,ISR_dispatch
+
+/*
+ * check whether or not a context switch is necessary because an ISR
+ * sent signals to the interrupted task
+ */
+ ldw R%_ISR_Signals_to_thread_executing(%r7),%r8
+ comibt,=,n 0,%r8,isr_restore
+
+
+/*
+ * OK, something happened while in ISR and we need to switch to a task
+ * other than the one which was interrupted or the
+ * ISR_Signals_to_thread_executing case
+ * We also turn on interrupts, since the interrupted task had them
+ * on (obviously :-) and Thread_Dispatch is happy to leave ints on.
+ */
+
+ISR_dispatch:
+ stw %r0, R%_ISR_Signals_to_thread_executing(%r7)
+
+ ssm HPPA_PSW_I, %r0
+
+ .import _Thread_Dispatch,code
+ .call
+ bl _Thread_Dispatch,%r2
+ ldo 128(sp),sp
+
+ ldo -128(sp),sp
+
+isr_restore:
+
+/*
+ * enable interrupts during most of restore
+ */
+ ssm HPPA_PSW_I, %r0
+
+/*
+ * Get a pointer to beginning of our stack frame
+ */
+ ldo -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1
+
+/*
+ * restore float
+ */
+ .call ARGW0=GR
+ bl _CPU_Restore_float_context,%r2
+ ldo FP_CONTEXT_OFFSET(%arg1), arg0
+
+ copy %arg1, %arg0
+
+/*
+ * ********** FALL THRU **********
+ */
+
+/*
+ * Jump here from bottom of Context_Switch
+ * Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self
+ * restore interrupt state
+ */
+
+ .EXPORT _CPU_Context_restore
+_CPU_Context_restore:
+
+/*
+ * restore integer state
+ */
+ ldw R1_OFFSET(arg0),%r1
+ ldw R2_OFFSET(arg0),%r2
+ ldw R3_OFFSET(arg0),%r3
+ ldw R4_OFFSET(arg0),%r4
+ ldw R5_OFFSET(arg0),%r5
+ ldw R6_OFFSET(arg0),%r6
+ ldw R7_OFFSET(arg0),%r7
+ ldw R8_OFFSET(arg0),%r8
+ ldw R9_OFFSET(arg0),%r9
+ ldw R10_OFFSET(arg0),%r10
+ ldw R11_OFFSET(arg0),%r11
+ ldw R12_OFFSET(arg0),%r12
+ ldw R13_OFFSET(arg0),%r13
+ ldw R14_OFFSET(arg0),%r14
+ ldw R15_OFFSET(arg0),%r15
+ ldw R16_OFFSET(arg0),%r16
+ ldw R17_OFFSET(arg0),%r17
+ ldw R18_OFFSET(arg0),%r18
+ ldw R19_OFFSET(arg0),%r19
+ ldw R20_OFFSET(arg0),%r20
+ ldw R21_OFFSET(arg0),%r21
+ ldw R22_OFFSET(arg0),%r22
+ ldw R23_OFFSET(arg0),%r23
+ ldw R24_OFFSET(arg0),%r24
+/*
+ * skipping r25; used as scratch register below
+ * skipping r26 (arg0) until we are done with it
+ */
+ ldw R27_OFFSET(arg0),%r27
+ ldw R28_OFFSET(arg0),%r28
+ ldw R29_OFFSET(arg0),%r29
+/*
+ * skipping r30 (sp) until we turn off interrupts
+ */
+ ldw R31_OFFSET(arg0),%r31
+
+/*
+ * Turn off Q & R & I so we can write r30 and interrupt control registers
+ */
+ rsm HPPA_PSW_Q + HPPA_PSW_R + HPPA_PSW_I, %r0
+
+/*
+ * now safe to restore r30
+ */
+ ldw R30_OFFSET(arg0),%r30
+
+ ldw IPSW_OFFSET(arg0), %r25
+ mtctl %r25, ipsw
+
+ ldw SAR_OFFSET(arg0), %r25
+ mtctl %r25, sar
+
+ ldw PCOQFRONT_OFFSET(arg0), %r25
+ mtctl %r25, pcoq
+
+ ldw PCOQBACK_OFFSET(arg0), %r25
+ mtctl %r25, pcoq
+
+/*
+ * Load r25 with interrupts off
+ */
+ ldw R25_OFFSET(arg0),%r25
+/*
+ * Must load r26 (arg0) last
+ */
+ ldw R26_OFFSET(arg0),%r26
+
+isr_exit:
+ rfi
+ .EXIT
+ .PROCEND
+
+/*
+ * This section is used to context switch floating point registers.
+ * Ref: 6-35 of Architecture 1.1
+ *
+ * NOTE: since integer multiply uses the floating point unit,
+ * we have to save/restore fp on every trap. We cannot
+ * just try to keep track of fp usage.
+ */
+
+ .align 32
+ .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0
+_CPU_Save_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ fstds,ma %fr0,8(%arg0)
+ fstds,ma %fr1,8(%arg0)
+ fstds,ma %fr2,8(%arg0)
+ fstds,ma %fr3,8(%arg0)
+ fstds,ma %fr4,8(%arg0)
+ fstds,ma %fr5,8(%arg0)
+ fstds,ma %fr6,8(%arg0)
+ fstds,ma %fr7,8(%arg0)
+ fstds,ma %fr8,8(%arg0)
+ fstds,ma %fr9,8(%arg0)
+ fstds,ma %fr10,8(%arg0)
+ fstds,ma %fr11,8(%arg0)
+ fstds,ma %fr12,8(%arg0)
+ fstds,ma %fr13,8(%arg0)
+ fstds,ma %fr14,8(%arg0)
+ fstds,ma %fr15,8(%arg0)
+ fstds,ma %fr16,8(%arg0)
+ fstds,ma %fr17,8(%arg0)
+ fstds,ma %fr18,8(%arg0)
+ fstds,ma %fr19,8(%arg0)
+ fstds,ma %fr20,8(%arg0)
+ fstds,ma %fr21,8(%arg0)
+ fstds,ma %fr22,8(%arg0)
+ fstds,ma %fr23,8(%arg0)
+ fstds,ma %fr24,8(%arg0)
+ fstds,ma %fr25,8(%arg0)
+ fstds,ma %fr26,8(%arg0)
+ fstds,ma %fr27,8(%arg0)
+ fstds,ma %fr28,8(%arg0)
+ fstds,ma %fr29,8(%arg0)
+ fstds,ma %fr30,8(%arg0)
+ fstds %fr31,0(%arg0)
+ bv 0(%r2)
+ addi -(31*8), %arg0, %arg0 ; restore arg0 just for fun
+ .EXIT
+ .PROCEND
+
+ .align 32
+ .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0
+_CPU_Restore_float_context:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ addi (31*8), %arg0, %arg0 ; point at last double
+ fldds 0(%arg0),%fr31
+ fldds,mb -8(%arg0),%fr30
+ fldds,mb -8(%arg0),%fr29
+ fldds,mb -8(%arg0),%fr28
+ fldds,mb -8(%arg0),%fr27
+ fldds,mb -8(%arg0),%fr26
+ fldds,mb -8(%arg0),%fr25
+ fldds,mb -8(%arg0),%fr24
+ fldds,mb -8(%arg0),%fr23
+ fldds,mb -8(%arg0),%fr22
+ fldds,mb -8(%arg0),%fr21
+ fldds,mb -8(%arg0),%fr20
+ fldds,mb -8(%arg0),%fr19
+ fldds,mb -8(%arg0),%fr18
+ fldds,mb -8(%arg0),%fr17
+ fldds,mb -8(%arg0),%fr16
+ fldds,mb -8(%arg0),%fr15
+ fldds,mb -8(%arg0),%fr14
+ fldds,mb -8(%arg0),%fr13
+ fldds,mb -8(%arg0),%fr12
+ fldds,mb -8(%arg0),%fr11
+ fldds,mb -8(%arg0),%fr10
+ fldds,mb -8(%arg0),%fr9
+ fldds,mb -8(%arg0),%fr8
+ fldds,mb -8(%arg0),%fr7
+ fldds,mb -8(%arg0),%fr6
+ fldds,mb -8(%arg0),%fr5
+ fldds,mb -8(%arg0),%fr4
+ fldds,mb -8(%arg0),%fr3
+ fldds,mb -8(%arg0),%fr2
+ fldds,mb -8(%arg0),%fr1
+ bv 0(%r2)
+ fldds,mb -8(%arg0),%fr0
+ .EXIT
+ .PROCEND
+
+/*
+ * These 2 small routines are unused right now.
+ * Normally we just go thru _CPU_Save_float_context (and Restore)
+ *
+ * Here we just deref the ptr and jump up, letting _CPU_Save_float_context
+ * do the return for us.
+ */
+
+ .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0
+_CPU_Context_save_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Save_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+ .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0
+_CPU_Context_restore_fp:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ bl _CPU_Restore_float_context, %r0
+ ldw 0(%arg0), %arg0
+ .EXIT
+ .PROCEND
+
+
+/*
+ * void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 32
+ .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR
+_CPU_Context_switch:
+ .PROC
+ .CALLINFO FRAME=64
+ .ENTRY
+
+/*
+ * Save the integer context
+ */
+ stw %r1,R1_OFFSET(arg0)
+ stw %r2,R2_OFFSET(arg0)
+ stw %r3,R3_OFFSET(arg0)
+ stw %r4,R4_OFFSET(arg0)
+ stw %r5,R5_OFFSET(arg0)
+ stw %r6,R6_OFFSET(arg0)
+ stw %r7,R7_OFFSET(arg0)
+ stw %r8,R8_OFFSET(arg0)
+ stw %r9,R9_OFFSET(arg0)
+ stw %r10,R10_OFFSET(arg0)
+ stw %r11,R11_OFFSET(arg0)
+ stw %r12,R12_OFFSET(arg0)
+ stw %r13,R13_OFFSET(arg0)
+ stw %r14,R14_OFFSET(arg0)
+ stw %r15,R15_OFFSET(arg0)
+ stw %r16,R16_OFFSET(arg0)
+ stw %r17,R17_OFFSET(arg0)
+ stw %r18,R18_OFFSET(arg0)
+ stw %r19,R19_OFFSET(arg0)
+ stw %r20,R20_OFFSET(arg0)
+ stw %r21,R21_OFFSET(arg0)
+ stw %r22,R22_OFFSET(arg0)
+ stw %r23,R23_OFFSET(arg0)
+ stw %r24,R24_OFFSET(arg0)
+ stw %r25,R25_OFFSET(arg0)
+ stw %r26,R26_OFFSET(arg0)
+ stw %r27,R27_OFFSET(arg0)
+ stw %r28,R28_OFFSET(arg0)
+ stw %r29,R29_OFFSET(arg0)
+ stw %r30,R30_OFFSET(arg0)
+ stw %r31,R31_OFFSET(arg0)
+
+/*
+ * fill in interrupt context section
+ */
+ stw %r2, PCOQFRONT_OFFSET(%arg0)
+ ldo 4(%r2), %r2
+ stw %r2, PCOQBACK_OFFSET(%arg0)
+
+/*
+ * Generate a suitable IPSW by using the system default psw
+ * with the current low bits added in.
+ */
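+/*
+ * As a C sketch: ipsw = (CPU_PSW_DEFAULT & ~0xff) | (current_psw & 0xff)
+ * ("ssm 0,%arg2" reads the current PSW without changing it, and "dep"
+ * deposits its low 8 bits into the default value held in %r2.)
+ */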
+
+ ldil L%CPU_PSW_DEFAULT, %r2
+ ldo R%CPU_PSW_DEFAULT(%r2), %r2
+ ssm 0, %arg2
+ dep %arg2, 31, 8, %r2
+ stw %r2, IPSW_OFFSET(%arg0)
+
+/*
+ * at this point, the running task context is completely saved
+ * Now jump to the bottom of the interrupt handler to load the
+ * heir's context
+ */
+
+ b _CPU_Context_restore
+ copy %arg1, %arg0
+
+ .EXIT
+ .PROCEND
+
+
+/*
+ * Find first bit
+ * NOTE:
+ * This is used (and written) only for the ready chain code and
+ * priority bit maps.
+ * Any other use constitutes fraud.
+ * Returns first bit from the least significant side.
+ * Eg: if input is 0x8001
+ * output will indicate the '1' bit and return 0.
+ * This is counter to HPPA bit numbering which calls this
+ * bit 31. This simplifies the macros _CPU_Priority_Mask
+ * and _CPU_Priority_Bits_index.
+ *
+ * NOTE:
+ * We just use the 16 bit version; it does not handle the zero case.
+ *
+ * Based on the UTAH Mach libc version of ffs.
+ */
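+
+/*
+ * A C model of the 16-bit path used below (a sketch only; like the
+ * assembly, the result is undefined for val == 0):
+ *
+ *   int hppa_rtems_ffs( unsigned int val )
+ *   {
+ *     int bit = 15;
+ *     if ( val & 0xff ) bit -= 8; else val >>= 8;
+ *     if ( val & 0x0f ) bit -= 4; else val >>= 4;
+ *     if ( val & 0x03 ) bit -= 2; else val >>= 2;
+ *     if ( val & 0x01 ) bit -= 1;
+ *     return bit;
+ *   }
+ */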
+
+ .align 32
+ .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR
+hppa_rtems_ffs:
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+
+#ifdef RETURN_ERROR_ON_ZERO
+ comb,= %arg0,%r0,ffsdone ; If arg0 is 0
+ ldi -1,%ret0 ; return -1
+#endif
+
+#if BITFIELD_SIZE == 32
+ ldi 31,%ret0 ; Set return to high bit
+ extru,= %arg0,31,16,%r0 ; If low 16 bits are non-zero
+ addi,tr -16,%ret0,%ret0 ; subtract 16 from bitpos
+ shd %r0,%arg0,16,%arg0 ; else shift right 16 bits
+#else
+ ldi 15,%ret0 ; Set return to high bit
+#endif
+ extru,= %arg0,31,8,%r0 ; If low 8 bits are non-zero
+ addi,tr -8,%ret0,%ret0 ; subtract 8 from bitpos
+ shd %r0,%arg0,8,%arg0 ; else shift right 8 bits
+ extru,= %arg0,31,4,%r0 ; If low 4 bits are non-zero
+ addi,tr -4,%ret0,%ret0 ; subtract 4 from bitpos
+ shd %r0,%arg0,4,%arg0 ; else shift right 4 bits
+ extru,= %arg0,31,2,%r0 ; If low 2 bits are non-zero
+ addi,tr -2,%ret0,%ret0 ; subtract 2 from bitpos
+ shd %r0,%arg0,2,%arg0 ; else shift right 2 bits
+ extru,= %arg0,31,1,%r0 ; If low bit is non-zero
+ addi -1,%ret0,%ret0 ; subtract 1 from bitpos
+ffsdone:
+ bv,n 0(%r2)
+ nop
+ .EXIT
+ .PROCEND
diff --git a/cpukit/score/cpu/i386/cpu_asm.S b/cpukit/score/cpu/i386/cpu_asm.S
new file mode 100644
index 0000000000..4ac4dab626
--- /dev/null
+++ b/cpukit/score/cpu/i386/cpu_asm.S
@@ -0,0 +1,282 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the Intel i386 implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+/*
+ * Format of i386 Register structure
+ */
+
+.set REG_EFLAGS, 0
+.set REG_ESP, REG_EFLAGS + 4
+.set REG_EBP, REG_ESP + 4
+.set REG_EBX, REG_EBP + 4
+.set REG_ESI, REG_EBX + 4
+.set REG_EDI, REG_ESI + 4
+.set SIZE_REGS, REG_EDI + 4
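+
+/*
+ * As a sketch, these offsets describe a context area of the form
+ * below (presumably matching Context_Control in cpu.h; the types
+ * are illustrative):
+ *
+ *   typedef struct {
+ *     unsigned32 eflags;   offset  0  (REG_EFLAGS)
+ *     unsigned32 esp;      offset  4  (REG_ESP)
+ *     unsigned32 ebp;      offset  8  (REG_EBP)
+ *     unsigned32 ebx;      offset 12  (REG_EBX)
+ *     unsigned32 esi;      offset 16  (REG_ESI)
+ *     unsigned32 edi;      offset 20  (REG_EDI)
+ *   } Context_Control;     with SIZE_REGS == 24
+ */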
+
+ BEGIN_CODE
+
+/*
+ * void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .p2align 1
+ PUBLIC (_CPU_Context_switch)
+
+.set RUNCONTEXT_ARG, 4 # save context argument
+.set HEIRCONTEXT_ARG, 8 # restore context argument
+
+SYM (_CPU_Context_switch):
+ movl RUNCONTEXT_ARG(esp),eax # eax = running threads context
+ pushf # push eflags
+ popl REG_EFLAGS(eax) # save eflags
+ movl esp,REG_ESP(eax) # save stack pointer
+ movl ebp,REG_EBP(eax) # save base pointer
+ movl ebx,REG_EBX(eax) # save ebx
+ movl esi,REG_ESI(eax) # save source register
+ movl edi,REG_EDI(eax) # save destination register
+
+ movl HEIRCONTEXT_ARG(esp),eax # eax = heir threads context
+
+restore:
+ pushl REG_EFLAGS(eax) # push eflags
+ popf # restore eflags
+ movl REG_ESP(eax),esp # restore stack pointer
+ movl REG_EBP(eax),ebp # restore base pointer
+ movl REG_EBX(eax),ebx # restore ebx
+ movl REG_ESI(eax),esi # restore source register
+ movl REG_EDI(eax),edi # restore destination register
+ ret
+
+/*
+ * NOTE: May be unnecessary to reload some registers.
+ */
+
+/*
+ * void _CPU_Context_restore( new_context )
+ *
+ * This routine performs a normal non-FP context restore.
+ */
+
+ PUBLIC (_CPU_Context_restore)
+
+.set NEWCONTEXT_ARG, 4 # context to restore argument
+
+SYM (_CPU_Context_restore):
+
+ movl NEWCONTEXT_ARG(esp),eax # eax = context to restore
+ jmp restore
+
+/*PAGE
+ * void _CPU_Context_save_fp( &fp_context_ptr )
+ * void _CPU_Context_restore_fp( &fp_context_ptr )
+ *
+ * This section is used to context switch an i80287, an i80387,
+ * or the coprocessor built into an i80486 or compatible.
+ */
+
+.set FPCONTEXT_ARG, 4 # FP context argument
+
+ .p2align 1
+ PUBLIC (_CPU_Context_save_fp)
+SYM (_CPU_Context_save_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ fsave (eax) # save FP context
+ ret
+
+ .p2align 1
+ PUBLIC (_CPU_Context_restore_fp)
+SYM (_CPU_Context_restore_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ frstor (eax) # restore FP context
+ ret
+
+ PUBLIC (_Exception_Handler)
+SYM (_Exception_Handler):
+ pusha # Push general purpose registers
+ pushl esp # Push exception frame address
+ movl _currentExcHandler, eax # Call function stored in _currentExcHandler
+ call * eax
+ addl $4, esp
+ popa # restore general purpose registers
+ addl $8, esp # skip vector number and faultCode
+ iret
+
+#define DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY(_vector) \
+ .p2align 4 ; \
+ PUBLIC (rtems_exception_prologue_ ## _vector ) ; \
+SYM (rtems_exception_prologue_ ## _vector ): \
+ pushl $ _vector ; \
+ jmp SYM (_Exception_Handler) ;
+
+#define DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY(_vector) \
+ .p2align 4 ; \
+ PUBLIC (rtems_exception_prologue_ ## _vector ) ; \
+SYM (rtems_exception_prologue_ ## _vector ): \
+ pushl $ 0 ; \
+ pushl $ _vector ; \
+ jmp SYM (_Exception_Handler) ;
+
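+/*
+ * For example, DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (13) below
+ * expands, per the macro, to roughly:
+ *
+ *   rtems_exception_prologue_13:
+ *       pushl $ 13                  # vector number; the CPU already
+ *                                   # pushed the fault code
+ *       jmp   _Exception_Handler
+ *
+ * The WITHOUT variant first pushes a dummy 0 fault code so both
+ * kinds of exception reach _Exception_Handler with the same frame.
+ */
+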
+/*
+ * Divide Error
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (0)
+/*
+ * Debug Exception
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (1)
+/*
+ * NMI
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (2)
+/*
+ * Breakpoint
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (3)
+/*
+ * Overflow
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (4)
+/*
+ * Bound Range Exceeded
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (5)
+/*
+ * Invalid Opcode
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (6)
+/*
+ * No Math Coproc
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (7)
+/*
+ * Double Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (8)
+/*
+ * Coprocessor segment overrun
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (9)
+/*
+ * Invalid TSS
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (10)
+/*
+ * Segment Not Present
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (11)
+/*
+ * Stack segment Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (12)
+/*
+ * General Protection Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (13)
+/*
+ * Page Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (14)
+/*
+ * Floating point error (NB: 15 is reserved and is therefore skipped)
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (16)
+/*
+ * Alignment Check
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (17)
+/*
+ * Machine Check
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (18)
+
+
+/*
+ * GO32 does not require these segment related routines.
+ */
+
+#ifndef __GO32__
+
+/*
+ * void *i386_Logical_to_physical(
+ * rtems_unsigned16 segment,
+ * void *address
+ * );
+ *
+ * Returns thirty-two bit physical address for segment:address.
+ */
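+
+/*
+ * A C sketch of the descriptor arithmetic done below ("segment" is
+ * used as a byte offset into _Global_descriptor_table, exactly as
+ * the assembly does; descriptor layout per the i386 architecture):
+ *
+ *   void *i386_Logical_to_physical(
+ *     rtems_unsigned16 segment, void *address )
+ *   {
+ *     unsigned char *desc = (unsigned char *) _Global_descriptor_table
+ *                             + segment;
+ *     rtems_unsigned32 base = ((rtems_unsigned32) desc[7] << 24)
+ *                           | ((rtems_unsigned32) desc[4] << 16)
+ *                           | ((rtems_unsigned32) desc[3] << 8)
+ *                           |  (rtems_unsigned32) desc[2];
+ *     return (void *)((rtems_unsigned32) address + base);
+ *   }
+ */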
+
+.set SEGMENT_ARG, 4
+.set ADDRESS_ARG, 8
+
+ PUBLIC (i386_Logical_to_physical)
+
+SYM (i386_Logical_to_physical):
+
+ xorl eax,eax # clear eax
+ movzwl SEGMENT_ARG(esp),ecx # ecx = segment value
+ movl $ SYM (_Global_descriptor_table),edx
+ # edx = address of our GDT
+ addl ecx,edx # edx = address of desired entry
+ movb 7(edx),ah # ah = base 31:24
+ movb 4(edx),al # al = base 23:16
+ shll $16,eax # move ax into correct bits
+ movw 2(edx),ax # ax = base 0:15
+ movl ADDRESS_ARG(esp),ecx # ecx = address to convert
+ addl eax,ecx # ecx = physical address equivalent
+ movl ecx,eax # eax = ecx
+ ret
+
+/*
+ * void *i386_Physical_to_logical(
+ * rtems_unsigned16 segment,
+ * void *address
+ * );
+ *
+ * Returns thirty-two bit logical address for segment:address.
+ */
+
+/*
+ *.set SEGMENT_ARG, 4
+ *.set ADDRESS_ARG, 8 -- use sets from above
+ */
+
+ PUBLIC (i386_Physical_to_logical)
+
+SYM (i386_Physical_to_logical):
+ xorl eax,eax # clear eax
+ movzwl SEGMENT_ARG(esp),ecx # ecx = segment value
+ movl $ SYM (_Global_descriptor_table),edx
+ # edx = address of our GDT
+ addl ecx,edx # edx = address of desired entry
+ movb 7(edx),ah # ah = base 31:24
+ movb 4(edx),al # al = base 23:16
+ shll $16,eax # move ax into correct bits
+ movw 2(edx),ax # ax = base 0:15
+ movl ADDRESS_ARG(esp),ecx # ecx = address to convert
+ subl eax,ecx # ecx = logical address equivalent
+ movl ecx,eax # eax = ecx
+ ret
+#endif /* __GO32__ */
+
+END_CODE
+
+END
diff --git a/cpukit/score/cpu/i960/cpu_asm.S b/cpukit/score/cpu/i960/cpu_asm.S
new file mode 100644
index 0000000000..fbed8babd8
--- /dev/null
+++ b/cpukit/score/cpu/i960/cpu_asm.S
@@ -0,0 +1,199 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the i960CA implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+ .text
+/*
+ * Format of i960ca Register structure
+ */
+
+.set REG_R0_PFP , 0 # (r0) Previous Frame Pointer
+.set REG_R1_SP , REG_R0_PFP+4 # (r1) Stack Pointer
+.set REG_PC , REG_R1_SP+4 # (pc) Processor Controls
+.set REG_G8 , REG_PC+4 # (g8) Global Register 8
+.set REG_G9 , REG_G8+4 # (g9) Global Register 9
+.set REG_G10 , REG_G9+4 # (g10) Global Register 10
+.set REG_G11 , REG_G10+4 # (g11) Global Register 11
+.set REG_G12 , REG_G11+4 # (g12) Global Register 12
+.set REG_G13 , REG_G12+4 # (g13) Global Register 13
+.set REG_G14 , REG_G13+4 # (g14) Global Register 14
+.set REG_G15_FP , REG_G14+4 # (g15) Global Register 15
+.set SIZE_REGS , REG_G15_FP+4 # size of cpu_context_registers
+ # structure
+
+/*
+ * void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+ .align 4
+ .globl __CPU_Context_switch
+
+__CPU_Context_switch:
+ modpc 0,0,g2 # get old intr level (PC)
+ st g2,REG_PC(g0) # save pc
+ stq g8,REG_G8(g0) # save g8-g11
+ stq g12,REG_G12(g0) # save g12-g15
+ stl pfp,REG_R0_PFP(g0) # save pfp, sp
+
+restore: flushreg # flush register cache
+ ldconst 0x001f0000,g2 # g2 = PC mask
+ ld REG_PC(g1),g3 # g3 = thread->Regs.pc
+ ldq REG_G12(g1),g12 # restore g12-g15
+ ldl REG_R0_PFP(g1),pfp # restore pfp, sp
+ ldq REG_G8(g1),g8 # restore g8-g11
+ modpc 0,g2,g3 # restore PC register
+ ret
+
+/*
+ * void _CPU_Context_restore( new_context )
+ *
+ * This routine performs a normal non-FP context restore.
+ */
+
+ .globl __CPU_Context_restore
+__CPU_Context_restore:
+ mov g0,g1 # g0 = _Thread_executing
+ b restore
+
+/*PAGE
+ * void _CPU_Context_save_fp( &fp_context_ptr )
+ * void _CPU_Context_restore_fp( &fp_context_ptr )
+ *
+ * There is currently no hardware floating point for the i960.
+ */
+
+ .globl __CPU_Context_save_fp
+ .globl __CPU_Context_restore_fp
+__CPU_Context_save_fp:
+__CPU_Context_restore_fp:
+#if ( I960_HAS_FPU == 1 )
+#error "Floating point support for i960 family has been implemented!!!"
+#endif
+ ret
+
+/*PAGE
+ * void __ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * Input parameters: NONE
+ *
+ * Output parameters: NONE
+ *
+ * NOTE:
+ * Upon entry, the supervisor stack will contain a stack frame
+ * back to the interrupted thread and the interrupt stack will contain
+ * an interrupt stack frame. If dispatching is enabled, this
+ * is the outermost interrupt, and (a context switch is necessary or
+ * the current thread has signals), then set up the supervisor stack to
+ * transfer control to the interrupt dispatcher.
+ */
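+
+/*
+ * The exit decision below, as C pseudocode (a sketch of the tests
+ * the code actually performs):
+ *
+ *   if ( --_Thread_Dispatch_disable_level != 0 )   goto exit;
+ *   if ( not the outermost interrupt frame )       goto exit;
+ *   if ( !_Context_Switch_necessary &&
+ *        !_ISR_Signals_to_thread_executing )       goto exit;
+ *   build a frame so the final ret runs __ISR_Dispatch;
+ */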
+
+ .globl __ISR_Handler
+__ISR_Handler:
+ #ldconst 1,r8
+ #modpc 0,r8,r8 # enable tracing
+
+ # r4 = _Thread_Dispatch_disable_level
+ ld __Thread_Dispatch_disable_level,r4
+ movl g0,r8 # save g0-g1
+
+ ld -16+8(fp),g0 # g0 = vector number
+ movl g2,r10 # save g2-g3
+
+ ld __ISR_Nest_level,r5 # r5 = _ISR_Nest_level
+ mov g14,r7 # save g14
+
+ lda 0,g14 # NOT Branch and Link
+ movl g4,r12 # save g4-g5
+
+ lda 1(r4),r4 # increment dispatch disable level
+ movl g6,r14 # save g6-g7
+
+ ld __ISR_Vector_table[g0*4],g1 # g1 = Users handler
+ addo 1,r5,r5 # increment ISR level
+
+ st r4,__Thread_Dispatch_disable_level
+ # one ISR nest level deeper
+ subo 1,r4,r4 # decrement dispatch disable level
+
+ st r5,__ISR_Nest_level # disable multitasking
+ subo 1,r5,r5 # decrement ISR nest level
+
+ callx (g1) # invoke user ISR
+
+ st r4,__Thread_Dispatch_disable_level
+ # unnest multitasking
+ st r5,__ISR_Nest_level # one less ISR nest level
+ cmpobne.f 0,r4,exit # If dispatch disabled, exit
+ ldl -16(fp),g0 # g0 = threads PC reg
+ # g1 = threads AC reg
+ ld __Context_Switch_necessary,r6
+ # r6 = Is thread switch necessary?
+ bbs.f 13,g0,exit # not outer level, then exit
+ cmpobne.f 0,r6,bframe # Switch necessary?
+
+ ld __ISR_Signals_to_thread_executing,g2
+ # signals sent to Run_thread
+ # while in interrupt handler?
+ cmpobe.f 0,g2,exit # No, then exit
+
+bframe: mov 0,g2
+ st g2,__ISR_Signals_to_thread_executing
+
+ ldconst 0x1f0000,g2 # g2 = intr disable mask
+ mov g2,g3 # g3 = new intr level
+ modpc 0,g2,g3 # set new level
+
+ andnot 7,pfp,r4 # r4 = pfp without ret type
+ flushreg # flush registers
+ # push _Isr_dispatch ret frame
+ # build ISF in r4-r6
+ ldconst 64,g2 # g2 = size of stack frame
+ ld 4(r4),g3 # g3 = previous sp
+ addo g2,g3,r5 # r5 = _Isr_dispatch SP
+ lda __ISR_Dispatch,r6 # r6 = _Isr_dispatch entry
+ stt r4,(g3) # set _Isr_dispatch ret info
+ st g1,16(g3) # set r4 = AC for ISR disp
+ or 7,g3,pfp # pfp to _Isr_dispatch
+
+exit: mov r7,g14 # restore g14
+ movq r8,g0 # restore g0-g3
+ movq r12,g4 # restore g4-g7
+ ret
+
+
+/*PAGE
+ *
+ * void __ISR_Dispatch()
+ *
+ * Entry point from the outermost interrupt service routine exit.
+ * The current stack is the supervisor mode stack.
+ */
+
+__ISR_Dispatch:
+ mov g14,r7
+ mov 0,g14
+ movq g0,r8
+ movq g4,r12
+ call __Thread_Dispatch
+
+ ldconst -1,r5 # r5 = reload mask
+ modac r5,r4,r4 # restore threads AC register
+ mov r7,g14
+ movq r8,g0
+ movq r12,g4
+ ret
diff --git a/cpukit/score/cpu/m68k/cpu_asm.S b/cpukit/score/cpu/m68k/cpu_asm.S
new file mode 100644
index 0000000000..03747222e5
--- /dev/null
+++ b/cpukit/score/cpu/m68k/cpu_asm.S
@@ -0,0 +1,291 @@
+/* cpu_asm.s
+ *
+ * This file contains all assembly code for the MC68020 implementation
+ * of RTEMS.
+ *
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * $Id$
+ */
+
+
+#include <asm.h>
+
+ .text
+
+/* void _CPU_Context_switch( run_context, heir_context )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 4
+ .global SYM (_CPU_Context_switch)
+
+.set RUNCONTEXT_ARG, 4 | save context argument
+.set HEIRCONTEXT_ARG, 8 | restore context argument
+
+SYM (_CPU_Context_switch):
+ moval a7@(RUNCONTEXT_ARG),a0| a0 = running thread context
+ movw sr,d1 | d1 = status register
+ movml d1-d7/a2-a7,a0@ | save context
+
+ moval a7@(HEIRCONTEXT_ARG),a0| a0 = heir thread context
+restore: movml a0@,d1-d7/a2-a7 | restore context
+ movw d1,sr | restore status register
+ rts
+
+/*PAGE
+ * void _CPU_Context_save_fp( &fp_context_ptr )
+ * void _CPU_Context_restore_fp( &fp_context_ptr )
+ *
+ * These routines are used to context switch a MC68881 or MC68882.
+ *
+ * NOTE: Context save and restore code is based upon the code shown
+ * on page 6-38 of the MC68881/68882 Users Manual (rev 1).
+ *
+ * CPU_FP_CONTEXT_SIZE is higher than expected to account for the
+ * -1 pushed at end of this sequence.
+ *
+ * Neither of these entries is required if we have software FPU
+ * emulation. But if we don't have an FPU or emulation, then
+ * we need the stub versions of these routines.
+ */
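+
+/*
+ * As saved by _CPU_Context_save_fp below, the area is laid out
+ * (lowest address first, since a0 pre-decrements):
+ *
+ *   -1 flag word (marks a non-null context)
+ *   fpc/fps/fpi control registers
+ *   fp0-fp7 data registers
+ *   68881/68882 internal state frame (from fsave)
+ *
+ * A null state frame (first byte zero) means the FPU held no user
+ * state, and only the state frame itself is saved.
+ */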
+
+#if (CPU_SOFTWARE_FP == FALSE)
+
+.set FPCONTEXT_ARG, 4 | save FP context argument
+
+ .align 4
+ .global SYM (_CPU_Context_save_fp)
+SYM (_CPU_Context_save_fp):
+#if ( M68K_HAS_FPU == 1 )
+ moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area
+ moval a1@,a0 | a0 = Save context area
+ fsave a0@- | save 68881/68882 state frame
+ tstb a0@ | check for a null frame
+ beq nosv | Yes, skip save of user model
+ fmovem fp0-fp7,a0@- | save data registers (fp0-fp7)
+ fmovem fpc/fps/fpi,a0@- | and save control registers
+ movl #-1,a0@- | place not-null flag on stack
+nosv: movl a0,a1@ | save pointer to saved context
+#endif
+ rts
+
+ .align 4
+ .global SYM (_CPU_Context_restore_fp)
+SYM (_CPU_Context_restore_fp):
+#if ( M68K_HAS_FPU == 1 )
+ moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area
+ moval a1@,a0 | a0 = address of saved context
+ tstb a0@ | Null context frame?
+ beq norst | Yes, skip fp restore
+ addql #4,a0 | throwaway non-null flag
+ fmovem a0@+,fpc/fps/fpi | restore control registers
+ fmovem a0@+,fp0-fp7 | restore data regs (fp0-fp7)
+norst: frestore a0@+ | restore the fp state frame
+ movl a0,a1@ | save pointer to saved context
+#endif
+ rts
+#endif
+
+/*PAGE
+ * void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * NOTE:
+ * Upon entry, the master stack will contain an interrupt stack frame
+ * back to the interrupted thread and the interrupt stack will contain
+ * a throwaway interrupt stack frame. If dispatching is enabled, this
+ * is the outermost interrupt, and (a context switch is necessary or
+ * the current thread has signals), then set up the master stack to
+ * transfer control to the interrupt dispatcher.
+ */
+
+/*
+ * With this approach, lower priority interrupts may
+ * execute twice if a higher priority interrupt is
+ * acknowledged before _Thread_Dispatch_disable is
+ * incremented and the higher priority interrupt
+ * performs a context switch after executing. The lower
+ * priority interrupt will execute (1) at the end of the
+ * higher priority interrupt in the new context if
+ * permitted by the new interrupt level mask, and (2) when
+ * the original context regains the cpu.
+ */
+
+#if ( M68K_COLDFIRE_ARCH == 1 )
+.set SR_OFFSET, 2 | Status register offset
+.set PC_OFFSET, 4 | Program Counter offset
+.set FVO_OFFSET, 0 | Format/vector offset
+#elif ( M68K_HAS_VBR == 1)
+.set SR_OFFSET, 0 | Status register offset
+.set PC_OFFSET, 2 | Program Counter offset
+.set FVO_OFFSET, 6 | Format/vector offset
+#else
+.set SR_OFFSET, 2 | Status register offset
+.set PC_OFFSET, 4 | Program Counter offset
+.set FVO_OFFSET, 0 | Format/vector offset placed in the stack
+#endif /* M68K_HAS_VBR */
+
+.set SAVED, 16 | space for saved registers
+
+ .align 4
+ .global SYM (_ISR_Handler)
+
+SYM (_ISR_Handler):
+ addql #1,SYM (_Thread_Dispatch_disable_level) | disable multitasking
+#if ( M68K_COLDFIRE_ARCH == 0 )
+ moveml d0-d1/a0-a1,a7@- | save d0-d1,a0-a1
+ movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO
+ andl #0x0fff,d0 | d0 = vector offset in vbr
+#else
+ lea a7@(-SAVED),a7
+ movm.l d0-d1/a0-a1,a7@ | save d0-d1,a0-a1
+ movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO
+ andl #0x0ffc,d0 | d0 = vector offset in vbr
+#endif
+
+
+#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 )
+ #if ( M68K_COLDFIRE_ARCH == 0 )
+ movew sr,d1 | Save status register
+ oriw #0x700,sr | Disable interrupts
+ #else
+ move.l d0,a7@- | Save d0 value
+ move.l #0x700,d0 | Load in disable ints value
+ move.w sr,d1 | Grab SR
+ or.l d1,d0 | Create new SR
+ move.w d0,sr | Disable interrupts
+ move.l a7@+,d0 | Restore d0 value
+ #endif
+
+ tstl SYM (_ISR_Nest_level) | Interrupting an interrupt handler?
+ bne 1f | Yes, just skip over stack switch code
+ movel SYM(_CPU_Interrupt_stack_high),a0 | End of interrupt stack
+ movel a7,a0@- | Save task stack pointer
+ movel a0,a7 | Switch to interrupt stack
+1:
+ addql #1,SYM(_ISR_Nest_level) | one nest level deeper
+ movew d1,sr | Restore status register
+#else
+ addql #1,SYM (_ISR_Nest_level) | one nest level deeper
+#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */
+
+#if ( M68K_HAS_PREINDEXING == 1 )
+ movel @( SYM (_ISR_Vector_table),d0:w:1),a0| fetch the ISR
+#else
+ movel # SYM (_ISR_Vector_table),a0 | a0 = base of RTEMS table
+ addal d0,a0 | a0 = address of vector
+ movel (a0),a0 | a0 = address of user routine
+#endif
+
+ lsrl #2,d0 | d0 = vector number
+ movel d0,a7@- | push vector number
+ jbsr a0@ | invoke the user ISR
+ addql #4,a7 | remove vector number
+
+#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 )
+ #if ( M68K_COLDFIRE_ARCH == 0 )
+ movew sr,d0 | Save status register
+ oriw #0x700,sr | Disable interrupts
+ #else
+ move.l #0x700,d1 | Load in disable int value
+ move.w sr,d0 | Grab SR
+ or.l d0,d1 | Create new SR
+ move.w d1,sr | Load to disable interrupts
+ #endif
+
+ subql #1,SYM(_ISR_Nest_level) | Reduce interrupt-nesting count
+ bne 1f | Skip if return to interrupt
+ movel (a7),a7 | Restore task stack pointer
+1:
+ movew d0,sr | Restore status register
+#else
+ subql #1,SYM (_ISR_Nest_level) | one less nest level
+#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */
+
+ subql #1,SYM (_Thread_Dispatch_disable_level)
+ | unnest multitasking
+ bne exit | If dispatch disabled, exit
+
+#if ( M68K_HAS_SEPARATE_STACKS == 1 )
+ movew #0xf000,d0 | isolate format nibble
+ andw a7@(SAVED+FVO_OFFSET),d0 | get F/VO
+ cmpiw #0x1000,d0 | is it a throwaway isf?
+ bne exit | NOT outer level, so branch
+#endif
+
+ tstl SYM (_Context_Switch_necessary)
+ | Is thread switch necessary?
+ bne bframe | Yes, invoke dispatcher
+
+ tstl SYM (_ISR_Signals_to_thread_executing)
+ | signals sent to Run_thread
+ | while in interrupt handler?
+ beq exit | No, then exit
+
+
+bframe: clrl SYM (_ISR_Signals_to_thread_executing)
+ | If sent, will be processed
+#if ( M68K_HAS_SEPARATE_STACKS == 1 )
+ movec msp,a0 | a0 = master stack pointer
+ movew #0,a0@- | push format word
+ movel #SYM(_ISR_Dispatch),a0@- | push return addr
+ movew a0@(6),a0@- | push saved sr
+ movec a0,msp | set master stack pointer
+#else
+ jsr SYM (_Thread_Dispatch) | Perform context switch
+#endif
+
+#if ( M68K_COLDFIRE_ARCH == 0 )
+exit: moveml a7@+,d0-d1/a0-a1 | restore d0-d1,a0-a1
+#else
+exit: moveml a7@,d0-d1/a0-a1 | restore d0-d1,a0-a1
+ lea a7@(SAVED),a7
+#endif
+
+#if ( M68K_HAS_VBR == 0 )
+ addql #2,a7 | pop format/id
+#endif /* M68K_HAS_VBR */
+ rte | return to thread
+					| OR _ISR_Dispatch
+
+/*PAGE
+ * void _ISR_Dispatch()
+ *
+ *  Entry point reached when the outermost interrupt service routine exits.
+ * The current stack is the supervisor mode stack if this processor
+ * has separate stacks.
+ *
+ * 1. save all registers not preserved across C calls.
+ * 2. invoke the _Thread_Dispatch routine to switch tasks
+ *       or to process a signal sent to the currently executing task.
+ * 3. restore all registers not preserved across C calls.
+ * 4. return from interrupt
+ */
+
+ .global SYM (_ISR_Dispatch)
+SYM (_ISR_Dispatch):
+#if ( M68K_COLDFIRE_ARCH == 0 )
+ movml d0-d1/a0-a1,a7@-
+ jsr SYM (_Thread_Dispatch)
+ movml a7@+,d0-d1/a0-a1
+#else
+ lea a7@(-SAVED),a7
+ movml d0-d1/a0-a1,a7@
+ jsr SYM (_Thread_Dispatch)
+ movml a7@,d0-d1/a0-a1
+ lea a7@(SAVED),a7
+#endif
+
+#if ( M68K_HAS_VBR == 0 )
+ addql #2,a7 | pop format/id
+#endif /* M68K_HAS_VBR */
+ rte
diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S
new file mode 100644
index 0000000000..39962eedeb
--- /dev/null
+++ b/cpukit/score/cpu/sparc/cpu_asm.S
@@ -0,0 +1,726 @@
+/* cpu_asm.S
+ *
+ * This file contains the basic algorithms for all assembly code used
+ * in a specific CPU port of RTEMS. These algorithms must be implemented
+ * in assembly language.
+ *
+ * COPYRIGHT (c) 1989-1998.
+ * On-Line Applications Research Corporation (OAR).
+ * Copyright assigned to U.S. Government, 1994.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.OARcorp.com/rtems/license.html.
+ *
+ * Ported to ERC32 implementation of the SPARC by On-Line Applications
+ * Research Corporation (OAR) under contract to the European Space
+ * Agency (ESA).
+ *
+ * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
+ * European Space Agency.
+ *
+ * $Id$
+ */
+
+#include <asm.h>
+
+#if (SPARC_HAS_FPU == 1)
+
+/*
+ * void _CPU_Context_save_fp(
+ * void **fp_context_ptr
+ * )
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr. If the location to which the FP context
+ * is saved is changed then the pointer is modified by this routine.
+ *
+ * NOTE: See the README in this directory for information on the
+ * management of the "EF" bit in the PSR.
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_save_fp)
+SYM(_CPU_Context_save_fp):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
+ /*
+ * The following enables the floating point unit.
+ */
+
+ mov %psr, %l0
+ sethi %hi(SPARC_PSR_EF_MASK), %l1
+ or %l1, %lo(SPARC_PSR_EF_MASK), %l1
+ or %l0, %l1, %l0
+ mov %l0, %psr ! **** ENABLE FLOAT ACCESS ****
+
+ ld [%i0], %l0
+ std %f0, [%l0 + FO_F1_OFFSET]
+ std %f2, [%l0 + F2_F3_OFFSET]
+ std %f4, [%l0 + F4_F5_OFFSET]
+ std %f6, [%l0 + F6_F7_OFFSET]
+ std %f8, [%l0 + F8_F9_OFFSET]
+ std %f10, [%l0 + F1O_F11_OFFSET]
+ std %f12, [%l0 + F12_F13_OFFSET]
+ std %f14, [%l0 + F14_F15_OFFSET]
+ std %f16, [%l0 + F16_F17_OFFSET]
+ std %f18, [%l0 + F18_F19_OFFSET]
+ std %f20, [%l0 + F2O_F21_OFFSET]
+ std %f22, [%l0 + F22_F23_OFFSET]
+ std %f24, [%l0 + F24_F25_OFFSET]
+ std %f26, [%l0 + F26_F27_OFFSET]
+ std %f28, [%l0 + F28_F29_OFFSET]
+ std %f30, [%l0 + F3O_F31_OFFSET]
+ st %fsr, [%l0 + FSR_OFFSET]
+ ret
+ restore
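+
+/*
+ * The PSR manipulation at the top of the routine above is, in C terms
+ * (illustrative sketch only):
+ *
+ *   psr |= SPARC_PSR_EF_MASK;   // set EF: enable FP register access
+ *
+ * so the std/st instructions can write the FP registers without
+ * trapping even if the interrupted code had the FPU disabled.
+ */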
+
+/*
+ * void _CPU_Context_restore_fp(
+ * void **fp_context_ptr
+ * )
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr. If the location from which the FP context
+ * is loaded is changed then the pointer is modified by this routine.
+ *
+ * NOTE: See the README in this directory for information on the
+ * management of the "EF" bit in the PSR.
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_restore_fp)
+SYM(_CPU_Context_restore_fp):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp
+
+ /*
+ * The following enables the floating point unit.
+ */
+
+ mov %psr, %l0
+ sethi %hi(SPARC_PSR_EF_MASK), %l1
+ or %l1, %lo(SPARC_PSR_EF_MASK), %l1
+ or %l0, %l1, %l0
+ mov %l0, %psr ! **** ENABLE FLOAT ACCESS ****
+
+ ld [%i0], %l0
+ ldd [%l0 + FO_F1_OFFSET], %f0
+ ldd [%l0 + F2_F3_OFFSET], %f2
+ ldd [%l0 + F4_F5_OFFSET], %f4
+ ldd [%l0 + F6_F7_OFFSET], %f6
+ ldd [%l0 + F8_F9_OFFSET], %f8
+ ldd [%l0 + F1O_F11_OFFSET], %f10
+ ldd [%l0 + F12_F13_OFFSET], %f12
+ ldd [%l0 + F14_F15_OFFSET], %f14
+ ldd [%l0 + F16_F17_OFFSET], %f16
+ ldd [%l0 + F18_F19_OFFSET], %f18
+ ldd [%l0 + F2O_F21_OFFSET], %f20
+ ldd [%l0 + F22_F23_OFFSET], %f22
+ ldd [%l0 + F24_F25_OFFSET], %f24
+ ldd [%l0 + F26_F27_OFFSET], %f26
+ ldd [%l0 + F28_F29_OFFSET], %f28
+ ldd [%l0 + F3O_F31_OFFSET], %f30
+ ld [%l0 + FSR_OFFSET], %fsr
+ ret
+ restore
+
+#endif /* SPARC_HAS_FPU */
+
+/*
+ * void _CPU_Context_switch(
+ * Context_Control *run,
+ * Context_Control *heir
+ * )
+ *
+ * This routine performs a normal non-FP context switch.
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_switch)
+SYM(_CPU_Context_switch):
+ ! skip g0
+ st %g1, [%o0 + G1_OFFSET] ! save the global registers
+ std %g2, [%o0 + G2_OFFSET]
+ std %g4, [%o0 + G4_OFFSET]
+ std %g6, [%o0 + G6_OFFSET]
+
+ std %l0, [%o0 + L0_OFFSET] ! save the local registers
+ std %l2, [%o0 + L2_OFFSET]
+ std %l4, [%o0 + L4_OFFSET]
+ std %l6, [%o0 + L6_OFFSET]
+
+ std %i0, [%o0 + I0_OFFSET] ! save the input registers
+ std %i2, [%o0 + I2_OFFSET]
+ std %i4, [%o0 + I4_OFFSET]
+ std %i6, [%o0 + I6_FP_OFFSET]
+
+ std %o0, [%o0 + O0_OFFSET] ! save the output registers
+ std %o2, [%o0 + O2_OFFSET]
+ std %o4, [%o0 + O4_OFFSET]
+ std %o6, [%o0 + O6_SP_OFFSET]
+
+ rd %psr, %o2
+ st %o2, [%o0 + PSR_OFFSET] ! save status register
+
+ /*
+ * This is entered from _CPU_Context_restore with:
+ * o1 = context to restore
+ * o2 = psr
+ */
+
+ PUBLIC(_CPU_Context_restore_heir)
+SYM(_CPU_Context_restore_heir):
+ /*
+ * Flush all windows with valid contents except the current one.
+	 * In examining the set of register windows, one may logically divide
+ * the windows into sets (some of which may be empty) based on their
+ * current status:
+ *
+ * + current (i.e. in use),
+ * + used (i.e. a restore would not trap)
+ * + invalid (i.e. 1 in corresponding bit in WIM)
+ * + unused
+ *
+ * Either the used or unused set of windows may be empty.
+ *
+ * NOTE: We assume only one bit is set in the WIM at a time.
+ *
+ * Given a CWP of 5 and a WIM of 0x1, the registers are divided
+ * into sets as follows:
+ *
+ * + 0 - invalid
+ * + 1-4 - unused
+ * + 5 - current
+ * + 6-7 - used
+ *
+	 * In this case, we would only save the used windows -- 6 and 7.
+ *
+ * Traps are disabled for the same logical period as in a
+ * flush all windows trap handler.
+ *
+ * Register Usage while saving the windows:
+ * g1 = current PSR
+ * g2 = current wim
+ * g3 = CWP
+ * g4 = wim scratch
+ * g5 = scratch
+ */
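+
+	/*
+	 *  An illustrative C sketch of the flush loop below (NW stands
+	 *  for SPARC_NUMBER_OF_REGISTER_WINDOWS; restore_window and
+	 *  save_window_to_sp are hypothetical helper names):
+	 *
+	 *    mask = 1 << CWP;
+	 *    for (;;) {
+	 *      mask = (mask << 1) | (mask >> (NW - 1)); // after one restore
+	 *      if ( mask & WIM )
+	 *        break;               // next window is invalid: done
+	 *      restore_window();      // back one window (restore)
+	 *      save_window_to_sp();   // spill it just as an overflow would
+	 *    }
+	 */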
+
+ ld [%o1 + PSR_OFFSET], %g1 ! g1 = saved psr
+
+ and %o2, SPARC_PSR_CWP_MASK, %g3 ! g3 = CWP
+ ! g1 = psr w/o cwp
+ andn %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
+	or	%g1, %g3, %g1			! g1 = heir's psr
+ mov %g1, %psr ! restore status register and
+ ! **** DISABLE TRAPS ****
+ mov %wim, %g2 ! g2 = wim
+ mov 1, %g4
+ sll %g4, %g3, %g4 ! g4 = WIM mask for CW invalid
+
+save_frame_loop:
+ sll %g4, 1, %g5 ! rotate the "wim" left 1
+ srl %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
+ or %g4, %g5, %g4 ! g4 = wim if we do one restore
+
+ /*
+ * If a restore would not underflow, then continue.
+ */
+
+	andcc	%g4, %g2, %g0			! Would a restore underflow?
+	bnz	done_flushing			! Yes, then done flushing
+ nop
+
+ restore ! back one window
+
+ /*
+ * Now save the window just as if we overflowed to it.
+ */
+
+ std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
+ std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
+ std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
+ std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
+
+ std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
+ std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
+ std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
+ std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
+
+ ba save_frame_loop
+ nop
+
+done_flushing:
+
+ add %g3, 1, %g3 ! calculate desired WIM
+ and %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
+ mov 1, %g4
+ sll %g4, %g3, %g4 ! g4 = new WIM
+ mov %g4, %wim
+
+ or %g1, SPARC_PSR_ET_MASK, %g1
+ mov %g1, %psr ! **** ENABLE TRAPS ****
+ ! and restore CWP
+ nop
+ nop
+ nop
+
+ ! skip g0
+ ld [%o1 + G1_OFFSET], %g1 ! restore the global registers
+ ldd [%o1 + G2_OFFSET], %g2
+ ldd [%o1 + G4_OFFSET], %g4
+ ldd [%o1 + G6_OFFSET], %g6
+
+ ldd [%o1 + L0_OFFSET], %l0 ! restore the local registers
+ ldd [%o1 + L2_OFFSET], %l2
+ ldd [%o1 + L4_OFFSET], %l4
+ ldd [%o1 + L6_OFFSET], %l6
+
+	ldd	[%o1 + I0_OFFSET], %i0		! restore the input registers
+ ldd [%o1 + I2_OFFSET], %i2
+ ldd [%o1 + I4_OFFSET], %i4
+ ldd [%o1 + I6_FP_OFFSET], %i6
+
+ ldd [%o1 + O2_OFFSET], %o2 ! restore the output registers
+ ldd [%o1 + O4_OFFSET], %o4
+ ldd [%o1 + O6_SP_OFFSET], %o6
+ ! do o0/o1 last to avoid destroying heir context pointer
+ ldd [%o1 + O0_OFFSET], %o0 ! overwrite heir pointer
+
+ jmp %o7 + 8 ! return
+ nop ! delay slot
+
+/*
+ * void _CPU_Context_restore(
+ * Context_Control *new_context
+ * )
+ *
+ * This routine is generally used only to restart the currently
+ * executing thread (i.e. to perform a restart of self).
+ *
+ * NOTE: It is unnecessary to reload some registers.
+ */
+
+ .align 4
+ PUBLIC(_CPU_Context_restore)
+SYM(_CPU_Context_restore):
+ save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+ rd %psr, %o2
+ ba SYM(_CPU_Context_restore_heir)
+ mov %i0, %o1 ! in the delay slot
+
+/*
+ * void _ISR_Handler()
+ *
+ * This routine provides the RTEMS interrupt management.
+ *
+ * We enter this handler from the 4 instructions in the trap table with
+ * the following registers assumed to be set as shown:
+ *
+ * l0 = PSR
+ * l1 = PC
+ * l2 = nPC
+ * l3 = trap type
+ *
+ * NOTE: By an executive defined convention, trap type is between 0 and 255 if
+ *       it is an asynchronous trap and between 256 and 511 if it is synchronous.
+ */
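+
+/*
+ * In C terms the convention is (illustrative sketch only):
+ *
+ *   vector  = trap_type & 0xFF;   // index into _ISR_Vector_table
+ *   is_sync = ( trap_type & SPARC_SYNCHRONOUS_TRAP_BIT_MASK ) != 0;
+ */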
+
+ .align 4
+ PUBLIC(_ISR_Handler)
+SYM(_ISR_Handler):
+ /*
+ * Fix the return address for synchronous traps.
+ */
+
+ andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
+ ! Is this a synchronous trap?
+ be,a win_ovflow ! No, then skip the adjustment
+ nop ! DELAY
+ mov %l2, %l1 ! do not return to the instruction
+ add %l2, 4, %l2 ! indicated
+
+win_ovflow:
+ /*
+ * Save the globals this block uses.
+ *
+ * These registers are not restored from the locals. Their contents
+ * are saved directly from the locals into the ISF below.
+ */
+
+ mov %g4, %l4 ! save the globals this block uses
+ mov %g5, %l5
+
+ /*
+ * When at a "window overflow" trap, (wim == (1 << cwp)).
+ * If we get here like that, then process a window overflow.
+ */
+
+ rd %wim, %g4
+	srl	%g4, %l0, %g5			! g5 = wim >> cwp ; shift count and CWP
+ ! are LS 5 bits ; how convenient :)
+ cmp %g5, 1 ! Is this an invalid window?
+ bne dont_do_the_window ! No, then skip all this stuff
+ ! we are using the delay slot
+
+ /*
+ * The following is same as a 1 position right rotate of WIM
+ */
+
+ srl %g4, 1, %g5 ! g5 = WIM >> 1
+ sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
+ ! g4 = WIM << (Number Windows - 1)
+ or %g4, %g5, %g4 ! g4 = (WIM >> 1) |
+ ! (WIM << (Number Windows - 1))
+
+ /*
+ * At this point:
+ *
+ * g4 = the new WIM
+ * g5 is free
+ */
+
+ /*
+ * Since we are tinkering with the register windows, we need to
+ * make sure that all the required information is in global registers.
+ */
+
+ save ! Save into the window
+ wr %g4, 0, %wim ! WIM = new WIM
+ nop ! delay slots
+ nop
+ nop
+
+ /*
+ * Now save the window just as if we overflowed to it.
+ */
+
+ std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
+ std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
+ std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
+ std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
+
+ std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
+ std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
+ std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
+ std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
+
+ restore
+ nop
+
+dont_do_the_window:
+ /*
+	 * Global registers %g4 and %g5 are saved from %l4 and %l5
+	 * directly into the ISF below.
+ */
+
+save_isf:
+
+ /*
+ * Save the state of the interrupted task -- especially the global
+ * registers -- in the Interrupt Stack Frame. Note that the ISF
+ * includes a regular minimum stack frame which will be used if
+ * needed by register window overflow and underflow handlers.
+ *
+ * REGISTERS SAME AS AT _ISR_Handler
+ */
+
+ sub %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
+ ! make space for ISF
+
+ std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC
+ st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC
+ st %g1, [%sp + ISF_G1_OFFSET] ! save g1
+ std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3
+ std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above
+ std %g6, [%sp + ISF_G6_OFFSET] ! save g6, g7
+
+ std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1
+ std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3
+ std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5
+ std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7
+
+ rd %y, %g1
+ st %g1, [%sp + ISF_Y_OFFSET] ! save y
+
+ mov %sp, %o1 ! 2nd arg to ISR Handler
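+
+	/*
+	 *  Conceptually the ISF built above is (illustrative C sketch;
+	 *  the authoritative layout is given by the ISF_*_OFFSET
+	 *  constants, so take the field order here as an assumption):
+	 *
+	 *    struct isf {
+	 *      minimum_stack_frame window_spill_area; // overflow/underflow
+	 *      unsigned32 psr, pc, npc;
+	 *      unsigned32 g1, g2_g7[ 6 ];
+	 *      unsigned32 i0_i7[ 8 ];                 // i6 = fp
+	 *      unsigned32 y;
+	 *    };
+	 */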
+
+ /*
+ * Increment ISR nest level and Thread dispatch disable level.
+ *
+ * Register usage for this section:
+ *
+ * l4 = _Thread_Dispatch_disable_level pointer
+ * l5 = _ISR_Nest_level pointer
+ * l6 = _Thread_Dispatch_disable_level value
+ * l7 = _ISR_Nest_level value
+ *
+ * NOTE: It is assumed that l4 - l7 will be preserved until the ISR
+ * nest and thread dispatch disable levels are unnested.
+ */
+
+ sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4
+ ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
+ sethi %hi(SYM(_ISR_Nest_level)), %l5
+ ld [%l5 + %lo(SYM(_ISR_Nest_level))], %l7
+
+ add %l6, 1, %l6
+ st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
+
+ add %l7, 1, %l7
+ st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]
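+
+	/*
+	 *  Illustrative C sketch of this section and the stack switch
+	 *  just below (not part of the build):
+	 *
+	 *    _Thread_Dispatch_disable_level++;
+	 *    if ( ++_ISR_Nest_level == 1 )       // outermost interrupt?
+	 *      sp = _CPU_Interrupt_stack_high;   // switch to interrupt stack
+	 */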
+
+ /*
+ * If ISR nest level was zero (now 1), then switch stack.
+ */
+
+ mov %sp, %fp
+ subcc %l7, 1, %l7 ! outermost interrupt handler?
+ bnz dont_switch_stacks ! No, then do not switch stacks
+
+ sethi %hi(SYM(_CPU_Interrupt_stack_high)), %g4
+ ld [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp
+
+dont_switch_stacks:
+ /*
+ * Make sure we have a place on the stack for the window overflow
+ * trap handler to write into. At this point it is safe to
+ * enable traps again.
+ */
+
+ sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
+ /*
+ * Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
+ * set the PIL in the %psr to mask off interrupts with lower priority.
+ * The original %psr in %l0 is not modified since it will be restored
+ * when the interrupt handler returns.
+ */
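+
+	/*
+	 *  Illustrative C sketch (traps 0x11 - 0x1f are external
+	 *  interrupts 1 - 15; the PIL field is bits 8 - 11 of the PSR):
+	 *
+	 *    new_psr = old_psr;
+	 *    if ( trap >= 0x11 && trap <= 0x1f )
+	 *      new_psr = ( old_psr & ~SPARC_PSR_PIL_MASK ) |
+	 *                ( ( trap << 8 ) & SPARC_PSR_PIL_MASK );
+	 *    // the wr below also sets ET, reenabling traps
+	 */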
+
+ mov %l0, %g5
+ subcc %l3, 0x11, %g0
+ bl dont_fix_pil
+ subcc %l3, 0x1f, %g0
+ bg dont_fix_pil
+ sll %l3, 8, %g4
+ and %g4, SPARC_PSR_PIL_MASK, %g4
+ andn %l0, SPARC_PSR_PIL_MASK, %g5
+ or %g4, %g5, %g5
+dont_fix_pil:
+ wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
+
+ /*
+ * Vector to user's handler.
+ *
+ * NOTE: TBR may no longer have vector number in it since
+ * we just enabled traps. It is definitely in l3.
+ */
+
+ sethi %hi(SYM(_ISR_Vector_table)), %g4
+ or %g4, %lo(SYM(_ISR_Vector_table)), %g4
+ and %l3, 0xFF, %g5 ! remove synchronous trap indicator
+ sll %g5, 2, %g5 ! g5 = offset into table
+ ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ]
+
+
+ ! o1 = 2nd arg = address of the ISF
+ ! WAS LOADED WHEN ISF WAS SAVED!!!
+ mov %l3, %o0 ! o0 = 1st arg = vector number
+ call %g4, 0
+ nop ! delay slot
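+
+	/*
+	 *  i.e., in C (illustrative sketch; isf is the frame saved above):
+	 *
+	 *    (*_ISR_Vector_table[ trap_type & 0xFF ])( trap_type, isf );
+	 */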
+
+ /*
+ * Redisable traps so we can finish up the interrupt processing.
+ * This is a VERY conservative place to do this.
+ *
+ * NOTE: %l0 has the PSR which was in place when we took the trap.
+ */
+
+ mov %l0, %psr ! **** DISABLE TRAPS ****
+
+ /*
+ * Decrement ISR nest level and Thread dispatch disable level.
+ *
+ * Register usage for this section:
+ *
+ * l4 = _Thread_Dispatch_disable_level pointer
+ * l5 = _ISR_Nest_level pointer
+ * l6 = _Thread_Dispatch_disable_level value
+ * l7 = _ISR_Nest_level value
+ */
+
+ sub %l6, 1, %l6
+ st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
+
+ st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]
+
+ /*
+ * If dispatching is disabled (includes nested interrupt case),
+ * then do a "simple" exit.
+ */
+
+ orcc %l6, %g0, %g0 ! Is dispatching disabled?
+ bnz simple_return ! Yes, then do a "simple" exit
+ nop ! delay slot
+
+ /*
+	 * If a context switch is necessary, then fudge the stack so that
+	 * we return through the interrupt dispatcher.
+ */
+
+ sethi %hi(SYM(_Context_Switch_necessary)), %l4
+ ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5
+
+ orcc %l5, %g0, %g0 ! Is thread switch necessary?
+ bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher
+ nop ! delay slot
+
+ /*
+ * Finally, check to see if signals were sent to the currently
+ * executing task. If so, we need to invoke the interrupt dispatcher.
+ */
+
+ sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
+ ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7
+
+ orcc %l7, %g0, %g0 ! Were signals sent to the currently
+ ! executing thread?
+	bz	simple_return			! No, then do a simple return
+ ! use the delay slot to clear the signals
+ ! to the currently executing task flag
+ st %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))]
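+
+/*
+ * Putting the three tests above together (illustrative C sketch; the
+ * store in the branch delay slot clears the signals flag on either
+ * outcome of the final test):
+ *
+ *   if ( _Thread_Dispatch_disable_level != 0 )
+ *     goto simple_return;
+ *   if ( !_Context_Switch_necessary ) {
+ *     dispatch = ( _ISR_Signals_to_thread_executing != 0 );
+ *     _ISR_Signals_to_thread_executing = 0;
+ *     if ( !dispatch )
+ *       goto simple_return;
+ *   }
+ *   // fall through into _ISR_Dispatch
+ */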
+
+
+ /*
+ * Invoke interrupt dispatcher.
+ */
+
+ PUBLIC(_ISR_Dispatch)
+SYM(_ISR_Dispatch):
+
+ /*
+	 * The following subtract should get us back on the interrupted
+	 * task's stack and add enough room to invoke the dispatcher.
+ * When we enable traps, we are mostly back in the context
+ * of the task and subsequent interrupts can operate normally.
+ */
+
+ sub %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
+
+ or %l0, SPARC_PSR_ET_MASK, %l7 ! l7 = PSR with ET=1
+ mov %l7, %psr ! **** ENABLE TRAPS ****
+ nop
+ nop
+ nop
+
+ call SYM(_Thread_Dispatch), 0
+ nop
+
+ /*
+ * The CWP in place at this point may be different from
+ * that which was in effect at the beginning of the ISR if we
+ * have been context switched between the beginning of this invocation
+ * of _ISR_Handler and this point. Thus the CWP and WIM should
+ * not be changed back to their values at ISR entry time. Any
+ * changes to the PSR must preserve the CWP.
+ */
+
+simple_return:
+ ld [%fp + ISF_Y_OFFSET], %l5 ! restore y
+ wr %l5, 0, %y
+
+ ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC
+ ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC
+ rd %psr, %l3
+ and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP
+ andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task
+ or %l3, %l0, %l0 ! install it later...
+ andn %l0, SPARC_PSR_ET_MASK, %l0
+
+ /*
+ * Restore tasks global and out registers
+ */
+
+ mov %fp, %g1
+
+ ! g1 is restored later
+ ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3
+ ldd [%fp + ISF_G4_OFFSET], %g4 ! restore g4, g5
+ ldd [%fp + ISF_G6_OFFSET], %g6 ! restore g6, g7
+
+ ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1
+ ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3
+ ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5
+ ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7
+
+ /*
+ * Registers:
+ *
+	 * ALL global registers EXCEPT G1 and the input registers have
+	 * already been restored and are thus off limits.
+ *
+ * The following is the contents of the local registers:
+ *
+ * l0 = original psr
+ * l1 = return address (i.e. PC)
+ * l2 = nPC
+ * l3 = CWP
+ */
+
+ /*
+ * if (CWP + 1) is an invalid window then we need to reload it.
+ *
+ * WARNING: Traps should now be disabled
+ */
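+
+	/*
+	 *  Illustrative C sketch of the check below (NW stands for
+	 *  SPARC_NUMBER_OF_REGISTER_WINDOWS; reload_task_window is a
+	 *  hypothetical name for the code that follows):
+	 *
+	 *    if ( ( WIM >> ( ( CWP + 1 ) % NW ) ) == 1 )
+	 *      reload_task_window();   // rotate WIM left one position,
+	 *                              // restore, refill locals/ins from
+	 *                              // the window save area
+	 */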
+
+ mov %l0, %psr ! **** DISABLE TRAPS ****
+ nop
+ nop
+ nop
+ rd %wim, %l4
+ add %l0, 1, %l6 ! l6 = cwp + 1
+ and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
+	srl	%l4, %l6, %l5			! l5 = wim >> (cwp + 1) ; shift count
+ ! and CWP are conveniently LS 5 bits
+	cmp	%l5, 1				! Is the task's window invalid?
+ bne good_task_window
+
+ /*
+ * The following code is the same as a 1 position left rotate of WIM.
+ */
+
+ sll %l4, 1, %l5 ! l5 = WIM << 1
+ srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
+ ! l4 = WIM >> (Number Windows - 1)
+ or %l4, %l5, %l4 ! l4 = (WIM << 1) |
+ ! (WIM >> (Number Windows - 1))
+
+ /*
+ * Now restore the window just as if we underflowed to it.
+ */
+
+ wr %l4, 0, %wim ! WIM = new WIM
+ nop ! must delay after writing WIM
+ nop
+ nop
+ restore ! now into the tasks window
+
+ ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
+ ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
+ ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
+ ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
+ ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
+ ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
+ ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
+ ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
+ ! reload of sp clobbers ISF
+ save ! Back to ISR dispatch window
+
+good_task_window:
+
+ mov %l0, %psr ! **** DISABLE TRAPS ****
+ ! and restore condition codes.
+ ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
+ jmp %l1 ! transfer control and
+ rett %l2 ! go back to tasks window
+
+/* end of file */