From 01629105c2817a59a4f1f05039593f211cf5ddaa Mon Sep 17 00:00:00 2001 From: Joel Sherrill Date: Mon, 14 Dec 1998 23:15:38 +0000 Subject: Patch from Ralf Corsepius to rename all .s files to .S in conformance with GNU conventions. This is a minor step along the way to supporting automake. --- c/src/exec/score/cpu/a29k/Makefile.in | 6 +- c/src/exec/score/cpu/a29k/cpu_asm.S | 491 +++++++++++++++++++ c/src/exec/score/cpu/a29k/cpu_asm.s | 491 ------------------- c/src/exec/score/cpu/a29k/sig.S | 197 ++++++++ c/src/exec/score/cpu/a29k/sig.s | 197 -------- c/src/exec/score/cpu/hppa1.1/Makefile.in | 4 +- c/src/exec/score/cpu/hppa1.1/cpu_asm.S | 778 +++++++++++++++++++++++++++++ c/src/exec/score/cpu/hppa1.1/cpu_asm.s | 778 ----------------------------- c/src/exec/score/cpu/hppa1.1/rtems.S | 53 ++ c/src/exec/score/cpu/hppa1.1/rtems.s | 53 -- c/src/exec/score/cpu/i386/Makefile.in | 6 +- c/src/exec/score/cpu/i386/cpu_asm.S | 282 +++++++++++ c/src/exec/score/cpu/i386/cpu_asm.s | 282 ----------- c/src/exec/score/cpu/i386/rtems.S | 31 ++ c/src/exec/score/cpu/i386/rtems.s | 31 -- c/src/exec/score/cpu/i960/Makefile.in | 6 +- c/src/exec/score/cpu/i960/cpu_asm.S | 199 ++++++++ c/src/exec/score/cpu/i960/cpu_asm.s | 199 -------- c/src/exec/score/cpu/i960/rtems.S | 25 + c/src/exec/score/cpu/i960/rtems.s | 25 - c/src/exec/score/cpu/m68k/Makefile.in | 6 +- c/src/exec/score/cpu/m68k/cpu_asm.S | 291 +++++++++++ c/src/exec/score/cpu/m68k/cpu_asm.s | 291 ----------- c/src/exec/score/cpu/m68k/rtems.S | 52 ++ c/src/exec/score/cpu/m68k/rtems.s | 52 -- c/src/exec/score/cpu/no_cpu/Makefile.in | 6 +- c/src/exec/score/cpu/powerpc/Makefile.in | 8 +- c/src/exec/score/cpu/powerpc/cpu_asm.S | 809 +++++++++++++++++++++++++++++++ c/src/exec/score/cpu/powerpc/cpu_asm.s | 809 ------------------------------- c/src/exec/score/cpu/powerpc/irq_stub.S | 268 ++++++++++ c/src/exec/score/cpu/powerpc/irq_stub.s | 268 ---------- c/src/exec/score/cpu/powerpc/rtems.S | 132 +++++ c/src/exec/score/cpu/powerpc/rtems.s | 132 ----- c/src/exec/score/cpu/sh/Makefile.in | 6 +- c/src/exec/score/cpu/sparc/Makefile.in | 6 +- c/src/exec/score/cpu/sparc/cpu_asm.S | 726 +++++++++++++++++++++++++++ c/src/exec/score/cpu/sparc/cpu_asm.s | 726 --------------------------- c/src/exec/score/cpu/sparc/rtems.S | 58 +++ c/src/exec/score/cpu/sparc/rtems.s | 58 --- 39 files changed, 4419 insertions(+), 4419 deletions(-) create mode 100644 c/src/exec/score/cpu/a29k/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/a29k/cpu_asm.s create mode 100644 c/src/exec/score/cpu/a29k/sig.S delete mode 100644 c/src/exec/score/cpu/a29k/sig.s create mode 100644 c/src/exec/score/cpu/hppa1.1/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/hppa1.1/cpu_asm.s create mode 100644 c/src/exec/score/cpu/hppa1.1/rtems.S delete mode 100644 c/src/exec/score/cpu/hppa1.1/rtems.s create mode 100644 c/src/exec/score/cpu/i386/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/i386/cpu_asm.s create mode 100644 c/src/exec/score/cpu/i386/rtems.S delete mode 100644 c/src/exec/score/cpu/i386/rtems.s create mode 100644 c/src/exec/score/cpu/i960/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/i960/cpu_asm.s create mode 100644 c/src/exec/score/cpu/i960/rtems.S delete mode 100644 c/src/exec/score/cpu/i960/rtems.s create mode 100644 c/src/exec/score/cpu/m68k/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/m68k/cpu_asm.s create mode 100644 c/src/exec/score/cpu/m68k/rtems.S delete mode 100644 c/src/exec/score/cpu/m68k/rtems.s create mode 100644 c/src/exec/score/cpu/powerpc/cpu_asm.S delete mode 100644 
c/src/exec/score/cpu/powerpc/cpu_asm.s create mode 100644 c/src/exec/score/cpu/powerpc/irq_stub.S delete mode 100644 c/src/exec/score/cpu/powerpc/irq_stub.s create mode 100644 c/src/exec/score/cpu/powerpc/rtems.S delete mode 100644 c/src/exec/score/cpu/powerpc/rtems.s create mode 100644 c/src/exec/score/cpu/sparc/cpu_asm.S delete mode 100644 c/src/exec/score/cpu/sparc/cpu_asm.s create mode 100644 c/src/exec/score/cpu/sparc/rtems.S delete mode 100644 c/src/exec/score/cpu/sparc/rtems.s (limited to 'c/src/exec') diff --git a/c/src/exec/score/cpu/a29k/Makefile.in b/c/src/exec/score/cpu/a29k/Makefile.in index f22da60b68..88873b749c 100644 --- a/c/src/exec/score/cpu/a29k/Makefile.in +++ b/c/src/exec/score/cpu/a29k/Makefile.in @@ -22,11 +22,11 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/a29k.h $(srcdir)/a29ktypes.h EXTERNAL_H_FILES = $(srcdir)/asm.h $(srcdir)/amd.ah \ $(srcdir)/pswmacro.ah $(srcdir)/register.ah -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S # Normally cpu_asm and rtems are assembly files S_PIECES=cpu_asm sig -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/a29k/cpu_asm.S b/c/src/exec/score/cpu/a29k/cpu_asm.S new file mode 100644 index 0000000000..a3ed2c59c3 --- /dev/null +++ b/c/src/exec/score/cpu/a29k/cpu_asm.S @@ -0,0 +1,491 @@ +;/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s +; * +; * Author: Craig Lebakken +; * +; * COPYRIGHT (c) 1996 by Transition Networks Inc. +; * +; * To anyone who acknowledges that this file is provided "AS IS" +; * without any express or implied warranty: +; * permission to use, copy, modify, and distribute this file +; * for any purpose is hereby granted without fee, provided that +; * the above copyright notice and this notice appears in all +; * copies, and that the name of Transition Networks not be used in +; * advertising or publicity pertaining to distribution of the +; * software without specific, written prior permission. +; * Transition Networks makes no representations about the suitability +; * of this software for any purpose. +; * +; * +; * This file contains the basic algorithms for all assembly code used +; * in an specific CPU port of RTEMS. These algorithms must be implemented +; * in assembly language +; * +; * NOTE: This is supposed to be a .S or .s file NOT a C file. +; * +; * COPYRIGHT (c) 1989-1998. +; * On-Line Applications Research Corporation (OAR). +; * Copyright assigned to U.S. Government, 1994. +; * +; * The license and distribution terms for this file may be +; * found in the file LICENSE in this distribution or at +; * http://www.OARcorp.com/rtems/license.html. +; * +; * $Id$ +; */ + +;/* +; * This is supposed to be an assembly file. This means that system.h +; * and cpu.h should not be included in a "real" cpu_asm file. 
An +; * implementation in assembly should include "cpu_asm.h> +; */ + +;#include + .include "register.ah" + .include "amd.ah" + .include "pswmacro.ah" +; .extern _bsp_exit +; +; push a register onto the struct + .macro spush, sp, reg + store 0, 0, reg, sp ; push register + add sp, sp, 4 ; adjust stack pointer + .endm +; push a register onto the struct + .macro spushsr, sp, reg, sr + mfsr reg, sr + store 0, 0, reg, sp ; push register + add sp, sp, 4 ; adjust stack pointer + .endm +; pop a register from the struct + .macro spop, reg, sp + load 0, 0, reg, sp + add sp,sp,4 + .endm +; pop a special register from the struct + .macro spopsr, sreg, reg, sp + load 0, 0, reg, sp + mtsr sreg, reg + add sp,sp,4 + .endm +; +;/* +; * _CPU_Context_save_fp_context +; * +; * This routine is responsible for saving the FP context +; * at *fp_context_ptr. If the point to load the FP context +; * from is changed then the pointer is modified by this routine. +; * +; * Sometimes a macro implementation of this is in cpu.h which dereferences +; * the ** and a similarly named routine in this file is passed something +; * like a (Context_Control_fp *). The general rule on making this decision +; * is to avoid writing assembly language. +; */ + +;#if 0 +;void _CPU_Context_save_fp( +; void **fp_context_ptr +;) +;{ +;} +;#endif + .global _CPU_Context_save_fp +_CPU_Context_save_fp: + jmpi lr0 + nop + +;/* +; * _CPU_Context_restore_fp_context +; * +; * This routine is responsible for restoring the FP context +; * at *fp_context_ptr. If the point to load the FP context +; * from is changed then the pointer is modified by this routine. +; * +; * Sometimes a macro implementation of this is in cpu.h which dereferences +; * the ** and a similarly named routine in this file is passed something +; * like a (Context_Control_fp *). The general rule on making this decision +; * is to avoid writing assembly language. +; */ + +;#if 0 +;void _CPU_Context_restore_fp( +; void **fp_context_ptr +;) +;{ +;} +;#endif + .global __CPU_Context_restore_fp +__CPU_Context_restore_fp: + jmpi lr0 + nop + +;/* _CPU_Context_switch +; * +; * This routine performs a normal non-FP context switch. +; */ +;#if 0 +;void _CPU_Context_switch( +; Context_Control *run, +; Context_Control *heir +;) +;{ +;} +;#endif + .global __CPU_Context_switch +__CPU_Context_switch: + asneq 106, gr1, gr1 ; syscall + jmpi lr0 ; + nop ; + + + + .global _a29k_context_switch_sup +_a29k_context_switch_sup: + add pcb,lr2,0 + add kt1,lr3,0 ;move heir pointer to safe location + constn it0,SIG_SYNC + spush pcb,it0 + spush pcb,gr1 + spush pcb,rab ;push rab + spushsr pcb,it0,pc0 ;push specials + spushsr pcb,it0,pc1 + add pcb,pcb,1*4 ;space pc2 + spushsr pcb,it0,CHA ;push CHA + spushsr pcb,it0,CHD ;push CHD + spushsr pcb,it0,CHC ;push CHC + add pcb,pcb,1*4 ;space for alu + spushsr pcb,it0,ops ;push OPS + mfsr kt0,cps ;current status + const it1,FZ ;FZ constant + andn it1,kt0,it1 ;clear FZ bit + mtsr cps,it1 ;cps without FZ + add pcb,pcb,1*4 ;space for tav + mtsrim chc,0 ;possible DERR +; + spush pcb,lr1 ;push R-stack + spush pcb,rfb ; support + spush pcb,msp ;push M-stack pnt. 
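+;
+; note: the slots below are reserved but never written on this path --
+; the FPStat pushes are commented out, so pcb is simply stepped past
+; them to keep the save layout in sync with the pops in context_restore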
+; + add pcb,pcb,3*4 ;space for floating point +; spush pcb,FPStat0 ;floating point +; spush pcb,FPStat1 +; spush pcb,FPStat2 +; + add pcb,pcb,4*4 ;space for IPA..Q +; + mtsrim cr,29-1 + storem 0,0,gr96,pcb ;push gr96-124, optional + add pcb,pcb,29*4 ;space for gr96-124 +; + sub it0,rfb,gr1 ;get bytes in cache + srl it0,it0,2 ;adjust to words + sub it0,it0,1 + spush pcb,it0 + mtsr cr,it0 + storem 0,0,lr0,pcb ;save lr0-rfb +; +context_restore: + add pcb,kt1,0 ;pcb=heir + add pcb,pcb,4 ;space for signal num + spop gr1,pcb ;restore freeze registers + add gr1,gr1,0 ;alu op + add pcb,pcb,9*4 ;move past freeze registers + add pcb,pcb,1*4 ;space for tav + spop lr1,pcb + spop rfb,pcb + spop msp,pcb +; spop FPStat0,pcb +; spop FPStat1,pcb +; spop FPStat2,pcb + add pcb,pcb,3*4 ;space for floating point + add pcb,pcb,4*4 ;space for IPA..Q + mtsrim cr,29-1 + loadm 0,0,gr96,pcb ;pop gr96-gr124 + add pcb,pcb,29*4 ;space for gr96-124 + + spop it1,pcb ;pop locals count + mtsr cr,it1 + loadm 0,0,lr0,pcb ;load locals + + add pcb,kt1,0 ;pcb=heir + mtsr cps,kt0 ;cps with FZ + nop + add pcb,pcb,4 ;space for signal num + spop gr1,pcb ;restore freeze registers + add gr1,gr1,0 ;alu op + spop rab,pcb + spopsr pc0,it1,pcb + spopsr pc1,it1,pcb + add pcb,pcb,4 ;space for pc2 + spopsr CHA,it1,pcb + spopsr CHD,it1,pcb + spopsr CHC,it1,pcb + add pcb,pcb,4 ;space for alu + spopsr ops,it1,pcb + nop + iret + + +;/* +; * _CPU_Context_restore +; * +; * This routine is generally used only to restart self in an +; * efficient manner. It may simply be a label in _CPU_Context_switch. +; * +; * NOTE: May be unnecessary to reload some registers. +; */ +;#if 0 +;void _CPU_Context_restore( +; Context_Control *new_context +;) +;{ +;} +;#endif + + .global __CPU_Context_restore +__CPU_Context_restore: + asneq 107, gr1, gr1 ; syscall + jmpi lr0 ; + nop ; + + .global _a29k_context_restore_sup +_a29k_context_restore_sup: + add kt1,lr2,0 ;kt1 = restore context + mfsr kt0,cps ;current status + const it1,FZ ;FZ constant + andn it1,kt0,it1 ;clear FZ bit + mtsr cps,it1 ;cps without FZ + jmp context_restore + nop + + .global _a29k_context_save_sup +_a29k_context_save_sup: + add pcb,lr2,0 + constn it0,SIG_SYNC + spush pcb,it0 + spush pcb,gr1 + spush pcb,rab ;push rab + spushsr pcb,it0,pc0 ;push specials + spushsr pcb,it0,pc1 + add pcb,pcb,1*4 ;space pc2 + spushsr pcb,it0,CHA ;push CHA + spushsr pcb,it0,CHD ;push CHD + spushsr pcb,it0,CHC ;push CHC + add pcb,pcb,1*4 ;space for alu + spushsr pcb,it0,ops ;push OPS + mfsr it0,cps ;current status +SaveFZState it1,it2 + add pcb,pcb,1*4 ;space for tav + mtsrim chc,0 ;possible DERR +; + spush pcb,lr1 ;push R-stack + spush pcb,rfb ; support + spush pcb,msp ;push M-stack pnt. +; + spush pcb,FPStat0 ;floating point + spush pcb,FPStat1 + spush pcb,FPStat2 +; + add pcb,pcb,4*4 ;space for IPA..Q +; + mtsrim cr,29-1 + storem 0,0,gr96,pcb ;push gr96-124, optional + add pcb,pcb,29*4 ;space for gr96-124 +; + sub kt0,rfb,gr1 ;get bytes in cache + srl kt0,kt0,2 ;adjust to words + sub kt0,kt0,1 + spush pcb,kt0 ;push number of words + mtsr cr,kt0 + storem 0,0,lr0,pcb ;save lr0-rfb +; + mtsr cps,it0 ;cps with FZ +RestoreFZState it1,it2 + + nop + nop + nop +; + iret +; + + .global __CPU_Context_save +__CPU_Context_save: + asneq 108, gr1, gr1 ; syscall + jmpi lr0 ; + nop ; + + +;/* void __ISR_Handler() +; * +; * This routine provides the RTEMS interrupt management. 
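+; *  In this port the handler below is only commented pseudo-code;
+; *  the real interrupt entry points (_intr14/_intr18/_intr19) are
+; *  in sig.S, which this same patch also renames.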
+; * +; */ + +;#if 0 +;void _ISR_Handler() +;{ +; /* +; * This discussion ignores a lot of the ugly details in a real +; * implementation such as saving enough registers/state to be +; * able to do something real. Keep in mind that the goal is +; * to invoke a user's ISR handler which is written in C and +; * uses a certain set of registers. +; * +; * Also note that the exact order is to a large extent flexible. +; * Hardware will dictate a sequence for a certain subset of +; * _ISR_Handler while requirements for setting +; */ + +; /* +; * At entry to "common" _ISR_Handler, the vector number must be +; * available. On some CPUs the hardware puts either the vector +; * number or the offset into the vector table for this ISR in a +; * known place. If the hardware does not give us this information, +; * then the assembly portion of RTEMS for this port will contain +; * a set of distinct interrupt entry points which somehow place +; * the vector number in a known place (which is safe if another +; * interrupt nests this one) and branches to _ISR_Handler. +; * +; * save some or all context on stack +; * may need to save some special interrupt information for exit +; * +; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) +; * if ( _ISR_Nest_level == 0 ) +; * switch to software interrupt stack +; * #endif +; * +; * _ISR_Nest_level++; +; * +; * _Thread_Dispatch_disable_level++; +; * +; * (*_ISR_Vector_table[ vector ])( vector ); +; * +; * --_ISR_Nest_level; +; * +; * if ( _ISR_Nest_level ) +; * goto the label "exit interrupt (simple case)" +; * +; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) +; * restore stack +; * #endif +; * +; * if ( !_Context_Switch_necessary ) +; * goto the label "exit interrupt (simple case)" +; * +; * if ( !_ISR_Signals_to_thread_executing ) +; * goto the label "exit interrupt (simple case)" +; * +; * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch +; * +; * prepare to get out of interrupt +; * return from interrupt (maybe to _ISR_Dispatch) +; * +; * LABEL "exit interrupt (simple case): +; * prepare to get out of interrupt +; * return from interrupt +; */ +;} +;#endif +; .global __ISR_Handler +;__ISR_Handler: +; jmpi lr0 +; nop + + .global _a29k_getops +_a29k_getops: + asneq 113, gr96, gr96 + jmpi lr0 + nop + + .global _a29k_getops_sup +_a29k_getops_sup: + mfsr gr96, ops ; caller wants ops + iret + nop + + .global _a29k_disable +_a29k_disable: + asneq 110, gr96, gr96 + jmpi lr0 + nop + + .global _a29k_disable_sup +_a29k_disable_sup: + mfsr kt0, ops + add gr96, kt0, 0 ; return ops to caller + const kt1, (DI | TD) + consth kt1, (DI | TD) + or kt1, kt0, kt1 + mtsr ops, kt1 + iret + nop + + .global _a29k_disable_all +_a29k_disable_all: + asneq 112, gr96, gr96 + jmpi lr0 + nop + + .global _a29k_disable_all_sup +_a29k_disable_all_sup: + mfsr kt0, ops + const kt1, (DI | TD) + consth kt1, (DI | TD) + or kt1, kt0, kt1 + mtsr ops, kt1 + iret + nop + + .global _a29k_enable_all +_a29k_enable_all: + asneq 111, gr96, gr96 + jmpi lr0 + nop + + .global _a29k_enable_all_sup +_a29k_enable_all_sup: + mfsr kt0, ops + const kt1, (DI | TD) + consth kt1, (DI | TD) + andn kt1, kt0, kt1 + mtsr ops, kt1 + iret + nop + + .global _a29k_enable +_a29k_enable: + asneq 109, gr96, gr96 + jmpi lr0 + nop + + .global _a29k_enable_sup +_a29k_enable_sup: + mfsr kt0, ops + const kt1, (DI | TD) + consth kt1, (DI | TD) + and kt3, lr2, kt1 + andn kt0, kt0, kt1 + or kt1, kt0, kt3 + mtsr ops, kt1 + iret + nop + + .global _a29k_halt +_a29k_halt: + halt + jmp _a29k_halt + nop + + .global 
_a29k_super_mode +_a29k_super_mode: + mfsr gr96, ops + or gr96, gr96, 0x10 + mtsr ops, gr96 + iret + nop + + .global _a29k_as70 +_a29k_as70: + asneq 70,gr96,gr96 + jmpi lr0 + nop diff --git a/c/src/exec/score/cpu/a29k/cpu_asm.s b/c/src/exec/score/cpu/a29k/cpu_asm.s deleted file mode 100644 index a3ed2c59c3..0000000000 --- a/c/src/exec/score/cpu/a29k/cpu_asm.s +++ /dev/null @@ -1,491 +0,0 @@ -;/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s -; * -; * Author: Craig Lebakken -; * -; * COPYRIGHT (c) 1996 by Transition Networks Inc. -; * -; * To anyone who acknowledges that this file is provided "AS IS" -; * without any express or implied warranty: -; * permission to use, copy, modify, and distribute this file -; * for any purpose is hereby granted without fee, provided that -; * the above copyright notice and this notice appears in all -; * copies, and that the name of Transition Networks not be used in -; * advertising or publicity pertaining to distribution of the -; * software without specific, written prior permission. -; * Transition Networks makes no representations about the suitability -; * of this software for any purpose. -; * -; * -; * This file contains the basic algorithms for all assembly code used -; * in an specific CPU port of RTEMS. These algorithms must be implemented -; * in assembly language -; * -; * NOTE: This is supposed to be a .S or .s file NOT a C file. -; * -; * COPYRIGHT (c) 1989-1998. -; * On-Line Applications Research Corporation (OAR). -; * Copyright assigned to U.S. Government, 1994. -; * -; * The license and distribution terms for this file may be -; * found in the file LICENSE in this distribution or at -; * http://www.OARcorp.com/rtems/license.html. -; * -; * $Id$ -; */ - -;/* -; * This is supposed to be an assembly file. This means that system.h -; * and cpu.h should not be included in a "real" cpu_asm file. An -; * implementation in assembly should include "cpu_asm.h> -; */ - -;#include - .include "register.ah" - .include "amd.ah" - .include "pswmacro.ah" -; .extern _bsp_exit -; -; push a register onto the struct - .macro spush, sp, reg - store 0, 0, reg, sp ; push register - add sp, sp, 4 ; adjust stack pointer - .endm -; push a register onto the struct - .macro spushsr, sp, reg, sr - mfsr reg, sr - store 0, 0, reg, sp ; push register - add sp, sp, 4 ; adjust stack pointer - .endm -; pop a register from the struct - .macro spop, reg, sp - load 0, 0, reg, sp - add sp,sp,4 - .endm -; pop a special register from the struct - .macro spopsr, sreg, reg, sp - load 0, 0, reg, sp - mtsr sreg, reg - add sp,sp,4 - .endm -; -;/* -; * _CPU_Context_save_fp_context -; * -; * This routine is responsible for saving the FP context -; * at *fp_context_ptr. If the point to load the FP context -; * from is changed then the pointer is modified by this routine. -; * -; * Sometimes a macro implementation of this is in cpu.h which dereferences -; * the ** and a similarly named routine in this file is passed something -; * like a (Context_Control_fp *). The general rule on making this decision -; * is to avoid writing assembly language. -; */ - -;#if 0 -;void _CPU_Context_save_fp( -; void **fp_context_ptr -;) -;{ -;} -;#endif - .global _CPU_Context_save_fp -_CPU_Context_save_fp: - jmpi lr0 - nop - -;/* -; * _CPU_Context_restore_fp_context -; * -; * This routine is responsible for restoring the FP context -; * at *fp_context_ptr. If the point to load the FP context -; * from is changed then the pointer is modified by this routine. 
-; * -; * Sometimes a macro implementation of this is in cpu.h which dereferences -; * the ** and a similarly named routine in this file is passed something -; * like a (Context_Control_fp *). The general rule on making this decision -; * is to avoid writing assembly language. -; */ - -;#if 0 -;void _CPU_Context_restore_fp( -; void **fp_context_ptr -;) -;{ -;} -;#endif - .global __CPU_Context_restore_fp -__CPU_Context_restore_fp: - jmpi lr0 - nop - -;/* _CPU_Context_switch -; * -; * This routine performs a normal non-FP context switch. -; */ -;#if 0 -;void _CPU_Context_switch( -; Context_Control *run, -; Context_Control *heir -;) -;{ -;} -;#endif - .global __CPU_Context_switch -__CPU_Context_switch: - asneq 106, gr1, gr1 ; syscall - jmpi lr0 ; - nop ; - - - - .global _a29k_context_switch_sup -_a29k_context_switch_sup: - add pcb,lr2,0 - add kt1,lr3,0 ;move heir pointer to safe location - constn it0,SIG_SYNC - spush pcb,it0 - spush pcb,gr1 - spush pcb,rab ;push rab - spushsr pcb,it0,pc0 ;push specials - spushsr pcb,it0,pc1 - add pcb,pcb,1*4 ;space pc2 - spushsr pcb,it0,CHA ;push CHA - spushsr pcb,it0,CHD ;push CHD - spushsr pcb,it0,CHC ;push CHC - add pcb,pcb,1*4 ;space for alu - spushsr pcb,it0,ops ;push OPS - mfsr kt0,cps ;current status - const it1,FZ ;FZ constant - andn it1,kt0,it1 ;clear FZ bit - mtsr cps,it1 ;cps without FZ - add pcb,pcb,1*4 ;space for tav - mtsrim chc,0 ;possible DERR -; - spush pcb,lr1 ;push R-stack - spush pcb,rfb ; support - spush pcb,msp ;push M-stack pnt. -; - add pcb,pcb,3*4 ;space for floating point -; spush pcb,FPStat0 ;floating point -; spush pcb,FPStat1 -; spush pcb,FPStat2 -; - add pcb,pcb,4*4 ;space for IPA..Q -; - mtsrim cr,29-1 - storem 0,0,gr96,pcb ;push gr96-124, optional - add pcb,pcb,29*4 ;space for gr96-124 -; - sub it0,rfb,gr1 ;get bytes in cache - srl it0,it0,2 ;adjust to words - sub it0,it0,1 - spush pcb,it0 - mtsr cr,it0 - storem 0,0,lr0,pcb ;save lr0-rfb -; -context_restore: - add pcb,kt1,0 ;pcb=heir - add pcb,pcb,4 ;space for signal num - spop gr1,pcb ;restore freeze registers - add gr1,gr1,0 ;alu op - add pcb,pcb,9*4 ;move past freeze registers - add pcb,pcb,1*4 ;space for tav - spop lr1,pcb - spop rfb,pcb - spop msp,pcb -; spop FPStat0,pcb -; spop FPStat1,pcb -; spop FPStat2,pcb - add pcb,pcb,3*4 ;space for floating point - add pcb,pcb,4*4 ;space for IPA..Q - mtsrim cr,29-1 - loadm 0,0,gr96,pcb ;pop gr96-gr124 - add pcb,pcb,29*4 ;space for gr96-124 - - spop it1,pcb ;pop locals count - mtsr cr,it1 - loadm 0,0,lr0,pcb ;load locals - - add pcb,kt1,0 ;pcb=heir - mtsr cps,kt0 ;cps with FZ - nop - add pcb,pcb,4 ;space for signal num - spop gr1,pcb ;restore freeze registers - add gr1,gr1,0 ;alu op - spop rab,pcb - spopsr pc0,it1,pcb - spopsr pc1,it1,pcb - add pcb,pcb,4 ;space for pc2 - spopsr CHA,it1,pcb - spopsr CHD,it1,pcb - spopsr CHC,it1,pcb - add pcb,pcb,4 ;space for alu - spopsr ops,it1,pcb - nop - iret - - -;/* -; * _CPU_Context_restore -; * -; * This routine is generally used only to restart self in an -; * efficient manner. It may simply be a label in _CPU_Context_switch. -; * -; * NOTE: May be unnecessary to reload some registers. 
-; */ -;#if 0 -;void _CPU_Context_restore( -; Context_Control *new_context -;) -;{ -;} -;#endif - - .global __CPU_Context_restore -__CPU_Context_restore: - asneq 107, gr1, gr1 ; syscall - jmpi lr0 ; - nop ; - - .global _a29k_context_restore_sup -_a29k_context_restore_sup: - add kt1,lr2,0 ;kt1 = restore context - mfsr kt0,cps ;current status - const it1,FZ ;FZ constant - andn it1,kt0,it1 ;clear FZ bit - mtsr cps,it1 ;cps without FZ - jmp context_restore - nop - - .global _a29k_context_save_sup -_a29k_context_save_sup: - add pcb,lr2,0 - constn it0,SIG_SYNC - spush pcb,it0 - spush pcb,gr1 - spush pcb,rab ;push rab - spushsr pcb,it0,pc0 ;push specials - spushsr pcb,it0,pc1 - add pcb,pcb,1*4 ;space pc2 - spushsr pcb,it0,CHA ;push CHA - spushsr pcb,it0,CHD ;push CHD - spushsr pcb,it0,CHC ;push CHC - add pcb,pcb,1*4 ;space for alu - spushsr pcb,it0,ops ;push OPS - mfsr it0,cps ;current status -SaveFZState it1,it2 - add pcb,pcb,1*4 ;space for tav - mtsrim chc,0 ;possible DERR -; - spush pcb,lr1 ;push R-stack - spush pcb,rfb ; support - spush pcb,msp ;push M-stack pnt. -; - spush pcb,FPStat0 ;floating point - spush pcb,FPStat1 - spush pcb,FPStat2 -; - add pcb,pcb,4*4 ;space for IPA..Q -; - mtsrim cr,29-1 - storem 0,0,gr96,pcb ;push gr96-124, optional - add pcb,pcb,29*4 ;space for gr96-124 -; - sub kt0,rfb,gr1 ;get bytes in cache - srl kt0,kt0,2 ;adjust to words - sub kt0,kt0,1 - spush pcb,kt0 ;push number of words - mtsr cr,kt0 - storem 0,0,lr0,pcb ;save lr0-rfb -; - mtsr cps,it0 ;cps with FZ -RestoreFZState it1,it2 - - nop - nop - nop -; - iret -; - - .global __CPU_Context_save -__CPU_Context_save: - asneq 108, gr1, gr1 ; syscall - jmpi lr0 ; - nop ; - - -;/* void __ISR_Handler() -; * -; * This routine provides the RTEMS interrupt management. -; * -; */ - -;#if 0 -;void _ISR_Handler() -;{ -; /* -; * This discussion ignores a lot of the ugly details in a real -; * implementation such as saving enough registers/state to be -; * able to do something real. Keep in mind that the goal is -; * to invoke a user's ISR handler which is written in C and -; * uses a certain set of registers. -; * -; * Also note that the exact order is to a large extent flexible. -; * Hardware will dictate a sequence for a certain subset of -; * _ISR_Handler while requirements for setting -; */ - -; /* -; * At entry to "common" _ISR_Handler, the vector number must be -; * available. On some CPUs the hardware puts either the vector -; * number or the offset into the vector table for this ISR in a -; * known place. If the hardware does not give us this information, -; * then the assembly portion of RTEMS for this port will contain -; * a set of distinct interrupt entry points which somehow place -; * the vector number in a known place (which is safe if another -; * interrupt nests this one) and branches to _ISR_Handler. 
-; * -; * save some or all context on stack -; * may need to save some special interrupt information for exit -; * -; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) -; * if ( _ISR_Nest_level == 0 ) -; * switch to software interrupt stack -; * #endif -; * -; * _ISR_Nest_level++; -; * -; * _Thread_Dispatch_disable_level++; -; * -; * (*_ISR_Vector_table[ vector ])( vector ); -; * -; * --_ISR_Nest_level; -; * -; * if ( _ISR_Nest_level ) -; * goto the label "exit interrupt (simple case)" -; * -; * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) -; * restore stack -; * #endif -; * -; * if ( !_Context_Switch_necessary ) -; * goto the label "exit interrupt (simple case)" -; * -; * if ( !_ISR_Signals_to_thread_executing ) -; * goto the label "exit interrupt (simple case)" -; * -; * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch -; * -; * prepare to get out of interrupt -; * return from interrupt (maybe to _ISR_Dispatch) -; * -; * LABEL "exit interrupt (simple case): -; * prepare to get out of interrupt -; * return from interrupt -; */ -;} -;#endif -; .global __ISR_Handler -;__ISR_Handler: -; jmpi lr0 -; nop - - .global _a29k_getops -_a29k_getops: - asneq 113, gr96, gr96 - jmpi lr0 - nop - - .global _a29k_getops_sup -_a29k_getops_sup: - mfsr gr96, ops ; caller wants ops - iret - nop - - .global _a29k_disable -_a29k_disable: - asneq 110, gr96, gr96 - jmpi lr0 - nop - - .global _a29k_disable_sup -_a29k_disable_sup: - mfsr kt0, ops - add gr96, kt0, 0 ; return ops to caller - const kt1, (DI | TD) - consth kt1, (DI | TD) - or kt1, kt0, kt1 - mtsr ops, kt1 - iret - nop - - .global _a29k_disable_all -_a29k_disable_all: - asneq 112, gr96, gr96 - jmpi lr0 - nop - - .global _a29k_disable_all_sup -_a29k_disable_all_sup: - mfsr kt0, ops - const kt1, (DI | TD) - consth kt1, (DI | TD) - or kt1, kt0, kt1 - mtsr ops, kt1 - iret - nop - - .global _a29k_enable_all -_a29k_enable_all: - asneq 111, gr96, gr96 - jmpi lr0 - nop - - .global _a29k_enable_all_sup -_a29k_enable_all_sup: - mfsr kt0, ops - const kt1, (DI | TD) - consth kt1, (DI | TD) - andn kt1, kt0, kt1 - mtsr ops, kt1 - iret - nop - - .global _a29k_enable -_a29k_enable: - asneq 109, gr96, gr96 - jmpi lr0 - nop - - .global _a29k_enable_sup -_a29k_enable_sup: - mfsr kt0, ops - const kt1, (DI | TD) - consth kt1, (DI | TD) - and kt3, lr2, kt1 - andn kt0, kt0, kt1 - or kt1, kt0, kt3 - mtsr ops, kt1 - iret - nop - - .global _a29k_halt -_a29k_halt: - halt - jmp _a29k_halt - nop - - .global _a29k_super_mode -_a29k_super_mode: - mfsr gr96, ops - or gr96, gr96, 0x10 - mtsr ops, gr96 - iret - nop - - .global _a29k_as70 -_a29k_as70: - asneq 70,gr96,gr96 - jmpi lr0 - nop diff --git a/c/src/exec/score/cpu/a29k/sig.S b/c/src/exec/score/cpu/a29k/sig.S new file mode 100644 index 0000000000..245570ffcb --- /dev/null +++ b/c/src/exec/score/cpu/a29k/sig.S @@ -0,0 +1,197 @@ +;/* +; * $Id$ +; */ + +; .include "register.ah" + .include "amd.ah" + .include "pswmacro.ah" + .comm WindowSize,4 + .text + .reg it0,gr64 + .reg it1,gr65 + .reg it2,gr66 + .reg it3,gr67 + .reg v0,gr96 + .reg v1,gr97 + .reg v2,gr98 + .reg v3,gr99 + .reg trapreg,it0 + .reg FPStat0,gr79 + .reg FPStat1,gr79 + .reg FPStat2,gr79 + .reg FPStat3,gr79 + + .global _intr14 +_intr14: + const it3,14 + sup_sv + jmp interrupt + nop + + .global _intr18 +_intr18: + const it3,18 + sup_sv + jmp interrupt + nop + + .global _intr19 +_intr19: + const it3,19 + sup_sv + jmp interrupt + nop + +interrupt: + push msp,it3 + push msp,gr1 + push msp,rab + const it0,512 + sub rab,rfb,it0 ;set rab = rfb-512 + pushsr 
msp,it0,pc0 + pushsr msp,it0,pc1 + pushsr msp,it0,pc2 + pushsr msp,it0,cha + pushsr msp,it0,chd + pushsr msp,it0,chc + pushsr msp,it0,alu + pushsr msp,it0,ops + push msp,tav +; +;now come off freeze, and go to user-mode code. +;ensure load/store does not restart +; + mtsrim chc,0 + + mfsr it0, cps + const it1, FZ + consth it1, FZ + andn it0, it0, it1 + const it1,(DI|TD) + consth it1,(DI|TD) + or it0,it1,it0 + mtsr cps, it0 +; fall through to _sigcode + + .extern _a29k_ISR_Handler + .global _sigcode +_sigcode: + + push msp, lr1 ; R stack support + push msp, rfb ; support + push msp, msp ; M stack support + +; push msp, FPStat0 ; Floating point 0 +; push msp, FPStat1 ; Floating point 1 +; push msp, FPStat2 ; Floating point 2 +; push msp, FPStat3 ; Floating point 3 + sub msp,msp,4*4 + + pushsr msp, tav, IPA ; save user mode special + pushsr msp, tav, IPB ; save user mode special + pushsr msp, tav, IPC ; save user mode special + pushsr msp, tav, Q ; save user mode special + + sub msp, msp, 29*4 ; gr96-gr124 + mtsrim cr, 29-1 ; + storem 0, 0, gr96, msp ; + + + const v0, WindowSize ; Window Size value + consth v0, WindowSize ; Window Size value + load 0, 0, v0, v0 ; load Window size + add v2, msp, SIGCTX_RAB ; intr RAB value + + load 0, 0, v2, v2 ; rab value + + sub v1, rfb, v2 ; + cpgeu v1, v1, v0 ; + jmpt v1, nfill ; jmp if spill + add v1, gr1, 8 ; + + cpgtu v1, v1, rfb ; longjump test + jmpt v1, nfill ; + nop ; + +ifill: + add v0, msp, SIGCTX_RAB+4 ; + push v0, rab ; + const v2, fill+4 ; + consth v2, fill+4 ; + + push v0, v2 ; resave PC0 + sub v2, v2, 4 ; + push v0, v2 ; resave PC1 + const v2, 0 ; + + sub v0, v0, 3*4 ; + push v0, v2 ; + +nfill: + cpgtu v0, gr1, rfb ; if gr1>rfb -> gr1=rfb + jmpt v0, lower ; + cpltu v0, gr1, rab ; + jmpt v0, raise ; gr1rfb -> gr1=rfb - jmpt v0, lower ; - cpltu v0, gr1, rab ; - jmpt v0, raise ; gr1 +#include +#include +#include + + .SPACE $PRIVATE$ + .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31 + .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82 + .SPACE $TEXT$ + .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44 + .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY + .SPACE $TEXT$ + .SUBSPA $CODE$ + +/* + * Special register usage for context switch and interrupts + * Stay away from %cr28 which is used for TLB misses on 72000 + */ + +isr_arg0 .reg %cr24 +isr_r9 .reg %cr25 +isr_r8 .reg %cr26 + +/* + * Interrupt stack frame looks like this + * + * offset item + * ----------------------------------------------------------------- + * INTEGER_CONTEXT_OFFSET Context_Control + * FP_CONTEXT_OFFSET Context_Control_fp + * + * It is padded out to a multiple of 64 + */ + + +/*PAGE^L + * void _Generic_ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * We jump here from the interrupt vector. + * The HPPA hardware has done some stuff for us: + * PSW saved in IPSW + * PSW set to 0 + * PSW[E] set to default (0) + * PSW[M] set to 1 iff this is HPMC + * + * IIA queue is frozen (since PSW[Q] is now 0) + * privilege level promoted to 0 + * IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap + * registers GR 1,8,9,16,17,24,25 copied to shadow regs + * SHR 0 1 2 3 4 5 6 + * + * Our vector stub (in the BSP) MUST have done the following: + * + * a) Saved the original %r9 into %isr_r9 (%cr25) + * b) Placed the vector number in %r9 + * c) Was allowed to also destroy $isr_r8 (%cr26), + * but the stub was NOT allowed to destroy any other registers. 
+ * + * The typical stub sequence (in the BSP) should look like this: + * + * a) mtctl %r9,isr_r9 ; (save r9 in cr25) + * b) ldi vector,%r9 ; (load constant vector number in r9) + * c) mtctl %r8,isr_r8 ; (save r8 in cr26) + * d) ldil L%MY_BSP_first_level_interrupt_handler,%r8 + * e) ldo R%MY_BSP_first_level_interrupt_handler(%r8),%r8 + * ; (point to BSP raw handler table) + * f) ldwx,s %r9(%r8),%r8 ; (load value from raw handler table) + * g) bv 0(%r8) ; (call raw handler: _Generic_ISR_Handler) + * h) mfctl isr_r8,%r8 ; (restore r8 from cr26 in delay slot) + * + * Optionally, steps (c) thru (h) _could_ be replaced with a single + * bl,n _Generic_ISR_Handler,%r0 + * + * + */ + .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0 +_Generic_ISR_Handler: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + + mtctl arg0, isr_arg0 + +/* + * save interrupt state + */ + mfctl ipsw, arg0 + stw arg0, IPSW_OFFSET(sp) + + mfctl iir, arg0 + stw arg0, IIR_OFFSET(sp) + + mfctl ior, arg0 + stw arg0, IOR_OFFSET(sp) + + mfctl pcoq, arg0 + stw arg0, PCOQFRONT_OFFSET(sp) + + mtctl %r0, pcoq + mfctl pcoq, arg0 + stw arg0, PCOQBACK_OFFSET(sp) + + mfctl %sar, arg0 + stw arg0, SAR_OFFSET(sp) + +/* + * Build an interrupt frame to hold the contexts we will need. + * We have already saved the interrupt items on the stack + * + * At this point the following registers are damaged wrt the interrupt + * reg current value saved value + * ------------------------------------------------ + * arg0 scratch isr_arg0 (cr24) + * r9 vector number isr_r9 (cr25) + * + * Point to beginning of integer context and + * save the integer context + */ + stw %r1,R1_OFFSET(sp) + stw %r2,R2_OFFSET(sp) + stw %r3,R3_OFFSET(sp) + stw %r4,R4_OFFSET(sp) + stw %r5,R5_OFFSET(sp) + stw %r6,R6_OFFSET(sp) + stw %r7,R7_OFFSET(sp) + stw %r8,R8_OFFSET(sp) +/* + * skip r9 + */ + stw %r10,R10_OFFSET(sp) + stw %r11,R11_OFFSET(sp) + stw %r12,R12_OFFSET(sp) + stw %r13,R13_OFFSET(sp) + stw %r14,R14_OFFSET(sp) + stw %r15,R15_OFFSET(sp) + stw %r16,R16_OFFSET(sp) + stw %r17,R17_OFFSET(sp) + stw %r18,R18_OFFSET(sp) + stw %r19,R19_OFFSET(sp) + stw %r20,R20_OFFSET(sp) + stw %r21,R21_OFFSET(sp) + stw %r22,R22_OFFSET(sp) + stw %r23,R23_OFFSET(sp) + stw %r24,R24_OFFSET(sp) + stw %r25,R25_OFFSET(sp) +/* + * skip arg0 + */ + stw %r27,R27_OFFSET(sp) + stw %r28,R28_OFFSET(sp) + stw %r29,R29_OFFSET(sp) + stw %r30,R30_OFFSET(sp) + stw %r31,R31_OFFSET(sp) + +/* Now most registers are available since they have been saved + * + * The following items are currently wrong in the integer context + * reg current value saved value + * ------------------------------------------------ + * arg0 scratch isr_arg0 (cr24) + * r9 vector number isr_r9 (cr25) + * + * Fix them + */ + + mfctl isr_arg0,%r3 + stw %r3,ARG0_OFFSET(sp) + + mfctl isr_r9,%r3 + stw %r3,R9_OFFSET(sp) + +/* + * At this point we are done with isr_arg0, and isr_r9 control registers + * + * Prepare to re-enter virtual mode + * We need Q in case the interrupt handler enables interrupts + */ + + ldil L%CPU_PSW_DEFAULT, arg0 + ldo R%CPU_PSW_DEFAULT(arg0), arg0 + mtctl arg0, ipsw + +/* + * Now jump to "rest_of_isr_handler" with the rfi + * We are assuming the space queues are all correct already + */ + + ldil L%rest_of_isr_handler, arg0 + ldo R%rest_of_isr_handler(arg0), arg0 + mtctl arg0, pcoq + ldo 4(arg0), arg0 + mtctl arg0, pcoq + + rfi + nop + +/* + * At this point we are back in virtual mode and all our + * normal addressing is once again ok. 
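+ * (the rfi reloaded PSW from the CPU_PSW_DEFAULT value staged in
+ * IPSW above, so the Q bit is valid again)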
+ * + * It is now ok to take an exception or trap + */ + +rest_of_isr_handler: + +/* + * Point to beginning of float context and + * save the floating point context -- doing whatever patches are necessary + */ + + .call ARGW0=GR + bl _CPU_Save_float_context,%r2 + ldo FP_CONTEXT_OFFSET(sp),arg0 + +/* + * save the ptr to interrupt frame as an argument for the interrupt handler + */ + + copy sp, arg1 + +/* + * Advance the frame to point beyond all interrupt contexts (integer & float) + * this also includes the pad to align to 64byte stack boundary + */ + ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp + +/* + * r3 -- &_ISR_Nest_level + * r5 -- value _ISR_Nest_level + * r4 -- &_Thread_Dispatch_disable_level + * r6 -- value _Thread_Dispatch_disable_level + * r9 -- vector number + */ + + .import _ISR_Nest_level,data + ldil L%_ISR_Nest_level,%r3 + ldo R%_ISR_Nest_level(%r3),%r3 + ldw 0(%r3),%r5 + + .import _Thread_Dispatch_disable_level,data + ldil L%_Thread_Dispatch_disable_level,%r4 + ldo R%_Thread_Dispatch_disable_level(%r4),%r4 + ldw 0(%r4),%r6 + +/* + * increment interrupt nest level counter. If outermost interrupt + * switch the stack and squirrel away the previous sp. + */ + addi 1,%r5,%r5 + stw %r5, 0(%r3) + +/* + * compute and save new stack (with frame) + * just in case we are nested -- simpler this way + */ + comibf,= 1,%r5,stack_done + ldo 128(sp),%r7 + +/* + * Switch to interrupt stack allocated by the interrupt manager (intr.c) + */ + .import _CPU_Interrupt_stack_low,data + ldil L%_CPU_Interrupt_stack_low,%r7 + ldw R%_CPU_Interrupt_stack_low(%r7),%r7 + ldo 128(%r7),%r7 + +stack_done: +/* + * save our current stack pointer where the "old sp" is supposed to be + */ + stw sp, -4(%r7) +/* + * and switch stacks (or advance old stack in nested case) + */ + copy %r7, sp + +/* + * increment the dispatch disable level counter. + */ + addi 1,%r6,%r6 + stw %r6, 0(%r4) + +/* + * load address of user handler + * Note: No error checking is done, it is assumed that the + * vector table contains a valid address or a stub + * spurious handler. + */ + .import _ISR_Vector_table,data + ldil L%_ISR_Vector_table,%r8 + ldo R%_ISR_Vector_table(%r8),%r8 + ldwx,s %r9(%r8),%r8 + +/* + * invoke user interrupt handler + * Interrupts are currently disabled, as per RTEMS convention + * The handler has the option of re-enabling interrupts + * NOTE: can not use 'bl' since it uses "pc-relative" addressing + * and we are using a hard coded address from a table + * So... 
we fudge r2 ourselves (ala dynacall) + * arg0 = vector number, arg1 = ptr to rtems_interrupt_frame + */ + copy %r9, %r26 + .call ARGW0=GR, ARGW1=GR + blr %r0, rp + bv,n 0(%r8) + +post_user_interrupt_handler: + +/* + * Back from user handler(s) + * Disable external interrupts (since the interrupt handler could + * have turned them on) and return to the interrupted task stack (assuming + * (_ISR_Nest_level == 0) + */ + + rsm HPPA_PSW_I + HPPA_PSW_R, %r0 + ldw -4(sp), sp + +/* + * r3 -- (most of) &_ISR_Nest_level + * r5 -- value _ISR_Nest_level + * r4 -- (most of) &_Thread_Dispatch_disable_level + * r6 -- value _Thread_Dispatch_disable_level + * r7 -- (most of) &_ISR_Signals_to_thread_executing + * r8 -- value _ISR_Signals_to_thread_executing + */ + + .import _ISR_Nest_level,data + ldil L%_ISR_Nest_level,%r3 + ldw R%_ISR_Nest_level(%r3),%r5 + + .import _Thread_Dispatch_disable_level,data + ldil L%_Thread_Dispatch_disable_level,%r4 + ldw R%_Thread_Dispatch_disable_level(%r4),%r6 + + .import _ISR_Signals_to_thread_executing,data + ldil L%_ISR_Signals_to_thread_executing,%r7 + +/* + * decrement isr nest level + */ + addi -1, %r5, %r5 + stw %r5, R%_ISR_Nest_level(%r3) + +/* + * decrement dispatch disable level counter and, if not 0, go on + */ + addi -1,%r6,%r6 + comibf,= 0,%r6,isr_restore + stw %r6, R%_Thread_Dispatch_disable_level(%r4) + +/* + * check whether or not a context switch is necessary + */ + .import _Context_Switch_necessary,data + ldil L%_Context_Switch_necessary,%r8 + ldw R%_Context_Switch_necessary(%r8),%r8 + comibf,=,n 0,%r8,ISR_dispatch + +/* + * check whether or not a context switch is necessary because an ISR + * sent signals to the interrupted task + */ + ldw R%_ISR_Signals_to_thread_executing(%r7),%r8 + comibt,=,n 0,%r8,isr_restore + + +/* + * OK, something happened while in ISR and we need to switch to a task + * other than the one which was interrupted or the + * ISR_Signals_to_thread_executing case + * We also turn on interrupts, since the interrupted task had them + * on (obviously :-) and Thread_Dispatch is happy to leave ints on. 
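+ *
+ * The sequence below clears the signals flag, sets PSW_I, and
+ * calls _Thread_Dispatch with a temporary 128 byte frame pushed
+ * around the call.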
+ */ + +ISR_dispatch: + stw %r0, R%_ISR_Signals_to_thread_executing(%r7) + + ssm HPPA_PSW_I, %r0 + + .import _Thread_Dispatch,code + .call + bl _Thread_Dispatch,%r2 + ldo 128(sp),sp + + ldo -128(sp),sp + +isr_restore: + +/* + * enable interrupts during most of restore + */ + ssm HPPA_PSW_I, %r0 + +/* + * Get a pointer to beginning of our stack frame + */ + ldo -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1 + +/* + * restore float + */ + .call ARGW0=GR + bl _CPU_Restore_float_context,%r2 + ldo FP_CONTEXT_OFFSET(%arg1), arg0 + + copy %arg1, %arg0 + +/* + * ********** FALL THRU ********** + */ + +/* + * Jump here from bottom of Context_Switch + * Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self + * restore interrupt state + */ + + .EXPORT _CPU_Context_restore +_CPU_Context_restore: + +/* + * restore integer state + */ + ldw R1_OFFSET(arg0),%r1 + ldw R2_OFFSET(arg0),%r2 + ldw R3_OFFSET(arg0),%r3 + ldw R4_OFFSET(arg0),%r4 + ldw R5_OFFSET(arg0),%r5 + ldw R6_OFFSET(arg0),%r6 + ldw R7_OFFSET(arg0),%r7 + ldw R8_OFFSET(arg0),%r8 + ldw R9_OFFSET(arg0),%r9 + ldw R10_OFFSET(arg0),%r10 + ldw R11_OFFSET(arg0),%r11 + ldw R12_OFFSET(arg0),%r12 + ldw R13_OFFSET(arg0),%r13 + ldw R14_OFFSET(arg0),%r14 + ldw R15_OFFSET(arg0),%r15 + ldw R16_OFFSET(arg0),%r16 + ldw R17_OFFSET(arg0),%r17 + ldw R18_OFFSET(arg0),%r18 + ldw R19_OFFSET(arg0),%r19 + ldw R20_OFFSET(arg0),%r20 + ldw R21_OFFSET(arg0),%r21 + ldw R22_OFFSET(arg0),%r22 + ldw R23_OFFSET(arg0),%r23 + ldw R24_OFFSET(arg0),%r24 +/* + * skipping r25; used as scratch register below + * skipping r26 (arg0) until we are done with it + */ + ldw R27_OFFSET(arg0),%r27 + ldw R28_OFFSET(arg0),%r28 + ldw R29_OFFSET(arg0),%r29 +/* + * skipping r30 (sp) until we turn off interrupts + */ + ldw R31_OFFSET(arg0),%r31 + +/* + * Turn off Q & R & I so we can write r30 and interrupt control registers + */ + rsm HPPA_PSW_Q + HPPA_PSW_R + HPPA_PSW_I, %r0 + +/* + * now safe to restore r30 + */ + ldw R30_OFFSET(arg0),%r30 + + ldw IPSW_OFFSET(arg0), %r25 + mtctl %r25, ipsw + + ldw SAR_OFFSET(arg0), %r25 + mtctl %r25, sar + + ldw PCOQFRONT_OFFSET(arg0), %r25 + mtctl %r25, pcoq + + ldw PCOQBACK_OFFSET(arg0), %r25 + mtctl %r25, pcoq + +/* + * Load r25 with interrupts off + */ + ldw R25_OFFSET(arg0),%r25 +/* + * Must load r26 (arg0) last + */ + ldw R26_OFFSET(arg0),%r26 + +isr_exit: + rfi + .EXIT + .PROCEND + +/* + * This section is used to context switch floating point registers. + * Ref: 6-35 of Architecture 1.1 + * + * NOTE: since integer multiply uses the floating point unit, + * we have to save/restore fp on every trap. We cannot + * just try to keep track of fp usage. 
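+ *         (the PA-RISC 1.1 integer multiply is the FPU's xmpyu
+ *         instruction, so even code with no explicit FP use
+ *         dirties the FP register file)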
+ */ + + .align 32 + .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0 +_CPU_Save_float_context: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + fstds,ma %fr0,8(%arg0) + fstds,ma %fr1,8(%arg0) + fstds,ma %fr2,8(%arg0) + fstds,ma %fr3,8(%arg0) + fstds,ma %fr4,8(%arg0) + fstds,ma %fr5,8(%arg0) + fstds,ma %fr6,8(%arg0) + fstds,ma %fr7,8(%arg0) + fstds,ma %fr8,8(%arg0) + fstds,ma %fr9,8(%arg0) + fstds,ma %fr10,8(%arg0) + fstds,ma %fr11,8(%arg0) + fstds,ma %fr12,8(%arg0) + fstds,ma %fr13,8(%arg0) + fstds,ma %fr14,8(%arg0) + fstds,ma %fr15,8(%arg0) + fstds,ma %fr16,8(%arg0) + fstds,ma %fr17,8(%arg0) + fstds,ma %fr18,8(%arg0) + fstds,ma %fr19,8(%arg0) + fstds,ma %fr20,8(%arg0) + fstds,ma %fr21,8(%arg0) + fstds,ma %fr22,8(%arg0) + fstds,ma %fr23,8(%arg0) + fstds,ma %fr24,8(%arg0) + fstds,ma %fr25,8(%arg0) + fstds,ma %fr26,8(%arg0) + fstds,ma %fr27,8(%arg0) + fstds,ma %fr28,8(%arg0) + fstds,ma %fr29,8(%arg0) + fstds,ma %fr30,8(%arg0) + fstds %fr31,0(%arg0) + bv 0(%r2) + addi -(31*8), %arg0, %arg0 ; restore arg0 just for fun + .EXIT + .PROCEND + + .align 32 + .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0 +_CPU_Restore_float_context: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + addi (31*8), %arg0, %arg0 ; point at last double + fldds 0(%arg0),%fr31 + fldds,mb -8(%arg0),%fr30 + fldds,mb -8(%arg0),%fr29 + fldds,mb -8(%arg0),%fr28 + fldds,mb -8(%arg0),%fr27 + fldds,mb -8(%arg0),%fr26 + fldds,mb -8(%arg0),%fr25 + fldds,mb -8(%arg0),%fr24 + fldds,mb -8(%arg0),%fr23 + fldds,mb -8(%arg0),%fr22 + fldds,mb -8(%arg0),%fr21 + fldds,mb -8(%arg0),%fr20 + fldds,mb -8(%arg0),%fr19 + fldds,mb -8(%arg0),%fr18 + fldds,mb -8(%arg0),%fr17 + fldds,mb -8(%arg0),%fr16 + fldds,mb -8(%arg0),%fr15 + fldds,mb -8(%arg0),%fr14 + fldds,mb -8(%arg0),%fr13 + fldds,mb -8(%arg0),%fr12 + fldds,mb -8(%arg0),%fr11 + fldds,mb -8(%arg0),%fr10 + fldds,mb -8(%arg0),%fr9 + fldds,mb -8(%arg0),%fr8 + fldds,mb -8(%arg0),%fr7 + fldds,mb -8(%arg0),%fr6 + fldds,mb -8(%arg0),%fr5 + fldds,mb -8(%arg0),%fr4 + fldds,mb -8(%arg0),%fr3 + fldds,mb -8(%arg0),%fr2 + fldds,mb -8(%arg0),%fr1 + bv 0(%r2) + fldds,mb -8(%arg0),%fr0 + .EXIT + .PROCEND + +/* + * These 2 small routines are unused right now. + * Normally we just go thru _CPU_Save_float_context (and Restore) + * + * Here we just deref the ptr and jump up, letting _CPU_Save_float_context + * do the return for us. + */ + + .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0 +_CPU_Context_save_fp: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + bl _CPU_Save_float_context, %r0 + ldw 0(%arg0), %arg0 + .EXIT + .PROCEND + + .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0 +_CPU_Context_restore_fp: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + bl _CPU_Restore_float_context, %r0 + ldw 0(%arg0), %arg0 + .EXIT + .PROCEND + + +/* + * void _CPU_Context_switch( run_context, heir_context ) + * + * This routine performs a normal non-FP context switch. 
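+ * FP state is deliberately excluded; it is moved separately by
+ * the _CPU_Save_float_context / _CPU_Restore_float_context pair
+ * defined above.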
+ */ + + .align 32 + .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR +_CPU_Context_switch: + .PROC + .CALLINFO FRAME=64 + .ENTRY + +/* + * Save the integer context + */ + stw %r1,R1_OFFSET(arg0) + stw %r2,R2_OFFSET(arg0) + stw %r3,R3_OFFSET(arg0) + stw %r4,R4_OFFSET(arg0) + stw %r5,R5_OFFSET(arg0) + stw %r6,R6_OFFSET(arg0) + stw %r7,R7_OFFSET(arg0) + stw %r8,R8_OFFSET(arg0) + stw %r9,R9_OFFSET(arg0) + stw %r10,R10_OFFSET(arg0) + stw %r11,R11_OFFSET(arg0) + stw %r12,R12_OFFSET(arg0) + stw %r13,R13_OFFSET(arg0) + stw %r14,R14_OFFSET(arg0) + stw %r15,R15_OFFSET(arg0) + stw %r16,R16_OFFSET(arg0) + stw %r17,R17_OFFSET(arg0) + stw %r18,R18_OFFSET(arg0) + stw %r19,R19_OFFSET(arg0) + stw %r20,R20_OFFSET(arg0) + stw %r21,R21_OFFSET(arg0) + stw %r22,R22_OFFSET(arg0) + stw %r23,R23_OFFSET(arg0) + stw %r24,R24_OFFSET(arg0) + stw %r25,R25_OFFSET(arg0) + stw %r26,R26_OFFSET(arg0) + stw %r27,R27_OFFSET(arg0) + stw %r28,R28_OFFSET(arg0) + stw %r29,R29_OFFSET(arg0) + stw %r30,R30_OFFSET(arg0) + stw %r31,R31_OFFSET(arg0) + +/* + * fill in interrupt context section + */ + stw %r2, PCOQFRONT_OFFSET(%arg0) + ldo 4(%r2), %r2 + stw %r2, PCOQBACK_OFFSET(%arg0) + +/* + * Generate a suitable IPSW by using the system default psw + * with the current low bits added in. + */ + + ldil L%CPU_PSW_DEFAULT, %r2 + ldo R%CPU_PSW_DEFAULT(%r2), %r2 + ssm 0, %arg2 + dep %arg2, 31, 8, %r2 + stw %r2, IPSW_OFFSET(%arg0) + +/* + * at this point, the running task context is completely saved + * Now jump to the bottom of the interrupt handler to load the + * heirs context + */ + + b _CPU_Context_restore + copy %arg1, %arg0 + + .EXIT + .PROCEND + + +/* + * Find first bit + * NOTE: + * This is used (and written) only for the ready chain code and + * priority bit maps. + * Any other use constitutes fraud. + * Returns first bit from the least significant side. + * Eg: if input is 0x8001 + * output will indicate the '1' bit and return 0. + * This is counter to HPPA bit numbering which calls this + * bit 31. This way simplifies the macros _CPU_Priority_Mask + * and _CPU_Priority_Bits_index. + * + * NOTE: + * We just use 16 bit version + * does not handle zero case + * + * Based on the UTAH Mach libc version of ffs. 
+ */ + + .align 32 + .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR +hppa_rtems_ffs: + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + +#ifdef RETURN_ERROR_ON_ZERO + comb,= %arg0,%r0,ffsdone ; If arg0 is 0 + ldi -1,%ret0 ; return -1 +#endif + +#if BITFIELD_SIZE == 32 + ldi 31,%ret0 ; Set return to high bit + extru,= %arg0,31,16,%r0 ; If low 16 bits are non-zero + addi,tr -16,%ret0,%ret0 ; subtract 16 from bitpos + shd %r0,%arg0,16,%arg0 ; else shift right 16 bits +#else + ldi 15,%ret0 ; Set return to high bit +#endif + extru,= %arg0,31,8,%r0 ; If low 8 bits are non-zero + addi,tr -8,%ret0,%ret0 ; subtract 8 from bitpos + shd %r0,%arg0,8,%arg0 ; else shift right 8 bits + extru,= %arg0,31,4,%r0 ; If low 4 bits are non-zero + addi,tr -4,%ret0,%ret0 ; subtract 4 from bitpos + shd %r0,%arg0,4,%arg0 ; else shift right 4 bits + extru,= %arg0,31,2,%r0 ; If low 2 bits are non-zero + addi,tr -2,%ret0,%ret0 ; subtract 2 from bitpos + shd %r0,%arg0,2,%arg0 ; else shift right 2 bits + extru,= %arg0,31,1,%r0 ; If low bit is non-zero + addi -1,%ret0,%ret0 ; subtract 1 from bitpos +ffsdone: + bv,n 0(%r2) + nop + .EXIT + .PROCEND diff --git a/c/src/exec/score/cpu/hppa1.1/cpu_asm.s b/c/src/exec/score/cpu/hppa1.1/cpu_asm.s deleted file mode 100644 index e6d9fd08d8..0000000000 --- a/c/src/exec/score/cpu/hppa1.1/cpu_asm.s +++ /dev/null @@ -1,778 +0,0 @@ -/* - * TODO: - * Context_switch needs to only save callee save registers - * I think this means can skip: r1, r2, r19-29, r31 - * Ref: p 3-2 of Procedure Calling Conventions Manual - * This should be #ifndef DEBUG so that debugger has - * accurate visibility into all registers - * - * This file contains the assembly code for the HPPA implementation - * of RTEMS. - * - * COPYRIGHT (c) 1994,95 by Division Incorporated - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - -#include -#include -#include -#include - - .SPACE $PRIVATE$ - .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31 - .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82 - .SPACE $TEXT$ - .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44 - .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY - .SPACE $TEXT$ - .SUBSPA $CODE$ - -/* - * Special register usage for context switch and interrupts - * Stay away from %cr28 which is used for TLB misses on 72000 - */ - -isr_arg0 .reg %cr24 -isr_r9 .reg %cr25 -isr_r8 .reg %cr26 - -/* - * Interrupt stack frame looks like this - * - * offset item - * ----------------------------------------------------------------- - * INTEGER_CONTEXT_OFFSET Context_Control - * FP_CONTEXT_OFFSET Context_Control_fp - * - * It is padded out to a multiple of 64 - */ - - -/*PAGE^L - * void _Generic_ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * - * We jump here from the interrupt vector. - * The HPPA hardware has done some stuff for us: - * PSW saved in IPSW - * PSW set to 0 - * PSW[E] set to default (0) - * PSW[M] set to 1 iff this is HPMC - * - * IIA queue is frozen (since PSW[Q] is now 0) - * privilege level promoted to 0 - * IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap - * registers GR 1,8,9,16,17,24,25 copied to shadow regs - * SHR 0 1 2 3 4 5 6 - * - * Our vector stub (in the BSP) MUST have done the following: - * - * a) Saved the original %r9 into %isr_r9 (%cr25) - * b) Placed the vector number in %r9 - * c) Was allowed to also destroy $isr_r8 (%cr26), - * but the stub was NOT allowed to destroy any other registers. 
- * - * The typical stub sequence (in the BSP) should look like this: - * - * a) mtctl %r9,isr_r9 ; (save r9 in cr25) - * b) ldi vector,%r9 ; (load constant vector number in r9) - * c) mtctl %r8,isr_r8 ; (save r8 in cr26) - * d) ldil L%MY_BSP_first_level_interrupt_handler,%r8 - * e) ldo R%MY_BSP_first_level_interrupt_handler(%r8),%r8 - * ; (point to BSP raw handler table) - * f) ldwx,s %r9(%r8),%r8 ; (load value from raw handler table) - * g) bv 0(%r8) ; (call raw handler: _Generic_ISR_Handler) - * h) mfctl isr_r8,%r8 ; (restore r8 from cr26 in delay slot) - * - * Optionally, steps (c) thru (h) _could_ be replaced with a single - * bl,n _Generic_ISR_Handler,%r0 - * - * - */ - .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0 -_Generic_ISR_Handler: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - - mtctl arg0, isr_arg0 - -/* - * save interrupt state - */ - mfctl ipsw, arg0 - stw arg0, IPSW_OFFSET(sp) - - mfctl iir, arg0 - stw arg0, IIR_OFFSET(sp) - - mfctl ior, arg0 - stw arg0, IOR_OFFSET(sp) - - mfctl pcoq, arg0 - stw arg0, PCOQFRONT_OFFSET(sp) - - mtctl %r0, pcoq - mfctl pcoq, arg0 - stw arg0, PCOQBACK_OFFSET(sp) - - mfctl %sar, arg0 - stw arg0, SAR_OFFSET(sp) - -/* - * Build an interrupt frame to hold the contexts we will need. - * We have already saved the interrupt items on the stack - * - * At this point the following registers are damaged wrt the interrupt - * reg current value saved value - * ------------------------------------------------ - * arg0 scratch isr_arg0 (cr24) - * r9 vector number isr_r9 (cr25) - * - * Point to beginning of integer context and - * save the integer context - */ - stw %r1,R1_OFFSET(sp) - stw %r2,R2_OFFSET(sp) - stw %r3,R3_OFFSET(sp) - stw %r4,R4_OFFSET(sp) - stw %r5,R5_OFFSET(sp) - stw %r6,R6_OFFSET(sp) - stw %r7,R7_OFFSET(sp) - stw %r8,R8_OFFSET(sp) -/* - * skip r9 - */ - stw %r10,R10_OFFSET(sp) - stw %r11,R11_OFFSET(sp) - stw %r12,R12_OFFSET(sp) - stw %r13,R13_OFFSET(sp) - stw %r14,R14_OFFSET(sp) - stw %r15,R15_OFFSET(sp) - stw %r16,R16_OFFSET(sp) - stw %r17,R17_OFFSET(sp) - stw %r18,R18_OFFSET(sp) - stw %r19,R19_OFFSET(sp) - stw %r20,R20_OFFSET(sp) - stw %r21,R21_OFFSET(sp) - stw %r22,R22_OFFSET(sp) - stw %r23,R23_OFFSET(sp) - stw %r24,R24_OFFSET(sp) - stw %r25,R25_OFFSET(sp) -/* - * skip arg0 - */ - stw %r27,R27_OFFSET(sp) - stw %r28,R28_OFFSET(sp) - stw %r29,R29_OFFSET(sp) - stw %r30,R30_OFFSET(sp) - stw %r31,R31_OFFSET(sp) - -/* Now most registers are available since they have been saved - * - * The following items are currently wrong in the integer context - * reg current value saved value - * ------------------------------------------------ - * arg0 scratch isr_arg0 (cr24) - * r9 vector number isr_r9 (cr25) - * - * Fix them - */ - - mfctl isr_arg0,%r3 - stw %r3,ARG0_OFFSET(sp) - - mfctl isr_r9,%r3 - stw %r3,R9_OFFSET(sp) - -/* - * At this point we are done with isr_arg0, and isr_r9 control registers - * - * Prepare to re-enter virtual mode - * We need Q in case the interrupt handler enables interrupts - */ - - ldil L%CPU_PSW_DEFAULT, arg0 - ldo R%CPU_PSW_DEFAULT(arg0), arg0 - mtctl arg0, ipsw - -/* - * Now jump to "rest_of_isr_handler" with the rfi - * We are assuming the space queues are all correct already - */ - - ldil L%rest_of_isr_handler, arg0 - ldo R%rest_of_isr_handler(arg0), arg0 - mtctl arg0, pcoq - ldo 4(arg0), arg0 - mtctl arg0, pcoq - - rfi - nop - -/* - * At this point we are back in virtual mode and all our - * normal addressing is once again ok. 
- * - * It is now ok to take an exception or trap - */ - -rest_of_isr_handler: - -/* - * Point to beginning of float context and - * save the floating point context -- doing whatever patches are necessary - */ - - .call ARGW0=GR - bl _CPU_Save_float_context,%r2 - ldo FP_CONTEXT_OFFSET(sp),arg0 - -/* - * save the ptr to interrupt frame as an argument for the interrupt handler - */ - - copy sp, arg1 - -/* - * Advance the frame to point beyond all interrupt contexts (integer & float) - * this also includes the pad to align to 64byte stack boundary - */ - ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp - -/* - * r3 -- &_ISR_Nest_level - * r5 -- value _ISR_Nest_level - * r4 -- &_Thread_Dispatch_disable_level - * r6 -- value _Thread_Dispatch_disable_level - * r9 -- vector number - */ - - .import _ISR_Nest_level,data - ldil L%_ISR_Nest_level,%r3 - ldo R%_ISR_Nest_level(%r3),%r3 - ldw 0(%r3),%r5 - - .import _Thread_Dispatch_disable_level,data - ldil L%_Thread_Dispatch_disable_level,%r4 - ldo R%_Thread_Dispatch_disable_level(%r4),%r4 - ldw 0(%r4),%r6 - -/* - * increment interrupt nest level counter. If outermost interrupt - * switch the stack and squirrel away the previous sp. - */ - addi 1,%r5,%r5 - stw %r5, 0(%r3) - -/* - * compute and save new stack (with frame) - * just in case we are nested -- simpler this way - */ - comibf,= 1,%r5,stack_done - ldo 128(sp),%r7 - -/* - * Switch to interrupt stack allocated by the interrupt manager (intr.c) - */ - .import _CPU_Interrupt_stack_low,data - ldil L%_CPU_Interrupt_stack_low,%r7 - ldw R%_CPU_Interrupt_stack_low(%r7),%r7 - ldo 128(%r7),%r7 - -stack_done: -/* - * save our current stack pointer where the "old sp" is supposed to be - */ - stw sp, -4(%r7) -/* - * and switch stacks (or advance old stack in nested case) - */ - copy %r7, sp - -/* - * increment the dispatch disable level counter. - */ - addi 1,%r6,%r6 - stw %r6, 0(%r4) - -/* - * load address of user handler - * Note: No error checking is done, it is assumed that the - * vector table contains a valid address or a stub - * spurious handler. - */ - .import _ISR_Vector_table,data - ldil L%_ISR_Vector_table,%r8 - ldo R%_ISR_Vector_table(%r8),%r8 - ldwx,s %r9(%r8),%r8 - -/* - * invoke user interrupt handler - * Interrupts are currently disabled, as per RTEMS convention - * The handler has the option of re-enabling interrupts - * NOTE: can not use 'bl' since it uses "pc-relative" addressing - * and we are using a hard coded address from a table - * So... 
we fudge r2 ourselves (ala dynacall) - * arg0 = vector number, arg1 = ptr to rtems_interrupt_frame - */ - copy %r9, %r26 - .call ARGW0=GR, ARGW1=GR - blr %r0, rp - bv,n 0(%r8) - -post_user_interrupt_handler: - -/* - * Back from user handler(s) - * Disable external interrupts (since the interrupt handler could - * have turned them on) and return to the interrupted task stack (assuming - * (_ISR_Nest_level == 0) - */ - - rsm HPPA_PSW_I + HPPA_PSW_R, %r0 - ldw -4(sp), sp - -/* - * r3 -- (most of) &_ISR_Nest_level - * r5 -- value _ISR_Nest_level - * r4 -- (most of) &_Thread_Dispatch_disable_level - * r6 -- value _Thread_Dispatch_disable_level - * r7 -- (most of) &_ISR_Signals_to_thread_executing - * r8 -- value _ISR_Signals_to_thread_executing - */ - - .import _ISR_Nest_level,data - ldil L%_ISR_Nest_level,%r3 - ldw R%_ISR_Nest_level(%r3),%r5 - - .import _Thread_Dispatch_disable_level,data - ldil L%_Thread_Dispatch_disable_level,%r4 - ldw R%_Thread_Dispatch_disable_level(%r4),%r6 - - .import _ISR_Signals_to_thread_executing,data - ldil L%_ISR_Signals_to_thread_executing,%r7 - -/* - * decrement isr nest level - */ - addi -1, %r5, %r5 - stw %r5, R%_ISR_Nest_level(%r3) - -/* - * decrement dispatch disable level counter and, if not 0, go on - */ - addi -1,%r6,%r6 - comibf,= 0,%r6,isr_restore - stw %r6, R%_Thread_Dispatch_disable_level(%r4) - -/* - * check whether or not a context switch is necessary - */ - .import _Context_Switch_necessary,data - ldil L%_Context_Switch_necessary,%r8 - ldw R%_Context_Switch_necessary(%r8),%r8 - comibf,=,n 0,%r8,ISR_dispatch - -/* - * check whether or not a context switch is necessary because an ISR - * sent signals to the interrupted task - */ - ldw R%_ISR_Signals_to_thread_executing(%r7),%r8 - comibt,=,n 0,%r8,isr_restore - - -/* - * OK, something happened while in ISR and we need to switch to a task - * other than the one which was interrupted or the - * ISR_Signals_to_thread_executing case - * We also turn on interrupts, since the interrupted task had them - * on (obviously :-) and Thread_Dispatch is happy to leave ints on. 
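Stripped of the register juggling, the bookkeeping wrapped around the user handler reduces to the following C model (the function name is illustrative; the real code also switches to the dedicated interrupt stack on the outermost entry, and runs with interrupts disabled except where noted):

    extern volatile unsigned int _ISR_Nest_level;
    extern volatile unsigned int _Thread_Dispatch_disable_level;
    extern volatile unsigned int _Context_Switch_necessary;
    extern volatile unsigned int _ISR_Signals_to_thread_executing;
    extern void ( *_ISR_Vector_table[] )( unsigned int vector );
    extern void _Thread_Dispatch( void );

    void generic_isr_model( unsigned int vector )
    {
      _ISR_Nest_level++;                 /* outermost entry also switches to */
      _Thread_Dispatch_disable_level++;  /* the dedicated interrupt stack    */

      ( *_ISR_Vector_table[ vector ] )( vector );  /* ints off, by convention */

      _ISR_Nest_level--;
      if ( --_Thread_Dispatch_disable_level != 0 )
        return;                          /* nested, or dispatching disabled  */

      if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
        _ISR_Signals_to_thread_executing = 0;
        _Thread_Dispatch();              /* with external interrupts back on */
      }
    }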
- */ - -ISR_dispatch: - stw %r0, R%_ISR_Signals_to_thread_executing(%r7) - - ssm HPPA_PSW_I, %r0 - - .import _Thread_Dispatch,code - .call - bl _Thread_Dispatch,%r2 - ldo 128(sp),sp - - ldo -128(sp),sp - -isr_restore: - -/* - * enable interrupts during most of restore - */ - ssm HPPA_PSW_I, %r0 - -/* - * Get a pointer to beginning of our stack frame - */ - ldo -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1 - -/* - * restore float - */ - .call ARGW0=GR - bl _CPU_Restore_float_context,%r2 - ldo FP_CONTEXT_OFFSET(%arg1), arg0 - - copy %arg1, %arg0 - -/* - * ********** FALL THRU ********** - */ - -/* - * Jump here from bottom of Context_Switch - * Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self - * restore interrupt state - */ - - .EXPORT _CPU_Context_restore -_CPU_Context_restore: - -/* - * restore integer state - */ - ldw R1_OFFSET(arg0),%r1 - ldw R2_OFFSET(arg0),%r2 - ldw R3_OFFSET(arg0),%r3 - ldw R4_OFFSET(arg0),%r4 - ldw R5_OFFSET(arg0),%r5 - ldw R6_OFFSET(arg0),%r6 - ldw R7_OFFSET(arg0),%r7 - ldw R8_OFFSET(arg0),%r8 - ldw R9_OFFSET(arg0),%r9 - ldw R10_OFFSET(arg0),%r10 - ldw R11_OFFSET(arg0),%r11 - ldw R12_OFFSET(arg0),%r12 - ldw R13_OFFSET(arg0),%r13 - ldw R14_OFFSET(arg0),%r14 - ldw R15_OFFSET(arg0),%r15 - ldw R16_OFFSET(arg0),%r16 - ldw R17_OFFSET(arg0),%r17 - ldw R18_OFFSET(arg0),%r18 - ldw R19_OFFSET(arg0),%r19 - ldw R20_OFFSET(arg0),%r20 - ldw R21_OFFSET(arg0),%r21 - ldw R22_OFFSET(arg0),%r22 - ldw R23_OFFSET(arg0),%r23 - ldw R24_OFFSET(arg0),%r24 -/* - * skipping r25; used as scratch register below - * skipping r26 (arg0) until we are done with it - */ - ldw R27_OFFSET(arg0),%r27 - ldw R28_OFFSET(arg0),%r28 - ldw R29_OFFSET(arg0),%r29 -/* - * skipping r30 (sp) until we turn off interrupts - */ - ldw R31_OFFSET(arg0),%r31 - -/* - * Turn off Q & R & I so we can write r30 and interrupt control registers - */ - rsm HPPA_PSW_Q + HPPA_PSW_R + HPPA_PSW_I, %r0 - -/* - * now safe to restore r30 - */ - ldw R30_OFFSET(arg0),%r30 - - ldw IPSW_OFFSET(arg0), %r25 - mtctl %r25, ipsw - - ldw SAR_OFFSET(arg0), %r25 - mtctl %r25, sar - - ldw PCOQFRONT_OFFSET(arg0), %r25 - mtctl %r25, pcoq - - ldw PCOQBACK_OFFSET(arg0), %r25 - mtctl %r25, pcoq - -/* - * Load r25 with interrupts off - */ - ldw R25_OFFSET(arg0),%r25 -/* - * Must load r26 (arg0) last - */ - ldw R26_OFFSET(arg0),%r26 - -isr_exit: - rfi - .EXIT - .PROCEND - -/* - * This section is used to context switch floating point registers. - * Ref: 6-35 of Architecture 1.1 - * - * NOTE: since integer multiply uses the floating point unit, - * we have to save/restore fp on every trap. We cannot - * just try to keep track of fp usage. 
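Why lazy FP switching is ruled out can be seen from ordinary integer code: PA-RISC 1.1 has no integer multiply instruction, so the compiler performs the multiply in the FPU (xmpyu on floating point registers). A sketch of the effect, not code from this patch:

    /* Even this "integer only" function dirties FP state on PA-RISC,
     * because the 32x32 multiply is performed in the FPU (xmpyu): */
    unsigned int scale( unsigned int ticks, unsigned int factor )
    {
      return ticks * factor;
    }

Hence the unconditional _CPU_Save_float_context/_CPU_Restore_float_context pair in the interrupt path above.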
- */ - - .align 32 - .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0 -_CPU_Save_float_context: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - fstds,ma %fr0,8(%arg0) - fstds,ma %fr1,8(%arg0) - fstds,ma %fr2,8(%arg0) - fstds,ma %fr3,8(%arg0) - fstds,ma %fr4,8(%arg0) - fstds,ma %fr5,8(%arg0) - fstds,ma %fr6,8(%arg0) - fstds,ma %fr7,8(%arg0) - fstds,ma %fr8,8(%arg0) - fstds,ma %fr9,8(%arg0) - fstds,ma %fr10,8(%arg0) - fstds,ma %fr11,8(%arg0) - fstds,ma %fr12,8(%arg0) - fstds,ma %fr13,8(%arg0) - fstds,ma %fr14,8(%arg0) - fstds,ma %fr15,8(%arg0) - fstds,ma %fr16,8(%arg0) - fstds,ma %fr17,8(%arg0) - fstds,ma %fr18,8(%arg0) - fstds,ma %fr19,8(%arg0) - fstds,ma %fr20,8(%arg0) - fstds,ma %fr21,8(%arg0) - fstds,ma %fr22,8(%arg0) - fstds,ma %fr23,8(%arg0) - fstds,ma %fr24,8(%arg0) - fstds,ma %fr25,8(%arg0) - fstds,ma %fr26,8(%arg0) - fstds,ma %fr27,8(%arg0) - fstds,ma %fr28,8(%arg0) - fstds,ma %fr29,8(%arg0) - fstds,ma %fr30,8(%arg0) - fstds %fr31,0(%arg0) - bv 0(%r2) - addi -(31*8), %arg0, %arg0 ; restore arg0 just for fun - .EXIT - .PROCEND - - .align 32 - .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0 -_CPU_Restore_float_context: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - addi (31*8), %arg0, %arg0 ; point at last double - fldds 0(%arg0),%fr31 - fldds,mb -8(%arg0),%fr30 - fldds,mb -8(%arg0),%fr29 - fldds,mb -8(%arg0),%fr28 - fldds,mb -8(%arg0),%fr27 - fldds,mb -8(%arg0),%fr26 - fldds,mb -8(%arg0),%fr25 - fldds,mb -8(%arg0),%fr24 - fldds,mb -8(%arg0),%fr23 - fldds,mb -8(%arg0),%fr22 - fldds,mb -8(%arg0),%fr21 - fldds,mb -8(%arg0),%fr20 - fldds,mb -8(%arg0),%fr19 - fldds,mb -8(%arg0),%fr18 - fldds,mb -8(%arg0),%fr17 - fldds,mb -8(%arg0),%fr16 - fldds,mb -8(%arg0),%fr15 - fldds,mb -8(%arg0),%fr14 - fldds,mb -8(%arg0),%fr13 - fldds,mb -8(%arg0),%fr12 - fldds,mb -8(%arg0),%fr11 - fldds,mb -8(%arg0),%fr10 - fldds,mb -8(%arg0),%fr9 - fldds,mb -8(%arg0),%fr8 - fldds,mb -8(%arg0),%fr7 - fldds,mb -8(%arg0),%fr6 - fldds,mb -8(%arg0),%fr5 - fldds,mb -8(%arg0),%fr4 - fldds,mb -8(%arg0),%fr3 - fldds,mb -8(%arg0),%fr2 - fldds,mb -8(%arg0),%fr1 - bv 0(%r2) - fldds,mb -8(%arg0),%fr0 - .EXIT - .PROCEND - -/* - * These 2 small routines are unused right now. - * Normally we just go thru _CPU_Save_float_context (and Restore) - * - * Here we just deref the ptr and jump up, letting _CPU_Save_float_context - * do the return for us. - */ - - .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0 -_CPU_Context_save_fp: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - bl _CPU_Save_float_context, %r0 - ldw 0(%arg0), %arg0 - .EXIT - .PROCEND - - .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0 -_CPU_Context_restore_fp: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - bl _CPU_Restore_float_context, %r0 - ldw 0(%arg0), %arg0 - .EXIT - .PROCEND - - -/* - * void _CPU_Context_switch( run_context, heir_context ) - * - * This routine performs a normal non-FP context switch. 
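The two small routines above are thin dereference-and-tail-call wrappers: the score passes the address of a pointer to the FP save area, and the wrappers simply dereference it in the delay slot before branching. In C terms (types illustrative; the real Context_Control_fp is defined in the port's cpu.h):

    typedef struct { double fr[ 32 ]; } Context_Control_fp;

    extern void _CPU_Save_float_context( Context_Control_fp *fp_area );
    extern void _CPU_Restore_float_context( Context_Control_fp *fp_area );

    void _CPU_Context_save_fp_model( Context_Control_fp **fp_context_ptr )
    {
      _CPU_Save_float_context( *fp_context_ptr );
    }

    void _CPU_Context_restore_fp_model( Context_Control_fp **fp_context_ptr )
    {
      _CPU_Restore_float_context( *fp_context_ptr );
    }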
- */ - - .align 32 - .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR -_CPU_Context_switch: - .PROC - .CALLINFO FRAME=64 - .ENTRY - -/* - * Save the integer context - */ - stw %r1,R1_OFFSET(arg0) - stw %r2,R2_OFFSET(arg0) - stw %r3,R3_OFFSET(arg0) - stw %r4,R4_OFFSET(arg0) - stw %r5,R5_OFFSET(arg0) - stw %r6,R6_OFFSET(arg0) - stw %r7,R7_OFFSET(arg0) - stw %r8,R8_OFFSET(arg0) - stw %r9,R9_OFFSET(arg0) - stw %r10,R10_OFFSET(arg0) - stw %r11,R11_OFFSET(arg0) - stw %r12,R12_OFFSET(arg0) - stw %r13,R13_OFFSET(arg0) - stw %r14,R14_OFFSET(arg0) - stw %r15,R15_OFFSET(arg0) - stw %r16,R16_OFFSET(arg0) - stw %r17,R17_OFFSET(arg0) - stw %r18,R18_OFFSET(arg0) - stw %r19,R19_OFFSET(arg0) - stw %r20,R20_OFFSET(arg0) - stw %r21,R21_OFFSET(arg0) - stw %r22,R22_OFFSET(arg0) - stw %r23,R23_OFFSET(arg0) - stw %r24,R24_OFFSET(arg0) - stw %r25,R25_OFFSET(arg0) - stw %r26,R26_OFFSET(arg0) - stw %r27,R27_OFFSET(arg0) - stw %r28,R28_OFFSET(arg0) - stw %r29,R29_OFFSET(arg0) - stw %r30,R30_OFFSET(arg0) - stw %r31,R31_OFFSET(arg0) - -/* - * fill in interrupt context section - */ - stw %r2, PCOQFRONT_OFFSET(%arg0) - ldo 4(%r2), %r2 - stw %r2, PCOQBACK_OFFSET(%arg0) - -/* - * Generate a suitable IPSW by using the system default psw - * with the current low bits added in. - */ - - ldil L%CPU_PSW_DEFAULT, %r2 - ldo R%CPU_PSW_DEFAULT(%r2), %r2 - ssm 0, %arg2 - dep %arg2, 31, 8, %r2 - stw %r2, IPSW_OFFSET(%arg0) - -/* - * at this point, the running task context is completely saved - * Now jump to the bottom of the interrupt handler to load the - * heirs context - */ - - b _CPU_Context_restore - copy %arg1, %arg0 - - .EXIT - .PROCEND - - -/* - * Find first bit - * NOTE: - * This is used (and written) only for the ready chain code and - * priority bit maps. - * Any other use constitutes fraud. - * Returns first bit from the least significant side. - * Eg: if input is 0x8001 - * output will indicate the '1' bit and return 0. - * This is counter to HPPA bit numbering which calls this - * bit 31. This way simplifies the macros _CPU_Priority_Mask - * and _CPU_Priority_Bits_index. - * - * NOTE: - * We just use 16 bit version - * does not handle zero case - * - * Based on the UTAH Mach libc version of ffs. 
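The IPSW fixup inside _CPU_Context_switch above amounts to splicing the current low interrupt bits into the system default PSW. As a C model (CPU_PSW_DEFAULT is the port's constant; declaring it extern here is only for illustration):

    extern const unsigned int CPU_PSW_DEFAULT;

    unsigned int make_context_ipsw_model( unsigned int current_psw )
    {
      /* default PSW with the low eight bits taken from the current PSW,
       * which is what dep %arg2,31,8,%r2 deposits */
      return ( CPU_PSW_DEFAULT & ~0xffu ) | ( current_psw & 0xffu );
    }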
- */ - - .align 32 - .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR -hppa_rtems_ffs: - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - -#ifdef RETURN_ERROR_ON_ZERO - comb,= %arg0,%r0,ffsdone ; If arg0 is 0 - ldi -1,%ret0 ; return -1 -#endif - -#if BITFIELD_SIZE == 32 - ldi 31,%ret0 ; Set return to high bit - extru,= %arg0,31,16,%r0 ; If low 16 bits are non-zero - addi,tr -16,%ret0,%ret0 ; subtract 16 from bitpos - shd %r0,%arg0,16,%arg0 ; else shift right 16 bits -#else - ldi 15,%ret0 ; Set return to high bit -#endif - extru,= %arg0,31,8,%r0 ; If low 8 bits are non-zero - addi,tr -8,%ret0,%ret0 ; subtract 8 from bitpos - shd %r0,%arg0,8,%arg0 ; else shift right 8 bits - extru,= %arg0,31,4,%r0 ; If low 4 bits are non-zero - addi,tr -4,%ret0,%ret0 ; subtract 4 from bitpos - shd %r0,%arg0,4,%arg0 ; else shift right 4 bits - extru,= %arg0,31,2,%r0 ; If low 2 bits are non-zero - addi,tr -2,%ret0,%ret0 ; subtract 2 from bitpos - shd %r0,%arg0,2,%arg0 ; else shift right 2 bits - extru,= %arg0,31,1,%r0 ; If low bit is non-zero - addi -1,%ret0,%ret0 ; subtract 1 from bitpos -ffsdone: - bv,n 0(%r2) - nop - .EXIT - .PROCEND diff --git a/c/src/exec/score/cpu/hppa1.1/rtems.S b/c/src/exec/score/cpu/hppa1.1/rtems.S new file mode 100644 index 0000000000..3f02c9d006 --- /dev/null +++ b/c/src/exec/score/cpu/hppa1.1/rtems.S @@ -0,0 +1,53 @@ +/* rtems.S + * + * This file contains the single entry point code for + * the HPPA implementation of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + +#include +#include + + .SPACE $PRIVATE$ + .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31 + .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82 + .SPACE $TEXT$ + .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44 + .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY + .SPACE $TEXT$ + .SUBSPA $CODE$ + + .align 32 + .EXPORT cpu_jump_to_directive,ENTRY,PRIV_LEV=0 +cpu_jump_to_directive + .PROC + .CALLINFO FRAME=0,NO_CALLS + .ENTRY + +# invoke user interrupt handler + +# XXX: look at register usage and code +# XXX: this is not necessarily right!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# r9 = directive number + + .import _Entry_points,data + ldil L%_Entry_points,%r8 + ldo R%_Entry_points(%r8),%r8 + ldwx,s %r9(%r8),%r8 + + .call ARGW0=GR + bv,n 0(%r8) + nop + + .EXIT + .PROCEND + diff --git a/c/src/exec/score/cpu/hppa1.1/rtems.s b/c/src/exec/score/cpu/hppa1.1/rtems.s deleted file mode 100644 index 3f02c9d006..0000000000 --- a/c/src/exec/score/cpu/hppa1.1/rtems.s +++ /dev/null @@ -1,53 +0,0 @@ -/* rtems.S - * - * This file contains the single entry point code for - * the HPPA implementation of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. 
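hppa_rtems_ffs above is a halving search for the least significant set bit. The equivalent computation in C (a model only; as in the assembly, a zero argument is not handled):

    int hppa_rtems_ffs_model( unsigned int value )
    {
      int bit = 0;                   /* 16 bit version, as the NOTE says */
      if ( ( value & 0x00ff ) == 0 ) { value >>= 8; bit += 8; }
      if ( ( value & 0x000f ) == 0 ) { value >>= 4; bit += 4; }
      if ( ( value & 0x0003 ) == 0 ) { value >>= 2; bit += 2; }
      if ( ( value & 0x0001 ) == 0 ) { bit += 1;              }
      return bit;                    /* e.g. 0x8001 -> 0, 0x8000 -> 15   */
    }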
- * - * $Id$ - */ - -#include -#include - - .SPACE $PRIVATE$ - .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31 - .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82 - .SPACE $TEXT$ - .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44 - .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY - .SPACE $TEXT$ - .SUBSPA $CODE$ - - .align 32 - .EXPORT cpu_jump_to_directive,ENTRY,PRIV_LEV=0 -cpu_jump_to_directive - .PROC - .CALLINFO FRAME=0,NO_CALLS - .ENTRY - -# invoke user interrupt handler - -# XXX: look at register usage and code -# XXX: this is not necessarily right!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# r9 = directive number - - .import _Entry_points,data - ldil L%_Entry_points,%r8 - ldo R%_Entry_points(%r8),%r8 - ldwx,s %r9(%r8),%r8 - - .call ARGW0=GR - bv,n 0(%r8) - nop - - .EXIT - .PROCEND - diff --git a/c/src/exec/score/cpu/i386/Makefile.in b/c/src/exec/score/cpu/i386/Makefile.in index af64e214dd..241f99c830 100644 --- a/c/src/exec/score/cpu/i386/Makefile.in +++ b/c/src/exec/score/cpu/i386/Makefile.in @@ -21,10 +21,10 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/i386.h $(srcdir)/i386types.h # i386.h is handled specially EXTERNAL_H_FILES = $(srcdir)/asm.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S S_PIECES=cpu_asm rtems -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/i386/cpu_asm.S b/c/src/exec/score/cpu/i386/cpu_asm.S new file mode 100644 index 0000000000..4ac4dab626 --- /dev/null +++ b/c/src/exec/score/cpu/i386/cpu_asm.S @@ -0,0 +1,282 @@ +/* cpu_asm.s + * + * This file contains all assembly code for the Intel i386 implementation + * of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + +#include + +/* + * Format of i386 Register structure + */ + +.set REG_EFLAGS, 0 +.set REG_ESP, REG_EFLAGS + 4 +.set REG_EBP, REG_ESP + 4 +.set REG_EBX, REG_EBP + 4 +.set REG_ESI, REG_EBX + 4 +.set REG_EDI, REG_ESI + 4 +.set SIZE_REGS, REG_EDI + 4 + + BEGIN_CODE + +/* + * void _CPU_Context_switch( run_context, heir_context ) + * + * This routine performs a normal non-FP context. + */ + + .p2align 1 + PUBLIC (_CPU_Context_switch) + +.set RUNCONTEXT_ARG, 4 # save context argument +.set HEIRCONTEXT_ARG, 8 # restore context argument + +SYM (_CPU_Context_switch): + movl RUNCONTEXT_ARG(esp),eax # eax = running threads context + pushf # push eflags + popl REG_EFLAGS(eax) # save eflags + movl esp,REG_ESP(eax) # save stack pointer + movl ebp,REG_EBP(eax) # save base pointer + movl ebx,REG_EBX(eax) # save ebx + movl esi,REG_ESI(eax) # save source register + movl edi,REG_EDI(eax) # save destination register + + movl HEIRCONTEXT_ARG(esp),eax # eax = heir threads context + +restore: + pushl REG_EFLAGS(eax) # push eflags + popf # restore eflags + movl REG_ESP(eax),esp # restore stack pointer + movl REG_EBP(eax),ebp # restore base pointer + movl REG_EBX(eax),ebx # restore ebx + movl REG_ESI(eax),esi # restore source register + movl REG_EDI(eax),edi # restore destination register + ret + +/* + * NOTE: May be unnecessary to reload some registers. 
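The .set offsets above define the saved context as, in effect (illustrative C mirror; the authoritative Context_Control lives in the i386 cpu.h):

    typedef struct {
      unsigned int eflags;   /* REG_EFLAGS =  0 */
      unsigned int esp;      /* REG_ESP    =  4 */
      unsigned int ebp;      /* REG_EBP    =  8 */
      unsigned int ebx;      /* REG_EBX    = 12 */
      unsigned int esi;      /* REG_ESI    = 16 */
      unsigned int edi;      /* REG_EDI    = 20 */
    } i386_context_model;    /* SIZE_REGS  = 24 */

Only eflags, the stack pointer and the callee-saved registers appear: _CPU_Context_switch is entered by an ordinary call, so eax, ecx and edx are scratch under the i386 calling convention and need no slots.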
+ */
+
+/*
+ * void _CPU_Context_restore( new_context )
+ *
+ * This routine performs a normal non-FP context restore.
+ */
+
+ PUBLIC (_CPU_Context_restore)
+
+.set NEWCONTEXT_ARG, 4 # context to restore argument
+
+SYM (_CPU_Context_restore):
+
+ movl NEWCONTEXT_ARG(esp),eax # eax = running threads context
+ jmp restore
+
+/*PAGE
+ * void _CPU_Context_save_fp_context( &fp_context_ptr )
+ * void _CPU_Context_restore_fp_context( &fp_context_ptr )
+ *
+ * This section is used to context switch an i80287, i80387,
+ * the built-in coprocessor or the i80486 or compatible.
+ */
+
+.set FPCONTEXT_ARG, 4 # FP context argument
+
+ .p2align 1
+ PUBLIC (_CPU_Context_save_fp)
+SYM (_CPU_Context_save_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ fsave (eax) # save FP context
+ ret
+
+ .p2align 1
+ PUBLIC (_CPU_Context_restore_fp)
+SYM (_CPU_Context_restore_fp):
+ movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area
+ movl (eax),eax # eax = FP context area
+ frstor (eax) # restore FP context
+ ret
+
+ PUBLIC (_Exception_Handler)
+SYM (_Exception_Handler):
+ pusha # Push general purpose registers
+ pushl esp # Push exception frame address
+ movl _currentExcHandler, eax # Call function stored in _currentExcHandler
+ call * eax
+ addl $4, esp
+ popa # restore general purpose registers
+ addl $8, esp # skip vector number and faultCode
+ iret
+
+#define DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY(_vector) \
+ .p2align 4 ; \
+ PUBLIC (rtems_exception_prologue_ ## _vector ) ; \
+SYM (rtems_exception_prologue_ ## _vector ): \
+ pushl $ _vector ; \
+ jmp SYM (_Exception_Handler) ;
+
+#define DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY(_vector) \
+ .p2align 4 ; \
+ PUBLIC (rtems_exception_prologue_ ## _vector ) ; \
+SYM (rtems_exception_prologue_ ## _vector ): \
+ pushl $ 0 ; \
+ pushl $ _vector ; \
+ jmp SYM (_Exception_Handler) ;
+
+/*
+ * Divide Error
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (0)
+/*
+ * Debug Exception
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (1)
+/*
+ * NMI
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (2)
+/*
+ * Breakpoint
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (3)
+/*
+ * Overflow
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (4)
+/*
+ * Bound Range Exceeded
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (5)
+/*
+ * Invalid Opcode
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (6)
+/*
+ * No Math Coproc
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (7)
+/*
+ * Double Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (8)
+/*
+ * Coprocessor segment overrun
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (9)
+/*
+ * Invalid TSS
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (10)
+/*
+ * Segment Not Present
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (11)
+/*
+ * Stack segment Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (12)
+/*
+ * General Protection Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (13)
+/*
+ * Page Fault
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (14)
+/*
+ * Floating point error (NB: vector 15 is reserved and therefore skipped)
+ */
+DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (16)
+/*
+ * Alignment Check
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (17)
+/*
+ * Machine Check
+ */
+DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (18)
+
+
+/*
+ * GO32 does not require these segment related routines.
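Each rtems_exception_prologue_NN stub and the common _Exception_Handler above cooperate to hand _currentExcHandler one uniform frame; the pushed words line up as follows (an illustrative C view, derived from the pusha ordering and the two prologue pushes):

    typedef struct {
      /* pushed by pusha (edi ends up at the lowest address) */
      unsigned int edi, esi, ebp, esp, ebx, edx, ecx, eax;
      /* pushed by the per-vector prologue stub */
      unsigned int vector;
      /* CPU-pushed error code, or the 0 pushed by the
       * ..._WITHOUT_FAULTCODE variant */
      unsigned int faultCode;
      /* pushed by the CPU on exception entry */
      unsigned int eip, cs, eflags;
    } i386_exception_frame_model;

    extern void ( *_currentExcHandler )( i386_exception_frame_model *frame );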
+ */ + +#ifndef __GO32__ + +/* + * void *i386_Logical_to_physical( + * rtems_unsigned16 segment, + * void *address + * ); + * + * Returns thirty-two bit physical address for segment:address. + */ + +.set SEGMENT_ARG, 4 +.set ADDRESS_ARG, 8 + + PUBLIC (i386_Logical_to_physical) + +SYM (i386_Logical_to_physical): + + xorl eax,eax # clear eax + movzwl SEGMENT_ARG(esp),ecx # ecx = segment value + movl $ SYM (_Global_descriptor_table),edx + # edx = address of our GDT + addl ecx,edx # edx = address of desired entry + movb 7(edx),ah # ah = base 31:24 + movb 4(edx),al # al = base 23:16 + shll $16,eax # move ax into correct bits + movw 2(edx),ax # ax = base 0:15 + movl ADDRESS_ARG(esp),ecx # ecx = address to convert + addl eax,ecx # ecx = physical address equivalent + movl ecx,eax # eax = ecx + ret + +/* + * void *i386_Physical_to_logical( + * rtems_unsigned16 segment, + * void *address + * ); + * + * Returns thirty-two bit physical address for segment:address. + */ + +/* + *.set SEGMENT_ARG, 4 + *.set ADDRESS_ARG, 8 -- use sets from above + */ + + PUBLIC (i386_Physical_to_logical) + +SYM (i386_Physical_to_logical): + xorl eax,eax # clear eax + movzwl SEGMENT_ARG(esp),ecx # ecx = segment value + movl $ SYM (_Global_descriptor_table),edx + # edx = address of our GDT + addl ecx,edx # edx = address of desired entry + movb 7(edx),ah # ah = base 31:24 + movb 4(edx),al # al = base 23:16 + shll $16,eax # move ax into correct bits + movw 2(edx),ax # ax = base 0:15 + movl ADDRESS_ARG(esp),ecx # ecx = address to convert + subl eax,ecx # ecx = logical address equivalent + movl ecx,eax # eax = ecx + ret +#endif /* __GO32__ */ + +END_CODE + +END diff --git a/c/src/exec/score/cpu/i386/cpu_asm.s b/c/src/exec/score/cpu/i386/cpu_asm.s deleted file mode 100644 index 4ac4dab626..0000000000 --- a/c/src/exec/score/cpu/i386/cpu_asm.s +++ /dev/null @@ -1,282 +0,0 @@ -/* cpu_asm.s - * - * This file contains all assembly code for the Intel i386 implementation - * of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - -#include - -/* - * Format of i386 Register structure - */ - -.set REG_EFLAGS, 0 -.set REG_ESP, REG_EFLAGS + 4 -.set REG_EBP, REG_ESP + 4 -.set REG_EBX, REG_EBP + 4 -.set REG_ESI, REG_EBX + 4 -.set REG_EDI, REG_ESI + 4 -.set SIZE_REGS, REG_EDI + 4 - - BEGIN_CODE - -/* - * void _CPU_Context_switch( run_context, heir_context ) - * - * This routine performs a normal non-FP context. 
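i386_Logical_to_physical above reassembles the 32-bit segment base that the GDT scatters over descriptor bytes 2, 3, 4 and 7. As a C model (pointer types illustrative; the selector's low three bits are assumed zero, as in the assembly):

    extern unsigned char _Global_descriptor_table[];

    void *i386_Logical_to_physical_model( unsigned short segment, void *address )
    {
      const unsigned char *desc = _Global_descriptor_table + segment;
      unsigned int base =  (unsigned int) desc[ 2 ]          /* base  7:0  */
                        | ((unsigned int) desc[ 3 ] <<  8)   /* base 15:8  */
                        | ((unsigned int) desc[ 4 ] << 16)   /* base 23:16 */
                        | ((unsigned int) desc[ 7 ] << 24);  /* base 31:24 */
      return (void *) ( (unsigned int) address + base );
    }

i386_Physical_to_logical is the inverse, subtracting the base instead of adding it.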
- */ - - .p2align 1 - PUBLIC (_CPU_Context_switch) - -.set RUNCONTEXT_ARG, 4 # save context argument -.set HEIRCONTEXT_ARG, 8 # restore context argument - -SYM (_CPU_Context_switch): - movl RUNCONTEXT_ARG(esp),eax # eax = running threads context - pushf # push eflags - popl REG_EFLAGS(eax) # save eflags - movl esp,REG_ESP(eax) # save stack pointer - movl ebp,REG_EBP(eax) # save base pointer - movl ebx,REG_EBX(eax) # save ebx - movl esi,REG_ESI(eax) # save source register - movl edi,REG_EDI(eax) # save destination register - - movl HEIRCONTEXT_ARG(esp),eax # eax = heir threads context - -restore: - pushl REG_EFLAGS(eax) # push eflags - popf # restore eflags - movl REG_ESP(eax),esp # restore stack pointer - movl REG_EBP(eax),ebp # restore base pointer - movl REG_EBX(eax),ebx # restore ebx - movl REG_ESI(eax),esi # restore source register - movl REG_EDI(eax),edi # restore destination register - ret - -/* - * NOTE: May be unnecessary to reload some registers. - */ - -/* - * void _CPU_Context_restore( new_context ) - * - * This routine performs a normal non-FP context. - */ - - PUBLIC (_CPU_Context_restore) - -.set NEWCONTEXT_ARG, 4 # context to restore argument - -SYM (_CPU_Context_restore): - - movl NEWCONTEXT_ARG(esp),eax # eax = running threads context - jmp restore - -/*PAGE - * void _CPU_Context_save_fp_context( &fp_context_ptr ) - * void _CPU_Context_restore_fp_context( &fp_context_ptr ) - * - * This section is used to context switch an i80287, i80387, - * the built-in coprocessor or the i80486 or compatible. - */ - -.set FPCONTEXT_ARG, 4 # FP context argument - - .p2align 1 - PUBLIC (_CPU_Context_save_fp) -SYM (_CPU_Context_save_fp): - movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area - movl (eax),eax # eax = FP context area - fsave (eax) # save FP context - ret - - .p2align 1 - PUBLIC (_CPU_Context_restore_fp) -SYM (_CPU_Context_restore_fp): - movl FPCONTEXT_ARG(esp),eax # eax = &ptr to FP context area - movl (eax),eax # eax = FP context area - frstor (eax) # restore FP context - ret - - PUBLIC (_Exception_Handler) -SYM (_Exception_Handler): - pusha # Push general purpose registers - pushl esp # Push exception frame address - movl _currentExcHandler, eax # Call function storead in _currentExcHandler - call * eax - addl $4, esp - popa # restore general purpose registers - addl $8, esp # skill vector number and faultCode - iret - -#define DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY(_vector) \ - .p2align 4 ; \ - PUBLIC (rtems_exception_prologue_ ## _vector ) ; \ -SYM (rtems_exception_prologue_ ## _vector ): \ - pushl $ _vector ; \ - jmp SYM (_Exception_Handler) ; - -#define DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY(_vector) \ - .p2align 4 ; \ - PUBLIC (rtems_exception_prologue_ ## _vector ) ; \ -SYM (rtems_exception_prologue_ ## _vector ): \ - pushl $ 0 ; \ - pushl $ _vector ; \ - jmp SYM (_Exception_Handler) ; - -/* - * Divide Error - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (0) -/* - * Debug Exception - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (1) -/* - * NMI - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (2) -/* - * Breakpoint - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (3) -/* - * Overflow - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (4) -/* - * Bound Range Exceeded - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (5) -/* - * Invalid Opcode - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (6) -/* - * No Math Coproc - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (7) -/* - * Double Fault - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (8) -/* - * 
Coprocessor segment overrun - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (9) -/* - * Invalid TSS - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (10) -/* - * Segment Not Present - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (11) -/* - * Stack segment Fault - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (12) -/* - * General Protection Fault - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (13) -/* - * Page Fault - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (14) -/* - * Floating point error (NB 15 is reserved it is therefor skipped) - */ -DISTINCT_EXCEPTION_WITHOUT_FAULTCODE_ENTRY (16) -/* - * Aligment Check - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (17) -/* - * Machine Check - */ -DISTINCT_EXCEPTION_WITH_FAULTCODE_ENTRY (18) - - -/* - * GO32 does not require these segment related routines. - */ - -#ifndef __GO32__ - -/* - * void *i386_Logical_to_physical( - * rtems_unsigned16 segment, - * void *address - * ); - * - * Returns thirty-two bit physical address for segment:address. - */ - -.set SEGMENT_ARG, 4 -.set ADDRESS_ARG, 8 - - PUBLIC (i386_Logical_to_physical) - -SYM (i386_Logical_to_physical): - - xorl eax,eax # clear eax - movzwl SEGMENT_ARG(esp),ecx # ecx = segment value - movl $ SYM (_Global_descriptor_table),edx - # edx = address of our GDT - addl ecx,edx # edx = address of desired entry - movb 7(edx),ah # ah = base 31:24 - movb 4(edx),al # al = base 23:16 - shll $16,eax # move ax into correct bits - movw 2(edx),ax # ax = base 0:15 - movl ADDRESS_ARG(esp),ecx # ecx = address to convert - addl eax,ecx # ecx = physical address equivalent - movl ecx,eax # eax = ecx - ret - -/* - * void *i386_Physical_to_logical( - * rtems_unsigned16 segment, - * void *address - * ); - * - * Returns thirty-two bit physical address for segment:address. - */ - -/* - *.set SEGMENT_ARG, 4 - *.set ADDRESS_ARG, 8 -- use sets from above - */ - - PUBLIC (i386_Physical_to_logical) - -SYM (i386_Physical_to_logical): - xorl eax,eax # clear eax - movzwl SEGMENT_ARG(esp),ecx # ecx = segment value - movl $ SYM (_Global_descriptor_table),edx - # edx = address of our GDT - addl ecx,edx # edx = address of desired entry - movb 7(edx),ah # ah = base 31:24 - movb 4(edx),al # al = base 23:16 - shll $16,eax # move ax into correct bits - movw 2(edx),ax # ax = base 0:15 - movl ADDRESS_ARG(esp),ecx # ecx = address to convert - subl eax,ecx # ecx = logical address equivalent - movl ecx,eax # eax = ecx - ret -#endif /* __GO32__ */ - -END_CODE - -END diff --git a/c/src/exec/score/cpu/i386/rtems.S b/c/src/exec/score/cpu/i386/rtems.S new file mode 100644 index 0000000000..f08fe63299 --- /dev/null +++ b/c/src/exec/score/cpu/i386/rtems.S @@ -0,0 +1,31 @@ +/* rtems.s + * + * This file contains the single entry point code for + * the i386 implementation of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. 
+ * + * $Id$ + */ + +#include + + EXTERN (_Entry_points) + + BEGIN_CODE + + .align 2 + PUBLIC (RTEMS) + +SYM (RTEMS): + jmpl *SYM (_Entry_points)(,eax,4) + + END_CODE + +END diff --git a/c/src/exec/score/cpu/i386/rtems.s b/c/src/exec/score/cpu/i386/rtems.s deleted file mode 100644 index f08fe63299..0000000000 --- a/c/src/exec/score/cpu/i386/rtems.s +++ /dev/null @@ -1,31 +0,0 @@ -/* rtems.s - * - * This file contains the single entry point code for - * the i386 implementation of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - -#include - - EXTERN (_Entry_points) - - BEGIN_CODE - - .align 2 - PUBLIC (RTEMS) - -SYM (RTEMS): - jmpl *SYM (_Entry_points)(,eax,4) - - END_CODE - -END diff --git a/c/src/exec/score/cpu/i960/Makefile.in b/c/src/exec/score/cpu/i960/Makefile.in index 641de5b218..8c265002b5 100644 --- a/c/src/exec/score/cpu/i960/Makefile.in +++ b/c/src/exec/score/cpu/i960/Makefile.in @@ -20,10 +20,10 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/i960.h $(srcdir)/i960types.h # H_FILES that get installed externally EXTERNAL_H_FILES = $(srcdir)/asm.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S S_PIECES=cpu_asm rtems -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/i960/cpu_asm.S b/c/src/exec/score/cpu/i960/cpu_asm.S new file mode 100644 index 0000000000..fbed8babd8 --- /dev/null +++ b/c/src/exec/score/cpu/i960/cpu_asm.S @@ -0,0 +1,199 @@ +/* cpu_asm.s + * + * This file contains all assembly code for the i960CA implementation + * of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + + .text +/* + * Format of i960ca Register structure + */ + +.set REG_R0_PFP , 0 # (r0) Previous Frame Pointer +.set REG_R1_SP , REG_R0_PFP+4 # (r1) Stack Pointer +.set REG_PC , REG_R1_SP+4 # (pc) Processor Controls +.set REG_G8 , REG_PC+4 # (g8) Global Register 8 +.set REG_G9 , REG_G8+4 # (g9) Global Register 9 +.set REG_G10 , REG_G9+4 # (g10) Global Register 10 +.set REG_G11 , REG_G10+4 # (g11) Global Register 11 +.set REG_G12 , REG_G11+4 # (g12) Global Register 12 +.set REG_G13 , REG_G12+4 # (g13) Global Register 13 +.set REG_G14 , REG_G13+4 # (g14) Global Register 14 +.set REG_G15_FP , REG_G14+4 # (g15) Global Register 15 +.set SIZE_REGS , REG_G15_FP+4 # size of cpu_context_registers + # structure + +/* + * void _CPU_Context_switch( run_context, heir_context ) + * + * This routine performs a normal non-FP context. 
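The .set list above corresponds to this layout (an illustrative C mirror of the i960 Context_Control):

    typedef struct {
      unsigned int r0_pfp;          /* previous frame pointer */
      unsigned int r1_sp;           /* stack pointer          */
      unsigned int pc;              /* processor controls     */
      unsigned int g8, g9, g10, g11;
      unsigned int g12, g13, g14;
      unsigned int g15_fp;          /* frame pointer          */
    } i960_context_model;           /* SIZE_REGS = 44 bytes   */

Only pfp/sp, the processor controls and g8-g15 need explicit slots; the remaining local registers live in the on-chip register cache tied to the call frame, which is why the restore path below begins with flushreg.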
+ */ + .align 4 + .globl __CPU_Context_switch + +__CPU_Context_switch: + modpc 0,0,g2 # get old intr level (PC) + st g2,REG_PC(g0) # save pc + stq g8,REG_G8(g0) # save g8-g11 + stq g12,REG_G12(g0) # save g12-g15 + stl pfp,REG_R0_PFP(g0) # save pfp, sp + +restore: flushreg # flush register cache + ldconst 0x001f0000,g2 # g2 = PC mask + ld REG_PC(g1),g3 # thread->Regs.pc = pc; + ldq REG_G12(g1),g12 # restore g12-g15 + ldl REG_R0_PFP(g1),pfp # restore pfp, sp + ldq REG_G8(g1),g8 # restore g8-g11 + modpc 0,g2,g3 # restore PC register + ret + +/* + * void _CPU_Context_restore( new_context ) + * + * This routine performs a normal non-FP context. + */ + + .globl __CPU_Context_restore +__CPU_Context_restore: + mov g0,g1 # g0 = _Thread_executing + b restore + +/*PAGE + * void _CPU_Context_save_fp_context( &fp_context_ptr ) + * void _CPU_Context_restore_fp_context( &fp_context_ptr ) + * + * There is currently no hardware floating point for the i960. + */ + + .globl __CPU_Context_save_fp + .globl __CPU_Context_restore_fp +__CPU_Context_save_fp: +__CPU_Context_restore_fp: +#if ( I960_HAS_FPU == 1 ) +#error "Floating point support for i960 family has been implemented!!!" +#endif + ret + +/*PAGE + * void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * Input parameters: NONE + * + * Output parameters: NONE + * + * NOTE: + * Upon entry, the supervisor stack will contain a stack frame + * back to the interrupted thread and the interrupt stack will contain + * an interrupt stack frame. If dispatching is enabled, this + * is the outer most interrupt, and (a context switch is necessary or + * the current thread has signals), then set up the supervisor stack to + * transfer control to the interrupt dispatcher. + */ + + .globl __ISR_Handler +__ISR_Handler: + #ldconst 1,r8 + #modpc 0,r8,r8 # enable tracing + + # r4 = &_Thread_Dispatch_disable_level + ld __Thread_Dispatch_disable_level,r4 + movl g0,r8 # save g0-g1 + + ld -16+8(fp),g0 # g0 = vector number + movl g2,r10 # save g2-g3 + + ld __ISR_Nest_level,r5 # r5 = &_Isr_nest_level + mov g14,r7 # save g14 + + lda 0,g14 # NOT Branch and Link + movl g4,r12 # save g4-g5 + + lda 1(r4),r4 # increment dispatch disable level + movl g6,r14 # save g6-g7 + + ld __ISR_Vector_table[g0*4],g1 # g1 = Users handler + addo 1,r5,r5 # increment ISR level + + st r4,__Thread_Dispatch_disable_level + # one ISR nest level deeper + subo 1,r4,r4 # decrement dispatch disable level + + st r5,__ISR_Nest_level # disable multitasking + subo 1,r5,r5 # decrement ISR nest level + + callx (g1) # invoke user ISR + + st r4,__Thread_Dispatch_disable_level + # unnest multitasking + st r5,__ISR_Nest_level # one less ISR nest level + cmpobne.f 0,r4,exit # If dispatch disabled, exit + ldl -16(fp),g0 # g0 = threads PC reg + # g1 = threads AC reg + ld __Context_Switch_necessary,r6 + # r6 = Is thread switch necessary? + bbs.f 13,g0,exit # not outer level, then exit + cmpobne.f 0,r6,bframe # Switch necessary? + + ld __ISR_Signals_to_thread_executing,g2 + # signals sent to Run_thread + # while in interrupt handler? 
+ cmpobe.f 0,g2,exit # No, then exit + +bframe: mov 0,g2 + st g2,__ISR_Signals_to_thread_executing + + ldconst 0x1f0000,g2 # g2 = intr disable mask + mov g2,g3 # g3 = new intr level + modpc 0,g2,g3 # set new level + + andnot 7,pfp,r4 # r4 = pfp without ret type + flushreg # flush registers + # push _Isr_dispatch ret frame + # build ISF in r4-r6 + ldconst 64,g2 # g2 = size of stack frame + ld 4(r4),g3 # g3 = previous sp + addo g2,g3,r5 # r5 = _Isr_dispatch SP + lda __ISR_Dispatch,r6 # r6 = _Isr_dispatch entry + stt r4,(g3) # set _Isr_dispatch ret info + st g1,16(g3) # set r4 = AC for ISR disp + or 7,g3,pfp # pfp to _Isr_dispatch + +exit: mov r7,g14 # restore g14 + movq r8,g0 # restore g0-g3 + movq r12,g4 # restore g4-g7 + ret + + +/*PAGE + * + * void __ISR_Dispatch() + * + * Entry point from the outermost interrupt service routine exit. + * The current stack is the supervisor mode stack. + */ + +__ISR_Dispatch: + mov g14,r7 + mov 0,g14 + movq g0,r8 + movq g4,r12 + call __Thread_Dispatch + + ldconst -1,r5 # r5 = reload mask + modac r5,r4,r4 # restore threads AC register + mov r7,g14 + movq r8,g0 + movq r12,g4 + ret diff --git a/c/src/exec/score/cpu/i960/cpu_asm.s b/c/src/exec/score/cpu/i960/cpu_asm.s deleted file mode 100644 index fbed8babd8..0000000000 --- a/c/src/exec/score/cpu/i960/cpu_asm.s +++ /dev/null @@ -1,199 +0,0 @@ -/* cpu_asm.s - * - * This file contains all assembly code for the i960CA implementation - * of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - - .text -/* - * Format of i960ca Register structure - */ - -.set REG_R0_PFP , 0 # (r0) Previous Frame Pointer -.set REG_R1_SP , REG_R0_PFP+4 # (r1) Stack Pointer -.set REG_PC , REG_R1_SP+4 # (pc) Processor Controls -.set REG_G8 , REG_PC+4 # (g8) Global Register 8 -.set REG_G9 , REG_G8+4 # (g9) Global Register 9 -.set REG_G10 , REG_G9+4 # (g10) Global Register 10 -.set REG_G11 , REG_G10+4 # (g11) Global Register 11 -.set REG_G12 , REG_G11+4 # (g12) Global Register 12 -.set REG_G13 , REG_G12+4 # (g13) Global Register 13 -.set REG_G14 , REG_G13+4 # (g14) Global Register 14 -.set REG_G15_FP , REG_G14+4 # (g15) Global Register 15 -.set SIZE_REGS , REG_G15_FP+4 # size of cpu_context_registers - # structure - -/* - * void _CPU_Context_switch( run_context, heir_context ) - * - * This routine performs a normal non-FP context. - */ - .align 4 - .globl __CPU_Context_switch - -__CPU_Context_switch: - modpc 0,0,g2 # get old intr level (PC) - st g2,REG_PC(g0) # save pc - stq g8,REG_G8(g0) # save g8-g11 - stq g12,REG_G12(g0) # save g12-g15 - stl pfp,REG_R0_PFP(g0) # save pfp, sp - -restore: flushreg # flush register cache - ldconst 0x001f0000,g2 # g2 = PC mask - ld REG_PC(g1),g3 # thread->Regs.pc = pc; - ldq REG_G12(g1),g12 # restore g12-g15 - ldl REG_R0_PFP(g1),pfp # restore pfp, sp - ldq REG_G8(g1),g8 # restore g8-g11 - modpc 0,g2,g3 # restore PC register - ret - -/* - * void _CPU_Context_restore( new_context ) - * - * This routine performs a normal non-FP context. - */ - - .globl __CPU_Context_restore -__CPU_Context_restore: - mov g0,g1 # g0 = _Thread_executing - b restore - -/*PAGE - * void _CPU_Context_save_fp_context( &fp_context_ptr ) - * void _CPU_Context_restore_fp_context( &fp_context_ptr ) - * - * There is currently no hardware floating point for the i960. 
- */ - - .globl __CPU_Context_save_fp - .globl __CPU_Context_restore_fp -__CPU_Context_save_fp: -__CPU_Context_restore_fp: -#if ( I960_HAS_FPU == 1 ) -#error "Floating point support for i960 family has been implemented!!!" -#endif - ret - -/*PAGE - * void __ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * - * Input parameters: NONE - * - * Output parameters: NONE - * - * NOTE: - * Upon entry, the supervisor stack will contain a stack frame - * back to the interrupted thread and the interrupt stack will contain - * an interrupt stack frame. If dispatching is enabled, this - * is the outer most interrupt, and (a context switch is necessary or - * the current thread has signals), then set up the supervisor stack to - * transfer control to the interrupt dispatcher. - */ - - .globl __ISR_Handler -__ISR_Handler: - #ldconst 1,r8 - #modpc 0,r8,r8 # enable tracing - - # r4 = &_Thread_Dispatch_disable_level - ld __Thread_Dispatch_disable_level,r4 - movl g0,r8 # save g0-g1 - - ld -16+8(fp),g0 # g0 = vector number - movl g2,r10 # save g2-g3 - - ld __ISR_Nest_level,r5 # r5 = &_Isr_nest_level - mov g14,r7 # save g14 - - lda 0,g14 # NOT Branch and Link - movl g4,r12 # save g4-g5 - - lda 1(r4),r4 # increment dispatch disable level - movl g6,r14 # save g6-g7 - - ld __ISR_Vector_table[g0*4],g1 # g1 = Users handler - addo 1,r5,r5 # increment ISR level - - st r4,__Thread_Dispatch_disable_level - # one ISR nest level deeper - subo 1,r4,r4 # decrement dispatch disable level - - st r5,__ISR_Nest_level # disable multitasking - subo 1,r5,r5 # decrement ISR nest level - - callx (g1) # invoke user ISR - - st r4,__Thread_Dispatch_disable_level - # unnest multitasking - st r5,__ISR_Nest_level # one less ISR nest level - cmpobne.f 0,r4,exit # If dispatch disabled, exit - ldl -16(fp),g0 # g0 = threads PC reg - # g1 = threads AC reg - ld __Context_Switch_necessary,r6 - # r6 = Is thread switch necessary? - bbs.f 13,g0,exit # not outer level, then exit - cmpobne.f 0,r6,bframe # Switch necessary? - - ld __ISR_Signals_to_thread_executing,g2 - # signals sent to Run_thread - # while in interrupt handler? - cmpobe.f 0,g2,exit # No, then exit - -bframe: mov 0,g2 - st g2,__ISR_Signals_to_thread_executing - - ldconst 0x1f0000,g2 # g2 = intr disable mask - mov g2,g3 # g3 = new intr level - modpc 0,g2,g3 # set new level - - andnot 7,pfp,r4 # r4 = pfp without ret type - flushreg # flush registers - # push _Isr_dispatch ret frame - # build ISF in r4-r6 - ldconst 64,g2 # g2 = size of stack frame - ld 4(r4),g3 # g3 = previous sp - addo g2,g3,r5 # r5 = _Isr_dispatch SP - lda __ISR_Dispatch,r6 # r6 = _Isr_dispatch entry - stt r4,(g3) # set _Isr_dispatch ret info - st g1,16(g3) # set r4 = AC for ISR disp - or 7,g3,pfp # pfp to _Isr_dispatch - -exit: mov r7,g14 # restore g14 - movq r8,g0 # restore g0-g3 - movq r12,g4 # restore g4-g7 - ret - - -/*PAGE - * - * void __ISR_Dispatch() - * - * Entry point from the outermost interrupt service routine exit. - * The current stack is the supervisor mode stack. 
- */ - -__ISR_Dispatch: - mov g14,r7 - mov 0,g14 - movq g0,r8 - movq g4,r12 - call __Thread_Dispatch - - ldconst -1,r5 # r5 = reload mask - modac r5,r4,r4 # restore threads AC register - mov r7,g14 - movq r8,g0 - movq r12,g4 - ret diff --git a/c/src/exec/score/cpu/i960/rtems.S b/c/src/exec/score/cpu/i960/rtems.S new file mode 100644 index 0000000000..37fb734436 --- /dev/null +++ b/c/src/exec/score/cpu/i960/rtems.S @@ -0,0 +1,25 @@ +/* rtems.s + * + * This file contains the single entry point code for + * the i960 implementation of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + + .text + + .align 4 + .globl RTEMS + +RTEMS: + ld __Entry_points[g7*4],r4 + bx (r4) + diff --git a/c/src/exec/score/cpu/i960/rtems.s b/c/src/exec/score/cpu/i960/rtems.s deleted file mode 100644 index 37fb734436..0000000000 --- a/c/src/exec/score/cpu/i960/rtems.s +++ /dev/null @@ -1,25 +0,0 @@ -/* rtems.s - * - * This file contains the single entry point code for - * the i960 implementation of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - - .text - - .align 4 - .globl RTEMS - -RTEMS: - ld __Entry_points[g7*4],r4 - bx (r4) - diff --git a/c/src/exec/score/cpu/m68k/Makefile.in b/c/src/exec/score/cpu/m68k/Makefile.in index f273d69362..90bdc8e4d8 100644 --- a/c/src/exec/score/cpu/m68k/Makefile.in +++ b/c/src/exec/score/cpu/m68k/Makefile.in @@ -21,10 +21,10 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/m68k.h $(srcdir)/m68ktypes.h EXTERNAL_H_FILES = $(srcdir)/asm.h $(srcdir)/m68302.h $(srcdir)/m68360.h \ $(srcdir)/qsm.h $(srcdir)/sim.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S S_PIECES=cpu_asm rtems -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/m68k/cpu_asm.S b/c/src/exec/score/cpu/m68k/cpu_asm.S new file mode 100644 index 0000000000..03747222e5 --- /dev/null +++ b/c/src/exec/score/cpu/m68k/cpu_asm.S @@ -0,0 +1,291 @@ +/* cpu_asm.s + * + * This file contains all assembly code for the MC68020 implementation + * of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + + +#include + + .text + +/* void _CPU_Context_switch( run_context, heir_context ) + * + * This routine performs a normal non-FP context. 
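All of the rtems.S stubs in this patch implement the same single entry point: jump through a table of directive handlers indexed by the directive number (g7 on the i960 above, eax on the i386, d0 on the m68k below). In effect (illustrative C; the stubs jump rather than call, so the handler returns directly to the original caller):

    typedef void ( *rtems_directive_handler )( void );
    extern rtems_directive_handler _Entry_points[];

    void RTEMS_model( unsigned int directive )
    {
      ( *_Entry_points[ directive ] )();
    }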
+ */ + + .align 4 + .global SYM (_CPU_Context_switch) + +.set RUNCONTEXT_ARG, 4 | save context argument +.set HEIRCONTEXT_ARG, 8 | restore context argument + +SYM (_CPU_Context_switch): + moval a7@(RUNCONTEXT_ARG),a0| a0 = running thread context + movw sr,d1 | d1 = status register + movml d1-d7/a2-a7,a0@ | save context + + moval a7@(HEIRCONTEXT_ARG),a0| a0 = heir thread context +restore: movml a0@,d1-d7/a2-a7 | restore context + movw d1,sr | restore status register + rts + +/*PAGE + * void __CPU_Context_save_fp_context( &fp_context_ptr ) + * void __CPU_Context_restore_fp_context( &fp_context_ptr ) + * + * These routines are used to context switch a MC68881 or MC68882. + * + * NOTE: Context save and restore code is based upon the code shown + * on page 6-38 of the MC68881/68882 Users Manual (rev 1). + * + * CPU_FP_CONTEXT_SIZE is higher than expected to account for the + * -1 pushed at end of this sequence. + * + * Neither of these entries is required if we have software FPU + * emulation. But if we don't have an FPU or emulation, then + * we need the stub versions of these routines. + */ + +#if (CPU_SOFTWARE_FP == FALSE) + +.set FPCONTEXT_ARG, 4 | save FP context argument + + .align 4 + .global SYM (_CPU_Context_save_fp) +SYM (_CPU_Context_save_fp): +#if ( M68K_HAS_FPU == 1 ) + moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area + moval a1@,a0 | a0 = Save context area + fsave a0@- | save 68881/68882 state frame + tstb a0@ | check for a null frame + beq nosv | Yes, skip save of user model + fmovem fp0-fp7,a0@- | save data registers (fp0-fp7) + fmovem fpc/fps/fpi,a0@- | and save control registers + movl #-1,a0@- | place not-null flag on stack +nosv: movl a0,a1@ | save pointer to saved context +#endif + rts + + .align 4 + .global SYM (_CPU_Context_restore_fp) +SYM (_CPU_Context_restore_fp): +#if ( M68K_HAS_FPU == 1 ) + moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area + moval a1@,a0 | a0 = address of saved context + tstb a0@ | Null context frame? + beq norst | Yes, skip fp restore + addql #4,a0 | throwaway non-null flag + fmovem a0@+,fpc/fps/fpi | restore control registers + fmovem a0@+,fp0-fp7 | restore data regs (fp0-fp7) +norst: frestore a0@+ | restore the fp state frame + movl a0,a1@ | save pointer to saved context +#endif + rts +#endif + +/*PAGE + * void _ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * NOTE: + * Upon entry, the master stack will contain an interrupt stack frame + * back to the interrupted thread and the interrupt stack will contain + * a throwaway interrupt stack frame. If dispatching is enabled, this + * is the outer most interrupt, and (a context switch is necessary or + * the current thread has signals), then set up the master stack to + * transfer control to the interrupt dispatcher. + */ + +/* + * With this approach, lower priority interrupts may + * execute twice if a higher priority interrupt is + * acknowledged before _Thread_Dispatch_disable is + * incremented and the higher priority interrupt + * performs a context switch after executing. The lower + * priority interrupt will execute (1) at the end of the + * higher priority interrupt in the new context if + * permitted by the new interrupt level mask, and (2) when + * the original context regains the cpu. 
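The MC68881/68882 save sequence above keys off the fsave state frame: its first byte is zero when the FPU is in the reset ("null") state, so the programmer-visible registers are dumped, and a -1 sentinel pushed, only when there is live state. A C-shaped sketch of that protocol (fsave_push, push_fp_data_registers, push_fp_control_registers and push_long are hypothetical stand-ins for the fsave/fmovem/movl instructions):

    extern unsigned char *fsave_push( unsigned char *area );
    extern unsigned char *push_fp_data_registers( unsigned char *area );
    extern unsigned char *push_fp_control_registers( unsigned char *area );
    extern unsigned char *push_long( unsigned char *area, long value );

    void m68k_fp_save_model( unsigned char **fp_context_ptr )
    {
      unsigned char *p = fsave_push( *fp_context_ptr );
      if ( *p != 0 ) {                       /* non-null frame: live state */
        p = push_fp_data_registers( p );     /* fp0-fp7                    */
        p = push_fp_control_registers( p );  /* fpc/fps/fpi                */
        p = push_long( p, -1 );              /* not-null flag              */
      }
      *fp_context_ptr = p;                   /* record final pointer       */
    }

The restore side tests the same flag and skips the register reload for a null frame, which keeps switch cost low for tasks that never touch the FPU.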
+ */ + +#if ( M68K_COLDFIRE_ARCH == 1 ) +.set SR_OFFSET, 2 | Status register offset +.set PC_OFFSET, 4 | Program Counter offset +.set FVO_OFFSET, 0 | Format/vector offset +#elif ( M68K_HAS_VBR == 1) +.set SR_OFFSET, 0 | Status register offset +.set PC_OFFSET, 2 | Program Counter offset +.set FVO_OFFSET, 6 | Format/vector offset +#else +.set SR_OFFSET, 2 | Status register offset +.set PC_OFFSET, 4 | Program Counter offset +.set FVO_OFFSET, 0 | Format/vector offset placed in the stack +#endif /* M68K_HAS_VBR */ + +.set SAVED, 16 | space for saved registers + + .align 4 + .global SYM (_ISR_Handler) + +SYM (_ISR_Handler): + addql #1,SYM (_Thread_Dispatch_disable_level) | disable multitasking +#if ( M68K_COLDFIRE_ARCH == 0 ) + moveml d0-d1/a0-a1,a7@- | save d0-d1,a0-a1 + movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO + andl #0x0fff,d0 | d0 = vector offset in vbr +#else + lea a7@(-SAVED),a7 + movm.l d0-d1/a0-a1,a7@ | save d0-d1,a0-a1 + movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO + andl #0x0ffc,d0 | d0 = vector offset in vbr +#endif + + +#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 ) + #if ( M68K_COLDFIRE_ARCH == 0 ) + movew sr,d1 | Save status register + oriw #0x700,sr | Disable interrupts + #else + move.l d0,a7@- | Save d0 value + move.l #0x700,d0 | Load in disable ints value + move.w sr,d1 | Grab SR + or.l d1,d0 | Create new SR + move.w d0,sr | Disable interrupts + move.l a7@+,d0 | Restore d0 value + #endif + + tstl SYM (_ISR_Nest_level) | Interrupting an interrupt handler? + bne 1f | Yes, just skip over stack switch code + movel SYM(_CPU_Interrupt_stack_high),a0 | End of interrupt stack + movel a7,a0@- | Save task stack pointer + movel a0,a7 | Switch to interrupt stack +1: + addql #1,SYM(_ISR_Nest_level) | one nest level deeper + movew d1,sr | Restore status register +#else + addql #1,SYM (_ISR_Nest_level) | one nest level deeper +#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */ + +#if ( M68K_HAS_PREINDEXING == 1 ) + movel @( SYM (_ISR_Vector_table),d0:w:1),a0| fetch the ISR +#else + movel # SYM (_ISR_Vector_table),a0 | a0 = base of RTEMS table + addal d0,a0 | a0 = address of vector + movel (a0),a0 | a0 = address of user routine +#endif + + lsrl #2,d0 | d0 = vector number + movel d0,a7@- | push vector number + jbsr a0@ | invoke the user ISR + addql #4,a7 | remove vector number + +#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 ) + #if ( M68K_COLDFIRE_ARCH == 0 ) + movew sr,d0 | Save status register + oriw #0x700,sr | Disable interrupts + #else + move.l #0x700,d1 | Load in disable int value + move.w sr,d0 | Grab SR + or.l d0,d1 | Create new SR + move.w d1,sr | Load to disable interrupts + #endif + + subql #1,SYM(_ISR_Nest_level) | Reduce interrupt-nesting count + bne 1f | Skip if return to interrupt + movel (a7),a7 | Restore task stack pointer +1: + movew d0,sr | Restore status register +#else + subql #1,SYM (_ISR_Nest_level) | one less nest level +#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */ + + subql #1,SYM (_Thread_Dispatch_disable_level) + | unnest multitasking + bne exit | If dispatch disabled, exit + +#if ( M68K_HAS_SEPARATE_STACKS == 1 ) + movew #0xf000,d0 | isolate format nibble + andw a7@(SAVED+FVO_OFFSET),d0 | get F/VO + cmpiw #0x1000,d0 | is it a throwaway isf? + bne exit | NOT outer level, so branch +#endif + + tstl SYM (_Context_Switch_necessary) + | Is thread switch necessary? + bne bframe | Yes, invoke dispatcher + + tstl SYM (_ISR_Signals_to_thread_executing) + | signals sent to Run_thread + | while in interrupt handler? 
+ beq exit | No, then exit + + +bframe: clrl SYM (_ISR_Signals_to_thread_executing) + | If sent, will be processed +#if ( M68K_HAS_SEPARATE_STACKS == 1 ) + movec msp,a0 | a0 = master stack pointer + movew #0,a0@- | push format word + movel #SYM(_ISR_Dispatch),a0@- | push return addr + movew a0@(6),a0@- | push saved sr + movec a0,msp | set master stack pointer +#else + jsr SYM (_Thread_Dispatch) | Perform context switch +#endif + +#if ( M68K_COLDFIRE_ARCH == 0 ) +exit: moveml a7@+,d0-d1/a0-a1 | restore d0-d1,a0-a1 +#else +exit: moveml a7@,d0-d1/a0-a1 | restore d0-d1,a0-a1 + lea a7@(SAVED),a7 +#endif + +#if ( M68K_HAS_VBR == 0 ) + addql #2,a7 | pop format/id +#endif /* M68K_HAS_VBR */ + rte | return to thread + | OR _Isr_dispatch + +/*PAGE + * void _ISR_Dispatch() + * + * Entry point from the outermost interrupt service routine exit. + * The current stack is the supervisor mode stack if this processor + * has separate stacks. + * + * 1. save all registers not preserved across C calls. + * 2. invoke the _Thread_Dispatch routine to switch tasks + * or a signal to the currently executing task. + * 3. restore all registers not preserved across C calls. + * 4. return from interrupt + */ + + .global SYM (_ISR_Dispatch) +SYM (_ISR_Dispatch): +#if ( M68K_COLDFIRE_ARCH == 0 ) + movml d0-d1/a0-a1,a7@- + jsr SYM (_Thread_Dispatch) + movml a7@+,d0-d1/a0-a1 +#else + lea a7@(-SAVED),a7 + movml d0-d1/a0-a1,a7@ + jsr SYM (_Thread_Dispatch) + movml a7@,d0-d1/a0-a1 + lea a7@(SAVED),a7 +#endif + +#if ( M68K_HAS_VBR == 0 ) + addql #2,a7 | pop format/id +#endif /* M68K_HAS_VBR */ + rte diff --git a/c/src/exec/score/cpu/m68k/cpu_asm.s b/c/src/exec/score/cpu/m68k/cpu_asm.s deleted file mode 100644 index 03747222e5..0000000000 --- a/c/src/exec/score/cpu/m68k/cpu_asm.s +++ /dev/null @@ -1,291 +0,0 @@ -/* cpu_asm.s - * - * This file contains all assembly code for the MC68020 implementation - * of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - - -#include - - .text - -/* void _CPU_Context_switch( run_context, heir_context ) - * - * This routine performs a normal non-FP context. - */ - - .align 4 - .global SYM (_CPU_Context_switch) - -.set RUNCONTEXT_ARG, 4 | save context argument -.set HEIRCONTEXT_ARG, 8 | restore context argument - -SYM (_CPU_Context_switch): - moval a7@(RUNCONTEXT_ARG),a0| a0 = running thread context - movw sr,d1 | d1 = status register - movml d1-d7/a2-a7,a0@ | save context - - moval a7@(HEIRCONTEXT_ARG),a0| a0 = heir thread context -restore: movml a0@,d1-d7/a2-a7 | restore context - movw d1,sr | restore status register - rts - -/*PAGE - * void __CPU_Context_save_fp_context( &fp_context_ptr ) - * void __CPU_Context_restore_fp_context( &fp_context_ptr ) - * - * These routines are used to context switch a MC68881 or MC68882. - * - * NOTE: Context save and restore code is based upon the code shown - * on page 6-38 of the MC68881/68882 Users Manual (rev 1). - * - * CPU_FP_CONTEXT_SIZE is higher than expected to account for the - * -1 pushed at end of this sequence. - * - * Neither of these entries is required if we have software FPU - * emulation. But if we don't have an FPU or emulation, then - * we need the stub versions of these routines. 
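The CPU_HAS_SOFTWARE_INTERRUPT_STACK path above parks the task stack pointer at the top of the dedicated interrupt stack on the outermost entry and pops it back on the outermost exit. As a C model (illustrative; interrupts are masked around both transitions in the real code):

    extern void *_CPU_Interrupt_stack_high;
    extern volatile unsigned int _ISR_Nest_level;

    void **isr_stack_enter_model( void **task_sp )
    {
      void **isr_sp;
      if ( _ISR_Nest_level++ != 0 )
        return task_sp;                 /* already on the interrupt stack */
      isr_sp = (void **) _CPU_Interrupt_stack_high;
      *--isr_sp = (void *) task_sp;     /* save the task stack pointer    */
      return isr_sp;                    /* handler runs on the ISR stack  */
    }

    void **isr_stack_exit_model( void **sp )
    {
      if ( --_ISR_Nest_level != 0 )
        return sp;                      /* still nested                   */
      return (void **) *sp;             /* outermost: restore task SP     */
    }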
- */ - -#if (CPU_SOFTWARE_FP == FALSE) - -.set FPCONTEXT_ARG, 4 | save FP context argument - - .align 4 - .global SYM (_CPU_Context_save_fp) -SYM (_CPU_Context_save_fp): -#if ( M68K_HAS_FPU == 1 ) - moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area - moval a1@,a0 | a0 = Save context area - fsave a0@- | save 68881/68882 state frame - tstb a0@ | check for a null frame - beq nosv | Yes, skip save of user model - fmovem fp0-fp7,a0@- | save data registers (fp0-fp7) - fmovem fpc/fps/fpi,a0@- | and save control registers - movl #-1,a0@- | place not-null flag on stack -nosv: movl a0,a1@ | save pointer to saved context -#endif - rts - - .align 4 - .global SYM (_CPU_Context_restore_fp) -SYM (_CPU_Context_restore_fp): -#if ( M68K_HAS_FPU == 1 ) - moval a7@(FPCONTEXT_ARG),a1 | a1 = &ptr to context area - moval a1@,a0 | a0 = address of saved context - tstb a0@ | Null context frame? - beq norst | Yes, skip fp restore - addql #4,a0 | throwaway non-null flag - fmovem a0@+,fpc/fps/fpi | restore control registers - fmovem a0@+,fp0-fp7 | restore data regs (fp0-fp7) -norst: frestore a0@+ | restore the fp state frame - movl a0,a1@ | save pointer to saved context -#endif - rts -#endif - -/*PAGE - * void _ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * - * NOTE: - * Upon entry, the master stack will contain an interrupt stack frame - * back to the interrupted thread and the interrupt stack will contain - * a throwaway interrupt stack frame. If dispatching is enabled, this - * is the outer most interrupt, and (a context switch is necessary or - * the current thread has signals), then set up the master stack to - * transfer control to the interrupt dispatcher. - */ - -/* - * With this approach, lower priority interrupts may - * execute twice if a higher priority interrupt is - * acknowledged before _Thread_Dispatch_disable is - * incremented and the higher priority interrupt - * performs a context switch after executing. The lower - * priority interrupt will execute (1) at the end of the - * higher priority interrupt in the new context if - * permitted by the new interrupt level mask, and (2) when - * the original context regains the cpu. 
- */ - -#if ( M68K_COLDFIRE_ARCH == 1 ) -.set SR_OFFSET, 2 | Status register offset -.set PC_OFFSET, 4 | Program Counter offset -.set FVO_OFFSET, 0 | Format/vector offset -#elif ( M68K_HAS_VBR == 1) -.set SR_OFFSET, 0 | Status register offset -.set PC_OFFSET, 2 | Program Counter offset -.set FVO_OFFSET, 6 | Format/vector offset -#else -.set SR_OFFSET, 2 | Status register offset -.set PC_OFFSET, 4 | Program Counter offset -.set FVO_OFFSET, 0 | Format/vector offset placed in the stack -#endif /* M68K_HAS_VBR */ - -.set SAVED, 16 | space for saved registers - - .align 4 - .global SYM (_ISR_Handler) - -SYM (_ISR_Handler): - addql #1,SYM (_Thread_Dispatch_disable_level) | disable multitasking -#if ( M68K_COLDFIRE_ARCH == 0 ) - moveml d0-d1/a0-a1,a7@- | save d0-d1,a0-a1 - movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO - andl #0x0fff,d0 | d0 = vector offset in vbr -#else - lea a7@(-SAVED),a7 - movm.l d0-d1/a0-a1,a7@ | save d0-d1,a0-a1 - movew a7@(SAVED+FVO_OFFSET),d0 | d0 = F/VO - andl #0x0ffc,d0 | d0 = vector offset in vbr -#endif - - -#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 ) - #if ( M68K_COLDFIRE_ARCH == 0 ) - movew sr,d1 | Save status register - oriw #0x700,sr | Disable interrupts - #else - move.l d0,a7@- | Save d0 value - move.l #0x700,d0 | Load in disable ints value - move.w sr,d1 | Grab SR - or.l d1,d0 | Create new SR - move.w d0,sr | Disable interrupts - move.l a7@+,d0 | Restore d0 value - #endif - - tstl SYM (_ISR_Nest_level) | Interrupting an interrupt handler? - bne 1f | Yes, just skip over stack switch code - movel SYM(_CPU_Interrupt_stack_high),a0 | End of interrupt stack - movel a7,a0@- | Save task stack pointer - movel a0,a7 | Switch to interrupt stack -1: - addql #1,SYM(_ISR_Nest_level) | one nest level deeper - movew d1,sr | Restore status register -#else - addql #1,SYM (_ISR_Nest_level) | one nest level deeper -#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */ - -#if ( M68K_HAS_PREINDEXING == 1 ) - movel @( SYM (_ISR_Vector_table),d0:w:1),a0| fetch the ISR -#else - movel # SYM (_ISR_Vector_table),a0 | a0 = base of RTEMS table - addal d0,a0 | a0 = address of vector - movel (a0),a0 | a0 = address of user routine -#endif - - lsrl #2,d0 | d0 = vector number - movel d0,a7@- | push vector number - jbsr a0@ | invoke the user ISR - addql #4,a7 | remove vector number - -#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 ) - #if ( M68K_COLDFIRE_ARCH == 0 ) - movew sr,d0 | Save status register - oriw #0x700,sr | Disable interrupts - #else - move.l #0x700,d1 | Load in disable int value - move.w sr,d0 | Grab SR - or.l d0,d1 | Create new SR - move.w d1,sr | Load to disable interrupts - #endif - - subql #1,SYM(_ISR_Nest_level) | Reduce interrupt-nesting count - bne 1f | Skip if return to interrupt - movel (a7),a7 | Restore task stack pointer -1: - movew d0,sr | Restore status register -#else - subql #1,SYM (_ISR_Nest_level) | one less nest level -#endif /* CPU_HAS_SOFTWARE_INTERRUPT_STACK == 1 */ - - subql #1,SYM (_Thread_Dispatch_disable_level) - | unnest multitasking - bne exit | If dispatch disabled, exit - -#if ( M68K_HAS_SEPARATE_STACKS == 1 ) - movew #0xf000,d0 | isolate format nibble - andw a7@(SAVED+FVO_OFFSET),d0 | get F/VO - cmpiw #0x1000,d0 | is it a throwaway isf? - bne exit | NOT outer level, so branch -#endif - - tstl SYM (_Context_Switch_necessary) - | Is thread switch necessary? - bne bframe | Yes, invoke dispatcher - - tstl SYM (_ISR_Signals_to_thread_executing) - | signals sent to Run_thread - | while in interrupt handler? 
- beq exit | No, then exit - - -bframe: clrl SYM (_ISR_Signals_to_thread_executing) - | If sent, will be processed -#if ( M68K_HAS_SEPARATE_STACKS == 1 ) - movec msp,a0 | a0 = master stack pointer - movew #0,a0@- | push format word - movel #SYM(_ISR_Dispatch),a0@- | push return addr - movew a0@(6),a0@- | push saved sr - movec a0,msp | set master stack pointer -#else - jsr SYM (_Thread_Dispatch) | Perform context switch -#endif - -#if ( M68K_COLDFIRE_ARCH == 0 ) -exit: moveml a7@+,d0-d1/a0-a1 | restore d0-d1,a0-a1 -#else -exit: moveml a7@,d0-d1/a0-a1 | restore d0-d1,a0-a1 - lea a7@(SAVED),a7 -#endif - -#if ( M68K_HAS_VBR == 0 ) - addql #2,a7 | pop format/id -#endif /* M68K_HAS_VBR */ - rte | return to thread - | OR _Isr_dispatch - -/*PAGE - * void _ISR_Dispatch() - * - * Entry point from the outermost interrupt service routine exit. - * The current stack is the supervisor mode stack if this processor - * has separate stacks. - * - * 1. save all registers not preserved across C calls. - * 2. invoke the _Thread_Dispatch routine to switch tasks - * or a signal to the currently executing task. - * 3. restore all registers not preserved across C calls. - * 4. return from interrupt - */ - - .global SYM (_ISR_Dispatch) -SYM (_ISR_Dispatch): -#if ( M68K_COLDFIRE_ARCH == 0 ) - movml d0-d1/a0-a1,a7@- - jsr SYM (_Thread_Dispatch) - movml a7@+,d0-d1/a0-a1 -#else - lea a7@(-SAVED),a7 - movml d0-d1/a0-a1,a7@ - jsr SYM (_Thread_Dispatch) - movml a7@,d0-d1/a0-a1 - lea a7@(SAVED),a7 -#endif - -#if ( M68K_HAS_VBR == 0 ) - addql #2,a7 | pop format/id -#endif /* M68K_HAS_VBR */ - rte diff --git a/c/src/exec/score/cpu/m68k/rtems.S b/c/src/exec/score/cpu/m68k/rtems.S new file mode 100644 index 0000000000..e8cba8204c --- /dev/null +++ b/c/src/exec/score/cpu/m68k/rtems.S @@ -0,0 +1,52 @@ +/* rtems.s + * + * This file contains the single entry point code for + * the m68k implementation of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + + +#include + +/* + * There seems to be no reason to have two versions of this. + * The following version should work across the entire family. + * The worst assumption is that gcc will put entry in a scratch + * register and not screw up the stack. + * + * NOTE: This is a 68020 version: + * + * jmpl @(%%d0:l:4)@(__Entry_points) + */ + + EXTERN (_Entry_points) + + BEGIN_CODE + + .align 4 + .global SYM (RTEMS) + +SYM (RTEMS): + moveal SYM (_Entry_points), a0 + lsll #2, d0 + addal d0, a0 + +#if (M68K_COLDFIRE_ARCH == 0) + moveal @(a0),a0 + jmpl @(a0) +#else + moveal (a0),a0 + jmpl (a0) +#endif + + END_CODE +END diff --git a/c/src/exec/score/cpu/m68k/rtems.s b/c/src/exec/score/cpu/m68k/rtems.s deleted file mode 100644 index e8cba8204c..0000000000 --- a/c/src/exec/score/cpu/m68k/rtems.s +++ /dev/null @@ -1,52 +0,0 @@ -/* rtems.s - * - * This file contains the single entry point code for - * the m68k implementation of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. 
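
The directive dispatch in the new rtems.S above behaves like the C below, with one caveat: the real code jumps rather than calls, so the directive returns directly to the original caller with its arguments still in place. The function-pointer typedef is assumed for illustration.

    typedef long ( *rtems_directive_t )( void );
    extern rtems_directive_t *_Entry_points;    /* table used to look up the directive */

    long RTEMS_sketch( long directive_index )   /* the index arrives in d0             */
    {
      return ( *_Entry_points[ directive_index ] )();
    }
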
- * - * $Id$ - */ - - -#include - -/* - * There seems to be no reason to have two versions of this. - * The following version should work across the entire family. - * The worst assumption is that gcc will put entry in a scratch - * register and not screw up the stack. - * - * NOTE: This is a 68020 version: - * - * jmpl @(%%d0:l:4)@(__Entry_points) - */ - - EXTERN (_Entry_points) - - BEGIN_CODE - - .align 4 - .global SYM (RTEMS) - -SYM (RTEMS): - moveal SYM (_Entry_points), a0 - lsll #2, d0 - addal d0, a0 - -#if (M68K_COLDFIRE_ARCH == 0) - moveal @(a0),a0 - jmpl @(a0) -#else - moveal (a0),a0 - jmpl (a0) -#endif - - END_CODE -END diff --git a/c/src/exec/score/cpu/no_cpu/Makefile.in b/c/src/exec/score/cpu/no_cpu/Makefile.in index 8277c5e5b7..8646f1d070 100644 --- a/c/src/exec/score/cpu/no_cpu/Makefile.in +++ b/c/src/exec/score/cpu/no_cpu/Makefile.in @@ -21,11 +21,11 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/no_cpu.h $(srcdir)/no_cputypes.h # H_FILES that get installed externally EXTERNAL_H_FILES = $(srcdir)/asm.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S # Normally cpu_asm and rtems are assembly files S_PIECES= -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/powerpc/Makefile.in b/c/src/exec/score/cpu/powerpc/Makefile.in index f5f7ca5b92..1511342df9 100644 --- a/c/src/exec/score/cpu/powerpc/Makefile.in +++ b/c/src/exec/score/cpu/powerpc/Makefile.in @@ -21,11 +21,11 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/ppc.h $(srcdir)/ppctypes.h # H_FILES that get installed externally EXTERNAL_H_FILES = $(srcdir)/asm.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S # Normally cpu_asm and rtems are assembly files S_PIECES=cpu_asm rtems -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) @@ -73,7 +73,7 @@ preinstall: $(ARCH) \ # we will share the basic cpu file $(INSTALL) -m 444 ${EXTERNAL_H_FILES} $(PROJECT_INCLUDE) # make a link in case we are not compiling in the source directory - test -f irq_stub.s || $(LN) -s $(srcdir)/irq_stub.s irq_stub.s + #test -f irq_stub.s || $(LN) -s $(srcdir)/irq_stub.s irq_stub.s $(PROJECT_INCLUDE)/rtems/score/targopts.h: $(ARCH)/targopts.h-tmp $(INSTALL) -m 444 $(ARCH)/targopts.h-tmp $@ diff --git a/c/src/exec/score/cpu/powerpc/cpu_asm.S b/c/src/exec/score/cpu/powerpc/cpu_asm.S new file mode 100644 index 0000000000..7370764607 --- /dev/null +++ b/c/src/exec/score/cpu/powerpc/cpu_asm.S @@ -0,0 +1,809 @@ + +/* cpu_asm.s 1.1 - 95/12/04 + * + * This file contains the assembly code for the PowerPC implementation + * of RTEMS. + * + * Author: Andrew Bray + * + * COPYRIGHT (c) 1995 by i-cubed ltd. 
+ * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of i-cubed limited not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * i-cubed limited makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c: + * + * COPYRIGHT (c) 1989-1997. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may in + * the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + +#include + +/* + * Offsets for various Contexts + */ + .set GP_1, 0 + .set GP_2, (GP_1 + 4) + .set GP_13, (GP_2 + 4) + .set GP_14, (GP_13 + 4) + + .set GP_15, (GP_14 + 4) + .set GP_16, (GP_15 + 4) + .set GP_17, (GP_16 + 4) + .set GP_18, (GP_17 + 4) + + .set GP_19, (GP_18 + 4) + .set GP_20, (GP_19 + 4) + .set GP_21, (GP_20 + 4) + .set GP_22, (GP_21 + 4) + + .set GP_23, (GP_22 + 4) + .set GP_24, (GP_23 + 4) + .set GP_25, (GP_24 + 4) + .set GP_26, (GP_25 + 4) + + .set GP_27, (GP_26 + 4) + .set GP_28, (GP_27 + 4) + .set GP_29, (GP_28 + 4) + .set GP_30, (GP_29 + 4) + + .set GP_31, (GP_30 + 4) + .set GP_CR, (GP_31 + 4) + .set GP_PC, (GP_CR + 4) + .set GP_MSR, (GP_PC + 4) + +#if (PPC_HAS_DOUBLE == 1) + .set FP_0, 0 + .set FP_1, (FP_0 + 8) + .set FP_2, (FP_1 + 8) + .set FP_3, (FP_2 + 8) + .set FP_4, (FP_3 + 8) + .set FP_5, (FP_4 + 8) + .set FP_6, (FP_5 + 8) + .set FP_7, (FP_6 + 8) + .set FP_8, (FP_7 + 8) + .set FP_9, (FP_8 + 8) + .set FP_10, (FP_9 + 8) + .set FP_11, (FP_10 + 8) + .set FP_12, (FP_11 + 8) + .set FP_13, (FP_12 + 8) + .set FP_14, (FP_13 + 8) + .set FP_15, (FP_14 + 8) + .set FP_16, (FP_15 + 8) + .set FP_17, (FP_16 + 8) + .set FP_18, (FP_17 + 8) + .set FP_19, (FP_18 + 8) + .set FP_20, (FP_19 + 8) + .set FP_21, (FP_20 + 8) + .set FP_22, (FP_21 + 8) + .set FP_23, (FP_22 + 8) + .set FP_24, (FP_23 + 8) + .set FP_25, (FP_24 + 8) + .set FP_26, (FP_25 + 8) + .set FP_27, (FP_26 + 8) + .set FP_28, (FP_27 + 8) + .set FP_29, (FP_28 + 8) + .set FP_30, (FP_29 + 8) + .set FP_31, (FP_30 + 8) + .set FP_FPSCR, (FP_31 + 8) +#else + .set FP_0, 0 + .set FP_1, (FP_0 + 4) + .set FP_2, (FP_1 + 4) + .set FP_3, (FP_2 + 4) + .set FP_4, (FP_3 + 4) + .set FP_5, (FP_4 + 4) + .set FP_6, (FP_5 + 4) + .set FP_7, (FP_6 + 4) + .set FP_8, (FP_7 + 4) + .set FP_9, (FP_8 + 4) + .set FP_10, (FP_9 + 4) + .set FP_11, (FP_10 + 4) + .set FP_12, (FP_11 + 4) + .set FP_13, (FP_12 + 4) + .set FP_14, (FP_13 + 4) + .set FP_15, (FP_14 + 4) + .set FP_16, (FP_15 + 4) + .set FP_17, (FP_16 + 4) + .set FP_18, (FP_17 + 4) + .set FP_19, (FP_18 + 4) + .set FP_20, (FP_19 + 4) + .set FP_21, (FP_20 + 4) + .set FP_22, (FP_21 + 4) + .set FP_23, (FP_22 + 4) + .set FP_24, (FP_23 + 4) + .set FP_25, (FP_24 + 4) + .set FP_26, (FP_25 + 4) + .set FP_27, (FP_26 + 4) + .set FP_28, (FP_27 + 4) + .set FP_29, (FP_28 + 4) + .set FP_30, (FP_29 + 4) + .set FP_31, (FP_30 + 4) + .set FP_FPSCR, (FP_31 + 4) +#endif + + .set IP_LINK, 0 +#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27) + .set IP_0, (IP_LINK + 56) +#else + .set IP_0, (IP_LINK + 8) +#endif + .set IP_2, (IP_0 + 4) + + .set IP_3, (IP_2 + 4) + .set 
IP_4, (IP_3 + 4) + .set IP_5, (IP_4 + 4) + .set IP_6, (IP_5 + 4) + + .set IP_7, (IP_6 + 4) + .set IP_8, (IP_7 + 4) + .set IP_9, (IP_8 + 4) + .set IP_10, (IP_9 + 4) + + .set IP_11, (IP_10 + 4) + .set IP_12, (IP_11 + 4) + .set IP_13, (IP_12 + 4) + .set IP_28, (IP_13 + 4) + + .set IP_29, (IP_28 + 4) + .set IP_30, (IP_29 + 4) + .set IP_31, (IP_30 + 4) + .set IP_CR, (IP_31 + 4) + + .set IP_CTR, (IP_CR + 4) + .set IP_XER, (IP_CTR + 4) + .set IP_LR, (IP_XER + 4) + .set IP_PC, (IP_LR + 4) + + .set IP_MSR, (IP_PC + 4) + .set IP_END, (IP_MSR + 16) + + /* _CPU_IRQ_info offsets */ + + /* These must be in this order */ + .set Nest_level, 0 + .set Disable_level, 4 + .set Vector_table, 8 + .set Stack, 12 +#if (PPC_ABI == PPC_ABI_POWEROPEN) + .set Dispatch_r2, 16 + .set Switch_necessary, 20 +#else + .set Default_r2, 16 +#if (PPC_ABI != PPC_ABI_GCC27) + .set Default_r13, 20 + .set Switch_necessary, 24 +#else + .set Switch_necessary, 20 +#endif +#endif + .set Signal, Switch_necessary + 4 + .set msr_initial, Signal + 4 + + BEGIN_CODE +/* + * _CPU_Context_save_fp_context + * + * This routine is responsible for saving the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). The general rule on making this decision + * is to avoid writing assembly language. + */ + + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_CPU_Context_save_fp) +PROC (_CPU_Context_save_fp): +#if (PPC_HAS_FPU == 1) + lwz r3, 0(r3) +#if (PPC_HAS_DOUBLE == 1) + stfd f0, FP_0(r3) + stfd f1, FP_1(r3) + stfd f2, FP_2(r3) + stfd f3, FP_3(r3) + stfd f4, FP_4(r3) + stfd f5, FP_5(r3) + stfd f6, FP_6(r3) + stfd f7, FP_7(r3) + stfd f8, FP_8(r3) + stfd f9, FP_9(r3) + stfd f10, FP_10(r3) + stfd f11, FP_11(r3) + stfd f12, FP_12(r3) + stfd f13, FP_13(r3) + stfd f14, FP_14(r3) + stfd f15, FP_15(r3) + stfd f16, FP_16(r3) + stfd f17, FP_17(r3) + stfd f18, FP_18(r3) + stfd f19, FP_19(r3) + stfd f20, FP_20(r3) + stfd f21, FP_21(r3) + stfd f22, FP_22(r3) + stfd f23, FP_23(r3) + stfd f24, FP_24(r3) + stfd f25, FP_25(r3) + stfd f26, FP_26(r3) + stfd f27, FP_27(r3) + stfd f28, FP_28(r3) + stfd f29, FP_29(r3) + stfd f30, FP_30(r3) + stfd f31, FP_31(r3) + mffs f2 + stfd f2, FP_FPSCR(r3) +#else + stfs f0, FP_0(r3) + stfs f1, FP_1(r3) + stfs f2, FP_2(r3) + stfs f3, FP_3(r3) + stfs f4, FP_4(r3) + stfs f5, FP_5(r3) + stfs f6, FP_6(r3) + stfs f7, FP_7(r3) + stfs f8, FP_8(r3) + stfs f9, FP_9(r3) + stfs f10, FP_10(r3) + stfs f11, FP_11(r3) + stfs f12, FP_12(r3) + stfs f13, FP_13(r3) + stfs f14, FP_14(r3) + stfs f15, FP_15(r3) + stfs f16, FP_16(r3) + stfs f17, FP_17(r3) + stfs f18, FP_18(r3) + stfs f19, FP_19(r3) + stfs f20, FP_20(r3) + stfs f21, FP_21(r3) + stfs f22, FP_22(r3) + stfs f23, FP_23(r3) + stfs f24, FP_24(r3) + stfs f25, FP_25(r3) + stfs f26, FP_26(r3) + stfs f27, FP_27(r3) + stfs f28, FP_28(r3) + stfs f29, FP_29(r3) + stfs f30, FP_30(r3) + stfs f31, FP_31(r3) + mffs f2 + stfs f2, FP_FPSCR(r3) +#endif +#endif + blr + +/* + * _CPU_Context_restore_fp_context + * + * This routine is responsible for restoring the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. 
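
The pointer-to-pointer convention just described, reduced to C for clarity. Context_Control_fp is the RTEMS FP context type, treated as opaque here; the body stands in for the stfd/stfs runs above.

    typedef struct Context_Control_fp_struct Context_Control_fp;

    void CPU_Context_save_fp_sketch( Context_Control_fp **fp_context_ptr )
    {
      Context_Control_fp *ctx = *fp_context_ptr;  /* the "lwz r3, 0(r3)" above */
      /* ... f0-f31 and the FPSCR are stored into *ctx ... */
      *fp_context_ptr = ctx;                      /* written back if it moved  */
    }
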
+ * + * Sometimes a macro implementation of this is in cpu.h which dereferences + * the ** and a similarly named routine in this file is passed something + * like a (Context_Control_fp *). The general rule on making this decision + * is to avoid writing assembly language. + */ + + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_CPU_Context_restore_fp) +PROC (_CPU_Context_restore_fp): +#if (PPC_HAS_FPU == 1) + lwz r3, 0(r3) +#if (PPC_HAS_DOUBLE == 1) + lfd f2, FP_FPSCR(r3) + mtfsf 255, f2 + lfd f0, FP_0(r3) + lfd f1, FP_1(r3) + lfd f2, FP_2(r3) + lfd f3, FP_3(r3) + lfd f4, FP_4(r3) + lfd f5, FP_5(r3) + lfd f6, FP_6(r3) + lfd f7, FP_7(r3) + lfd f8, FP_8(r3) + lfd f9, FP_9(r3) + lfd f10, FP_10(r3) + lfd f11, FP_11(r3) + lfd f12, FP_12(r3) + lfd f13, FP_13(r3) + lfd f14, FP_14(r3) + lfd f15, FP_15(r3) + lfd f16, FP_16(r3) + lfd f17, FP_17(r3) + lfd f18, FP_18(r3) + lfd f19, FP_19(r3) + lfd f20, FP_20(r3) + lfd f21, FP_21(r3) + lfd f22, FP_22(r3) + lfd f23, FP_23(r3) + lfd f24, FP_24(r3) + lfd f25, FP_25(r3) + lfd f26, FP_26(r3) + lfd f27, FP_27(r3) + lfd f28, FP_28(r3) + lfd f29, FP_29(r3) + lfd f30, FP_30(r3) + lfd f31, FP_31(r3) +#else + lfs f2, FP_FPSCR(r3) + mtfsf 255, f2 + lfs f0, FP_0(r3) + lfs f1, FP_1(r3) + lfs f2, FP_2(r3) + lfs f3, FP_3(r3) + lfs f4, FP_4(r3) + lfs f5, FP_5(r3) + lfs f6, FP_6(r3) + lfs f7, FP_7(r3) + lfs f8, FP_8(r3) + lfs f9, FP_9(r3) + lfs f10, FP_10(r3) + lfs f11, FP_11(r3) + lfs f12, FP_12(r3) + lfs f13, FP_13(r3) + lfs f14, FP_14(r3) + lfs f15, FP_15(r3) + lfs f16, FP_16(r3) + lfs f17, FP_17(r3) + lfs f18, FP_18(r3) + lfs f19, FP_19(r3) + lfs f20, FP_20(r3) + lfs f21, FP_21(r3) + lfs f22, FP_22(r3) + lfs f23, FP_23(r3) + lfs f24, FP_24(r3) + lfs f25, FP_25(r3) + lfs f26, FP_26(r3) + lfs f27, FP_27(r3) + lfs f28, FP_28(r3) + lfs f29, FP_29(r3) + lfs f30, FP_30(r3) + lfs f31, FP_31(r3) +#endif +#endif + blr + + +/* _CPU_Context_switch + * + * This routine performs a normal non-FP context switch. 
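
The GP_* offsets defined at the top of this file describe a context block with the following shape; this C mirror is an assumption for illustration, not the actual RTEMS declaration.

    typedef struct {
      unsigned long gp1;            /* GP_1:  r1, the stack pointer    */
      unsigned long gp2;            /* GP_2:  r2                       */
      unsigned long gp13_31[ 19 ];  /* GP_13 .. GP_31                  */
      unsigned long cr;             /* GP_CR                           */
      unsigned long pc;             /* GP_PC: resume address (from LR) */
      unsigned long msr;            /* GP_MSR                          */
    } Context_Control_sketch;

    /* _CPU_Context_switch( run, heir ) stores into *run, loads from *heir;
       the final blr then resumes the heir where it last saved itself.    */
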
+ */ + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_CPU_Context_switch) +PROC (_CPU_Context_switch): + sync + isync +#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */ + stw r1, GP_1(r3) + lwz r1, GP_1(r4) + stw r2, GP_2(r3) + lwz r2, GP_2(r4) +#if (PPC_USE_MULTIPLE == 1) + stmw r13, GP_13(r3) + lmw r13, GP_13(r4) +#else + stw r13, GP_13(r3) + lwz r13, GP_13(r4) + stw r14, GP_14(r3) + lwz r14, GP_14(r4) + stw r15, GP_15(r3) + lwz r15, GP_15(r4) + stw r16, GP_16(r3) + lwz r16, GP_16(r4) + stw r17, GP_17(r3) + lwz r17, GP_17(r4) + stw r18, GP_18(r3) + lwz r18, GP_18(r4) + stw r19, GP_19(r3) + lwz r19, GP_19(r4) + stw r20, GP_20(r3) + lwz r20, GP_20(r4) + stw r21, GP_21(r3) + lwz r21, GP_21(r4) + stw r22, GP_22(r3) + lwz r22, GP_22(r4) + stw r23, GP_23(r3) + lwz r23, GP_23(r4) + stw r24, GP_24(r3) + lwz r24, GP_24(r4) + stw r25, GP_25(r3) + lwz r25, GP_25(r4) + stw r26, GP_26(r3) + lwz r26, GP_26(r4) + stw r27, GP_27(r3) + lwz r27, GP_27(r4) + stw r28, GP_28(r3) + lwz r28, GP_28(r4) + stw r29, GP_29(r3) + lwz r29, GP_29(r4) + stw r30, GP_30(r3) + lwz r30, GP_30(r4) + stw r31, GP_31(r3) + lwz r31, GP_31(r4) +#endif + mfcr r5 + stw r5, GP_CR(r3) + lwz r5, GP_CR(r4) + mflr r6 + mtcrf 255, r5 + stw r6, GP_PC(r3) + lwz r6, GP_PC(r4) + mfmsr r7 + mtlr r6 + stw r7, GP_MSR(r3) + lwz r7, GP_MSR(r4) + mtmsr r7 +#endif +#if (PPC_CACHE_ALIGNMENT == 16) + /* This assumes that all the registers are in the given order */ + li r5, 16 + addi r3,r3,-4 +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stw r1, GP_1+4(r3) + stw r2, GP_2+4(r3) +#if (PPC_USE_MULTIPLE == 1) + addi r3, r3, GP_14+4 +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + + addi r3, r3, GP_18-GP_14 +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + addi r3, r3, GP_22-GP_18 +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + addi r3, r3, GP_26-GP_22 +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stmw r13, GP_13-GP_26(r3) +#else + stw r13, GP_13+4(r3) + stwu r14, GP_14+4(r3) +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stw r15, GP_15-GP_14(r3) + stw r16, GP_16-GP_14(r3) + stw r17, GP_17-GP_14(r3) + stwu r18, GP_18-GP_14(r3) +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stw r19, GP_19-GP_18(r3) + stw r20, GP_20-GP_18(r3) + stw r21, GP_21-GP_18(r3) + stwu r22, GP_22-GP_18(r3) +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stw r23, GP_23-GP_22(r3) + stw r24, GP_24-GP_22(r3) + stw r25, GP_25-GP_22(r3) + stwu r26, GP_26-GP_22(r3) +#if ( PPC_USE_DATA_CACHE ) + dcbz r5, r3 +#endif + stw r27, GP_27-GP_26(r3) + stw r28, GP_28-GP_26(r3) + stw r29, GP_29-GP_26(r3) + stw r30, GP_30-GP_26(r3) + stw r31, GP_31-GP_26(r3) +#endif +#if ( PPC_USE_DATA_CACHE ) + dcbt r0, r4 +#endif + mfcr r6 + stw r6, GP_CR-GP_26(r3) + mflr r7 + stw r7, GP_PC-GP_26(r3) + mfmsr r8 + stw r8, GP_MSR-GP_26(r3) + +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + lwz r1, GP_1(r4) + lwz r2, GP_2(r4) +#if (PPC_USE_MULTIPLE == 1) + addi r4, r4, GP_15 +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + addi r4, r4, GP_19-GP_15 +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + addi r4, r4, GP_23-GP_19 +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + addi r4, r4, GP_27-GP_23 +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + lmw r13, GP_13-GP_27(r4) +#else + lwz r13, GP_13(r4) + lwz r14, GP_14(r4) + lwzu r15, GP_15(r4) +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + lwz r16, GP_16-GP_15(r4) + lwz r17, GP_17-GP_15(r4) + lwz r18, GP_18-GP_15(r4) + lwzu r19, GP_19-GP_15(r4) +#if ( PPC_USE_DATA_CACHE ) + dcbt r5, r4 +#endif + lwz r20, 
GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwzu r23, GP_23-GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r24, GP_24-GP_23(r4)
+ lwz r25, GP_25-GP_23(r4)
+ lwz r26, GP_26-GP_23(r4)
+ lwzu r27, GP_27-GP_23(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r28, GP_28-GP_27(r4)
+ lwz r29, GP_29-GP_27(r4)
+ lwz r30, GP_30-GP_27(r4)
+ lwz r31, GP_31-GP_27(r4)
+#endif
+ lwz r6, GP_CR-GP_27(r4)
+ lwz r7, GP_PC-GP_27(r4)
+ lwz r8, GP_MSR-GP_27(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+#if (PPC_CACHE_ALIGNMENT == 32)
+ /* This assumes that all the registers are in the given order */
+ li r5, 32
+ addi r3,r3,-4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r1, GP_1+4(r3)
+ stw r2, GP_2+4(r3)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r3, r3, GP_18+4
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stmw r13, GP_13-GP_18(r3)
+#else
+ stw r13, GP_13+4(r3)
+ stw r14, GP_14+4(r3)
+ stw r15, GP_15+4(r3)
+ stw r16, GP_16+4(r3)
+ stw r17, GP_17+4(r3)
+ stwu r18, GP_18+4(r3)
+#if ( PPC_USE_DATA_CACHE )
+ dcbz r5, r3
+#endif
+ stw r19, GP_19-GP_18(r3)
+ stw r20, GP_20-GP_18(r3)
+ stw r21, GP_21-GP_18(r3)
+ stw r22, GP_22-GP_18(r3)
+ stw r23, GP_23-GP_18(r3)
+ stw r24, GP_24-GP_18(r3)
+ stw r25, GP_25-GP_18(r3)
+ stw r26, GP_26-GP_18(r3)
+ stw r27, GP_27-GP_18(r3)
+ stw r28, GP_28-GP_18(r3)
+ stw r29, GP_29-GP_18(r3)
+ stw r30, GP_30-GP_18(r3)
+ stw r31, GP_31-GP_18(r3)
+#endif
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r0, r4
+#endif
+ mfcr r6
+ stw r6, GP_CR-GP_18(r3)
+ mflr r7
+ stw r7, GP_PC-GP_18(r3)
+ mfmsr r8
+ stw r8, GP_MSR-GP_18(r3)
+
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r1, GP_1(r4)
+ lwz r2, GP_2(r4)
+#if (PPC_USE_MULTIPLE == 1)
+ addi r4, r4, GP_19
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lmw r13, GP_13-GP_19(r4)
+#else
+ lwz r13, GP_13(r4)
+ lwz r14, GP_14(r4)
+ lwz r15, GP_15(r4)
+ lwz r16, GP_16(r4)
+ lwz r17, GP_17(r4)
+ lwz r18, GP_18(r4)
+ lwzu r19, GP_19(r4)
+#if ( PPC_USE_DATA_CACHE )
+ dcbt r5, r4
+#endif
+ lwz r20, GP_20-GP_19(r4)
+ lwz r21, GP_21-GP_19(r4)
+ lwz r22, GP_22-GP_19(r4)
+ lwz r23, GP_23-GP_19(r4)
+ lwz r24, GP_24-GP_19(r4)
+ lwz r25, GP_25-GP_19(r4)
+ lwz r26, GP_26-GP_19(r4)
+ lwz r27, GP_27-GP_19(r4)
+ lwz r28, GP_28-GP_19(r4)
+ lwz r29, GP_29-GP_19(r4)
+ lwz r30, GP_30-GP_19(r4)
+ lwz r31, GP_31-GP_19(r4)
+#endif
+ lwz r6, GP_CR-GP_19(r4)
+ lwz r7, GP_PC-GP_19(r4)
+ lwz r8, GP_MSR-GP_19(r4)
+ mtcrf 255, r6
+ mtlr r7
+ mtmsr r8
+#endif
+ blr
+
+/*
+ * _CPU_Context_restore
+ *
+ * This routine is generally used only to restart self in an
+ * efficient manner. It may simply be a label in _CPU_Context_switch.
+ *
+ * NOTE: May be unnecessary to reload some registers.
+ */
+/*
+ * ACB: Don't worry about cache optimisation here - this is not THAT critical.
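
A usage sketch for the restart-self path the comment mentions; the call site and argument here are hypothetical.

    extern void _CPU_Context_restore( void *context );

    void restart_self_sketch( void *executing_context )
    {
      /* reload this thread's own saved registers; execution continues at
         the saved PC, so control does not come back to this call site */
      _CPU_Context_restore( executing_context );
    }
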
+ */ + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_CPU_Context_restore) +PROC (_CPU_Context_restore): + lwz r5, GP_CR(r3) + lwz r6, GP_PC(r3) + lwz r7, GP_MSR(r3) + mtcrf 255, r5 + mtlr r6 + mtmsr r7 + lwz r1, GP_1(r3) + lwz r2, GP_2(r3) +#if (PPC_USE_MULTIPLE == 1) + lmw r13, GP_13(r3) +#else + lwz r13, GP_13(r3) + lwz r14, GP_14(r3) + lwz r15, GP_15(r3) + lwz r16, GP_16(r3) + lwz r17, GP_17(r3) + lwz r18, GP_18(r3) + lwz r19, GP_19(r3) + lwz r20, GP_20(r3) + lwz r21, GP_21(r3) + lwz r22, GP_22(r3) + lwz r23, GP_23(r3) + lwz r24, GP_24(r3) + lwz r25, GP_25(r3) + lwz r26, GP_26(r3) + lwz r27, GP_27(r3) + lwz r28, GP_28(r3) + lwz r29, GP_29(r3) + lwz r30, GP_30(r3) + lwz r31, GP_31(r3) +#endif + + blr + +/* Individual interrupt prologues look like this: + * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27) + * #if (PPC_HAS_FPU) + * stwu r1, -(20*4 + 18*8 + IP_END)(r1) + * #else + * stwu r1, -(20*4 + IP_END)(r1) + * #endif + * #else + * stwu r1, -(IP_END)(r1) + * #endif + * stw r0, IP_0(r1) + * + * li r0, vectornum + * b PROC (_ISR_Handler{,C}) + */ + +/* void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * The vector number is in r0. R0 has already been stacked. + * + */ + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_ISR_Handler) +PROC (_ISR_Handler): +#define LABEL(x) x +/* XXX ?? +#define MTSAVE(x) mtspr sprg0, x +#define MFSAVE(x) mfspr x, sprg0 +*/ +#define MTPC(x) mtspr srr0, x +#define MFPC(x) mfspr x, srr0 +#define MTMSR(x) mtspr srr1, x +#define MFMSR(x) mfspr x, srr1 + + #include "irq_stub.s" + rfi + +#if (PPC_HAS_RFCI == 1) +/* void __ISR_HandlerC() + * + * This routine provides the RTEMS interrupt management. + * For critical interrupts + * + */ + ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) + PUBLIC_PROC (_ISR_HandlerC) +PROC (_ISR_HandlerC): +#undef LABEL +#undef MTSAVE +#undef MFSAVE +#undef MTPC +#undef MFPC +#undef MTMSR +#undef MFMSR +#define LABEL(x) x##_C +/* XXX?? +#define MTSAVE(x) mtspr sprg1, x +#define MFSAVE(x) mfspr x, sprg1 +*/ +#define MTPC(x) mtspr srr2, x +#define MFPC(x) mfspr x, srr2 +#define MTMSR(x) mtspr srr3, x +#define MFMSR(x) mfspr x, srr3 + #include "irq_stub.s" + rfci +#endif + +/* PowerOpen descriptors for indirect function calls. + */ + +#if (PPC_ABI == PPC_ABI_POWEROPEN) + DESCRIPTOR (_CPU_Context_save_fp) + DESCRIPTOR (_CPU_Context_restore_fp) + DESCRIPTOR (_CPU_Context_switch) + DESCRIPTOR (_CPU_Context_restore) + DESCRIPTOR (_ISR_Handler) +#if (PPC_HAS_RFCI == 1) + DESCRIPTOR (_ISR_HandlerC) +#endif +#endif diff --git a/c/src/exec/score/cpu/powerpc/cpu_asm.s b/c/src/exec/score/cpu/powerpc/cpu_asm.s deleted file mode 100644 index 7370764607..0000000000 --- a/c/src/exec/score/cpu/powerpc/cpu_asm.s +++ /dev/null @@ -1,809 +0,0 @@ - -/* cpu_asm.s 1.1 - 95/12/04 - * - * This file contains the assembly code for the PowerPC implementation - * of RTEMS. - * - * Author: Andrew Bray - * - * COPYRIGHT (c) 1995 by i-cubed ltd. - * - * To anyone who acknowledges that this file is provided "AS IS" - * without any express or implied warranty: - * permission to use, copy, modify, and distribute this file - * for any purpose is hereby granted without fee, provided that - * the above copyright notice and this notice appears in all - * copies, and that the name of i-cubed limited not be used in - * advertising or publicity pertaining to distribution of the - * software without specific, written prior permission. 
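
The _ISR_Handler/_ISR_HandlerC pair defined above compiles one stub body twice, re-pointing MTPC/MFPC/MTMSR/MFMSR at srr0/srr1 or, for critical interrupts, srr2/srr3, before each #include of irq_stub.s. A purely illustrative C analogue of that reuse pattern:

    #include <stdio.h>

    /* one body, two parameterizations */
    #define MAKE_HANDLER( NAME, PC_REG, MSR_REG )                    \
      void NAME( void )                                              \
      {                                                              \
        printf( "return state held in %s/%s\n", PC_REG, MSR_REG );  \
      }

    MAKE_HANDLER( ISR_Handler_sketch,  "srr0", "srr1" )  /* normal   */
    MAKE_HANDLER( ISR_HandlerC_sketch, "srr2", "srr3" )  /* critical */
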
- * i-cubed limited makes no representations about the suitability - * of this software for any purpose. - * - * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c: - * - * COPYRIGHT (c) 1989-1997. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - -#include - -/* - * Offsets for various Contexts - */ - .set GP_1, 0 - .set GP_2, (GP_1 + 4) - .set GP_13, (GP_2 + 4) - .set GP_14, (GP_13 + 4) - - .set GP_15, (GP_14 + 4) - .set GP_16, (GP_15 + 4) - .set GP_17, (GP_16 + 4) - .set GP_18, (GP_17 + 4) - - .set GP_19, (GP_18 + 4) - .set GP_20, (GP_19 + 4) - .set GP_21, (GP_20 + 4) - .set GP_22, (GP_21 + 4) - - .set GP_23, (GP_22 + 4) - .set GP_24, (GP_23 + 4) - .set GP_25, (GP_24 + 4) - .set GP_26, (GP_25 + 4) - - .set GP_27, (GP_26 + 4) - .set GP_28, (GP_27 + 4) - .set GP_29, (GP_28 + 4) - .set GP_30, (GP_29 + 4) - - .set GP_31, (GP_30 + 4) - .set GP_CR, (GP_31 + 4) - .set GP_PC, (GP_CR + 4) - .set GP_MSR, (GP_PC + 4) - -#if (PPC_HAS_DOUBLE == 1) - .set FP_0, 0 - .set FP_1, (FP_0 + 8) - .set FP_2, (FP_1 + 8) - .set FP_3, (FP_2 + 8) - .set FP_4, (FP_3 + 8) - .set FP_5, (FP_4 + 8) - .set FP_6, (FP_5 + 8) - .set FP_7, (FP_6 + 8) - .set FP_8, (FP_7 + 8) - .set FP_9, (FP_8 + 8) - .set FP_10, (FP_9 + 8) - .set FP_11, (FP_10 + 8) - .set FP_12, (FP_11 + 8) - .set FP_13, (FP_12 + 8) - .set FP_14, (FP_13 + 8) - .set FP_15, (FP_14 + 8) - .set FP_16, (FP_15 + 8) - .set FP_17, (FP_16 + 8) - .set FP_18, (FP_17 + 8) - .set FP_19, (FP_18 + 8) - .set FP_20, (FP_19 + 8) - .set FP_21, (FP_20 + 8) - .set FP_22, (FP_21 + 8) - .set FP_23, (FP_22 + 8) - .set FP_24, (FP_23 + 8) - .set FP_25, (FP_24 + 8) - .set FP_26, (FP_25 + 8) - .set FP_27, (FP_26 + 8) - .set FP_28, (FP_27 + 8) - .set FP_29, (FP_28 + 8) - .set FP_30, (FP_29 + 8) - .set FP_31, (FP_30 + 8) - .set FP_FPSCR, (FP_31 + 8) -#else - .set FP_0, 0 - .set FP_1, (FP_0 + 4) - .set FP_2, (FP_1 + 4) - .set FP_3, (FP_2 + 4) - .set FP_4, (FP_3 + 4) - .set FP_5, (FP_4 + 4) - .set FP_6, (FP_5 + 4) - .set FP_7, (FP_6 + 4) - .set FP_8, (FP_7 + 4) - .set FP_9, (FP_8 + 4) - .set FP_10, (FP_9 + 4) - .set FP_11, (FP_10 + 4) - .set FP_12, (FP_11 + 4) - .set FP_13, (FP_12 + 4) - .set FP_14, (FP_13 + 4) - .set FP_15, (FP_14 + 4) - .set FP_16, (FP_15 + 4) - .set FP_17, (FP_16 + 4) - .set FP_18, (FP_17 + 4) - .set FP_19, (FP_18 + 4) - .set FP_20, (FP_19 + 4) - .set FP_21, (FP_20 + 4) - .set FP_22, (FP_21 + 4) - .set FP_23, (FP_22 + 4) - .set FP_24, (FP_23 + 4) - .set FP_25, (FP_24 + 4) - .set FP_26, (FP_25 + 4) - .set FP_27, (FP_26 + 4) - .set FP_28, (FP_27 + 4) - .set FP_29, (FP_28 + 4) - .set FP_30, (FP_29 + 4) - .set FP_31, (FP_30 + 4) - .set FP_FPSCR, (FP_31 + 4) -#endif - - .set IP_LINK, 0 -#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27) - .set IP_0, (IP_LINK + 56) -#else - .set IP_0, (IP_LINK + 8) -#endif - .set IP_2, (IP_0 + 4) - - .set IP_3, (IP_2 + 4) - .set IP_4, (IP_3 + 4) - .set IP_5, (IP_4 + 4) - .set IP_6, (IP_5 + 4) - - .set IP_7, (IP_6 + 4) - .set IP_8, (IP_7 + 4) - .set IP_9, (IP_8 + 4) - .set IP_10, (IP_9 + 4) - - .set IP_11, (IP_10 + 4) - .set IP_12, (IP_11 + 4) - .set IP_13, (IP_12 + 4) - .set IP_28, (IP_13 + 4) - - .set IP_29, (IP_28 + 4) - .set IP_30, (IP_29 + 4) - .set IP_31, (IP_30 + 4) - .set IP_CR, (IP_31 + 4) - - .set IP_CTR, (IP_CR + 4) - .set IP_XER, (IP_CTR + 4) - .set IP_LR, (IP_XER + 4) - .set IP_PC, (IP_LR + 4) - - 
.set IP_MSR, (IP_PC + 4) - .set IP_END, (IP_MSR + 16) - - /* _CPU_IRQ_info offsets */ - - /* These must be in this order */ - .set Nest_level, 0 - .set Disable_level, 4 - .set Vector_table, 8 - .set Stack, 12 -#if (PPC_ABI == PPC_ABI_POWEROPEN) - .set Dispatch_r2, 16 - .set Switch_necessary, 20 -#else - .set Default_r2, 16 -#if (PPC_ABI != PPC_ABI_GCC27) - .set Default_r13, 20 - .set Switch_necessary, 24 -#else - .set Switch_necessary, 20 -#endif -#endif - .set Signal, Switch_necessary + 4 - .set msr_initial, Signal + 4 - - BEGIN_CODE -/* - * _CPU_Context_save_fp_context - * - * This routine is responsible for saving the FP context - * at *fp_context_ptr. If the point to load the FP context - * from is changed then the pointer is modified by this routine. - * - * Sometimes a macro implementation of this is in cpu.h which dereferences - * the ** and a similarly named routine in this file is passed something - * like a (Context_Control_fp *). The general rule on making this decision - * is to avoid writing assembly language. - */ - - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_CPU_Context_save_fp) -PROC (_CPU_Context_save_fp): -#if (PPC_HAS_FPU == 1) - lwz r3, 0(r3) -#if (PPC_HAS_DOUBLE == 1) - stfd f0, FP_0(r3) - stfd f1, FP_1(r3) - stfd f2, FP_2(r3) - stfd f3, FP_3(r3) - stfd f4, FP_4(r3) - stfd f5, FP_5(r3) - stfd f6, FP_6(r3) - stfd f7, FP_7(r3) - stfd f8, FP_8(r3) - stfd f9, FP_9(r3) - stfd f10, FP_10(r3) - stfd f11, FP_11(r3) - stfd f12, FP_12(r3) - stfd f13, FP_13(r3) - stfd f14, FP_14(r3) - stfd f15, FP_15(r3) - stfd f16, FP_16(r3) - stfd f17, FP_17(r3) - stfd f18, FP_18(r3) - stfd f19, FP_19(r3) - stfd f20, FP_20(r3) - stfd f21, FP_21(r3) - stfd f22, FP_22(r3) - stfd f23, FP_23(r3) - stfd f24, FP_24(r3) - stfd f25, FP_25(r3) - stfd f26, FP_26(r3) - stfd f27, FP_27(r3) - stfd f28, FP_28(r3) - stfd f29, FP_29(r3) - stfd f30, FP_30(r3) - stfd f31, FP_31(r3) - mffs f2 - stfd f2, FP_FPSCR(r3) -#else - stfs f0, FP_0(r3) - stfs f1, FP_1(r3) - stfs f2, FP_2(r3) - stfs f3, FP_3(r3) - stfs f4, FP_4(r3) - stfs f5, FP_5(r3) - stfs f6, FP_6(r3) - stfs f7, FP_7(r3) - stfs f8, FP_8(r3) - stfs f9, FP_9(r3) - stfs f10, FP_10(r3) - stfs f11, FP_11(r3) - stfs f12, FP_12(r3) - stfs f13, FP_13(r3) - stfs f14, FP_14(r3) - stfs f15, FP_15(r3) - stfs f16, FP_16(r3) - stfs f17, FP_17(r3) - stfs f18, FP_18(r3) - stfs f19, FP_19(r3) - stfs f20, FP_20(r3) - stfs f21, FP_21(r3) - stfs f22, FP_22(r3) - stfs f23, FP_23(r3) - stfs f24, FP_24(r3) - stfs f25, FP_25(r3) - stfs f26, FP_26(r3) - stfs f27, FP_27(r3) - stfs f28, FP_28(r3) - stfs f29, FP_29(r3) - stfs f30, FP_30(r3) - stfs f31, FP_31(r3) - mffs f2 - stfs f2, FP_FPSCR(r3) -#endif -#endif - blr - -/* - * _CPU_Context_restore_fp_context - * - * This routine is responsible for restoring the FP context - * at *fp_context_ptr. If the point to load the FP context - * from is changed then the pointer is modified by this routine. - * - * Sometimes a macro implementation of this is in cpu.h which dereferences - * the ** and a similarly named routine in this file is passed something - * like a (Context_Control_fp *). The general rule on making this decision - * is to avoid writing assembly language. 
- */ - - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_CPU_Context_restore_fp) -PROC (_CPU_Context_restore_fp): -#if (PPC_HAS_FPU == 1) - lwz r3, 0(r3) -#if (PPC_HAS_DOUBLE == 1) - lfd f2, FP_FPSCR(r3) - mtfsf 255, f2 - lfd f0, FP_0(r3) - lfd f1, FP_1(r3) - lfd f2, FP_2(r3) - lfd f3, FP_3(r3) - lfd f4, FP_4(r3) - lfd f5, FP_5(r3) - lfd f6, FP_6(r3) - lfd f7, FP_7(r3) - lfd f8, FP_8(r3) - lfd f9, FP_9(r3) - lfd f10, FP_10(r3) - lfd f11, FP_11(r3) - lfd f12, FP_12(r3) - lfd f13, FP_13(r3) - lfd f14, FP_14(r3) - lfd f15, FP_15(r3) - lfd f16, FP_16(r3) - lfd f17, FP_17(r3) - lfd f18, FP_18(r3) - lfd f19, FP_19(r3) - lfd f20, FP_20(r3) - lfd f21, FP_21(r3) - lfd f22, FP_22(r3) - lfd f23, FP_23(r3) - lfd f24, FP_24(r3) - lfd f25, FP_25(r3) - lfd f26, FP_26(r3) - lfd f27, FP_27(r3) - lfd f28, FP_28(r3) - lfd f29, FP_29(r3) - lfd f30, FP_30(r3) - lfd f31, FP_31(r3) -#else - lfs f2, FP_FPSCR(r3) - mtfsf 255, f2 - lfs f0, FP_0(r3) - lfs f1, FP_1(r3) - lfs f2, FP_2(r3) - lfs f3, FP_3(r3) - lfs f4, FP_4(r3) - lfs f5, FP_5(r3) - lfs f6, FP_6(r3) - lfs f7, FP_7(r3) - lfs f8, FP_8(r3) - lfs f9, FP_9(r3) - lfs f10, FP_10(r3) - lfs f11, FP_11(r3) - lfs f12, FP_12(r3) - lfs f13, FP_13(r3) - lfs f14, FP_14(r3) - lfs f15, FP_15(r3) - lfs f16, FP_16(r3) - lfs f17, FP_17(r3) - lfs f18, FP_18(r3) - lfs f19, FP_19(r3) - lfs f20, FP_20(r3) - lfs f21, FP_21(r3) - lfs f22, FP_22(r3) - lfs f23, FP_23(r3) - lfs f24, FP_24(r3) - lfs f25, FP_25(r3) - lfs f26, FP_26(r3) - lfs f27, FP_27(r3) - lfs f28, FP_28(r3) - lfs f29, FP_29(r3) - lfs f30, FP_30(r3) - lfs f31, FP_31(r3) -#endif -#endif - blr - - -/* _CPU_Context_switch - * - * This routine performs a normal non-FP context switch. - */ - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_CPU_Context_switch) -PROC (_CPU_Context_switch): - sync - isync -#if (PPC_CACHE_ALIGNMENT == 4) /* No cache */ - stw r1, GP_1(r3) - lwz r1, GP_1(r4) - stw r2, GP_2(r3) - lwz r2, GP_2(r4) -#if (PPC_USE_MULTIPLE == 1) - stmw r13, GP_13(r3) - lmw r13, GP_13(r4) -#else - stw r13, GP_13(r3) - lwz r13, GP_13(r4) - stw r14, GP_14(r3) - lwz r14, GP_14(r4) - stw r15, GP_15(r3) - lwz r15, GP_15(r4) - stw r16, GP_16(r3) - lwz r16, GP_16(r4) - stw r17, GP_17(r3) - lwz r17, GP_17(r4) - stw r18, GP_18(r3) - lwz r18, GP_18(r4) - stw r19, GP_19(r3) - lwz r19, GP_19(r4) - stw r20, GP_20(r3) - lwz r20, GP_20(r4) - stw r21, GP_21(r3) - lwz r21, GP_21(r4) - stw r22, GP_22(r3) - lwz r22, GP_22(r4) - stw r23, GP_23(r3) - lwz r23, GP_23(r4) - stw r24, GP_24(r3) - lwz r24, GP_24(r4) - stw r25, GP_25(r3) - lwz r25, GP_25(r4) - stw r26, GP_26(r3) - lwz r26, GP_26(r4) - stw r27, GP_27(r3) - lwz r27, GP_27(r4) - stw r28, GP_28(r3) - lwz r28, GP_28(r4) - stw r29, GP_29(r3) - lwz r29, GP_29(r4) - stw r30, GP_30(r3) - lwz r30, GP_30(r4) - stw r31, GP_31(r3) - lwz r31, GP_31(r4) -#endif - mfcr r5 - stw r5, GP_CR(r3) - lwz r5, GP_CR(r4) - mflr r6 - mtcrf 255, r5 - stw r6, GP_PC(r3) - lwz r6, GP_PC(r4) - mfmsr r7 - mtlr r6 - stw r7, GP_MSR(r3) - lwz r7, GP_MSR(r4) - mtmsr r7 -#endif -#if (PPC_CACHE_ALIGNMENT == 16) - /* This assumes that all the registers are in the given order */ - li r5, 16 - addi r3,r3,-4 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r1, GP_1+4(r3) - stw r2, GP_2+4(r3) -#if (PPC_USE_MULTIPLE == 1) - addi r3, r3, GP_14+4 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - - addi r3, r3, GP_18-GP_14 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - addi r3, r3, GP_22-GP_18 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - addi r3, r3, GP_26-GP_22 -#if 
( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stmw r13, GP_13-GP_26(r3) -#else - stw r13, GP_13+4(r3) - stwu r14, GP_14+4(r3) -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r15, GP_15-GP_14(r3) - stw r16, GP_16-GP_14(r3) - stw r17, GP_17-GP_14(r3) - stwu r18, GP_18-GP_14(r3) -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r19, GP_19-GP_18(r3) - stw r20, GP_20-GP_18(r3) - stw r21, GP_21-GP_18(r3) - stwu r22, GP_22-GP_18(r3) -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r23, GP_23-GP_22(r3) - stw r24, GP_24-GP_22(r3) - stw r25, GP_25-GP_22(r3) - stwu r26, GP_26-GP_22(r3) -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r27, GP_27-GP_26(r3) - stw r28, GP_28-GP_26(r3) - stw r29, GP_29-GP_26(r3) - stw r30, GP_30-GP_26(r3) - stw r31, GP_31-GP_26(r3) -#endif -#if ( PPC_USE_DATA_CACHE ) - dcbt r0, r4 -#endif - mfcr r6 - stw r6, GP_CR-GP_26(r3) - mflr r7 - stw r7, GP_PC-GP_26(r3) - mfmsr r8 - stw r8, GP_MSR-GP_26(r3) - -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r1, GP_1(r4) - lwz r2, GP_2(r4) -#if (PPC_USE_MULTIPLE == 1) - addi r4, r4, GP_15 -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - addi r4, r4, GP_19-GP_15 -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - addi r4, r4, GP_23-GP_19 -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - addi r4, r4, GP_27-GP_23 -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lmw r13, GP_13-GP_27(r4) -#else - lwz r13, GP_13(r4) - lwz r14, GP_14(r4) - lwzu r15, GP_15(r4) -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r16, GP_16-GP_15(r4) - lwz r17, GP_17-GP_15(r4) - lwz r18, GP_18-GP_15(r4) - lwzu r19, GP_19-GP_15(r4) -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r20, GP_20-GP_19(r4) - lwz r21, GP_21-GP_19(r4) - lwz r22, GP_22-GP_19(r4) - lwzu r23, GP_23-GP_19(r4) -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r24, GP_24-GP_23(r4) - lwz r25, GP_25-GP_23(r4) - lwz r26, GP_26-GP_23(r4) - lwzu r27, GP_27-GP_23(r4) -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r28, GP_28-GP_27(r4) - lwz r29, GP_29-GP_27(r4) - lwz r30, GP_30-GP_27(r4) - lwz r31, GP_31-GP_27(r4) -#endif - lwz r6, GP_CR-GP_27(r4) - lwz r7, GP_PC-GP_27(r4) - lwz r8, GP_MSR-GP_27(r4) - mtcrf 255, r6 - mtlr r7 - mtmsr r8 -#endif -#if (PPC_CACHE_ALIGNMENT == 32) - /* This assumes that all the registers are in the given order */ - li r5, 32 - addi r3,r3,-4 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r1, GP_1+4(r3) - stw r2, GP_2+4(r3) -#if (PPC_USE_MULTIPLE == 1) - addi r3, r3, GP_18+4 -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stmw r13, GP_13-GP_18(r3) -#else - stw r13, GP_13+4(r3) - stw r14, GP_14+4(r3) - stw r15, GP_15+4(r3) - stw r16, GP_16+4(r3) - stw r17, GP_17+4(r3) - stwu r18, GP_18+4(r3) -#if ( PPC_USE_DATA_CACHE ) - dcbz r5, r3 -#endif - stw r19, GP_19-GP_18(r3) - stw r20, GP_20-GP_18(r3) - stw r21, GP_21-GP_18(r3) - stw r22, GP_22-GP_18(r3) - stw r23, GP_23-GP_18(r3) - stw r24, GP_24-GP_18(r3) - stw r25, GP_25-GP_18(r3) - stw r26, GP_26-GP_18(r3) - stw r27, GP_27-GP_18(r3) - stw r28, GP_28-GP_18(r3) - stw r29, GP_29-GP_18(r3) - stw r30, GP_30-GP_18(r3) - stw r31, GP_31-GP_18(r3) -#endif -#if ( PPC_USE_DATA_CACHE ) - dcbt r0, r4 -#endif - mfcr r6 - stw r6, GP_CR-GP_18(r3) - mflr r7 - stw r7, GP_PC-GP_18(r3) - mfmsr r8 - stw r8, GP_MSR-GP_18(r3) - -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r1, GP_1(r4) - lwz r2, GP_2(r4) -#if (PPC_USE_MULTIPLE == 1) - addi r4, r4, GP_19 -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lmw r13, GP_13-GP_19(r4) -#else - lwz r13, GP_13(r4) - 
lwz r14, GP_14(r4) - lwz r15, GP_15(r4) - lwz r16, GP_16(r4) - lwz r17, GP_17(r4) - lwz r18, GP_18(r4) - lwzu r19, GP_19(r4) -#if ( PPC_USE_DATA_CACHE ) - dcbt r5, r4 -#endif - lwz r20, GP_20-GP_19(r4) - lwz r21, GP_21-GP_19(r4) - lwz r22, GP_22-GP_19(r4) - lwz r23, GP_23-GP_19(r4) - lwz r24, GP_24-GP_19(r4) - lwz r25, GP_25-GP_19(r4) - lwz r26, GP_26-GP_19(r4) - lwz r27, GP_27-GP_19(r4) - lwz r28, GP_28-GP_19(r4) - lwz r29, GP_29-GP_19(r4) - lwz r30, GP_30-GP_19(r4) - lwz r31, GP_31-GP_19(r4) -#endif - lwz r6, GP_CR-GP_19(r4) - lwz r7, GP_PC-GP_19(r4) - lwz r8, GP_MSR-GP_19(r4) - mtcrf 255, r6 - mtlr r7 - mtmsr r8 -#endif - blr - -/* - * _CPU_Context_restore - * - * This routine is generallu used only to restart self in an - * efficient manner. It may simply be a label in _CPU_Context_switch. - * - * NOTE: May be unnecessary to reload some registers. - */ -/* - * ACB: Don't worry about cache optimisation here - this is not THAT critical. - */ - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_CPU_Context_restore) -PROC (_CPU_Context_restore): - lwz r5, GP_CR(r3) - lwz r6, GP_PC(r3) - lwz r7, GP_MSR(r3) - mtcrf 255, r5 - mtlr r6 - mtmsr r7 - lwz r1, GP_1(r3) - lwz r2, GP_2(r3) -#if (PPC_USE_MULTIPLE == 1) - lmw r13, GP_13(r3) -#else - lwz r13, GP_13(r3) - lwz r14, GP_14(r3) - lwz r15, GP_15(r3) - lwz r16, GP_16(r3) - lwz r17, GP_17(r3) - lwz r18, GP_18(r3) - lwz r19, GP_19(r3) - lwz r20, GP_20(r3) - lwz r21, GP_21(r3) - lwz r22, GP_22(r3) - lwz r23, GP_23(r3) - lwz r24, GP_24(r3) - lwz r25, GP_25(r3) - lwz r26, GP_26(r3) - lwz r27, GP_27(r3) - lwz r28, GP_28(r3) - lwz r29, GP_29(r3) - lwz r30, GP_30(r3) - lwz r31, GP_31(r3) -#endif - - blr - -/* Individual interrupt prologues look like this: - * #if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27) - * #if (PPC_HAS_FPU) - * stwu r1, -(20*4 + 18*8 + IP_END)(r1) - * #else - * stwu r1, -(20*4 + IP_END)(r1) - * #endif - * #else - * stwu r1, -(IP_END)(r1) - * #endif - * stw r0, IP_0(r1) - * - * li r0, vectornum - * b PROC (_ISR_Handler{,C}) - */ - -/* void __ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * The vector number is in r0. R0 has already been stacked. - * - */ - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_ISR_Handler) -PROC (_ISR_Handler): -#define LABEL(x) x -/* XXX ?? -#define MTSAVE(x) mtspr sprg0, x -#define MFSAVE(x) mfspr x, sprg0 -*/ -#define MTPC(x) mtspr srr0, x -#define MFPC(x) mfspr x, srr0 -#define MTMSR(x) mtspr srr1, x -#define MFMSR(x) mfspr x, srr1 - - #include "irq_stub.s" - rfi - -#if (PPC_HAS_RFCI == 1) -/* void __ISR_HandlerC() - * - * This routine provides the RTEMS interrupt management. - * For critical interrupts - * - */ - ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER) - PUBLIC_PROC (_ISR_HandlerC) -PROC (_ISR_HandlerC): -#undef LABEL -#undef MTSAVE -#undef MFSAVE -#undef MTPC -#undef MFPC -#undef MTMSR -#undef MFMSR -#define LABEL(x) x##_C -/* XXX?? -#define MTSAVE(x) mtspr sprg1, x -#define MFSAVE(x) mfspr x, sprg1 -*/ -#define MTPC(x) mtspr srr2, x -#define MFPC(x) mfspr x, srr2 -#define MTMSR(x) mtspr srr3, x -#define MFMSR(x) mfspr x, srr3 - #include "irq_stub.s" - rfci -#endif - -/* PowerOpen descriptors for indirect function calls. 
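
Under the PowerOpen ABI a "function pointer" is a three-word descriptor rather than a raw code address; the irq_stub and rtems.S code in this patch loads exactly these three words (entry into LR, TOC into r2, environment into r11). An assumed C mirror:

    typedef struct {
      void *entry;  /* code address, moved to LR before blrl */
      void *toc;    /* table of contents, loaded into r2     */
      void *env;    /* environment pointer, loaded into r11  */
    } poweropen_descriptor_sketch;
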
- */ - -#if (PPC_ABI == PPC_ABI_POWEROPEN) - DESCRIPTOR (_CPU_Context_save_fp) - DESCRIPTOR (_CPU_Context_restore_fp) - DESCRIPTOR (_CPU_Context_switch) - DESCRIPTOR (_CPU_Context_restore) - DESCRIPTOR (_ISR_Handler) -#if (PPC_HAS_RFCI == 1) - DESCRIPTOR (_ISR_HandlerC) -#endif -#endif diff --git a/c/src/exec/score/cpu/powerpc/irq_stub.S b/c/src/exec/score/cpu/powerpc/irq_stub.S new file mode 100644 index 0000000000..76c8927305 --- /dev/null +++ b/c/src/exec/score/cpu/powerpc/irq_stub.S @@ -0,0 +1,268 @@ +/* + * This file contains the interrupt handler assembly code for the PowerPC + * implementation of RTEMS. It is #included from cpu_asm.s. + * + * Author: Andrew Bray + * + * COPYRIGHT (c) 1995 by i-cubed ltd. + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of i-cubed limited not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * i-cubed limited makes no representations about the suitability + * of this software for any purpose. + * + * $Id$ + */ + +/* void __ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * The vector number is in r0. R0 has already been stacked. + * + */ + PUBLIC_VAR (_CPU_IRQ_info ) + + /* Finish off the interrupt frame */ + stw r2, IP_2(r1) + stw r3, IP_3(r1) + stw r4, IP_4(r1) + stw r5, IP_5(r1) + stw r6, IP_6(r1) + stw r7, IP_7(r1) + stw r8, IP_8(r1) + stw r9, IP_9(r1) + stw r10, IP_10(r1) + stw r11, IP_11(r1) + stw r12, IP_12(r1) + stw r13, IP_13(r1) + stmw r28, IP_28(r1) + mfcr r5 + mfctr r6 + mfxer r7 + mflr r8 + MFPC (r9) + MFMSR (r10) + /* Establish addressing */ +#if (PPC_USE_SPRG) + mfspr r11, sprg3 +#else + lis r11,_CPU_IRQ_info@ha + addi r11,r11,_CPU_IRQ_info@l +#endif + dcbt r0, r11 + stw r5, IP_CR(r1) + stw r6, IP_CTR(r1) + stw r7, IP_XER(r1) + stw r8, IP_LR(r1) + stw r9, IP_PC(r1) + stw r10, IP_MSR(r1) + + lwz r30, Vector_table(r11) + slwi r4,r0,2 + lwz r28, Nest_level(r11) + add r4, r4, r30 + + lwz r30, 0(r28) + mr r3, r0 + lwz r31, Stack(r11) + /* + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * if ( _ISR_Nest_level == 0 ) + * switch to software interrupt stack + * #endif + */ + /* Switch stacks, here we must prevent ALL interrupts */ +#if (PPC_USE_SPRG) + mfmsr r5 + mfspr r6, sprg2 +#else + lwz r6,msr_initial(r11) + lis r5,~PPC_MSR_DISABLE_MASK@ha + ori r5,r5,~PPC_MSR_DISABLE_MASK@l + and r6,r6,r5 + mfmsr r5 +#endif + mtmsr r6 + cmpwi r30, 0 + lwz r29, Disable_level(r11) + subf r31,r1,r31 + bne LABEL (nested) + stwux r1,r1,r31 +LABEL (nested): + /* + * _ISR_Nest_level++; + */ + lwz r31, 0(r29) + addi r30,r30,1 + stw r30,0(r28) + /* From here on out, interrupts can be re-enabled. RTEMS + * convention says not. 
+ */ + lwz r4,0(r4) + /* + * _Thread_Dispatch_disable_level++; + */ + addi r31,r31,1 + stw r31, 0(r29) +/* SCE 980217 + * + * We need address translation ON when we call our ISR routine + + mtmsr r5 + + */ + + /* + * (*_ISR_Vector_table[ vector ])( vector ); + */ +#if (PPC_ABI == PPC_ABI_POWEROPEN) + lwz r6,0(r4) + lwz r2,4(r4) + mtlr r6 + lwz r11,8(r4) +#endif +#if (PPC_ABI == PPC_ABI_GCC27) + lwz r2, Default_r2(r11) + mtlr r4 + #lwz r2, 0(r2) +#endif +#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI) + mtlr r4 + lwz r2, Default_r2(r11) + lwz r13, Default_r13(r11) + #lwz r2, 0(r2) + #lwz r13, 0(r13) +#endif + mr r4,r1 + blrl + /* NOP marker for debuggers */ + or r6,r6,r6 + + /* We must re-disable the interrupts */ +#if (PPC_USE_SPRG) + mfspr r11, sprg3 + mfspr r0, sprg2 +#else + lis r11,_CPU_IRQ_info@ha + addi r11,r11,_CPU_IRQ_info@l + lwz r0,msr_initial(r11) + lis r30,~PPC_MSR_DISABLE_MASK@ha + ori r30,r30,~PPC_MSR_DISABLE_MASK@l + and r0,r0,r30 +#endif + mtmsr r0 + lwz r30, 0(r28) + lwz r31, 0(r29) + + /* + * if (--Thread_Dispatch_disable,--_ISR_Nest_level) + * goto easy_exit; + */ + addi r30, r30, -1 + cmpwi r30, 0 + addi r31, r31, -1 + stw r30, 0(r28) + stw r31, 0(r29) + bne LABEL (easy_exit) + cmpwi r31, 0 + + lwz r30, Switch_necessary(r11) + + /* + * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) + * restore stack + * #endif + */ + lwz r1,0(r1) + bne LABEL (easy_exit) + lwz r30, 0(r30) + lwz r31, Signal(r11) + + /* + * if ( _Context_Switch_necessary ) + * goto switch + */ + cmpwi r30, 0 + lwz r28, 0(r31) + li r6,0 + bne LABEL (switch) + /* + * if ( !_ISR_Signals_to_thread_executing ) + * goto easy_exit + * _ISR_Signals_to_thread_executing = 0; + */ + cmpwi r28, 0 + beq LABEL (easy_exit) + + /* + * switch: + * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch + */ +LABEL (switch): + stw r6, 0(r31) + /* Re-enable interrupts */ + lwz r0, IP_MSR(r1) +#if (PPC_ABI == PPC_ABI_POWEROPEN) + lwz r2, Dispatch_r2(r11) +#else + /* R2 and R13 still hold their values from the last call */ +#endif + mtmsr r0 + bl SYM (_Thread_Dispatch) + /* NOP marker for debuggers */ + or r6,r6,r6 + /* + * prepare to get out of interrupt + */ + /* Re-disable IRQs */ +#if (PPC_USE_SPRG) + mfspr r0, sprg2 +#else + lis r11,_CPU_IRQ_info@ha + addi r11,r11,_CPU_IRQ_info@l + lwz r0,msr_initial(r11) + lis r5,~PPC_MSR_DISABLE_MASK@ha + ori r5,r5,~PPC_MSR_DISABLE_MASK@l + and r0,r0,r5 +#endif + mtmsr r0 + + /* + * easy_exit: + * prepare to get out of interrupt + * return from interrupt + */ +LABEL (easy_exit): + lwz r5, IP_CR(r1) + lwz r6, IP_CTR(r1) + lwz r7, IP_XER(r1) + lwz r8, IP_LR(r1) + lwz r9, IP_PC(r1) + lwz r10, IP_MSR(r1) + mtcrf 255,r5 + mtctr r6 + mtxer r7 + mtlr r8 + MTPC (r9) + MTMSR (r10) + lwz r0, IP_0(r1) + lwz r2, IP_2(r1) + lwz r3, IP_3(r1) + lwz r4, IP_4(r1) + lwz r5, IP_5(r1) + lwz r6, IP_6(r1) + lwz r7, IP_7(r1) + lwz r8, IP_8(r1) + lwz r9, IP_9(r1) + lwz r10, IP_10(r1) + lwz r11, IP_11(r1) + lwz r12, IP_12(r1) + lwz r13, IP_13(r1) + lmw r28, IP_28(r1) + lwz r1, 0(r1) diff --git a/c/src/exec/score/cpu/powerpc/irq_stub.s b/c/src/exec/score/cpu/powerpc/irq_stub.s deleted file mode 100644 index 76c8927305..0000000000 --- a/c/src/exec/score/cpu/powerpc/irq_stub.s +++ /dev/null @@ -1,268 +0,0 @@ -/* - * This file contains the interrupt handler assembly code for the PowerPC - * implementation of RTEMS. It is #included from cpu_asm.s. - * - * Author: Andrew Bray - * - * COPYRIGHT (c) 1995 by i-cubed ltd. 
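
Stitching together the commented fragments of the stub above, its exit decision reads roughly as follows in C. Sketch only; the globals are the ones reached through _CPU_IRQ_info, with assumed types.

    extern volatile unsigned long _ISR_Nest_level;
    extern volatile unsigned long _Thread_Dispatch_disable_level;
    extern volatile unsigned long _Context_Switch_necessary;
    extern volatile unsigned long _ISR_Signals_to_thread_executing;
    extern void _Thread_Dispatch( void );

    void irq_stub_exit_sketch( void )
    {
      _ISR_Nest_level--;
      _Thread_Dispatch_disable_level--;
      if ( _ISR_Nest_level != 0 )
        goto easy_exit;                 /* still nested                     */
      /* the real code switches back off the interrupt stack here */
      if ( _Thread_Dispatch_disable_level != 0 )
        goto easy_exit;                 /* dispatching is disabled          */
      if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
        _ISR_Signals_to_thread_executing = 0;
        _Thread_Dispatch();             /* LABEL (switch) in the stub       */
      }
    easy_exit:
      return;                           /* restore registers, then rfi/rfci */
    }
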
- * - * To anyone who acknowledges that this file is provided "AS IS" - * without any express or implied warranty: - * permission to use, copy, modify, and distribute this file - * for any purpose is hereby granted without fee, provided that - * the above copyright notice and this notice appears in all - * copies, and that the name of i-cubed limited not be used in - * advertising or publicity pertaining to distribution of the - * software without specific, written prior permission. - * i-cubed limited makes no representations about the suitability - * of this software for any purpose. - * - * $Id$ - */ - -/* void __ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * The vector number is in r0. R0 has already been stacked. - * - */ - PUBLIC_VAR (_CPU_IRQ_info ) - - /* Finish off the interrupt frame */ - stw r2, IP_2(r1) - stw r3, IP_3(r1) - stw r4, IP_4(r1) - stw r5, IP_5(r1) - stw r6, IP_6(r1) - stw r7, IP_7(r1) - stw r8, IP_8(r1) - stw r9, IP_9(r1) - stw r10, IP_10(r1) - stw r11, IP_11(r1) - stw r12, IP_12(r1) - stw r13, IP_13(r1) - stmw r28, IP_28(r1) - mfcr r5 - mfctr r6 - mfxer r7 - mflr r8 - MFPC (r9) - MFMSR (r10) - /* Establish addressing */ -#if (PPC_USE_SPRG) - mfspr r11, sprg3 -#else - lis r11,_CPU_IRQ_info@ha - addi r11,r11,_CPU_IRQ_info@l -#endif - dcbt r0, r11 - stw r5, IP_CR(r1) - stw r6, IP_CTR(r1) - stw r7, IP_XER(r1) - stw r8, IP_LR(r1) - stw r9, IP_PC(r1) - stw r10, IP_MSR(r1) - - lwz r30, Vector_table(r11) - slwi r4,r0,2 - lwz r28, Nest_level(r11) - add r4, r4, r30 - - lwz r30, 0(r28) - mr r3, r0 - lwz r31, Stack(r11) - /* - * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) - * if ( _ISR_Nest_level == 0 ) - * switch to software interrupt stack - * #endif - */ - /* Switch stacks, here we must prevent ALL interrupts */ -#if (PPC_USE_SPRG) - mfmsr r5 - mfspr r6, sprg2 -#else - lwz r6,msr_initial(r11) - lis r5,~PPC_MSR_DISABLE_MASK@ha - ori r5,r5,~PPC_MSR_DISABLE_MASK@l - and r6,r6,r5 - mfmsr r5 -#endif - mtmsr r6 - cmpwi r30, 0 - lwz r29, Disable_level(r11) - subf r31,r1,r31 - bne LABEL (nested) - stwux r1,r1,r31 -LABEL (nested): - /* - * _ISR_Nest_level++; - */ - lwz r31, 0(r29) - addi r30,r30,1 - stw r30,0(r28) - /* From here on out, interrupts can be re-enabled. RTEMS - * convention says not. 
- */ - lwz r4,0(r4) - /* - * _Thread_Dispatch_disable_level++; - */ - addi r31,r31,1 - stw r31, 0(r29) -/* SCE 980217 - * - * We need address translation ON when we call our ISR routine - - mtmsr r5 - - */ - - /* - * (*_ISR_Vector_table[ vector ])( vector ); - */ -#if (PPC_ABI == PPC_ABI_POWEROPEN) - lwz r6,0(r4) - lwz r2,4(r4) - mtlr r6 - lwz r11,8(r4) -#endif -#if (PPC_ABI == PPC_ABI_GCC27) - lwz r2, Default_r2(r11) - mtlr r4 - #lwz r2, 0(r2) -#endif -#if (PPC_ABI == PPC_ABI_SVR4 || PPC_ABI == PPC_ABI_EABI) - mtlr r4 - lwz r2, Default_r2(r11) - lwz r13, Default_r13(r11) - #lwz r2, 0(r2) - #lwz r13, 0(r13) -#endif - mr r4,r1 - blrl - /* NOP marker for debuggers */ - or r6,r6,r6 - - /* We must re-disable the interrupts */ -#if (PPC_USE_SPRG) - mfspr r11, sprg3 - mfspr r0, sprg2 -#else - lis r11,_CPU_IRQ_info@ha - addi r11,r11,_CPU_IRQ_info@l - lwz r0,msr_initial(r11) - lis r30,~PPC_MSR_DISABLE_MASK@ha - ori r30,r30,~PPC_MSR_DISABLE_MASK@l - and r0,r0,r30 -#endif - mtmsr r0 - lwz r30, 0(r28) - lwz r31, 0(r29) - - /* - * if (--Thread_Dispatch_disable,--_ISR_Nest_level) - * goto easy_exit; - */ - addi r30, r30, -1 - cmpwi r30, 0 - addi r31, r31, -1 - stw r30, 0(r28) - stw r31, 0(r29) - bne LABEL (easy_exit) - cmpwi r31, 0 - - lwz r30, Switch_necessary(r11) - - /* - * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) - * restore stack - * #endif - */ - lwz r1,0(r1) - bne LABEL (easy_exit) - lwz r30, 0(r30) - lwz r31, Signal(r11) - - /* - * if ( _Context_Switch_necessary ) - * goto switch - */ - cmpwi r30, 0 - lwz r28, 0(r31) - li r6,0 - bne LABEL (switch) - /* - * if ( !_ISR_Signals_to_thread_executing ) - * goto easy_exit - * _ISR_Signals_to_thread_executing = 0; - */ - cmpwi r28, 0 - beq LABEL (easy_exit) - - /* - * switch: - * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch - */ -LABEL (switch): - stw r6, 0(r31) - /* Re-enable interrupts */ - lwz r0, IP_MSR(r1) -#if (PPC_ABI == PPC_ABI_POWEROPEN) - lwz r2, Dispatch_r2(r11) -#else - /* R2 and R13 still hold their values from the last call */ -#endif - mtmsr r0 - bl SYM (_Thread_Dispatch) - /* NOP marker for debuggers */ - or r6,r6,r6 - /* - * prepare to get out of interrupt - */ - /* Re-disable IRQs */ -#if (PPC_USE_SPRG) - mfspr r0, sprg2 -#else - lis r11,_CPU_IRQ_info@ha - addi r11,r11,_CPU_IRQ_info@l - lwz r0,msr_initial(r11) - lis r5,~PPC_MSR_DISABLE_MASK@ha - ori r5,r5,~PPC_MSR_DISABLE_MASK@l - and r0,r0,r5 -#endif - mtmsr r0 - - /* - * easy_exit: - * prepare to get out of interrupt - * return from interrupt - */ -LABEL (easy_exit): - lwz r5, IP_CR(r1) - lwz r6, IP_CTR(r1) - lwz r7, IP_XER(r1) - lwz r8, IP_LR(r1) - lwz r9, IP_PC(r1) - lwz r10, IP_MSR(r1) - mtcrf 255,r5 - mtctr r6 - mtxer r7 - mtlr r8 - MTPC (r9) - MTMSR (r10) - lwz r0, IP_0(r1) - lwz r2, IP_2(r1) - lwz r3, IP_3(r1) - lwz r4, IP_4(r1) - lwz r5, IP_5(r1) - lwz r6, IP_6(r1) - lwz r7, IP_7(r1) - lwz r8, IP_8(r1) - lwz r9, IP_9(r1) - lwz r10, IP_10(r1) - lwz r11, IP_11(r1) - lwz r12, IP_12(r1) - lwz r13, IP_13(r1) - lmw r28, IP_28(r1) - lwz r1, 0(r1) diff --git a/c/src/exec/score/cpu/powerpc/rtems.S b/c/src/exec/score/cpu/powerpc/rtems.S new file mode 100644 index 0000000000..b653152411 --- /dev/null +++ b/c/src/exec/score/cpu/powerpc/rtems.S @@ -0,0 +1,132 @@ +/* rtems.s + * + * This file contains the single entry point code for + * the PowerPC implementation of RTEMS. + * + * Author: Andrew Bray + * + * COPYRIGHT (c) 1995 by i-cubed ltd. 
+ * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of i-cubed limited not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. + * i-cubed limited makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/cpu/no_cpu/rtems.c: + * + * COPYRIGHT (c) 1989-1997. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may in + * the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * $Id$ + */ + +#include + + BEGIN_CODE +/* + * RTEMS + * + * This routine jumps to the directive indicated in r11. + * This routine is used when RTEMS is linked by itself and placed + * in ROM. This routine is the first address in the ROM space for + * RTEMS. The user "calls" this address with the directive arguments + * in the normal place. + * This routine then jumps indirectly to the correct directive + * preserving the arguments. The directive should not realize + * it has been "wrapped" in this way. The table "_Entry_points" + * is used to look up the directive. + */ + + ALIGN (4, 2) + PUBLIC_PROC (RTEMS) +PROC (RTEMS): +#if (PPC_ABI == PPC_ABI_POWEROPEN) + mflr r0 + stw r0, 8(r1) + stwu r1, -64(r1) + + /* Establish addressing */ + bl base +base: + mflr r12 + addi r12, r12, tabaddr - base + + lwz r12, Entry_points-abase(r12) + slwi r11, r11, 2 + lwzx r12, r12, r11 + + stw r2, 56(r1) + lwz r0, 0(r12) + mtlr r0 + lwz r2, 4(r12) + lwz r11, 8(r12) + blrl + lwz r2, 56(r1) + addi r1, r1, 64 + lwz r0, 8(r1) + mtlr r0 +#else + mflr r0 + stw r0, 4(r1) + stwu r1, -16(r1) + + /* Establish addressing */ + bl base +base: + mflr r12 + addi r12, r12, tabaddr - base + + lwz r12, Entry_points-abase(r12) + slwi r11, r11, 2 + lwzx r11, r12, r11 + + stw r2, 8(r1) +#if (PPC_ABI != PPC_ABI_GCC27) + stw r13, 12(r1) +#endif + mtlr r11 + lwz r11, irqinfo-abase(r12) + lwz r2, 0(r11) +#if (PPC_ABI != PPC_ABI_GCC27) + lwz r13, 4(r11) +#endif + blrl + lwz r2, 8(r1) +#if (PPC_ABI != PPC_ABI_GCC27) + lwz r13, 12(r1) +#endif + addi r1, r1, 16 + lwz r0, 4(r1) + mtlr r0 +#endif + blr + + + /* Addressability stuff */ +tabaddr: +abase: + EXTERN_VAR (_Entry_points) +Entry_points: + EXT_SYM_REF (_Entry_points) +#if (PPC_ABI != PPC_ABI_POWEROPEN) + EXTERN_VAR (_CPU_IRQ_info) +irqinfo: + EXT_SYM_REF (_CPU_IRQ_info) +#endif + +#if (PPC_ABI == PPC_ABI_POWEROPEN) + DESCRIPTOR (RTEMS) +#endif + + diff --git a/c/src/exec/score/cpu/powerpc/rtems.s b/c/src/exec/score/cpu/powerpc/rtems.s deleted file mode 100644 index b653152411..0000000000 --- a/c/src/exec/score/cpu/powerpc/rtems.s +++ /dev/null @@ -1,132 +0,0 @@ -/* rtems.s - * - * This file contains the single entry point code for - * the PowerPC implementation of RTEMS. - * - * Author: Andrew Bray - * - * COPYRIGHT (c) 1995 by i-cubed ltd. 
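The wrapping described above amounts to an indirect call through the _Entry_points table, indexed by the directive number that arrives in r11 (g1 in the SPARC version later in this patch). A hedged C analogue follows; the entry signature is a placeholder, since the wrapper deliberately passes the arguments through without inspecting them.

    /* Placeholder signature: the real directives keep their own
     * prototypes and the wrapper never touches the arguments. */
    typedef long (*directive_entry)(void *args);

    extern directive_entry _Entry_points[];

    /* Hedged sketch of the RTEMS single entry point, not the port's code. */
    static long rtems_entry_sketch(unsigned directive, void *args)
    {
      /* "slwi r11,r11,2" plus an indexed load == table[directive] */
      return _Entry_points[directive](args);
    }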
- * - * To anyone who acknowledges that this file is provided "AS IS" - * without any express or implied warranty: - * permission to use, copy, modify, and distribute this file - * for any purpose is hereby granted without fee, provided that - * the above copyright notice and this notice appears in all - * copies, and that the name of i-cubed limited not be used in - * advertising or publicity pertaining to distribution of the - * software without specific, written prior permission. - * i-cubed limited makes no representations about the suitability - * of this software for any purpose. - * - * Derived from c/src/exec/cpu/no_cpu/rtems.c: - * - * COPYRIGHT (c) 1989-1997. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may in - * the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * $Id$ - */ - -#include - - BEGIN_CODE -/* - * RTEMS - * - * This routine jumps to the directive indicated in r11. - * This routine is used when RTEMS is linked by itself and placed - * in ROM. This routine is the first address in the ROM space for - * RTEMS. The user "calls" this address with the directive arguments - * in the normal place. - * This routine then jumps indirectly to the correct directive - * preserving the arguments. The directive should not realize - * it has been "wrapped" in this way. The table "_Entry_points" - * is used to look up the directive. - */ - - ALIGN (4, 2) - PUBLIC_PROC (RTEMS) -PROC (RTEMS): -#if (PPC_ABI == PPC_ABI_POWEROPEN) - mflr r0 - stw r0, 8(r1) - stwu r1, -64(r1) - - /* Establish addressing */ - bl base -base: - mflr r12 - addi r12, r12, tabaddr - base - - lwz r12, Entry_points-abase(r12) - slwi r11, r11, 2 - lwzx r12, r12, r11 - - stw r2, 56(r1) - lwz r0, 0(r12) - mtlr r0 - lwz r2, 4(r12) - lwz r11, 8(r12) - blrl - lwz r2, 56(r1) - addi r1, r1, 64 - lwz r0, 8(r1) - mtlr r0 -#else - mflr r0 - stw r0, 4(r1) - stwu r1, -16(r1) - - /* Establish addressing */ - bl base -base: - mflr r12 - addi r12, r12, tabaddr - base - - lwz r12, Entry_points-abase(r12) - slwi r11, r11, 2 - lwzx r11, r12, r11 - - stw r2, 8(r1) -#if (PPC_ABI != PPC_ABI_GCC27) - stw r13, 12(r1) -#endif - mtlr r11 - lwz r11, irqinfo-abase(r12) - lwz r2, 0(r11) -#if (PPC_ABI != PPC_ABI_GCC27) - lwz r13, 4(r11) -#endif - blrl - lwz r2, 8(r1) -#if (PPC_ABI != PPC_ABI_GCC27) - lwz r13, 12(r1) -#endif - addi r1, r1, 16 - lwz r0, 4(r1) - mtlr r0 -#endif - blr - - - /* Addressability stuff */ -tabaddr: -abase: - EXTERN_VAR (_Entry_points) -Entry_points: - EXT_SYM_REF (_Entry_points) -#if (PPC_ABI != PPC_ABI_POWEROPEN) - EXTERN_VAR (_CPU_IRQ_info) -irqinfo: - EXT_SYM_REF (_CPU_IRQ_info) -#endif - -#if (PPC_ABI == PPC_ABI_POWEROPEN) - DESCRIPTOR (RTEMS) -#endif - - diff --git a/c/src/exec/score/cpu/sh/Makefile.in b/c/src/exec/score/cpu/sh/Makefile.in index 54e1ed15ec..079f3c488b 100644 --- a/c/src/exec/score/cpu/sh/Makefile.in +++ b/c/src/exec/score/cpu/sh/Makefile.in @@ -27,11 +27,11 @@ H_FILES= \ EXTERNAL_H_FILES = \ $(srcdir)/asm.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S # Normally cpu_asm and rtems are assembly files S_PIECES= -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git 
a/c/src/exec/score/cpu/sparc/Makefile.in b/c/src/exec/score/cpu/sparc/Makefile.in index 565128f2c3..fd888a472e 100644 --- a/c/src/exec/score/cpu/sparc/Makefile.in +++ b/c/src/exec/score/cpu/sparc/Makefile.in @@ -20,10 +20,10 @@ H_FILES=$(srcdir)/cpu.h $(srcdir)/sparc.h $(srcdir)/sparctypes.h # H_FILES that get installed externally EXTERNAL_H_FILES = $(srcdir)/asm.h $(srcdir)/erc32.h -# Assembly source names, if any, go here -- minus the .s +# Assembly source names, if any, go here -- minus the .S S_PIECES=cpu_asm rtems -S_FILES=$(S_PIECES:%=%.s) -S_O_FILES=$(S_FILES:%.s=${ARCH}/%.o) +S_FILES=$(S_PIECES:%=%.S) +S_O_FILES=$(S_FILES:%.S=${ARCH}/%.o) SRCS=$(C_FILES) $(CC_FILES) $(H_FILES) $(S_FILES) $(EXTERNAL_H_FILES) OBJS=$(C_O_FILES) $(CC_O_FILES) $(S_O_FILES) diff --git a/c/src/exec/score/cpu/sparc/cpu_asm.S b/c/src/exec/score/cpu/sparc/cpu_asm.S new file mode 100644 index 0000000000..39962eedeb --- /dev/null +++ b/c/src/exec/score/cpu/sparc/cpu_asm.S @@ -0,0 +1,726 @@ +/* cpu_asm.s + * + * This file contains the basic algorithms for all assembly code used + * in an specific CPU port of RTEMS. These algorithms must be implemented + * in assembly language. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * Ported to ERC32 implementation of the SPARC by On-Line Applications + * Research Corporation (OAR) under contract to the European Space + * Agency (ESA). + * + * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995. + * European Space Agency. + * + * $Id$ + */ + +#include + +#if (SPARC_HAS_FPU == 1) + +/* + * void _CPU_Context_save_fp( + * void **fp_context_ptr + * ) + * + * This routine is responsible for saving the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * NOTE: See the README in this directory for information on the + * management of the "EF" bit in the PSR. + */ + + .align 4 + PUBLIC(_CPU_Context_save_fp) +SYM(_CPU_Context_save_fp): + save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp + + /* + * The following enables the floating point unit. + */ + + mov %psr, %l0 + sethi %hi(SPARC_PSR_EF_MASK), %l1 + or %l1, %lo(SPARC_PSR_EF_MASK), %l1 + or %l0, %l1, %l0 + mov %l0, %psr ! **** ENABLE FLOAT ACCESS **** + + ld [%i0], %l0 + std %f0, [%l0 + FO_F1_OFFSET] + std %f2, [%l0 + F2_F3_OFFSET] + std %f4, [%l0 + F4_F5_OFFSET] + std %f6, [%l0 + F6_F7_OFFSET] + std %f8, [%l0 + F8_F9_OFFSET] + std %f10, [%l0 + F1O_F11_OFFSET] + std %f12, [%l0 + F12_F13_OFFSET] + std %f14, [%l0 + F14_F15_OFFSET] + std %f16, [%l0 + F16_F17_OFFSET] + std %f18, [%l0 + F18_F19_OFFSET] + std %f20, [%l0 + F2O_F21_OFFSET] + std %f22, [%l0 + F22_F23_OFFSET] + std %f24, [%l0 + F24_F25_OFFSET] + std %f26, [%l0 + F26_F27_OFFSET] + std %f28, [%l0 + F28_F29_OFFSET] + std %f30, [%l0 + F3O_F31_OFFSET] + st %fsr, [%l0 + FSR_OFFSET] + ret + restore + +/* + * void _CPU_Context_restore_fp( + * void **fp_context_ptr + * ) + * + * This routine is responsible for restoring the FP context + * at *fp_context_ptr. If the point to load the FP context + * from is changed then the pointer is modified by this routine. + * + * NOTE: See the README in this directory for information on the + * management of the "EF" bit in the PSR. 
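The offsets used by _CPU_Context_save_fp above imply a simple layout for the floating point context area: sixteen double-word stores for the even/odd register pairs, then the FSR. A hedged sketch follows; the field names and packing are assumptions for illustration, not the port's actual Context_Control_fp definition.

    #include <stdint.h>

    typedef struct {
      double f[16];   /* %f0/%f1 ... %f30/%f31, one "std" per pair */
      uint32_t fsr;   /* %fsr, stored last at FSR_OFFSET */
    } fp_context_sketch;

    /* Note the double indirection in the prototype: the routine loads
     * the save-area address from *fp_context_ptr ("ld [%i0], %l0"). */
    void _CPU_Context_save_fp(void **fp_context_ptr);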
+ */ + + .align 4 + PUBLIC(_CPU_Context_restore_fp) +SYM(_CPU_Context_restore_fp): + save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp + + /* + * The following enables the floating point unit. + */ + + mov %psr, %l0 + sethi %hi(SPARC_PSR_EF_MASK), %l1 + or %l1, %lo(SPARC_PSR_EF_MASK), %l1 + or %l0, %l1, %l0 + mov %l0, %psr ! **** ENABLE FLOAT ACCESS **** + + ld [%i0], %l0 + ldd [%l0 + FO_F1_OFFSET], %f0 + ldd [%l0 + F2_F3_OFFSET], %f2 + ldd [%l0 + F4_F5_OFFSET], %f4 + ldd [%l0 + F6_F7_OFFSET], %f6 + ldd [%l0 + F8_F9_OFFSET], %f8 + ldd [%l0 + F1O_F11_OFFSET], %f10 + ldd [%l0 + F12_F13_OFFSET], %f12 + ldd [%l0 + F14_F15_OFFSET], %f14 + ldd [%l0 + F16_F17_OFFSET], %f16 + ldd [%l0 + F18_F19_OFFSET], %f18 + ldd [%l0 + F2O_F21_OFFSET], %f20 + ldd [%l0 + F22_F23_OFFSET], %f22 + ldd [%l0 + F24_F25_OFFSET], %f24 + ldd [%l0 + F26_F27_OFFSET], %f26 + ldd [%l0 + F28_F29_OFFSET], %f28 + ldd [%l0 + F3O_F31_OFFSET], %f30 + ld [%l0 + FSR_OFFSET], %fsr + ret + restore + +#endif /* SPARC_HAS_FPU */ + +/* + * void _CPU_Context_switch( + * Context_Control *run, + * Context_Control *heir + * ) + * + * This routine performs a normal non-FP context switch. + */ + + .align 4 + PUBLIC(_CPU_Context_switch) +SYM(_CPU_Context_switch): + ! skip g0 + st %g1, [%o0 + G1_OFFSET] ! save the global registers + std %g2, [%o0 + G2_OFFSET] + std %g4, [%o0 + G4_OFFSET] + std %g6, [%o0 + G6_OFFSET] + + std %l0, [%o0 + L0_OFFSET] ! save the local registers + std %l2, [%o0 + L2_OFFSET] + std %l4, [%o0 + L4_OFFSET] + std %l6, [%o0 + L6_OFFSET] + + std %i0, [%o0 + I0_OFFSET] ! save the input registers + std %i2, [%o0 + I2_OFFSET] + std %i4, [%o0 + I4_OFFSET] + std %i6, [%o0 + I6_FP_OFFSET] + + std %o0, [%o0 + O0_OFFSET] ! save the output registers + std %o2, [%o0 + O2_OFFSET] + std %o4, [%o0 + O4_OFFSET] + std %o6, [%o0 + O6_SP_OFFSET] + + rd %psr, %o2 + st %o2, [%o0 + PSR_OFFSET] ! save status register + + /* + * This is entered from _CPU_Context_restore with: + * o1 = context to restore + * o2 = psr + */ + + PUBLIC(_CPU_Context_restore_heir) +SYM(_CPU_Context_restore_heir): + /* + * Flush all windows with valid contents except the current one. + * In examining the set register windows, one may logically divide + * the windows into sets (some of which may be empty) based on their + * current status: + * + * + current (i.e. in use), + * + used (i.e. a restore would not trap) + * + invalid (i.e. 1 in corresponding bit in WIM) + * + unused + * + * Either the used or unused set of windows may be empty. + * + * NOTE: We assume only one bit is set in the WIM at a time. + * + * Given a CWP of 5 and a WIM of 0x1, the registers are divided + * into sets as follows: + * + * + 0 - invalid + * + 1-4 - unused + * + 5 - current + * + 6-7 - used + * + * In this case, we only would save the used windows -- 6 and 7. + * + * Traps are disabled for the same logical period as in a + * flush all windows trap handler. + * + * Register Usage while saving the windows: + * g1 = current PSR + * g2 = current wim + * g3 = CWP + * g4 = wim scratch + * g5 = scratch + */ + + ld [%o1 + PSR_OFFSET], %g1 ! g1 = saved psr + + and %o2, SPARC_PSR_CWP_MASK, %g3 ! g3 = CWP + ! g1 = psr w/o cwp + andn %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1 + or %g1, %g3, %g1 ! g1 = heirs psr + mov %g1, %psr ! restore status register and + ! **** DISABLE TRAPS **** + mov %wim, %g2 ! g2 = wim + mov 1, %g4 + sll %g4, %g3, %g4 ! g4 = WIM mask for CW invalid + +save_frame_loop: + sll %g4, 1, %g5 ! 
rotate the "wim" left 1 + srl %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4 + or %g4, %g5, %g4 ! g4 = wim if we do one restore + + /* + * If a restore would not underflow, then continue. + */ + + andcc %g4, %g2, %g0 ! Any windows to flush? + bnz done_flushing ! No, then continue + nop + + restore ! back one window + + /* + * Now save the window just as if we overflowed to it. + */ + + std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] + std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] + std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] + std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] + + std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] + std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] + std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] + std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] + + ba save_frame_loop + nop + +done_flushing: + + add %g3, 1, %g3 ! calculate desired WIM + and %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3 + mov 1, %g4 + sll %g4, %g3, %g4 ! g4 = new WIM + mov %g4, %wim + + or %g1, SPARC_PSR_ET_MASK, %g1 + mov %g1, %psr ! **** ENABLE TRAPS **** + ! and restore CWP + nop + nop + nop + + ! skip g0 + ld [%o1 + G1_OFFSET], %g1 ! restore the global registers + ldd [%o1 + G2_OFFSET], %g2 + ldd [%o1 + G4_OFFSET], %g4 + ldd [%o1 + G6_OFFSET], %g6 + + ldd [%o1 + L0_OFFSET], %l0 ! restore the local registers + ldd [%o1 + L2_OFFSET], %l2 + ldd [%o1 + L4_OFFSET], %l4 + ldd [%o1 + L6_OFFSET], %l6 + + ldd [%o1 + I0_OFFSET], %i0 ! restore the output registers + ldd [%o1 + I2_OFFSET], %i2 + ldd [%o1 + I4_OFFSET], %i4 + ldd [%o1 + I6_FP_OFFSET], %i6 + + ldd [%o1 + O2_OFFSET], %o2 ! restore the output registers + ldd [%o1 + O4_OFFSET], %o4 + ldd [%o1 + O6_SP_OFFSET], %o6 + ! do o0/o1 last to avoid destroying heir context pointer + ldd [%o1 + O0_OFFSET], %o0 ! overwrite heir pointer + + jmp %o7 + 8 ! return + nop ! delay slot + +/* + * void _CPU_Context_restore( + * Context_Control *new_context + * ) + * + * This routine is generally used only to perform restart self. + * + * NOTE: It is unnecessary to reload some registers. + */ + + .align 4 + PUBLIC(_CPU_Context_restore) +SYM(_CPU_Context_restore): + save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp + rd %psr, %o2 + ba SYM(_CPU_Context_restore_heir) + mov %i0, %o1 ! in the delay slot + +/* + * void _ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * We enter this handler from the 4 instructions in the trap table with + * the following registers assumed to be set as shown: + * + * l0 = PSR + * l1 = PC + * l2 = nPC + * l3 = trap type + * + * NOTE: By an executive defined convention, trap type is between 0 and 255 if + * it is an asynchonous trap and 256 and 511 if it is synchronous. + */ + + .align 4 + PUBLIC(_ISR_Handler) +SYM(_ISR_Handler): + /* + * Fix the return address for synchronous traps. + */ + + andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 + ! Is this a synchronous trap? + be,a win_ovflow ! No, then skip the adjustment + nop ! DELAY + mov %l2, %l1 ! do not return to the instruction + add %l2, 4, %l2 ! indicated + +win_ovflow: + /* + * Save the globals this block uses. + * + * These registers are not restored from the locals. Their contents + * are saved directly from the locals into the ISF below. + */ + + mov %g4, %l4 ! save the globals this block uses + mov %g5, %l5 + + /* + * When at a "window overflow" trap, (wim == (1 << cwp)). + * If we get here like that, then process a window overflow. + */ + + rd %wim, %g4 + srl %g4, %l0, %g5 ! g5 = win >> cwp ; shift count and CWP + ! are LS 5 bits ; how convenient :) + cmp %g5, 1 ! 
Is this an invalid window? + bne dont_do_the_window ! No, then skip all this stuff + ! we are using the delay slot + + /* + * The following is same as a 1 position right rotate of WIM + */ + + srl %g4, 1, %g5 ! g5 = WIM >> 1 + sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4 + ! g4 = WIM << (Number Windows - 1) + or %g4, %g5, %g4 ! g4 = (WIM >> 1) | + ! (WIM << (Number Windows - 1)) + + /* + * At this point: + * + * g4 = the new WIM + * g5 is free + */ + + /* + * Since we are tinkering with the register windows, we need to + * make sure that all the required information is in global registers. + */ + + save ! Save into the window + wr %g4, 0, %wim ! WIM = new WIM + nop ! delay slots + nop + nop + + /* + * Now save the window just as if we overflowed to it. + */ + + std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] + std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] + std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] + std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] + + std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] + std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] + std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] + std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] + + restore + nop + +dont_do_the_window: + /* + * Global registers %g4 and %g5 are saved directly from %l4 and + * %l5 directly into the ISF below. + */ + +save_isf: + + /* + * Save the state of the interrupted task -- especially the global + * registers -- in the Interrupt Stack Frame. Note that the ISF + * includes a regular minimum stack frame which will be used if + * needed by register window overflow and underflow handlers. + * + * REGISTERS SAME AS AT _ISR_Handler + */ + + sub %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp + ! make space for ISF + + std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC + st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC + st %g1, [%sp + ISF_G1_OFFSET] ! save g1 + std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3 + std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above + std %g6, [%sp + ISF_G6_OFFSET] ! save g6, g7 + + std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1 + std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3 + std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5 + std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7 + + rd %y, %g1 + st %g1, [%sp + ISF_Y_OFFSET] ! save y + + mov %sp, %o1 ! 2nd arg to ISR Handler + + /* + * Increment ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: + * + * l4 = _Thread_Dispatch_disable_level pointer + * l5 = _ISR_Nest_level pointer + * l6 = _Thread_Dispatch_disable_level value + * l7 = _ISR_Nest_level value + * + * NOTE: It is assumed that l4 - l7 will be preserved until the ISR + * nest and thread dispatch disable levels are unnested. + */ + + sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4 + ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6 + sethi %hi(SYM(_ISR_Nest_level)), %l5 + ld [%l5 + %lo(SYM(_ISR_Nest_level))], %l7 + + add %l6, 1, %l6 + st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] + + add %l7, 1, %l7 + st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] + + /* + * If ISR nest level was zero (now 1), then switch stack. + */ + + mov %sp, %fp + subcc %l7, 1, %l7 ! outermost interrupt handler? + bnz dont_switch_stacks ! No, then do not switch stacks + + sethi %hi(SYM(_CPU_Interrupt_stack_high)), %g4 + ld [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp + +dont_switch_stacks: + /* + * Make sure we have a place on the stack for the window overflow + * trap handler to write into. At this point it is safe to + * enable traps again. 
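The "1 position right rotate of WIM" in win_ovflow above is worth seeing in plain C: exactly one bit is set among the low SPARC_NUMBER_OF_REGISTER_WINDOWS bits, so the rotate marks the next window invalid. A hedged model follows; the window count of 8 is the assumed ERC32 value.

    #include <stdint.h>

    #define NWINDOWS 8u   /* SPARC_NUMBER_OF_REGISTER_WINDOWS, assumed */

    /* Hedged model of the srl/sll/or sequence above. */
    static uint32_t wim_rotate_right(uint32_t wim)
    {
      uint32_t r = (wim >> 1) | (wim << (NWINDOWS - 1u));
      /* writes to %wim ignore bits for unimplemented windows, so
       * model that hardware truncation with an explicit mask */
      return r & ((1u << NWINDOWS) - 1u);
    }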
+ */ + + sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp + + /* + * Check if we have an external interrupt (trap 0x11 - 0x1f). If so, + * set the PIL in the %psr to mask off interrupts with lower priority. + * The original %psr in %l0 is not modified since it will be restored + * when the interrupt handler returns. + */ + + mov %l0, %g5 + subcc %l3, 0x11, %g0 + bl dont_fix_pil + subcc %l3, 0x1f, %g0 + bg dont_fix_pil + sll %l3, 8, %g4 + and %g4, SPARC_PSR_PIL_MASK, %g4 + andn %l0, SPARC_PSR_PIL_MASK, %g5 + or %g4, %g5, %g5 +dont_fix_pil: + wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS **** + + /* + * Vector to user's handler. + * + * NOTE: TBR may no longer have vector number in it since + * we just enabled traps. It is definitely in l3. + */ + + sethi %hi(SYM(_ISR_Vector_table)), %g4 + or %g4, %lo(SYM(_ISR_Vector_table)), %g4 + and %l3, 0xFF, %g5 ! remove synchronous trap indicator + sll %g5, 2, %g5 ! g5 = offset into table + ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ] + + + ! o1 = 2nd arg = address of the ISF + ! WAS LOADED WHEN ISF WAS SAVED!!! + mov %l3, %o0 ! o0 = 1st arg = vector number + call %g4, 0 + nop ! delay slot + + /* + * Redisable traps so we can finish up the interrupt processing. + * This is a VERY conservative place to do this. + * + * NOTE: %l0 has the PSR which was in place when we took the trap. + */ + + mov %l0, %psr ! **** DISABLE TRAPS **** + + /* + * Decrement ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: + * + * l4 = _Thread_Dispatch_disable_level pointer + * l5 = _ISR_Nest_level pointer + * l6 = _Thread_Dispatch_disable_level value + * l7 = _ISR_Nest_level value + */ + + sub %l6, 1, %l6 + st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] + + st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] + + /* + * If dispatching is disabled (includes nested interrupt case), + * then do a "simple" exit. + */ + + orcc %l6, %g0, %g0 ! Is dispatching disabled? + bnz simple_return ! Yes, then do a "simple" exit + nop ! delay slot + + /* + * If a context switch is necessary, then do fudge stack to + * return to the interrupt dispatcher. + */ + + sethi %hi(SYM(_Context_Switch_necessary)), %l4 + ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5 + + orcc %l5, %g0, %g0 ! Is thread switch necessary? + bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher + nop ! delay slot + + /* + * Finally, check to see if signals were sent to the currently + * executing task. If so, we need to invoke the interrupt dispatcher. + */ + + sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6 + ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7 + + orcc %l7, %g0, %g0 ! Were signals sent to the currently + ! executing thread? + bz simple_return ! yes, then invoke the dispatcher + ! use the delay slot to clear the signals + ! to the currently executing task flag + st %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))] + + + /* + * Invoke interrupt dispatcher. + */ + + PUBLIC(_ISR_Dispatch) +SYM(_ISR_Dispatch): + + /* + * The following subtract should get us back on the interrupted + * tasks stack and add enough room to invoke the dispatcher. + * When we enable traps, we are mostly back in the context + * of the task and subsequent interrupts can operate normally. + */ + + sub %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp + + or %l0, SPARC_PSR_ET_MASK, %l7 ! l7 = PSR with ET=1 + mov %l7, %psr ! 
**** ENABLE TRAPS **** + nop + nop + nop + + call SYM(_Thread_Dispatch), 0 + nop + + /* + * The CWP in place at this point may be different from + * that which was in effect at the beginning of the ISR if we + * have been context switched between the beginning of this invocation + * of _ISR_Handler and this point. Thus the CWP and WIM should + * not be changed back to their values at ISR entry time. Any + * changes to the PSR must preserve the CWP. + */ + +simple_return: + ld [%fp + ISF_Y_OFFSET], %l5 ! restore y + wr %l5, 0, %y + + ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC + ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC + rd %psr, %l3 + and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP + andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task + or %l3, %l0, %l0 ! install it later... + andn %l0, SPARC_PSR_ET_MASK, %l0 + + /* + * Restore tasks global and out registers + */ + + mov %fp, %g1 + + ! g1 is restored later + ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3 + ldd [%fp + ISF_G4_OFFSET], %g4 ! restore g4, g5 + ldd [%fp + ISF_G6_OFFSET], %g6 ! restore g6, g7 + + ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1 + ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3 + ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5 + ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7 + + /* + * Registers: + * + * ALL global registers EXCEPT G1 and the input registers have + * already been restored and thuse off limits. + * + * The following is the contents of the local registers: + * + * l0 = original psr + * l1 = return address (i.e. PC) + * l2 = nPC + * l3 = CWP + */ + + /* + * if (CWP + 1) is an invalid window then we need to reload it. + * + * WARNING: Traps should now be disabled + */ + + mov %l0, %psr ! **** DISABLE TRAPS **** + nop + nop + nop + rd %wim, %l4 + add %l0, 1, %l6 ! l6 = cwp + 1 + and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it + srl %l4, %l6, %l5 ! l5 = win >> cwp + 1 ; shift count + ! and CWP are conveniently LS 5 bits + cmp %l5, 1 ! Is tasks window invalid? + bne good_task_window + + /* + * The following code is the same as a 1 position left rotate of WIM. + */ + + sll %l4, 1, %l5 ! l5 = WIM << 1 + srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4 + ! l4 = WIM >> (Number Windows - 1) + or %l4, %l5, %l4 ! l4 = (WIM << 1) | + ! (WIM >> (Number Windows - 1)) + + /* + * Now restore the window just as if we underflowed to it. + */ + + wr %l4, 0, %wim ! WIM = new WIM + nop ! must delay after writing WIM + nop + nop + restore ! now into the tasks window + + ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0 + ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2 + ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4 + ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6 + ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0 + ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2 + ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4 + ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 + ! reload of sp clobbers ISF + save ! Back to ISR dispatch window + +good_task_window: + + mov %l0, %psr ! **** DISABLE TRAPS **** + ! and restore condition codes. + ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1 + jmp %l1 ! transfer control and + rett %l2 ! go back to tasks window + +/* end of file */ diff --git a/c/src/exec/score/cpu/sparc/cpu_asm.s b/c/src/exec/score/cpu/sparc/cpu_asm.s deleted file mode 100644 index 39962eedeb..0000000000 --- a/c/src/exec/score/cpu/sparc/cpu_asm.s +++ /dev/null @@ -1,726 +0,0 @@ -/* cpu_asm.s - * - * This file contains the basic algorithms for all assembly code used - * in an specific CPU port of RTEMS. 
These algorithms must be implemented - * in assembly language. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * Ported to ERC32 implementation of the SPARC by On-Line Applications - * Research Corporation (OAR) under contract to the European Space - * Agency (ESA). - * - * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995. - * European Space Agency. - * - * $Id$ - */ - -#include - -#if (SPARC_HAS_FPU == 1) - -/* - * void _CPU_Context_save_fp( - * void **fp_context_ptr - * ) - * - * This routine is responsible for saving the FP context - * at *fp_context_ptr. If the point to load the FP context - * from is changed then the pointer is modified by this routine. - * - * NOTE: See the README in this directory for information on the - * management of the "EF" bit in the PSR. - */ - - .align 4 - PUBLIC(_CPU_Context_save_fp) -SYM(_CPU_Context_save_fp): - save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp - - /* - * The following enables the floating point unit. - */ - - mov %psr, %l0 - sethi %hi(SPARC_PSR_EF_MASK), %l1 - or %l1, %lo(SPARC_PSR_EF_MASK), %l1 - or %l0, %l1, %l0 - mov %l0, %psr ! **** ENABLE FLOAT ACCESS **** - - ld [%i0], %l0 - std %f0, [%l0 + FO_F1_OFFSET] - std %f2, [%l0 + F2_F3_OFFSET] - std %f4, [%l0 + F4_F5_OFFSET] - std %f6, [%l0 + F6_F7_OFFSET] - std %f8, [%l0 + F8_F9_OFFSET] - std %f10, [%l0 + F1O_F11_OFFSET] - std %f12, [%l0 + F12_F13_OFFSET] - std %f14, [%l0 + F14_F15_OFFSET] - std %f16, [%l0 + F16_F17_OFFSET] - std %f18, [%l0 + F18_F19_OFFSET] - std %f20, [%l0 + F2O_F21_OFFSET] - std %f22, [%l0 + F22_F23_OFFSET] - std %f24, [%l0 + F24_F25_OFFSET] - std %f26, [%l0 + F26_F27_OFFSET] - std %f28, [%l0 + F28_F29_OFFSET] - std %f30, [%l0 + F3O_F31_OFFSET] - st %fsr, [%l0 + FSR_OFFSET] - ret - restore - -/* - * void _CPU_Context_restore_fp( - * void **fp_context_ptr - * ) - * - * This routine is responsible for restoring the FP context - * at *fp_context_ptr. If the point to load the FP context - * from is changed then the pointer is modified by this routine. - * - * NOTE: See the README in this directory for information on the - * management of the "EF" bit in the PSR. - */ - - .align 4 - PUBLIC(_CPU_Context_restore_fp) -SYM(_CPU_Context_restore_fp): - save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp - - /* - * The following enables the floating point unit. - */ - - mov %psr, %l0 - sethi %hi(SPARC_PSR_EF_MASK), %l1 - or %l1, %lo(SPARC_PSR_EF_MASK), %l1 - or %l0, %l1, %l0 - mov %l0, %psr ! **** ENABLE FLOAT ACCESS **** - - ld [%i0], %l0 - ldd [%l0 + FO_F1_OFFSET], %f0 - ldd [%l0 + F2_F3_OFFSET], %f2 - ldd [%l0 + F4_F5_OFFSET], %f4 - ldd [%l0 + F6_F7_OFFSET], %f6 - ldd [%l0 + F8_F9_OFFSET], %f8 - ldd [%l0 + F1O_F11_OFFSET], %f10 - ldd [%l0 + F12_F13_OFFSET], %f12 - ldd [%l0 + F14_F15_OFFSET], %f14 - ldd [%l0 + F16_F17_OFFSET], %f16 - ldd [%l0 + F18_F19_OFFSET], %f18 - ldd [%l0 + F2O_F21_OFFSET], %f20 - ldd [%l0 + F22_F23_OFFSET], %f22 - ldd [%l0 + F24_F25_OFFSET], %f24 - ldd [%l0 + F26_F27_OFFSET], %f26 - ldd [%l0 + F28_F29_OFFSET], %f28 - ldd [%l0 + F3O_F31_OFFSET], %f30 - ld [%l0 + FSR_OFFSET], %fsr - ret - restore - -#endif /* SPARC_HAS_FPU */ - -/* - * void _CPU_Context_switch( - * Context_Control *run, - * Context_Control *heir - * ) - * - * This routine performs a normal non-FP context switch. 
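The save_frame_loop / done_flushing logic spelled out in the long comment above (both copies of the file carry it) reduces to a small loop: rotate a one-bit trial mask left from the current window until it collides with the real WIM, flushing one window per step. A hedged C model follows; the window count of 8 is the assumed ERC32 value, and the flush itself is the std sequence in the assembly.

    #include <stdint.h>

    #define NWINDOWS 8u   /* SPARC_NUMBER_OF_REGISTER_WINDOWS, assumed */

    /* Hedged model of save_frame_loop, not the port's code. */
    static void flush_windows_model(uint32_t wim, uint32_t cwp)
    {
      uint32_t trial = 1u << cwp;          /* bit for the current window */
      for (;;) {
        /* rotate left one window, as the sll/srl/or pair does */
        trial = ((trial << 1) | (trial >> (NWINDOWS - 1u)))
                & ((1u << NWINDOWS) - 1u);
        if (trial & wim)
          break;                           /* "done_flushing" */
        /* "restore" back one window, then std %l0-%l7/%i0-%i7 into
         * its stack frame -- modeled as a no-op here */
      }
    }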
- */ - - .align 4 - PUBLIC(_CPU_Context_switch) -SYM(_CPU_Context_switch): - ! skip g0 - st %g1, [%o0 + G1_OFFSET] ! save the global registers - std %g2, [%o0 + G2_OFFSET] - std %g4, [%o0 + G4_OFFSET] - std %g6, [%o0 + G6_OFFSET] - - std %l0, [%o0 + L0_OFFSET] ! save the local registers - std %l2, [%o0 + L2_OFFSET] - std %l4, [%o0 + L4_OFFSET] - std %l6, [%o0 + L6_OFFSET] - - std %i0, [%o0 + I0_OFFSET] ! save the input registers - std %i2, [%o0 + I2_OFFSET] - std %i4, [%o0 + I4_OFFSET] - std %i6, [%o0 + I6_FP_OFFSET] - - std %o0, [%o0 + O0_OFFSET] ! save the output registers - std %o2, [%o0 + O2_OFFSET] - std %o4, [%o0 + O4_OFFSET] - std %o6, [%o0 + O6_SP_OFFSET] - - rd %psr, %o2 - st %o2, [%o0 + PSR_OFFSET] ! save status register - - /* - * This is entered from _CPU_Context_restore with: - * o1 = context to restore - * o2 = psr - */ - - PUBLIC(_CPU_Context_restore_heir) -SYM(_CPU_Context_restore_heir): - /* - * Flush all windows with valid contents except the current one. - * In examining the set register windows, one may logically divide - * the windows into sets (some of which may be empty) based on their - * current status: - * - * + current (i.e. in use), - * + used (i.e. a restore would not trap) - * + invalid (i.e. 1 in corresponding bit in WIM) - * + unused - * - * Either the used or unused set of windows may be empty. - * - * NOTE: We assume only one bit is set in the WIM at a time. - * - * Given a CWP of 5 and a WIM of 0x1, the registers are divided - * into sets as follows: - * - * + 0 - invalid - * + 1-4 - unused - * + 5 - current - * + 6-7 - used - * - * In this case, we only would save the used windows -- 6 and 7. - * - * Traps are disabled for the same logical period as in a - * flush all windows trap handler. - * - * Register Usage while saving the windows: - * g1 = current PSR - * g2 = current wim - * g3 = CWP - * g4 = wim scratch - * g5 = scratch - */ - - ld [%o1 + PSR_OFFSET], %g1 ! g1 = saved psr - - and %o2, SPARC_PSR_CWP_MASK, %g3 ! g3 = CWP - ! g1 = psr w/o cwp - andn %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1 - or %g1, %g3, %g1 ! g1 = heirs psr - mov %g1, %psr ! restore status register and - ! **** DISABLE TRAPS **** - mov %wim, %g2 ! g2 = wim - mov 1, %g4 - sll %g4, %g3, %g4 ! g4 = WIM mask for CW invalid - -save_frame_loop: - sll %g4, 1, %g5 ! rotate the "wim" left 1 - srl %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4 - or %g4, %g5, %g4 ! g4 = wim if we do one restore - - /* - * If a restore would not underflow, then continue. - */ - - andcc %g4, %g2, %g0 ! Any windows to flush? - bnz done_flushing ! No, then continue - nop - - restore ! back one window - - /* - * Now save the window just as if we overflowed to it. - */ - - std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] - std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] - std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] - std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] - - std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] - std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] - std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] - std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] - - ba save_frame_loop - nop - -done_flushing: - - add %g3, 1, %g3 ! calculate desired WIM - and %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3 - mov 1, %g4 - sll %g4, %g3, %g4 ! g4 = new WIM - mov %g4, %wim - - or %g1, SPARC_PSR_ET_MASK, %g1 - mov %g1, %psr ! **** ENABLE TRAPS **** - ! and restore CWP - nop - nop - nop - - ! skip g0 - ld [%o1 + G1_OFFSET], %g1 ! 
restore the global registers - ldd [%o1 + G2_OFFSET], %g2 - ldd [%o1 + G4_OFFSET], %g4 - ldd [%o1 + G6_OFFSET], %g6 - - ldd [%o1 + L0_OFFSET], %l0 ! restore the local registers - ldd [%o1 + L2_OFFSET], %l2 - ldd [%o1 + L4_OFFSET], %l4 - ldd [%o1 + L6_OFFSET], %l6 - - ldd [%o1 + I0_OFFSET], %i0 ! restore the output registers - ldd [%o1 + I2_OFFSET], %i2 - ldd [%o1 + I4_OFFSET], %i4 - ldd [%o1 + I6_FP_OFFSET], %i6 - - ldd [%o1 + O2_OFFSET], %o2 ! restore the output registers - ldd [%o1 + O4_OFFSET], %o4 - ldd [%o1 + O6_SP_OFFSET], %o6 - ! do o0/o1 last to avoid destroying heir context pointer - ldd [%o1 + O0_OFFSET], %o0 ! overwrite heir pointer - - jmp %o7 + 8 ! return - nop ! delay slot - -/* - * void _CPU_Context_restore( - * Context_Control *new_context - * ) - * - * This routine is generally used only to perform restart self. - * - * NOTE: It is unnecessary to reload some registers. - */ - - .align 4 - PUBLIC(_CPU_Context_restore) -SYM(_CPU_Context_restore): - save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp - rd %psr, %o2 - ba SYM(_CPU_Context_restore_heir) - mov %i0, %o1 ! in the delay slot - -/* - * void _ISR_Handler() - * - * This routine provides the RTEMS interrupt management. - * - * We enter this handler from the 4 instructions in the trap table with - * the following registers assumed to be set as shown: - * - * l0 = PSR - * l1 = PC - * l2 = nPC - * l3 = trap type - * - * NOTE: By an executive defined convention, trap type is between 0 and 255 if - * it is an asynchonous trap and 256 and 511 if it is synchronous. - */ - - .align 4 - PUBLIC(_ISR_Handler) -SYM(_ISR_Handler): - /* - * Fix the return address for synchronous traps. - */ - - andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 - ! Is this a synchronous trap? - be,a win_ovflow ! No, then skip the adjustment - nop ! DELAY - mov %l2, %l1 ! do not return to the instruction - add %l2, 4, %l2 ! indicated - -win_ovflow: - /* - * Save the globals this block uses. - * - * These registers are not restored from the locals. Their contents - * are saved directly from the locals into the ISF below. - */ - - mov %g4, %l4 ! save the globals this block uses - mov %g5, %l5 - - /* - * When at a "window overflow" trap, (wim == (1 << cwp)). - * If we get here like that, then process a window overflow. - */ - - rd %wim, %g4 - srl %g4, %l0, %g5 ! g5 = win >> cwp ; shift count and CWP - ! are LS 5 bits ; how convenient :) - cmp %g5, 1 ! Is this an invalid window? - bne dont_do_the_window ! No, then skip all this stuff - ! we are using the delay slot - - /* - * The following is same as a 1 position right rotate of WIM - */ - - srl %g4, 1, %g5 ! g5 = WIM >> 1 - sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4 - ! g4 = WIM << (Number Windows - 1) - or %g4, %g5, %g4 ! g4 = (WIM >> 1) | - ! (WIM << (Number Windows - 1)) - - /* - * At this point: - * - * g4 = the new WIM - * g5 is free - */ - - /* - * Since we are tinkering with the register windows, we need to - * make sure that all the required information is in global registers. - */ - - save ! Save into the window - wr %g4, 0, %wim ! WIM = new WIM - nop ! delay slots - nop - nop - - /* - * Now save the window just as if we overflowed to it. 
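The window-overflow saves above always target the same sixteen-word area at the base of the stack frame, which the SPARC convention reserves in every frame. A hedged sketch of that area follows; the field names are illustrative, and the positions correspond to the CPU_STACK_FRAME_*_OFFSET constants used by the stores.

    #include <stdint.h>

    /* Assumed layout: 16 words at %sp, reserved for window saves. */
    typedef struct {
      uint32_t l[8];   /* %l0..%l7 */
      uint32_t i[8];   /* %i0..%i7 (%i6 doubles as the frame pointer) */
    } window_save_area_sketch;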
- */ - - std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] - std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] - std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] - std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] - - std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] - std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] - std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] - std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] - - restore - nop - -dont_do_the_window: - /* - * Global registers %g4 and %g5 are saved directly from %l4 and - * %l5 directly into the ISF below. - */ - -save_isf: - - /* - * Save the state of the interrupted task -- especially the global - * registers -- in the Interrupt Stack Frame. Note that the ISF - * includes a regular minimum stack frame which will be used if - * needed by register window overflow and underflow handlers. - * - * REGISTERS SAME AS AT _ISR_Handler - */ - - sub %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp - ! make space for ISF - - std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC - st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC - st %g1, [%sp + ISF_G1_OFFSET] ! save g1 - std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3 - std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above - std %g6, [%sp + ISF_G6_OFFSET] ! save g6, g7 - - std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1 - std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3 - std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5 - std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7 - - rd %y, %g1 - st %g1, [%sp + ISF_Y_OFFSET] ! save y - - mov %sp, %o1 ! 2nd arg to ISR Handler - - /* - * Increment ISR nest level and Thread dispatch disable level. - * - * Register usage for this section: - * - * l4 = _Thread_Dispatch_disable_level pointer - * l5 = _ISR_Nest_level pointer - * l6 = _Thread_Dispatch_disable_level value - * l7 = _ISR_Nest_level value - * - * NOTE: It is assumed that l4 - l7 will be preserved until the ISR - * nest and thread dispatch disable levels are unnested. - */ - - sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4 - ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6 - sethi %hi(SYM(_ISR_Nest_level)), %l5 - ld [%l5 + %lo(SYM(_ISR_Nest_level))], %l7 - - add %l6, 1, %l6 - st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] - - add %l7, 1, %l7 - st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] - - /* - * If ISR nest level was zero (now 1), then switch stack. - */ - - mov %sp, %fp - subcc %l7, 1, %l7 ! outermost interrupt handler? - bnz dont_switch_stacks ! No, then do not switch stacks - - sethi %hi(SYM(_CPU_Interrupt_stack_high)), %g4 - ld [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp - -dont_switch_stacks: - /* - * Make sure we have a place on the stack for the window overflow - * trap handler to write into. At this point it is safe to - * enable traps again. - */ - - sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp - - /* - * Check if we have an external interrupt (trap 0x11 - 0x1f). If so, - * set the PIL in the %psr to mask off interrupts with lower priority. - * The original %psr in %l0 is not modified since it will be restored - * when the interrupt handler returns. - */ - - mov %l0, %g5 - subcc %l3, 0x11, %g0 - bl dont_fix_pil - subcc %l3, 0x1f, %g0 - bg dont_fix_pil - sll %l3, 8, %g4 - and %g4, SPARC_PSR_PIL_MASK, %g4 - andn %l0, SPARC_PSR_PIL_MASK, %g5 - or %g4, %g5, %g5 -dont_fix_pil: - wr %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS **** - - /* - * Vector to user's handler. - * - * NOTE: TBR may no longer have vector number in it since - * we just enabled traps. It is definitely in l3. 
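The dont_fix_pil sequence above (present in both copies of the file) computes a new PSR with the processor interrupt level raised to that of the trapping interrupt, so equal and lower priority interrupts stay masked once traps are re-enabled. A hedged C model follows; SPARC_PSR_PIL_MASK is the architectural PIL field, PSR bits 8-11.

    #include <stdint.h>

    #define SPARC_PSR_PIL_MASK 0x00000F00u   /* PSR bits 8..11 */

    /* Hedged model of dont_fix_pil, not the port's code. */
    static uint32_t psr_with_pil_sketch(uint32_t psr, uint32_t trap)
    {
      /* external interrupt traps are 0x11..0x1f; trap 0x10+n is
       * interrupt level n, so (trap << 8) & mask yields n << 8 */
      if (trap >= 0x11 && trap <= 0x1f) {
        psr &= ~SPARC_PSR_PIL_MASK;
        psr |= (trap << 8) & SPARC_PSR_PIL_MASK;
      }
      return psr;
    }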
- */ - - sethi %hi(SYM(_ISR_Vector_table)), %g4 - or %g4, %lo(SYM(_ISR_Vector_table)), %g4 - and %l3, 0xFF, %g5 ! remove synchronous trap indicator - sll %g5, 2, %g5 ! g5 = offset into table - ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ] - - - ! o1 = 2nd arg = address of the ISF - ! WAS LOADED WHEN ISF WAS SAVED!!! - mov %l3, %o0 ! o0 = 1st arg = vector number - call %g4, 0 - nop ! delay slot - - /* - * Redisable traps so we can finish up the interrupt processing. - * This is a VERY conservative place to do this. - * - * NOTE: %l0 has the PSR which was in place when we took the trap. - */ - - mov %l0, %psr ! **** DISABLE TRAPS **** - - /* - * Decrement ISR nest level and Thread dispatch disable level. - * - * Register usage for this section: - * - * l4 = _Thread_Dispatch_disable_level pointer - * l5 = _ISR_Nest_level pointer - * l6 = _Thread_Dispatch_disable_level value - * l7 = _ISR_Nest_level value - */ - - sub %l6, 1, %l6 - st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] - - st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] - - /* - * If dispatching is disabled (includes nested interrupt case), - * then do a "simple" exit. - */ - - orcc %l6, %g0, %g0 ! Is dispatching disabled? - bnz simple_return ! Yes, then do a "simple" exit - nop ! delay slot - - /* - * If a context switch is necessary, then do fudge stack to - * return to the interrupt dispatcher. - */ - - sethi %hi(SYM(_Context_Switch_necessary)), %l4 - ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5 - - orcc %l5, %g0, %g0 ! Is thread switch necessary? - bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher - nop ! delay slot - - /* - * Finally, check to see if signals were sent to the currently - * executing task. If so, we need to invoke the interrupt dispatcher. - */ - - sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6 - ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7 - - orcc %l7, %g0, %g0 ! Were signals sent to the currently - ! executing thread? - bz simple_return ! yes, then invoke the dispatcher - ! use the delay slot to clear the signals - ! to the currently executing task flag - st %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))] - - - /* - * Invoke interrupt dispatcher. - */ - - PUBLIC(_ISR_Dispatch) -SYM(_ISR_Dispatch): - - /* - * The following subtract should get us back on the interrupted - * tasks stack and add enough room to invoke the dispatcher. - * When we enable traps, we are mostly back in the context - * of the task and subsequent interrupts can operate normally. - */ - - sub %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp - - or %l0, SPARC_PSR_ET_MASK, %l7 ! l7 = PSR with ET=1 - mov %l7, %psr ! **** ENABLE TRAPS **** - nop - nop - nop - - call SYM(_Thread_Dispatch), 0 - nop - - /* - * The CWP in place at this point may be different from - * that which was in effect at the beginning of the ISR if we - * have been context switched between the beginning of this invocation - * of _ISR_Handler and this point. Thus the CWP and WIM should - * not be changed back to their values at ISR entry time. Any - * changes to the PSR must preserve the CWP. - */ - -simple_return: - ld [%fp + ISF_Y_OFFSET], %l5 ! restore y - wr %l5, 0, %y - - ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC - ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC - rd %psr, %l3 - and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP - andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task - or %l3, %l0, %l0 ! install it later... 
- andn %l0, SPARC_PSR_ET_MASK, %l0 - - /* - * Restore tasks global and out registers - */ - - mov %fp, %g1 - - ! g1 is restored later - ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3 - ldd [%fp + ISF_G4_OFFSET], %g4 ! restore g4, g5 - ldd [%fp + ISF_G6_OFFSET], %g6 ! restore g6, g7 - - ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1 - ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3 - ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5 - ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7 - - /* - * Registers: - * - * ALL global registers EXCEPT G1 and the input registers have - * already been restored and thuse off limits. - * - * The following is the contents of the local registers: - * - * l0 = original psr - * l1 = return address (i.e. PC) - * l2 = nPC - * l3 = CWP - */ - - /* - * if (CWP + 1) is an invalid window then we need to reload it. - * - * WARNING: Traps should now be disabled - */ - - mov %l0, %psr ! **** DISABLE TRAPS **** - nop - nop - nop - rd %wim, %l4 - add %l0, 1, %l6 ! l6 = cwp + 1 - and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it - srl %l4, %l6, %l5 ! l5 = win >> cwp + 1 ; shift count - ! and CWP are conveniently LS 5 bits - cmp %l5, 1 ! Is tasks window invalid? - bne good_task_window - - /* - * The following code is the same as a 1 position left rotate of WIM. - */ - - sll %l4, 1, %l5 ! l5 = WIM << 1 - srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4 - ! l4 = WIM >> (Number Windows - 1) - or %l4, %l5, %l4 ! l4 = (WIM << 1) | - ! (WIM >> (Number Windows - 1)) - - /* - * Now restore the window just as if we underflowed to it. - */ - - wr %l4, 0, %wim ! WIM = new WIM - nop ! must delay after writing WIM - nop - nop - restore ! now into the tasks window - - ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0 - ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2 - ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4 - ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6 - ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0 - ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2 - ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4 - ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 - ! reload of sp clobbers ISF - save ! Back to ISR dispatch window - -good_task_window: - - mov %l0, %psr ! **** DISABLE TRAPS **** - ! and restore condition codes. - ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1 - jmp %l1 ! transfer control and - rett %l2 ! go back to tasks window - -/* end of file */ diff --git a/c/src/exec/score/cpu/sparc/rtems.S b/c/src/exec/score/cpu/sparc/rtems.S new file mode 100644 index 0000000000..f20d8c8288 --- /dev/null +++ b/c/src/exec/score/cpu/sparc/rtems.S @@ -0,0 +1,58 @@ +/* rtems.s + * + * This file contains the single entry point code for + * the SPARC port of RTEMS. + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * Copyright assigned to U.S. Government, 1994. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.OARcorp.com/rtems/license.html. + * + * Ported to ERC32 implementation of the SPARC by On-Line Applications + * Research Corporation (OAR) under contract to the European Space + * Agency (ESA). + * + * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995. + * European Space Agency. + * + * $Id$ + */ + +#include + +/* + * RTEMS + * + * This routine jumps to the directive indicated in the + * CPU defined register. This routine is used when RTEMS is + * linked by itself and placed in ROM. This routine is the + * first address in the ROM space for RTEMS. 
The user "calls" + * this address with the directive arguments in the normal place. + * This routine then jumps indirectly to the correct directive + * preserving the arguments. The directive should not realize + * it has been "wrapped" in this way. The table "_Entry_points" + * is used to look up the directive. + * + * void RTEMS() + */ + + .align 4 + PUBLIC(RTEMS) +SYM(RTEMS): + /* + * g2 was chosen because gcc uses it as a scratch register in + * similar code scenarios and the other locals, ins, and outs + * are off limits to this routine unless it does a "save" and + * copies its in registers to the outs which only works up until + * 6 parameters. Best to take the simple approach in this case. + */ + sethi SYM(_Entry_points), %g2 + or %g2, %lo(SYM(_Entry_points)), %g2 + sll %g1, 2, %g1 + add %g1, %g2, %g2 + jmp %g2 + nop + diff --git a/c/src/exec/score/cpu/sparc/rtems.s b/c/src/exec/score/cpu/sparc/rtems.s deleted file mode 100644 index f20d8c8288..0000000000 --- a/c/src/exec/score/cpu/sparc/rtems.s +++ /dev/null @@ -1,58 +0,0 @@ -/* rtems.s - * - * This file contains the single entry point code for - * the SPARC port of RTEMS. - * - * COPYRIGHT (c) 1989-1998. - * On-Line Applications Research Corporation (OAR). - * Copyright assigned to U.S. Government, 1994. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.OARcorp.com/rtems/license.html. - * - * Ported to ERC32 implementation of the SPARC by On-Line Applications - * Research Corporation (OAR) under contract to the European Space - * Agency (ESA). - * - * ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995. - * European Space Agency. - * - * $Id$ - */ - -#include - -/* - * RTEMS - * - * This routine jumps to the directive indicated in the - * CPU defined register. This routine is used when RTEMS is - * linked by itself and placed in ROM. This routine is the - * first address in the ROM space for RTEMS. The user "calls" - * this address with the directive arguments in the normal place. - * This routine then jumps indirectly to the correct directive - * preserving the arguments. The directive should not realize - * it has been "wrapped" in this way. The table "_Entry_points" - * is used to look up the directive. - * - * void RTEMS() - */ - - .align 4 - PUBLIC(RTEMS) -SYM(RTEMS): - /* - * g2 was chosen because gcc uses it as a scratch register in - * similar code scenarios and the other locals, ins, and outs - * are off limits to this routine unless it does a "save" and - * copies its in registers to the outs which only works up until - * 6 parameters. Best to take the simple approach in this case. - */ - sethi SYM(_Entry_points), %g2 - or %g2, %lo(SYM(_Entry_points)), %g2 - sll %g1, 2, %g1 - add %g1, %g2, %g2 - jmp %g2 - nop - -- cgit v1.2.3