From 7633f5b3944a1ed9a7c23efd76602e7240276349 Mon Sep 17 00:00:00 2001 From: Sebastian Huber Date: Mon, 12 Mar 2018 06:59:15 +0100 Subject: sparc64: Move libcpu content to cpukit This patch is a part of the BSP source reorganization. Update #3285. --- c/src/lib/libbsp/sparc64/niagara/Makefile.am | 4 - .../libbsp/sparc64/niagara/make/custom/niagara.cfg | 4 - c/src/lib/libbsp/sparc64/usiii/Makefile.am | 4 - .../lib/libbsp/sparc64/usiii/make/custom/usiii.cfg | 4 - c/src/lib/libcpu/sparc64/Makefile.am | 47 -- c/src/lib/libcpu/sparc64/configure.ac | 39 -- c/src/lib/libcpu/sparc64/shared/score/cpu.c | 249 ---------- c/src/lib/libcpu/sparc64/shared/score/interrupt.S | 543 --------------------- .../sparc64/shared/syscall/sparc64-syscall.S | 126 ----- .../sparc64/shared/syscall/sparc64-syscall.h | 3 - cpukit/score/cpu/sparc64/Makefile.am | 10 +- cpukit/score/cpu/sparc64/cpu.c | 229 +++++++++ cpukit/score/cpu/sparc64/interrupt.S | 543 +++++++++++++++++++++ cpukit/score/cpu/sparc64/sparc64-syscall.S | 126 +++++ cpukit/score/cpu/sparc64/sparc64-syscall.h | 3 + 15 files changed, 906 insertions(+), 1028 deletions(-) delete mode 100644 c/src/lib/libcpu/sparc64/Makefile.am delete mode 100644 c/src/lib/libcpu/sparc64/configure.ac delete mode 100644 c/src/lib/libcpu/sparc64/shared/score/cpu.c delete mode 100644 c/src/lib/libcpu/sparc64/shared/score/interrupt.S delete mode 100644 c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.S delete mode 100644 c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.h create mode 100644 cpukit/score/cpu/sparc64/interrupt.S create mode 100644 cpukit/score/cpu/sparc64/sparc64-syscall.S create mode 100644 cpukit/score/cpu/sparc64/sparc64-syscall.h diff --git a/c/src/lib/libbsp/sparc64/niagara/Makefile.am b/c/src/lib/libbsp/sparc64/niagara/Makefile.am index 8ae6d0ce61..abe8e5a072 100644 --- a/c/src/lib/libbsp/sparc64/niagara/Makefile.am +++ b/c/src/lib/libbsp/sparc64/niagara/Makefile.am @@ -52,9 +52,5 @@ libbsp_a_SOURCES = $(startup_SOURCES) $(clock_SOURCES) $(console_SOURCES) \ libbsp_a_SOURCES += ../../../../../../bsps/shared/cache/nocache.c -libbsp_a_LIBADD = \ - ../../../libcpu/@RTEMS_CPU@/shared/shared-score.rel \ - ../../../libcpu/@RTEMS_CPU@/shared/sparc64-syscall.rel - include $(top_srcdir)/../../../../automake/local.am include $(srcdir)/../../../../../../bsps/sparc64/niagara/headers.am diff --git a/c/src/lib/libbsp/sparc64/niagara/make/custom/niagara.cfg b/c/src/lib/libbsp/sparc64/niagara/make/custom/niagara.cfg index 117899318c..2fd8973bd6 100644 --- a/c/src/lib/libbsp/sparc64/niagara/make/custom/niagara.cfg +++ b/c/src/lib/libbsp/sparc64/niagara/make/custom/niagara.cfg @@ -4,10 +4,6 @@ include $(RTEMS_ROOT)/make/custom/default.cfg RTEMS_CPU=sparc64 -RTEMS_CPU_MODEL=sun4v - -# This is the actual bsp directory used during the build process. -RTEMS_BSP_FAMILY=niagara # This contains the compiler options necessary to select the CPU model # and (hopefully) optimize for it. 
diff --git a/c/src/lib/libbsp/sparc64/usiii/Makefile.am b/c/src/lib/libbsp/sparc64/usiii/Makefile.am index aedeac340f..6cbf9e7e99 100644 --- a/c/src/lib/libbsp/sparc64/usiii/Makefile.am +++ b/c/src/lib/libbsp/sparc64/usiii/Makefile.am @@ -65,9 +65,5 @@ libbsp_a_SOURCES = $(startup_SOURCES) $(mmu_SOURCES) $(clock_SOURCES) $(console_ libbsp_a_SOURCES += ../../../../../../bsps/shared/cache/nocache.c -libbsp_a_LIBADD = \ - ../../../libcpu/@RTEMS_CPU@/shared/shared-score.rel \ - ../../../libcpu/@RTEMS_CPU@/shared/sparc64-syscall.rel - include $(top_srcdir)/../../../../automake/local.am include $(srcdir)/../../../../../../bsps/sparc64/usiii/headers.am diff --git a/c/src/lib/libbsp/sparc64/usiii/make/custom/usiii.cfg b/c/src/lib/libbsp/sparc64/usiii/make/custom/usiii.cfg index 740e5b6c34..b9fad21839 100644 --- a/c/src/lib/libbsp/sparc64/usiii/make/custom/usiii.cfg +++ b/c/src/lib/libbsp/sparc64/usiii/make/custom/usiii.cfg @@ -4,10 +4,6 @@ include $(RTEMS_ROOT)/make/custom/default.cfg RTEMS_CPU=sparc64 -RTEMS_CPU_MODEL=sun4u - -# This is the actual bsp directory used during the build process. -RTEMS_BSP_FAMILY=usiii # This contains the compiler options necessary to select the CPU model # and (hopefully) optimize for it. diff --git a/c/src/lib/libcpu/sparc64/Makefile.am b/c/src/lib/libcpu/sparc64/Makefile.am deleted file mode 100644 index d5883f5bd4..0000000000 --- a/c/src/lib/libcpu/sparc64/Makefile.am +++ /dev/null @@ -1,47 +0,0 @@ -ACLOCAL_AMFLAGS = -I ../../../aclocal - -include $(top_srcdir)/../../../automake/compile.am - -noinst_PROGRAMS = - -# NOTE: shared applies to targets meant to run with supervisor privilege, -# i.e. sun4u or sun4v. Any targets (e.g. simulators) that cannot emulate -# supervisor privilege should use their own score and sparc64-syscall. -# The other shared code is probably usable, but should be defined separately. -# See the example at the end of Makefile.am. -if shared - -noinst_PROGRAMS += shared/shared-score.rel -shared_shared_score_rel_SOURCES = shared/score/cpu.c shared/score/interrupt.S -shared_shared_score_rel_CPPFLAGS = $(AM_CPPFLAGS) -shared_shared_score_rel_LDFLAGS = $(RTEMS_RELLDFLAGS) - -noinst_PROGRAMS += shared/sparc64-syscall.rel -shared_sparc64_syscall_rel_SOURCES = shared/syscall/sparc64-syscall.S \ - shared/syscall/sparc64-syscall.h -shared_sparc64_syscall_rel_CPPFLAGS = $(AM_CPPFLAGS) -shared_sparc64_syscall_rel_LDFLAGS = $(RTEMS_RELLDFLAGS) -endif - -### This is an example of how to define a separate score implementation. -## NOTE: Unlike other CPUs, we install into a subdirectory to avoid -## file name conflicts (See sh CPU for simexampleilar approach) -#include_sparc64dir = $(includedir)/sparc64 -#include_rtems_scoredir = $(includedir)/rtems/score -# -#if simexample -#include_sparc64_HEADERS = -#include_rtems_score_HEADERS = $(include_rtems_scoredir)/sparc64.h \ -# $(include_rtems_scoredir)/cpu.h \ -# $(include_rtems_scoredir)/types.h - -## simexample/score -#noinst_PROGRAMS += simexample/score.rel -#simexample_score_rel_SOURCES = -#simexample_score_rel_CPPFLAGS = $(AM_CPPFLAGS) -#simexample_score_rel_LDFLAGS = $(RTEMS_RELLDFLAGS) -# -#endif -### End of example. - -include $(top_srcdir)/../../../automake/local.am diff --git a/c/src/lib/libcpu/sparc64/configure.ac b/c/src/lib/libcpu/sparc64/configure.ac deleted file mode 100644 index 08607a53e7..0000000000 --- a/c/src/lib/libcpu/sparc64/configure.ac +++ /dev/null @@ -1,39 +0,0 @@ -## Process this file with autoconf to produce a configure script. 
- -AC_PREREQ([2.69]) -AC_INIT([rtems-c-src-lib-libcpu-sparc64],[_RTEMS_VERSION],[https://devel.rtems.org/newticket]) -AC_CONFIG_SRCDIR([shared]) -RTEMS_TOP([../../../../..],[../../..]) -RTEMS_SOURCE_TOP -RTEMS_BUILD_TOP - -RTEMS_CANONICAL_TARGET_CPU - -AM_INIT_AUTOMAKE([no-define foreign subdir-objects 1.12.2]) -AM_MAINTAINER_MODE - -RTEMS_ENV_RTEMSBSP - -RTEMS_PROJECT_ROOT - -RTEMS_PROG_CC_FOR_TARGET -AM_PROG_CC_C_O -RTEMS_CANONICALIZE_TOOLS -RTEMS_PROG_CCAS - -AM_CONDITIONAL(shared, test x"$RTEMS_CPU_MODEL" = x"sun4v" || \ - test x"$RTEMS_CPU_MODEL" = x"sun4u") - -AM_CONDITIONAL([sun4u],[test x"$RTEMS_CPU_MODEL" = x"sun4u"]) -AM_CONDITIONAL([sun4v],[test x"$RTEMS_CPU_MODEL" = x"sun4v"]) - -## How to add a conditional simexample for the Makefile.am, based on a -## RTEMS_CPU_MODEL defined as simulator. -#AM_CONDITIONAL([simexample],[test x"$RTEMS_CPU_MODEL" = x"simulator"]) - -RTEMS_AMPOLISH3 - -# Explicitly list all Makefiles here -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - diff --git a/c/src/lib/libcpu/sparc64/shared/score/cpu.c b/c/src/lib/libcpu/sparc64/shared/score/cpu.c deleted file mode 100644 index ca5ea9f09f..0000000000 --- a/c/src/lib/libcpu/sparc64/shared/score/cpu.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * SPARC-v9 Dependent Source - */ - -/* - * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR). - * - * This file is based on the SPARC cpu.c file. Modifications are made to - * provide support for the SPARC-v9. - * COPYRIGHT (c) 2010. Gedare Bloom. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#include -#include -#include -#include - -/* - * This initializes the set of opcodes placed in each trap - * table entry. The routine which installs a handler is responsible - * for filling in the fields for the _handler address and the _vector - * trap type. - * - * The constants following this structure are masks for the fields which - * must be filled in when the handler is installed. - */ - -/* 64-bit registers complicate this. Also, in sparc v9, - * each trap level gets its own set of global registers, but - * does not get its own dedicated register window. so we avoid - * using the local registers in the trap handler. - */ -const CPU_Trap_table_entry _CPU_Trap_slot_template = { - 0x89508000, /* rdpr %tstate, %g4 */ - 0x05000000, /* sethi %hh(_handler), %g2 */ - 0x8410a000, /* or %g2, %hm(_handler), %g2 */ - 0x8528b020, /* sllx %g2, 32, %g2 */ - 0x07000000, /* sethi %hi(_handler), %g3 */ - 0x8610c002, /* or %g3, %g2, %g3 */ - 0x81c0e000, /* jmp %g3 + %lo(_handler) */ - 0x84102000 /* mov _vector, %g2 */ -}; - - -/* - * _CPU_ISR_Get_level - * - * Input Parameters: NONE - * - * Output Parameters: - * returns the current interrupt level (PIL field of the PSR) - */ -uint32_t _CPU_ISR_Get_level( void ) -{ - uint32_t level; - - sparc64_get_interrupt_level( level ); - - return level; -} - -/* - * _CPU_ISR_install_raw_handler - * - * This routine installs the specified handler as a "raw" non-executive - * supported trap handler (a.k.a. interrupt service routine). - * - * Input Parameters: - * vector - trap table entry number plus synchronous - * vs. 
asynchronous information - * new_handler - address of the handler to be installed - * old_handler - pointer to an address of the handler previously installed - * - * Output Parameters: NONE - * *new_handler - address of the handler previously installed - * - * NOTE: - * - * On the SPARC v9, there are really only 512 vectors. However, the executive - * has no easy, fast, reliable way to determine which traps are synchronous - * and which are asynchronous. By default, traps return to the - * instruction which caused the interrupt. So if you install a software - * trap handler as an executive interrupt handler (which is desirable since - * RTEMS takes care of window and register issues), then the executive needs - * to know that the return address is to the trap rather than the instruction - * following the trap. - * - * So vectors 0 through 511 are treated as regular asynchronous traps which - * provide the "correct" return address. Vectors 512 through 1023 are assumed - * by the executive to be synchronous and to require that the return be to the - * trapping instruction. - * - * If you use this mechanism to install a trap handler which must reexecute - * the instruction which caused the trap, then it should be installed as - * a synchronous trap. This will avoid the executive changing the return - * address. - */ -void _CPU_ISR_install_raw_handler( - uint32_t vector, - proc_ptr new_handler, - proc_ptr *old_handler -) -{ - uint32_t real_vector; - CPU_Trap_table_entry *tba; - CPU_Trap_table_entry *slot; - uint64_t u64_tba; - uint64_t u64_handler; - - /* - * Get the "real" trap number for this vector ignoring the synchronous - * versus asynchronous indicator included with our vector numbers. - */ - - real_vector = SPARC_REAL_TRAP_NUMBER( vector ); - - /* - * Get the current base address of the trap table and calculate a pointer - * to the slot we are interested in. - */ - - sparc64_get_tba( u64_tba ); - -/* u32_tbr &= 0xfffff000; */ - u64_tba &= 0xffffffffffff8000; /* keep only trap base address */ - - tba = (CPU_Trap_table_entry *) u64_tba; - - /* use array indexing to fill in lower bits -- require - * CPU_Trap_table_entry to be full-sized. */ - slot = &tba[ real_vector ]; - - /* - * Get the address of the old_handler from the trap table. - * - * NOTE: The old_handler returned will be bogus if it does not follow - * the RTEMS model. - */ - - /* shift amount to shift of hi bits (31:10) */ -#define HI_BITS_SHIFT 10 - - /* shift amount of hm bits (41:32) */ -#define HM_BITS_SHIFT 32 - - /* shift amount of hh bits (63:42) */ -#define HH_BITS_SHIFT 42 - - /* We're only interested in bits 0-9 of the immediate field*/ -#define IMM_MASK 0x000003FF - - if ( slot->rdpr_tstate_g4 == _CPU_Trap_slot_template.rdpr_tstate_g4 ) { - u64_handler = - (((uint64_t)((slot->sethi_of_hh_handler_to_g2 << HI_BITS_SHIFT) | - (slot->or_g2_hm_handler_to_g2 & IMM_MASK))) << HM_BITS_SHIFT) | - ((slot->sethi_of_handler_to_g3 << HI_BITS_SHIFT) | - (slot->jmp_to_low_of_handler_plus_g3 & IMM_MASK)); - *old_handler = (proc_ptr) u64_handler; - } else - *old_handler = 0; - - /* - * Copy the template to the slot and then fix it. 
- */ - - *slot = _CPU_Trap_slot_template; - - u64_handler = (uint64_t) new_handler; - - /* mask for extracting %hh */ -#define HH_BITS_MASK 0xFFFFFC0000000000 - - /* mask for extracting %hm */ -#define HM_BITS_MASK 0x000003FF00000000 - - /* mask for extracting %hi */ -#define HI_BITS_MASK 0x00000000FFFFFC00 - - /* mask for extracting %lo */ -#define LO_BITS_MASK 0x00000000000003FF - - - slot->mov_vector_g2 |= vector; - slot->sethi_of_hh_handler_to_g2 |= - (u64_handler & HH_BITS_MASK) >> HH_BITS_SHIFT; - slot->or_g2_hm_handler_to_g2 |= - (u64_handler & HM_BITS_MASK) >> HM_BITS_SHIFT; - slot->sethi_of_handler_to_g3 |= - (u64_handler & HI_BITS_MASK) >> HI_BITS_SHIFT; - slot->jmp_to_low_of_handler_plus_g3 |= (u64_handler & LO_BITS_MASK); - - /* need to flush icache after this !!! */ - - /* need to flush icache in case old trap handler is in cache */ - rtems_cache_invalidate_entire_instruction(); - -} - -/* - * _CPU_ISR_install_vector - * - * This kernel routine installs the RTEMS handler for the - * specified vector. - * - * Input parameters: - * vector - interrupt vector number - * new_handler - replacement ISR for this vector number - * old_handler - pointer to former ISR for this vector number - * - * Output parameters: - * *old_handler - former ISR for this vector number - */ -void _CPU_ISR_install_vector( - uint64_t vector, - proc_ptr new_handler, - proc_ptr *old_handler -) -{ - uint64_t real_vector; - proc_ptr ignored; - - /* - * Get the "real" trap number for this vector ignoring the synchronous - * versus asynchronous indicator included with our vector numbers. - */ - real_vector = SPARC_REAL_TRAP_NUMBER( vector ); - /* - * Return the previous ISR handler. - */ - - *old_handler = _ISR_Vector_table[ vector ]; - - /* - * Install the wrapper so this ISR can be invoked properly. - */ - - _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored ); - - /* - * We put the actual user ISR address in '_ISR_vector_table'. This will - * be used by the _ISR_Handler so the user gets control. - */ - - _ISR_Vector_table[ real_vector ] = new_handler; -} diff --git a/c/src/lib/libcpu/sparc64/shared/score/interrupt.S b/c/src/lib/libcpu/sparc64/shared/score/interrupt.S deleted file mode 100644 index 6f8eb373f0..0000000000 --- a/c/src/lib/libcpu/sparc64/shared/score/interrupt.S +++ /dev/null @@ -1,543 +0,0 @@ -/* cpu_asm.s - * - * This file contains the basic algorithms for all assembly code used - * in an specific CPU port of RTEMS. These algorithms must be implemented - * in assembly language. - * - * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR). - * COPYRIGHT (c) 2010. Gedare Bloom. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#include -#include - - -/* - * The assembler needs to be told that we know what to do with - * the global registers. - */ -.register %g2, #scratch -.register %g3, #scratch -.register %g6, #scratch -.register %g7, #scratch - - - /* - * void _ISR_Handler() - * - * This routine provides the RTEMS interrupt management. 
- * - * We enter this handler from the 8 instructions in the trap table with - * the following registers assumed to be set as shown: - * - * g4 = tstate (old l0) - * g2 = trap type (vector) (old l3) - * - * NOTE: By an executive defined convention: - * if trap type is between 0 and 511 it is an asynchronous trap - * if trap type is between 512 and 1023 it is an asynchonous trap - */ - - .align 4 -PUBLIC(_ISR_Handler) - SYM(_ISR_Handler): - - /* - * The ISR is called at TL = 1. - * On sun4u we use the alternate globals set. - * - * On entry: - * g4 = tstate (from trap table) - * g2 = trap vector # - * - * In either case, note that trap handlers share a register window with - * the interrupted context, unless we explicitly enter a new window. This - * differs from Sparc v8, in which a dedicated register window is saved - * for trap handling. This means we have to avoid overwriting any registers - * that we don't save. - * - */ - - - /* - * save some or all context on stack - */ - - /* - * Save the state of the interrupted task -- especially the global - * registers -- in the Interrupt Stack Frame. Note that the ISF - * includes a regular minimum stack frame which will be used if - * needed by register window overflow and underflow handlers. - * - * This is slightly wasteful, since the stack already has the window - * overflow space reserved, but there is no obvious way to ensure - * we can store the interrupted state and still handle window - * spill/fill correctly, since there is no room for the ISF. - * - */ - - /* this is for debugging purposes, make sure that TL = 1, otherwise - * things might get dicey */ - rdpr %tl, %g1 - cmp %g1, 1 - be 1f - nop - - 0: ba 0b - nop - - 1: - /* first store the sp of the interrupted task temporarily in g1 */ - mov %sp, %g1 - - sub %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp - ! make space for Stack_Frame||ISF - - /* save tstate, tpc, tnpc, pil */ - stx %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET] - rdpr %pil, %g3 - rdpr %tpc, %g4 - rdpr %tnpc, %g5 - stx %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET] - stx %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET] - stx %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET] - stx %g2, [%sp + STACK_BIAS + ISF_TVEC_OFFSET] - - rd %y, %g4 ! save y - stx %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET] - - ! save interrupted frame's output regs - stx %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET] ! save o0 - stx %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET] ! save o1 - stx %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET] ! save o2 - stx %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET] ! save o3 - stx %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET] ! save o4 - stx %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET] ! save o5 - stx %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET] ! save o6/sp - stx %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET] ! save o7 - - mov %g1, %o5 ! hold the old sp here for now - mov %g2, %o1 ! we'll need trap # later - - /* switch to TL[0] */ - wrpr %g0, 0, %tl - - /* switch to normal globals */ -#if defined (SUN4U) - /* the assignment to pstate below will mask out the AG bit */ -#elif defined (SUN4V) - wrpr %g0, 0, %gl -#endif - /* get pstate to known state */ - wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate - - ! save globals - stx %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET] ! save g1 - stx %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET] ! save g2 - stx %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET] ! save g3 - stx %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET] ! save g4 - stx %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET] ! save g5 - stx %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET] ! 
save g6 - stx %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET] ! save g7 - - - mov %o1, %g2 ! get the trap # - mov %o5, %g7 ! store the interrupted %sp (preserve) - mov %sp, %o1 ! 2nd arg to ISR Handler = address of ISF - add %o1, STACK_BIAS, %o1 ! need to adjust for stack bias, 2nd arg = ISF - - /* - * Increment ISR nest level and Thread dispatch disable level. - * - * Register usage for this section: (note, these are used later) - * - * g3 = _Thread_Dispatch_disable_level pointer - * g5 = _Thread_Dispatch_disable_level value (uint32_t) - * g6 = _ISR_Nest_level pointer - * g4 = _ISR_Nest_level value (uint32_t) - * o5 = temp - * - * NOTE: It is assumed that g6 - g7 will be preserved until the ISR - * nest and thread dispatch disable levels are unnested. - */ - - setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3 - lduw [%g3], %g5 - setx ISR_NEST_LEVEL, %o5, %g6 - lduw [%g6], %g4 - - add %g5, 1, %g5 - stuw %g5, [%g3] - - add %g4, 1, %g4 - stuw %g4, [%g6] - - /* - * If ISR nest level was zero (now 1), then switch stack. - */ - - subcc %g4, 1, %g4 ! outermost interrupt handler? - bnz dont_switch_stacks ! No, then do not switch stacks - - setx SYM(INTERRUPT_STACK_HIGH), %o5, %g1 - ldx [%g1], %sp - - /* - * Adjust the stack for the stack bias - */ - sub %sp, STACK_BIAS, %sp - - /* - * Make sure we have a place on the stack for the window overflow - * trap handler to write into. At this point it is safe to - * enable traps again. - */ - - sub %sp, SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp - - dont_switch_stacks: - /* - * Check if we have an external interrupt (trap 0x41 - 0x4f). If so, - * set the PIL to mask off interrupts with lower priority. - * - * The original PIL is not modified since it will be restored - * when the interrupt handler returns. - */ - - and %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]? - - subcc %g1, 0x41, %g0 - bl dont_fix_pil - subcc %g1, 0x4f, %g0 - bg dont_fix_pil - nop - wrpr %g0, %g1, %pil - - dont_fix_pil: - /* We need to be careful about enabling traps here. - * - * We already stored off the tstate, tpc, and tnpc, and switched to - * TL = 0, so it should be safe. - */ - - /* zero out g4 so that ofw calls work */ - mov %g0, %g4 - - ! **** ENABLE TRAPS **** - wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \ - SPARC_PSTATE_IE_MASK, %pstate - - /* - * Vector to user's handler. - * - * NOTE: TBR may no longer have vector number in it since - * we just enabled traps. It is definitely in g2. - */ - setx SYM(_ISR_Vector_table), %o5, %g1 - and %g2, 0x1FF, %o5 ! remove synchronous trap indicator - sll %o5, 3, %o5 ! o5 = offset into table - ldx [%g1 + %o5], %g1 ! g1 = _ISR_Vector_table[ vector ] - - - ! o1 = 2nd arg = address of the ISF - ! WAS LOADED WHEN ISF WAS SAVED!!! - mov %g2, %o0 ! o0 = 1st arg = vector number - call %g1, 0 - nop ! delay slot - - /* - * Redisable traps so we can finish up the interrupt processing. - * This is a conservative place to do this. - */ - ! **** DISABLE TRAPS **** - wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate - - /* - * We may safely use any of the %o and %g registers, because - * we saved them earlier (and any other interrupt that uses - * them will also save them). Right now, the state of those - * registers are as follows: - * %o registers: unknown (user's handler may have destroyed) - * %g1,g4,g5: scratch - * %g2: unknown: was trap vector - * %g3: uknown: was _Thread_Dispatch_Disable_level pointer - * %g6: _ISR_Nest_level - * %g7: interrupted task's sp - */ - - /* - * Increment ISR nest level and Thread dispatch disable level. 
- * - * Register usage for this section: (note: as used above) - * - * g3 = _Thread_Dispatch_disable_level pointer - * g5 = _Thread_Dispatch_disable_level value - * g6 = _ISR_Nest_level pointer - * g4 = _ISR_Nest_level value - * o5 = temp - */ - - /* We have to re-load the values from memory, because there are - * not enough registers that we know will be preserved across the - * user's handler. If this is a problem, we can create a register - * window for _ISR_Handler. - */ - - setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3 - lduw [%g3],%g5 - lduw [%g6],%g4 - sub %g5, 1, %g5 - stuw %g5, [%g3] - sub %g4, 1, %g4 - stuw %g4, [%g6] - - orcc %g4, %g0, %g0 ! ISRs still nested? - bnz dont_restore_stack ! Yes then don't restore stack yet - nop - - /* - * This is the outermost interrupt handler. Need to get off the - * CPU Interrupt Stack and back to the tasks stack. - * - * The following subtract should get us back on the interrupted - * tasks stack and add enough room to invoke the dispatcher. - * When we enable traps, we are mostly back in the context - * of the task and subsequent interrupts can operate normally. - * - * Now %sp points to the bottom of the ISF. - * - */ - - sub %g7, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp - - dont_restore_stack: - - /* - * If dispatching is disabled (includes nested interrupt case), - * then do a "simple" exit. - */ - - orcc %g5, %g0, %g0 ! Is dispatching disabled? - bnz simple_return ! Yes, then do a "simple" exit - ! NOTE: Use the delay slot - mov %g0, %g4 ! clear g4 for ofw - - ! Are we dispatching from a previous ISR in the interrupted thread? - setx SYM(_CPU_ISR_Dispatch_disable), %o5, %g5 - lduw [%g5], %o5 - orcc %o5, %g0, %g0 ! Is this thread already doing an ISR? - bnz simple_return ! Yes, then do a "simple" exit - nop - - setx DISPATCH_NEEDED, %o5, %g7 - - - /* - * If a context switch is necessary, then do fudge stack to - * return to the interrupt dispatcher. - */ - - ldub [%g7], %o5 - - orcc %o5, %g0, %g0 ! Is thread switch necessary? - bz simple_return ! no, then do a simple return. otherwise fallthru - nop - - /* - * Invoke interrupt dispatcher. - */ -PUBLIC(_ISR_Dispatch) - SYM(_ISR_Dispatch): - ! Set ISR dispatch nesting prevention flag - mov 1, %o1 - setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o2 - stuw %o1, [%o2] - - - ! **** ENABLE TRAPS **** - wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \ - SPARC_PSTATE_IE_MASK, %pstate - isr_dispatch: - call SYM(_Thread_Dispatch), 0 - nop - - /* - * We invoked _Thread_Dispatch in a state similar to the interrupted - * task. In order to safely be able to tinker with the register - * windows and get the task back to its pre-interrupt state, - * we need to disable interrupts. - */ - mov 2, %g4 ! syscall (disable interrupts) - ta 0 ! syscall (disable interrupts) - mov 0, %g4 - - /* - * While we had ISR dispatching disabled in this thread, - * did we miss anything. If so, then we need to do another - * _Thread_Dispatch before leaving this ISR Dispatch context. - */ - - setx DISPATCH_NEEDED, %o5, %o1 - ldub [%o1], %o2 - - orcc %o2, %g0, %g0 ! Is thread switch necessary? - bz allow_nest_again ! No, then clear out and return - nop - - ! Yes, then invoke the dispatcher -dispatchAgain: - mov 3, %g4 ! syscall (enable interrupts) - ta 0 ! syscall (enable interrupts) - ba isr_dispatch - mov 0, %g4 - - allow_nest_again: - - ! 
Zero out ISR stack nesting prevention flag - setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o1 - stuw %g0,[%o1] - - /* - * The CWP in place at this point may be different from - * that which was in effect at the beginning of the ISR if we - * have been context switched between the beginning of this invocation - * of _ISR_Handler and this point. Thus the CWP and WIM should - * not be changed back to their values at ISR entry time. Any - * changes to the PSR must preserve the CWP. - */ - - simple_return: - flushw ! get register windows to a 'clean' state - - ! **** DISABLE TRAPS **** - wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate - - ldx [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1 ! restore y - wr %o1, 0, %y - - ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1 - -! see if cwp is proper (tstate.cwp == cwp) - and %g1, 0x1F, %g6 - rdpr %cwp, %g7 - cmp %g6, %g7 - bz good_window - nop - - /* - * Fix the CWP. Need the cwp to be the proper cwp that - * gets restored when returning from the trap via retry/done. Do - * this before reloading the task's output regs. Basically fake a - * window spill/fill. - * - * Is this necessary on sun4v? Why not just re-write - * tstate.cwp to be equal to the current cwp? - */ - mov %sp, %g1 - stx %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET] - stx %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET] - stx %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET] - stx %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET] - stx %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET] - stx %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET] - stx %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET] - stx %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET] - stx %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET] - stx %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET] - stx %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET] - stx %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET] - stx %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET] - stx %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET] - stx %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET] - stx %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET] - wrpr %g0, %g6, %cwp - mov %g1, %sp - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 - ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7 - - - good_window: - - - /* - * Restore tasks global and out registers - */ - - ldx [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1 ! restore g1 - ldx [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2 ! restore g2 - ldx [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3 ! restore g3 - ldx [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4 ! restore g4 - ldx [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5 ! restore g5 - ldx [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6 ! 
restore g6 - ldx [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7 ! restore g7 - - ! Assume the interrupted context is in TL 0 with GL 0 / normal globals. - ! When tstate is restored at done/retry, the interrupted context is restored. - ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC - wrpr %g0, 1, %tl - - ! return to GL=1 or AG -#if defined(SUN4U) - rdpr %pstate, %o1 - or %o1, SPARC_PSTATE_AG_MASK, %o1 - wrpr %o1, %g0, %pstate ! go to AG. -#elif defined(SUN4V) - wrpr %g0, 1, %gl -#endif - -! now we can use global registers (at gl=1 or AG) - ldx [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3 - ldx [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4 - ldx [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5 - ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1 - ldx [%sp + STACK_BIAS + ISF_TVEC_OFFSET], %g2 - wrpr %g0, %g3, %pil - wrpr %g0, %g4, %tpc - wrpr %g0, %g5, %tnpc - - wrpr %g0, %g1, %tstate - - ldx [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0 ! restore o0 - ldx [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1 ! restore o1 - ldx [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2 ! restore o2 - ldx [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3 ! restore o3 - ldx [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4 ! restore o4 - ldx [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5 ! restore o5 - ! sp is restored later - ldx [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7 ! restore o7 - - ldx [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp - - /* - * Determine whether to re-execute the trapping instruction - * (asynchronous trap) or to skip the trapping instruction - * (synchronous trap). - */ - - andcc %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 - ! Is this a synchronous trap? - be not_synch ! No, then skip trapping instruction - mov 0, %g4 - retry ! re-execute trapping instruction - not_synch: - done ! skip trapping instruction - -/* end of file */ diff --git a/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.S b/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.S deleted file mode 100644 index ffd6e8538d..0000000000 --- a/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.S +++ /dev/null @@ -1,126 +0,0 @@ -/* - * systrap.S - * - * This file contains emulated system calls using software trap 0. - * The following calls are supported: - * - * + SYS_exit (halt) - * + SYS_irqdis (disable interrupts) - * + SYS_irqset (set interrupt level) - * - * COPYRIGHT (c) 2010. Gedare Bloom. - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ - -#include -#include "sparc64-syscall.h" - - -.seg "text" -/* - * system call - * - * On entry: - * g4[AG | GL=1] = tstate (from trap table) - * g2[AG | GL=1] = trap vector # (256) - * g3[AG | GL=1] = address of SYM(syscall) - * g4[AG | GL-1] = system call id - * if arch = sun4v: - * We need to back to GL-1 to read the system call id. - * on sun4u: - * We need to go back to the normal globals to read the system call id. - * - * First thing is to return to the previous set of globals, so - * that the system call id can be read. The syscall code needs - * to re-read tstate. - * - * syscall should only ever be entered by ta 0 being called explicitly - * by a function that knows what is happening. This means the syscall - * code can safely use any scratch registers and the %o registers. - */ - - -PUBLIC(syscall) - - - SYM(syscall): - mov %g0, %g4 ! clear %g4 at this GL -#if defined (SUN4U) - rdpr %pstate, %g1 - andn %g1, SPARC_PSTATE_AG_MASK, %g1 - wrpr %g1, %g0, %pstate ! 
go to regular globals -#elif defined (SUN4V) - rdpr %gl, %g1 - dec %g1 - wrpr %g0, %g1, %gl ! go back to GL = GL - 1 -#endif - - subcc %g4, 2, %g0 - bne 3f - rdpr %tstate, %g5 ! re-read tstate, use delay slot - - ! syscall 2, disable interrupts - rdpr %pil, %g1 - and %g5, SPARC_TSTATE_IE_MASK, %o0 - or %o0, %g1, %o0 ! return TSTATE_IE | PIL - wrpr %g0, 0xf, %pil ! set PIL to 15 - andn %g5, SPARC_TSTATE_IE_MASK, %g1 - wrpr %g0, %g1, %tstate ! disable interrupts in trap state - ba,a 9f - - 3: ! syscall 3, enable interrupts - subcc %g4, 3, %g0 - bne 1f - and %o0, 0xf, %g1 - wrpr %g0, %g1, %pil ! restore PIL -! and %o0, SPARC_TSTATE_IE_MASK, %g1 -! or %g5, %g1, %g1 ! restore saved IE - or %g5, SPARC_TSTATE_IE_MASK, %g1 ! restore IE (safe?) - wrpr %g0, %g1, %tstate - ba,a 9f - - 1: - ba,a 1b ! spin. taking a trap here -> htrap - - 9: ! leave - mov 0, %g4 ! clear %g4 - DONE - -PUBLIC(sparc_disable_interrupts) - - SYM(sparc_disable_interrupts): - mov SYS_irqdis, %g4 - ta 0 -#if 0 - rdpr %pstate, %g5 - rdpr %pil, %g1 - and %g5, SPARC_PSTATE_IE_MASK, %o0 - or %o0, %g1, %o0 ! return PSTATE_IE | PIL - wrpr %g0, 0xf, %pil ! set PIL to 15 - andn %g5, SPARC_PSTATE_IE_MASK, %g1 - wrpr %g0, %g1, %pstate ! disable interrupts -#endif - retl - nop - -PUBLIC(sparc_enable_interrupts) - - SYM(sparc_enable_interrupts): - mov SYS_irqen, %g4 - ta 0 -#if 0 - rdpr %pstate, %g5 - and %o0, 0xf, %g1 - wrpr %g0, %g1, %pil ! restore PIL - and %o0, SPARC_PSTATE_IE_MASK, %g1 - or %g5, %g1, %g1 ! restore saved IE -! or %g5, SPARC_PSTATE_IE_MASK, %g1 ! set IE regardless of old (safe?) - wrpr %g0, %g1, %pstate -#endif - retl - nop - - /* end of file */ diff --git a/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.h b/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.h deleted file mode 100644 index 9af3560267..0000000000 --- a/c/src/lib/libcpu/sparc64/shared/syscall/sparc64-syscall.h +++ /dev/null @@ -1,3 +0,0 @@ -#define SYS_exit 1 -#define SYS_irqdis 2 -#define SYS_irqen 3 diff --git a/cpukit/score/cpu/sparc64/Makefile.am b/cpukit/score/cpu/sparc64/Makefile.am index 2afd3edead..6cd23f0c95 100644 --- a/cpukit/score/cpu/sparc64/Makefile.am +++ b/cpukit/score/cpu/sparc64/Makefile.am @@ -1,13 +1,13 @@ include $(top_srcdir)/automake/compile.am -#include_rtems_sparc64dir = $(includedir)/rtems/sparc64 -#include_rtems_sparc64_HEADERS = - noinst_LIBRARIES = libscorecpu.a -libscorecpu_a_SOURCES = context.S cpu.c +libscorecpu_a_SOURCES = +libscorecpu_a_SOURCES += context.S +libscorecpu_a_SOURCES += cpu.c +libscorecpu_a_SOURCES += interrupt.S libscorecpu_a_SOURCES += ../no_cpu/cpucounterread.c libscorecpu_a_SOURCES += sparc64-exception-frame-print.c -libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS) +libscorecpu_a_SOURCES += sparc64-syscall.S include $(top_srcdir)/automake/local.am include $(srcdir)/headers.am diff --git a/cpukit/score/cpu/sparc64/cpu.c b/cpukit/score/cpu/sparc64/cpu.c index 93a4573c72..7eb80988b1 100644 --- a/cpukit/score/cpu/sparc64/cpu.c +++ b/cpukit/score/cpu/sparc64/cpu.c @@ -110,3 +110,232 @@ void _CPU_Context_Initialize( the_context->g7 = (uintptr_t) tcb; } } + +/* + * This initializes the set of opcodes placed in each trap + * table entry. The routine which installs a handler is responsible + * for filling in the fields for the _handler address and the _vector + * trap type. + * + * The constants following this structure are masks for the fields which + * must be filled in when the handler is installed. + */ + +/* 64-bit registers complicate this. 
Also, in sparc v9, + * each trap level gets its own set of global registers, but + * does not get its own dedicated register window. so we avoid + * using the local registers in the trap handler. + */ +const CPU_Trap_table_entry _CPU_Trap_slot_template = { + 0x89508000, /* rdpr %tstate, %g4 */ + 0x05000000, /* sethi %hh(_handler), %g2 */ + 0x8410a000, /* or %g2, %hm(_handler), %g2 */ + 0x8528b020, /* sllx %g2, 32, %g2 */ + 0x07000000, /* sethi %hi(_handler), %g3 */ + 0x8610c002, /* or %g3, %g2, %g3 */ + 0x81c0e000, /* jmp %g3 + %lo(_handler) */ + 0x84102000 /* mov _vector, %g2 */ +}; + + +/* + * _CPU_ISR_Get_level + * + * Input Parameters: NONE + * + * Output Parameters: + * returns the current interrupt level (PIL field of the PSR) + */ +uint32_t _CPU_ISR_Get_level( void ) +{ + uint32_t level; + + sparc64_get_interrupt_level( level ); + + return level; +} + +/* + * _CPU_ISR_install_raw_handler + * + * This routine installs the specified handler as a "raw" non-executive + * supported trap handler (a.k.a. interrupt service routine). + * + * Input Parameters: + * vector - trap table entry number plus synchronous + * vs. asynchronous information + * new_handler - address of the handler to be installed + * old_handler - pointer to an address of the handler previously installed + * + * Output Parameters: NONE + * *new_handler - address of the handler previously installed + * + * NOTE: + * + * On the SPARC v9, there are really only 512 vectors. However, the executive + * has no easy, fast, reliable way to determine which traps are synchronous + * and which are asynchronous. By default, traps return to the + * instruction which caused the interrupt. So if you install a software + * trap handler as an executive interrupt handler (which is desirable since + * RTEMS takes care of window and register issues), then the executive needs + * to know that the return address is to the trap rather than the instruction + * following the trap. + * + * So vectors 0 through 511 are treated as regular asynchronous traps which + * provide the "correct" return address. Vectors 512 through 1023 are assumed + * by the executive to be synchronous and to require that the return be to the + * trapping instruction. + * + * If you use this mechanism to install a trap handler which must reexecute + * the instruction which caused the trap, then it should be installed as + * a synchronous trap. This will avoid the executive changing the return + * address. + */ +void _CPU_ISR_install_raw_handler( + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + uint32_t real_vector; + CPU_Trap_table_entry *tba; + CPU_Trap_table_entry *slot; + uint64_t u64_tba; + uint64_t u64_handler; + + /* + * Get the "real" trap number for this vector ignoring the synchronous + * versus asynchronous indicator included with our vector numbers. + */ + + real_vector = SPARC_REAL_TRAP_NUMBER( vector ); + + /* + * Get the current base address of the trap table and calculate a pointer + * to the slot we are interested in. + */ + + sparc64_get_tba( u64_tba ); + +/* u32_tbr &= 0xfffff000; */ + u64_tba &= 0xffffffffffff8000; /* keep only trap base address */ + + tba = (CPU_Trap_table_entry *) u64_tba; + + /* use array indexing to fill in lower bits -- require + * CPU_Trap_table_entry to be full-sized. */ + slot = &tba[ real_vector ]; + + /* + * Get the address of the old_handler from the trap table. + * + * NOTE: The old_handler returned will be bogus if it does not follow + * the RTEMS model. 
+ */ + + /* shift amount to shift of hi bits (31:10) */ +#define HI_BITS_SHIFT 10 + + /* shift amount of hm bits (41:32) */ +#define HM_BITS_SHIFT 32 + + /* shift amount of hh bits (63:42) */ +#define HH_BITS_SHIFT 42 + + /* We're only interested in bits 0-9 of the immediate field*/ +#define IMM_MASK 0x000003FF + + if ( slot->rdpr_tstate_g4 == _CPU_Trap_slot_template.rdpr_tstate_g4 ) { + u64_handler = + (((uint64_t)((slot->sethi_of_hh_handler_to_g2 << HI_BITS_SHIFT) | + (slot->or_g2_hm_handler_to_g2 & IMM_MASK))) << HM_BITS_SHIFT) | + ((slot->sethi_of_handler_to_g3 << HI_BITS_SHIFT) | + (slot->jmp_to_low_of_handler_plus_g3 & IMM_MASK)); + *old_handler = (proc_ptr) u64_handler; + } else + *old_handler = 0; + + /* + * Copy the template to the slot and then fix it. + */ + + *slot = _CPU_Trap_slot_template; + + u64_handler = (uint64_t) new_handler; + + /* mask for extracting %hh */ +#define HH_BITS_MASK 0xFFFFFC0000000000 + + /* mask for extracting %hm */ +#define HM_BITS_MASK 0x000003FF00000000 + + /* mask for extracting %hi */ +#define HI_BITS_MASK 0x00000000FFFFFC00 + + /* mask for extracting %lo */ +#define LO_BITS_MASK 0x00000000000003FF + + + slot->mov_vector_g2 |= vector; + slot->sethi_of_hh_handler_to_g2 |= + (u64_handler & HH_BITS_MASK) >> HH_BITS_SHIFT; + slot->or_g2_hm_handler_to_g2 |= + (u64_handler & HM_BITS_MASK) >> HM_BITS_SHIFT; + slot->sethi_of_handler_to_g3 |= + (u64_handler & HI_BITS_MASK) >> HI_BITS_SHIFT; + slot->jmp_to_low_of_handler_plus_g3 |= (u64_handler & LO_BITS_MASK); + + /* need to flush icache after this !!! */ + + /* need to flush icache in case old trap handler is in cache */ + rtems_cache_invalidate_entire_instruction(); + +} + +/* + * _CPU_ISR_install_vector + * + * This kernel routine installs the RTEMS handler for the + * specified vector. + * + * Input parameters: + * vector - interrupt vector number + * new_handler - replacement ISR for this vector number + * old_handler - pointer to former ISR for this vector number + * + * Output parameters: + * *old_handler - former ISR for this vector number + */ +void _CPU_ISR_install_vector( + uint64_t vector, + proc_ptr new_handler, + proc_ptr *old_handler +) +{ + uint64_t real_vector; + proc_ptr ignored; + + /* + * Get the "real" trap number for this vector ignoring the synchronous + * versus asynchronous indicator included with our vector numbers. + */ + real_vector = SPARC_REAL_TRAP_NUMBER( vector ); + /* + * Return the previous ISR handler. + */ + + *old_handler = _ISR_Vector_table[ vector ]; + + /* + * Install the wrapper so this ISR can be invoked properly. + */ + + _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored ); + + /* + * We put the actual user ISR address in '_ISR_vector_table'. This will + * be used by the _ISR_Handler so the user gets control. + */ + + _ISR_Vector_table[ real_vector ] = new_handler; +} diff --git a/cpukit/score/cpu/sparc64/interrupt.S b/cpukit/score/cpu/sparc64/interrupt.S new file mode 100644 index 0000000000..6f8eb373f0 --- /dev/null +++ b/cpukit/score/cpu/sparc64/interrupt.S @@ -0,0 +1,543 @@ +/* cpu_asm.s + * + * This file contains the basic algorithms for all assembly code used + * in an specific CPU port of RTEMS. These algorithms must be implemented + * in assembly language. + * + * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR). + * COPYRIGHT (c) 2010. Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#include +#include + + +/* + * The assembler needs to be told that we know what to do with + * the global registers. + */ +.register %g2, #scratch +.register %g3, #scratch +.register %g6, #scratch +.register %g7, #scratch + + + /* + * void _ISR_Handler() + * + * This routine provides the RTEMS interrupt management. + * + * We enter this handler from the 8 instructions in the trap table with + * the following registers assumed to be set as shown: + * + * g4 = tstate (old l0) + * g2 = trap type (vector) (old l3) + * + * NOTE: By an executive defined convention: + * if trap type is between 0 and 511 it is an asynchronous trap + * if trap type is between 512 and 1023 it is an asynchonous trap + */ + + .align 4 +PUBLIC(_ISR_Handler) + SYM(_ISR_Handler): + + /* + * The ISR is called at TL = 1. + * On sun4u we use the alternate globals set. + * + * On entry: + * g4 = tstate (from trap table) + * g2 = trap vector # + * + * In either case, note that trap handlers share a register window with + * the interrupted context, unless we explicitly enter a new window. This + * differs from Sparc v8, in which a dedicated register window is saved + * for trap handling. This means we have to avoid overwriting any registers + * that we don't save. + * + */ + + + /* + * save some or all context on stack + */ + + /* + * Save the state of the interrupted task -- especially the global + * registers -- in the Interrupt Stack Frame. Note that the ISF + * includes a regular minimum stack frame which will be used if + * needed by register window overflow and underflow handlers. + * + * This is slightly wasteful, since the stack already has the window + * overflow space reserved, but there is no obvious way to ensure + * we can store the interrupted state and still handle window + * spill/fill correctly, since there is no room for the ISF. + * + */ + + /* this is for debugging purposes, make sure that TL = 1, otherwise + * things might get dicey */ + rdpr %tl, %g1 + cmp %g1, 1 + be 1f + nop + + 0: ba 0b + nop + + 1: + /* first store the sp of the interrupted task temporarily in g1 */ + mov %sp, %g1 + + sub %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp + ! make space for Stack_Frame||ISF + + /* save tstate, tpc, tnpc, pil */ + stx %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET] + rdpr %pil, %g3 + rdpr %tpc, %g4 + rdpr %tnpc, %g5 + stx %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET] + stx %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET] + stx %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET] + stx %g2, [%sp + STACK_BIAS + ISF_TVEC_OFFSET] + + rd %y, %g4 ! save y + stx %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET] + + ! save interrupted frame's output regs + stx %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET] ! save o0 + stx %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET] ! save o1 + stx %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET] ! save o2 + stx %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET] ! save o3 + stx %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET] ! save o4 + stx %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET] ! save o5 + stx %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET] ! save o6/sp + stx %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET] ! save o7 + + mov %g1, %o5 ! hold the old sp here for now + mov %g2, %o1 ! we'll need trap # later + + /* switch to TL[0] */ + wrpr %g0, 0, %tl + + /* switch to normal globals */ +#if defined (SUN4U) + /* the assignment to pstate below will mask out the AG bit */ +#elif defined (SUN4V) + wrpr %g0, 0, %gl +#endif + /* get pstate to known state */ + wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate + + ! 
save globals + stx %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET] ! save g1 + stx %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET] ! save g2 + stx %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET] ! save g3 + stx %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET] ! save g4 + stx %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET] ! save g5 + stx %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET] ! save g6 + stx %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET] ! save g7 + + + mov %o1, %g2 ! get the trap # + mov %o5, %g7 ! store the interrupted %sp (preserve) + mov %sp, %o1 ! 2nd arg to ISR Handler = address of ISF + add %o1, STACK_BIAS, %o1 ! need to adjust for stack bias, 2nd arg = ISF + + /* + * Increment ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: (note, these are used later) + * + * g3 = _Thread_Dispatch_disable_level pointer + * g5 = _Thread_Dispatch_disable_level value (uint32_t) + * g6 = _ISR_Nest_level pointer + * g4 = _ISR_Nest_level value (uint32_t) + * o5 = temp + * + * NOTE: It is assumed that g6 - g7 will be preserved until the ISR + * nest and thread dispatch disable levels are unnested. + */ + + setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3 + lduw [%g3], %g5 + setx ISR_NEST_LEVEL, %o5, %g6 + lduw [%g6], %g4 + + add %g5, 1, %g5 + stuw %g5, [%g3] + + add %g4, 1, %g4 + stuw %g4, [%g6] + + /* + * If ISR nest level was zero (now 1), then switch stack. + */ + + subcc %g4, 1, %g4 ! outermost interrupt handler? + bnz dont_switch_stacks ! No, then do not switch stacks + + setx SYM(INTERRUPT_STACK_HIGH), %o5, %g1 + ldx [%g1], %sp + + /* + * Adjust the stack for the stack bias + */ + sub %sp, STACK_BIAS, %sp + + /* + * Make sure we have a place on the stack for the window overflow + * trap handler to write into. At this point it is safe to + * enable traps again. + */ + + sub %sp, SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp + + dont_switch_stacks: + /* + * Check if we have an external interrupt (trap 0x41 - 0x4f). If so, + * set the PIL to mask off interrupts with lower priority. + * + * The original PIL is not modified since it will be restored + * when the interrupt handler returns. + */ + + and %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]? + + subcc %g1, 0x41, %g0 + bl dont_fix_pil + subcc %g1, 0x4f, %g0 + bg dont_fix_pil + nop + wrpr %g0, %g1, %pil + + dont_fix_pil: + /* We need to be careful about enabling traps here. + * + * We already stored off the tstate, tpc, and tnpc, and switched to + * TL = 0, so it should be safe. + */ + + /* zero out g4 so that ofw calls work */ + mov %g0, %g4 + + ! **** ENABLE TRAPS **** + wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \ + SPARC_PSTATE_IE_MASK, %pstate + + /* + * Vector to user's handler. + * + * NOTE: TBR may no longer have vector number in it since + * we just enabled traps. It is definitely in g2. + */ + setx SYM(_ISR_Vector_table), %o5, %g1 + and %g2, 0x1FF, %o5 ! remove synchronous trap indicator + sll %o5, 3, %o5 ! o5 = offset into table + ldx [%g1 + %o5], %g1 ! g1 = _ISR_Vector_table[ vector ] + + + ! o1 = 2nd arg = address of the ISF + ! WAS LOADED WHEN ISF WAS SAVED!!! + mov %g2, %o0 ! o0 = 1st arg = vector number + call %g1, 0 + nop ! delay slot + + /* + * Redisable traps so we can finish up the interrupt processing. + * This is a conservative place to do this. + */ + ! **** DISABLE TRAPS **** + wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate + + /* + * We may safely use any of the %o and %g registers, because + * we saved them earlier (and any other interrupt that uses + * them will also save them). 
Right now, the state of those + * registers are as follows: + * %o registers: unknown (user's handler may have destroyed) + * %g1,g4,g5: scratch + * %g2: unknown: was trap vector + * %g3: uknown: was _Thread_Dispatch_Disable_level pointer + * %g6: _ISR_Nest_level + * %g7: interrupted task's sp + */ + + /* + * Increment ISR nest level and Thread dispatch disable level. + * + * Register usage for this section: (note: as used above) + * + * g3 = _Thread_Dispatch_disable_level pointer + * g5 = _Thread_Dispatch_disable_level value + * g6 = _ISR_Nest_level pointer + * g4 = _ISR_Nest_level value + * o5 = temp + */ + + /* We have to re-load the values from memory, because there are + * not enough registers that we know will be preserved across the + * user's handler. If this is a problem, we can create a register + * window for _ISR_Handler. + */ + + setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3 + lduw [%g3],%g5 + lduw [%g6],%g4 + sub %g5, 1, %g5 + stuw %g5, [%g3] + sub %g4, 1, %g4 + stuw %g4, [%g6] + + orcc %g4, %g0, %g0 ! ISRs still nested? + bnz dont_restore_stack ! Yes then don't restore stack yet + nop + + /* + * This is the outermost interrupt handler. Need to get off the + * CPU Interrupt Stack and back to the tasks stack. + * + * The following subtract should get us back on the interrupted + * tasks stack and add enough room to invoke the dispatcher. + * When we enable traps, we are mostly back in the context + * of the task and subsequent interrupts can operate normally. + * + * Now %sp points to the bottom of the ISF. + * + */ + + sub %g7, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp + + dont_restore_stack: + + /* + * If dispatching is disabled (includes nested interrupt case), + * then do a "simple" exit. + */ + + orcc %g5, %g0, %g0 ! Is dispatching disabled? + bnz simple_return ! Yes, then do a "simple" exit + ! NOTE: Use the delay slot + mov %g0, %g4 ! clear g4 for ofw + + ! Are we dispatching from a previous ISR in the interrupted thread? + setx SYM(_CPU_ISR_Dispatch_disable), %o5, %g5 + lduw [%g5], %o5 + orcc %o5, %g0, %g0 ! Is this thread already doing an ISR? + bnz simple_return ! Yes, then do a "simple" exit + nop + + setx DISPATCH_NEEDED, %o5, %g7 + + + /* + * If a context switch is necessary, then do fudge stack to + * return to the interrupt dispatcher. + */ + + ldub [%g7], %o5 + + orcc %o5, %g0, %g0 ! Is thread switch necessary? + bz simple_return ! no, then do a simple return. otherwise fallthru + nop + + /* + * Invoke interrupt dispatcher. + */ +PUBLIC(_ISR_Dispatch) + SYM(_ISR_Dispatch): + ! Set ISR dispatch nesting prevention flag + mov 1, %o1 + setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o2 + stuw %o1, [%o2] + + + ! **** ENABLE TRAPS **** + wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \ + SPARC_PSTATE_IE_MASK, %pstate + isr_dispatch: + call SYM(_Thread_Dispatch), 0 + nop + + /* + * We invoked _Thread_Dispatch in a state similar to the interrupted + * task. In order to safely be able to tinker with the register + * windows and get the task back to its pre-interrupt state, + * we need to disable interrupts. + */ + mov 2, %g4 ! syscall (disable interrupts) + ta 0 ! syscall (disable interrupts) + mov 0, %g4 + + /* + * While we had ISR dispatching disabled in this thread, + * did we miss anything. If so, then we need to do another + * _Thread_Dispatch before leaving this ISR Dispatch context. + */ + + setx DISPATCH_NEEDED, %o5, %o1 + ldub [%o1], %o2 + + orcc %o2, %g0, %g0 ! Is thread switch necessary? + bz allow_nest_again ! No, then clear out and return + nop + + ! 
Yes, then invoke the dispatcher +dispatchAgain: + mov 3, %g4 ! syscall (enable interrupts) + ta 0 ! syscall (enable interrupts) + ba isr_dispatch + mov 0, %g4 + + allow_nest_again: + + ! Zero out ISR stack nesting prevention flag + setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o1 + stuw %g0,[%o1] + + /* + * The CWP in place at this point may be different from + * that which was in effect at the beginning of the ISR if we + * have been context switched between the beginning of this invocation + * of _ISR_Handler and this point. Thus the CWP and WIM should + * not be changed back to their values at ISR entry time. Any + * changes to the PSR must preserve the CWP. + */ + + simple_return: + flushw ! get register windows to a 'clean' state + + ! **** DISABLE TRAPS **** + wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate + + ldx [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1 ! restore y + wr %o1, 0, %y + + ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1 + +! see if cwp is proper (tstate.cwp == cwp) + and %g1, 0x1F, %g6 + rdpr %cwp, %g7 + cmp %g6, %g7 + bz good_window + nop + + /* + * Fix the CWP. Need the cwp to be the proper cwp that + * gets restored when returning from the trap via retry/done. Do + * this before reloading the task's output regs. Basically fake a + * window spill/fill. + * + * Is this necessary on sun4v? Why not just re-write + * tstate.cwp to be equal to the current cwp? + */ + mov %sp, %g1 + stx %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET] + stx %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET] + stx %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET] + stx %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET] + stx %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET] + stx %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET] + stx %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET] + stx %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET] + stx %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET] + stx %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET] + stx %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET] + stx %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET] + stx %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET] + stx %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET] + stx %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET] + stx %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET] + wrpr %g0, %g6, %cwp + mov %g1, %sp + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 + ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7 + + + good_window: + + + /* + * Restore tasks global and out registers + */ + + ldx [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1 ! restore g1 + ldx [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2 ! restore g2 + ldx [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3 ! 
restore g3 + ldx [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4 ! restore g4 + ldx [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5 ! restore g5 + ldx [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6 ! restore g6 + ldx [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7 ! restore g7 + + ! Assume the interrupted context is in TL 0 with GL 0 / normal globals. + ! When tstate is restored at done/retry, the interrupted context is restored. + ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC + wrpr %g0, 1, %tl + + ! return to GL=1 or AG +#if defined(SUN4U) + rdpr %pstate, %o1 + or %o1, SPARC_PSTATE_AG_MASK, %o1 + wrpr %o1, %g0, %pstate ! go to AG. +#elif defined(SUN4V) + wrpr %g0, 1, %gl +#endif + +! now we can use global registers (at gl=1 or AG) + ldx [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3 + ldx [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4 + ldx [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5 + ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1 + ldx [%sp + STACK_BIAS + ISF_TVEC_OFFSET], %g2 + wrpr %g0, %g3, %pil + wrpr %g0, %g4, %tpc + wrpr %g0, %g5, %tnpc + + wrpr %g0, %g1, %tstate + + ldx [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0 ! restore o0 + ldx [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1 ! restore o1 + ldx [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2 ! restore o2 + ldx [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3 ! restore o3 + ldx [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4 ! restore o4 + ldx [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5 ! restore o5 + ! sp is restored later + ldx [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7 ! restore o7 + + ldx [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp + + /* + * Determine whether to re-execute the trapping instruction + * (asynchronous trap) or to skip the trapping instruction + * (synchronous trap). + */ + + andcc %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 + ! Is this a synchronous trap? + be not_synch ! No, then skip trapping instruction + mov 0, %g4 + retry ! re-execute trapping instruction + not_synch: + done ! skip trapping instruction + +/* end of file */ diff --git a/cpukit/score/cpu/sparc64/sparc64-syscall.S b/cpukit/score/cpu/sparc64/sparc64-syscall.S new file mode 100644 index 0000000000..ffd6e8538d --- /dev/null +++ b/cpukit/score/cpu/sparc64/sparc64-syscall.S @@ -0,0 +1,126 @@ +/* + * systrap.S + * + * This file contains emulated system calls using software trap 0. + * The following calls are supported: + * + * + SYS_exit (halt) + * + SYS_irqdis (disable interrupts) + * + SYS_irqset (set interrupt level) + * + * COPYRIGHT (c) 2010. Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#include +#include "sparc64-syscall.h" + + +.seg "text" +/* + * system call + * + * On entry: + * g4[AG | GL=1] = tstate (from trap table) + * g2[AG | GL=1] = trap vector # (256) + * g3[AG | GL=1] = address of SYM(syscall) + * g4[AG | GL-1] = system call id + * if arch = sun4v: + * We need to back to GL-1 to read the system call id. + * on sun4u: + * We need to go back to the normal globals to read the system call id. + * + * First thing is to return to the previous set of globals, so + * that the system call id can be read. The syscall code needs + * to re-read tstate. + * + * syscall should only ever be entered by ta 0 being called explicitly + * by a function that knows what is happening. This means the syscall + * code can safely use any scratch registers and the %o registers. + */ + + +PUBLIC(syscall) + + + SYM(syscall): + mov %g0, %g4 ! 
clear %g4 at this GL +#if defined (SUN4U) + rdpr %pstate, %g1 + andn %g1, SPARC_PSTATE_AG_MASK, %g1 + wrpr %g1, %g0, %pstate ! go to regular globals +#elif defined (SUN4V) + rdpr %gl, %g1 + dec %g1 + wrpr %g0, %g1, %gl ! go back to GL = GL - 1 +#endif + + subcc %g4, 2, %g0 + bne 3f + rdpr %tstate, %g5 ! re-read tstate, use delay slot + + ! syscall 2, disable interrupts + rdpr %pil, %g1 + and %g5, SPARC_TSTATE_IE_MASK, %o0 + or %o0, %g1, %o0 ! return TSTATE_IE | PIL + wrpr %g0, 0xf, %pil ! set PIL to 15 + andn %g5, SPARC_TSTATE_IE_MASK, %g1 + wrpr %g0, %g1, %tstate ! disable interrupts in trap state + ba,a 9f + + 3: ! syscall 3, enable interrupts + subcc %g4, 3, %g0 + bne 1f + and %o0, 0xf, %g1 + wrpr %g0, %g1, %pil ! restore PIL +! and %o0, SPARC_TSTATE_IE_MASK, %g1 +! or %g5, %g1, %g1 ! restore saved IE + or %g5, SPARC_TSTATE_IE_MASK, %g1 ! restore IE (safe?) + wrpr %g0, %g1, %tstate + ba,a 9f + + 1: + ba,a 1b ! spin. taking a trap here -> htrap + + 9: ! leave + mov 0, %g4 ! clear %g4 + DONE + +PUBLIC(sparc_disable_interrupts) + + SYM(sparc_disable_interrupts): + mov SYS_irqdis, %g4 + ta 0 +#if 0 + rdpr %pstate, %g5 + rdpr %pil, %g1 + and %g5, SPARC_PSTATE_IE_MASK, %o0 + or %o0, %g1, %o0 ! return PSTATE_IE | PIL + wrpr %g0, 0xf, %pil ! set PIL to 15 + andn %g5, SPARC_PSTATE_IE_MASK, %g1 + wrpr %g0, %g1, %pstate ! disable interrupts +#endif + retl + nop + +PUBLIC(sparc_enable_interrupts) + + SYM(sparc_enable_interrupts): + mov SYS_irqen, %g4 + ta 0 +#if 0 + rdpr %pstate, %g5 + and %o0, 0xf, %g1 + wrpr %g0, %g1, %pil ! restore PIL + and %o0, SPARC_PSTATE_IE_MASK, %g1 + or %g5, %g1, %g1 ! restore saved IE +! or %g5, SPARC_PSTATE_IE_MASK, %g1 ! set IE regardless of old (safe?) + wrpr %g0, %g1, %pstate +#endif + retl + nop + + /* end of file */ diff --git a/cpukit/score/cpu/sparc64/sparc64-syscall.h b/cpukit/score/cpu/sparc64/sparc64-syscall.h new file mode 100644 index 0000000000..9af3560267 --- /dev/null +++ b/cpukit/score/cpu/sparc64/sparc64-syscall.h @@ -0,0 +1,3 @@ +#define SYS_exit 1 +#define SYS_irqdis 2 +#define SYS_irqen 3 -- cgit v1.2.3
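
Usage note (editorial, not part of the patch): the sparc64-syscall.S file moved above emulates interrupt enable/disable through software trap 0, and it is normally reached from C via the two PUBLIC() routines it defines. The sketch below is only an illustration of that pairing; the prototypes are assumptions inferred from the register usage in the assembly (syscall 2 returns the saved TSTATE.IE | PIL value in %o0, syscall 3 consumes it), so consult the sparc64 <rtems/score/cpu.h> for the definitive declarations.

    #include <stdint.h>

    /* ta 0 with SYS_irqdis in %g4: returns the saved TSTATE.IE | PIL value */
    uint32_t sparc_disable_interrupts( void );

    /* ta 0 with SYS_irqen in %g4: restores PIL (and IE) from the saved value */
    void sparc_enable_interrupts( uint32_t level );

    static void critical_section( void )
    {
      uint32_t level;

      level = sparc_disable_interrupts();  /* raise PIL to 15, clear IE    */
      /* ... code that must not be preempted by maskable interrupts ...    */
      sparc_enable_interrupts( level );    /* restore previous PIL and IE  */
    }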