From 632e4306dcb9bada638e35e7cd5f1de0fcf66902 Mon Sep 17 00:00:00 2001 From: Thomas Doerfler Date: Fri, 17 Jul 2009 13:50:31 +0000 Subject: exception handler maintenance --- cpukit/score/cpu/arm/ChangeLog | 16 + cpukit/score/cpu/arm/Makefile.am | 7 +- cpukit/score/cpu/arm/cpu.c | 283 +++------ cpukit/score/cpu/arm/cpu_asm.S | 130 ----- cpukit/score/cpu/arm/rtems/score/cpu.h | 948 +++++++++---------------------- cpukit/score/cpu/arm/rtems/score/types.h | 3 - 6 files changed, 350 insertions(+), 1037 deletions(-) (limited to 'cpukit') diff --git a/cpukit/score/cpu/arm/ChangeLog b/cpukit/score/cpu/arm/ChangeLog index 6bf9b93f24..04f9784fef 100644 --- a/cpukit/score/cpu/arm/ChangeLog +++ b/cpukit/score/cpu/arm/ChangeLog @@ -1,3 +1,19 @@ +2009-07-15 Sebastian Huber + + * arm_exc_handler_high.c, arm_exc_handler_low.S, arm_exc_interrupt.S: + New files. + * Makefile.am: Update. + * rtems/score/cpu.h: Removed all generic comments. Changed inline + assembler of interrupt support functions. Removed operating system + support for fast interrupts (FIQ). Overall cleanup. + * cpu.c: Changed type of arm_cpu_mode to uint32_t to match the type in + _CPU_Context_Initialize(). Moved exception handler code into + 'arm_exc_handler_high.c'. _CPU_ISR_install_vector() writes now only + if necessary. + * cpu_asm.S: Moved exception handler code into 'arm_exc_handler_low.S'. + * rtems/score/types.h: Removed superfluous defines. + * ChangeLog, thumb_isr.c: Removed files. + 2009-05-05 Joel Sherrill * rtems/score/cpu.h: Remove warnings. diff --git a/cpukit/score/cpu/arm/Makefile.am b/cpukit/score/cpu/arm/Makefile.am index abf5e9dcdc..4cab7ddcd9 100644 --- a/cpukit/score/cpu/arm/Makefile.am +++ b/cpukit/score/cpu/arm/Makefile.am @@ -11,8 +11,11 @@ include_rtems_score_HEADERS = rtems/score/cpu.h rtems/score/cpu_asm.h \ noinst_LIBRARIES = libscorecpu.a libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS) -libscorecpu_a_SOURCES = cpu.c cpu_asm.S -libscorecpu_a_SOURCES += thumb/thumb_isr.c +libscorecpu_a_SOURCES = cpu.c \ + cpu_asm.S \ + arm_exc_interrupt.S \ + arm_exc_handler_low.S \ + arm_exc_handler_high.c include $(srcdir)/preinstall.am include $(top_srcdir)/automake/local.am diff --git a/cpukit/score/cpu/arm/cpu.c b/cpukit/score/cpu/arm/cpu.c index a717923b82..3c4d96da48 100644 --- a/cpukit/score/cpu/arm/cpu.c +++ b/cpukit/score/cpu/arm/cpu.c @@ -1,7 +1,10 @@ -/* - * ARM CPU Dependent Source - * +/** + * @file * + * ARM support code. + */ + +/* * COPYRIGHT (c) 2000 Canon Research Centre France SA. * Emmanuel Raguet, mailto:raguet@crf.canon.fr * @@ -10,6 +13,8 @@ * * Copyright (c) 2007 Ray xu * + * Copyright (c) 2009 embedded brains GmbH + * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rtems.com/license/LICENSE. @@ -29,236 +34,86 @@ * This variable can be used to change the running mode of the execution * contexts. */ +uint32_t arm_cpu_mode = 0x13; -unsigned int arm_cpu_mode = 0x13; - -/* _CPU_Initialize - * - * INPUT PARAMETERS: NONE - * - * This routine performs processor dependent initialization. 
- */ - -void _CPU_Initialize(void) +void _CPU_Context_Initialize( + Context_Control *the_context, + uint32_t *stack_base, + uint32_t size, + uint32_t new_level, + void *entry_point, + bool is_fp +) { + the_context->register_sp = (uint32_t) stack_base + size ; + the_context->register_lr = (uint32_t) entry_point; + the_context->register_cpsr = new_level | arm_cpu_mode; } -/* - * - * _CPU_ISR_Get_level - returns the current interrupt level - */ -#define str(x) #x -#define xstr(x) str(x) -#define L(x) #x "_" xstr(__LINE__) - -#define TO_ARM_MODE(x) \ - asm volatile ( \ - ".code 16 \n" \ - L(x) "_thumb: \n" \ - ".align 2 \n" \ - "push {lr} \n" \ - "adr %0, "L(x) "_arm \n" \ - "bl " L(x)" \n" \ - "pop {pc} \n" \ - ".balign 4 \n" \ - L(x) ": \n" \ - "bx %0 \n" \ - "nop \n" \ - ".pool \n" \ - ".code 32 \n" \ - L(x) "_arm: \n" \ - :"=&r" (reg)) - - -/* - * Switch to Thumb mode Veneer,ugly but safe - */ - -#define TO_THUMB_MODE(x) \ - asm volatile ( \ - ".code 32 \n"\ - "adr %0, "L(x) "_thumb +1 \n"\ - "bx %0 \n"\ - ".pool \n"\ - ".thumb_func \n"\ - L(x) "_thumb: \n"\ - : "=&r" (reg)) +/* Preprocessor magic for stringification of x */ +#define _CPU_ISR_LEVEL_DO_STRINGOF( x) #x +#define _CPU_ISR_LEVEL_STRINGOF( x) _CPU_ISR_LEVEL_DO_STRINGOF( x) -#if (!defined(__THUMB_INTERWORK__) && !defined(__thumb__)) -uint32_t _CPU_ISR_Get_level( void ) +void _CPU_ISR_Set_level( uint32_t level ) { - uint32_t reg = 0; /* to avoid warning */ - asm volatile ("mrs %0, cpsr \n" \ - "and %0, %0, #0xc0 \n" \ - : "=r" (reg) \ - : "0" (reg) ); - return reg; + uint32_t reg; + + asm volatile ( + THUMB_TO_ARM + "mrs %0, cpsr\n" + "bic %0, %0, #" _CPU_ISR_LEVEL_STRINGOF( CPU_MODES_INTERRUPT_MASK ) "\n" + "orr %0, %0, %1\n" + "msr cpsr, %0\n" + ARM_TO_THUMB + : "=r" (reg) + : "r" (level) + ); } -#endif - +uint32_t _CPU_ISR_Get_level( void ) +{ + uint32_t reg; + uint32_t level; + + asm volatile ( + THUMB_TO_ARM + "mrs %0, cpsr\n" + "and %1, %0, #" _CPU_ISR_LEVEL_STRINGOF( CPU_MODES_INTERRUPT_MASK ) "\n" + ARM_TO_THUMB + : "=r" (reg), "=r" (level) + ); + + return level; +} -/* - * _CPU_ISR_install_vector - * - * This kernel routine installs the RTEMS handler for the - * specified vector. - * - * Input parameters: - * vector - interrupt vector number - * new_handler - replacement ISR for this vector number - * old_handler - pointer to store former ISR for this vector number - * - * FIXME: This vector scheme should be changed to allow FIQ to be - * handled better. 
I'd like to be able to put VectorTable - * elsewhere - JTM - * - * - * Output parameters: NONE - * - */ void _CPU_ISR_install_vector( - uint32_t vector, - proc_ptr new_handler, - proc_ptr *old_handler + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler ) { - /* pointer on the redirection table in RAM */ - long *VectorTable = (long *)(MAX_EXCEPTIONS * 4); - - if (old_handler != NULL) { - old_handler = *(proc_ptr *)(VectorTable + vector); - } + /* Redirection table starts at the end of the vector table */ + volatile uint32_t *table = (volatile uint32_t *) (MAX_EXCEPTIONS * 4); - *(VectorTable + vector) = (long)new_handler ; + uint32_t current_handler = table [vector]; + /* The current handler is now the old one */ + if (old_handler != NULL) { + *old_handler = (proc_ptr) current_handler; + } + + /* Write only if necessary to avoid writes to a maybe read-only memory */ + if (current_handler != (uint32_t) new_handler) { + table [vector] = (uint32_t) new_handler; + } } -void _CPU_Context_Initialize( - Context_Control *the_context, - uint32_t *stack_base, - uint32_t size, - uint32_t new_level, - void *entry_point, - bool is_fp -) -{ - the_context->register_sp = (uint32_t)stack_base + size ; - the_context->register_lr = (uint32_t)entry_point; - the_context->register_cpsr = new_level | arm_cpu_mode; -} - - -/* - * _CPU_Install_interrupt_stack - this function is empty since the - * BSP must set up the interrupt stacks. - */ - void _CPU_Install_interrupt_stack( void ) { + /* This function is empty since the BSP must set up the interrupt stacks */ } -void _defaultExcHandler (CPU_Exception_frame *ctx) +void _CPU_Initialize( void ) { - printk("\n\r"); - printk("----------------------------------------------------------\n\r"); -#if 1 - printk("Exception 0x%x caught at PC 0x%x by thread %d\n", - ctx->register_ip, ctx->register_lr - 4, - _Thread_Executing->Object.id); -#endif - printk("----------------------------------------------------------\n\r"); - printk("Processor execution context at time of the fault was :\n\r"); - printk("----------------------------------------------------------\n\r"); -#if 0 - printk(" r0 = %8x r1 = %8x r2 = %8x r3 = %8x\n\r", - ctx->register_r0, ctx->register_r1, - ctx->register_r2, ctx->register_r3); - printk(" r4 = %8x r5 = %8x r6 = %8x r7 = %8x\n\r", - ctx->register_r4, ctx->register_r5, - ctx->register_r6, ctx->register_r7); - printk(" r8 = %8x r9 = %8x r10 = %8x\n\r", - ctx->register_r8, ctx->register_r9, ctx->register_r10); - printk(" fp = %8x ip = %8x sp = %8x pc = %8x\n\r", - ctx->register_fp, ctx->register_ip, - ctx->register_sp, ctx->register_lr - 4); - printk("----------------------------------------------------------\n\r"); -#endif - if (_ISR_Nest_level > 0) { - /* - * In this case we shall not delete the task interrupted as - * it has nothing to do with the fault. We cannot return either - * because the eip points to the faulty instruction so... - */ - printk("Exception while executing ISR!!!. 
System locked\n\r"); - while(1); - } - else { - printk("*********** FAULTY THREAD WILL BE DELETED **************\n\r"); - rtems_task_delete(_Thread_Executing->Object.id); - } + /* Do nothing */ } - -cpuExcHandlerType _currentExcHandler = _defaultExcHandler; - -extern void _Exception_Handler_Undef_Swi(void); -extern void _Exception_Handler_Abort(void); -extern void _exc_data_abort(void); - - - -/* FIXME: put comments here */ -void rtems_exception_init_mngt(void) -{ - ISR_Level level; - - _CPU_ISR_Disable(level); - _CPU_ISR_install_vector(ARM_EXCEPTION_UNDEF, - _Exception_Handler_Undef_Swi, - NULL); - - _CPU_ISR_install_vector(ARM_EXCEPTION_SWI, - _Exception_Handler_Undef_Swi, - NULL); - - _CPU_ISR_install_vector(ARM_EXCEPTION_PREF_ABORT, - _Exception_Handler_Abort, - NULL); - - _CPU_ISR_install_vector(ARM_EXCEPTION_DATA_ABORT, - _exc_data_abort, - NULL); - - _CPU_ISR_install_vector(ARM_EXCEPTION_FIQ, - _Exception_Handler_Abort, - NULL); - - _CPU_ISR_install_vector(ARM_EXCEPTION_IRQ, - _Exception_Handler_Abort, - NULL); - - _CPU_ISR_Enable(level); -} - -#define INSN_MASK 0xc5 - -#define INSN_STM1 0x80 -#define INSN_STM2 0x84 -#define INSN_STR 0x40 -#define INSN_STRB 0x44 - -#define INSN_LDM1 0x81 -#define INSN_LDM23 0x85 -#define INSN_LDR 0x41 -#define INSN_LDRB 0x45 - -#define GET_RD(x) ((x & 0x0000f000) >> 12) -#define GET_RN(x) ((x & 0x000f0000) >> 16) - -#define GET_U(x) ((x & 0x00800000) >> 23) -#define GET_I(x) ((x & 0x02000000) >> 25) - -#define GET_REG(r, ctx) (((uint32_t *)ctx)[r]) -#define SET_REG(r, ctx, v) (((uint32_t *)ctx)[r] = v) -#define GET_OFFSET(insn) (insn & 0xfff) - diff --git a/cpukit/score/cpu/arm/cpu_asm.S b/cpukit/score/cpu/arm/cpu_asm.S index 79882b294d..91654fa4bb 100644 --- a/cpukit/score/cpu/arm/cpu_asm.S +++ b/cpukit/score/cpu/arm/cpu_asm.S @@ -84,133 +84,3 @@ _restore: FUNC_START_ARM(_CPU_Context_restore) mov r1, r0 b _restore - - - -/* FIXME: _Exception_Handler_Undef_Swi is untested */ -FUNC_START_ARM(_Exception_Handler_Undef_Swi) -/* FIXME: This should use load and store multiple instructions */ - sub r13,r13,#SIZE_REGS - str r4, [r13, #REG_R4] - str r5, [r13, #REG_R5] - str r6, [r13, #REG_R6] - str r7, [r13, #REG_R7] - str r8, [r13, #REG_R8] - str r9, [r13, #REG_R9] - str r10, [r13, #REG_R10] - str r11, [r13, #REG_R11] - str sp, [r13, #REG_SP] - str lr, [r13, #REG_LR] - mrs r0, cpsr /* read the status */ - and r0, r0,#0x1f /* we keep the mode as exception number */ - str r0, [r13, #REG_PC] /* we store it in a free place */ - mov r0, r13 /* put frame address in r0 (C arg 1) */ - - ldr r1, =SWI_Handler - ldr lr, =_go_back_1 - ldr pc,[r1] /* call handler */ -_go_back_1: - ldr r4, [r13, #REG_R4] - ldr r5, [r13, #REG_R5] - ldr r6, [r13, #REG_R6] - ldr r7, [r13, #REG_R7] - ldr r8, [r13, #REG_R8] - ldr r9, [r13, #REG_R9] - ldr r10, [r13, #REG_R10] - ldr r11, [r13, #REG_R11] - ldr sp, [r13, #REG_SP] - ldr lr, [r13, #REG_LR] - add r13,r13,#SIZE_REGS - movs pc,r14 /* return */ - -/* FIXME: _Exception_Handler_Abort is untested */ -FUNC_START_ARM(_Exception_Handler_Abort) -/* FIXME: This should use load and store multiple instructions */ - sub r13,r13,#SIZE_REGS - str r4, [r13, #REG_R4] - str r5, [r13, #REG_R5] - str r6, [r13, #REG_R6] - str r7, [r13, #REG_R7] - str r8, [r13, #REG_R8] - str r9, [r13, #REG_R9] - str sp, [r13, #REG_R11] - str lr, [r13, #REG_SP] - str lr, [r13, #REG_LR] - mrs r0, cpsr /* read the status */ - and r0, r0,#0x1f /* we keep the mode as exception number */ - str r0, [r13, #REG_PC] /* we store it in a free place */ - mov r0, r13 /* put frame 
address in ro (C arg 1) */ - - ldr r1, =_currentExcHandler - ldr lr, =_go_back_2 - ldr pc,[r1] /* call handler */ -_go_back_2: - ldr r4, [r13, #REG_R4] - ldr r5, [r13, #REG_R5] - ldr r6, [r13, #REG_R6] - ldr r7, [r13, #REG_R7] - ldr r8, [r13, #REG_R8] - ldr r9, [r13, #REG_R9] - ldr r10, [r13, #REG_R10] - ldr sp, [r13, #REG_R11] - ldr lr, [r13, #REG_SP] - ldr lr, [r13, #REG_LR] - add r13,r13,#SIZE_REGS -#ifdef __thumb__ - subs r11, r14,#4 - bx r11 - nop -#else - subs pc,r14,#4 /* return */ -#endif - -#define ABORT_REGS_OFFS 32-REG_R4 -#define ABORT_SIZE_REGS SIZE_REGS+ABORT_REGS_OFFS - -FUNC_START_ARM(_exc_data_abort) - sub sp, sp, #ABORT_SIZE_REGS /* reserve register frame */ - stmia sp, {r0-r11} - add sp, sp, #ABORT_REGS_OFFS /* the Context_Control structure starts by CPSR, R4, ... */ - - str ip, [sp, #REG_PC] /* store R12 (ip) somewhere, oh hackery, hackery, hack */ - str lr, [sp, #REG_LR] - - mov r1, lr - ldr r0, [r1, #-8] /* r0 = bad instruction */ - mrs r1, spsr /* r1 = spsr */ - mov r2, r13 /* r2 = exception frame of Context_Control type */ -#if defined(__thumb__) - .code 32 - /*arm to thumb*/ - adr r5, to_thumb + 1 - bx r5 - .code 16 -to_thumb: -#endif - bl do_data_abort -#if defined(__thumb__) -/*back to arm*/ - .code 16 -thumb_to_arm: - .align 2 - adr r5, arm_code - bx r5 - nop - .code 32 -arm_code: -#endif - - ldr lr, [sp, #REG_LR] - ldr ip, [sp, #REG_PC] /* restore R12 (ip) */ - - sub sp, sp, #ABORT_REGS_OFFS - ldmia sp, {r0-r11} - add sp, sp, #ABORT_SIZE_REGS -#ifdef __thumb__ - subs r11, r14, #4 /* return to the instruction */ - bx r11 - nop -#else - subs pc, r14, #4 -#endif - /* _AFTER_ the aborted one */ diff --git a/cpukit/score/cpu/arm/rtems/score/cpu.h b/cpukit/score/cpu/arm/rtems/score/cpu.h index d68030f817..cd81a1c5e5 100644 --- a/cpukit/score/cpu/arm/rtems/score/cpu.h +++ b/cpukit/score/cpu/arm/rtems/score/cpu.h @@ -24,646 +24,317 @@ * */ -/* FIXME: finish commenting/cleaning up this file */ #ifndef _RTEMS_SCORE_CPU_H #define _RTEMS_SCORE_CPU_H -#ifdef __cplusplus -extern "C" { -#endif +#include -#include /* pick up machine definitions */ #ifndef ASM -#include + #include #endif -/* conditional compilation parameters */ +#ifndef TRUE + #warning "TRUE not defined" + #define TRUE 1 +#endif -/* - * Should the calls to _Thread_Enable_dispatch be inlined? - * - * If TRUE, then they are inlined. - * If FALSE, then a subroutine call is made. - * - * Basically this is an example of the classic trade-off of size - * versus speed. Inlining the call (TRUE) typically increases the - * size of RTEMS while speeding up the enabling of dispatching. - * [NOTE: In general, the _Thread_Dispatch_disable_level will - * only be 0 or 1 unless you are in an interrupt handler and that - * interrupt handler invokes the executive.] When not inlined - * something calls _Thread_Enable_dispatch which in turns calls - * _Thread_Dispatch. If the enable dispatch is inlined, then - * one subroutine call is avoided entirely.] - */ +#ifndef FALSE + #warning "FALSE not defined" + #define FALSE 0 +#endif -#if defined(__thumb__) -#define CPU_INLINE_ENABLE_DISPATCH FALSE +#ifdef __thumb__ + #define ARM_TO_THUMB "add %0, pc, #1\nbx %0\n.thumb\n" + #define THUMB_TO_ARM ".align 2\nbx pc\n.arm\n" #else -#define CPU_INLINE_ENABLE_DISPATCH TRUE + #define ARM_TO_THUMB + #define THUMB_TO_ARM #endif -/* - * Should the body of the search loops in _Thread_queue_Enqueue_priority - * be unrolled one time? In unrolled each iteration of the loop examines - * two "nodes" on the chain being searched. 
Otherwise, only one node - * is examined per iteration. - * - * If TRUE, then the loops are unrolled. - * If FALSE, then the loops are not unrolled. - * - * The primary factor in making this decision is the cost of disabling - * and enabling interrupts (_ISR_Flash) versus the cost of rest of the - * body of the loop. On some CPUs, the flash is more expensive than - * one iteration of the loop body. In this case, it might be desirable - * to unroll the loop. It is important to note that on some CPUs, this - * code is the longest interrupt disable period in RTEMS. So it is - * necessary to strike a balance when setting this parameter. - */ +/* If someone uses THUMB we assume she wants minimal code size */ +#ifdef __thumb__ + #define CPU_INLINE_ENABLE_DISPATCH FALSE +#else + #define CPU_INLINE_ENABLE_DISPATCH TRUE +#endif -#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE +#if defined(__ARMEL__) + #define CPU_BIG_ENDIAN FALSE + #define CPU_LITTLE_ENDIAN TRUE +#elif defined(__ARMEB__) + #define CPU_BIG_ENDIAN TRUE + #define CPU_LITTLE_ENDIAN FALSE +#else + #error "unknown endianness" +#endif -/* - * Does RTEMS manage a dedicated interrupt stack in software? - * - * If TRUE, then a stack is allocated in _Interrupt_Manager_initialization. - * If FALSE, nothing is done. - * - * If the CPU supports a dedicated interrupt stack in hardware, - * then it is generally the responsibility of the BSP to allocate it - * and set it up. - * - * If the CPU does not support a dedicated interrupt stack, then - * the porter has two options: (1) execute interrupts on the - * stack of the interrupted task, and (2) have RTEMS manage a dedicated - * interrupt stack. - * - * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. - * - * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and - * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is - * possible that both are FALSE for a particular CPU. Although it - * is unclear what that would imply about the interrupt processing - * procedure on that CPU. - */ +#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE #define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE -/* - * Does this CPU have hardware support for a dedicated interrupt stack? - * - * If TRUE, then it must be installed during initialization. - * If FALSE, then no installation is performed. - * - * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. - * - * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and - * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is - * possible that both are FALSE for a particular CPU. Although it - * is unclear what that would imply about the interrupt processing - * procedure on that CPU. - */ - #define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE -/* - * Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager? - * - * If TRUE, then the memory is allocated during initialization. - * If FALSE, then the memory is allocated during initialization. - * - * This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE. - */ - #define CPU_ALLOCATE_INTERRUPT_STACK FALSE -/* - * Does the RTEMS invoke the user's ISR with the vector number and - * a pointer to the saved interrupt frame (1) or just the vector - * number (0)? - */ - #define CPU_ISR_PASSES_FRAME_POINTER 0 -/* - * Does the CPU have hardware floating point? - * - * If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported. - * If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored. - * - * If there is a FP coprocessor such as the i387 or mc68881, then - * the answer is TRUE. 
- * - * The macro name "ARM_HAS_FPU" should be made CPU specific. - * It indicates whether or not this CPU model has FP support. For - * example, it would be possible to have an i386_nofp CPU model - * which set this to false to indicate that you have an i386 without - * an i387 and wish to leave floating point support out of RTEMS. - */ - #if ( ARM_HAS_FPU == 1 ) -#define CPU_HARDWARE_FP TRUE + #define CPU_HARDWARE_FP TRUE #else -#define CPU_HARDWARE_FP FALSE + #define CPU_HARDWARE_FP FALSE #endif -#define CPU_SOFTWARE_FP FALSE - -/* - * Are all tasks RTEMS_FLOATING_POINT tasks implicitly? - * - * If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed. - * If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed. - * - * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well. - */ - -#define CPU_ALL_TASKS_ARE_FP FALSE - -/* - * Should the IDLE task have a floating point context? - * - * If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task - * and it has a floating point context which is switched in and out. - * If FALSE, then the IDLE task does not have a floating point context. - * - * Setting this to TRUE negatively impacts the time required to preempt - * the IDLE task from an interrupt because the floating point context - * must be saved as part of the preemption. - */ - -#define CPU_IDLE_TASK_IS_FP FALSE - -/* - * Should the saving of the floating point registers be deferred - * until a context switch is made to another different floating point - * task? - * - * If TRUE, then the floating point context will not be stored until - * necessary. It will remain in the floating point registers and not - * disturned until another floating point task is switched to. - * - * If FALSE, then the floating point context is saved when a floating - * point task is switched out and restored when the next floating point - * task is restored. The state of the floating point registers between - * those two operations is not specified. - * - * If the floating point context does NOT have to be saved as part of - * interrupt dispatching, then it should be safe to set this to TRUE. - * - * Setting this flag to TRUE results in using a different algorithm - * for deciding when to save and restore the floating point context. - * The deferred FP switch algorithm minimizes the number of times - * the FP context is saved and restored. The FP context is not saved - * until a context switch is made to another, different FP task. - * Thus in a system with only one FP task, the FP context will never - * be saved or restored. - */ - -#define CPU_USE_DEFERRED_FP_SWITCH FALSE - -/* - * Does this port provide a CPU dependent IDLE task implementation? - * - * If TRUE, then the routine _CPU_Thread_Idle_body - * must be provided and is the default IDLE thread body instead of - * _CPU_Thread_Idle_body. - * - * If FALSE, then use the generic IDLE thread body if the BSP does - * not provide one. - * - * This is intended to allow for supporting processors which have - * a low power or idle mode. When the IDLE thread is executed, then - * the CPU can be powered down. - * - * The order of precedence for selecting the IDLE thread body is: - * - * 1. BSP provided - * 2. CPU dependent (if provided) - * 3. generic (if no BSP and no CPU dependent) - */ - -#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE +#define CPU_SOFTWARE_FP FALSE -/* - * Does the stack grow up (toward higher addresses) or down - * (toward lower addresses)? - * - * If TRUE, then the grows upward. 
- * If FALSE, then the grows toward smaller addresses. - */ +#define CPU_ALL_TASKS_ARE_FP FALSE -#define CPU_STACK_GROWS_UP FALSE +#define CPU_IDLE_TASK_IS_FP FALSE -/* - * The following is the variable attribute used to force alignment - * of critical RTEMS structures. On some processors it may make - * sense to have these aligned on tighter boundaries than - * the minimum requirements of the compiler in order to have as - * much of the critical data area as possible in a cache line. - * - * The placement of this macro in the declaration of the variables - * is based on the syntactically requirements of the GNU C - * "__attribute__" extension. For example with GNU C, use - * the following to force a structures to a 32 byte boundary. - * - * __attribute__ ((aligned (32))) - * - * NOTE: Currently only the Priority Bit Map table uses this feature. - * To benefit from using this, the data must be heavily - * used so it will stay in the cache and used frequently enough - * in the executive to justify turning this on. - */ +#define CPU_USE_DEFERRED_FP_SWITCH FALSE -#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32))) +#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE -/* - * Define what is required to specify how the network to host conversion - * routines are handled. - */ +#define CPU_STACK_GROWS_UP FALSE -#if defined(__ARMEL__) -#define CPU_BIG_ENDIAN FALSE -#define CPU_LITTLE_ENDIAN TRUE -#elif defined(__ARMEB__) -#define CPU_BIG_ENDIAN TRUE -#define CPU_LITTLE_ENDIAN FALSE -#else -#error "Unknown endianness" -#endif - -/* - * The following defines the number of bits actually used in the - * interrupt field of the task mode. How those bits map to the - * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level(). - */ - -#define CPU_MODES_INTERRUPT_MASK 0x000000c0 +/* XXX Why 32? */ +#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32))) /* - * Processor defined structures required by cpukit/score. - */ - -/* may need to put some structures here. */ - -/* - * Contexts - * - * Generally there are 2 types of context to save. - * 1. Interrupt registers to save - * 2. Task level registers to save - * - * This means we have the following 3 context items: - * 1. task level context stuff:: Context_Control - * 2. floating point task stuff:: Context_Control_fp - * 3. special interrupt level context :: Context_Control_interrupt - * - * On some processors, it is cost-effective to save only the callee - * preserved registers during a task context switch. This means - * that the ISR code needs to save those registers which do not - * persist across function calls. It is not mandatory to make this - * distinctions between the caller/callee saves registers for the - * purpose of minimizing context saved during task switch and on interrupts. - * If the cost of saving extra registers is minimal, simplicity is the - * choice. Save the same context on interrupt entry as for tasks in - * this case. + * The interrupt mask disables only normal interrupts (IRQ). * - * Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then - * care should be used in designing the context area. - * - * On some CPUs with hardware floating point support, the Context_Control_fp - * structure will not be used or it simply consist of an array of a - * fixed number of bytes. This is done when the floating point context - * is dumped by a "FP save context" type instruction and the format - * is not really defined by the CPU. 
In this case, there is no need - * to figure out the exact format -- only the size. Of course, although - * this is enough information for RTEMS, it is probably not enough for - * a debugger such as gdb. But that is another problem. - */ -typedef struct { - uint32_t register_cpsr; - uint32_t register_r4; - uint32_t register_r5; - uint32_t register_r6; - uint32_t register_r7; - uint32_t register_r8; - uint32_t register_r9; - uint32_t register_r10; - uint32_t register_fp; - uint32_t register_sp; - uint32_t register_lr; - uint32_t register_pc; -} Context_Control; - -#define _CPU_Context_Get_SP( _context ) \ - (_context)->register_sp - -typedef struct { - double some_float_register; -} Context_Control_fp; - -typedef struct { - uint32_t register_r0; - uint32_t register_r1; - uint32_t register_r2; - uint32_t register_r3; - uint32_t register_ip; - uint32_t register_lr; -} CPU_Exception_frame; - -typedef void (*cpuExcHandlerType) (CPU_Exception_frame*); -extern cpuExcHandlerType _currentExcHandler; -extern void rtems_exception_init_mngt(void); - -/* - * The following structure defines the set of information saved - * on the current stack by RTEMS upon receipt of each interrupt - * that will lead to re-enter the kernel to signal the thread. - */ - -typedef CPU_Exception_frame CPU_Interrupt_frame; - -/* - * This variable is optional. It is used on CPUs on which it is difficult - * to generate an "uninitialized" FP context. It is filled in by - * _CPU_Initialize and copied into the task's FP context area during - * _CPU_Context_Initialize. - */ - -SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context; - -/* - * The size of the floating point context area. On some CPUs this - * will not be a "sizeof" because the format of the floating point - * area is not defined -- only the size is. This is usually on - * CPUs with a "floating point save context" instruction. + * In order to support fast interrupts (FIQ) such that they can do something + * useful, we have to disable the operating system support for FIQs. Having + * operating system support for them would require that FIQs are disabled + * during critical sections of the operating system and application. At this + * level IRQs and FIQs would be equal. It is true that FIQs could interrupt + * the non critical sections of IRQs, so here they would have a small + * advantage. Without operating system support, the FIQs can execute at any + * time (of course not during the service of another FIQ). If someone needs + * operating system support for a FIQ, she can trigger a software interrupt and + * service the request in a two-step process. */ +#define CPU_MODES_INTERRUPT_MASK 0x80 #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp ) -/* - * Amount of extra stack (above minimum stack size) required by - * MPCI receive server thread. Remember that in a multiprocessor - * system this thread must exist and be able to process all directives. - */ - #define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0 -/* - * This defines the number of entries in the ISR_Vector_table managed - * by RTEMS. - */ +#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8 -#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8 -#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) - -/* - * This is defined if the port has a special way to report the ISR nesting - * level. Most ports maintain the variable _ISR_Nest_level. 
- */ +#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE -/* - * Should be large enough to run all RTEMS tests. This ensures - * that a "reasonable" small application should not have any problems. - */ +#define CPU_STACK_MINIMUM_SIZE (1024 * 4) -#define CPU_STACK_MINIMUM_SIZE (1024*4) +#define CPU_ALIGNMENT 4 -/* - * CPU's worst alignment requirement for data types on a byte boundary. This - * alignment does not take into account the requirements for the stack. - */ +#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT -#define CPU_ALIGNMENT 4 +#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT -/* - * This number corresponds to the byte alignment requirement for the - * heap handler. This alignment requirement may be stricter than that - * for the data types alignment specified by CPU_ALIGNMENT. It is - * common for the heap to follow the same alignment requirement as - * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap, - * then this should be set to CPU_ALIGNMENT. - * - * NOTE: This does not have to be a power of 2. It does have to - * be greater or equal to than CPU_ALIGNMENT. - */ - -#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT +#define CPU_STACK_ALIGNMENT 4 /* - * This number corresponds to the byte alignment requirement for memory - * buffers allocated by the partition manager. This alignment requirement - * may be stricter than that for the data types alignment specified by - * CPU_ALIGNMENT. It is common for the partition to follow the same - * alignment requirement as CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict - * enough for the partition, then this should be set to CPU_ALIGNMENT. + * Bitfield handler macros. * - * NOTE: This does not have to be a power of 2. It does have to - * be greater or equal to than CPU_ALIGNMENT. - */ - -#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT - -/* - * This number corresponds to the byte alignment requirement for the - * stack. This alignment requirement may be stricter than that for the - * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT - * is strict enough for the stack, then this should be set to 0. + * If we had a particularly fast function for finding the first + * bit set in a word, it would go here. Since we don't (*), we'll + * just use the universal macros. * - * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT. + * (*) On ARM V5 and later, there's a CLZ function which could be + * used to implement much quicker than the default macro. */ -#define CPU_STACK_ALIGNMENT 4 +#define CPU_USE_GENERIC_BITFIELD_CODE TRUE -/* ISR handler macros */ +#define CPU_USE_GENERIC_BITFIELD_DATA TRUE -/* - * Support routine to initialize the RTEMS vector table after it is allocated. - */ +#define CPU_ENABLE_C_ISR_DISPATCH_IMPLEMENTATION TRUE -#define _CPU_Initialize_vectors() +#ifndef ASM -/* - * Disable all interrupts for an RTEMS critical section. The previous - * level is returned in _level. 
- */ -#if (defined(__THUMB_INTERWORK__) || defined(__thumb__)) +#ifdef __cplusplus +extern "C" { +#endif -extern uint32_t _CPU_ISR_Disable_Thumb(void) __attribute__ ((naked)); -extern void _CPU_ISR_Enable_Thumb( int ) __attribute__ ((naked)); -extern void _CPU_ISR_Flash_Thumb(int) __attribute__ ((naked)); -extern void _CPU_ISR_Set_level_Thumb(int ) __attribute__ ((naked)); -extern uint32_t _CPU_ISR_Get_level_Thumb(void ) __attribute__ ((naked)); +typedef enum { + ARM_EXCEPTION_RESET = 0, + ARM_EXCEPTION_UNDEF = 1, + ARM_EXCEPTION_SWI = 2, + ARM_EXCEPTION_PREF_ABORT = 3, + ARM_EXCEPTION_DATA_ABORT = 4, + ARM_EXCEPTION_RESERVED = 5, + ARM_EXCEPTION_IRQ = 6, + ARM_EXCEPTION_FIQ = 7, + MAX_EXCEPTIONS = 8 +} Arm_symbolic_exception_name; -#define _CPU_ISR_Disable(_level) \ - (_level) = _CPU_ISR_Disable_Thumb() +typedef struct { + uint32_t register_cpsr; + uint32_t register_r4; + uint32_t register_r5; + uint32_t register_r6; + uint32_t register_r7; + uint32_t register_r8; + uint32_t register_r9; + uint32_t register_r10; + uint32_t register_fp; + uint32_t register_sp; + uint32_t register_lr; + uint32_t register_pc; +} Context_Control; -#define _CPU_ISR_Enable(a) _CPU_ISR_Enable_Thumb(a) +/* XXX This is out of date */ +typedef struct { + uint32_t register_r0; + uint32_t register_r1; + uint32_t register_r2; + uint32_t register_r3; + uint32_t register_ip; + uint32_t register_lr; +} CPU_Exception_frame; -#define _CPU_ISR_Flash(a) _CPU_ISR_Flash_Thumb(a) +typedef CPU_Exception_frame CPU_Interrupt_frame; -#define _CPU_ISR_Set_level(a) _CPU_ISR_Set_level_Thumb(a) +typedef struct { + /* Not supported */ +} Context_Control_fp; -#define _CPU_ISR_Get_level(a) _CPU_ISR_Get_level_Thumb(a) +SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context; -#else /*For ARM mode*/ -#define _CPU_ISR_Disable( _level ) \ - { \ - int reg; \ - asm volatile ("MRS %0, cpsr \n" \ - "ORR %1, %0, #0xc0 \n" \ - "MSR cpsr, %1 \n" \ - : "=&r" (_level), "=&r" (reg)); \ - } +static inline uint32_t arm_interrupt_disable( void ) +{ + uint32_t reg; + uint32_t level; + + asm volatile ( + THUMB_TO_ARM + "mrs %1, cpsr\n" + "orr %0, %1, #0x80\n" + "msr cpsr, %0\n" + ARM_TO_THUMB + : "=r" (reg), "=r" (level) + ); + + return level; +} -/* - * Enable interrupts to the previous level (returned by _CPU_ISR_Disable). - * This indicates the end of an RTEMS critical section. The parameter - * _level is not modified. - */ +static inline void arm_interrupt_enable( uint32_t level ) +{ + #ifdef __thumb__ + uint32_t reg; + + asm volatile ( + THUMB_TO_ARM + "msr cpsr, %1\n" + ARM_TO_THUMB + : "=r" (reg) + : "r" (level) + ); + #else + asm volatile ( + "msr cpsr, %0" + : + : "r" (level) + ); + #endif +} -#define _CPU_ISR_Enable( _level ) \ - { \ - asm volatile ("MSR cpsr, %0 \n" \ - : : "r" (_level)); \ - } +static inline void arm_interrupt_flash( uint32_t level ) +{ + uint32_t reg; + + asm volatile ( + THUMB_TO_ARM + "mrs %0, cpsr\n" + "msr cpsr, %1\n" + "msr cpsr, %0\n" + ARM_TO_THUMB + : "=r" (reg) + : "r" (level) + ); +} -/* - * This temporarily restores the interrupt to _level before immediately - * disabling them again. This is used to divide long RTEMS critical - * sections into two or more parts. The parameter _level is not - * modified. 
- */ +static inline uint32_t arm_status_irq_enable( void ) +{ + uint32_t reg; + uint32_t psr; -#define _CPU_ISR_Flash( _level ) \ - { \ - int reg; \ - asm volatile ("MRS %0, cpsr \n" \ - "MSR cpsr, %1 \n" \ - "MSR cpsr, %0 \n" \ - : "=&r" (reg) \ - : "r" (_level)); \ - } + RTEMS_COMPILER_MEMORY_BARRIER(); -/* - * Map interrupt level in task mode onto the hardware that the CPU - * actually provides. Currently, interrupt levels which do not - * map onto the CPU in a generic fashion are undefined. Someday, - * it would be nice if these were "mapped" by the application - * via a callout. For example, m68k has 8 levels 0 - 7, levels - * 8 - 255 would be available for bsp/application specific meaning. - * This could be used to manage a programmable interrupt controller - * via the rtems_task_mode directive. - * - * The get routine usually must be implemented as a subroutine. - */ + asm volatile ( + THUMB_TO_ARM + "mrs %1, cpsr\n" + "bic %0, %1, #0x80\n" + "msr cpsr, %0\n" + ARM_TO_THUMB + : "=r" (reg), "=r" (psr) + ); -#define _CPU_ISR_Set_level( new_level ) \ - { \ - int reg = 0; /* to avoid warning */ \ - asm volatile ("MRS %0, cpsr \n" \ - "BIC %0, %0, #0xc0 \n" \ - "ORR %0, %0, %2 \n" \ - "MSR cpsr_c, %0 \n" \ - : "=r" (reg) \ - : "0" (reg), "r" (new_level)); \ - } + return psr; +} -#endif /*(defined(__THUMB_INTERWORK__) || defined(__thumb__))*/ +static inline void arm_status_restore( uint32_t psr ) +{ + #ifdef __thumb__ + uint32_t reg; + + asm volatile ( + THUMB_TO_ARM + "msr cpsr, %1\n" + ARM_TO_THUMB + : "=r" (reg) + : "r" (psr) + ); + #else + asm volatile ( + "msr cpsr, %0" + : + : "r" (psr) + ); + #endif + + RTEMS_COMPILER_MEMORY_BARRIER(); +} -uint32_t _CPU_ISR_Get_level( void ); +#define _CPU_ISR_Disable( _isr_cookie ) \ + do { \ + _isr_cookie = arm_interrupt_disable(); \ + } while (0) -/* end of ISR handler macros */ +#define _CPU_ISR_Enable( _isr_cookie ) \ + arm_interrupt_enable( _isr_cookie ) -/* Context handler macros */ +#define _CPU_ISR_Flash( _isr_cookie ) \ + arm_interrupt_flash( _isr_cookie ) -/* - * Initialize the context to a state suitable for starting a - * task after a context restore operation. Generally, this - * involves: - * - * - setting a starting address - * - preparing the stack - * - preparing the stack and frame pointers - * - setting the proper interrupt level in the context - * - initializing the floating point context - * - * This routine generally does not set any unnecessary register - * in the context. The state of the "general data" registers is - * undefined at task start time. - * - * NOTE: This is_fp parameter is TRUE if the thread is to be a floating - * point thread. This is typically only used on CPUs where the - * FPU may be easily disabled by software such as on the SPARC - * where the PSR contains an enable FPU bit. - */ +void _CPU_ISR_Set_level( uint32_t level ); + +uint32_t _CPU_ISR_Get_level( void ); void _CPU_Context_Initialize( - Context_Control *the_context, - uint32_t *stack_base, - uint32_t size, - uint32_t new_level, - void *entry_point, - bool is_fp + Context_Control *the_context, + uint32_t *stack_base, + uint32_t size, + uint32_t new_level, + void *entry_point, + bool is_fp ); -/* - * This routine is responsible for somehow restarting the currently - * executing task. If you are lucky, then all that is necessary - * is restoring the context. Otherwise, there will need to be - * a special assembly routine which does something special in this - * case. Context_Restore should work most of the time. 
It will - * not work if restarting self conflicts with the stack frame - * assumptions of restoring a context. - */ +#define _CPU_Context_Get_SP( _context ) \ + (_context)->register_sp #define _CPU_Context_Restart_self( _the_context ) \ _CPU_Context_restore( (_the_context) ); -/* - * The purpose of this macro is to allow the initial pointer into - * a floating point context area (used to save the floating point - * context) to be at an arbitrary place in the floating point - * context area. - * - * This is necessary because some FP units are designed to have - * their context saved as a stack which grows into lower addresses. - * Other FP units can be saved by simply moving registers into offsets - * from the base of the context area. Finally some FP units provide - * a "dump context" instruction which could fill in from high to low - * or low to high based on the whim of the CPU designers. - */ - #define _CPU_Context_Fp_start( _base, _offset ) \ ( (void *) _Addresses_Add_offset( (_base), (_offset) ) ) -/* - * This routine initializes the FP context area passed to it to. - * There are a few standard ways in which to initialize the - * floating point context. The code included for this macro assumes - * that this is a CPU in which a "initial" FP context was saved into - * _CPU_Null_fp_context and it simply copies it to the destination - * context passed to it. - * - * Other models include (1) not doing anything, and (2) putting - * a "null FP status word" in the correct place in the FP context. - */ - #define _CPU_Context_Initialize_fp( _destination ) \ - { \ - *(*(_destination)) = _CPU_Null_fp_context; \ - } - -/* end of Context handler macros */ - -/* Fatal Error manager macros */ - -/* - * This routine copies _error into a known place -- typically a stack - * location or a register, optionally disables interrupts, and - * halts/stops the CPU. - */ + do { \ + *(*(_destination)) = _CPU_Null_fp_context; \ + } while (0) #define _CPU_Fatal_halt( _error ) \ do { \ @@ -673,175 +344,76 @@ void _CPU_Context_Initialize( : "=r" (_error) \ : "0" (_error) \ : "r0" ); \ - while(1) ; \ - } while(0); - - -/* end of Fatal Error manager macros */ - -/* Bitfield handler macros */ -/* - * If we had a particularly fast function for finding the first - * bit set in a word, it would go here. Since we don't (*), we'll - * just use the universal macros. - * - * (*) On ARM V5 and later, there's a CLZ function which could be - * used to implement much quicker than the default macro. - */ -#define CPU_USE_GENERIC_BITFIELD_CODE TRUE -#define CPU_USE_GENERIC_BITFIELD_DATA TRUE - -/* functions */ - -/* - * _CPU_Initialize - * - * This routine performs CPU dependent initialization. - */ + while (1); \ + } while (0); -void _CPU_Initialize(void); +void _CPU_Initialize( void ); -typedef enum { - ARM_EXCEPTION_RESET = 0, - ARM_EXCEPTION_UNDEF = 1, - ARM_EXCEPTION_SWI = 2, - ARM_EXCEPTION_PREF_ABORT = 3, - ARM_EXCEPTION_DATA_ABORT = 4, - ARM_EXCEPTION_RESERVED = 5, - ARM_EXCEPTION_IRQ = 6, - ARM_EXCEPTION_FIQ = 7, - MAX_EXCEPTIONS = 8 -} Arm_symbolic_exception_name; - -/* - * _CPU_ISR_install_vector - * - * This routine installs an interrupt vector. - */ +#define _CPU_Initialize_vectors() void _CPU_ISR_install_vector( - uint32_t vector, - proc_ptr new_handler, - proc_ptr *old_handler + uint32_t vector, + proc_ptr new_handler, + proc_ptr *old_handler ); -/* - * _CPU_Install_interrupt_stack - * - * This routine installs the hardware interrupt stack pointer. 
- * - * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK - * is TRUE. - */ - void _CPU_Install_interrupt_stack( void ); -/* - * _CPU_Context_switch - * - * This routine switches from the run context to the heir context. - */ - -void _CPU_Context_switch( - Context_Control *run, - Context_Control *heir -); +void _CPU_Context_switch( Context_Control *run, Context_Control *heir ); -/* - * _CPU_Context_restore - * - * This routine is generally used only to restart self in an - * efficient manner. It may simply be a label in _CPU_Context_switch. - * - * NOTE: May be unnecessary to reload some registers. - */ - -void _CPU_Context_restore( - Context_Control *new_context -); +void _CPU_Context_restore( Context_Control *new_context ); -#if (ARM_HAS_FPU == 1) -/* - * _CPU_Context_save_fp - * - * This routine saves the floating point context passed to it. - */ - -void _CPU_Context_save_fp( - Context_Control_fp **fp_context_ptr -); - -/* - * _CPU_Context_restore_fp - * - * This routine restores the floating point context passed to it. - */ +void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr ); -void _CPU_Context_restore_fp( - Context_Control_fp **fp_context_ptr -); -#endif /* (ARM_HAS_FPU == 1) */ +void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr ); -/* The following routine swaps the endian format of an unsigned int. - * It must be static because it is referenced indirectly. - * - * This version will work on any processor, but if there is a better - * way for your CPU PLEASE use it. The most common way to do this is to: - * - * swap least significant two bytes with 16-bit rotate - * swap upper and lower 16-bits - * swap most significant two bytes with 16-bit rotate - * - * Some CPUs have special instructions which swap a 32-bit quantity in - * a single instruction (e.g. i486). It is probably best to avoid - * an "endian swapping control bit" in the CPU. One good reason is - * that interrupts would probably have to be disabled to ensure that - * an interrupt does not try to access the same "chunk" with the wrong - * endian. Another good reason is that on some CPUs, the endian bit - * endianness for ALL fetches -- both code and data -- so the code - * will be fetched incorrectly. 
- */ - -static inline uint32_t CPU_swap_u32( - uint32_t value -) +static inline uint32_t CPU_swap_u32( uint32_t value ) { #if defined(__thumb__) uint32_t byte1, byte2, byte3, byte4, swapped; - + byte4 = (value >> 24) & 0xff; byte3 = (value >> 16) & 0xff; byte2 = (value >> 8) & 0xff; - byte1 = value & 0xff; - + byte1 = value & 0xff; + swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4; return swapped; #else - uint32_t tmp = value; /* make compiler warnings go away */ - asm volatile ("EOR %1, %0, %0, ROR #16\n" - "BIC %1, %1, #0xff0000\n" - "MOV %0, %0, ROR #8\n" - "EOR %0, %0, %1, LSR #8\n" - : "=r" (value), "=r" (tmp) + uint32_t tmp = value; /* make compiler warnings go away */ + asm volatile ("EOR %1, %0, %0, ROR #16\n" + "BIC %1, %1, #0xff0000\n" + "MOV %0, %0, ROR #8\n" + "EOR %0, %0, %1, LSR #8\n" + : "=r" (value), "=r" (tmp) : "0" (value), "1" (tmp)); return value; #endif } -static inline uint16_t CPU_swap_u16(uint16_t value) +static inline uint16_t CPU_swap_u16( uint16_t value ) { - uint16_t lower; - uint16_t upper; + return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU)); +} - value = value & (uint16_t) 0xffff; - lower = (value >> 8); - upper = (value << 8) & 0xff; +/* XXX */ - return (lower | upper); -} +extern uint32_t arm_cpu_mode; + +void arm_exc_abort_data( void ); + +void arm_exc_abort_prefetch( void ); + +void arm_exc_interrupt( void ); + +void arm_exc_undefined( void ); + +void bsp_interrupt_dispatch( void ); #ifdef __cplusplus } #endif -#endif +#endif /* ASM */ + +#endif /* _RTEMS_SCORE_CPU_H */ diff --git a/cpukit/score/cpu/arm/rtems/score/types.h b/cpukit/score/cpu/arm/rtems/score/types.h index d2d0a2ca01..8c24072043 100644 --- a/cpukit/score/cpu/arm/rtems/score/types.h +++ b/cpukit/score/cpu/arm/rtems/score/types.h @@ -35,9 +35,6 @@ extern "C" { typedef uint16_t Priority_Bit_map_control; -typedef void arm_cpu_isr; -typedef void (*arm_cpu_isr_entry)( void ); - #ifdef RTEMS_DEPRECATED_TYPES typedef bool boolean; /* Boolean value */ typedef float single_precision; /* single precision float */ -- cgit v1.2.3
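
As a rough usage sketch of the interfaces this patch introduces (ARM_EXCEPTION_IRQ, _CPU_ISR_install_vector(), and the cookie-based _CPU_ISR_Disable()/_CPU_ISR_Enable() macros from the revised rtems/score/cpu.h), a BSP might install a handler and bracket a short critical section as below. The include, the handler name and example_usage() are illustrative placeholders only and are not part of the change.

#include <rtems/score/cpu.h>  /* normally reached via the usual score headers */

/* Hypothetical IRQ handler; a real BSP would perform its interrupt
 * controller dispatch here. */
static void my_irq_handler( void )
{
}

void example_usage( void )
{
  uint32_t level;
  proc_ptr old_handler = NULL;

  /* The redirection table entry is written only if the handler changes,
   * which avoids writes to possibly read-only memory. */
  _CPU_ISR_install_vector(
    ARM_EXCEPTION_IRQ,
    (proc_ptr) my_irq_handler,
    &old_handler
  );

  /* CPU_MODES_INTERRUPT_MASK is 0x80: only IRQs are masked here,
   * FIQs remain enabled and outside operating system control. */
  _CPU_ISR_Disable( level );
  /* ... short critical region ... */
  _CPU_ISR_Enable( level );
}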