From 76c03152e110dcb770253b54277811228e8f78df Mon Sep 17 00:00:00 2001
From: Amaan Cheval
Date: Mon, 9 Jul 2018 16:42:56 +0530
Subject: bsp/x86_64: Minimal bootable BSP

Current state:

- Basic context initialization and switching code.
- Stubbed console (empty functions).
- Mostly functional linker script (may need tweaks if we ever want to
  move away from the large code model (see: CPU_CFLAGS)).
- Fully functional boot, by using FreeBSD's bootloader to load RTEMS's
  ELF for UEFI-awareness.

In short, the current state with this commit lets us boot, go through
the system initialization functions, and then call the user
application's Init task too.

Updates #2898.
---
 cpukit/score/cpu/x86_64/include/rtems/score/cpu.h | 359 ++++++++++++++++++++++
 1 file changed, 359 insertions(+)
 create mode 100644 cpukit/score/cpu/x86_64/include/rtems/score/cpu.h

(limited to 'cpukit/score/cpu/x86_64/include/rtems/score/cpu.h')

diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
new file mode 100644
index 0000000000..012ef5069b
--- /dev/null
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
@@ -0,0 +1,359 @@
+/**
+ * @file rtems/score/cpu.h
+ *
+ * @brief x86_64 Dependent Source
+ *
+ * This include file contains information pertaining to the x86_64 processor.
+ */
+
+/*
+ * Copyright (c) 2018.
+ * Amaan Cheval
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_CPU_H
+#define _RTEMS_SCORE_CPU_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/basedefs.h>
+#include <rtems/score/x86_64.h>
+
+#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
+#define CPU_ISR_PASSES_FRAME_POINTER FALSE
+// XXX: Enable FPU support
+#define CPU_HARDWARE_FP FALSE
+#define CPU_SOFTWARE_FP FALSE
+#define CPU_ALL_TASKS_ARE_FP FALSE
+#define CPU_IDLE_TASK_IS_FP FALSE
+#define CPU_USE_DEFERRED_FP_SWITCH TRUE
+#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#define CPU_STACK_GROWS_UP FALSE
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__((aligned ( 64 )))
+#define CPU_CACHE_LINE_BYTES 64
+#define CPU_MODES_INTERRUPT_MASK 0x00000001
+#define CPU_MAXIMUM_PROCESSORS 32
+
+#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
+#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
+
+#ifndef ASM
+
+typedef struct {
+  uint64_t rflags;
+
+  /**
+   * Callee-saved registers as listed in the SysV ABI document:
+   * https://github.com/hjl-tools/x86-psABI/wiki/X86-psABI
+   */
+  uint64_t rbx;
+  void    *rsp;
+  void    *rbp;
+  uint64_t r12;
+  uint64_t r13;
+  uint64_t r14;
+  uint64_t r15;
+
+  // XXX: FS segment descriptor for TLS
+
+#ifdef RTEMS_SMP
+  volatile bool is_executing;
+#endif
+} Context_Control;
+
+#define _CPU_Context_Get_SP( _context ) \
+  (_context)->rsp
+
+typedef struct {
+  /* XXX: MMX, XMM, others?
+   *
+   * All x87 registers are caller-saved, so callees that make use of the MMX
+   * registers may use the faster femms instruction
+   */
+
+  /** FPU registers are listed here */
+  double some_float_register;
+} Context_Control_fp;
+
+typedef struct {
+  uint32_t special_interrupt_register;
+} CPU_Interrupt_frame;
+
+#endif /* ASM */
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
+#define CPU_STACK_MINIMUM_SIZE (1024*4)
+#define CPU_SIZEOF_POINTER 8
+#define CPU_ALIGNMENT 8
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+#define CPU_STACK_ALIGNMENT 16
+#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
+
+/*
+ * ISR handler macros
+ */
+
+#ifndef ASM
+
+#define _CPU_Initialize_vectors()
+
+// XXX: For RTEMS critical sections
+#define _CPU_ISR_Disable( _isr_cookie ) \
+  { \
+    (_isr_cookie) = 0;   /* do something to prevent warnings */ \
+  }
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+  { \
+    (void) (_isr_cookie); /* prevent warnings from -Wunused-but-set-variable */ \
+  }
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+  { \
+  }
+
+RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+{
+  return false;
+}
+
+#define _CPU_ISR_Set_level( new_level ) \
+  { \
+  }
+
+uint32_t _CPU_ISR_Get_level( void );
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+#define _CPU_Context_Destroy( _the_thread, _the_context ) \
+  { \
+  }
+
+void _CPU_Context_Initialize(
+  Context_Control *the_context,
+  void *stack_area_begin,
+  size_t stack_area_size,
+  uint32_t new_level,
+  void (*entry_point)( void ),
+  bool is_fp,
+  void *tls_area
+);
+
+#define _CPU_Context_Restart_self( _the_context ) \
+  _CPU_Context_restore( (_the_context) );
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+  { \
+    *(*(_destination)) = _CPU_Null_fp_context; \
+  }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+#define _CPU_Fatal_halt( _source, _error ) \
+  { \
+  }
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+  { \
+    (_output) = 0;   /* do something to prevent warnings */ \
+  }
+#endif
+
+/* end of Bitfield handler macros */
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+#define _CPU_Priority_Mask( _bit_number ) \
+  ( 1 << (_bit_number) )
+#endif
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+#define _CPU_Priority_bits_index( _priority ) \
+  (_priority)
+#endif
+
+/* end of Priority handler macros */
+
+/* functions */
+
+void _CPU_Initialize(void);
+
+void _CPU_ISR_install_raw_handler(
+  uint32_t    vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+);
+
+void _CPU_ISR_install_vector(
+  uint32_t    vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+);
+
+void _CPU_Install_interrupt_stack( void );
+
+void *_CPU_Thread_Idle_body( uintptr_t ignored );
+
+void _CPU_Context_switch(
+  Context_Control  *run,
+  Context_Control  *heir
+);
+
+void _CPU_Context_restore(
+  Context_Control *new_context
+) RTEMS_NO_RETURN;
+
+void _CPU_Context_save_fp(
+  Context_Control_fp **fp_context_ptr
+);
+
+void _CPU_Context_restore_fp(
+  Context_Control_fp **fp_context_ptr
+);
+
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern );
+
+static inline void _CPU_Context_validate( uintptr_t pattern );
+
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
+{
+  /* TODO */
+}
+
+static inline void _CPU_Context_validate( uintptr_t pattern )
+{
+  while (1) {
+    /* TODO */
+  }
+}
+
+typedef struct {
+  uint32_t processor_state_register;
+  uint32_t integer_registers [1];
+  double   float_registers [1];
+} CPU_Exception_frame;
+
+void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+
+static inline uint32_t CPU_swap_u32(
+  uint32_t value
+)
+{
+  uint32_t byte1, byte2, byte3, byte4, swapped;
+
+  byte4 = (value >> 24) & 0xff;
+  byte3 = (value >> 16) & 0xff;
+  byte2 = (value >> 8)  & 0xff;
+  byte1 =  value        & 0xff;
+
+  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+  return swapped;
+}
+
+#define CPU_swap_u16( value ) \
+  (((value&0xff) << 8) | ((value >> 8)&0xff))
+
+typedef uint32_t CPU_Counter_ticks;
+
+uint32_t _CPU_Counter_frequency( void );
+
+CPU_Counter_ticks _CPU_Counter_read( void );
+
+static inline CPU_Counter_ticks _CPU_Counter_difference(
+  CPU_Counter_ticks second,
+  CPU_Counter_ticks first
+)
+{
+  return second - first;
+}
+
+#ifdef RTEMS_SMP
+  uint32_t _CPU_SMP_Initialize( void );
+
+  bool _CPU_SMP_Start_processor( uint32_t cpu_index );
+
+  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
+
+  void _CPU_SMP_Prepare_start_multitasking( void );
+
+  static inline uint32_t _CPU_SMP_Get_current_processor( void )
+  {
+    return 123;
+  }
+
+  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
+
+  static inline void _CPU_SMP_Processor_event_broadcast( void )
+  {
+    __asm__ volatile ( "" : : : "memory" );
+  }
+
+  static inline void _CPU_SMP_Processor_event_receive( void )
+  {
+    __asm__ volatile ( "" : : : "memory" );
+  }
+
+  static inline bool _CPU_Context_Get_is_executing(
+    const Context_Control *context
+  )
+  {
+    return context->is_executing;
+  }
+
+  static inline void _CPU_Context_Set_is_executing(
+    Context_Control *context,
+    bool is_executing
+  )
+  {
+  }
+
+#endif /* RTEMS_SMP */
+
+typedef uintptr_t CPU_Uint32ptr;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ASM */
+
+#endif /* _RTEMS_SCORE_CPU_H */
--
cgit v1.2.3
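
For reference, a few illustrative sketches follow. None of this code is part
of the patch itself; each sketch only shows one plausible way to flesh out a
declaration from the header above.

_CPU_Context_Initialize() is only declared here (the implementation lives
elsewhere in the port). A minimal sketch, assuming the usual RTEMS convention
that interrupt level 0 means "interrupts enabled": align the stack top to
CPU_STACK_ALIGNMENT and seed a fake return address so that the first
_CPU_Context_restore() "returns" into the thread's entry point.

  #include <rtems/score/cpu.h>

  void _CPU_Context_Initialize(
    Context_Control *the_context,
    void            *stack_area_begin,
    size_t           stack_area_size,
    uint32_t         new_level,
    void           (*entry_point)( void ),
    bool             is_fp,
    void            *tls_area
  )
  {
    uintptr_t stack_top = (uintptr_t) stack_area_begin + stack_area_size;

    /* The SysV ABI wants a 16-byte aligned stack (CPU_STACK_ALIGNMENT) */
    stack_top &= ~(uintptr_t) ( CPU_STACK_ALIGNMENT - 1 );

    /* Reserve a slot for the "return address" consumed by the ret that ends
     * the context-restore path and point it at the thread's entry point */
    stack_top -= sizeof( uintptr_t );
    *(uintptr_t *) stack_top = (uintptr_t) entry_point;

    the_context->rsp    = (void *) stack_top;
    the_context->rbp    = NULL;
    the_context->rflags = ( new_level == 0 )
      ? CPU_EFLAGS_INTERRUPTS_ON
      : CPU_EFLAGS_INTERRUPTS_OFF;

    /* FPU state and TLS are still unsupported in this port */
    (void) is_fp;
    (void) tls_area;
  }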
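
The _CPU_ISR_Disable()/_CPU_ISR_Enable() macros above are deliberate stubs
while the port lacks interrupt support. On x86-family CPUs the conventional
technique saves RFLAGS into the cookie before clearing the interrupt flag,
roughly as sketched below (the _sketch names are hypothetical, chosen to
avoid clashing with the stubs; a uint32_t cookie suffices because bits 32-63
of RFLAGS are reserved as zero).

  #define _CPU_ISR_Disable_sketch( _isr_cookie ) \
    do { \
      uint64_t _rflags; \
      /* Save RFLAGS, then clear IF to mask maskable interrupts */ \
      __asm__ volatile ( "pushfq; cli; popq %0" \
        : "=rm" ( _rflags ) : : "memory" ); \
      (_isr_cookie) = (uint32_t) _rflags; \
    } while ( 0 )

  #define _CPU_ISR_Enable_sketch( _isr_cookie ) \
    do { \
      uint64_t _rflags = (_isr_cookie); \
      /* Restore the saved RFLAGS, re-enabling interrupts iff IF was set */ \
      __asm__ volatile ( "pushq %0; popfq" \
        : : "rm" ( _rflags ) : "memory", "cc" ); \
    } while ( 0 )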
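
_CPU_Counter_difference() relies on unsigned modular arithmetic, so the
subtraction stays correct even if the free-running counter wraps around
between the two reads:

  CPU_Counter_ticks first   = 0xFFFFFFF0; /* sampled just before the wrap */
  CPU_Counter_ticks second  = 0x00000010; /* sampled just after the wrap  */
  CPU_Counter_ticks elapsed = _CPU_Counter_difference( second, first );
  /* elapsed == 0x20: 0x10 ticks up to the wrap plus 0x10 ticks after it */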
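
CPU_swap_u32() and CPU_swap_u16() simply reverse byte order, which is mainly
useful for endianness conversion on this little-endian architecture:

  uint32_t flipped   = CPU_swap_u32( 0x12345678 );        /* 0x78563412 */
  uint16_t flipped16 = (uint16_t) CPU_swap_u16( 0xABCD ); /* 0xCDAB     */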