diff options
Diffstat (limited to 'include/rtems/score')
125 files changed, 34045 insertions, 0 deletions
diff --git a/include/rtems/score/address.h b/include/rtems/score/address.h new file mode 100644 index 0000000000..8f38f7c2dc --- /dev/null +++ b/include/rtems/score/address.h @@ -0,0 +1,200 @@ +/** + * @file rtems/score/address.h + * + * @brief Information Required to Manipulate Physical Addresses + * + * This include file contains the information required to manipulate + * physical addresses. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ADDRESS_H +#define _RTEMS_SCORE_ADDRESS_H + +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreAddress Address Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which abstracts address + * manipulation in a portable manner. + */ +/**@{*/ + +/** + * @brief Add offset to an address. + * + * This function is used to add an @a offset to a @a base address. + * It returns the resulting address. This address is typically + * converted to an access type before being used further. + * + * @param[in] base is the base address. + * @param[in] offset is the offset to add to @a base. + * + * @return This method returns the resulting address. + */ +RTEMS_INLINE_ROUTINE void *_Addresses_Add_offset ( + const void *base, + uintptr_t offset +) +{ + return (void *)((uintptr_t)base + offset); +} + +/** + * @brief Subtract offset from offset. + * + * This function is used to subtract an @a offset from a @a base + * address. It returns the resulting address. This address is + * typically converted to an access type before being used further. + * + * @param[in] base is the base address. + * @param[in] offset is the offset to subtract to @a base. + * + * @return This method returns the resulting address. 
+ */ + +RTEMS_INLINE_ROUTINE void *_Addresses_Subtract_offset ( + const void *base, + uintptr_t offset +) +{ + return (void *)((uintptr_t)base - offset); +} + +/** + * @brief Subtract two offsets. + * + * This function is used to subtract two addresses. It returns the + * resulting offset. + * + * @param[in] left is the address on the left hand side of the subtraction. + * @param[in] right is the address on the right hand side of the subtraction. + * + * @return This method returns the resulting address. + * + * @note The cast of an address to an uint32_t makes this code + * dependent on an addresses being thirty two bits. + */ +RTEMS_INLINE_ROUTINE int32_t _Addresses_Subtract ( + const void *left, + const void *right +) +{ + return (int32_t) ((const char *) left - (const char *) right); +} + +/** + * @brief Is address aligned. + * + * This function returns true if the given address is correctly + * aligned for this processor and false otherwise. Proper alignment + * is based on correctness and efficiency. + * + * @param[in] address is the address being checked for alignment. + * + * @retval true The @a address is aligned. + * @retval false The @a address is not aligned. + */ +RTEMS_INLINE_ROUTINE bool _Addresses_Is_aligned ( + const void *address +) +{ +#if (CPU_ALIGNMENT == 0) + return true; +#else + return (((uintptr_t)address % CPU_ALIGNMENT) == 0); +#endif +} + +/** + * @brief Is address in range. + * + * This function returns true if the given address is within the + * memory range specified and false otherwise. base is the address + * of the first byte in the memory range and limit is the address + * of the last byte in the memory range. The base address is + * assumed to be lower than the limit address. + * + * @param[in] address is the address to check. + * @param[in] base is the lowest address of the range to check against. + * @param[in] limit is the highest address of the range to check against. 
+ * + * @retval true The @a address is within the memory range specified + * @retval false The @a address is not within the memory range specified. + */ +RTEMS_INLINE_ROUTINE bool _Addresses_Is_in_range ( + const void *address, + const void *base, + const void *limit +) +{ + return (address >= base && address <= limit); +} + +/** + * @brief Align address to nearest multiple of alignment, rounding up. + * + * This function returns the given address aligned to the given alignment. + * If the address already is aligned, or if alignment is 0, the address is + * returned as is. The returned address is greater than or equal to the + * given address. + * + * @param[in] address is the address to align. + * @param[in] alignment is the boundary for alignment and must be a power of 2 + * + * @return Returns the aligned address. + */ +RTEMS_INLINE_ROUTINE void *_Addresses_Align_up( + void *address, + size_t alignment +) +{ + uintptr_t mask = alignment - (uintptr_t)1; + return (void*)(((uintptr_t)address + mask) & ~mask); +} + +/** + * @brief Align address to nearest multiple of alignment, truncating. + * + * This function returns the given address aligned to the given alignment. + * If the address already is aligned, or if alignment is 0, the address is + * returned as is. The returned address is less than or equal to the + * given address. + * + * @param[in] address is the address to align. + * @param[in] alignment is the boundary for alignment and must be a power of 2. + * + * @return Returns the aligned address. 
+ */ +RTEMS_INLINE_ROUTINE void *_Addresses_Align_down( + void *address, + size_t alignment +) +{ + uintptr_t mask = alignment - (uintptr_t)1; + return (void*)((uintptr_t)address & ~mask); +} + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/apiext.h b/include/rtems/score/apiext.h new file mode 100644 index 0000000000..ab1329b261 --- /dev/null +++ b/include/rtems/score/apiext.h @@ -0,0 +1,89 @@ +/** + * @file rtems/score/apiext.h + * + * @brief API Extensions Handler + * + * This is the API Extensions Handler. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_APIEXT_H +#define _RTEMS_SCORE_APIEXT_H + +#include <rtems/score/chainimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreAPIExtension API Extension Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides mechanisms for the + * SuperCore to perform API specific actions without there being + * "up-references" from the SuperCore to APIs. If these references + * were allowed in the implementation, the cohesion would be too high + * and adding an API would be more difficult. The SuperCore is supposed + * to be largely independent of any API. + */ +/**@{*/ + +/** + * This type defines the prototype of the Postdriver Hook. + */ +typedef void (*API_extensions_Postdriver_hook)(void); + +/** + * The control structure which defines the points at which an API + * can add an extension to the system initialization thread. + */ +typedef struct { + /** This field allows this structure to be used with the Chain Handler. 
*/ + Chain_Node Node; + + /** + * This field is the callout invoked during RTEMS initialization after + * RTEMS data structures and device driver initialization has occurred + * but before multitasking is initiated. + * + * @note If this field is NULL, no extension is invoked. + */ + API_extensions_Postdriver_hook postdriver_hook; +} API_extensions_Control; + +/** + * @brief Add extension set to the active set. + * + * This routine adds @a the_extension to the active set of API extensions. + * + * @param[in] the_extension is the extension set to add. + */ +void _API_extensions_Add( + API_extensions_Control *the_extension +); + +/** + * @brief Execute all post-driver extensions. + * + * This routine executes all of the postdriver callouts. + */ +void _API_extensions_Run_postdriver( void ); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/apimutex.h b/include/rtems/score/apimutex.h new file mode 100644 index 0000000000..615f60b31d --- /dev/null +++ b/include/rtems/score/apimutex.h @@ -0,0 +1,152 @@ +/** + * @file + * + * @ingroup ScoreAPIMutex + * + * @brief API Mutex Handler API + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_APIMUTEX_H +#define _RTEMS_SCORE_APIMUTEX_H + +#include <rtems/score/coremutex.h> +#include <rtems/score/object.h> + +/** + * @defgroup ScoreAPIMutex API Mutex Handler + * + * @ingroup Score + * + * @brief Provides routines to ensure mutual exclusion on API level. + */ +/**@{**/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Control block used to manage each API mutex. + */ +typedef struct { + /** + * @brief Allows each API Mutex to be a full-fledged RTEMS object. 
+ */ + Objects_Control Object; + + /** + * Contains the SuperCore mutex information. + */ + CORE_mutex_Control Mutex; + + /** + * @brief The thread life protection state before the outer-most mutex + * obtain. + */ + bool previous_thread_life_protection; +} API_Mutex_Control; + +/** + * @brief Initialization for the API Mutex Handler. + * + * The value @a maximum_mutexes is the maximum number of API mutexes that may + * exist at any time. + * + * @param[in] maximum_mutexes is the maximum number of API mutexes. + */ +void _API_Mutex_Initialization( uint32_t maximum_mutexes ); + +/** + * @brief Allocates an API mutex from the inactive set and returns it in + * @a mutex. + */ +void _API_Mutex_Allocate( API_Mutex_Control **mutex ); + +/** + * @brief Acquires the specified API mutex. + * + * @param[in] mutex The API mutex. + */ +void _API_Mutex_Lock( API_Mutex_Control *mutex ); + +/** + * @brief Releases the specified API mutex. + * + * @param[in] mutex The API mutex. + */ +void _API_Mutex_Unlock( API_Mutex_Control *mutex ); + +/** + * @brief Checks if the specified API mutex is owned by the executing thread. + * + * @param[in] mutex The API mutex. + */ +bool _API_Mutex_Is_owner( const API_Mutex_Control *mutex ); + +/** @} */ + +/** + * @defgroup ScoreAllocatorMutex RTEMS Allocator Mutex + * + * @ingroup ScoreAPIMutex + * + * @brief Protection for all memory allocations and deallocations in RTEMS. + * + * When the APIs all use this for allocation and deallocation protection, then + * this possibly should be renamed and moved to a higher level in the + * hierarchy. + */ +/**@{**/ + +/** + * @brief Memory allocation mutex. + * + * This points to the API Mutex instance used to ensure that only + * one thread at a time is allocating or freeing memory. 
+ */ +SCORE_EXTERN API_Mutex_Control *_RTEMS_Allocator_Mutex; + +static inline void _RTEMS_Lock_allocator( void ) +{ + _API_Mutex_Lock( _RTEMS_Allocator_Mutex ); +} + +static inline void _RTEMS_Unlock_allocator( void ) +{ + _API_Mutex_Unlock( _RTEMS_Allocator_Mutex ); +} + +static inline bool _RTEMS_Allocator_is_owner( void ) +{ + return _API_Mutex_Is_owner( _RTEMS_Allocator_Mutex ); +} + +SCORE_EXTERN API_Mutex_Control *_Once_Mutex; + +static inline void _Once_Lock( void ) +{ + _API_Mutex_Lock( _Once_Mutex ); +} + +static inline void _Once_Unlock( void ) +{ + _API_Mutex_Unlock( _Once_Mutex ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/arm.h b/include/rtems/score/arm.h new file mode 100644 index 0000000000..6d1f8502a3 --- /dev/null +++ b/include/rtems/score/arm.h @@ -0,0 +1,83 @@ +/** + * @file + * + * @brief ARM Assembler Support API + */ + +/* + * COPYRIGHT (c) 2000 Canon Research Centre France SA. + * Emmanuel Raguet, mailto:raguet@crf.canon.fr + * + * Copyright (c) 2002 Advent Networks, Inc. + * Jay Monkman <jmonkman@adventnetworks.com> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ * + */ + +#ifndef _RTEMS_SCORE_ARM_H +#define _RTEMS_SCORE_ARM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreCPU + */ +/**@{**/ + +#if defined(__ARM_ARCH_7M__) + #define CPU_MODEL_NAME "ARMv7M" + #define ARM_MULTILIB_ARCH_V7M +#elif defined(__ARM_ARCH_6M__) + #define CPU_MODEL_NAME "ARMv6M" + #define ARM_MULTILIB_ARCH_V6M +#else + #define CPU_MODEL_NAME "ARMv4" + #define ARM_MULTILIB_ARCH_V4 +#endif + +#if defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) \ + || defined(__ARM_ARCH_7M__) + #define ARM_MULTILIB_HAS_WFI + #define ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE + #define ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS +#endif + +#if defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) + #define ARM_MULTILIB_HAS_THREAD_ID_REGISTER +#endif + +#if !defined(__SOFTFP__) + #if defined(__ARM_NEON__) + #define ARM_MULTILIB_VFP_D32 + #elif defined(__VFP_FP__) + #define ARM_MULTILIB_VFP_D16 + #else + #error "FPU support not implemented" + #endif +#endif + +#if defined(ARM_MULTILIB_VFP_D16) \ + || defined(ARM_MULTILIB_VFP_D32) + #define ARM_MULTILIB_VFP +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "ARM" + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_ARM_H */ diff --git a/include/rtems/score/armv4.h b/include/rtems/score/armv4.h new file mode 100644 index 0000000000..caeaa3e553 --- /dev/null +++ b/include/rtems/score/armv4.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2013 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef RTEMS_SCORE_ARMV4_H +#define RTEMS_SCORE_ARMV4_H + +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef ARM_MULTILIB_ARCH_V4 + +void bsp_interrupt_dispatch( void ); + +void _ARMV4_Exception_interrupt( void ); + +typedef void ARMV4_Exception_abort_handler( CPU_Exception_frame *frame ); + +void _ARMV4_Exception_data_abort_set_handler( + ARMV4_Exception_abort_handler handler +); + +void _ARMV4_Exception_data_abort( void ); + +void _ARMV4_Exception_prefetch_abort_set_handler( + ARMV4_Exception_abort_handler handler +); + +void _ARMV4_Exception_prefetch_abort( void ); + +void _ARMV4_Exception_undef_default( void ); + +void _ARMV4_Exception_swi_default( void ); + +void _ARMV4_Exception_data_abort_default( void ); + +void _ARMV4_Exception_pref_abort_default( void ); + +void _ARMV4_Exception_reserved_default( void ); + +void _ARMV4_Exception_irq_default( void ); + +void _ARMV4_Exception_fiq_default( void ); + +static inline uint32_t _ARMV4_Status_irq_enable( void ) +{ + uint32_t arm_switch_reg; + uint32_t psr; + + RTEMS_COMPILER_MEMORY_BARRIER(); + + __asm__ volatile ( + ARM_SWITCH_TO_ARM + "mrs %[psr], cpsr\n" + "bic %[arm_switch_reg], %[psr], #0x80\n" + "msr cpsr, %[arm_switch_reg]\n" + ARM_SWITCH_BACK + : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr) + ); + + return psr; +} + +static inline void _ARMV4_Status_restore( uint32_t psr ) +{ + ARM_SWITCH_REGISTERS; + + __asm__ volatile ( + ARM_SWITCH_TO_ARM + "msr cpsr, %[psr]\n" + ARM_SWITCH_BACK + : ARM_SWITCH_OUTPUT + : [psr] "r" (psr) + ); + + RTEMS_COMPILER_MEMORY_BARRIER(); +} + +#endif /* ARM_MULTILIB_ARCH_V4 */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* RTEMS_SCORE_ARMV4_H */ diff --git a/include/rtems/score/armv7m.h b/include/rtems/score/armv7m.h new file mode 100644 index 0000000000..c5e473ec0a --- /dev/null +++ b/include/rtems/score/armv7m.h @@ -0,0 +1,548 @@ +/** + * @file + * + * @brief ARMV7M Architecture Support + 
*/ + +/* + * Copyright (c) 2011-2014 Sebastian Huber. All rights reserved. + * + * embedded brains GmbH + * Obere Lagerstr. 30 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef RTEMS_SCORE_ARMV7M_H +#define RTEMS_SCORE_ARMV7M_H + +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef ARM_MULTILIB_ARCH_V7M + +/* Coprocessor Access Control Register, CPACR */ +#define ARMV7M_CPACR 0xe000ed88 + +#ifndef ASM + +typedef struct { + uint32_t reserved_0; + uint32_t ictr; + uint32_t actlr; + uint32_t reserved_1; +} ARMV7M_ICTAC; + +typedef void (*ARMV7M_Exception_handler)(void); + +typedef struct { + uint32_t register_r0; + uint32_t register_r1; + uint32_t register_r2; + uint32_t register_r3; + uint32_t register_r12; + void *register_lr; + void *register_pc; + uint32_t register_xpsr; +#ifdef ARM_MULTILIB_VFP + uint32_t register_s0; + uint32_t register_s1; + uint32_t register_s2; + uint32_t register_s3; + uint32_t register_s4; + uint32_t register_s5; + uint32_t register_s6; + uint32_t register_s7; + uint32_t register_s8; + uint32_t register_s9; + uint32_t register_s10; + uint32_t register_s11; + uint32_t register_s12; + uint32_t register_s13; + uint32_t register_s14; + uint32_t register_s15; + uint32_t register_fpscr; + uint32_t reserved; +#endif +} ARMV7M_Exception_frame; + +typedef struct { + uint32_t cpuid; + +#define ARMV7M_SCB_ICSR_NMIPENDSET (1U << 31) +#define ARMV7M_SCB_ICSR_PENDSVSET (1U << 28) +#define ARMV7M_SCB_ICSR_PENDSVCLR (1U << 27) +#define ARMV7M_SCB_ICSR_PENDSTSET (1U << 26) +#define ARMV7M_SCB_ICSR_PENDSTCLR (1U << 25) +#define ARMV7M_SCB_ICSR_ISRPREEMPT (1U << 23) +#define ARMV7M_SCB_ICSR_ISRPENDING (1U << 22) +#define ARMV7M_SCB_ICSR_VECTPENDING_GET(reg) (((reg) >> 12) & 0x1ffU) +#define ARMV7M_SCB_ICSR_RETTOBASE (1U << 
11) +#define ARMV7M_SCB_ICSR_VECTACTIVE_GET(reg) ((reg) & 0x1ffU) + uint32_t icsr; + + ARMV7M_Exception_handler *vtor; + +#define ARMV7M_SCB_AIRCR_VECTKEY (0x05fa << 16) +#define ARMV7M_SCB_AIRCR_ENDIANESS (1U << 15) +#define ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT 8 +#define ARMV7M_SCB_AIRCR_PRIGROUP_MASK \ + ((0x7U) << ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT) +#define ARMV7M_SCB_AIRCR_PRIGROUP(val) \ + (((val) << ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT) & ARMV7M_SCB_AIRCR_PRIGROUP_MASK) +#define ARMV7M_SCB_AIRCR_PRIGROUP_GET(reg) \ + (((reg) & ARMV7M_SCB_AIRCR_PRIGROUP_MASK) >> ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT) +#define ARMV7M_SCB_AIRCR_PRIGROUP_SET(reg, val) \ + (((reg) & ~ARMV7M_SCB_AIRCR_PRIGROUP_MASK) | ARMV7M_SCB_AIRCR_PRIGROUP(val)) +#define ARMV7M_SCB_AIRCR_SYSRESETREQ (1U << 2) +#define ARMV7M_SCB_AIRCR_VECTCLRACTIVE (1U << 1) +#define ARMV7M_SCB_AIRCR_VECTRESET (1U << 0) + uint32_t aircr; + + uint32_t scr; + uint32_t ccr; + uint8_t shpr [12]; + +#define ARMV7M_SCB_SHCSR_USGFAULTENA (1U << 18) +#define ARMV7M_SCB_SHCSR_BUSFAULTENA (1U << 17) +#define ARMV7M_SCB_SHCSR_MEMFAULTENA (1U << 16) + uint32_t shcsr; + + uint32_t cfsr; + uint32_t hfsr; + uint32_t dfsr; + uint32_t mmfar; + uint32_t bfar; + uint32_t afsr; + uint32_t reserved_e000ed40[18]; + uint32_t cpacr; + uint32_t reserved_e000ed8c[106]; + uint32_t fpccr; + uint32_t fpcar; + uint32_t fpdscr; + uint32_t mvfr0; + uint32_t mvfr1; +} ARMV7M_SCB; + +typedef struct { +#define ARMV7M_SYSTICK_CSR_COUNTFLAG (1U << 16) +#define ARMV7M_SYSTICK_CSR_CLKSOURCE (1U << 2) +#define ARMV7M_SYSTICK_CSR_TICKINT (1U << 1) +#define ARMV7M_SYSTICK_CSR_ENABLE (1U << 0) + uint32_t csr; + + uint32_t rvr; + uint32_t cvr; + +#define ARMV7M_SYSTICK_CALIB_NOREF (1U << 31) +#define ARMV7M_SYSTICK_CALIB_SKEW (1U << 30) +#define ARMV7M_SYSTICK_CALIB_TENMS_GET(reg) ((reg) & 0xffffffU) + uint32_t calib; +} ARMV7M_Systick; + +typedef struct { + uint32_t iser [8]; + uint32_t reserved_0 [24]; + uint32_t icer [8]; + uint32_t reserved_1 [24]; + uint32_t ispr 
[8]; + uint32_t reserved_2 [24]; + uint32_t icpr [8]; + uint32_t reserved_3 [24]; + uint32_t iabr [8]; + uint32_t reserved_4 [56]; + uint8_t ipr [240]; + uint32_t reserved_5 [644]; + uint32_t stir; +} ARMV7M_NVIC; + +typedef struct { +#define ARMV7M_MPU_TYPE_IREGION_GET(reg) (((reg) >> 16) & 0xffU) +#define ARMV7M_MPU_TYPE_DREGION_GET(reg) (((reg) >> 8) & 0xffU) +#define ARMV7M_MPU_TYPE_SEPARATE (1U << 0) + uint32_t type; + +#define ARMV7M_MPU_CTRL_PRIVDEFENA (1U << 2) +#define ARMV7M_MPU_CTRL_HFNMIENA (1U << 1) +#define ARMV7M_MPU_CTRL_ENABLE (1U << 0) + uint32_t ctrl; + + uint32_t rnr; + +#define ARMV7M_MPU_RBAR_ADDR_SHIFT 5 +#define ARMV7M_MPU_RBAR_ADDR_MASK \ + ((0x7ffffffU) << ARMV7M_MPU_RBAR_ADDR_SHIFT) +#define ARMV7M_MPU_RBAR_ADDR(val) \ + (((val) << ARMV7M_MPU_RBAR_ADDR_SHIFT) & ARMV7M_MPU_RBAR_ADDR_MASK) +#define ARMV7M_MPU_RBAR_ADDR_GET(reg) \ + (((reg) & ARMV7M_MPU_RBAR_ADDR_MASK) >> ARMV7M_MPU_RBAR_ADDR_SHIFT) +#define ARMV7M_MPU_RBAR_ADDR_SET(reg, val) \ + (((reg) & ~ARMV7M_MPU_RBAR_ADDR_MASK) | ARMV7M_MPU_RBAR_ADDR(val)) +#define ARMV7M_MPU_RBAR_VALID (1U << 4) +#define ARMV7M_MPU_RBAR_REGION_SHIFT 0 +#define ARMV7M_MPU_RBAR_REGION_MASK \ + ((0xfU) << ARMV7M_MPU_RBAR_REGION_SHIFT) +#define ARMV7M_MPU_RBAR_REGION(val) \ + (((val) << ARMV7M_MPU_RBAR_REGION_SHIFT) & ARMV7M_MPU_RBAR_REGION_MASK) +#define ARMV7M_MPU_RBAR_REGION_GET(reg) \ + (((reg) & ARMV7M_MPU_RBAR_REGION_MASK) >> ARMV7M_MPU_RBAR_REGION_SHIFT) +#define ARMV7M_MPU_RBAR_REGION_SET(reg, val) \ + (((reg) & ~ARMV7M_MPU_RBAR_REGION_MASK) | ARMV7M_MPU_RBAR_REGION(val)) + uint32_t rbar; + +#define ARMV7M_MPU_RASR_XN (1U << 28) +#define ARMV7M_MPU_RASR_AP_SHIFT 24 +#define ARMV7M_MPU_RASR_AP_MASK \ + ((0x7U) << ARMV7M_MPU_RASR_AP_SHIFT) +#define ARMV7M_MPU_RASR_AP(val) \ + (((val) << ARMV7M_MPU_RASR_AP_SHIFT) & ARMV7M_MPU_RASR_AP_MASK) +#define ARMV7M_MPU_RASR_AP_GET(reg) \ + (((reg) & ARMV7M_MPU_RASR_AP_MASK) >> ARMV7M_MPU_RASR_AP_SHIFT) +#define ARMV7M_MPU_RASR_AP_SET(reg, val) \ + (((reg) & 
~ARMV7M_MPU_RASR_AP_MASK) | ARMV7M_MPU_RASR_AP(val)) +#define ARMV7M_MPU_RASR_TEX_SHIFT 19 +#define ARMV7M_MPU_RASR_TEX_MASK \ + ((0x7U) << ARMV7M_MPU_RASR_TEX_SHIFT) +#define ARMV7M_MPU_RASR_TEX(val) \ + (((val) << ARMV7M_MPU_RASR_TEX_SHIFT) & ARMV7M_MPU_RASR_TEX_MASK) +#define ARMV7M_MPU_RASR_TEX_GET(reg) \ + (((reg) & ARMV7M_MPU_RASR_TEX_MASK) >> ARMV7M_MPU_RASR_TEX_SHIFT) +#define ARMV7M_MPU_RASR_TEX_SET(reg, val) \ + (((reg) & ~ARMV7M_MPU_RASR_TEX_MASK) | ARMV7M_MPU_RASR_TEX(val)) +#define ARMV7M_MPU_RASR_S (1U << 18) +#define ARMV7M_MPU_RASR_C (1U << 17) +#define ARMV7M_MPU_RASR_B (1U << 16) +#define ARMV7M_MPU_RASR_SRD_SHIFT 8 +#define ARMV7M_MPU_RASR_SRD_MASK \ + ((0xffU) << ARMV7M_MPU_RASR_SRD_SHIFT) +#define ARMV7M_MPU_RASR_SRD(val) \ + (((val) << ARMV7M_MPU_RASR_SRD_SHIFT) & ARMV7M_MPU_RASR_SRD_MASK) +#define ARMV7M_MPU_RASR_SRD_GET(reg) \ + (((reg) & ARMV7M_MPU_RASR_SRD_MASK) >> ARMV7M_MPU_RASR_SRD_SHIFT) +#define ARMV7M_MPU_RASR_SRD_SET(reg, val) \ + (((reg) & ~ARMV7M_MPU_RASR_SRD_MASK) | ARMV7M_MPU_RASR_SRD(val)) +#define ARMV7M_MPU_RASR_SIZE_SHIFT 1 +#define ARMV7M_MPU_RASR_SIZE_MASK \ + ((0x1fU) << ARMV7M_MPU_RASR_SIZE_SHIFT) +#define ARMV7M_MPU_RASR_SIZE(val) \ + (((val) << ARMV7M_MPU_RASR_SIZE_SHIFT) & ARMV7M_MPU_RASR_SIZE_MASK) +#define ARMV7M_MPU_RASR_SIZE_GET(reg) \ + (((reg) & ARMV7M_MPU_RASR_SIZE_MASK) >> ARMV7M_MPU_RASR_SIZE_SHIFT) +#define ARMV7M_MPU_RASR_SIZE_SET(reg, val) \ + (((reg) & ~ARMV7M_MPU_RASR_SIZE_MASK) | ARMV7M_MPU_RASR_SIZE(val)) +#define ARMV7M_MPU_RASR_ENABLE (1U << 0) + uint32_t rasr; + + uint32_t rbar_a1; + uint32_t rasr_a1; + uint32_t rbar_a2; + uint32_t rasr_a2; + uint32_t rbar_a3; + uint32_t rasr_a3; +} ARMV7M_MPU; + +typedef enum { + ARMV7M_MPU_AP_PRIV_NO_USER_NO, + ARMV7M_MPU_AP_PRIV_RW_USER_NO, + ARMV7M_MPU_AP_PRIV_RW_USER_RO, + ARMV7M_MPU_AP_PRIV_RW_USER_RW, + ARMV7M_MPU_AP_PRIV_RO_USER_NO = 0x5, + ARMV7M_MPU_AP_PRIV_RO_USER_RO, +} ARMV7M_MPU_Access_permissions; + +typedef enum { + ARMV7M_MPU_ATTR_R = 
ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RO_USER_NO) + | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_XN, + ARMV7M_MPU_ATTR_RW = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO) + | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_XN | ARMV7M_MPU_RASR_B, + ARMV7M_MPU_ATTR_RWX = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO) + | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_B, + ARMV7M_MPU_ATTR_X = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_NO_USER_NO) + | ARMV7M_MPU_RASR_C, + ARMV7M_MPU_ATTR_RX = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RO_USER_NO) + | ARMV7M_MPU_RASR_C, + ARMV7M_MPU_ATTR_IO = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO) + | ARMV7M_MPU_RASR_XN, +} ARMV7M_MPU_Attributes; + +typedef enum { + ARMV7M_MPU_SIZE_32_B = 0x4, + ARMV7M_MPU_SIZE_64_B, + ARMV7M_MPU_SIZE_128_B, + ARMV7M_MPU_SIZE_256_B, + ARMV7M_MPU_SIZE_512_B, + ARMV7M_MPU_SIZE_1_KB, + ARMV7M_MPU_SIZE_2_KB, + ARMV7M_MPU_SIZE_4_KB, + ARMV7M_MPU_SIZE_8_KB, + ARMV7M_MPU_SIZE_16_KB, + ARMV7M_MPU_SIZE_32_KB, + ARMV7M_MPU_SIZE_64_KB, + ARMV7M_MPU_SIZE_128_KB, + ARMV7M_MPU_SIZE_256_KB, + ARMV7M_MPU_SIZE_512_KB, + ARMV7M_MPU_SIZE_1_MB, + ARMV7M_MPU_SIZE_2_MB, + ARMV7M_MPU_SIZE_4_MB, + ARMV7M_MPU_SIZE_8_MB, + ARMV7M_MPU_SIZE_16_MB, + ARMV7M_MPU_SIZE_32_MB, + ARMV7M_MPU_SIZE_64_MB, + ARMV7M_MPU_SIZE_128_MB, + ARMV7M_MPU_SIZE_256_MB, + ARMV7M_MPU_SIZE_512_MB, + ARMV7M_MPU_SIZE_1_GB, + ARMV7M_MPU_SIZE_2_GB, + ARMV7M_MPU_SIZE_4_GB +} ARMV7M_MPU_Size; + +typedef struct { + uint32_t rbar; + uint32_t rasr; +} ARMV7M_MPU_Region; + +#define ARMV7M_MPU_REGION_INITIALIZER(idx, addr, size, attr) \ + { \ + ((addr) & ARMV7M_MPU_RBAR_ADDR_MASK) \ + | ARMV7M_MPU_RBAR_VALID \ + | ARMV7M_MPU_RBAR_REGION(idx), \ + ARMV7M_MPU_RASR_SIZE(size) | (attr) | ARMV7M_MPU_RASR_ENABLE \ + } + +#define ARMV7M_MPU_REGION_DISABLED_INITIALIZER(idx) \ + { \ + ARMV7M_MPU_RBAR_VALID | ARMV7M_MPU_RBAR_REGION(idx), \ + 0 \ + } + +#define ARMV7M_SCS_BASE 0xe000e000 +#define ARMV7M_ICTAC_BASE (ARMV7M_SCS_BASE + 0x0) +#define ARMV7M_SYSTICK_BASE (ARMV7M_SCS_BASE + 0x10) 
+#define ARMV7M_NVIC_BASE (ARMV7M_SCS_BASE + 0x100) +#define ARMV7M_SCB_BASE (ARMV7M_SCS_BASE + 0xd00) +#define ARMV7M_MPU_BASE (ARMV7M_SCS_BASE + 0xd90) + +#define _ARMV7M_ICTAC \ + ((volatile ARMV7M_ICTAC *) ARMV7M_ICTAC_BASE) +#define _ARMV7M_SCB \ + ((volatile ARMV7M_SCB *) ARMV7M_SCB_BASE) +#define _ARMV7M_Systick \ + ((volatile ARMV7M_Systick *) ARMV7M_SYSTICK_BASE) +#define _ARMV7M_NVIC \ + ((volatile ARMV7M_NVIC *) ARMV7M_NVIC_BASE) +#define _ARMV7M_MPU \ + ((volatile ARMV7M_MPU *) ARMV7M_MPU_BASE) + +#define ARMV7M_VECTOR_MSP 0 +#define ARMV7M_VECTOR_RESET 1 +#define ARMV7M_VECTOR_NMI 2 +#define ARMV7M_VECTOR_HARD_FAULT 3 +#define ARMV7M_VECTOR_MEM_MANAGE 4 +#define ARMV7M_VECTOR_BUS_FAULT 5 +#define ARMV7M_VECTOR_USAGE_FAULT 6 +#define ARMV7M_VECTOR_SVC 11 +#define ARMV7M_VECTOR_DEBUG_MONITOR 12 +#define ARMV7M_VECTOR_PENDSV 14 +#define ARMV7M_VECTOR_SYSTICK 15 +#define ARMV7M_VECTOR_IRQ(n) ((n) + 16) +#define ARMV7M_IRQ_OF_VECTOR(n) ((n) - 16) + +#define ARMV7M_EXCEPTION_PRIORITY_LOWEST 255 + +static inline bool _ARMV7M_Is_vector_an_irq( int vector ) +{ + return vector >= 16; +} + +static inline uint32_t _ARMV7M_Get_basepri(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], basepri\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_basepri(uint32_t val) +{ + __asm__ volatile ("msr basepri, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t _ARMV7M_Get_primask(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], primask\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_primask(uint32_t val) +{ + __asm__ volatile ("msr primask, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t _ARMV7M_Get_faultmask(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], faultmask\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_faultmask(uint32_t val) +{ + __asm__ volatile ("msr faultmask, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t 
_ARMV7M_Get_control(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], control\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_control(uint32_t val) +{ + __asm__ volatile ("msr control, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t _ARMV7M_Get_MSP(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], msp\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_MSP(uint32_t val) +{ + __asm__ volatile ("msr msp, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t _ARMV7M_Get_PSP(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], psp\n" : [val] "=&r" (val)); + return val; +} + +static inline void _ARMV7M_Set_PSP(uint32_t val) +{ + __asm__ volatile ("msr psp, %[val]\n" : : [val] "r" (val)); +} + +static inline uint32_t _ARMV7M_Get_XPSR(void) +{ + uint32_t val; + __asm__ volatile ("mrs %[val], xpsr\n" : [val] "=&r" (val)); + return val; +} + +static inline bool _ARMV7M_NVIC_Is_enabled( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + return (_ARMV7M_NVIC->iser [index] & bit) != 0; +} + +static inline void _ARMV7M_NVIC_Set_enable( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + _ARMV7M_NVIC->iser [index] = bit; +} + +static inline void _ARMV7M_NVIC_Clear_enable( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + _ARMV7M_NVIC->icer [index] = bit; +} + +static inline bool _ARMV7M_NVIC_Is_pending( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + return (_ARMV7M_NVIC->ispr [index] & bit) != 0; +} + +static inline void _ARMV7M_NVIC_Set_pending( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + _ARMV7M_NVIC->ispr [index] = bit; +} + +static inline void _ARMV7M_NVIC_Clear_pending( int irq ) +{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + _ARMV7M_NVIC->icpr [index] = bit; +} + +static inline bool _ARMV7M_NVIC_Is_active( int irq ) 
+{ + int index = irq >> 5; + uint32_t bit = 1U << (irq & 0x1f); + + return (_ARMV7M_NVIC->iabr [index] & bit) != 0; +} + +static inline void _ARMV7M_NVIC_Set_priority( int irq, int priority ) +{ + _ARMV7M_NVIC->ipr [irq] = (uint8_t) priority; +} + +static inline int _ARMV7M_NVIC_Get_priority( int irq ) +{ + return _ARMV7M_NVIC->ipr [irq]; +} + +int _ARMV7M_Get_exception_priority( int vector ); + +void _ARMV7M_Set_exception_priority( int vector, int priority ); + +ARMV7M_Exception_handler _ARMV7M_Get_exception_handler( int index ); + +void _ARMV7M_Set_exception_handler( + int index, + ARMV7M_Exception_handler handler +); + +/** + * @brief ARMV7M set exception priority and handler. + */ +void _ARMV7M_Set_exception_priority_and_handler( + int index, + int priority, + ARMV7M_Exception_handler handler +); + +void _ARMV7M_Exception_default( void ); + +void _ARMV7M_Interrupt_service_enter( void ); + +void _ARMV7M_Interrupt_service_leave( void ); + +void _ARMV7M_Pendable_service_call( void ); + +void _ARMV7M_Supervisor_call( void ); + +#endif /* ASM */ + +#endif /* ARM_MULTILIB_ARCH_V7M */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* RTEMS_SCORE_ARMV7M_H */ diff --git a/include/rtems/score/assert.h b/include/rtems/score/assert.h new file mode 100644 index 0000000000..d4253f87da --- /dev/null +++ b/include/rtems/score/assert.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_ASSERT_H +#define _RTEMS_SCORE_ASSERT_H + +#include <rtems/score/basedefs.h> + +#if defined( RTEMS_DEBUG ) + #include <assert.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @brief Assertion similar to assert() controlled via RTEMS_DEBUG instead of + * NDEBUG. + */ +#if defined( RTEMS_DEBUG ) + + /** + * @brief Macro with method name used in assert output + * + * Given the variations in compilers and standards, we have to poke a bit. + * + * @note This is based on the code in newlib's assert.h. + */ + #ifndef __RTEMS_ASSERT_FUNCTION + /* Use g++'s demangled names in C++. */ + #if defined __cplusplus && defined __GNUC__ + #define __RTEMS_ASSERT_FUNCTION __PRETTY_FUNCTION__ + + /* C99 requires the use of __func__. */ + #elif __STDC_VERSION__ >= 199901L + #define __RTEMS_ASSERT_FUNCTION __func__ + + /* Older versions of gcc don't have __func__ but can use __FUNCTION__. */ + #elif __GNUC__ >= 2 + #define __RTEMS_ASSERT_FUNCTION __FUNCTION__ + + /* failed to detect __func__ support. */ + #else + #define __RTEMS_ASSERT_FUNCTION ((char *) 0) + #endif + #endif /* !__RTEMS_ASSERT_FUNCTION */ + + #if !defined( RTEMS_SCHEDSIM ) + /* normal build is newlib. */ + + void __assert_func(const char *, int, const char *, const char *) + RTEMS_NO_RETURN; + + #define _Assert( _e ) \ + ( ( _e ) ? \ + ( void ) 0 : \ + __assert_func( __FILE__, __LINE__, __RTEMS_ASSERT_FUNCTION, #_e ) ) + + #elif defined(__linux__) + /* Scheduler simulator has only been tested on glibc. */ + #define _Assert( _e ) \ + ( ( _e ) ? \ + ( void ) 0 : \ + __assert_fail( #_e, __FILE__, __LINE__, __RTEMS_ASSERT_FUNCTION ) ) + #else + #error "Implement RTEMS assert support for this C Library" + #endif + +#else + #define _Assert( _e ) ( ( void ) 0 ) +#endif + +/** + * @brief Like _Assert(), but only armed if RTEMS_SMP is defined. 
+ */ +#if defined( RTEMS_SMP ) + #define _SMP_Assert( _e ) _Assert( _e ) +#else + #define _SMP_Assert( _e ) ( ( void ) 0 ) +#endif + +/** + * @brief Returns true if thread dispatching is allowed. + * + * Thread dispatching can be repressed via _Thread_Disable_dispatch() or + * _ISR_Disable(). + */ +#if defined( RTEMS_DEBUG ) + bool _Debug_Is_thread_dispatching_allowed( void ); +#endif + +/** + * @brief Returns true if the current thread of execution owns the giant lock. + */ +#if defined( RTEMS_DEBUG ) + #if defined( RTEMS_SMP ) + bool _Debug_Is_owner_of_giant( void ); + #else + #define _Debug_Is_owner_of_giant() (true) + #endif +#endif + +/** + * @brief Returns true if the current thread of execution owns the allocator + * mutex. + */ +#if defined( RTEMS_DEBUG ) + bool _Debug_Is_owner_of_allocator( void ); +#endif + +/** + * @brief Asserts that this point is not reached during run-time. + */ +#if RTEMS_SCHEDSIM +#define _Assert_Not_reached() +#else +#define _Assert_Not_reached() _Assert( 0 ) +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_ASSERT_H */ diff --git a/include/rtems/score/atomic.h b/include/rtems/score/atomic.h new file mode 100644 index 0000000000..526926926f --- /dev/null +++ b/include/rtems/score/atomic.h @@ -0,0 +1,156 @@ +/** + * @file + * + * @ingroup ScoreAtomic + * + * @brief Atomic Operations API + */ + +/* + * COPYRIGHT (c) 2012-2013 Deng Hengyi. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ATOMIC_H +#define _RTEMS_SCORE_ATOMIC_H + +#include <rtems/score/cpuatomic.h> + +/** + * @defgroup ScoreAtomic Atomic Operations + * + * @ingroup Score + * + * @brief Support for atomic operations. + * + * Atomic operations can be used to implement low-level synchronization + * primitives on SMP systems, like spin locks. 
All atomic operations are + * defined in terms of C11 (ISO/IEC 9899:2011) or C++11 (ISO/IEC 14882:2011). + * For documentation use the standard documents. + * + * @{ + */ + +typedef CPU_atomic_Uint Atomic_Uint; + +typedef CPU_atomic_Ulong Atomic_Ulong; + +typedef CPU_atomic_Uintptr Atomic_Uintptr; + +typedef CPU_atomic_Flag Atomic_Flag; + +typedef CPU_atomic_Order Atomic_Order; + +#define ATOMIC_ORDER_RELAXED CPU_ATOMIC_ORDER_RELAXED + +#define ATOMIC_ORDER_ACQUIRE CPU_ATOMIC_ORDER_ACQUIRE + +#define ATOMIC_ORDER_RELEASE CPU_ATOMIC_ORDER_RELEASE + +#define ATOMIC_ORDER_ACQ_REL CPU_ATOMIC_ORDER_ACQ_REL + +#define ATOMIC_ORDER_SEQ_CST CPU_ATOMIC_ORDER_SEQ_CST + +#define ATOMIC_INITIALIZER_UINT( value ) CPU_ATOMIC_INITIALIZER_UINT( value ) + +#define ATOMIC_INITIALIZER_ULONG( value ) CPU_ATOMIC_INITIALIZER_ULONG( value ) + +#define ATOMIC_INITIALIZER_UINTPTR( value ) CPU_ATOMIC_INITIALIZER_UINTPTR( value ) + +#define ATOMIC_INITIALIZER_FLAG CPU_ATOMIC_INITIALIZER_FLAG + +#define _Atomic_Fence( order ) _CPU_atomic_Fence( order ) + +#define _Atomic_Init_uint( obj, desired ) \ + _CPU_atomic_Init_uint( obj, desired ) + +#define _Atomic_Init_ulong( obj, desired ) \ + _CPU_atomic_Init_ulong( obj, desired ) + +#define _Atomic_Init_uintptr( obj, desired ) \ + _CPU_atomic_Init_uintptr( obj, desired ) + +#define _Atomic_Load_uint( obj, order ) \ + _CPU_atomic_Load_uint( obj, order ) + +#define _Atomic_Load_ulong( obj, order ) \ + _CPU_atomic_Load_ulong( obj, order ) + +#define _Atomic_Load_uintptr( obj, order ) \ + _CPU_atomic_Load_uintptr( obj, order ) + +#define _Atomic_Store_uint( obj, desr, order ) \ + _CPU_atomic_Store_uint( obj, desr, order ) + +#define _Atomic_Store_ulong( obj, desr, order ) \ + _CPU_atomic_Store_ulong( obj, desr, order ) + +#define _Atomic_Store_uintptr( obj, desr, order ) \ + _CPU_atomic_Store_uintptr( obj, desr, order ) + +#define _Atomic_Fetch_add_uint( obj, arg, order ) \ + _CPU_atomic_Fetch_add_uint( obj, arg, order ) + +#define 
_Atomic_Fetch_add_ulong( obj, arg, order ) \ + _CPU_atomic_Fetch_add_ulong( obj, arg, order ) + +#define _Atomic_Fetch_add_uintptr( obj, arg, order ) \ + _CPU_atomic_Fetch_add_uintptr( obj, arg, order ) + +#define _Atomic_Fetch_sub_uint( obj, arg, order ) \ + _CPU_atomic_Fetch_sub_uint( obj, arg, order ) + +#define _Atomic_Fetch_sub_ulong( obj, arg, order ) \ + _CPU_atomic_Fetch_sub_ulong( obj, arg, order ) + +#define _Atomic_Fetch_sub_uintptr( obj, arg, order ) \ + _CPU_atomic_Fetch_sub_uintptr( obj, arg, order ) + +#define _Atomic_Fetch_or_uint( obj, arg, order ) \ + _CPU_atomic_Fetch_or_uint( obj, arg, order ) + +#define _Atomic_Fetch_or_ulong( obj, arg, order ) \ + _CPU_atomic_Fetch_or_ulong( obj, arg, order ) + +#define _Atomic_Fetch_or_uintptr( obj, arg, order ) \ + _CPU_atomic_Fetch_or_uintptr( obj, arg, order ) + +#define _Atomic_Fetch_and_uint( obj, arg, order ) \ + _CPU_atomic_Fetch_and_uint( obj, arg, order ) + +#define _Atomic_Fetch_and_ulong( obj, arg, order ) \ + _CPU_atomic_Fetch_and_ulong( obj, arg, order ) + +#define _Atomic_Fetch_and_uintptr( obj, arg, order ) \ + _CPU_atomic_Fetch_and_uintptr( obj, arg, order ) + +#define _Atomic_Exchange_uint( obj, desr, order ) \ + _CPU_atomic_Exchange_uint( obj, desr, order ) + +#define _Atomic_Exchange_ulong( obj, desr, order ) \ + _CPU_atomic_Exchange_ulong( obj, desr, order ) + +#define _Atomic_Exchange_uintptr( obj, desr, order ) \ + _CPU_atomic_Exchange_uintptr( obj, desr, order ) + +#define _Atomic_Compare_exchange_uint( obj, expected, desired, succ, fail ) \ + _CPU_atomic_Compare_exchange_uint( obj, expected, desired, succ, fail ) + +#define _Atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail ) \ + _CPU_atomic_Compare_exchange_ulong( obj, expected, desired, succ, fail ) + +#define _Atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail ) \ + _CPU_atomic_Compare_exchange_uintptr( obj, expected, desired, succ, fail ) + +#define _Atomic_Flag_clear( obj, order ) \ + 
_CPU_atomic_Flag_clear( obj, order ) + +#define _Atomic_Flag_test_and_set( obj, order ) \ + _CPU_atomic_Flag_test_and_set( obj, order ) + +/** @} */ + +#endif /* _RTEMS_SCORE_ATOMIC_H */ diff --git a/include/rtems/score/avr.h b/include/rtems/score/avr.h new file mode 100644 index 0000000000..064a2c8762 --- /dev/null +++ b/include/rtems/score/avr.h @@ -0,0 +1,111 @@ +/** + * @file + * + * @brief Intel AVR Set up Basic CPU Dependency Settings Based on + * Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the avr port. + */ + +/* + * COPYRIGHT 2004, Ralf Corsepius, Ulm, Germany. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + */ + +#ifndef _RTEMS_SCORE_AVR_H +#define _RTEMS_SCORE_AVR_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NO CPU family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. 
+ */ +#if defined(__AVR__) + +#if defined(__AVR_ARCH__) +#if __AVR_ARCH__ == 1 +#define CPU_MODEL_NAME "avr1" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 2 +#define CPU_MODEL_NAME "avr2" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 3 +#define CPU_MODEL_NAME "avr3" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 4 +#define CPU_MODEL_NAME "avr4" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 5 +#define CPU_MODEL_NAME "avr5" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 25 +#define CPU_MODEL_NAME "avr25" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 31 +#define CPU_MODEL_NAME "avr31" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 35 +#define CPU_MODEL_NAME "avr35" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 51 +#define CPU_MODEL_NAME "avr51" +#define AVR_HAS_FPU 1 + +#elif __AVR_ARCH__ == 6 +#define CPU_MODEL_NAME "avr6" +#define AVR_HAS_FPU 1 + +#else +#error "Unsupported __AVR_ARCH__" +#endif +#else +#error "__AVR_ARCH__ undefined" +#endif + +#else + +#error "Unsupported CPU Model" + +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "avr" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_AVR_H */ diff --git a/include/rtems/score/basedefs.h b/include/rtems/score/basedefs.h new file mode 100644 index 0000000000..7e282c1883 --- /dev/null +++ b/include/rtems/score/basedefs.h @@ -0,0 +1,385 @@ +/** + * @file + * + * @ingroup Score + * + * @brief Basic Definitions + */ + +/* + * COPYRIGHT (c) 1989-2007. + * On-Line Applications Research Corporation (OAR). + * + * Copyright (c) 2010-2015 embedded brains GmbH. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_BASEDEFS_H +#define _RTEMS_BASEDEFS_H + +/** + * @defgroup ScoreBaseDefs Basic Definitions + * + * @ingroup Score + */ +/**@{*/ + +#include <rtems/score/cpuopts.h> + +#ifndef ASM + #include <stddef.h> + #include <stdbool.h> + #include <stdint.h> + + /* + * FIXME: This include should not be present. In older RTEMS versions + * <rtems.h> provided <limits.h> indirectly. This include is here to not + * break application source files that relied on this accidentally. + */ + #include <limits.h> + + /* + * FIXME: This include should not be present. In older RTEMS versions + * <rtems.h> provided <string.h> indirectly. This include is here to not + * break application source files that relied on this accidentally. + */ + #include <string.h> +#endif + +#ifndef TRUE + /** + * This ensures that RTEMS has TRUE defined in all situations. + */ + #define TRUE 1 +#endif + +#ifndef FALSE + /** + * This ensures that RTEMS has FALSE defined in all situations. + */ + #define FALSE 0 +#endif + +#if TRUE == FALSE + #error "TRUE equals FALSE" +#endif + +/** + * The following ensures that all data is declared in the space + * of the initialization routine for either the Initialization Manager + * or the initialization file for the appropriate API. It is + * referenced as "external" in every other file. + */ +#ifdef SCORE_INIT + #undef SCORE_EXTERN + #define SCORE_EXTERN +#else + #undef SCORE_EXTERN + #define SCORE_EXTERN extern +#endif + +/** + * The following ensures that all data is declared in the space + * of the initialization routine for either the Initialization Manager + * or the initialization file for the appropriate API. It is + * referenced as "external" in every other file. 
+ */ +#ifdef SAPI_INIT + #undef SAPI_EXTERN + #define SAPI_EXTERN +#else + #undef SAPI_EXTERN + #define SAPI_EXTERN extern +#endif + +/** + * The following ensures that all data is declared in the space + * of the initialization routine for either the Initialization Manager + * or the initialization file for the appropriate API. It is + * referenced as "external" in every other file. + */ +#ifdef RTEMS_API_INIT + #undef RTEMS_EXTERN + #define RTEMS_EXTERN +#else + #undef RTEMS_EXTERN + #define RTEMS_EXTERN extern +#endif + +/** + * The following ensures that all data is declared in the space + * of the initialization routine for either the Initialization Manager + * or the initialization file for the appropriate API. It is + * referenced as "external" in every other file. + */ +#ifdef POSIX_API_INIT + #undef POSIX_EXTERN + #define POSIX_EXTERN +#else + #undef POSIX_EXTERN + #define POSIX_EXTERN extern +#endif + +/** + * The following (in conjunction with compiler arguments) are used + * to choose between the use of static inline functions and macro + * functions. The static inline implementation allows better + * type checking with no cost in code size or execution speed. + */ +#ifdef __GNUC__ + #define RTEMS_INLINE_ROUTINE static __inline__ +#else + #define RTEMS_INLINE_ROUTINE static inline +#endif + +/** + * The following macro is a compiler specific way to ensure that memory + * writes are not reordered around certian points. This specifically can + * impact interrupt disable and thread dispatching critical sections. + */ +#ifdef __GNUC__ + #define RTEMS_COMPILER_MEMORY_BARRIER() __asm__ volatile("" ::: "memory") +#else + #define RTEMS_COMPILER_MEMORY_BARRIER() +#endif + +/** + * The following macro is a compiler specific way to indicate that + * the method will NOT return to the caller. This can assist the + * compiler in code generation and avoid unreachable paths. 
This + * can impact the code generated following calls to + * rtems_fatal_error_occurred and _Terminate. + */ +#if defined(RTEMS_SCHEDSIM) + #define RTEMS_NO_RETURN +#elif defined(__GNUC__) + #define RTEMS_NO_RETURN __attribute__((__noreturn__)) +#else + #define RTEMS_NO_RETURN +#endif + +/* Provided for backward compatibility */ +#define RTEMS_COMPILER_NO_RETURN_ATTRIBUTE RTEMS_NO_RETURN + +/** + * The following defines a compiler specific attribute which informs + * the compiler that the method has no effect except the return value + * and that the return value depends only on parameters and/or global + * variables. + */ +#ifdef __GNUC__ + #define RTEMS_PURE __attribute__((__pure__)) +#else + #define RTEMS_PURE +#endif + +/* Provided for backward compatibility */ +#define RTEMS_COMPILER_PURE_ATTRIBUTE RTEMS_PURE + +/** + * Instructs the compiler to issue a warning whenever a variable or function + * with this attribute will be used. + */ +#ifdef __GNUC__ + #define RTEMS_DEPRECATED __attribute__((__deprecated__)) +#else + #define RTEMS_DEPRECATED +#endif + +/* Provided for backward compatibility */ +#define RTEMS_COMPILER_DEPRECATED_ATTRIBUTE RTEMS_DEPRECATED + +/** + * @brief Instructs the compiler to place a specific variable or function in + * the specified section. + */ +#if defined(__GNUC__) + #define RTEMS_SECTION( _section ) __attribute__((__section__(_section))) +#else + #define RTEMS_SECTION( _section ) +#endif + +/** + * @brief Instructs the compiler that a specific variable or function is used. + */ +#if defined(__GNUC__) + #define RTEMS_USED __attribute__((__used__)) +#else + #define RTEMS_USED +#endif + +/** + * Instructs the compiler that a specific variable is deliberately unused. + * This can occur when reading volatile device memory or skipping arguments + * in a variable argument method. 
+ */ +#if defined(__GNUC__) + #define RTEMS_UNUSED __attribute__((__unused__)) +#else + #define RTEMS_UNUSED +#endif + +/* Provided for backward compatibility */ +#define RTEMS_COMPILER_UNUSED_ATTRIBUTE RTEMS_UNUSED + +/** + * Instructs the compiler that a specific structure or union members will be + * placed so that the least memory is used. + */ +#if defined(__GNUC__) + #define RTEMS_PACKED __attribute__((__packed__)) +#else + #define RTEMS_PACKED +#endif + +/* Provided for backward compatibility */ +#define RTEMS_COMPILER_PACKED_ATTRIBUTE RTEMS_PACKED + +#if __cplusplus >= 201103L + #define RTEMS_STATIC_ASSERT(cond, msg) \ + static_assert(cond, # msg) +#elif __STDC_VERSION__ >= 201112L + #define RTEMS_STATIC_ASSERT(cond, msg) \ + _Static_assert(cond, # msg) +#else + #define RTEMS_STATIC_ASSERT(cond, msg) \ + typedef int rtems_static_assert_ ## msg [(cond) ? 1 : -1] +#endif + +#define RTEMS_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) + +/* + * Zero-length arrays are valid in C99 as flexible array members. C++11 + * doesn't allow flexible array members. Use the GNU extension which is also + * supported by other compilers. + */ +#define RTEMS_ZERO_LENGTH_ARRAY 0 + +/** + * @brief Returns a pointer to the container of a specified member pointer. + * + * @param[in] _m The pointer to a member of the container. + * @param[in] _type The type of the container. + * @param[in] _member_name The designator name of the container member. 
+ */ +#define RTEMS_CONTAINER_OF( _m, _type, _member_name ) \ + ( (_type *) ( (uintptr_t) ( _m ) - offsetof( _type, _member_name ) ) ) + +#ifdef __cplusplus +#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) \ + (const_cast<_type>( _var )) +#else /* Standard C code */ + +/* The reference type idea based on libHX by Jan Engelhardt */ +#define RTEMS_TYPEOF_REFX(_ptr_level, _ptr_type) \ + typeof(_ptr_level(union { int z; typeof(_ptr_type) x; }){0}.x) + +#if defined(__GNUC__) && !defined(ASM) +#if ((__GNUC__ * 1000 + __GNUC_MINOR__) >= 4004) +extern void* RTEMS_DEQUALIFY_types_not_compatible(void) + __attribute__((error ("RTEMS_DEQUALIFY types differ not only by volatile and const"))); +#else +extern void RTEMS_DEQUALIFY_types_not_compatible(void); +#endif +#define RTEMS_DEQUALIFY_DEPTHX( _ptr_level, _type, _var ) ( \ + __builtin_choose_expr( __builtin_types_compatible_p ( \ + RTEMS_TYPEOF_REFX( _ptr_level, _var ), \ + RTEMS_TYPEOF_REFX( _ptr_level, _type ) \ + ) || __builtin_types_compatible_p ( _type, void * ), \ + (_type)(_var), \ + RTEMS_DEQUALIFY_types_not_compatible() \ + ) \ +) +#endif /*__GNUC__*/ +#endif /*__cplusplus*/ + +#ifndef RTEMS_DECONST +#ifdef RTEMS_DEQUALIFY_DEPTHX +#define RTEMS_DECONST( _type, _var ) \ + RTEMS_DEQUALIFY_DEPTHX( *, _type, _var ) +#else /*RTEMS_DEQUALIFY_DEPTHX*/ +/** + * @brief Removes the const qualifier from a type of a variable. + * + * @param[in] _type The target type for the variable. + * @param[in] _var The variable. + */ +#define RTEMS_DECONST( _type, _var ) \ + ((_type)(uintptr_t)(const void *) ( _var )) + +#endif /*RTEMS_DEQUALIFY_DEPTHX*/ +#endif /*RTEMS_DECONST*/ + +#ifndef RTEMS_DEVOLATILE +#ifdef RTEMS_DEQUALIFY_DEPTHX +#define RTEMS_DEVOLATILE( _type, _var ) \ + RTEMS_DEQUALIFY_DEPTHX( *, _type, _var ) +#else /*RTEMS_DEQUALIFY_DEPTHX*/ +/** + * @brief Removes the volatile qualifier from a type of a variable. + * + * @param[in] _type The target type for the variable. + * @param[in] _var The variable. 
+ */ +#define RTEMS_DEVOLATILE( _type, _var ) \ + ((_type)(uintptr_t)(volatile void *) ( _var )) + +#endif /*RTEMS_DEQUALIFY_DEPTHX*/ +#endif /*RTEMS_DEVOLATILE*/ + +#ifndef RTEMS_DEQUALIFY +#ifdef RTEMS_DEQUALIFY_DEPTHX +#define RTEMS_DEQUALIFY( _type, _var ) \ + RTEMS_DEQUALIFY_DEPTHX( *, _type, _var ) +#else /*RTEMS_DEQUALIFY_DEPTHX*/ +/** + * @brief Removes the all qualifiers from a type of a variable. + * + * @param[in] _type The target type for the variable. + * @param[in] _var The variable. + */ +#define RTEMS_DEQUALIFY( _type, _var ) \ + ((_type)(uintptr_t)(const volatile void *) ( _var )) + +#endif /*RTEMS_DEQUALIFY_DEPTHX*/ +#endif /*RTEMS_DEQUALIFY*/ + +/** + * @brief Concatenates _x and _y without expanding. + */ +#define RTEMS_CONCAT( _x, _y ) _x##_y + +/** + * @brief Concatenates expansion of _x and expansion of _y. + */ +#define RTEMS_XCONCAT( _x, _y ) RTEMS_CONCAT( _x, _y ) + +/** + * @brief Stringifies _x without expanding. + */ +#define RTEMS_STRING( _x ) #_x + +/** + * @brief Stringifies expansion of _x. + */ +#define RTEMS_XSTRING( _x ) RTEMS_STRING( _x ) + +#ifndef ASM + #ifdef RTEMS_DEPRECATED_TYPES + typedef bool boolean; + typedef float single_precision; + typedef double double_precision; + #endif + + /** + * XXX: Eventually proc_ptr needs to disappear!!! + */ + typedef void * proc_ptr; +#endif + +/**@}*/ + +#endif /* _RTEMS_BASEDEFS_H */ diff --git a/include/rtems/score/bfin.h b/include/rtems/score/bfin.h new file mode 100644 index 0000000000..caa3a51830 --- /dev/null +++ b/include/rtems/score/bfin.h @@ -0,0 +1,69 @@ +/** + * @file + * + * @brief Blackfin Set up Basic CPU Dependency Settings Based on + * Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the Blackfin port. + */ + +/* + * + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). 
+ * modified by Alain Schaefer <alain.schaefer@easc.ch> + * and Antonio Giovanini <antonio@atos.com.br> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + */ + +#ifndef _RTEMS_SCORE_BFIN_H +#define _RTEMS_SCORE_BFIN_H + +#ifdef __cplusplus +extern "C" { +#endif + + +/* + * This file contains the information required to build + * RTEMS for a particular member of the Blackfin family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. + */ +#if defined(__BFIN__) +#define CPU_MODEL_NAME "BF533" +#define BF_HAS_FPU 0 +#else + +#error "Unsupported CPU Model" + +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "BFIN" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_BFIN_H */ diff --git a/include/rtems/score/chain.h b/include/rtems/score/chain.h new file mode 100644 index 0000000000..e358262e6e --- /dev/null +++ b/include/rtems/score/chain.h @@ -0,0 +1,102 @@ +/** + * @file + * + * @ingroup ScoreChain + * + * @brief Chain Handler API + */ + +/* + * Copyright (c) 2010 embedded brains GmbH. + * + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CHAIN_H +#define _RTEMS_SCORE_CHAIN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreChain Chain Handler + * + * @ingroup Score + * + * The Chain Handler is used to manage sets of entities. 
This handler + * provides two data structures. The Chain Node data structure is included + * as the first part of every data structure that will be placed on + * a chain. The second data structure is Chain Control which is used + * to manage a set of Chain Nodes. + */ +/**@{*/ + +/** + * @typedef Chain_Node + * + * This type definition promotes the name for the Chain Node used by + * all RTEMS code. It is a separate type definition because a forward + * reference is required to define it. See @ref Chain_Node_struct for + * detailed information. + */ +typedef struct Chain_Node_struct Chain_Node; + +/** + * @struct Chain_Node_struct + * + * This is used to manage each element (node) which is placed + * on a chain. + * + * @note Typically, a more complicated structure will use the + * chain package. The more complicated structure will + * include a chain node as the first element in its + * control structure. It will then call the chain package + * with a pointer to that node element. The node pointer + * and the higher level structure start at the same address + * so the user can cast the pointers back and forth. + * + */ +struct Chain_Node_struct { + /** This points to the node after this one on this chain. */ + Chain_Node *next; + /** This points to the node immediate prior to this one on this chain. */ + Chain_Node *previous; +}; + +/** + * @struct Chain_Control + * + * This is used to manage a chain. A chain consists of a doubly + * linked list of zero or more nodes. + * + * @note This implementation does not require special checks for + * manipulating the first and last elements on the chain. + * To accomplish this the @a Chain_Control structure is + * treated as two overlapping @ref Chain_Node structures. 
+ */ +typedef union { + struct { + Chain_Node Node; + Chain_Node *fill; + } Head; + + struct { + Chain_Node *fill; + Chain_Node Node; + } Tail; +} Chain_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/chainimpl.h b/include/rtems/score/chainimpl.h new file mode 100644 index 0000000000..08cbab6bce --- /dev/null +++ b/include/rtems/score/chainimpl.h @@ -0,0 +1,957 @@ +/** + * @file + * + * @brief Chain Handler API + */ + +/* + * Copyright (c) 2010 embedded brains GmbH. + * + * COPYRIGHT (c) 1989-2014. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CHAINIMPL_H +#define _RTEMS_SCORE_CHAINIMPL_H + +#include <rtems/score/chain.h> +#include <rtems/score/address.h> +#include <rtems/score/assert.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreChain + */ +/**@{**/ + +/** + * @brief Chain initializer for an empty chain with designator @a name. + */ +#define CHAIN_INITIALIZER_EMPTY(name) \ + { { { &(name).Tail.Node, NULL }, &(name).Head.Node } } + +/** + * @brief Chain initializer for a chain with one @a node. + * + * @see CHAIN_NODE_INITIALIZER_ONE_NODE_CHAIN(). + */ +#define CHAIN_INITIALIZER_ONE_NODE( node ) \ + { { { (node), NULL }, (node) } } + +/** + * @brief Chain node initializer for a @a chain containing exactly this node. + * + * @see CHAIN_INITIALIZER_ONE_NODE(). + */ +#define CHAIN_NODE_INITIALIZER_ONE_NODE_CHAIN( chain ) \ + { &(chain)->Tail.Node, &(chain)->Head.Node } + +/** + * @brief Chain definition for an empty chain with designator @a name. + */ +#define CHAIN_DEFINE_EMPTY(name) \ + Chain_Control name = CHAIN_INITIALIZER_EMPTY(name) + +/** + * @brief Initialize a chain header. 
+ * + * This routine initializes @a the_chain structure to manage the + * contiguous array of @a number_nodes nodes which starts at + * @a starting_address. Each node is of @a node_size bytes. + * + * @param[in] the_chain specifies the chain to initialize + * @param[in] starting_address is the starting address of the array + * of elements + * @param[in] number_nodes is the numebr of nodes that will be in the chain + * @param[in] node_size is the size of each node + */ +void _Chain_Initialize( + Chain_Control *the_chain, + void *starting_address, + size_t number_nodes, + size_t node_size +); + +/** + * @brief Extract the specified node from a chain. + * + * This routine extracts @a the_node from the chain on which it resides. + * It disables interrupts to ensure the atomicity of the extract operation. + * + * @param[in] the_node is the node to be extracted + * + * - INTERRUPT LATENCY: + * + single case + */ +void _Chain_Extract( + Chain_Node *the_node +); + +/** + * @brief Obtain the first node on a chain. + * + * This function removes the first node from @a the_chain and returns + * a pointer to that node. If @a the_chain is empty, then NULL is returned. + * + * @retval This method returns a pointer a node. If a node was removed, + * then a pointer to that node is returned. If @a the_chain was + * empty, then NULL is returned. + * + * @note It disables interrupts to ensure the atomicity of the get operation. + */ +Chain_Node *_Chain_Get( + Chain_Control *the_chain +); + +/** + * @brief Insert a node on a chain. + * + * This routine inserts @a the_node on a chain immediately following + * @a after_node. + * + * @param[in] after_node is the pointer to the node in chain to be + * inserted after + * @param[in] the_node is the pointer to the node to be inserted + * + * @note It disables interrupts to ensure the atomicity + * of the insert operation. 
+ * + * - INTERRUPT LATENCY: + * + single case + */ +void _Chain_Insert( + Chain_Node *after_node, + Chain_Node *the_node +); + +/** + * @brief Append a node on the end of a chain. + * + * This routine appends @a the_node onto the end of @a the_chain. + * + * @note It disables interrupts to ensure the atomicity of the + * append operation. + */ +void _Chain_Append( + Chain_Control *the_chain, + Chain_Node *the_node +); + +/** + * @brief Append a node and check if the chain was empty before. + * + * This routine appends the_node onto the end of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be appended. + * + * @note It disables interrupts to ensure the atomicity of the append + * operation. + * + * @retval true The chain was empty before. + * @retval false The chain contained at least one node before. + */ +bool _Chain_Append_with_empty_check( + Chain_Control *the_chain, + Chain_Node *the_node +); + +/** + * @brief Prepend a node and check if the chain was empty before. + * + * This routine prepends the_node onto the front of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be prepended. + * + * @note It disables interrupts to ensure the atomicity of the append + * operation. + * + * @retval true The chain was empty before. + * @retval false The chain contained at least one node before. + */ +bool _Chain_Prepend_with_empty_check( + Chain_Control *the_chain, + Chain_Node *the_node +); + +/** + * @brief Get the first node and check if the chain is empty afterwards. + * + * This function removes the first node from the_chain and returns + * a pointer to that node in @a the_node. If the_chain is empty, then NULL is + * returned. + * + * @param[in] the_chain is the chain to attempt to get the first node from. + * @param[out] the_node is the first node on the chain or NULL if the chain is + * empty. 
+ * + * @note It disables interrupts to ensure the atomicity of the append + * operation. + * + * @retval true The chain is empty now. + * @retval false The chain contains at least one node now. + * + * - INTERRUPT LATENCY: + * + single case + */ +bool _Chain_Get_with_empty_check( + Chain_Control *the_chain, + Chain_Node **the_node +); + +/** + * @brief Returns the node count of the chain. + * + * @param[in] chain The chain. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * operation. + * + * @retval The node count of the chain. + */ +size_t _Chain_Node_count_unprotected( const Chain_Control *chain ); + +/** + * @brief Set off chain. + * + * This function sets the next field of the @a node to NULL indicating the @a + * node is not part of a chain. + * + * @param[in] node the node set to off chain. + */ +RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain( + Chain_Node *node +) +{ + node->next = NULL; +} + +/** + * @brief Is the node off chain. + * + * This function returns true if the @a node is not on a chain. A @a node is + * off chain if the next field is set to NULL. + * + * @param[in] node is the node off chain. + * + * @retval true The @a node is off chain. + * @retval false The @a node is not off chain. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Is_node_off_chain( + const Chain_Node *node +) +{ + return node->next == NULL; +} + +/** + * @brief Are two nodes equal. + * + * This function returns true if @a left and @a right are equal, + * and false otherwise. + * + * @param[in] left is the node on the left hand side of the comparison. + * @param[in] right is the node on the left hand side of the comparison. + * + * @retval true @a left and @a right are equal. + * @retval false @a left and @a right are not equal. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Are_nodes_equal( + const Chain_Node *left, + const Chain_Node *right +) +{ + return left == right; +} + +/** + * @brief Is the chain node pointer NULL. 
+ * + * This function returns true if the_node is NULL and false otherwise. + * + * @param[in] the_node is the node pointer to check. + * + * @retval true @a the_node is @c NULL. + * @retval false @a the_node is not @c NULL. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Is_null_node( + const Chain_Node *the_node +) +{ + return (the_node == NULL); +} + +/** + * @brief Return pointer to chain head. + * + * This function returns a pointer to the head node on the chain. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the permanent head node of the chain. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Head( + Chain_Control *the_chain +) +{ + return &the_chain->Head.Node; +} + +/** + * @brief Return pointer to immutable chain head. + * + * This function returns a pointer to the head node on the chain. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the permanent head node of the chain. + */ +RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_head( + const Chain_Control *the_chain +) +{ + return &the_chain->Head.Node; +} + +/** + * @brief Return pointer to chain tail. + * + * This function returns a pointer to the tail node on the chain. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the permanent tail node of the chain. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Tail( + Chain_Control *the_chain +) +{ + return &the_chain->Tail.Node; +} + +/** + * @brief Return pointer to immutable chain tail. + * + * This function returns a pointer to the tail node on the chain. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the permanent tail node of the chain. + */ +RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_tail( + const Chain_Control *the_chain +) +{ + return &the_chain->Tail.Node; +} + +/** + * @brief Return pointer to chain's first node. 
+ * + * This function returns a pointer to the first node on the chain after the + * head. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the first node of the chain. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First( + Chain_Control *the_chain +) +{ + return _Chain_Head( the_chain )->next; +} + +/** + * @brief Return pointer to immutable chain's first node. + * + * This function returns a pointer to the first node on the chain after the + * head. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the first node of the chain. + */ +RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first( + const Chain_Control *the_chain +) +{ + return _Chain_Immutable_head( the_chain )->next; +} + +/** + * @brief Return pointer to chain's last node. + * + * This function returns a pointer to the last node on the chain just before + * the tail. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the last node of the chain. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Last( + Chain_Control *the_chain +) +{ + return _Chain_Tail( the_chain )->previous; +} + +/** + * @brief Return pointer to immutable chain's last node. + * + * This function returns a pointer to the last node on the chain just before + * the tail. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This method returns the last node of the chain. + */ +RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_last( + const Chain_Control *the_chain +) +{ + return _Chain_Immutable_tail( the_chain )->previous; +} + +/** + * @brief Return pointer the next node from this node. + * + * This function returns a pointer to the next node after this node. + * + * @param[in] the_node is the node to be operated upon. + * + * @return This method returns the next node on the chain. 
 + */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Next(
+  Chain_Node *the_node
+)
+{
+  return the_node->next;
+}
+
+/**
+ * @brief Return pointer to the immutable next node from this node.
+ *
+ * This function returns a pointer to the next node after this node.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the next node on the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_next(
+  const Chain_Node *the_node
+)
+{
+  return the_node->next;
+}
+
+/**
+ * @brief Return pointer to the previous node from this node.
+ *
+ * This function returns a pointer to the previous node on this chain.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the previous node on the chain.
+ */
+RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Previous(
+  Chain_Node *the_node
+)
+{
+  return the_node->previous;
+}
+
+/**
+ * @brief Return pointer to the immutable previous node from this node.
+ *
+ * This function returns a pointer to the previous node on this chain.
+ *
+ * @param[in] the_node is the node to be operated upon.
+ *
+ * @return This method returns the previous node on the chain.
+ */
+RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_previous(
+  const Chain_Node *the_node
+)
+{
+  return the_node->previous;
+}
+
+/**
+ * @brief Is the chain empty.
+ *
+ * This function returns true if there are no nodes on @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ *
+ * @retval true There are no nodes on @a the_chain.
+ * @retval false There are nodes on @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
+  const Chain_Control *the_chain
+)
+{
+  return _Chain_Immutable_first( the_chain )
+    == _Chain_Immutable_tail( the_chain );
+}
+
+/**
+ * @brief Is this the first node on the chain.
+ *
+ * This function returns true if the_node is the first node on a chain and
+ * false otherwise.
+ * + * @param[in] the_node is the node the caller wants to know if it is + * the first node on a chain. + * + * @retval true @a the_node is the first node on a chain. + * @retval false @a the_node is not the first node on a chain. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Is_first( + const Chain_Node *the_node +) +{ + return (the_node->previous->previous == NULL); +} + +/** + * @brief Is this the last node on the chain. + * + * This function returns true if @a the_node is the last node on a chain and + * false otherwise. + * + * @param[in] the_node is the node to check as the last node. + * + * @retval true @a the_node is the last node on a chain. + * @retval false @a the_node is not the last node on a chain. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Is_last( + const Chain_Node *the_node +) +{ + return (the_node->next->next == NULL); +} + +/** + * @brief Does this chain have only one node. + * + * This function returns true if there is only one node on @a the_chain and + * false otherwise. + * + * @param[in] the_chain is the chain to be operated upon. + * + * @return This function returns true if there is only one node on + * @a the_chain and false otherwise. + * + * @retval true There is only one node on @a the_chain. + * @retval false There is more than one node on @a the_chain. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Has_only_one_node( + const Chain_Control *the_chain +) +{ + return _Chain_Immutable_first( the_chain ) + == _Chain_Immutable_last( the_chain ); +} + +/** + * @brief Is this node the chain head. + * + * This function returns true if @a the_node is the head of @a the_chain and + * false otherwise. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to check for being the Chain Head. + * + * @retval true @a the_node is the head of @a the_chain. + * @retval false @a the_node is not the head of @a the_chain. 
 + */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_head(
+  const Chain_Control *the_chain,
+  const Chain_Node    *the_node
+)
+{
+  return (the_node == _Chain_Immutable_head( the_chain ));
+}
+
+/**
+ * @brief Is this node the chain tail.
+ *
+ * This function returns true if @a the_node is the tail of @a the_chain and
+ * false otherwise.
+ *
+ * @param[in] the_chain is the chain to be operated upon.
+ * @param[in] the_node is the node to check for being the Chain Tail.
+ *
+ * @retval true @a the_node is the tail of @a the_chain.
+ * @retval false @a the_node is not the tail of @a the_chain.
+ */
+RTEMS_INLINE_ROUTINE bool _Chain_Is_tail(
+  const Chain_Control *the_chain,
+  const Chain_Node    *the_node
+)
+{
+  return (the_node == _Chain_Immutable_tail( the_chain ));
+}
+
+/**
+ * @brief Initialize this chain as empty.
+ *
+ * This routine initializes the specified chain to contain zero nodes.
+ *
+ * @param[in] the_chain is the chain to be initialized.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
+  Chain_Control *the_chain
+)
+{
+  Chain_Node *head;
+  Chain_Node *tail;
+
+  _Assert( the_chain != NULL );
+
+  head = _Chain_Head( the_chain );
+  tail = _Chain_Tail( the_chain );
+
+  head->next = tail;
+  head->previous = NULL;
+  tail->previous = head;
+}
+
+/**
+ * @brief Extract this node (unprotected).
+ *
+ * This routine extracts the_node from the chain on which it resides.
+ * It does NOT disable interrupts to ensure the atomicity of the
+ * extract operation.
+ *
+ * @param[in] the_node is the node to be extracted.
+ */
+RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(
+  Chain_Node *the_node
+)
+{
+  Chain_Node *next;
+  Chain_Node *previous;
+
+  next = the_node->next;
+  previous = the_node->previous;
+  next->previous = previous;
+  previous->next = next;
+}
+
+/**
+ * @brief Get the first node (unprotected).
+ *
+ * This function removes the first node from the_chain and returns
+ * a pointer to that node.
It does NOT disable interrupts to ensure + * the atomicity of the get operation. + * + * @param[in] the_chain is the chain to attempt to get the first node from. + * + * @return This method returns the first node on the chain even if it is + * the Chain Tail. + * + * @note This routine assumes that there is at least one node on the chain + * and always returns a node even if it is the Chain Tail. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_first_unprotected( + Chain_Control *the_chain +) +{ + Chain_Node *head = _Chain_Head( the_chain ); + Chain_Node *old_first = head->next; + Chain_Node *new_first = old_first->next; + + head->next = new_first; + new_first->previous = head; + + return old_first; +} + +/** + * @brief Get the first node (unprotected). + * + * This function removes the first node from the_chain and returns + * a pointer to that node. If the_chain is empty, then NULL is returned. + * + * @param[in] the_chain is the chain to attempt to get the first node from. + * + * @return This method returns the first node on the chain or NULL if the + * chain is empty. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * get operation. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected( + Chain_Control *the_chain +) +{ + if ( !_Chain_Is_empty(the_chain)) + return _Chain_Get_first_unprotected(the_chain); + else + return NULL; +} + +/** + * @brief Insert a node (unprotected). + * + * This routine inserts the_node on a chain immediately following + * after_node. + * + * @param[in] after_node is the node which will precede @a the_node on the + * chain. + * @param[in] the_node is the node to be inserted. + * + * @note It does NOT disable interrupts to ensure the atomicity + * of the extract operation. 
+ */ +RTEMS_INLINE_ROUTINE void _Chain_Insert_unprotected( + Chain_Node *after_node, + Chain_Node *the_node +) +{ + Chain_Node *before_node; + + the_node->previous = after_node; + before_node = after_node->next; + after_node->next = the_node; + the_node->next = before_node; + before_node->previous = the_node; +} + +/** + * @brief Append a node (unprotected). + * + * This routine appends the_node onto the end of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be appended. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * append operation. + */ +RTEMS_INLINE_ROUTINE void _Chain_Append_unprotected( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + Chain_Node *tail = _Chain_Tail( the_chain ); + Chain_Node *old_last = tail->previous; + + the_node->next = tail; + tail->previous = the_node; + old_last->next = the_node; + the_node->previous = old_last; +} + +/** + * @brief Append a node on the end of a chain if the node is in the off chain + * state (unprotected). + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * append operation. + * + * @see _Chain_Append_unprotected() and _Chain_Is_node_off_chain(). + */ +RTEMS_INLINE_ROUTINE void _Chain_Append_if_is_off_chain_unprotected( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + if ( _Chain_Is_node_off_chain( the_node ) ) { + _Chain_Append_unprotected( the_chain, the_node ); + } +} + +/** + * @brief Prepend a node (unprotected). + * + * This routine prepends the_node onto the front of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be prepended. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * prepend operation. 
+ */ +RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + _Chain_Insert_unprotected(_Chain_Head(the_chain), the_node); +} + +/** + * @brief Prepend a node (protected). + * + * This routine prepends the_node onto the front of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be prepended. + * + * @note It disables interrupts to ensure the atomicity of the + * prepend operation. + */ +RTEMS_INLINE_ROUTINE void _Chain_Prepend( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + _Chain_Insert(_Chain_Head(the_chain), the_node); +} + +/** + * @brief Append a node and check if the chain was empty before (unprotected). + * + * This routine appends the_node onto the end of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be appended. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * append operation. + * + * @retval true The chain was empty before. + * @retval false The chain contained at least one node before. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Append_with_empty_check_unprotected( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + bool was_empty = _Chain_Is_empty( the_chain ); + + _Chain_Append_unprotected( the_chain, the_node ); + + return was_empty; +} + +/** + * @brief Prepend a node and check if the chain was empty before (unprotected). + * + * This routine prepends the_node onto the front of the_chain. + * + * @param[in] the_chain is the chain to be operated upon. + * @param[in] the_node is the node to be prepended. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * prepend operation. + * + * @retval true The chain was empty before. + * @retval false The chain contained at least one node before. 
+ */ +RTEMS_INLINE_ROUTINE bool _Chain_Prepend_with_empty_check_unprotected( + Chain_Control *the_chain, + Chain_Node *the_node +) +{ + bool was_empty = _Chain_Is_empty( the_chain ); + + _Chain_Prepend_unprotected( the_chain, the_node ); + + return was_empty; +} + +/** + * @brief Get the first node and check if the chain is empty afterwards + * (unprotected). + * + * This function removes the first node from the_chain and returns + * a pointer to that node in @a the_node. If the_chain is empty, then NULL is + * returned. + * + * @param[in] the_chain is the chain to attempt to get the first node from. + * @param[out] the_node is the first node on the chain or NULL if the chain is + * empty. + * + * @note It does NOT disable interrupts to ensure the atomicity of the + * get operation. + * + * @retval true The chain is empty now. + * @retval false The chain contains at least one node now. + */ +RTEMS_INLINE_ROUTINE bool _Chain_Get_with_empty_check_unprotected( + Chain_Control *the_chain, + Chain_Node **the_node +) +{ + bool is_empty_now = true; + Chain_Node *head = _Chain_Head( the_chain ); + Chain_Node *tail = _Chain_Tail( the_chain ); + Chain_Node *old_first = head->next; + + if ( old_first != tail ) { + Chain_Node *new_first = old_first->next; + + head->next = new_first; + new_first->previous = head; + + *the_node = old_first; + + is_empty_now = new_first == tail; + } else + *the_node = NULL; + + return is_empty_now; +} + +/** + * @brief Chain node order. + * + * @param[in] left The left node. + * @param[in] right The right node. + * + * @retval true According to the order the left node precedes the right node. + * @retval false Otherwise. + */ +typedef bool ( *Chain_Node_order )( + const Chain_Node *left, + const Chain_Node *right +); + +/** + * @brief Inserts a node into the chain according to the order relation. + * + * After the operation the chain contains the node to insert and the order + * relation holds for all nodes from the head up to the inserted node. 
Nodes + * after the inserted node are not moved. + * + * @param[in,out] chain The chain. + * @param[in,out] to_insert The node to insert. + * @param[in] order The order relation. + */ +RTEMS_INLINE_ROUTINE void _Chain_Insert_ordered_unprotected( + Chain_Control *chain, + Chain_Node *to_insert, + Chain_Node_order order +) +{ + const Chain_Node *tail = _Chain_Immutable_tail( chain ); + Chain_Node *next = _Chain_First( chain ); + + while ( next != tail && !( *order )( to_insert, next ) ) { + next = _Chain_Next( next ); + } + + _Chain_Insert_unprotected( _Chain_Previous( next ), to_insert ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/context.h b/include/rtems/score/context.h new file mode 100644 index 0000000000..7e59f05d60 --- /dev/null +++ b/include/rtems/score/context.h @@ -0,0 +1,172 @@ +/** + * @file rtems/score/context.h + * + * @brief Information About Each Thread's Context + * + * This include file contains all information about each thread's context. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CONTEXT_H +#define _RTEMS_SCORE_CONTEXT_H + +/** + * @defgroup ScoreContext Context Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which abstracts thread context + * management in a portable manner. + * + * The context switch needed variable is contained in the per cpu + * data structure. + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/cpu.h> + +/** + * @brief Size of floating point context area. + * + * This constant defines the number of bytes required + * to store a full floating point context. 
+ */ +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) + #define CONTEXT_FP_SIZE CPU_CONTEXT_FP_SIZE +#else + #define CONTEXT_FP_SIZE 0 +#endif + +/** + * @brief Initialize context area. + * + * This routine initializes @a _the_context such that the stack + * pointer, interrupt level, and entry point are correct for the + * thread's initial state. + * + * @param[in] _the_context will be initialized + * @param[in] _stack is the lowest physical address of the thread's + * context + * @param[in] _size is the size in octets of the thread's context + * @param[in] _isr is the ISR enable level for this thread + * @param[in] _entry is this thread's entry point + * @param[in] _is_fp is set to true if this thread has floating point + * enabled + * @param[in] _tls_area The thread-local storage (TLS) area begin. + */ +#define _Context_Initialize( _the_context, _stack, _size, _isr, _entry, \ + _is_fp, _tls_area ) \ + _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry, \ + _is_fp, _tls_area ) + +/** + * This macro is invoked from _Thread_Handler to do whatever CPU + * specific magic is required that must be done in the context of + * the thread when it starts. + * + * If the CPU architecture does not require any magic, then this + * macro is empty. + */ + +#if defined(_CPU_Context_Initialization_at_thread_begin) + #define _Context_Initialization_at_thread_begin() \ + _CPU_Context_Initialization_at_thread_begin() +#else + #define _Context_Initialization_at_thread_begin() +#endif + +/** + * @brief Perform context switch. + * + * This routine saves the current context into the @a _executing + * context record and restores the context specified by @a _heir. + * + * @param[in] _executing is the currently executing thread's context + * @param[in] _heir is the context of the thread to be switched to + */ +#define _Context_Switch( _executing, _heir ) \ + _CPU_Context_switch( _executing, _heir ) + +/** + * @brief Restart currently executing thread. 
 + *
+ * This routine restarts the calling thread by restoring its initial
+ * stack pointer and returning to the thread's entry point.
+ *
+ * @param[in] _the_context is the context of the thread to restart
+ */
+#define _Context_Restart_self( _the_context ) \
+   _CPU_Context_Restart_self( _the_context )
+
+/**
+ * @brief Return starting address of floating point context.
+ *
+ * This function returns the starting address of the floating
+ * point context save area.  It is assumed that the area reserved
+ * for the floating point save area is large enough.
+ *
+ * @param[in] _base is lowest physical address of the floating point
+ *        context save area.
+ * @param[in] _offset is the offset into the floating point area
+ *
+ * @retval the initial FP context pointer
+ */
+#define _Context_Fp_start( _base, _offset ) \
+   _CPU_Context_Fp_start( (_base), (_offset) )
+
+/**
+ * @brief Initialize floating point context area.
+ *
+ * This routine initializes the floating point context save
+ * area to contain an initial known state.
+ *
+ * @param[in] _fp_area is the base address of the floating point
+ *        context save area to initialize.
+ */
+#define _Context_Initialize_fp( _fp_area ) \
+   _CPU_Context_Initialize_fp( _fp_area )
+
+/**
+ * @brief Restore floating point context area.
+ *
+ * This routine restores the floating point context contained
+ * in the @a _fp area.  It is assumed that the current
+ * floating point context has been saved by a previous invocation
+ * of @a _Context_Save_fp.
+ *
+ * @param[in] _fp points to the floating point context area to restore.
+ */
+#define _Context_Restore_fp( _fp ) \
+   _CPU_Context_restore_fp( _fp )
+
+/**
+ * @brief Save floating point context area.
+ *
+ * This routine saves the current floating point context
+ * in the @a _fp area.
+ *
+ * @param[in] _fp points to the floating point context area to save.
+ */ +#define _Context_Save_fp( _fp ) \ + _CPU_Context_save_fp( _fp ) + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/copyrt.h b/include/rtems/score/copyrt.h new file mode 100644 index 0000000000..c3757bfae4 --- /dev/null +++ b/include/rtems/score/copyrt.h @@ -0,0 +1,50 @@ +/** + * @file rtems/score/copyrt.h + * + * @brief Copyright Notice for RTEMS + * + * This include file contains the copyright notice for RTEMS + * which is included in every binary copy of the executive. + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COPYRT_H +#define _RTEMS_SCORE_COPYRT_H + +/** + * @defgroup SuperCoreCopyright RTEMS Copyright Notice + * + * @ingroup Score + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * This is the copyright string for RTEMS. + */ +#ifdef SCORE_INIT +const char _Copyright_Notice[] = +"COPYRIGHT (c) 1989-2008.\n\ +On-Line Applications Research Corporation (OAR).\n"; +#else +extern const char _Copyright_Notice[]; +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ +#endif +/* end of include file */ diff --git a/include/rtems/score/corebarrier.h b/include/rtems/score/corebarrier.h new file mode 100644 index 0000000000..ba706be3e3 --- /dev/null +++ b/include/rtems/score/corebarrier.h @@ -0,0 +1,91 @@ +/** + * @file rtems/score/corebarrier.h + * + * @brief Constants and Structures Associated with the Barrier Handler + * + * This include file contains all the constants and structures associated + * with the Barrier Handler. + */ + +/* + * COPYRIGHT (c) 1989-2007. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREBARRIER_H +#define _RTEMS_SCORE_COREBARRIER_H + +#include <rtems/score/threadq.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreBarrier Barrier Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Barrier services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +/** + * Flavors of barriers. + */ +typedef enum { + /** This specifies that the barrier will automatically release when + * the user specified number of threads have arrived at the barrier. + */ + CORE_BARRIER_AUTOMATIC_RELEASE, + /** This specifies that the user will have to manually release the barrier + * in order to release the waiting threads. + */ + CORE_BARRIER_MANUAL_RELEASE +} CORE_barrier_Disciplines; + +/** + * The following defines the control block used to manage the + * attributes of each barrier. + */ +typedef struct { + /** This field indicates whether the barrier is automatic or manual. + */ + CORE_barrier_Disciplines discipline; + /** This element indicates the number of threads which must arrive at the + * barrier to trip the automatic release. + */ + uint32_t maximum_count; +} CORE_barrier_Attributes; + +/** + * The following defines the control block used to manage each + * barrier. + */ +typedef struct { + /** This field is the Waiting Queue used to manage the set of tasks + * which are blocked waiting for the barrier to be released. + */ + Thread_queue_Control Wait_queue; + /** This element is the set of attributes which define this instance's + * behavior. + */ + CORE_barrier_Attributes Attributes; + /** This element contains the current number of thread waiting for this + * barrier to be released. 
*/ + uint32_t number_of_waiting_threads; +} CORE_barrier_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/corebarrierimpl.h b/include/rtems/score/corebarrierimpl.h new file mode 100644 index 0000000000..e8b330dcb6 --- /dev/null +++ b/include/rtems/score/corebarrierimpl.h @@ -0,0 +1,194 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the SuperCore Barrier + * + * This include file contains all of the inlined routines associated + * with the SuperCore barrier. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREBARRIERIMPL_H +#define _RTEMS_SCORE_COREBARRIERIMPL_H + +#include <rtems/score/corebarrier.h> +#include <rtems/score/thread.h> +#include <rtems/score/threadqimpl.h> +#include <rtems/score/watchdog.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreBarrier + */ +/**@{**/ + +/** + * Core Barrier handler return statuses. + */ +typedef enum { + /** This status indicates that the operation completed successfully. */ + CORE_BARRIER_STATUS_SUCCESSFUL, + /** This status indicates that the barrier is configured for automatic + * release and the caller tripped the automatic release. The caller + * thus did not block. + */ + CORE_BARRIER_STATUS_AUTOMATICALLY_RELEASED, + /** This status indicates that the thread was blocked waiting for an + * operation to complete and the barrier was deleted. + */ + CORE_BARRIER_WAS_DELETED, + /** This status indicates that the calling task was willing to block + * but the operation was unable to complete within the time allotted + * because the resource never became available. + */ + CORE_BARRIER_TIMEOUT +} CORE_barrier_Status; + +/** + * @brief Core barrier last status value. 
+ * + * This is the last status value. + */ +#define CORE_BARRIER_STATUS_LAST CORE_BARRIER_TIMEOUT + +/** + * The following type defines the callout which the API provides + * to support global/multiprocessor operations on barriers. + */ +typedef void ( *CORE_barrier_API_mp_support_callout )( + Thread_Control *, + Objects_Id + ); + +/** + * @brief Initialize core barrier. + * + * This routine initializes the barrier based on the parameters passed. + * + * @param[in] the_barrier is the barrier to initialize + * @param[in] the_barrier_attributes define the behavior of this instance + */ +void _CORE_barrier_Initialize( + CORE_barrier_Control *the_barrier, + CORE_barrier_Attributes *the_barrier_attributes +); + +RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy( + CORE_barrier_Control *the_barrier +) +{ + _Thread_queue_Destroy( &the_barrier->Wait_queue ); +} + +/** + * @brief Wait for the barrier. + * + * This routine wait for the barrier to be released. If the barrier + * is set to automatic and this is the appropriate thread, then it returns + * immediately. Otherwise, the calling thread is blocked until the barrier + * is released. + * + * @param[in] the_barrier is the barrier to wait for + * @param[in,out] executing The currently executing thread. + * @param[in] id is the id of the object being waited upon + * @param[in] wait is true if the calling thread is willing to wait + * @param[in] timeout is the number of ticks the calling thread is willing + * to wait if @a wait is true. + * @param[in] api_barrier_mp_support is the routine to invoke if the + * thread unblocked is remote + * + * @note Status is returned via the thread control block. + */ +void _CORE_barrier_Wait( + CORE_barrier_Control *the_barrier, + Thread_Control *executing, + Objects_Id id, + bool wait, + Watchdog_Interval timeout, + CORE_barrier_API_mp_support_callout api_barrier_mp_support +); + +/** + * @brief Manually release the barrier. + * + * This routine manually releases the barrier. 
All of the threads waiting + * for the barrier will be readied. + * + * @param[in] the_barrier is the barrier to surrender + * @param[in] id is the id of the object for a remote unblock + * @param[in] api_barrier_mp_support is the routine to invoke if the + * thread unblocked is remote + * + * @retval the number of unblocked threads + */ +uint32_t _CORE_barrier_Release( + CORE_barrier_Control *the_barrier, + Objects_Id id, + CORE_barrier_API_mp_support_callout api_barrier_mp_support +); + +/** + * This routine assists in the deletion of a barrier by flushing the + * associated wait queue. + * + * @param[in] _the_barrier is the barrier to flush + * @param[in] _remote_extract_callout is the routine to invoke if the + * thread unblocked is remote + * @param[in] _status is the status to be returned to the unblocked thread + */ +#define _CORE_barrier_Flush( _the_barrier, _remote_extract_callout, _status) \ + _Thread_queue_Flush( \ + &((_the_barrier)->Wait_queue), \ + (_remote_extract_callout), \ + (_status) \ + ) + +/** + * This function returns true if the automatic release attribute is + * enabled in the @a attribute_set and false otherwise. + * + * @param[in] the_attribute is the attribute set to test + * + * @return true if the priority attribute is enabled + */ +RTEMS_INLINE_ROUTINE bool _CORE_barrier_Is_automatic( + CORE_barrier_Attributes *the_attribute +) +{ + return + (the_attribute->discipline == CORE_BARRIER_AUTOMATIC_RELEASE); +} + +/** + * This routine returns the number of threads currently waiting at the barrier. 
+ * + * @param[in] the_barrier is the barrier to obtain the number of blocked + * threads for + * @return the current count of this barrier + */ +RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Get_number_of_waiting_threads( + CORE_barrier_Control *the_barrier +) +{ + return the_barrier->number_of_waiting_threads; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coremsg.h b/include/rtems/score/coremsg.h new file mode 100644 index 0000000000..84a3a97f0e --- /dev/null +++ b/include/rtems/score/coremsg.h @@ -0,0 +1,190 @@ +/** + * @file rtems/score/coremsg.h + * + * @brief Constants and Structures Associated with the Message Queue Handler. + * + * This include file contains all the constants and structures associated + * with the Message queue Handler. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREMSG_H +#define _RTEMS_SCORE_COREMSG_H + +#include <rtems/score/chain.h> +#include <rtems/score/threadq.h> +#include <rtems/score/watchdog.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreMessageQueue Message Queue Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Message Queue services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +#if defined(RTEMS_POSIX_API) + /** + * This macro is defined when an API is enabled that requires that the + * Message Queue Handler include support for priority based enqueuing + * of messages. 
+ */ + #define RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY +#endif + +#if defined(RTEMS_POSIX_API) + /** + * This macro is defined when an API is enabled that requires that the + * Message Queue Handler include support for notification of enqueuing + * a message. + */ + #define RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION +#endif + +#if defined(RTEMS_POSIX_API) + /** + * This macro is defined when an API is enabled that requires the + * Message Queue Handler include support for blocking send operations. + */ + #define RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND +#endif + +/** + * @brief Data types needed to manipulate the contents of message buffers. + * + * The following defines the data types needed to manipulate + * the contents of message buffers. + * + * @note The buffer field is normally longer than a single uint32_t + * but since messages are variable length we just make a ptr to 1. + */ +typedef struct { + /** This field is the size of this message. */ + size_t size; + /** This field contains the actual message. */ + uint32_t buffer[1]; +} CORE_message_queue_Buffer; + +/** + * @brief The organization of a message buffer. + * + * The following records define the organization of a message + * buffer. + */ +typedef struct { + /** This element allows this structure to be placed on chains. */ + Chain_Node Node; + #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY) + /** This field is the priority of this message. */ + int priority; + #endif + /** This field points to the contents of the message. */ + CORE_message_queue_Buffer Contents; +} CORE_message_queue_Buffer_control; + +/** + * @brief The possible blocking disciplines for a message queue. + * + * This enumerated types defines the possible blocking disciplines + * for a message queue. + */ +typedef enum { + /** This value indicates that blocking tasks are in FIFO order. */ + CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO, + /** This value indicates that blocking tasks are in priority order. 
*/ + CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY +} CORE_message_queue_Disciplines; + +/** + * @brief Control block used to manage the attributes of each message queue. + * + * The following defines the control block used to manage the + * attributes of each message queue. + */ +typedef struct { + /** This field specifies the order in which blocking tasks will be ordered. */ + CORE_message_queue_Disciplines discipline; +} CORE_message_queue_Attributes; + +#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) + /** + * @brief Type for a notification handler. + * + * The following defines the type for a Notification handler. A + * notification handler is invoked when the message queue makes a + * 0->1 transition on pending messages. + */ + typedef void (*CORE_message_queue_Notify_Handler)( void * ); +#endif + +/** + * @brief Control block used to manage each message queue. + * + * The following defines the control block used to manage each + * Message Queue. + */ +typedef struct { + /** This field is the Waiting Queue used to manage the set of tasks + * which are blocked waiting to receive a message from this queue. + */ + Thread_queue_Control Wait_queue; + /** This element is the set of attributes which define this instance's + * behavior. + */ + CORE_message_queue_Attributes Attributes; + /** This element is maximum number of messages which may be pending + * at any given time. + */ + uint32_t maximum_pending_messages; + /** This element is the number of messages which are currently pending. + */ + uint32_t number_of_pending_messages; + /** This is the size in bytes of the largest message which may be + * sent via this queue. + */ + size_t maximum_message_size; + /** This chain is the set of pending messages. It may be ordered by + * message priority or in FIFO order. + */ + Chain_Control Pending_messages; + /** This is the address of the memory allocated for message buffers. 
+ * It is allocated are part of message queue initialization and freed + * as part of destroying it. + */ + CORE_message_queue_Buffer *message_buffers; + #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) + /** This is the routine invoked when the message queue transitions + * from zero (0) messages pending to one (1) message pending. + */ + CORE_message_queue_Notify_Handler notify_handler; + /** This field is the argument passed to the @ref notify_argument. */ + void *notify_argument; + #endif + /** This chain is the set of inactive messages. A message is inactive + * when it does not contain a pending message. + */ + Chain_Control Inactive_messages; +} CORE_message_queue_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coremsgimpl.h b/include/rtems/score/coremsgimpl.h new file mode 100644 index 0000000000..382ce92815 --- /dev/null +++ b/include/rtems/score/coremsgimpl.h @@ -0,0 +1,618 @@ +/** + * @file + * + * @brief Inlined Routines in the Core Message Handler + * + * This include file contains the static inline implementation of all + * inlined routines in the Core Message Handler. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREMSGIMPL_H +#define _RTEMS_SCORE_COREMSGIMPL_H + +#include <rtems/score/coremsg.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/threaddispatch.h> +#include <rtems/score/threadqimpl.h> + +#include <limits.h> +#include <string.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreMessageQueue + */ +/**@{**/ + +/** + * @brief Used when appending messages onto a message queue. + * + * This is the priority constant used when appending messages onto + * a message queue. 
 */
#define CORE_MESSAGE_QUEUE_SEND_REQUEST   INT_MAX

/**
 * @brief Used when prepending messages onto a message queue.
 *
 * This is the priority constant used when prepending messages onto
 * a message queue.
 */
#define CORE_MESSAGE_QUEUE_URGENT_REQUEST INT_MIN

/**
 * @brief The modes in which a message may be submitted to a message queue.
 *
 * The following type details the modes in which a message
 * may be submitted to a message queue.  The message may be posted
 * in a send or urgent fashion.
 *
 * @note All other values are message priorities.  Numerically smaller
 *       priorities indicate higher priority messages.
 */
typedef int CORE_message_queue_Submit_types;

/**
 * @brief The possible set of Core Message Queue handler return statuses.
 *
 * This enumerated type defines the possible set of Core Message
 * Queue handler return statuses.
 */
typedef enum {
  /** This value indicates the operation completed successfully. */
  CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL,
  /** This value indicates that the message was too large for this queue. */
  CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE,
  /** This value indicates that there are too many messages pending. */
  CORE_MESSAGE_QUEUE_STATUS_TOO_MANY,
  /** This value indicates that a receive was unsuccessful. */
  CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED,
  /** This value indicates that a blocking send was unsuccessful. */
  CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT,
  /** This value indicates that the message queue being blocked upon
   *  was deleted while the thread was waiting.
   */
  CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED,
  /** This value indicates that the thread had to timeout while waiting
   *  to receive a message because one did not become available.
   */
  CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
  /** This value indicates that a blocking receive was unsuccessful. */
  CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT
} CORE_message_queue_Status;

/**
 * @brief Core message queue last status value.
 *
 * This is the last (i.e. highest-valued) status value in
 * @ref CORE_message_queue_Status.
 */
#define CORE_MESSAGE_QUEUE_STATUS_LAST CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT

/**
 * @brief Callout which the API provides to support global/multiprocessor
 * operations.
 *
 * The following type defines the callout which the API provides
 * to support global/multiprocessor operations on message_queues.
 */
typedef void ( *CORE_message_queue_API_mp_support_callout )(
                 Thread_Control *,
                 Objects_Id
               );

/**
 * @brief Initialize a message queue.
 *
 * This package is the implementation of the CORE Message Queue Handler.
 * This core object provides task synchronization and communication functions
 * via messages passed to queue objects.
 *
 * This routine initializes @a the_message_queue
 * based on the parameters passed.
 *
 * @param[in] the_message_queue points to the message queue to initialize
 * @param[in] the_message_queue_attributes points to the attributes that
 *            will be used with this message queue instance
 * @param[in] maximum_pending_messages is the maximum number of messages
 *            that will be allowed to pend at any given time
 * @param[in] maximum_message_size is the size of largest message that
 *            may be sent to this message queue instance
 *
 * @retval true if the message queue can be initialized.  In general,
 *         false will only be returned if memory for the pending
 *         messages cannot be allocated.
 */
bool _CORE_message_queue_Initialize(
  CORE_message_queue_Control    *the_message_queue,
  CORE_message_queue_Attributes *the_message_queue_attributes,
  uint32_t                       maximum_pending_messages,
  size_t                         maximum_message_size
);

/**
 * @brief Close a message queue.
 *
 * This package is the implementation of the CORE Message Queue Handler.
+ * This core object provides task synchronization and communication functions + * via messages passed to queue objects + * + * This function closes a message by returning all allocated space and + * flushing @a the_message_queue's task wait queue. + * + * @param[in] the_message_queue points to the message queue to close + * @param[in] remote_extract_callout is the routine to call for each thread + * that is extracted from the set of waiting threads + * @param[in] status is the status that each waiting thread will return + * from it's blocking service + */ +void _CORE_message_queue_Close( + CORE_message_queue_Control *the_message_queue, + Thread_queue_Flush_callout remote_extract_callout, + uint32_t status +); + +/** + * @brief Flush pending messages. + * + * This package is the implementation of the CORE Message Queue Handler. + * This core object provides task synchronization and communication functions + * via messages passed to queue objects. + * + * This function flushes @a the_message_queue's pending message queue. The + * number of messages flushed from the queue is returned. + * + * @param[in] the_message_queue points to the message queue to flush + * @param[in] lock_context The lock context of the interrupt disable. + * + * @retval This method returns the number of message pending messages flushed. + */ +uint32_t _CORE_message_queue_Flush( + CORE_message_queue_Control *the_message_queue, + ISR_lock_Context *lock_context +); + +#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API) +/** + * @brief Flush waiting threads. + * + * This function flushes the threads which are blocked on + * @a the_message_queue's pending message queue. They are + * unblocked whether blocked sending or receiving. It returns + * the number of messages flushed from the queue. 
+ * + * @param[in] the_message_queue points to the message queue to flush + * @retval number of messages flushed from the queue + */ + void _CORE_message_queue_Flush_waiting_threads( + CORE_message_queue_Control *the_message_queue + ); +#endif + +/** + * @brief Broadcast a message to the message queue. + * + * This package is the implementation of the CORE Message Queue Handler. + * This core object provides task synchronization and communication functions + * via messages passed to queue objects. + * + * This function sends a message for every thread waiting on the queue and + * returns the number of threads made ready by the message. + * + * @param[in] the_message_queue points to the message queue + * @param[in] buffer is the starting address of the message to broadcast + * @param[in] size is the size of the message being broadcast + * @param[in] id is the RTEMS object Id associated with this message queue. + * It is used when unblocking a remote thread. + * @param[in] api_message_queue_mp_support is the routine to invoke if + * a thread that is unblocked is actually a remote thread. + * @param[out] count points to the variable that will contain the + * number of tasks that are sent this message + * @param[in] lock_context The lock context of the interrupt disable. + * @retval @a *count will contain the number of messages sent + * @retval indication of the successful completion or reason for failure + */ +CORE_message_queue_Status _CORE_message_queue_Broadcast( + CORE_message_queue_Control *the_message_queue, + const void *buffer, + size_t size, + Objects_Id id, + CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, + uint32_t *count, + ISR_lock_Context *lock_context +); + +/** + * @brief Submit a message to the message queue. + * + * This routine implements the send and urgent message functions. It + * processes a message that is to be submitted to the designated + * message queue. 
The message will either be processed as a + * send message which it will be inserted at the rear of the queue + * or it will be processed as an urgent message which will be inserted + * at the front of the queue. + * + * @param[in] the_message_queue points to the message queue + * @param[in] buffer is the starting address of the message to send + * @param[in] size is the size of the message being send + * @param[in] id is the RTEMS object Id associated with this message queue. + * It is used when unblocking a remote thread. + * @param[in] api_message_queue_mp_support is the routine to invoke if + * a thread that is unblocked is actually a remote thread. + * @param[in] submit_type determines whether the message is prepended, + * appended, or enqueued in priority order. + * @param[in] wait indicates whether the calling thread is willing to block + * if the message queue is full. + * @param[in] timeout is the maximum number of clock ticks that the calling + * thread is willing to block if the message queue is full. + * @param[in] lock_context The lock context of the interrupt disable. + * @retval indication of the successful completion or reason for failure + */ +CORE_message_queue_Status _CORE_message_queue_Submit( + CORE_message_queue_Control *the_message_queue, + Thread_Control *executing, + const void *buffer, + size_t size, + Objects_Id id, + CORE_message_queue_API_mp_support_callout api_message_queue_mp_support, + CORE_message_queue_Submit_types submit_type, + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +); + +/** + * @brief Size a message from the message queue. + * + * This package is the implementation of the CORE Message Queue Handler. + * This core object provides task synchronization and communication functions + * via messages passed to queue objects. + * + * This kernel routine dequeues a message, copies the message buffer to + * a given destination buffer, and frees the message buffer to the + * inactive message pool. 
The thread will be blocked if wait is true, + * otherwise an error will be given to the thread if no messages are available. + * + * @param[in] the_message_queue points to the message queue + * @param[in] id is the RTEMS object Id associated with this message queue. + * It is used when unblocking a remote thread. + * @param[in] buffer is the starting address of the message buffer to + * to be filled in with a message + * @param[in] size_p is a pointer to the size of the @a buffer and + * indicates the maximum size message that the caller can receive. + * @param[in] wait indicates whether the calling thread is willing to block + * if the message queue is empty. + * @param[in] timeout is the maximum number of clock ticks that the calling + * thread is willing to block if the message queue is empty. + * @param[in] lock_context The lock context of the interrupt disable. + * + * @retval indication of the successful completion or reason for failure. + * On success, the location pointed to @a size_p will contain the + * size of the received message. + * + * @note Returns message priority via return area in TCB. + * + * - INTERRUPT LATENCY: + * + available + * + wait + */ +void _CORE_message_queue_Seize( + CORE_message_queue_Control *the_message_queue, + Thread_Control *executing, + Objects_Id id, + void *buffer, + size_t *size_p, + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +); + +/** + * @brief Insert a message into the message queue. + * + * This kernel routine inserts the specified message into the + * message queue. It is assumed that the message has been filled + * in before this routine is called. + * + * @param[in] the_message_queue points to the message queue + * @param[in] the_message is the message to enqueue + * @param[in] submit_type determines whether the message is prepended, + * appended, or enqueued in priority order. 
 *
 * - INTERRUPT LATENCY:
 *    + insert
 */
void _CORE_message_queue_Insert_message(
  CORE_message_queue_Control        *the_message_queue,
  CORE_message_queue_Buffer_control *the_message,
  CORE_message_queue_Submit_types    submit_type
);

/**
 * This routine sends a message to the end of the specified message queue.
 * It is a thin wrapper that submits on behalf of the executing thread
 * with the append (send) priority constant.
 */
RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Send(
  CORE_message_queue_Control                *the_message_queue,
  const void                                *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  bool                                       wait,
  Watchdog_Interval                          timeout,
  ISR_lock_Context                          *lock_context
)
{
  return _CORE_message_queue_Submit(
    the_message_queue,
    _Thread_Executing,
    buffer,
    size,
    id,
    api_message_queue_mp_support,
    CORE_MESSAGE_QUEUE_SEND_REQUEST,
    wait,    /* sender may block */
    timeout, /* timeout interval */
    lock_context
  );
}

/**
 * This routine sends a message to the front of the specified message queue.
 * It is a thin wrapper that submits on behalf of the executing thread
 * with the prepend (urgent) priority constant.
 */
RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Urgent(
  CORE_message_queue_Control                *the_message_queue,
  const void                                *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  bool                                       wait,
  Watchdog_Interval                          timeout,
  ISR_lock_Context                          *lock_context
)
{
  return _CORE_message_queue_Submit(
    the_message_queue,
    _Thread_Executing,
    buffer,
    size,
    id,
    api_message_queue_mp_support,
    CORE_MESSAGE_QUEUE_URGENT_REQUEST,
    wait,    /* sender may block */
    timeout, /* timeout interval */
    lock_context
  );
}

/**
 * This routine acquires the thread wait queue lock of the message queue,
 * disabling interrupts as a side effect.
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire(
  CORE_message_queue_Control *the_message_queue,
  ISR_lock_Context           *lock_context
)
{
  _Thread_queue_Acquire( &the_message_queue->Wait_queue, lock_context );
}

/**
 * This routine acquires the thread wait queue lock of the message queue
 * in a context where interrupts are already disabled.  Paired with
 * _CORE_message_queue_Release().
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical(
  CORE_message_queue_Control *the_message_queue,
  ISR_lock_Context           *lock_context
)
{
  _Thread_queue_Acquire_critical( &the_message_queue->Wait_queue, lock_context );

  #if defined(RTEMS_MULTIPROCESSING)
    /*
     * In case RTEMS_MULTIPROCESSING is enabled, then we have to prevent
     * deletion of the executing thread after the thread queue operations.
     */
    _Thread_Dispatch_disable();
  #endif
}

/**
 * This routine releases the thread wait queue lock of the message queue
 * and, on multiprocessing configurations, re-enables thread dispatching
 * (balancing the disable done in _CORE_message_queue_Acquire_critical()).
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release(
  CORE_message_queue_Control *the_message_queue,
  ISR_lock_Context           *lock_context
)
{
  _Thread_queue_Release( &the_message_queue->Wait_queue, lock_context );
  #if defined(RTEMS_MULTIPROCESSING)
    _Thread_Dispatch_enable( _Per_CPU_Get() );
  #endif
}

/**
 * This routine copies the contents of the source message buffer
 * to the destination message buffer.
 *
 * @note The source and destination are assumed not to overlap
 *       (memcpy semantics).
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Copy_buffer (
  const void *source,
  void       *destination,
  size_t      size
)
{
  memcpy(destination, source, size);
}

/**
 * This function allocates a message buffer from the inactive
 * message buffer chain.
 *
 * @note Returns NULL (an empty chain get) if no inactive buffer is
 *       available -- TODO confirm against _Chain_Get_unprotected contract.
 */
RTEMS_INLINE_ROUTINE CORE_message_queue_Buffer_control *
_CORE_message_queue_Allocate_message_buffer (
  CORE_message_queue_Control *the_message_queue
)
{
  return (CORE_message_queue_Buffer_control *)
    _Chain_Get_unprotected( &the_message_queue->Inactive_messages );
}

/**
 * This routine frees a message buffer to the inactive
 * message buffer chain.
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Free_message_buffer (
  CORE_message_queue_Control        *the_message_queue,
  CORE_message_queue_Buffer_control *the_message
)
{
  _Chain_Append_unprotected( &the_message_queue->Inactive_messages, &the_message->Node );
}

/**
 * This function returns the priority of @a the_message.
 *
 * @note It encapsulates the optional behavior that message priority is
 *       disabled if no API requires it.
 */
RTEMS_INLINE_ROUTINE int _CORE_message_queue_Get_message_priority (
  const CORE_message_queue_Buffer_control *the_message
)
{
  #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
    return the_message->priority;
  #else
    /* Priority support compiled out: every message reports priority 0. */
    return 0;
  #endif
}

/**
 * This function sets the priority of @a the_message.
 *
 * @note It encapsulates the optional behavior that message priority is
 *       disabled if no API requires it; in that case this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _CORE_message_queue_Set_message_priority (
  CORE_message_queue_Buffer_control *the_message,
  int                                priority
)
{
  #if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
    the_message->priority = priority;
  #endif
}

/**
 * This function removes the first message from the_message_queue
 * and returns a pointer to it.
 */
RTEMS_INLINE_ROUTINE
  CORE_message_queue_Buffer_control *_CORE_message_queue_Get_pending_message (
  CORE_message_queue_Control *the_message_queue
)
{
  return (CORE_message_queue_Buffer_control *)
    _Chain_Get_unprotected( &the_message_queue->Pending_messages );
}

/**
 * This function returns true if the priority attribute is
 * enabled in the attribute_set and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _CORE_message_queue_Is_priority(
  CORE_message_queue_Attributes *the_attribute
)
{
  return
    (the_attribute->discipline == CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY);
}

#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
  /**
   * This function returns true if notification is enabled on this message
   * queue and false otherwise.
   */
  RTEMS_INLINE_ROUTINE bool _CORE_message_queue_Is_notify_enabled (
    CORE_message_queue_Control *the_message_queue
  )
  {
    return (the_message_queue->notify_handler != NULL);
  }
#endif

/**
 * This routine initializes the notification information for
 * @a the_message_queue.
+ */ +#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION) + RTEMS_INLINE_ROUTINE void _CORE_message_queue_Set_notify ( + CORE_message_queue_Control *the_message_queue, + CORE_message_queue_Notify_Handler the_handler, + void *the_argument + ) + { + the_message_queue->notify_handler = the_handler; + the_message_queue->notify_argument = the_argument; + } +#else + /* turn it into nothing if not enabled */ + #define _CORE_message_queue_Set_notify( \ + the_message_queue, the_handler, the_argument ) +#endif + +RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Dequeue_receiver( + CORE_message_queue_Control *the_message_queue, + const void *buffer, + size_t size, + CORE_message_queue_Submit_types submit_type, + ISR_lock_Context *lock_context +) +{ + Thread_Control *the_thread; + + /* + * If there are pending messages, then there can't be threads + * waiting for us to send them a message. + * + * NOTE: This check is critical because threads can block on + * send and receive and this ensures that we are broadcasting + * the message to threads waiting to receive -- not to send. + */ + if ( the_message_queue->number_of_pending_messages != 0 ) { + return NULL; + } + + /* + * There must be no pending messages if there is a thread waiting to + * receive a message. 
+ */ + the_thread = _Thread_queue_First_locked( &the_message_queue->Wait_queue ); + if ( the_thread == NULL ) { + return NULL; + } + + *(size_t *) the_thread->Wait.return_argument = size; + the_thread->Wait.count = (uint32_t) submit_type; + + _CORE_message_queue_Copy_buffer( + buffer, + the_thread->Wait.return_argument_second.mutable_object, + size + ); + + _Thread_queue_Extract_critical( + &the_message_queue->Wait_queue.Queue, + the_message_queue->Wait_queue.operations, + the_thread, + lock_context + ); + + return the_thread; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coremutex.h b/include/rtems/score/coremutex.h new file mode 100644 index 0000000000..ccf6066740 --- /dev/null +++ b/include/rtems/score/coremutex.h @@ -0,0 +1,181 @@ +/** + * @file + * + * @brief CORE Mutex API + * + * This include file contains all the constants and structures associated with + * the Mutex Handler. A mutex is an enhanced version of the standard Dijkstra + * binary semaphore used to provide synchronization and mutual exclusion + * capabilities. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREMUTEX_H +#define _RTEMS_SCORE_COREMUTEX_H + +#include <rtems/score/thread.h> +#include <rtems/score/threadq.h> +#include <rtems/score/priority.h> +#include <rtems/score/watchdog.h> +#include <rtems/score/interr.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreMutex Mutex Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Mutex services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +/** + * @brief The blocking disciplines for a mutex. 
+ * + * This enumerated type defines the blocking disciplines for a mutex. + */ +typedef enum { + /** This specifies that threads will wait for the mutex in FIFO order. */ + CORE_MUTEX_DISCIPLINES_FIFO, + /** This specifies that threads will wait for the mutex in priority order. */ + CORE_MUTEX_DISCIPLINES_PRIORITY, + /** This specifies that threads will wait for the mutex in priority order. + * Additionally, the Priority Inheritance Protocol will be in effect. + */ + CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT, + /** This specifies that threads will wait for the mutex in priority order. + * Additionally, the Priority Ceiling Protocol will be in effect. + */ + CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING +} CORE_mutex_Disciplines; + +/** + * @brief The possible behaviors for lock nesting. + * + * This enumerated type defines the possible behaviors for + * lock nesting. + */ +typedef enum { + /** + * This sequence has no blocking or errors: + * + * + lock(m) + * + lock(m) + * + unlock(m) + * + unlock(m) + */ + CORE_MUTEX_NESTING_ACQUIRES, +#if defined(RTEMS_POSIX_API) + /** + * This sequence returns an error at the indicated point: + * + * + lock(m) + * + lock(m) - already locked error + * + unlock(m) + */ + CORE_MUTEX_NESTING_IS_ERROR, +#endif + /** + * This sequence performs as indicated: + * + lock(m) + * + lock(m) - deadlocks or timeouts + * + unlock(m) - releases + */ + CORE_MUTEX_NESTING_BLOCKS +} CORE_mutex_Nesting_behaviors; + +/** + * @brief The control block used to manage attributes of each mutex. + * + * The following defines the control block used to manage the + * attributes of each mutex. + */ +typedef struct { + /** This field determines what the behavior of this mutex instance will + * be when attempting to acquire the mutex when it is already locked. + */ + CORE_mutex_Nesting_behaviors lock_nesting_behavior; + /** When this field is true, then only the thread that locked the mutex + * is allowed to unlock it. 
+ */ + bool only_owner_release; + /** This field indicates whether threads waiting on the mutex block in + * FIFO or priority order. + */ + CORE_mutex_Disciplines discipline; + /** This field contains the ceiling priority to be used if that protocol + * is selected. + */ + Priority_Control priority_ceiling; +} CORE_mutex_Attributes; + +#ifdef __RTEMS_STRICT_ORDER_MUTEX__ +/** + * @brief The control block to manage lock chain of priority inheritance mutex. + * + * The following defines the control block used to manage lock chain of + * priority inheritance mutex. + */ + typedef struct{ + /** This field is a chian of locked mutex by a thread,new mutex will + * be added to the head of queue, and the mutex which will be released + * must be the head of queue. + */ + Chain_Node lock_queue; + /** This field is the priority of thread before locking this mutex + * + */ + Priority_Control priority_before; + } CORE_mutex_order_list; +#endif + +/** + * @brief Control block used to manage each mutex. + * + * The following defines the control block used to manage each mutex. + */ +typedef struct { + /** This field is the Waiting Queue used to manage the set of tasks + * which are blocked waiting to lock the mutex. + */ + Thread_queue_Control Wait_queue; + /** This element is the set of attributes which define this instance's + * behavior. + */ + CORE_mutex_Attributes Attributes; + /** This element contains the number of times the mutex has been acquired + * nested. This must be zero (0) before the mutex is actually unlocked. + */ + uint32_t nest_count; + /** This element points to the thread which is currently holding this mutex. + * The holder is the last thread to successfully lock the mutex and which + * has not unlocked it. If the thread is not locked, there is no holder. 
+ */ + Thread_Control *holder; +#ifdef __RTEMS_STRICT_ORDER_MUTEX__ + /** This field is used to manipulate the priority inheritance mutex queue*/ + CORE_mutex_order_list queue; +#endif + +} CORE_mutex_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coremuteximpl.h b/include/rtems/score/coremuteximpl.h new file mode 100644 index 0000000000..f57fdfcfd5 --- /dev/null +++ b/include/rtems/score/coremuteximpl.h @@ -0,0 +1,546 @@ +/** + * @file + * + * @ingroup ScoreMutex + * + * @brief CORE Mutex Implementation + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_COREMUTEXIMPL_H +#define _RTEMS_SCORE_COREMUTEXIMPL_H + +#include <rtems/score/coremutex.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/sysstate.h> +#include <rtems/score/threadimpl.h> +#include <rtems/score/threadqimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreMutex + */ +/**@{**/ + +/** + * @brief Callout which provides to support global/multiprocessor operations. + * + * The following type defines the callout which the API provides + * to support global/multiprocessor operations on mutexes. + */ +typedef void ( *CORE_mutex_API_mp_support_callout )( + Thread_Control *, + Objects_Id + ); + +/** + * @brief The possible Mutex handler return statuses. + * + * This enumerated type defines the possible Mutex handler return statuses. + */ +typedef enum { + /** This status indicates that the operation completed successfully. */ + CORE_MUTEX_STATUS_SUCCESSFUL, + /** This status indicates that the calling task did not want to block + * and the operation was unable to complete immediately because the + * resource was unavailable. 
+ */ + CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT, +#if defined(RTEMS_POSIX_API) + /** This status indicates that an attempt was made to relock a mutex + * for which nesting is not configured. + */ + CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED, +#endif + /** This status indicates that an attempt was made to release a mutex + * by a thread other than the thread which locked it. + */ + CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE, + /** This status indicates that the thread was blocked waiting for an + * operation to complete and the mutex was deleted. + */ + CORE_MUTEX_WAS_DELETED, + /** This status indicates that the calling task was willing to block + * but the operation was unable to complete within the time allotted + * because the resource never became available. + */ + CORE_MUTEX_TIMEOUT, + +#if defined(__RTEMS_STRICT_ORDER_MUTEX__) + /** This status indicates that a thread not release the mutex which has + * the priority inheritance property in a right order. + */ + CORE_MUTEX_RELEASE_NOT_ORDER, +#endif + + /** This status indicates that a thread of logically greater importance + * than the ceiling priority attempted to lock this mutex. + */ + CORE_MUTEX_STATUS_CEILING_VIOLATED + +} CORE_mutex_Status; + +/** + * @brief The last status value. + * + * This is the last status value. + */ +#define CORE_MUTEX_STATUS_LAST CORE_MUTEX_STATUS_CEILING_VIOLATED + +/** + * @brief Initializes the mutex based on the parameters passed. + * + * This routine initializes the mutex based on the parameters passed. + * + * @param[in,out] the_mutex is the mutex to initalize + * @param[in,out] executing The currently executing thread. + * @param[in] the_mutex_attributes is the attributes associated with this + * mutex instance + * @param[in] initially_locked If true, then the mutex is initially locked by + * the executing thread. + * + * @retval This method returns CORE_MUTEX_STATUS_SUCCESSFUL if successful. 
+ */ +CORE_mutex_Status _CORE_mutex_Initialize( + CORE_mutex_Control *the_mutex, + Thread_Control *executing, + const CORE_mutex_Attributes *the_mutex_attributes, + bool initially_locked +); + +RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex ) +{ + _Thread_queue_Destroy( &the_mutex->Wait_queue ); +} + +/** + * @brief Attempt to receive a unit from the_mutex. + * + * This routine attempts to receive a unit from the_mutex. + * If a unit is available or if the wait flag is false, then the routine + * returns. Otherwise, the calling task is blocked until a unit becomes + * available. + * + * @param[in,out] executing The currently executing thread. + * @param[in,out] the_mutex is the mutex to attempt to lock + * @param[in] lock_context is the interrupt level + * + * @retval This routine returns 0 if "trylock" can resolve whether or not + * the mutex is immediately obtained or there was an error attempting to + * get it. It returns 1 to indicate that the caller cannot obtain + * the mutex and will have to block to do so. + * + * @note For performance reasons, this routine is implemented as + * a macro that uses two support routines. + */ + +RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( + CORE_mutex_Control *the_mutex, + Thread_Control *executing, + ISR_lock_Context *lock_context +); + +#if defined(__RTEMS_DO_NOT_INLINE_CORE_MUTEX_SEIZE__) + /** + * @brief Interrupt trylock CORE mutex seize. + * + * When doing test coverage analysis or trying to minimize the code + * space for RTEMS, it is often helpful to not inline this method + * multiple times. It is fairly large and has a high branch complexity + * which makes it harder to get full binary test coverage. 
+   * @param[in] the_mutex is the mutex to attempt to lock
+   * @param[in] executing points to the executing thread
+   * @param[in] lock_context is the interrupt lock context
If the mutex is available, + * then it will return immediately. Otherwise, it will invoke the + * support routine @a _Core_mutex_Seize_interrupt_blocking. + * + * @param[in] the_mutex is the mutex to attempt to lock + * @param[in] id is the Id of the owning API level Semaphore object + * @param[in] wait is true if the thread is willing to wait + * @param[in] timeout is the maximum number of ticks to block + * @param[in] lock_context is a temporary variable used to contain the ISR + * disable level cookie + * + * @note If the mutex is called from an interrupt service routine, + * with context switching disabled, or before multitasking, + * then a fatal error is generated. + * + * The logic on this routine is as follows: + * + * * If incorrect system state + * return an error + * * If mutex is available without any contention or blocking + * obtain it with interrupts disabled and returned + * * If the caller is willing to wait + * then they are blocked. + */ +RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize_body( + CORE_mutex_Control *the_mutex, + Thread_Control *executing, + Objects_Id id, + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +) +{ + if ( _CORE_mutex_Check_dispatch_for_seize( wait ) ) { + _Terminate( + INTERNAL_ERROR_CORE, + false, + INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE + ); + } + _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context ); + if ( _CORE_mutex_Seize_interrupt_trylock( the_mutex, executing, lock_context ) ) { + if ( !wait ) { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + executing->Wait.return_code = + CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT; + } else { + executing->Wait.id = id; + _CORE_mutex_Seize_interrupt_blocking( + the_mutex, + executing, + timeout, + lock_context + ); + } + } +} + +/** + * This method is used to obtain a core mutex. + * + * @param[in] _the_mutex is the mutex to attempt to lock + * @param[in] _executing The currently executing thread. 
+ * @param[in] _id is the Id of the owning API level Semaphore object + * @param[in] _wait is true if the thread is willing to wait + * @param[in] _timeout is the maximum number of ticks to block + * @param[in] _lock_context is a temporary variable used to contain the ISR + * disable level cookie + */ +#if defined(__RTEMS_DO_NOT_INLINE_CORE_MUTEX_SEIZE__) + void _CORE_mutex_Seize( + CORE_mutex_Control *_the_mutex, + Thread_Control *_executing, + Objects_Id _id, + bool _wait, + Watchdog_Interval _timeout, + ISR_lock_Context *_lock_context + ); +#else + #define _CORE_mutex_Seize( \ + _the_mutex, _executing, _id, _wait, _timeout, _lock_context ) \ + _CORE_mutex_Seize_body( \ + _the_mutex, _executing, _id, _wait, _timeout, _lock_context ) +#endif + +/** + * @brief Frees a unit to the mutex. + * + * This routine frees a unit to the mutex. If a task was blocked waiting for + * a unit from this mutex, then that task will be readied and the unit + * given to that task. Otherwise, the unit will be returned to the mutex. + * + * @param[in] the_mutex is the mutex to surrender + * @param[in] id is the id of the RTEMS Object associated with this mutex + * @param[in] api_mutex_mp_support is the routine that will be called when + * unblocking a remote mutex + * @param[in] lock_context is the interrupt level + * + * @retval an indication of whether the routine succeeded or failed + */ +CORE_mutex_Status _CORE_mutex_Surrender( + CORE_mutex_Control *the_mutex, + Objects_Id id, + CORE_mutex_API_mp_support_callout api_mutex_mp_support, + ISR_lock_Context *lock_context +); + +/** + * @brief Flush all waiting threads. + * + * This routine assists in the deletion of a mutex by flushing the associated + * wait queue. + * + * @param[in] the_mutex is the mutex to flush + * @param[in] remote_extract_callout is the routine to invoke when a remote + * thread is extracted + * @param[in] status is the status value which each unblocked thread will + * return to its caller. 
+ */ +void _CORE_mutex_Flush( + CORE_mutex_Control *the_mutex, + Thread_queue_Flush_callout remote_extract_callout, + uint32_t status +); + +/** + * @brief Is mutex locked. + * + * This routine returns true if the mutex specified is locked and false + * otherwise. + * + * @param[in] the_mutex is the mutex to check. + * + * @retval true The mutex is locked. + * @retval false The mutex is not locked. + */ +RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_locked( + const CORE_mutex_Control *the_mutex +) +{ + return the_mutex->holder != NULL; +} + +/** + * @brief Does core mutex use FIFO blocking. + * + * This routine returns true if the mutex's wait discipline is FIFO and false + * otherwise. + * + * @param[in] the_attribute is the attribute set of the mutex. + * + * @retval true The mutex is using FIFO blocking order. + * @retval false The mutex is not using FIFO blocking order. + */ +RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_fifo( + const CORE_mutex_Attributes *the_attribute +) +{ + return the_attribute->discipline == CORE_MUTEX_DISCIPLINES_FIFO; +} + +/** + * @brief Doex core mutex use priority blocking. + * + * This routine returns true if the mutex's wait discipline is PRIORITY and + * false otherwise. + * + * @param[in] the_attribute is the attribute set of the mutex. + * + * @retval true The mutex is using priority blocking order. + * @retval false The mutex is not using priority blocking order. + * + */ +RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_priority( + CORE_mutex_Attributes *the_attribute +) +{ + return the_attribute->discipline == CORE_MUTEX_DISCIPLINES_PRIORITY; +} + +/** + * @brief Does mutex use priority inheritance. + * + * This routine returns true if the mutex's wait discipline is + * INHERIT_PRIORITY and false otherwise. + * + * @param[in] the_attribute is the attribute set of the mutex. + * + * @retval true The mutex is using priority inheritance. + * @retval false The mutex is not using priority inheritance. 
+ */ +RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_inherit_priority( + CORE_mutex_Attributes *the_attribute +) +{ + return the_attribute->discipline == CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT; +} + +/** + * @brief Does mutex use priority ceiling. + * + * This routine returns true if the mutex's wait discipline is + * PRIORITY_CEILING and false otherwise. + * + * @param[in] the_attribute is the attribute set of the mutex. + * + * @retval true The mutex is using priority ceiling. + * @retval false The mutex is not using priority ceiling. + */ +RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_priority_ceiling( + CORE_mutex_Attributes *the_attribute +) +{ + return the_attribute->discipline == CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING; +} + +/* + * Seize Mutex with Quick Success Path + * + * NOTE: There is no MACRO version of this routine. A body is in + * coremutexseize.c that is duplicated from the .inl by hand. + * + * NOTE: The Doxygen for this routine is in the .h file. + */ + +RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock_body( + CORE_mutex_Control *the_mutex, + Thread_Control *executing, + ISR_lock_Context *lock_context +) +{ + /* disabled when you get here */ + + executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL; + if ( !_CORE_mutex_Is_locked( the_mutex ) ) { + the_mutex->holder = executing; + the_mutex->nest_count = 1; + if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) || + _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ){ + +#ifdef __RTEMS_STRICT_ORDER_MUTEX__ + _Chain_Prepend_unprotected( &executing->lock_mutex, + &the_mutex->queue.lock_queue ); + the_mutex->queue.priority_before = executing->current_priority; +#endif + + executing->resource_count++; + } + + if ( !_CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + return 0; + } /* else must be CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING + * + * we possibly bump the priority of the current holder -- 
which + * happens to be _Thread_Executing. + */ + { + Priority_Control ceiling; + Priority_Control current; + + ceiling = the_mutex->Attributes.priority_ceiling; + current = executing->current_priority; + if ( current == ceiling ) { + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + return 0; + } + + if ( current > ceiling ) { + Per_CPU_Control *cpu_self; + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + _Thread_Raise_priority( executing, ceiling ); + _Thread_Dispatch_enable( cpu_self ); + return 0; + } + /* if ( current < ceiling ) */ { + executing->Wait.return_code = CORE_MUTEX_STATUS_CEILING_VIOLATED; + the_mutex->holder = NULL; + the_mutex->nest_count = 0; /* undo locking above */ + executing->resource_count--; /* undo locking above */ + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + return 0; + } + } + return 0; + } + + /* + * At this point, we know the mutex was not available. If this thread + * is the thread that has locked the mutex, let's see if we are allowed + * to nest access. + */ + if ( _Thread_Is_executing( the_mutex->holder ) ) { + switch ( the_mutex->Attributes.lock_nesting_behavior ) { + case CORE_MUTEX_NESTING_ACQUIRES: + the_mutex->nest_count++; + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + return 0; + #if defined(RTEMS_POSIX_API) + case CORE_MUTEX_NESTING_IS_ERROR: + executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED; + _Thread_queue_Release( &the_mutex->Wait_queue, lock_context ); + return 0; + #endif + case CORE_MUTEX_NESTING_BLOCKS: + break; + } + } + + /* + * The mutex is not available and the caller must deal with the possibility + * of blocking. 
+ */ + return 1; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/corerwlock.h b/include/rtems/score/corerwlock.h new file mode 100644 index 0000000000..f211339548 --- /dev/null +++ b/include/rtems/score/corerwlock.h @@ -0,0 +1,91 @@ +/** + * @file rtems/score/corerwlock.h + * + * @brief Constants and Structures Associated with the RWLock Handler + * + * This include file contains all the constants and structures associated + * with the RWLock Handler. + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORERWLOCK_H +#define _RTEMS_SCORE_CORERWLOCK_H + +#include <rtems/score/threadq.h> + +/** + * @defgroup ScoreRWLock RWLock Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * RWLock services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * RWLock State. + */ +typedef enum { + /** This indicates the the RWLock is not currently locked. + */ + CORE_RWLOCK_UNLOCKED, + /** This indicates the the RWLock is currently locked for reading. + */ + CORE_RWLOCK_LOCKED_FOR_READING, + /** This indicates the the RWLock is currently locked for reading. + */ + CORE_RWLOCK_LOCKED_FOR_WRITING +} CORE_RWLock_States; + +/** + * The following defines the control block used to manage the + * attributes of each RWLock. + */ +typedef struct { + /** This field indicates XXX. + */ + int XXX; +} CORE_RWLock_Attributes; + +/** + * The following defines the control block used to manage each + * RWLock. + */ +typedef struct { + /** This field is the Waiting Queue used to manage the set of tasks + * which are blocked waiting for the RWLock to be released. 
+ */ + Thread_queue_Control Wait_queue; + /** This element is the set of attributes which define this instance's + * behavior. + */ + CORE_RWLock_Attributes Attributes; + /** This element is the current state of the RWLock. + */ + CORE_RWLock_States current_state; + /** This element contains the current number of thread waiting for this + * RWLock to be released. */ + uint32_t number_of_readers; +} CORE_RWLock_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/corerwlockimpl.h b/include/rtems/score/corerwlockimpl.h new file mode 100644 index 0000000000..e619574d89 --- /dev/null +++ b/include/rtems/score/corerwlockimpl.h @@ -0,0 +1,197 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the SuperCore RWLock + * + * This include file contains all of the inlined routines associated + * with the SuperCore RWLock. + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORERWLOCKIMPL_H +#define _RTEMS_SCORE_CORERWLOCKIMPL_H + +#include <rtems/score/corerwlock.h> +#include <rtems/score/thread.h> +#include <rtems/score/threadqimpl.h> +#include <rtems/score/watchdog.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreRWLock + */ +/**@{**/ + +/** + * The following type defines the callout which the API provides + * to support global/multiprocessor operations on RWLocks. + */ +typedef void ( *CORE_RWLock_API_mp_support_callout )( + Thread_Control *, + Objects_Id + ); + +/** + * Core RWLock handler return statuses. + */ +typedef enum { + /** This status indicates that the operation completed successfully. 
*/ + CORE_RWLOCK_SUCCESSFUL, + /** This status indicates that the thread was blocked waiting for an */ + CORE_RWLOCK_WAS_DELETED, + /** This status indicates that the rwlock was not immediately available. */ + CORE_RWLOCK_UNAVAILABLE, + /** This status indicates that the calling task was willing to block + * but the operation was unable to complete within the time allotted + * because the resource never became available. + */ + CORE_RWLOCK_TIMEOUT +} CORE_RWLock_Status; + +/** This is the last status value. + */ +#define CORE_RWLOCK_STATUS_LAST CORE_RWLOCK_TIMEOUT + +/** + * This is used to denote that a thread is blocking waiting for + * read-only access to the RWLock. + */ +#define CORE_RWLOCK_THREAD_WAITING_FOR_READ 0 + +/** + * This is used to denote that a thread is blocking waiting for + * write-exclusive access to the RWLock. + */ +#define CORE_RWLOCK_THREAD_WAITING_FOR_WRITE 1 + +/** + * @brief Initialize a RWlock. + * + * This routine initializes the RWLock based on the parameters passed. + * + * @param[in] the_rwlock is the RWLock to initialize + * @param[in] the_rwlock_attributes define the behavior of this instance + */ +void _CORE_RWLock_Initialize( + CORE_RWLock_Control *the_rwlock, + CORE_RWLock_Attributes *the_rwlock_attributes +); + +RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy( + CORE_RWLock_Control *the_rwlock +) +{ + _Thread_queue_Destroy( &the_rwlock->Wait_queue ); +} + +/** + * @brief Obtain RWLock for reading. + * + * This routine attempts to obtain the RWLock for read access. + * + * @param[in] the_rwlock is the RWLock to wait for + * @param[in] id is the id of the object being waited upon + * @param[in] wait is true if the calling thread is willing to wait + * @param[in] timeout is the number of ticks the calling thread is willing + * to wait if @a wait is true. + * @param[in] api_rwlock_mp_support is the routine to invoke if the + * thread unblocked is remote + * + * @note Status is returned via the thread control block. 
+ */ + +void _CORE_RWLock_Obtain_for_reading( + CORE_RWLock_Control *the_rwlock, + Thread_Control *executing, + Objects_Id id, + bool wait, + Watchdog_Interval timeout, + CORE_RWLock_API_mp_support_callout api_rwlock_mp_support +); + +/** + * @brief Obtain RWLock for writing. + * + * This routine attempts to obtain the RWLock for write exclusive access. + * + * @param[in] the_rwlock is the RWLock to wait for + * @param[in] id is the id of the object being waited upon + * @param[in] wait is true if the calling thread is willing to wait + * @param[in] timeout is the number of ticks the calling thread is willing + * to wait if @a wait is true. + * @param[in] api_rwlock_mp_support is the routine to invoke if the + * thread unblocked is remote + * + * @note Status is returned via the thread control block. + */ +void _CORE_RWLock_Obtain_for_writing( + CORE_RWLock_Control *the_rwlock, + Thread_Control *executing, + Objects_Id id, + bool wait, + Watchdog_Interval timeout, + CORE_RWLock_API_mp_support_callout api_rwlock_mp_support +); + +/** + * @brief Release the RWLock. + * + * This routine manually releases @a the_rwlock. All of the threads waiting + * for the RWLock will be readied. + * + * @param[in] the_rwlock is the RWLock to surrender + * + * @retval Status is returned to indicate successful or failure. + */ +CORE_RWLock_Status _CORE_RWLock_Release( + CORE_RWLock_Control *the_rwlock, + Thread_Control *executing +); + +/** + * This routine assists in the deletion of a RWLock by flushing the + * associated wait queue. 
+ * + * @param[in] _the_rwlock is the RWLock to flush + * @param[in] _remote_extract_callout is the routine to invoke if the + * thread unblocked is remote + * @param[in] _status is the status to be returned to the unblocked thread + */ +#define _CORE_RWLock_Flush( _the_rwlock, _remote_extract_callout, _status) \ + _Thread_queue_Flush( \ + &((_the_rwlock)->Wait_queue), \ + (_remote_extract_callout), \ + (_status) \ + ) + +/** + * This method is used to initialize core rwlock attributes. + * + * @param[in] the_attributes pointer to the attributes to initialize. + */ +RTEMS_INLINE_ROUTINE void _CORE_RWLock_Initialize_attributes( + CORE_RWLock_Attributes *the_attributes +) +{ + the_attributes->XXX = 0; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coresem.h b/include/rtems/score/coresem.h new file mode 100644 index 0000000000..5f871f063e --- /dev/null +++ b/include/rtems/score/coresem.h @@ -0,0 +1,89 @@ +/** + * @file rtems/score/coresem.h + * + * @brief Data Associated with the Counting Semaphore Handler + * + * This include file contains all the constants and structures associated + * with the Counting Semaphore Handler. A counting semaphore is the + * standard Dijkstra binary semaphore used to provide synchronization + * and mutual exclusion capabilities. + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORESEM_H +#define _RTEMS_SCORE_CORESEM_H + +#include <rtems/score/threadq.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSemaphore Semaphore Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Semaphore services used in all of the APIs supported by RTEMS. 
+ */ +/**@{*/ + +/** + * Blocking disciplines for a semaphore. + */ +typedef enum { + /** This specifies that threads will wait for the semaphore in FIFO order. */ + CORE_SEMAPHORE_DISCIPLINES_FIFO, + /** This specifies that threads will wait for the semaphore in + * priority order. + */ + CORE_SEMAPHORE_DISCIPLINES_PRIORITY +} CORE_semaphore_Disciplines; + +/** + * The following defines the control block used to manage the + * attributes of each semaphore. + */ +typedef struct { + /** This element indicates the maximum count this semaphore may have. */ + uint32_t maximum_count; + /** This field indicates whether threads waiting on the semaphore block in + * FIFO or priority order. + */ + CORE_semaphore_Disciplines discipline; +} CORE_semaphore_Attributes; + +/** + * The following defines the control block used to manage each + * counting semaphore. + */ +typedef struct { + /** This field is the Waiting Queue used to manage the set of tasks + * which are blocked waiting to obtain the semaphore. + */ + Thread_queue_Control Wait_queue; + /** This element is the set of attributes which define this instance's + * behavior. + */ + CORE_semaphore_Attributes Attributes; + /** This element contains the current count of this semaphore. */ + uint32_t count; +} CORE_semaphore_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/coresemimpl.h b/include/rtems/score/coresemimpl.h new file mode 100644 index 0000000000..a6a30bcc15 --- /dev/null +++ b/include/rtems/score/coresemimpl.h @@ -0,0 +1,284 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the SuperCore Semaphore + * + * This include file contains all of the inlined routines associated + * with the SuperCore semaphore. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORESEMIMPL_H +#define _RTEMS_SCORE_CORESEMIMPL_H + +#include <rtems/score/coresem.h> +#include <rtems/score/objectimpl.h> +#include <rtems/score/threaddispatch.h> +#include <rtems/score/threadqimpl.h> +#include <rtems/score/statesimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreSemaphore + */ +/**@{**/ + +/** + * Core Semaphore handler return statuses. + */ +typedef enum { + /** This status indicates that the operation completed successfully. */ + CORE_SEMAPHORE_STATUS_SUCCESSFUL, + /** This status indicates that the calling task did not want to block + * and the operation was unable to complete immediately because the + * resource was unavailable. + */ + CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT, + /** This status indicates that the thread was blocked waiting for an + * operation to complete and the semaphore was deleted. + */ + CORE_SEMAPHORE_WAS_DELETED, + /** This status indicates that the calling task was willing to block + * but the operation was unable to complete within the time allotted + * because the resource never became available. + */ + CORE_SEMAPHORE_TIMEOUT, + /** This status indicates that an attempt was made to unlock the semaphore + * and this would have made its count greater than that allowed. + */ + CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED +} CORE_semaphore_Status; + +/** + * @brief Core semaphore last status value. + * + * This is the last status value. + */ +#define CORE_SEMAPHORE_STATUS_LAST CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED + +/** + * The following type defines the callout which the API provides + * to support global/multiprocessor operations on semaphores. 
+ */ +typedef void ( *CORE_semaphore_API_mp_support_callout )( + Thread_Control *, + Objects_Id + ); + +/** + * @brief Initialize the semaphore based on the parameters passed. + * + * This package is the implementation of the CORE Semaphore Handler. + * This core object utilizes standard Dijkstra counting semaphores to provide + * synchronization and mutual exclusion capabilities. + * + * This routine initializes the semaphore based on the parameters passed. + * + * @param[in] the_semaphore is the semaphore to initialize + * @param[in] the_semaphore_attributes define the behavior of this instance + * @param[in] initial_value is the initial count of the semaphore + */ +void _CORE_semaphore_Initialize( + CORE_semaphore_Control *the_semaphore, + const CORE_semaphore_Attributes *the_semaphore_attributes, + uint32_t initial_value +); + +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy( + CORE_semaphore_Control *the_semaphore +) +{ + _Thread_queue_Destroy( &the_semaphore->Wait_queue ); +} + +/** + * @brief Surrender a unit to a semaphore. + * + * This routine frees a unit to the semaphore. If a task was blocked waiting + * for a unit from this semaphore, then that task will be readied and the unit + * given to that task. Otherwise, the unit will be returned to the semaphore. 
+ * + * @param[in] the_semaphore is the semaphore to surrender + * @param[in] id is the Id of the API level Semaphore object associated + * with this instance of a SuperCore Semaphore + * @param[in] api_semaphore_mp_support is the routine to invoke if the + * thread unblocked is remote + * @param[in] lock_context is a temporary variable used to contain the ISR + * disable level cookie + * + * @retval an indication of whether the routine succeeded or failed + */ +RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Surrender( + CORE_semaphore_Control *the_semaphore, + Objects_Id id, + CORE_semaphore_API_mp_support_callout api_semaphore_mp_support, + ISR_lock_Context *lock_context +) +{ + Thread_Control *the_thread; + CORE_semaphore_Status status; + + status = CORE_SEMAPHORE_STATUS_SUCCESSFUL; + + _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, lock_context ); + + the_thread = _Thread_queue_First_locked( &the_semaphore->Wait_queue ); + if ( the_thread != NULL ) { +#if defined(RTEMS_MULTIPROCESSING) + _Thread_Dispatch_disable(); +#endif + + _Thread_queue_Extract_critical( + &the_semaphore->Wait_queue.Queue, + the_semaphore->Wait_queue.operations, + the_thread, + lock_context + ); + +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) + (*api_semaphore_mp_support) ( the_thread, id ); + + _Thread_Dispatch_enable( _Per_CPU_Get() ); +#endif + } else { + if ( the_semaphore->count < the_semaphore->Attributes.maximum_count ) + the_semaphore->count += 1; + else + status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED; + + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); + } + + return status; +} + +/** + * @brief Core semaphore flush. + * + * This package is the implementation of the CORE Semaphore Handler. + * This core object utilizes standard Dijkstra counting semaphores to provide + * synchronization and mutual exclusion capabilities. 
+ * + * This routine assists in the deletion of a semaphore by flushing the + * associated wait queue. + * + * @param[in] the_semaphore is the semaphore to flush + * @param[in] remote_extract_callout is the routine to invoke if the + * thread unblocked is remote + * @param[in] status is the status to be returned to the unblocked thread + */ +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Flush( + CORE_semaphore_Control *the_semaphore, + Thread_queue_Flush_callout remote_extract_callout, + uint32_t status +) +{ + _Thread_queue_Flush( + &the_semaphore->Wait_queue, + remote_extract_callout, + status + ); +} + +/** + * This function returns true if the priority attribute is + * enabled in the @a attribute_set and false otherwise. + * + * @param[in] the_attribute is the attribute set to test + * + * @return true if the priority attribute is enabled + */ +RTEMS_INLINE_ROUTINE bool _CORE_semaphore_Is_priority( + const CORE_semaphore_Attributes *the_attribute +) +{ + return ( the_attribute->discipline == CORE_SEMAPHORE_DISCIPLINES_PRIORITY ); +} + +/** + * This routine returns the current count associated with the semaphore. + * + * @param[in] the_semaphore is the semaphore to obtain the count of + * + * @return the current count of this semaphore + */ +RTEMS_INLINE_ROUTINE uint32_t _CORE_semaphore_Get_count( + CORE_semaphore_Control *the_semaphore +) +{ + return the_semaphore->count; +} + +/** + * This routine attempts to receive a unit from the_semaphore. + * If a unit is available or if the wait flag is false, then the routine + * returns. Otherwise, the calling task is blocked until a unit becomes + * available. + * + * @param[in] the_semaphore is the semaphore to obtain + * @param[in,out] executing The currently executing thread. 
+ * @param[in] id is the Id of the owning API level Semaphore object + * @param[in] wait is true if the thread is willing to wait + * @param[in] timeout is the maximum number of ticks to block + * @param[in] lock_context is a temporary variable used to contain the ISR + * disable level cookie + * + * @note There is currently no MACRO version of this routine. + */ +RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize( + CORE_semaphore_Control *the_semaphore, + Thread_Control *executing, + Objects_Id id, + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +) +{ + /* disabled when you get here */ + + executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL; + _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, lock_context ); + if ( the_semaphore->count != 0 ) { + the_semaphore->count -= 1; + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); + return; + } + + if ( !wait ) { + _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context ); + executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT; + return; + } + + executing->Wait.id = id; + _Thread_queue_Enqueue_critical( + &the_semaphore->Wait_queue.Queue, + the_semaphore->Wait_queue.operations, + executing, + STATES_WAITING_FOR_SEMAPHORE, + timeout, + CORE_SEMAPHORE_TIMEOUT, + lock_context + ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/corespinlock.h b/include/rtems/score/corespinlock.h new file mode 100644 index 0000000000..ca50eed5e2 --- /dev/null +++ b/include/rtems/score/corespinlock.h @@ -0,0 +1,79 @@ +/** + * @file rtems/score/corespinlock.h + * + * @brief Constants and Structures Associated with the Spinlock Handler + * + * This include file contains all the constants and structures associated + * with the Spinlock Handler. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORESPINLOCK_H +#define _RTEMS_SCORE_CORESPINLOCK_H + +#include <rtems/score/object.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSpinlock Spinlock Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Spinlock services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +/** + * The following defines the control block used to manage the + * attributes of each spinlock. + */ +typedef struct { + /** This element indicates XXX + */ + uint32_t XXX; +} CORE_spinlock_Attributes; + +/** + * The following defines the control block used to manage each + * spinlock. + */ +typedef struct { + /** XXX may not be needed */ + CORE_spinlock_Attributes Attributes; + + /** This field is the lock. + */ + volatile uint32_t lock; + + /** This field is a count of the current number of threads using + * this spinlock. It includes the thread holding the lock as well + * as those waiting. + */ + volatile uint32_t users; + + /** This field is the Id of the thread holding the lock. It may or may + * not be the thread which acquired it. + */ + volatile Objects_Id holder; +} CORE_spinlock_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/corespinlockimpl.h b/include/rtems/score/corespinlockimpl.h new file mode 100644 index 0000000000..fe6f9b67f5 --- /dev/null +++ b/include/rtems/score/corespinlockimpl.h @@ -0,0 +1,155 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the SuperCore Spinlock + * + * This include file contains all of the inlined routines associated + * with the SuperCore spinlock. + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CORESPINLOCKIMPL_H +#define _RTEMS_SCORE_CORESPINLOCKIMPL_H + +#include <rtems/score/corespinlock.h> +#include <rtems/score/watchdog.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreSpinlock + */ +/**@{**/ + +/** + * Core Spinlock handler return statuses. + */ +typedef enum { + /** This status indicates that the operation completed successfully. */ + CORE_SPINLOCK_SUCCESSFUL, + /** This status indicates that the current thread already holds the spinlock. + * An attempt to relock it will result in deadlock. + */ + CORE_SPINLOCK_HOLDER_RELOCKING, + /** This status indicates that the current thread is attempting to unlock a + * spinlock that is held by another thread. + */ + CORE_SPINLOCK_NOT_HOLDER, + /** This status indicates that a thread reached the limit of time it + * was willing to wait on the spin lock. + */ + CORE_SPINLOCK_TIMEOUT, + /** This status indicates that a thread is currently waiting for this + * spin lock. + */ + CORE_SPINLOCK_IS_BUSY, + /** This status indicates that the spinlock is currently locked and thus + * unavailable. + */ + CORE_SPINLOCK_UNAVAILABLE, + /** This status indicates that the spinlock is not currently locked and thus + * should not be released. + */ + CORE_SPINLOCK_NOT_LOCKED +} CORE_spinlock_Status; + +/** This is a shorthand for the last status code. */ +#define CORE_SPINLOCK_STATUS_LAST CORE_SPINLOCK_NOT_LOCKED + +/** This indicates the lock is available. */ +#define CORE_SPINLOCK_UNLOCKED 0 + +/** This indicates the lock is unavailable. */ +#define CORE_SPINLOCK_LOCKED 1 + +/** + * @brief Initialize the spinlock. + * + * This routine initializes the spinlock based on the parameters passed. 
+ * + * @param[in] the_spinlock is the spinlock control block to initialize + * @param[in] the_spinlock_attributes define the behavior of this instance + */ +void _CORE_spinlock_Initialize( + CORE_spinlock_Control *the_spinlock, + CORE_spinlock_Attributes *the_spinlock_attributes +); + +/** + * @brief Wait for spinlock. + * + * This routine wait for the spinlock to be released. If the spinlock + * is set to automatic and this is the appropriate thread, then it returns + * immediately. Otherwise, the calling thread is blocked until the spinlock + * is released. + * + * @param[in] the_spinlock is the spinlock to wait for + * @param[in] wait is true if willing to wait + * @param[in] timeout is the maximum number of ticks to spin (0 is forever) + * + * @retval A status is returned which indicates the success or failure of + * this operation. + */ +CORE_spinlock_Status _CORE_spinlock_Wait( + CORE_spinlock_Control *the_spinlock, + bool wait, + Watchdog_Interval timeout +); + +/** + * @brief Manually release the spinlock. + * + * This routine manually releases the spinlock. All of the threads waiting + * for the spinlock will be readied. + * + * @param[in] the_spinlock is the spinlock to surrender + */ +CORE_spinlock_Status _CORE_spinlock_Release( + CORE_spinlock_Control *the_spinlock +); + +/** + * This method is used to initialize core spinlock attributes. + * + * @param[in] the_attributes pointer to the attributes to initialize. + */ +RTEMS_INLINE_ROUTINE void _CORE_spinlock_Initialize_attributes( + CORE_spinlock_Attributes *the_attributes +) +{ + the_attributes->XXX = 0; +} + +/** + * This method is used to determine if the spinlock is available or not. + * + * @param[in] the_spinlock will be checked + * + * @return This method will return true if the spinlock is busy + * and false otherwise. 
+ */ +RTEMS_INLINE_ROUTINE bool _CORE_spinlock_Is_busy( + CORE_spinlock_Control *the_spinlock +) +{ + return (the_spinlock->users != 0); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/cpuatomic.h b/include/rtems/score/cpuatomic.h new file mode 100644 index 0000000000..598ee76b20 --- /dev/null +++ b/include/rtems/score/cpuatomic.h @@ -0,0 +1,14 @@ +/* + * COPYRIGHT (c) 2012-2013 Deng Hengyi. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ATOMIC_CPU_H +#define _RTEMS_SCORE_ATOMIC_CPU_H + +#include <rtems/score/cpustdatomic.h> + +#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */ diff --git a/include/rtems/score/cpuset.h b/include/rtems/score/cpuset.h new file mode 100644 index 0000000000..ebdb35dfd1 --- /dev/null +++ b/include/rtems/score/cpuset.h @@ -0,0 +1,67 @@ +/** + * @file rtems/score/cpuset.h + * + * @brief Information About the CPU Set + * + * This include file contains all information about the thread + * CPU Set. + */ + +/* + * COPYRIGHT (c) 2014. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CPUSET_H +#define _RTEMS_SCORE_CPUSET_H + +#include <rtems/score/basedefs.h> + +#ifdef __RTEMS_HAVE_SYS_CPUSET_H__ + +#include <sys/cpuset.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreCpuset SuperCore CPU Set + * + * @ingroup Score + * + * This handler encapsulates functionality which is used in the management + * of thread's CPU set. + */ +/**@{*/ + +/** + * The following defines the control block used to manage the cpuset. + * The names do not include affinity in the front in case the set is + * ever used for something other than affinity. 
The usage in thread + * uses the attribute affinity such that accesses will read + * thread->affinity.set. + */ +typedef struct { + /** This is the size of the set */ + size_t setsize; + /** This is the preallocated space to store the set */ + cpu_set_t preallocated; + /** This is a pointer to the set in use */ + cpu_set_t *set; +} CPU_set_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* __RTEMS_HAVE_SYS_CPUSET_H__ */ + +#endif /* _RTEMS_SCORE_CPUSET_H */ +/* end of include file */ diff --git a/include/rtems/score/cpusetimpl.h b/include/rtems/score/cpusetimpl.h new file mode 100644 index 0000000000..226c3a1806 --- /dev/null +++ b/include/rtems/score/cpusetimpl.h @@ -0,0 +1,144 @@ +/** + * @file + * + * @brief Implementation Helper for CPU Set + * + * This file contains the implementation helpers inlines and prototypes for + * CPU set methods. + */ + +/* + * COPYRIGHT (c) 2014. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_CPUSETIMPL_H +#define _RTEMS_SCORE_CPUSETIMPL_H + +#include <rtems/score/cpuset.h> +#include <rtems/score/smp.h> + +#include <limits.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __RTEMS_HAVE_SYS_CPUSET_H__ + +/** + * @brief Determine If the CPU Set if Valid + * + * This routine validates a cpuset size corresponds to the system + * correct size, that at least one valid cpu is set and that no invalid + * cpus are set. + * + * @param[in] cpuset is the cpuset to validate + * @param[in] setsize is the number of CPUs in the cpuset + * + * @return true if the set is valid + */ +bool _CPU_set_Is_valid( const cpu_set_t *cpuset, size_t setsize ); + +/** + * @brief Print the CPU Set + * + * This routine will print the value of the given cpuset. + * + * @param[in] description is a string to print before the value. 
@param[in] cpuset is the cpuset to print
@param[in] setsize is the size of @a cpuset in bytes
 + * @param[in] cpuset is the source cpuset copied into the control block
 + * @param[in] set is the CPU_set_Control to fill in
See also: + * + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932 + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60940 + */ + #include <atomic> + #define _RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC + #else + #include <stdatomic.h> + #define _RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC + #endif +#else + #include <rtems/score/isrlevel.h> +#endif + +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + +typedef std::atomic_uint CPU_atomic_Uint; + +typedef std::atomic_ulong CPU_atomic_Ulong; + +typedef std::atomic_uintptr_t CPU_atomic_Uintptr; + +typedef std::atomic_flag CPU_atomic_Flag; + +typedef std::memory_order CPU_atomic_Order; + +#define CPU_ATOMIC_ORDER_RELAXED std::memory_order_relaxed + +#define CPU_ATOMIC_ORDER_ACQUIRE std::memory_order_acquire + +#define CPU_ATOMIC_ORDER_RELEASE std::memory_order_release + +#define CPU_ATOMIC_ORDER_ACQ_REL std::memory_order_acq_rel + +#define CPU_ATOMIC_ORDER_SEQ_CST std::memory_order_seq_cst + +#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value ) + +#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value ) + +#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value ) + +#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT + +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + +typedef atomic_uint CPU_atomic_Uint; + +typedef atomic_ulong CPU_atomic_Ulong; + +typedef atomic_uintptr_t CPU_atomic_Uintptr; + +typedef atomic_flag CPU_atomic_Flag; + +typedef memory_order CPU_atomic_Order; + +#define CPU_ATOMIC_ORDER_RELAXED memory_order_relaxed + +#define CPU_ATOMIC_ORDER_ACQUIRE memory_order_acquire + +#define CPU_ATOMIC_ORDER_RELEASE memory_order_release + +#define CPU_ATOMIC_ORDER_ACQ_REL memory_order_acq_rel + +#define CPU_ATOMIC_ORDER_SEQ_CST memory_order_seq_cst + +#define CPU_ATOMIC_INITIALIZER_UINT( value ) ATOMIC_VAR_INIT( value ) + +#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ATOMIC_VAR_INIT( value ) + +#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ATOMIC_VAR_INIT( value ) + 
+#define CPU_ATOMIC_INITIALIZER_FLAG ATOMIC_FLAG_INIT + +#else + +typedef unsigned int CPU_atomic_Uint; + +typedef unsigned long CPU_atomic_Ulong; + +typedef uintptr_t CPU_atomic_Uintptr; + +typedef bool CPU_atomic_Flag; + +typedef int CPU_atomic_Order; + +#define CPU_ATOMIC_ORDER_RELAXED 0 + +#define CPU_ATOMIC_ORDER_ACQUIRE 2 + +#define CPU_ATOMIC_ORDER_RELEASE 3 + +#define CPU_ATOMIC_ORDER_ACQ_REL 4 + +#define CPU_ATOMIC_ORDER_SEQ_CST 5 + +#define CPU_ATOMIC_INITIALIZER_UINT( value ) ( value ) + +#define CPU_ATOMIC_INITIALIZER_ULONG( value ) ( value ) + +#define CPU_ATOMIC_INITIALIZER_UINTPTR( value ) ( value ) + +#define CPU_ATOMIC_INITIALIZER_FLAG false + +#endif + +static inline void _CPU_atomic_Fence( CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + std::atomic_thread_fence( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_thread_fence( order ); +#else + (void) order; + RTEMS_COMPILER_MEMORY_BARRIER(); +#endif +} + +static inline void _CPU_atomic_Init_uint( CPU_atomic_Uint *obj, unsigned int desired ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_init( obj, desired ); +#else + *obj = desired; +#endif +} + +static inline void _CPU_atomic_Init_ulong( CPU_atomic_Ulong *obj, unsigned long desired ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_init( obj, desired ); +#else + *obj = desired; +#endif +} + +static inline void _CPU_atomic_Init_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_init( obj, desired ); +#else + *obj = desired; +#endif +} + +static inline unsigned int _CPU_atomic_Load_uint( const CPU_atomic_Uint *obj, CPU_atomic_Order order ) +{ +#if 
defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->load( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_load_explicit( obj, order ); +#else + unsigned int val; + + (void) order; + val = *obj; + RTEMS_COMPILER_MEMORY_BARRIER(); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Load_ulong( const CPU_atomic_Ulong *obj, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->load( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_load_explicit( obj, order ); +#else + unsigned long val; + + (void) order; + val = *obj; + RTEMS_COMPILER_MEMORY_BARRIER(); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Load_uintptr( const CPU_atomic_Uintptr *obj, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->load( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_load_explicit( obj, order ); +#else + uintptr_t val; + + (void) order; + val = *obj; + RTEMS_COMPILER_MEMORY_BARRIER(); + + return val; +#endif +} + +static inline void _CPU_atomic_Store_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_store_explicit( obj, desired, order ); +#else + (void) order; + RTEMS_COMPILER_MEMORY_BARRIER(); + *obj = desired; +#endif +} + +static inline void _CPU_atomic_Store_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_store_explicit( obj, desired, order ); +#else + (void) order; + RTEMS_COMPILER_MEMORY_BARRIER(); + *obj = desired; +#endif +} + +static inline void _CPU_atomic_Store_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, 
CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->store( desired ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_store_explicit( obj, desired, order ); +#else + (void) order; + RTEMS_COMPILER_MEMORY_BARRIER(); + *obj = desired; +#endif +} + +static inline unsigned int _CPU_atomic_Fetch_add_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_add( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_add_explicit( obj, arg, order ); +#else + unsigned int val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val + arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Fetch_add_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_add( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_add_explicit( obj, arg, order ); +#else + unsigned long val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val + arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Fetch_add_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_add( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_add_explicit( obj, arg, order ); +#else + uintptr_t val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val + arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned int _CPU_atomic_Fetch_sub_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return 
obj->fetch_sub( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_sub_explicit( obj, arg, order ); +#else + unsigned int val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val - arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Fetch_sub_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_sub( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_sub_explicit( obj, arg, order ); +#else + unsigned long val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val - arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Fetch_sub_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_sub( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_sub_explicit( obj, arg, order ); +#else + uintptr_t val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val - arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned int _CPU_atomic_Fetch_or_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_or( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_or_explicit( obj, arg, order ); +#else + unsigned int val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val | arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Fetch_or_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + 
return obj->fetch_or( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_or_explicit( obj, arg, order ); +#else + unsigned long val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val | arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Fetch_or_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_or( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_or_explicit( obj, arg, order ); +#else + uintptr_t val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val | arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned int _CPU_atomic_Fetch_and_uint( CPU_atomic_Uint *obj, unsigned int arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_and( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_and_explicit( obj, arg, order ); +#else + unsigned int val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val & arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Fetch_and_ulong( CPU_atomic_Ulong *obj, unsigned long arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->fetch_and( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_and_explicit( obj, arg, order ); +#else + unsigned long val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val & arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Fetch_and_uintptr( CPU_atomic_Uintptr *obj, uintptr_t arg, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) 
+ return obj->fetch_and( arg, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_fetch_and_explicit( obj, arg, order ); +#else + uintptr_t val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = val & arg; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned int _CPU_atomic_Exchange_uint( CPU_atomic_Uint *obj, unsigned int desired, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->exchange( desired, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_exchange_explicit( obj, desired, order ); +#else + unsigned int val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = desired; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline unsigned long _CPU_atomic_Exchange_ulong( CPU_atomic_Ulong *obj, unsigned long desired, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->exchange( desired, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_exchange_explicit( obj, desired, order ); +#else + unsigned long val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = desired; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline uintptr_t _CPU_atomic_Exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t desired, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->exchange( desired, order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_exchange_explicit( obj, desired, order ); +#else + uintptr_t val; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + val = *obj; + *obj = desired; + _ISR_Enable( level ); + + return val; +#endif +} + +static inline bool _CPU_atomic_Compare_exchange_uint( CPU_atomic_Uint *obj, unsigned int *expected, unsigned int desired, CPU_atomic_Order succ, 
CPU_atomic_Order fail ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->compare_exchange_strong( *expected, desired, succ, fail ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail ); +#else + bool success; + ISR_Level level; + unsigned int actual; + + (void) succ; + (void) fail; + _ISR_Disable( level ); + actual = *obj; + success = ( actual == *expected ); + if ( success ) { + *obj = desired; + } else { + *expected = actual; + } + _ISR_Enable( level ); + + return success; +#endif +} + +static inline bool _CPU_atomic_Compare_exchange_ulong( CPU_atomic_Ulong *obj, unsigned long *expected, unsigned long desired, CPU_atomic_Order succ, CPU_atomic_Order fail ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->compare_exchange_strong( *expected, desired, succ, fail ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail ); +#else + bool success; + ISR_Level level; + unsigned long actual; + + (void) succ; + (void) fail; + _ISR_Disable( level ); + actual = *obj; + success = ( actual == *expected ); + if ( success ) { + *obj = desired; + } else { + *expected = actual; + } + _ISR_Enable( level ); + + return success; +#endif +} + +static inline bool _CPU_atomic_Compare_exchange_uintptr( CPU_atomic_Uintptr *obj, uintptr_t *expected, uintptr_t desired, CPU_atomic_Order succ, CPU_atomic_Order fail ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->compare_exchange_strong( *expected, desired, succ, fail ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_compare_exchange_strong_explicit( obj, expected, desired, succ, fail ); +#else + bool success; + ISR_Level level; + uintptr_t actual; + + (void) succ; + (void) fail; + _ISR_Disable( level ); + actual = *obj; + success = ( actual == *expected ); + if ( success ) { + *obj = 
desired; + } else { + *expected = actual; + } + _ISR_Enable( level ); + + return success; +#endif +} + +static inline void _CPU_atomic_Flag_clear( CPU_atomic_Flag *obj, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + obj->clear( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + atomic_flag_clear_explicit( obj, order ); +#else + (void) order; + *obj = false; +#endif +} + +static inline bool _CPU_atomic_Flag_test_and_set( CPU_atomic_Flag *obj, CPU_atomic_Order order ) +{ +#if defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_ATOMIC) + return obj->test_and_set( order ); +#elif defined(_RTEMS_SCORE_CPUSTDATOMIC_USE_STDATOMIC) + return atomic_flag_test_and_set_explicit( obj, order ); +#else + bool flag; + ISR_Level level; + + (void) order; + _ISR_Disable( level ); + flag = *obj; + *obj = true; + _ISR_Enable( level ); + + return flag; +#endif +} + +#endif /* _RTEMS_SCORE_CPUSTDATOMIC_H */ diff --git a/include/rtems/score/epiphany-utility.h b/include/rtems/score/epiphany-utility.h new file mode 100644 index 0000000000..bf223f93bf --- /dev/null +++ b/include/rtems/score/epiphany-utility.h @@ -0,0 +1,180 @@ +/** + * @file + * + * @ingroup ScoreCPU + * + * @brief This include file contains macros pertaining to the + * Epiphany processor family. + */ + +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _EPIPHANY_UTILITY_H +#define _EPIPHANY_UTILITY_H + +/* eCore IRQs */ +typedef enum +{ + START, + SW_EXCEPTION, + MEM_FAULT, + TIMER0, + TIMER1, + SMP_MESSAGE, + DMA0, + DMA1, + SER, +} EPIPHANY_IRQ_PER_CORE_T; + +/* Per-core IO mapped register addresses + * @see Epiphany architecture reference. 
+ */ +#define EPIPHANY_PER_CORE_REG_CONFIG 0xF0400 +#define EPIPHANY_PER_CORE_REG_STATUS 0xF0404 +#define EPIPHANY_PER_CORE_REG_PC 0xF0408 +#define EPIPHANY_PER_CORE_REG_DEBUGSTATUS 0xF040C +#define EPIPHANY_PER_CORE_REG_LC 0xF0414 +#define EPIPHANY_PER_CORE_REG_LS 0xF0418 +#define EPIPHANY_PER_CORE_REG_LE 0xF041C +#define EPIPHANY_PER_CORE_REG_IRET 0xF0420 +#define EPIPHANY_PER_CORE_REG_IMASK 0xF0424 +#define EPIPHANY_PER_CORE_REG_ILAT 0xF0428 +#define EPIPHANY_PER_CORE_REG_ILATST 0xF042C +#define EPIPHANY_PER_CORE_REG_ILATCL 0xF0430 +#define EPIPHANY_PER_CORE_REG_IPEND 0xF0434 +#define EPIPHANY_PER_CORE_REG_FSTATUS 0xF0440 +#define EPIPHANY_PER_CORE_REG_DEBUGCMD 0xF0448 +#define EPIPHANY_PER_CORE_REG_RESETCORE 0xF070C + +/* Event timer registers */ +#define EPIPHANY_PER_CORE_REG_CTIMER0 0xF0438 +#define EPIPHANY_PER_CORE_REG_CTIMER1 0xF043C + +/* Processor control registers */ +#define EPIPHANY_PER_CORE_REG_MEMSTATUS 0xF0604 +#define EPIPHANY_PER_CORE_REG_MEMPROTECT 0xF0608 + +/* DMA Registers */ +#define EPIPHANY_PER_CORE_REG_DMA0CONFIG 0xF0500 +#define EPIPHANY_PER_CORE_REG_DMA0STRIDE 0xF0504 +#define EPIPHANY_PER_CORE_REG_DMA0COUNT 0xF0508 +#define EPIPHANY_PER_CORE_REG_DMA0SRCADDR 0xF050C +#define EPIPHANY_PER_CORE_REG_DMA0DSTADDR 0xF0510 +#define EPIPHANY_PER_CORE_REG_DMA0AUTO0 0xF0514 +#define EPIPHANY_PER_CORE_REG_DMA0AUTO1 0xF0518 +#define EPIPHANY_PER_CORE_REG_DMA0STATUS 0xF051C +#define EPIPHANY_PER_CORE_REG_DMA1CONFIG 0xF0520 +#define EPIPHANY_PER_CORE_REG_DMA1STRIDE 0xF0524 +#define EPIPHANY_PER_CORE_REG_DMA1COUNT 0xF0528 +#define EPIPHANY_PER_CORE_REG_DMA1SRCADDR 0xF052C +#define EPIPHANY_PER_CORE_REG_DMA1DSTADDR 0xF0530 +#define EPIPHANY_PER_CORE_REG_DMA1AUTO0 0xF0534 +#define EPIPHANY_PER_CORE_REG_DMA1AUTO1 0xF0538 +#define EPIPHANY_PER_CORE_REG_DMA1STATUS 0xF053C + +/* Mesh Node Control Registers */ +#define EPIPHANY_PER_CORE_REG_MESHCONFIG 0xF0700 +#define EPIPHANY_PER_CORE_REG_COREID 0xF0704 +#define EPIPHANY_PER_CORE_REG_MULTICAST 0xF0708 
+#define EPIPHANY_PER_CORE_REG_CMESHROUTE 0xF0710 +#define EPIPHANY_PER_CORE_REG_XMESHROUTE 0xF0714 +#define EPIPHANY_PER_CORE_REG_RMESHROUTE 0xF0718 + +/* This macros constructs an address space of epiphany cores + * from their IDs. + */ +#define EPIPHANY_COREID_TO_MSB_ADDR(id) (id) << 20 + +/* Construct a complete/absolute IO mapped address register from + * core ID and register name + */ +#define EPIPHANY_GET_REG_ABSOLUTE_ADDR(coreid, reg) \ + (EPIPHANY_COREID_TO_MSB_ADDR(coreid) | (reg)) + +#define EPIPHANY_REG(reg) (uint32_t *) (reg) + +/* Read register with its absolute address */ +static inline uint32_t read_epiphany_reg(volatile uint32_t reg_addr) +{ + return *(EPIPHANY_REG(reg_addr)); +} + +/* Write register with its abolute address */ +static inline void write_epiphany_reg(volatile uint32_t reg_addr, uint32_t val) +{ + *(EPIPHANY_REG(reg_addr)) = val; +} + +/* Epiphany uses 12 bits for defining core IDs, while RTEMS uses + * linear IDs. The following function converts RTEMS linear IDs to + * Epiphany corresponding ones + */ +static const uint32_t map[16] = +{ + 0x808, 0x809, 0x80A, 0x80B, + 0x848, 0x849, 0x84A, 0x84B, + 0x888, 0x889, 0x88A, 0x88B, + 0x8C8, 0x8C9, 0x8CA, 0x8CB +}; + +static inline uint32_t rtems_coreid_to_epiphany_map(uint32_t rtems_id) +{ + return map[rtems_id]; +} + +/* Epiphany uses 12 bits for defining core IDs, while RTEMS uses + * linear IDs. The following function is used to map Epiphany IDs to + * RTEMS linear IDs. 
+ */ +static inline uint32_t epiphany_coreid_to_rtems_map(uint32_t epiphany_id) +{ + register uint32_t coreid asm ("r17") = epiphany_id; + + /* Mapping from Epiphany IDs to 0-16 IDs macro */ + __asm__ __volatile__(" \ + movfs r17, coreid \ + mov r19, #0x003 \ + mov r20, #0x0F0 \ + and r19, r17, r19 \ + and r20, r17, r20 \ + lsr r20, r20, #4 \ + add r17, r19, r20 \ + "); + + /* coreid or r17 now holds the rtems core id */ + return coreid; +} + +static inline uint32_t _Epiphany_Get_current_processor() +{ + uint32_t coreid; + + asm volatile ("movfs %0, coreid" : "=r" (coreid): ); + + return epiphany_coreid_to_rtems_map(coreid); +} +#endif /* _EPIPHANY_UTILITY_H */ diff --git a/include/rtems/score/epiphany.h b/include/rtems/score/epiphany.h new file mode 100644 index 0000000000..60d975581f --- /dev/null +++ b/include/rtems/score/epiphany.h @@ -0,0 +1,64 @@ +/** + * @file rtems/score/epiphany.h + */ + +/* + * Copyright (c) 2015 University of York. + * Hesham ALMatary <hmka501@york.ac.uk> + * + * COPYRIGHT (c) 1989-1999, 2010. + * On-Line Applications Research Corporation (OAR). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _RTEMS_SCORE_EPIPHANY_H +#define _RTEMS_SCORE_EPIPHANY_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the Epiphany family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + + /* + * Define the name of the CPU family and specific model. + */ + +#define CPU_NAME "EPIPHANY" +#define CPU_MODEL_NAME "EPIPHANY" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_EPIPHANY_H */ diff --git a/include/rtems/score/freechain.h b/include/rtems/score/freechain.h new file mode 100644 index 0000000000..1540c0e2a1 --- /dev/null +++ b/include/rtems/score/freechain.h @@ -0,0 +1,111 @@ +/** + * @file + * + * @ingroup ScoreFreechain + * + * @brief Freechain Handler API + */ +/* + * Copyright (c) 2013 Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_FREECHAIN_H +#define _RTEMS_SCORE_FREECHAIN_H + +#include <rtems/score/basedefs.h> +#include <rtems/score/chain.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreFreechain Freechain Handler + * + * @ingroup Score + * + * The Freechain Handler is used to manage a chain of nodes, of which size can + * automatically increase when there is no free node left. This handler + * provides one data structure: Freechain_Control. + * + * @{ + */ + +/** + * @brief Allocator function. + */ +typedef void *( *Freechain_Allocator )( size_t size ); + +/** + * @brief The freechain control. + */ +typedef struct { + /** + * @brief Chain of free nodes. + */ + Chain_Control Free; +} Freechain_Control; + +/** + * @brief Initializes a freechain. + * + * This routine initializes the freechain control structure to manage a chain + * of nodes. In case the freechain is empty the extend handler is called to + * get more nodes. + * + * @param[in] freechain The freechain control to initialize. + * @param[in] allocator The allocator function. + * @param[in] number_nodes The initial number of nodes. + * @param[in] node_size The node size. + */ +void _Freechain_Initialize( + Freechain_Control *freechain, + Freechain_Allocator allocator, + size_t number_nodes, + size_t node_size +); + +/** + * @brief Gets a node from the freechain. + * + * @param[in] freechain The freechain control. + * @param[in] allocator The allocator function. + * @param[in] number_nodes_to_extend The number of nodes in case an extend is + * necessary due to an empty freechain. + * @param[in] node_size The node size. + * + * @retval NULL The freechain is empty and the extend operation failed. + * @retval otherwise Pointer to a node. The node ownership passes to the + * caller. + */ +void *_Freechain_Get( + Freechain_Control *freechain, + Freechain_Allocator allocator, + size_t number_nodes_to_extend, + size_t node_size +); + +/** + * @brief Puts a node back onto the freechain. 
+ * + * @param[in] freechain The freechain control. + * @param[in] node The node to put back. The node may be @c NULL, in this case + * the function does nothing. + */ +void _Freechain_Put( + Freechain_Control *freechain, + void *node +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/h8300.h b/include/rtems/score/h8300.h new file mode 100644 index 0000000000..e30343cb84 --- /dev/null +++ b/include/rtems/score/h8300.h @@ -0,0 +1,44 @@ +/** + * @file + * + * @brief Information Required to Build RTEMS for a Particular Member + * of the Hitachi H8/300 Family + * + * This file contains information pertaining to the Hitachi H8/300 + * processor family. + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_H8300_H +#define _RTEMS_SCORE_H8300_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the "h8300" + * family when executing in protected mode. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + */ + +#define CPU_NAME "Hitachi H8300" +#define CPU_MODEL_NAME "h8300" +#define H8300_HAS_FPU 0 + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/heap.h b/include/rtems/score/heap.h new file mode 100644 index 0000000000..5db22cf2ed --- /dev/null +++ b/include/rtems/score/heap.h @@ -0,0 +1,517 @@ +/** + * @file + * + * @ingroup ScoreHeap + * + * @brief Heap Handler API + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_HEAP_H +#define _RTEMS_SCORE_HEAP_H + +#include <rtems/score/cpu.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef RTEMS_DEBUG + #define HEAP_PROTECTION +#endif + +/** + * @defgroup ScoreHeap Heap Handler + * + * @ingroup Score + * + * @brief The Heap Handler provides a heap. + * + * A heap is a doubly linked list of variable size blocks which are allocated + * using the first fit method. Garbage collection is performed each time a + * block is returned to the heap by coalescing neighbor blocks. Control + * information for both allocated and free blocks is contained in the heap + * area. A heap control structure contains control information for the heap. + * + * The alignment routines could be made faster should we require only powers of + * two to be supported for page size, alignment and boundary arguments. The + * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this + * value is only required to be multiple of two and explicitly not required to + * be a power of two. + * + * There are two kinds of blocks. One sort describes a free block from which + * we can allocate memory. The other blocks are used and provide an allocated + * memory area. The free blocks are accessible via a list of free blocks. + * + * Blocks or areas cover a continuous set of memory addresses. They have a + * begin and end address. The end address is not part of the set. The size of + * a block or area equals the distance between the begin and end address in + * units of bytes. 
+ * + * Free blocks look like: + * <table> + * <tr> + * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the + * previous block is free, <br> otherwise it may contain data used by + * the previous block</td> + * </tr> + * <tr> + * <td>block size and a flag which indicates if the previous block is free + * or used, <br> this field contains always valid data regardless of the + * block usage</td> + * </tr> + * <tr><td>pointer to next block (this field is page size aligned)</td></tr> + * <tr><td>pointer to previous block</td></tr> + * <tr><td colspan=2>free space</td></tr> + * </table> + * + * Used blocks look like: + * <table> + * <tr> + * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the + * previous block is free,<br>otherwise it may contain data used by + * the previous block</td> + * </tr> + * <tr> + * <td>block size and a flag which indicates if the previous block is free + * or used, <br> this field contains always valid data regardless of the + * block usage</td> + * </tr> + * <tr><td>begin of allocated area (this field is page size aligned)</td></tr> + * <tr><td>allocated space</td></tr> + * <tr><td colspan=2>allocated space</td></tr> + * </table> + * + * The heap area after initialization contains two blocks and looks like: + * <table> + * <tr><th>Label</th><th colspan=2>Content</th></tr> + * <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr> + * <tr> + * <td>first_block->prev_size</td> + * <td colspan=2> + * subordinate heap area end address (this will be used to maintain a + * linked list of scattered heap areas) + * </td> + * </tr> + * <tr> + * <td>first_block->size</td> + * <td colspan=2>size available for allocation + * | @c HEAP_PREV_BLOCK_USED</td> + * </tr> + * <tr> + * <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td> + * <td rowspan=3>memory area available for allocation</td> + * </tr> + * <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr> + * 
<tr><td>...</td></tr> + * <tr> + * <td>last_block->prev_size</td><td colspan=2>size of first block</td> + * </tr> + * <tr> + * <td>last_block->size</td> + * <td colspan=2>first block begin address - last block begin address</td> + * </tr> + * <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr> + * </table> + * The next block of the last block is the first block. Since the first + * block indicates that the previous block is used, this ensures that the + * last block appears as used for the _Heap_Is_used() and _Heap_Is_free() + * functions. + */ +/**@{**/ + +typedef struct Heap_Control Heap_Control; + +typedef struct Heap_Block Heap_Block; + +#ifndef HEAP_PROTECTION + #define HEAP_PROTECTION_HEADER_SIZE 0 +#else + #define HEAP_PROTECTOR_COUNT 2 + + #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f) + #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177) + #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e) + #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b) + + #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf) + + #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1) + + typedef void (*_Heap_Protection_handler)( + Heap_Control *heap, + Heap_Block *block + ); + + typedef struct { + _Heap_Protection_handler block_initialize; + _Heap_Protection_handler block_check; + _Heap_Protection_handler block_error; + void *handler_data; + Heap_Block *first_delayed_free_block; + Heap_Block *last_delayed_free_block; + uintptr_t delayed_free_block_count; + uintptr_t delayed_free_fraction; + } Heap_Protection; + + typedef struct { + uintptr_t protector [HEAP_PROTECTOR_COUNT]; + Heap_Block *next_delayed_free_block; + Thread_Control *task; + void *tag; + } Heap_Protection_block_begin; + + typedef struct { + uintptr_t protector [HEAP_PROTECTOR_COUNT]; + } Heap_Protection_block_end; + + #define HEAP_PROTECTION_HEADER_SIZE \ + (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end)) +#endif + +/** + * @brief The block header consists of the 
two size fields + * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag). + */ +#define HEAP_BLOCK_HEADER_SIZE \ + (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE) + +/** + * @brief Description for free or used blocks. + */ +struct Heap_Block { + /** + * @brief Size of the previous block or part of the allocated area of the + * previous block. + * + * This field is only valid if the previous block is free. This case is + * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the + * @a size_and_flag field of the current block. + * + * In a used block only the @a size_and_flag field needs to be valid. The + * @a prev_size field of the current block is maintained by the previous + * block. The current block can use the @a prev_size field in the next block + * for allocation. + */ + uintptr_t prev_size; + + #ifdef HEAP_PROTECTION + Heap_Protection_block_begin Protection_begin; + #endif + + /** + * @brief Contains the size of the current block and a flag which indicates + * if the previous block is free or used. + * + * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is + * used, otherwise the previous block is free. A used previous block may + * claim the @a prev_size field for allocation. This trick allows to + * decrease the overhead in the used blocks by the size of the @a prev_size + * field. As sizes are required to be multiples of two, the least + * significant bits would be always zero. We use this bit to store the flag. + * + * This field is always valid. + */ + uintptr_t size_and_flag; + + #ifdef HEAP_PROTECTION + Heap_Protection_block_end Protection_end; + #endif + + /** + * @brief Pointer to the next free block or part of the allocated area. + * + * This field is page size aligned and begins of the allocated area in case + * the block is used. + * + * This field is only valid if the block is free and thus part of the free + * block list. 
+ */ + Heap_Block *next; + + /** + * @brief Pointer to the previous free block or part of the allocated area. + * + * This field is only valid if the block is free and thus part of the free + * block list. + */ + Heap_Block *prev; +}; + +/** + * @brief Run-time heap statistics. + * + * The value @a searches / @a allocs gives the mean number of searches per + * allocation, while @a max_search gives maximum number of searches ever + * performed on a single allocation call. + */ +typedef struct { + /** + * @brief Lifetime number of bytes allocated from this heap. + * + * This value is an integral multiple of the page size. + */ + uint64_t lifetime_allocated; + + /** + * @brief Lifetime number of bytes freed to this heap. + * + * This value is an integral multiple of the page size. + */ + uint64_t lifetime_freed; + + /** + * @brief Size of the allocatable area in bytes. + * + * This value is an integral multiple of the page size. + */ + uintptr_t size; + + /** + * @brief Current free size in bytes. + * + * This value is an integral multiple of the page size. + */ + uintptr_t free_size; + + /** + * @brief Minimum free size ever in bytes. + * + * This value is an integral multiple of the page size. + */ + uintptr_t min_free_size; + + /** + * @brief Current number of free blocks. + */ + uint32_t free_blocks; + + /** + * @brief Maximum number of free blocks ever. + */ + uint32_t max_free_blocks; + + /** + * @brief Current number of used blocks. + */ + uint32_t used_blocks; + + /** + * @brief Maximum number of blocks searched ever. + */ + uint32_t max_search; + + /** + * @brief Total number of searches. + */ + uint32_t searches; + + /** + * @brief Total number of successful allocations. + */ + uint32_t allocs; + + /** + * @brief Total number of failed allocations. + */ + uint32_t failed_allocs; + + /** + * @brief Total number of successful frees. + */ + uint32_t frees; + + /** + * @brief Total number of successful resizes. 
+ */ + uint32_t resizes; +} Heap_Statistics; + +/** + * @brief Control block used to manage a heap. + */ +struct Heap_Control { + Heap_Block free_list; + uintptr_t page_size; + uintptr_t min_block_size; + uintptr_t area_begin; + uintptr_t area_end; + Heap_Block *first_block; + Heap_Block *last_block; + Heap_Statistics stats; + #ifdef HEAP_PROTECTION + Heap_Protection Protection; + #endif +}; + +/** + * @brief Information about blocks. + */ +typedef struct { + /** + * @brief Number of blocks of this type. + */ + uint32_t number; + + /** + * @brief Largest block of this type. + */ + uint32_t largest; + + /** + * @brief Total size of the blocks of this type. + */ + uint32_t total; +} Heap_Information; + +/** + * @brief Information block returned by _Heap_Get_information(). + */ +typedef struct { + Heap_Information Free; + Heap_Information Used; + Heap_Statistics Stats; +} Heap_Information_block; + +/** + * @brief Heap area structure for table based heap initialization and + * extension. + * + * @see Heap_Initialization_or_extend_handler. + */ +typedef struct { + void *begin; + uintptr_t size; +} Heap_Area; + +/** + * @brief Heap initialization and extend handler type. + * + * This helps to do a table based heap initialization and extension. Create a + * table of Heap_Area elements and iterate through it. Set the handler to + * _Heap_Initialize() in the first iteration and then to _Heap_Extend(). + * + * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend(). + */ +typedef uintptr_t (*Heap_Initialization_or_extend_handler)( + Heap_Control *heap, + void *area_begin, + uintptr_t area_size, + uintptr_t page_size_or_unused +); + +/** + * @brief Extends the memory available for the heap @a heap using the memory + * area starting at @a area_begin of size @a area_size bytes. + * + * There are no alignment requirements. The memory area must be big enough to + * contain some maintainance blocks. It must not overlap parts of the current + * heap areas. 
Disconnected subordinate heap areas will lead to used blocks + * which cover the gaps. Extending with an inappropriate memory area will + * corrupt the heap. + * + * The unused fourth parameter is provided to have the same signature as + * _Heap_Initialize(). + * + * Returns the extended space available for allocation, or zero in case of failure. + * + * @see Heap_Initialization_or_extend_handler. + */ +uintptr_t _Heap_Extend( + Heap_Control *heap, + void *area_begin, + uintptr_t area_size, + uintptr_t unused +); + +/** + * @brief This function returns always zero. + * + * This function only returns zero and does nothing else. + * + * Returns always zero. + * + * @see Heap_Initialization_or_extend_handler. + */ +uintptr_t _Heap_No_extend( + Heap_Control *unused_0, + void *unused_1, + uintptr_t unused_2, + uintptr_t unused_3 +); + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up( + uintptr_t value, + uintptr_t alignment +) +{ + uintptr_t remainder = value % alignment; + + if ( remainder != 0 ) { + return value - remainder + alignment; + } else { + return value; + } +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size ) +{ + return _Heap_Align_up( sizeof( Heap_Block ), page_size ); +} + +/** + * @brief Returns the worst case overhead to manage a memory area. + */ +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead( + uintptr_t page_size +) +{ + if ( page_size != 0 ) { + page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT ); + } else { + page_size = CPU_ALIGNMENT; + } + + return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE; +} + +/** + * @brief Returns the size with administration and alignment overhead for one + * allocation. 
+ */ +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead( + uintptr_t page_size, + uintptr_t size, + uintptr_t alignment +) +{ + if ( page_size != 0 ) { + page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT ); + } else { + page_size = CPU_ALIGNMENT; + } + + if ( page_size < alignment ) { + page_size = alignment; + } + + return HEAP_BLOCK_HEADER_SIZE + page_size - 1 + size; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/heapimpl.h b/include/rtems/score/heapimpl.h new file mode 100644 index 0000000000..a8948edd6f --- /dev/null +++ b/include/rtems/score/heapimpl.h @@ -0,0 +1,601 @@ +/** + * @file + * + * @ingroup ScoreHeap + * + * @brief Heap Handler Implementation + */ + +/* + * COPYRIGHT (c) 1989-2008. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_HEAPIMPL_H +#define _RTEMS_SCORE_HEAPIMPL_H + +#include <rtems/score/heap.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreHeap + */ +/**@{**/ + +/** + * @brief See also @ref Heap_Block.size_and_flag. + */ +#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1) + +/** + * @brief Size of the part at the block begin which may be used for allocation + * in charge of the previous block. + */ +#define HEAP_ALLOC_BONUS sizeof(uintptr_t) + +/** + * @brief See _Heap_Resize_block(). + */ +typedef enum { + HEAP_RESIZE_SUCCESSFUL, + HEAP_RESIZE_UNSATISFIED, + HEAP_RESIZE_FATAL_ERROR +} Heap_Resize_status; + +/** + * @brief Gets the first and last block for the heap area with begin + * @a heap_area_begin and size @a heap_area_size. + * + * A page size of @a page_size and minimal block size of @a min_block_size will + * be used for calculation. + * + * Nothing will be written to this area. 
+ * + * In case of success the pointers to the first and last block will be returned + * via @a first_block_ptr and @a last_block_ptr. + * + * Returns @c true if the area is big enough, and @c false otherwise. + */ +bool _Heap_Get_first_and_last_block( + uintptr_t heap_area_begin, + uintptr_t heap_area_size, + uintptr_t page_size, + uintptr_t min_block_size, + Heap_Block **first_block_ptr, + Heap_Block **last_block_ptr +); + +/** + * @brief Initializes the heap control block @a heap to manage the area + * starting at @a area_begin of size @a area_size bytes. + * + * Blocks of memory are allocated from the heap in multiples of @a page_size + * byte units. If the @a page_size is equal to zero or is not multiple of + * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary. + * + * Returns the maximum memory available, or zero in case of failure. + * + * @see Heap_Initialization_or_extend_handler. + */ +uintptr_t _Heap_Initialize( + Heap_Control *heap, + void *area_begin, + uintptr_t area_size, + uintptr_t page_size +); + +/** + * @brief Allocates a memory area of size @a size bytes from the heap @a heap. + * + * If the alignment parameter @a alignment is not equal to zero, the allocated + * memory area will begin at an address aligned by this value. + * + * If the boundary parameter @a boundary is not equal to zero, the allocated + * memory area will fulfill a boundary constraint. The boundary value + * specifies the set of addresses which are aligned by the boundary value. The + * interior of the allocated memory area will not contain an element of this + * set. The begin or end address of the area may be a member of the set. + * + * A size value of zero will return a unique address which may be freed with + * _Heap_Free(). + * + * Returns a pointer to the begin of the allocated memory area, or @c NULL if + * no memory is available or the parameters are inconsistent. 
+ */ +void *_Heap_Allocate_aligned_with_boundary( + Heap_Control *heap, + uintptr_t size, + uintptr_t alignment, + uintptr_t boundary +); + +/** + * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero. + */ +RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned( + Heap_Control *heap, + uintptr_t size, + uintptr_t alignment +) +{ + return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 ); +} + +/** + * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and + * boundary equals zero. + */ +RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size ) +{ + return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 ); +} + +/** + * @brief Frees the allocated memory area starting at @a addr in the heap + * @a heap. + * + * Inappropriate values for @a addr may corrupt the heap. + * + * Returns @c true in case of success, and @c false otherwise. + */ +bool _Heap_Free( Heap_Control *heap, void *addr ); + +/** + * @brief Walks the heap @a heap to verify its integrity. + * + * If @a dump is @c true, then diagnostic messages will be printed to standard + * output. In this case @a source is used to mark the output lines. + * + * Returns @c true if no errors occurred, and @c false if the heap is corrupt. + */ +bool _Heap_Walk( + Heap_Control *heap, + int source, + bool dump +); + +/** + * @brief Heap block visitor. + * + * @see _Heap_Iterate(). + * + * @retval true Stop the iteration. + * @retval false Continue the iteration. + */ +typedef bool (*Heap_Block_visitor)( + const Heap_Block *block, + uintptr_t block_size, + bool block_is_used, + void *visitor_arg +); + +/** + * @brief Iterates over all blocks of the heap. + * + * For each block the @a visitor with the argument @a visitor_arg will be + * called. + */ +void _Heap_Iterate( + Heap_Control *heap, + Heap_Block_visitor visitor, + void *visitor_arg +); + +/** + * @brief Greedy allocate that empties the heap. 
+ * + * Afterwards the heap has at most @a block_count allocatable blocks of sizes + * specified by @a block_sizes. The @a block_sizes must point to an array with + * @a block_count members. All other blocks are used. + * + * @see _Heap_Greedy_free(). + */ +Heap_Block *_Heap_Greedy_allocate( + Heap_Control *heap, + const uintptr_t *block_sizes, + size_t block_count +); + +/** + * @brief Greedy allocate all blocks except the largest free block. + * + * Afterwards the heap has at most one allocatable block. This block is the + * largest free block if it exists. The allocatable size of this block is + * stored in @a allocatable_size. All other blocks are used. + * + * @see _Heap_Greedy_free(). + */ +Heap_Block *_Heap_Greedy_allocate_all_except_largest( + Heap_Control *heap, + uintptr_t *allocatable_size +); + +/** + * @brief Frees blocks of a greedy allocation. + * + * The @a blocks must be the return value of _Heap_Greedy_allocate(). + */ +void _Heap_Greedy_free( + Heap_Control *heap, + Heap_Block *blocks +); + +/** + * @brief Returns information about used and free blocks for the heap @a heap + * in @a info. + */ +void _Heap_Get_information( + Heap_Control *heap, + Heap_Information_block *info +); + +/** + * @brief Returns information about free blocks for the heap @a heap in + * @a info. + */ +void _Heap_Get_free_information( + Heap_Control *heap, + Heap_Information *info +); + +/** + * @brief Returns the size of the allocatable memory area starting at @a addr + * in @a size. + * + * The size value may be greater than the initially requested size in + * _Heap_Allocate_aligned_with_boundary(). + * + * Inappropriate values for @a addr will not corrupt the heap, but may yield + * invalid size values. + * + * Returns @a true if successful, and @c false otherwise. + */ +bool _Heap_Size_of_alloc_area( + Heap_Control *heap, + void *addr, + uintptr_t *size +); + +/** + * @brief Resizes the block of the allocated memory area starting at @a addr. 
 *
 * The new memory area will have a size of at least @a size bytes. A resize
 * may be impossible and depends on the current heap usage.
 *
 * The size available for allocation in the current block before the resize
 * will be returned in @a old_size. The size available for allocation in
 * the resized block will be returned in @a new_size. If the resize was not
 * successful, then a value of zero will be returned in @a new_size.
 *
 * Inappropriate values for @a addr may corrupt the heap.
 */
Heap_Resize_status _Heap_Resize_block(
  Heap_Control *heap,
  void *addr,
  uintptr_t size,
  uintptr_t *old_size,
  uintptr_t *new_size
);

/**
 * @brief Allocates the memory area starting at @a alloc_begin of size
 * @a alloc_size bytes in the block @a block.
 *
 * The block may be split up into multiple blocks. The previous and next block
 * may be used or free. Free block parts which form a valid new block will be
 * inserted into the free list or merged with an adjacent free block. If the
 * block is used, they will be inserted after the free list head. If the block
 * is free, they will be inserted after the previous block in the free list.
 *
 * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
 * heap.
 *
 * Returns the block containing the allocated memory area.
 */
Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
);

/* Without HEAP_PROTECTION the protection hooks compile to no-ops, so the
 * regular heap paths carry no overhead in non-debug builds.
 */
#ifndef HEAP_PROTECTION
  #define _Heap_Protection_block_initialize( heap, block ) ((void) 0)
  #define _Heap_Protection_block_check( heap, block ) ((void) 0)
  #define _Heap_Protection_block_error( heap, block ) ((void) 0)
  #define _Heap_Protection_free_all_delayed_blocks( heap ) ((void) 0)
#else
  /* Each hook simply dispatches to the handler installed in the heap's
   * Protection control block.
   */
  static inline void _Heap_Protection_block_initialize(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_initialize)( heap, block );
  }

  static inline void _Heap_Protection_block_check(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_check)( heap, block );
  }

  static inline void _Heap_Protection_block_error(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_error)( heap, block );
  }

  static inline void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
  {
    /*
     * Request an impossibly large allocation: just small enough that
     * adding HEAP_BLOCK_HEADER_SIZE + HEAP_ALLOC_BONUS + 1 does not wrap
     * around.  Presumably the allocator reacts to the resulting memory
     * shortage by flushing the delayed free list before failing -- that
     * flush, not the allocation, is the purpose of this call.
     * NOTE(review): the allocation is expected to yield NULL; this relies
     * on _Heap_Free() tolerating a NULL address -- confirm against the
     * heap implementation.
     */
    uintptr_t large = 0
      - (uintptr_t) HEAP_BLOCK_HEADER_SIZE
      - (uintptr_t) HEAP_ALLOC_BONUS
      - (uintptr_t) 1;
    void *p = _Heap_Allocate( heap, large );
    _Heap_Free( heap, p );
  }
#endif

/**
 * @brief Sets the fraction of delayed free blocks that is actually freed
 * during memory shortage.
 *
 * The default is to free half the delayed free blocks. This is equal to a
 * fraction value of two.
 *
 * @param[in] heap The heap control.
 * @param[in] fraction The fraction is one divided by this fraction value.
+ */ +RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction( + Heap_Control *heap, + uintptr_t fraction +) +{ +#ifdef HEAP_PROTECTION + heap->Protection.delayed_free_fraction = fraction; +#else + (void) heap; + (void) fraction; +#endif +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap ) +{ + return &heap->free_list; +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap ) +{ + return &heap->free_list; +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap ) +{ + return _Heap_Free_list_head(heap)->next; +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap ) +{ + return _Heap_Free_list_tail(heap)->prev; +} + +RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block ) +{ + Heap_Block *next = block->next; + Heap_Block *prev = block->prev; + + prev->next = next; + next->prev = prev; +} + +RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace( + Heap_Block *old_block, + Heap_Block *new_block +) +{ + Heap_Block *next = old_block->next; + Heap_Block *prev = old_block->prev; + + new_block->next = next; + new_block->prev = prev; + + next->prev = new_block; + prev->next = new_block; +} + +RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after( + Heap_Block *block_before, + Heap_Block *new_block +) +{ + Heap_Block *next = block_before->next; + + new_block->next = next; + new_block->prev = block_before; + block_before->next = new_block; + next->prev = new_block; +} + +RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_before( + Heap_Block *block_next, + Heap_Block *new_block +) +{ + Heap_Block *prev = block_next->prev; + + new_block->next = block_next; + new_block->prev = prev; + prev->next = new_block; + block_next->prev = new_block; +} + +RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned( + uintptr_t value, + uintptr_t alignment +) +{ + return (value % alignment) == 0; +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down( + uintptr_t value, + uintptr_t 
alignment +) +{ + return value - (value % alignment); +} + +/** + * @brief Returns the block which is @a offset away from @a block. + */ +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at( + const Heap_Block *block, + uintptr_t offset +) +{ + return (Heap_Block *) ((uintptr_t) block + offset); +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block( + const Heap_Block *block +) +{ + return (Heap_Block *) ((uintptr_t) block - block->prev_size); +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block( + const Heap_Block *block +) +{ + return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE; +} + +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area( + uintptr_t alloc_begin, + uintptr_t page_size +) +{ + return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size ) + - HEAP_BLOCK_HEADER_SIZE); +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block ) +{ + return block->size_and_flag & ~HEAP_PREV_BLOCK_USED; +} + +RTEMS_INLINE_ROUTINE void _Heap_Block_set_size( + Heap_Block *block, + uintptr_t size +) +{ + uintptr_t flag = block->size_and_flag & HEAP_PREV_BLOCK_USED; + + block->size_and_flag = size | flag; +} + +RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block ) +{ + return block->size_and_flag & HEAP_PREV_BLOCK_USED; +} + +RTEMS_INLINE_ROUTINE bool _Heap_Is_used( + const Heap_Block *block +) +{ + const Heap_Block *const next_block = + _Heap_Block_at( block, _Heap_Block_size( block ) ); + + return _Heap_Is_prev_used( next_block ); +} + +RTEMS_INLINE_ROUTINE bool _Heap_Is_free( + const Heap_Block *block +) +{ + return !_Heap_Is_used( block ); +} + +RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap( + const Heap_Control *heap, + const Heap_Block *block +) +{ + return (uintptr_t) block >= (uintptr_t) heap->first_block + && (uintptr_t) block <= (uintptr_t) heap->last_block; +} + +/** + * @brief Sets the size of the last block for heap @a heap. + * + * The next block of the last block will be the first block. 
Since the first + * block indicates that the previous block is used, this ensures that the last + * block appears as used for the _Heap_Is_used() and _Heap_Is_free() + * functions. + * + * This feature will be used to terminate the scattered heap area list. See + * also _Heap_Extend(). + */ +RTEMS_INLINE_ROUTINE void _Heap_Set_last_block_size( Heap_Control *heap ) +{ + _Heap_Block_set_size( + heap->last_block, + (uintptr_t) heap->first_block - (uintptr_t) heap->last_block + ); +} + +/** + * @brief Returns the size of the allocatable area in bytes. + * + * This value is an integral multiple of the page size. + */ +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap ) +{ + return heap->stats.size; +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b ) +{ + return a > b ? a : b; +} + +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b ) +{ + return a < b ? a : b; +} + +#ifdef RTEMS_DEBUG + #define RTEMS_HEAP_DEBUG +#endif + +#ifdef RTEMS_HEAP_DEBUG + #include <assert.h> + #define _HAssert( cond ) \ + do { \ + if ( !(cond) ) { \ + __assert( __FILE__, __LINE__, #cond ); \ + } \ + } while (0) +#else + #define _HAssert( cond ) ((void) 0) +#endif + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/i386.h b/include/rtems/score/i386.h new file mode 100644 index 0000000000..875526ad62 --- /dev/null +++ b/include/rtems/score/i386.h @@ -0,0 +1,261 @@ +/** + * @file + * + * @brief Intel I386 CPU Dependent Source + * + * This include file contains information pertaining to the Intel + * i386 processor. + */ + +/* + * COPYRIGHT (c) 1989-2013. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_I386_H +#define _RTEMS_SCORE_I386_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This section contains the information required to build + * RTEMS for a particular member of the Intel i386 + * family when executing in protected mode. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + * + * Currently recognized: + * i386_fp (i386 DX or SX w/i387) + * i486dx + * pentium + * pentiumpro + * + * CPU Model Feature Flags: + * + * I386_HAS_BSWAP: Defined to "1" if the instruction for endian swapping + * (bswap) should be used. This instruction appears to + * be present in all i486's and above. + * + * I386_HAS_FPU: Defined to "1" if the CPU has an FPU. + * As of at least gcc 4.7, i386 soft-float was obsoleted. + * Thus this is always set to "1". + */ +#define I386_HAS_FPU 1 + +#if defined(__pentiumpro__) + + #define CPU_MODEL_NAME "Pentium Pro" + +#elif defined(__i586__) + + #if defined(__pentium__) + #define CPU_MODEL_NAME "Pentium" + #elif defined(__k6__) + #define CPU_MODEL_NAME "K6" + #else + #define CPU_MODEL_NAME "i586" + #endif + +#elif defined(__i486__) + + #define CPU_MODEL_NAME "i486dx" + +#elif defined(__i386__) + + #define I386_HAS_BSWAP 0 + #define CPU_MODEL_NAME "i386 with i387" + +#else + #error "Unknown CPU Model" +#endif + +/* + * Set default values for CPU model feature flags + * + * NOTE: These settings are chosen to reflect most of the family members. + */ +#ifndef I386_HAS_BSWAP +#define I386_HAS_BSWAP 1 +#endif + +/* + * Define the name of the CPU family. + */ +#define CPU_NAME "Intel i386" + +#ifndef ASM + +/* + * The following routine swaps the endian format of an unsigned int. + * It must be static so it can be referenced indirectly. 
+ */ + +static inline uint32_t i386_swap_u32( + uint32_t value +) +{ + uint32_t lout; + +#if (I386_HAS_BSWAP == 0) + __asm__ volatile( "rorw $8,%%ax;" + "rorl $16,%0;" + "rorw $8,%%ax" : "=a" (lout) : "0" (value) ); +#else + __asm__ volatile( "bswap %0" : "=r" (lout) : "0" (value)); +#endif + return( lout ); +} + +static inline uint16_t i386_swap_u16( + uint16_t value +) +{ + unsigned short sout; + + __asm__ volatile( "rorw $8,%0" : "=r" (sout) : "0" (value)); + return (sout); +} + +/* + * Added for pagination management + */ +static inline unsigned int i386_get_cr0(void) +{ + register unsigned int segment = 0; + + __asm__ volatile ( "movl %%cr0,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline void i386_set_cr0(unsigned int segment) +{ + __asm__ volatile ( "movl %0,%%cr0" : "=r" (segment) : "0" (segment) ); +} + +static inline unsigned int i386_get_cr2(void) +{ + register unsigned int segment = 0; + + __asm__ volatile ( "movl %%cr2,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline unsigned int i386_get_cr3(void) +{ + register unsigned int segment = 0; + + __asm__ volatile ( "movl %%cr3,%0" : "=r" (segment) : "0" (segment) ); + + return segment; +} + +static inline void i386_set_cr3(unsigned int segment) +{ + __asm__ volatile ( "movl %0,%%cr3" : "=r" (segment) : "0" (segment) ); +} + +/* routines */ + +/* + * i386_Logical_to_physical + * + * Converts logical address to physical address. + */ +void *i386_Logical_to_physical( + unsigned short segment, + void *address +); + +/* + * i386_Physical_to_logical + * + * Converts physical address to logical address. + */ +void *i386_Physical_to_logical( + unsigned short segment, + void *address +); + +/** + * @brief Converts real mode pointer {segment, offset} to physical address. 
+ * + * i386_Real_to_physical + * + * @param[in] segment used with \p offset to compute physical address + * @param[in] offset used with \p segment to compute physical address + * @retval physical address + */ +RTEMS_INLINE_ROUTINE void *i386_Real_to_physical( + uint16_t segment, + uint16_t offset) +{ + return (void *)(((uint32_t)segment<<4)+offset); +} + +/** + * @brief Retreives real mode pointer elements {segmnet, offset} from + * physical address. + * + * i386_Physical_to_real + * Function returns the highest segment (base) address possible. + * Example: input address - 0x4B3A2 + * output segment - 0x4B3A + * offset - 0x2 + * input address - 0x10F12E + * output segment - 0xFFFF + * offset - 0xF13E + * + * @param[in] address address to be converted, must be less than 0x10FFEF + * @param[out] segment segment computed from \p address + * @param[out] offset offset computed from \p address + * @retval 0 address not convertible + * @retval 1 segment and offset extracted + */ +int i386_Physical_to_real( + void *address, + uint16_t *segment, + uint16_t *offset +); + +/* + * "Simpler" names for a lot of the things defined in this file + */ + +/* segment access routines */ + +#define get_cs() i386_get_cs() +#define get_ds() i386_get_ds() +#define get_es() i386_get_es() +#define get_ss() i386_get_ss() +#define get_fs() i386_get_fs() +#define get_gs() i386_get_gs() + +#define CPU_swap_u32( _value ) i386_swap_u32( _value ) +#define CPU_swap_u16( _value ) i386_swap_u16( _value ) + +/* i80x86 I/O instructions */ + +#define outport_byte( _port, _value ) i386_outport_byte( _port, _value ) +#define outport_word( _port, _value ) i386_outport_word( _port, _value ) +#define outport_long( _port, _value ) i386_outport_long( _port, _value ) +#define inport_byte( _port, _value ) i386_inport_byte( _port, _value ) +#define inport_word( _port, _value ) i386_inport_word( _port, _value ) +#define inport_long( _port, _value ) i386_inport_long( _port, _value ) + + +#ifdef __cplusplus +} 
+#endif + +#endif /* !ASM */ + +#endif diff --git a/include/rtems/score/idtr.h b/include/rtems/score/idtr.h new file mode 100644 index 0000000000..a79af40792 --- /dev/null +++ b/include/rtems/score/idtr.h @@ -0,0 +1,66 @@ +/** + * @file + * + * @brief Intel I386 Data Structures + * + * This file contains definitions for data structure related + * to Intel system programming. More information can be found + * on Intel site and more precisely in the following book : + * + * Pentium Processor familly + * Developper's Manual + * + * Volume 3 : Architecture and Programming Manual + * + * Formerly contained in and extracted from libcpu/i386/cpu.h. + */ + +/* + * COPYRIGHT (C) 1998 Eric Valette (valette@crf.canon.fr) + * Canon Centre Recherche France. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * Applications must not include this file directly. + */ + +#ifndef _RTEMS_SCORE_IDTR_H +#define _RTEMS_SCORE_IDTR_H + +/* + * See page 14.9 Figure 14-2. + * + */ +typedef struct +{ + unsigned int low_offsets_bits:16; + unsigned int segment_selector:16; + unsigned int fixed_value_bits:8; + unsigned int gate_type:5; + unsigned int privilege:2; + unsigned int present:1; + unsigned int high_offsets_bits:16; +} interrupt_gate_descriptor; + +/* + * C callable function enabling to create a interrupt_gate_descriptor + */ +extern void create_interrupt_gate_descriptor (interrupt_gate_descriptor*, rtems_raw_irq_hdl); + +/* + * C callable function enabling to get easily usable info from + * the actual value of IDT register. + */ +extern void i386_get_info_from_IDTR (interrupt_gate_descriptor** table, + unsigned* limit); + +/* + * C callable function enabling to change the value of IDT register. Must be called + * with interrupts masked at processor level!!!. 
+ */ +extern void i386_set_IDTR (interrupt_gate_descriptor* table, + unsigned limit); + +#endif diff --git a/include/rtems/score/interr.h b/include/rtems/score/interr.h new file mode 100644 index 0000000000..f09d6e90a5 --- /dev/null +++ b/include/rtems/score/interr.h @@ -0,0 +1,240 @@ +/** + * @file rtems/score/interr.h + * + * @brief Constants and Prototypes Related to the Internal Error Handler + * + * This include file contains constants and prototypes related + * to the Internal Error Handler. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_INTERR_H +#define _RTEMS_SCORE_INTERR_H + +#include <stdbool.h> +#include <stdint.h> + +#include <rtems/system.h> + +/** + * @defgroup ScoreIntErr Internal Error Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * Semaphore services used in all of the APIs supported by RTEMS. + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief This type lists the possible sources from which an error + * can be reported. + */ +typedef enum { + /** + * @brief Errors of the core system. + * + * @see Internal_errors_Core_list. + */ + INTERNAL_ERROR_CORE, + + /** + * @brief Errors of the RTEMS API. + */ + INTERNAL_ERROR_RTEMS_API, + + /** + * @brief Errors of the POSIX API. + */ + INTERNAL_ERROR_POSIX_API, + + /** + * @brief Fatal source for the block device cache. + * + * @see rtems_bdbuf_fatal_code. + */ + RTEMS_FATAL_SOURCE_BDBUF, + + /** + * @brief Fatal source for application specific errors. + * + * The fatal code is application specific. + */ + RTEMS_FATAL_SOURCE_APPLICATION, + + /** + * @brief Fatal source of exit(). + * + * The fatal code is the exit() status code. 
+ */ + RTEMS_FATAL_SOURCE_EXIT, + + /** + * @brief Fatal source for BSP errors. + * + * The fatal codes are defined in <bsp/fatal.h>. Examples are interrupt and + * exception initialization. + * + * @see bsp_fatal_code and bsp_fatal(). + */ + RTEMS_FATAL_SOURCE_BSP, + + /** + * @brief Fatal source of assert(). + * + * The fatal code is the pointer value of the assert context. + * + * @see rtems_assert_context. + */ + RTEMS_FATAL_SOURCE_ASSERT, + + /** + * @brief Fatal source of the stack checker. + * + * The fatal code is the object name of the executing task. + */ + RTEMS_FATAL_SOURCE_STACK_CHECKER, + + /** + * @brief Fatal source of the exceptions. + * + * The fatal code is the pointer value of the exception frame pointer. + * + * @see rtems_exception_frame and rtems_exception_frame_print(). + */ + RTEMS_FATAL_SOURCE_EXCEPTION, + + /** + * @brief Fatal source of SMP domain. + * + * @see SMP_Fatal_code. + */ + RTEMS_FATAL_SOURCE_SMP, + + /** + * @brief The last available fatal source. + * + * This enum value ensures that the enum type needs at least 32-bits for + * architectures with short enums. + */ + RTEMS_FATAL_SOURCE_LAST = 0xffffffff +} Internal_errors_Source; + +/** + * A list of errors which are generated internally by the executive core. 
+ */ +typedef enum { + INTERNAL_ERROR_NO_CONFIGURATION_TABLE, + INTERNAL_ERROR_NO_CPU_TABLE, + INTERNAL_ERROR_TOO_LITTLE_WORKSPACE, + INTERNAL_ERROR_WORKSPACE_ALLOCATION, + INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL, + INTERNAL_ERROR_THREAD_EXITTED, + INTERNAL_ERROR_INCONSISTENT_MP_INFORMATION, + INTERNAL_ERROR_INVALID_NODE, + INTERNAL_ERROR_NO_MPCI, + INTERNAL_ERROR_BAD_PACKET, + INTERNAL_ERROR_OUT_OF_PACKETS, + INTERNAL_ERROR_OUT_OF_GLOBAL_OBJECTS, + INTERNAL_ERROR_OUT_OF_PROXIES, + INTERNAL_ERROR_INVALID_GLOBAL_ID, + INTERNAL_ERROR_BAD_STACK_HOOK, + INTERNAL_ERROR_BAD_ATTRIBUTES, + INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY, + OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL, + INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE, + INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0, + OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP, + INTERNAL_ERROR_GXX_KEY_ADD_FAILED, + INTERNAL_ERROR_GXX_MUTEX_INIT_FAILED, + INTERNAL_ERROR_NO_MEMORY_FOR_HEAP, + INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR, + INTERNAL_ERROR_RESOURCE_IN_USE +} Internal_errors_Core_list; + +typedef CPU_Uint32ptr Internal_errors_t; + +/** + * This type holds the fatal error information. + */ +typedef struct { + /** This is the source of the error. */ + Internal_errors_Source the_source; + /** This indicates if the error is internal of external. */ + bool is_internal; + /** This is the error code. */ + Internal_errors_t the_error; +} Internal_errors_Information; + +/** + * When a fatal error occurs, the error information is stored here. + */ +extern Internal_errors_Information _Internal_errors_What_happened; + +/** + * @brief Initiates system termination. + * + * This routine is invoked when the application or the executive itself + * determines that a fatal error has occurred or a final system state is + * reached (for example after exit()). + * + * The first action is to disable interrupts. + * + * The second action of this function is to call the fatal handler of the user + * extensions. 
For the initial extensions the following conditions are + * required + * - a valid stack pointer and enough stack space, + * - a valid code memory, and + * - valid read-only data. + * + * For the initial extensions the read-write data (including BSS segment) is + * not required on single processor configurations. On SMP configurations + * however the read-write data must be initialized since this function must + * determine the state of the other processors and request them to shut-down if + * necessary. + * + * Non-initial extensions require in addition valid read-write data. The BSP + * may install an initial extension that performs a system reset. In this case + * the non-initial extensions will be not called. + * + * Once all fatal handler executed the error information will be stored to + * _Internal_errors_What_happened and the system state is set to + * SYSTEM_STATE_TERMINATED. + * + * The final step is to call the CPU specific _CPU_Fatal_halt(). + * + * @param[in] the_source The fatal source indicating the subsystem the fatal + * condition originated in. + * @param[in] is_internal Indicates if the fatal condition was generated + * internally to the executive. + * @param[in] the_error The fatal error code. This value must be interpreted + * with respect to the source. + * + * @see rtems_fatal_error_occurred() and rtems_fatal(). 
+ */ +void _Terminate( + Internal_errors_Source the_source, + bool is_internal, + Internal_errors_t the_error +) RTEMS_NO_RETURN; + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/interrupts.h b/include/rtems/score/interrupts.h new file mode 100644 index 0000000000..5ae172a4f2 --- /dev/null +++ b/include/rtems/score/interrupts.h @@ -0,0 +1,81 @@ +/** + * @file + * + * @brief Intel I386 Interrupt Macros + * + * Formerly contained in and extracted from libcpu/i386/cpu.h + */ + +/* + * COPYRIGHT (c) 1998 valette@crf.canon.fr + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * Applications must not include this file directly. + */ + +#ifndef _RTEMS_SCORE_INTERRUPTS_H +#define _RTEMS_SCORE_INTERRUPTS_H + +#ifndef ASM + +struct __rtems_raw_irq_connect_data__; + +typedef void (*rtems_raw_irq_hdl) (void); +typedef void (*rtems_raw_irq_enable) (const struct __rtems_raw_irq_connect_data__*); +typedef void (*rtems_raw_irq_disable) (const struct __rtems_raw_irq_connect_data__*); +typedef int (*rtems_raw_irq_is_enabled) (const struct __rtems_raw_irq_connect_data__*); + +/** + * @name Interrupt Level Macros + * + */ +/**@{**/ + +#define i386_disable_interrupts( _level ) \ + { \ + __asm__ volatile ( "pushf ; \ + cli ; \ + pop %0" \ + : "=rm" ((_level)) \ + ); \ + } + +#define i386_enable_interrupts( _level ) \ + { \ + __asm__ volatile ( "push %0 ; \ + popf" \ + : : "rm" ((_level)) : "cc" \ + ); \ + } + +#define i386_flash_interrupts( _level ) \ + { \ + __asm__ volatile ( "push %0 ; \ + popf ; \ + cli" \ + : : "rm" ((_level)) : "cc" \ + ); \ + } + +#define i386_get_interrupt_level( _level ) \ + do { \ + register uint32_t _eflags; \ + \ + __asm__ volatile ( "pushf ; \ + pop %0" \ + : "=rm" ((_eflags)) \ + ); \ + \ + _level = (_eflags & EFLAGS_INTR_ENABLE) ? 
0 : 1; \ + } while (0) + +#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level ) +#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level ) + +/** @} */ + +#endif +#endif diff --git a/include/rtems/score/iosh7032.h b/include/rtems/score/iosh7032.h new file mode 100644 index 0000000000..3750024a64 --- /dev/null +++ b/include/rtems/score/iosh7032.h @@ -0,0 +1,220 @@ +/* + * This include file contains information pertaining to the Hitachi SH + * processor. + * + * NOTE: NOT ALL VALUES HAVE BEEN CHECKED !! + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * Based on "iosh7030.h" distributed with Hitachi's EVB's tutorials, which + * contained no copyright notice. + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef __IOSH7030_H +#define __IOSH7030_H + +/* + * After each line is explained whether the access is char short or long. 
+ * The functions read/writeb, w, l, 8, 16, 32 can be found + * in exec/score/cpu/sh/sh_io.h + * + * 8 bit == char ( readb, writeb, read8, write8) + * 16 bit == short ( readw, writew, read16, write16 ) + * 32 bit == long ( readl, writel, read32, write32 ) + */ + +#define SCI0_SMR 0x05fffec0 /* char */ +#define SCI0_BRR 0x05fffec1 /* char */ +#define SCI0_SCR 0x05fffec2 /* char */ +#define SCI0_TDR 0x05fffec3 /* char */ +#define SCI0_SSR 0x05fffec4 /* char */ +#define SCI0_RDR 0x05fffec5 /* char */ + +#define SCI1_SMR 0x05fffec8 /* char */ +#define SCI1_BRR 0x05fffec9 /* char */ +#define SCI1_SCR 0x05fffeca /* char */ +#define SCI1_TDR 0x05fffecb /* char */ +#define SCI1_SSR 0x05fffecc /* char */ +#define SCI1_RDR 0x05fffecd /* char */ + + +#define ADDRAH 0x05fffee0 /* char */ +#define ADDRAL 0x05fffee1 /* char */ +#define ADDRBH 0x05fffee2 /* char */ +#define ADDRBL 0x05fffee3 /* char */ +#define ADDRCH 0x05fffee4 /* char */ +#define ADDRCL 0x05fffee5 /* char */ +#define ADDRDH 0x05fffee6 /* char */ +#define ADDRDL 0x05fffee7 /* char */ +#define AD_DRA 0x05fffee0 /* short */ +#define AD_DRB 0x05fffee2 /* short */ +#define AD_DRC 0x05fffee4 /* short */ +#define AD_DRD 0x05fffee6 /* short */ +#define ADCSR 0x05fffee8 /* char */ +#define ADCR 0x05fffee9 /* char */ + +/*ITU SHARED*/ +#define ITU_TSTR 0x05ffff00 /* char */ +#define ITU_TSNC 0x05ffff01 /* char */ +#define ITU_TMDR 0x05ffff02 /* char */ +#define ITU_TFCR 0x05ffff03 /* char */ + +/*ITU CHANNEL 0*/ +#define ITU_TCR0 0x05ffff04 /* char */ +#define ITU_TIOR0 0x05ffff05 /* char */ +#define ITU_TIER0 0x05ffff06 /* char */ +#define ITU_TSR0 0x05ffff07 /* char */ +#define ITU_TCNT0 0x05ffff08 /* short */ +#define ITU_GRA0 0x05ffff0a /* short */ +#define ITU_GRB0 0x05ffff0c /* short */ + + /*ITU CHANNEL 1*/ +#define ITU_TCR1 0x05ffff0E /* char */ +#define ITU_TIOR1 0x05ffff0F /* char */ +#define ITU_TIER1 0x05ffff10 /* char */ +#define ITU_TSR1 0x05ffff11 /* char */ +#define ITU_TCNT1 0x05ffff12 /* short */ 
+#define ITU_GRA1 0x05ffff14 /* short */ +#define ITU_GRB1 0x05ffff16 /* short */ + + + /*ITU CHANNEL 2*/ +#define ITU_TCR2 0x05ffff18 /* char */ +#define ITU_TIOR2 0x05ffff19 /* char */ +#define ITU_TIER2 0x05ffff1A /* char */ +#define ITU_TSR2 0x05ffff1B /* char */ +#define ITU_TCNT2 0x05ffff1C /* short */ +#define ITU_GRA2 0x05ffff1E /* short */ +#define ITU_GRB2 0x05ffff20 /* short */ + + /*ITU CHANNEL 3*/ +#define ITU_TCR3 0x05ffff22 /* char */ +#define ITU_TIOR3 0x05ffff23 /* char */ +#define ITU_TIER3 0x05ffff24 /* char */ +#define ITU_TSR3 0x05ffff25 /* char */ +#define ITU_TCNT3 0x05ffff26 /* short */ +#define ITU_GRA3 0x05ffff28 /* short */ +#define ITU_GRB3 0x05ffff2A /* short */ +#define ITU_BRA3 0x05ffff2C /* short */ +#define ITU_BRB3 0x05ffff2E /* short */ + + /*ITU CHANNELS 0-4 SHARED*/ +#define ITU_TOCR 0x05ffff31 /* char */ + + /*ITU CHANNEL 4*/ +#define ITU_TCR4 0x05ffff32 /* char */ +#define ITU_TIOR4 0x05ffff33 /* char */ +#define ITU_TIER4 0x05ffff34 /* char */ +#define ITU_TSR4 0x05ffff35 /* char */ +#define ITU_TCNT4 0x05ffff36 /* short */ +#define ITU_GRA4 0x05ffff38 /* short */ +#define ITU_GRB4 0x05ffff3A /* short */ +#define ITU_BRA4 0x05ffff3C /* short */ +#define ITU_BRB4 0x05ffff3E /* short */ + + /*DMAC CHANNELS 0-3 SHARED*/ +#define DMAOR 0x05ffff48 /* short */ + + /*DMAC CHANNEL 0*/ +#define DMA_SAR0 0x05ffff40 /* long */ +#define DMA_DAR0 0x05ffff44 /* long */ +#define DMA_TCR0 0x05ffff4a /* short */ +#define DMA_CHCR0 0x05ffff4e /* short */ + + /*DMAC CHANNEL 1*/ +#define DMA_SAR1 0x05ffff50 /* long */ +#define DMA_DAR1 0x05ffff54 /* long */ +#define DMA_TCR1 0x05fffF5a /* short */ +#define DMA_CHCR1 0x05ffff5e /* short */ + + /*DMAC CHANNEL 3*/ +#define DMA_SAR3 0x05ffff60 /* long */ +#define DMA_DAR3 0x05ffff64 /* long */ +#define DMA_TCR3 0x05fffF6a /* short */ +#define DMA_CHCR3 0x05ffff6e /* short */ + +/*DMAC CHANNEL 4*/ +#define DMA_SAR4 0x05ffff70 /* long */ +#define DMA_DAR4 0x05ffff74 /* long */ +#define DMA_TCR4 
0x05fffF7a /* short */ +#define DMA_CHCR4 0x05ffff7e /* short */ + +/*INTC*/ +#define INTC_IPRA 0x05ffff84 /* short */ +#define INTC_IPRB 0x05ffff86 /* short */ +#define INTC_IPRC 0x05ffff88 /* short */ +#define INTC_IPRD 0x05ffff8A /* short */ +#define INTC_IPRE 0x05ffff8C /* short */ +#define INTC_ICR 0x05ffff8E /* short */ + +/*UBC*/ +#define UBC_BARH 0x05ffff90 /* short */ +#define UBC_BARL 0x05ffff92 /* short */ +#define UBC_BAMRH 0x05ffff94 /* short */ +#define UBC_BAMRL 0x05ffff96 /* short */ +#define UBC_BBR 0x05ffff98 /* short */ + +/*BSC*/ +#define BSC_BCR 0x05ffffA0 /* short */ +#define BSC_WCR1 0x05ffffA2 /* short */ +#define BSC_WCR2 0x05ffffA4 /* short */ +#define BSC_WCR3 0x05ffffA6 /* short */ +#define BSC_DCR 0x05ffffA8 /* short */ +#define BSC_PCR 0x05ffffAA /* short */ +#define BSC_RCR 0x05ffffAC /* short */ +#define BSC_RTCSR 0x05ffffAE /* short */ +#define BSC_RTCNT 0x05ffffB0 /* short */ +#define BSC_RTCOR 0x05ffffB2 /* short */ + +/*WDT*/ +#define WDT_TCSR 0x05ffffB8 /* char */ +#define WDT_TCNT 0x05ffffB9 /* char */ +#define WDT_RSTCSR 0x05ffffBB /* char */ + +/*POWER DOWN STATE*/ +#define PDT_SBYCR 0x05ffffBC /* char */ + +/*PORT A*/ +#define PADR 0x05ffffC0 /* short */ + +/*PORT B*/ +#define PBDR 0x05ffffC2 /* short */ + + /*PORT C*/ +#define PCDR 0x05ffffD0 /* short */ + +/*PFC*/ +#define PFC_PAIOR 0x05ffffC4 /* short */ +#define PFC_PBIOR 0x05ffffC6 /* short */ +#define PFC_PACR1 0x05ffffC8 /* short */ +#define PFC_PACR2 0x05ffffCA /* short */ +#define PFC_PBCR1 0x05ffffCC /* short */ +#define PFC_PBCR2 0x05ffffCE /* short */ +#define PFC_CASCR 0x05ffffEE /* short */ + +/*TPC*/ +#define TPC_TPMR 0x05ffffF0 /* short */ +#define TPC_TPCR 0x05ffffF1 /* short */ +#define TPC_NDERH 0x05ffffF2 /* short */ +#define TPC_NDERL 0x05ffffF3 /* short */ +#define TPC_NDRB 0x05ffffF4 /* char */ +#define TPC_NDRA 0x05ffffF5 /* char */ +#define TPC_NDRB1 0x05ffffF6 /* char */ +#define TPC_NDRA1 0x05ffffF7 /* char */ + +#endif diff --git 
a/include/rtems/score/iosh7045.h b/include/rtems/score/iosh7045.h new file mode 100644 index 0000000000..db3252b72d --- /dev/null +++ b/include/rtems/score/iosh7045.h @@ -0,0 +1,322 @@ +/* + * This include file contains information pertaining to the Hitachi SH + * processor. + * + * NOTE: NOT ALL VALUES HAVE BEEN CHECKED !! + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * Based on "iosh7030.h" distributed with Hitachi's EVB's tutorials, which + * contained no copyright notice. + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * Modified to reflect on-chip registers for sh7045 processor, based on + * "Register.h" distributed with Hitachi's EVB7045F tutorials, and which + * contained no copyright notice: + * John M. Mills (jmills@tga.com) + * TGA Technologies, Inc. + * 100 Pinnacle Way, Suite 140 + * Norcross, GA 30071 U.S.A. + * August, 1999 + * + * This modified file may be copied and distributed in accordance + * the above-referenced license. It is provided for critique and + * developmental purposes without any warranty nor representation + * by the authors or by TGA Technologies. + */ + +#ifndef __IOSH7045_H +#define __IOSH7045_H + +/* + * After each line is explained whether the access is char short or long. 
+ * The functions read/writeb, w, l, 8, 16, 32 can be found + * in exec/score/cpu/sh/sh_io.h + * + * 8 bit == char ( readb, writeb, read8, write8) + * 16 bit == short ( readw, writew, read16, write16 ) + * 32 bit == long ( readl, writel, read32, write32 ) + * JMM: Addresses noted "[char, ]short,word" are per Hitachi _SuperH_RISC_ + * ENGINE_..Hardware_Manual; alignment access-restrictions may apply + */ + +#define REG_BASE 0xFFFF8000 + +/* SCI0 Registers */ +#define SCI_SMR0 (REG_BASE + 0x01a0) /*char: Serial mode ch 0 */ +#define SCI_BRR0 (REG_BASE + 0x01a1) /*char: Bit rate ch 0 */ +#define SCI_SCR0 (REG_BASE + 0x01a2) /*char: Serial control ch 0 */ +#define SCI_TDR0 (REG_BASE + 0x01a3) /*char: Transmit data ch 0 */ +#define SCI_SSR0 (REG_BASE + 0x01a4) /*char: Serial status ch 0 */ +#define SCI_RDR0 (REG_BASE + 0x01a5) /*char: Receive data ch 0 */ + +#define SCI0_SMR SCI_SMR0 + +/* SCI1 Registers */ +#define SCI_SMR1 (REG_BASE + 0x01b0) /* char: Serial mode ch 1 */ +#define SCI_BRR1 (REG_BASE + 0x01b1) /* char: Bit rate ch 1 */ +#define SCI_SCR1 (REG_BASE + 0x01b2) /* char: Serial control ch 1 */ +#define SCI_TDR1 (REG_BASE + 0x01b3) /* char: Transmit data ch 1 */ +#define SCI_SSR1 (REG_BASE + 0x01b4) /* char: Serial status ch 1 */ +#define SCI_RDR1 (REG_BASE + 0x01b5) /* char: Receive data ch 1 */ + +#define SCI1_SMR SCI_SMR1 + +/* ADI */ +/* High Speed A/D (Excluding A-Mask Part)*/ +#define ADDRA (REG_BASE + 0x03F0) /* short */ +#define ADDRB (REG_BASE + 0x03F2) /* short */ +#define ADDRC (REG_BASE + 0x03F4) /* short */ +#define ADDRD (REG_BASE + 0x03F6) /* short */ +#define ADDRE (REG_BASE + 0x03F8) /* short */ +#define ADDRF (REG_BASE + 0x03FA) /* short */ +#define ADDRG (REG_BASE + 0x03FC) /* short */ +#define ADDRH (REG_BASE + 0x03FE) /* short */ +#define ADCSR (REG_BASE + 0x03E0) /* char */ +#define ADCR (REG_BASE + 0x03E1) /* char */ + +/* Mid-Speed A/D (A-Mask part)*/ +#define ADDRA0 (REG_BASE + 0x0400) /* char, short */ +#define ADDRA0H (REG_BASE + 
0x0400) /* char, short */ +#define ADDRA0L (REG_BASE + 0x0401) /* char */ +#define ADDRB0 (REG_BASE + 0x0402) /* char, short */ +#define ADDRB0H (REG_BASE + 0x0402) /* char, short */ +#define ADDRB0L (REG_BASE + 0x0403) /* char */ +#define ADDRC0 (REG_BASE + 0x0404) /* char, short */ +#define ADDRC0H (REG_BASE + 0x0404) /* char, short */ +#define ADDRC0L (REG_BASE + 0x0405) /* char */ +#define ADDRD0 (REG_BASE + 0x0406) /* char, short */ +#define ADDRD0H (REG_BASE + 0x0406) /* char, short */ +#define ADDRD0L (REG_BASE + 0x0407) /* char */ +#define ADCSR0 (REG_BASE + 0x0410) /* char */ +#define ADCR0 (REG_BASE + 0x0412) /* char */ +#define ADDRA1 (REG_BASE + 0x0408) /* char, short */ +#define ADDRA1H (REG_BASE + 0x0408) /* char, short */ +#define ADDRA1L (REG_BASE + 0x0409) /* char */ +#define ADDRB1 (REG_BASE + 0x040A) /* char, short */ +#define ADDRB1H (REG_BASE + 0x040A) /* char, short */ +#define ADDRB1L (REG_BASE + 0x040B) /* char */ +#define ADDRC1 (REG_BASE + 0x040C) /* char, short */ +#define ADDRC1H (REG_BASE + 0x040C) /* char, short */ +#define ADDRC1L (REG_BASE + 0x040D) /* char */ +#define ADDRD1 (REG_BASE + 0x040E) /* char, short */ +#define ADDRD1H (REG_BASE + 0x040E) /* char, short */ +#define ADDRD1L (REG_BASE + 0x040F) /* char */ +#define ADCSR1 (REG_BASE + 0x0411) /* char */ +#define ADCR1 (REG_BASE + 0x0413) /* char */ + +/*MTU SHARED*/ +#define MTU_TSTR (REG_BASE + 0x0240) /* char, short, word */ +#define MTU_TSYR (REG_BASE + 0x0241) /* char, short, word */ +#define MTU_ICSR (REG_BASE + 0x03C0) /* input lev. CSR */ +#define MTU_OCSR (REG_BASE + 0x03C0) /* output lev. 
CSR */ + +/*MTU CHANNEL 0*/ +#define MTU_TCR0 (REG_BASE + 0x0260) /* char, short, word */ +#define MTU_TMDR0 (REG_BASE + 0x0261) /* char, short, word */ +#define MTU_TIORH0 (REG_BASE + 0x0262) /* char, short, word */ +#define MTU_TIORL0 (REG_BASE + 0x0263) /* char, short, word */ +#define MTU_TIER0 (REG_BASE + 0x0264) /* char, short, word */ +#define MTU_TSR0 (REG_BASE + 0x0265) /* char, short, word */ +#define MTU_TCNT0 (REG_BASE + 0x0266) /* short, word */ +#define MTU_GR0A (REG_BASE + 0x0268) /* short, word */ +#define MTU_GR0B (REG_BASE + 0x026A) /* short, word */ +#define MTU_GR0C (REG_BASE + 0x026C) /* short, word */ +#define MTU_GR0D (REG_BASE + 0x026E) /* short, word */ + +/*MTU CHANNEL 1*/ +#define MTU_TCR1 (REG_BASE + 0x0280) /* char, short, word */ +#define MTU_TMDR1 (REG_BASE + 0x0281) /* char, short, word */ +#define MTU_TIOR1 (REG_BASE + 0x0282) /* char, short, word */ +#define MTU_TIER1 (REG_BASE + 0x0284) /* char, short, word */ +#define MTU_TSR1 (REG_BASE + 0x0285) /* char, short, word */ +#define MTU_TCNT1 (REG_BASE + 0x0286) /* short, word */ +#define MTU_GR1A (REG_BASE + 0x0288) /* short, word */ +#define MTU_GR1B (REG_BASE + 0x028A) /* short, word */ + +/*MTU CHANNEL 2*/ +#define MTU_TCR2 (REG_BASE + 0x02A0) /* char, short, word */ +#define MTU_TMDR2 (REG_BASE + 0x02A1) /* char, short, word */ +#define MTU_TIOR2 (REG_BASE + 0x02A2) /* char, short, word */ +#define MTU_TIER2 (REG_BASE + 0x02A4) /* char, short, word */ +#define MTU_TSR2 (REG_BASE + 0x02A5) /* char, short, word */ +#define MTU_TCNT2 (REG_BASE + 0x02A6) /* short, word */ +#define MTU_GR2A (REG_BASE + 0x02A8) /* short, word */ +#define MTU_GR2B (REG_BASE + 0x02AA) /* short, word */ + +/*MTU CHANNELS 3-4 SHARED*/ +#define MTU_TOER (REG_BASE + 0x020A) /* char, short, word */ +#define MTU_TOCR (REG_BASE + 0x020B) /* char, short, word */ +#define MTU_TGCR (REG_BASE + 0x020D) /* char, short, word */ +#define MTU_TCDR (REG_BASE + 0x0214) /* short, word */ +#define MTU_TDDR (REG_BASE + 
0x0216) /* short, word */ +#define MTU_TCNTS (REG_BASE + 0x0220) /* short, word */ +#define MTU_TCBR (REG_BASE + 0x0222) /* short, word */ + +/*MTU CHANNEL 3*/ +#define MTU_TCR3 (REG_BASE + 0x0200) /* char, short, word */ +#define MTU_TMDR3 (REG_BASE + 0x0202) /* char, short, word */ +#define MTU_TIORH3 (REG_BASE + 0x0204) /* char, short, word */ +#define MTU_TIORL3 (REG_BASE + 0x0205) /* char, short, word */ +#define MTU_TIER3 (REG_BASE + 0x0208) /* char, short, word */ +#define MTU_TSR3 (REG_BASE + 0x022C) /* char, short, word */ +#define MTU_TCNT3 (REG_BASE + 0x0210) /* short, word */ +#define MTU_GR3A (REG_BASE + 0x0218) /* short, word */ +#define MTU_GR3B (REG_BASE + 0x021A) /* short, word */ +#define MTU_GR3C (REG_BASE + 0x0224) /* short, word */ +#define MTU_GR3D (REG_BASE + 0x0226) /* short, word */ + +/*MTU CHANNEL 4*/ +#define MTU_TCR4 (REG_BASE + 0x0201) /* char, short, word */ +#define MTU_TMDR4 (REG_BASE + 0x0203) /* char, short, word */ +#define MTU_TIOR4 (REG_BASE + 0x0206) /* char, short, word */ +#define MTU_TIORH4 (REG_BASE + 0x0206) /* char, short, word */ +#define MTU_TIORL4 (REG_BASE + 0x0207) /* char, short, word */ +#define MTU_TIER4 (REG_BASE + 0x0209) /* char, short, word */ +#define MTU_TSR4 (REG_BASE + 0x022D) /* char, short, word */ +#define MTU_TCNT4 (REG_BASE + 0x0212) /* short, word */ +#define MTU_GR4A (REG_BASE + 0x021C) /* short, word */ +#define MTU_GR4B (REG_BASE + 0x021E) /* short, word */ +#define MTU_GR4C (REG_BASE + 0x0228) /* short, word */ +#define MTU_GR4D (REG_BASE + 0x022A) /* short, word */ + +/*DMAC CHANNELS 0-3 SHARED*/ +#define DMAOR (REG_BASE + 0x06B0) /* short */ + +/*DMAC CHANNEL 0*/ +#define DMA_SAR0 (REG_BASE + 0x06C0) /* short, word */ +#define DMA_DAR0 (REG_BASE + 0x06C4) /* short, word */ +#define DMA_DMATCR0 (REG_BASE + 0x06C8) /* short, word */ +#define DMA_CHCR0 (REG_BASE + 0x06CC) /* short, word */ + +/*DMAC CHANNEL 1*/ +#define DMA_SAR1 (REG_BASE + 0x06D0) /* short, word */ +#define DMA_DAR1 (REG_BASE + 
0x06D4) /* short, word */ +#define DMA_DMATCR1 (REG_BASE + 0x06D8) /* short, word */ +#define DMA_CHCR1 (REG_BASE + 0x06DC) /* short, word */ + +/*DMAC CHANNEL 3*/ +#define DMA_SAR3 (REG_BASE + 0x06E0) /* short, word */ +#define DMA_DAR3 (REG_BASE + 0x06E4) /* short, word */ +#define DMA_DMATCR3 (REG_BASE + 0x06E8) /* short, word */ +#define DMA_CHCR3 (REG_BASE + 0x06EC) /* short, word */ + +/*DMAC CHANNEL 4*/ +#define DMA_SAR4 (REG_BASE + 0x06F0) /* short, word */ +#define DMA_DAR4 (REG_BASE + 0x06F4) /* short, word */ +#define DMA_DMATCR4 (REG_BASE + 0x06F8) /* short, word */ +#define DMA_CHCR4 (REG_BASE + 0x06FC) /* short, word */ + +/*Data Transfer Controller*/ +#define DTC_DTEA (REG_BASE + 0x0700) /* char, short, word */ +#define DTC_DTEB (REG_BASE + 0x0701) /* char, short(?), word(?) */ +#define DTC_DTEC (REG_BASE + 0x0702) /* char, short(?), word(?) */ +#define DTC_DTED (REG_BASE + 0x0703) /* char, short(?), word(?) */ +#define DTC_DTEE (REG_BASE + 0x0704) /* char, short(?), word(?) 
*/ +#define DTC_DTCSR (REG_BASE + 0x0706) /* char, short, word */ +#define DTC_DTBR (REG_BASE + 0x0708) /* short, word */ + +/*Cache Memory*/ +#define CAC_CCR (REG_BASE + 0x0740) /* char, short, word */ + +/*INTC*/ +#define INTC_IPRA (REG_BASE + 0x0348) /* char, short, word */ +#define INTC_IPRB (REG_BASE + 0x034A) /* char, short, word */ +#define INTC_IPRC (REG_BASE + 0x034C) /* char, short, word */ +#define INTC_IPRD (REG_BASE + 0x034E) /* char, short, word */ +#define INTC_IPRE (REG_BASE + 0x0350) /* char, short, word */ +#define INTC_IPRF (REG_BASE + 0x0352) /* char, short, word */ +#define INTC_IPRG (REG_BASE + 0x0354) /* char, short, word */ +#define INTC_IPRH (REG_BASE + 0x0356) /* char, short, word */ +#define INTC_ICR (REG_BASE + 0x0358) /* char, short, word */ +#define INTC_ISR (REG_BASE + 0x035A) /* char, short, word */ + +/*Flash (F-ZTAT)*/ +#define FL_FLMCR1 (REG_BASE + 0x0580) /* Fl.Mem.Contr.Reg 1: char */ +#define FL_FLMCR2 (REG_BASE + 0x0581) /* Fl.Mem.Contr.Reg 2: char */ +#define FL_EBR1 (REG_BASE + 0x0582) /* Fl.Mem.Erase Blk.1: char */ +#define FL_EBR2 (REG_BASE + 0x0584) /* Fl.Mem.Erase Blk.2: char */ +#define FL_RAMER (REG_BASE + 0x0628) /* Ram Emul.Reg.- char,short,word */ + +/*UBC*/ +#define UBC_BARH (REG_BASE + 0x0600) /* char, short, word */ +#define UBC_BARL (REG_BASE + 0x0602) /* char, short, word */ +#define UBC_BAMRH (REG_BASE + 0x0604) /* char, short, word */ +#define UBC_BAMRL (REG_BASE + 0x0606) /* char, short, word */ +#define UBC_BBR (REG_BASE + 0x0608) /* char, short, word */ +/*BSC*/ +#define BSC_BCR1 (REG_BASE + 0x0620) /* short */ +#define BSC_BCR2 (REG_BASE + 0x0622) /* short */ +#define BSC_WCR1 (REG_BASE + 0x0624) /* short */ +#define BSC_WCR2 (REG_BASE + 0x0626) /* short */ +#define BSC_DCR (REG_BASE + 0x062A) /* short */ +#define BSC_RTCSR (REG_BASE + 0x062C) /* short */ +#define BSC_RTCNT (REG_BASE + 0x062E) /* short */ +#define BSC_RTCOR (REG_BASE + 0x0630) /* short */ + +/*WDT*/ +#define WDT_R_TCSR (REG_BASE + 0x0610) 
/* rd: char */ +#define WDT_R_TCNT (REG_BASE + 0x0611) /* rd: char */ +#define WDT_R_RSTCSR (REG_BASE + 0x0613) /* rd: char */ +#define WDT_W_TCSR (REG_BASE + 0x0610) /* wrt: short */ +#define WDT_W_TCNT (REG_BASE + 0x0610) /* wrt: short */ +#define WDT_W_RSTCSR (REG_BASE + 0x0612) /* wrt: short */ + +/*POWER DOWN STATE*/ +#define PDT_SBYCR (REG_BASE + 0x0614) /* char */ + +/* Port I/O Control Registers */ +#define IO_PADRH (REG_BASE + 0x0380) /* Port A Data Register */ +#define IO_PADRL (REG_BASE + 0x0382) /* Port A Data Register */ +#define IO_PBDR (REG_BASE + 0x0390) /* Port B Data Register */ +#define IO_PCDR (REG_BASE + 0x0392) /* Port C Data Register */ +#define IO_PDDRH (REG_BASE + 0x03A0) /* Port D Data Register */ +#define IO_PDDRL (REG_BASE + 0x03A2) /* Port D Data Register */ +#define IO_PEDR (REG_BASE + 0x03B0) /* Port E Data Register */ +#define IO_PFDR (REG_BASE + 0x03B2) /* Port F Data Register */ + +/*Pin Function Control Register*/ +#define PFC_PAIORH (REG_BASE + 0x0384) /* Port A I/O Reg. H */ +#define PFC_PAIORL (REG_BASE + 0x0386) /* Port A I/O Reg. L */ +#define PFC_PACRH (REG_BASE + 0x0388) /* Port A Ctr. Reg. H */ +#define PFC_PACRL1 (REG_BASE + 0x038C) /* Port A Ctr. Reg. L1 */ +#define PFC_PACRL2 (REG_BASE + 0x038E) /* Port A Ctr. Reg. L2 */ +#define PFC_PBIOR (REG_BASE + 0x0394) /* Port B I/O Register */ +#define PFC_PBCR1 (REG_BASE + 0x0398) /* Port B Ctr. Reg. R1 */ +#define PFC_PBCR2 (REG_BASE + 0x039A) /* Port B Ctr. Reg. R2 */ +#define PFC_PCIOR (REG_BASE + 0x0396) /* Port C I/O Register */ +#define PFC_PCCR (REG_BASE + 0x039C) /* Port C Ctr. Reg. */ +#define PFC_PDIORH (REG_BASE + 0x03A4) /* Port D I/O Reg. H */ +#define PFC_PDIORL (REG_BASE + 0x03A6) /* Port D I/O Reg. L */ +#define PFC_PDCRH1 (REG_BASE + 0x03A8) /* Port D Ctr. Reg. H1 */ +#define PFC_PDCRH2 (REG_BASE + 0x03AA) /* Port D Ctr. Reg. H2 */ +#define PFC_PDCRL (REG_BASE + 0x03AC) /* Port D Ctr. Reg. 
L */ +#define PFC_PEIOR (REG_BASE + 0x03B4) /* Port E I/O Register */ +#define PFC_PECR1 (REG_BASE + 0x03B8) /* Port E Ctr. Reg. 1 */ +#define PFC_PECR2 (REG_BASE + 0x03BA) /* Port E Ctr. Reg. 2 */ +#define PFC_IFCR (REG_BASE + 0x03C8) /* short */ + +/*Compare/Match Timer*/ +#define CMT_CMSTR (REG_BASE + 0x3D0) /* Start Reg. char, short, word */ +#define CMT_CMCSR0 (REG_BASE + 0x3D2) /* C0 SCR short, word */ +#define CMT_CMCNT0 (REG_BASE + 0x3D4) /* C0 Counter char, short, word */ +#define CMT_CMCOR0 (REG_BASE + 0x3D6) /* C0 Const.Reg. char, short, word */ +#define CMT_CMCSR1 (REG_BASE + 0x3D8) /* C1 SCR short, word */ +#define CMT_CMCNT1 (REG_BASE + 0x3DA) /* C1 Counter char, short, word */ +#define CMT_CMCOR1 (REG_BASE + 0x3DC) /* C1 Const.Reg. char, short, word */ + +#endif diff --git a/include/rtems/score/iosh7750.h b/include/rtems/score/iosh7750.h new file mode 100644 index 0000000000..c5c532dbc2 --- /dev/null +++ b/include/rtems/score/iosh7750.h @@ -0,0 +1,47 @@ +/* + * This include file contains information pertaining to the Hitachi SH + * processor. + * + * NOTE: NOT ALL VALUES HAVE BEEN CHECKED !! + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * Based on "iosh7030.h" distributed with Hitachi's EVB's tutorials, which + * contained no copyright notice. + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ * + * Modified to reflect on-chip registers for sh7045 processor, based on + * "Register.h" distributed with Hitachi's EVB7045F tutorials, and which + * contained no copyright notice: + * John M. Mills (jmills@tga.com) + * TGA Technologies, Inc. + * 100 Pinnacle Way, Suite 140 + * Norcross, GA 30071 U.S.A. + * August, 1999 + * + * This modified file may be copied and distributed in accordance + * the above-referenced license. It is provided for critique and + * developmental purposes without any warranty nor representation + * by the authors or by TGA Technologies. + */ + +#ifndef __IOSH7750_H +#define __IOSH7750_H + +#include <rtems/score/sh7750_regs.h> + +#endif diff --git a/include/rtems/score/ipl.h b/include/rtems/score/ipl.h new file mode 100644 index 0000000000..9ce2d87e0e --- /dev/null +++ b/include/rtems/score/ipl.h @@ -0,0 +1,73 @@ +/* ipl.h + * + * IPL console driver + * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia + * Author: Victor V. Vengerov <vvv@oktet.ru> + * + * Based on work: + * Author: Ralf Corsepius (corsepiu@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1989-1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef __IPL_DRIVER_h +#define __IPL_DRIVER_h + +#ifdef __cplusplus +extern "C" { +#endif + +#define IPL_DRIVER_TABLE_ENTRY \ + { ipl_console_initialize, ipl_console_open, ipl_console_close, \ + ipl_console_read, ipl_console_write, ipl_console_control } + + +#define NULL_SUCCESSFUL RTEMS_SUCCESSFUL + +rtems_device_driver ipl_console_initialize( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +rtems_device_driver ipl_console_open( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +rtems_device_driver ipl_console_close( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +rtems_device_driver ipl_console_read( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +rtems_device_driver ipl_console_write( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +rtems_device_driver ipl_console_control( + rtems_device_major_number, + rtems_device_minor_number, + void * +); + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/ispsh7032.h b/include/rtems/score/ispsh7032.h new file mode 100644 index 0000000000..9c7cee8bd6 --- /dev/null +++ b/include/rtems/score/ispsh7032.h @@ -0,0 +1,162 @@ +/* + * This include file contains information pertaining to the Hitachi SH + * processor. + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef __CPU_ISPS_H +#define __CPU_ISPS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/types.h> + +extern void __ISR_Handler( uint32_t vector ); + + +/* + * interrupt vector table offsets + */ +#define NMI_ISP_V 11 +#define USB_ISP_V 12 +#define IRQ0_ISP_V 64 +#define IRQ1_ISP_V 65 +#define IRQ2_ISP_V 66 +#define IRQ3_ISP_V 67 +#define IRQ4_ISP_V 68 +#define IRQ5_ISP_V 69 +#define IRQ6_ISP_V 70 +#define IRQ7_ISP_V 71 +#define DMA0_ISP_V 72 +#define DMA1_ISP_V 74 +#define DMA2_ISP_V 76 +#define DMA3_ISP_V 78 + +#define IMIA0_ISP_V 80 +#define IMIB0_ISP_V 81 +#define OVI0_ISP_V 82 + +#define IMIA1_ISP_V 84 +#define IMIB1_ISP_V 85 +#define OVI1_ISP_V 86 + +#define IMIA2_ISP_V 88 +#define IMIB2_ISP_V 89 +#define OVI2_ISP_V 90 + +#define IMIA3_ISP_V 92 +#define IMIB3_ISP_V 93 +#define OVI3_ISP_V 94 + +#define IMIA4_ISP_V 96 +#define IMIB4_ISP_V 97 +#define OVI4_ISP_V 98 + +#define ERI0_ISP_V 100 +#define RXI0_ISP_V 101 +#define TXI0_ISP_V 102 +#define TEI0_ISP_V 103 + +#define ERI1_ISP_V 104 +#define RXI1_ISP_V 105 +#define TXI1_ISP_V 106 +#define TEI1_ISP_V 107 + +#define PRT_ISP_V 108 +#define ADU_ISP_V 109 +#define WDT_ISP_V 112 +#define DREF_ISP_V 113 + + +/* dummy ISP */ +extern void _dummy_isp( void ); + +/* Non Maskable Interrupt */ +extern void _nmi_isp( void ); + +/* User Break Controller */ +extern void _usb_isp( void ); + +/* External interrupts 0-7 */ +extern void _irq0_isp( void ); +extern void _irq1_isp( void ); +extern void _irq2_isp( void ); +extern void _irq3_isp( void ); +extern void _irq4_isp( void ); +extern void _irq5_isp( void ); +extern void _irq6_isp( void ); +extern void _irq7_isp( void ); + +/* DMA - Controller */ +extern void _dma0_isp( void ); +extern void _dma1_isp( void ); +extern void _dma2_isp( void ); +extern void _dma3_isp( void ); + +/* Interrupt Timer Unit */ +/* Timer 0 */ +extern void _imia0_isp( void ); +extern void _imib0_isp( void ); +extern void _ovi0_isp( void ); +/* Timer 1 */ +extern void 
_imia1_isp( void ); +extern void _imib1_isp( void ); +extern void _ovi1_isp( void ); +/* Timer 2 */ +extern void _imia2_isp( void ); +extern void _imib2_isp( void ); +extern void _ovi2_isp( void ); +/* Timer 3 */ +extern void _imia3_isp( void ); +extern void _imib3_isp( void ); +extern void _ovi3_isp( void ); +/* Timer 4 */ +extern void _imia4_isp( void ); +extern void _imib4_isp( void ); +extern void _ovi4_isp( void ); + +/* seriell interfaces */ +extern void _eri0_isp( void ); +extern void _rxi0_isp( void ); +extern void _txi0_isp( void ); +extern void _tei0_isp( void ); +extern void _eri1_isp( void ); +extern void _rxi1_isp( void ); +extern void _txi1_isp( void ); +extern void _tei1_isp( void ); + +/* Parity Control Unit of the Bus State Controllers */ +extern void _prt_isp( void ); + +/* ADC */ +extern void _adu_isp( void ); + +/* Watchdog Timer */ +extern void _wdt_isp( void ); + +/* DRAM refresh control unit of bus state controller */ +extern void _dref_isp( void ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/ispsh7045.h b/include/rtems/score/ispsh7045.h new file mode 100644 index 0000000000..fb9f5297ae --- /dev/null +++ b/include/rtems/score/ispsh7045.h @@ -0,0 +1,208 @@ +/* + * This include file contains information pertaining to the Hitachi SH + * processor. + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * Modified to reflect isp entries for sh7045 processor: + * John M. 
Mills (jmills@tga.com) + * TGA Technologies, Inc. + * 100 Pinnacle Way, Suite 140 + * Norcross, GA 30071 U.S.A. + * + * + * This modified file may be copied and distributed in accordance + * the above-referenced license. It is provided for critique and + * developmental purposes without any warranty nor representation + * by the authors or by TGA Technologies. + */ + +#ifndef __CPU_ISPS_H +#define __CPU_ISPS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/types.h> + +extern void __ISR_Handler( uint32_t vector ); + + +/* + * interrupt vector table offsets + */ +#define NMI_ISP_V 11 +#define USB_ISP_V 12 +#define IRQ0_ISP_V 64 +#define IRQ1_ISP_V 65 +#define IRQ2_ISP_V 66 +#define IRQ3_ISP_V 67 +#define IRQ4_ISP_V 68 +#define IRQ5_ISP_V 69 +#define IRQ6_ISP_V 70 +#define IRQ7_ISP_V 71 +#define DMA0_ISP_V 72 +#define DMA1_ISP_V 76 +#define DMA2_ISP_V 80 +#define DMA3_ISP_V 84 + +#define MTUA0_ISP_V 88 +#define MTUB0_ISP_V 89 +#define MTUC0_ISP_V 90 +#define MTUD0_ISP_V 91 +#define MTUV0_ISP_V 92 + +#define MTUA1_ISP_V 96 +#define MTUB1_ISP_V 97 +#define MTUV1_ISP_V 100 +#define MTUU1_ISP_V 101 + +#define MTUA2_ISP_V 104 +#define MTUB2_ISP_V 105 +#define MTUV2_ISP_V 108 +#define MTUU2_ISP_V 109 + +#define MTUA3_ISP_V 112 +#define MTUB3_ISP_V 113 +#define MTUC3_ISP_V 114 +#define MTUD3_ISP_V 115 +#define MTUV3_ISP_V 116 + +#define MTUA4_ISP_V 120 +#define MTUB4_ISP_V 121 +#define MTUC4_ISP_V 122 +#define MTUD4_ISP_V 123 +#define MTUV4_ISP_V 124 + +#define ERI0_ISP_V 128 +#define RXI0_ISP_V 129 +#define TXI0_ISP_V 130 +#define TEI0_ISP_V 131 + +#define ERI1_ISP_V 132 +#define RXI1_ISP_V 133 +#define TXI1_ISP_V 134 +#define TEI1_ISP_V 135 + +#define ADI0_ISP_V 136 +#define ADI1_ISP_V 137 +#define DTC_ISP_V 140 /* Data Transfer Controller */ +#define CMT0_ISP_V 144 /* Compare Match Timer */ +#define CMT1_ISP_V 148 +#define WDT_ISP_V 152 /* Wtachdog Timer */ +#define CMI_ISP_V 153 /* BSC RAS interrupt */ +#define OEI_ISP_V 156 /* I/O Port */ +#define 
DREF_ISP_V CMI_ISP_V /* DRAM Refresh from BSC */ +#if 0 +#define PRT_ISP_V /* parity error - no equivalent */ +#endif + +/* dummy ISP */ +extern void _dummy_isp( void ); + +/* Non Maskable Interrupt */ +extern void _nmi_isp( void ); + +/* User Break Controller */ +extern void _usb_isp( void ); + +/* External interrupts 0-7 */ +extern void _irq0_isp( void ); +extern void _irq1_isp( void ); +extern void _irq2_isp( void ); +extern void _irq3_isp( void ); +extern void _irq4_isp( void ); +extern void _irq5_isp( void ); +extern void _irq6_isp( void ); +extern void _irq7_isp( void ); + +/* DMA - Controller */ +extern void _dma0_isp( void ); +extern void _dma1_isp( void ); +extern void _dma2_isp( void ); +extern void _dma3_isp( void ); + +/* Interrupt Timer Unit */ +/* Timer 0 */ +extern void _mtua0_isp( void ); +extern void _mtub0_isp( void ); +extern void _mtuc0_isp( void ); +extern void _mtud0_isp( void ); +extern void _mtuv0_isp( void ); +/* Timer 1 */ +extern void _mtua1_isp( void ); +extern void _mtub1_isp( void ); +extern void _mtuv1_isp( void ); +extern void _mtuu1_isp( void ); +/* Timer 2 */ +extern void _mtua2_isp( void ); +extern void _mtub2_isp( void ); +extern void _mtuv2_isp( void ); +extern void _mtuu2_isp( void ); +/* Timer 3 */ +extern void _mtua3_isp( void ); +extern void _mtub3_isp( void ); +extern void _mtuc3_isp( void ); +extern void _mtud3_isp( void ); +extern void _mtuv3_isp( void ); +/* Timer 4 */ +extern void _mtua4_isp( void ); +extern void _mtub4_isp( void ); +extern void _mtuc4_isp( void ); +extern void _mtud4_isp( void ); +extern void _mtuv4_isp( void ); + +/* serial interfaces */ +extern void _eri0_isp( void ); +extern void _rxi0_isp( void ); +extern void _txi0_isp( void ); +extern void _tei0_isp( void ); +extern void _eri1_isp( void ); +extern void _rxi1_isp( void ); +extern void _txi1_isp( void ); +extern void _tei1_isp( void ); + +/* ADC */ +extern void _adi0_isp( void ); +extern void _adi1_isp( void ); + +/* Data Transfer Controller */ 
+extern void _dtci_isp( void ); + +/* Compare Match Timer */ +extern void _cmt0_isp( void ); +extern void _cmt1_isp( void ); + +/* Watchdog Timer */ +extern void _wdt_isp( void ); + +/* DRAM refresh control unit of bus state controller */ +extern void _bsc_isp( void ); + +/* I/O Port */ +extern void _oei_isp( void ); + +/* Parity Control Unit of the Bus State Controllers */ +/* extern void _prt_isp( void ); */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/ispsh7750.h b/include/rtems/score/ispsh7750.h new file mode 100644 index 0000000000..396644a241 --- /dev/null +++ b/include/rtems/score/ispsh7750.h @@ -0,0 +1,62 @@ +/* + * This include file contains information pertaining to the Hitachi + * SH7750 processor. + * + * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia + * Author: Victor V. Vengerov <vvv@oktet.ru> + * + * Based on work of: + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * Modified to reflect isp entries for sh7045 processor: + * John M. Mills (jmills@tga.com) + * TGA Technologies, Inc. + * 100 Pinnacle Way, Suite 140 + * Norcross, GA 30071 U.S.A. + * + * + * This modified file may be copied and distributed in accordance + * the above-referenced license. It is provided for critique and + * developmental purposes without any warranty nor representation + * by the authors or by TGA Technologies. 
+ */ + +#ifndef __CPU_ISPS_H +#define __CPU_ISPS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/types.h> + +/* dummy ISP */ +extern void _dummy_isp( void ); + +extern void __ISR_Handler( uint32_t vector ); + +/* This variable contains VBR value used to pass control when debug, error + * or virtual memory exceptions occured. + */ +extern void *_VBR_Saved; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/isr.h b/include/rtems/score/isr.h new file mode 100644 index 0000000000..f76cbcbda8 --- /dev/null +++ b/include/rtems/score/isr.h @@ -0,0 +1,155 @@ +/** + * @file rtems/score/isr.h + * + * @brief Data Related to the Management of Processor Interrupt Levels + * + * This include file contains all the constants and structures associated + * with the management of processor interrupt levels. This handler + * supports interrupt critical sections, vectoring of user interrupt + * handlers, nesting of interrupts, and manipulating interrupt levels. + */ + +/* + * COPYRIGHT (c) 1989-2012. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ISR_H +#define _RTEMS_SCORE_ISR_H + +#include <rtems/score/isrlevel.h> + +/** + * @defgroup ScoreISR ISR Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which provides the foundation + * ISR services used in all of the APIs supported by RTEMS. + * + * The ISR Nest level counter variable is maintained as part of the + * per cpu data structure. + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * The following type defines the type used to manage the vectors. 
+ */ +typedef uint32_t ISR_Vector_number; + +/** + * Return type for ISR Handler + */ +typedef void ISR_Handler; + +#if (CPU_SIMPLE_VECTORED_INTERRUPTS == FALSE) + +typedef void * ISR_Handler_entry; + +#else +/** + * Pointer to an ISR Handler + */ +#if (CPU_ISR_PASSES_FRAME_POINTER == 1) +typedef ISR_Handler ( *ISR_Handler_entry )( + ISR_Vector_number, + CPU_Interrupt_frame * + ); +#else +typedef ISR_Handler ( *ISR_Handler_entry )( + ISR_Vector_number + ); +#endif + +/** + * The following declares the Vector Table. Application + * interrupt service routines are vectored by the ISR Handler via this table. + */ +extern ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS ]; +#endif + +/** + * @brief Initialize the ISR handler. + * + * This routine performs the initialization necessary for the ISR handler. + */ +void _ISR_Handler_initialization ( void ); + +/** + * @brief Install interrupt handler vector. + * + * This routine installs new_handler as the interrupt service routine + * for the specified vector. The previous interrupt service routine is + * returned as old_handler. + * + * LM32 Specific Information: + * XXX document implementation including references if appropriate + * + * @param[in] _vector is the vector number + * @param[in] _new_handler is ISR handler to install + * @param[in] _old_handler is a pointer to a variable which will be set + * to the old handler + * + * @retval *_old_handler will be set to the old ISR handler + */ +#define _ISR_Install_vector( _vector, _new_handler, _old_handler ) \ + _CPU_ISR_install_vector( _vector, _new_handler, _old_handler ) + +/** + * @brief ISR interrupt dispatcher. + * + * This routine is the interrupt dispatcher. ALL interrupts + * are vectored to this routine so that minimal context can be saved + * and setup performed before the application's high-level language + * interrupt service routine is invoked. 
After the application's + * interrupt service routine returns control to this routine, it + * will determine if a thread dispatch is necessary. If so, it will + * ensure that the necessary thread scheduling operations are + * performed when the outermost interrupt service routine exits. + * + * @note Typically implemented in assembly language. + */ +void _ISR_Handler( void ); + +/** + * @brief ISR wrapper for thread dispatcher. + * + * This routine provides a wrapper so that the routine + * @ref _Thread_Dispatch can be invoked when a reschedule is necessary + * at the end of the outermost interrupt service routine. This + * wrapper is necessary to establish the processor context needed + * by _Thread_Dispatch and to save the processor context which is + * corrupted by _Thread_Dispatch. This context typically consists + * of registers which are not preserved across routine invocations. + * + * @note Typically mplemented in assembly language. + */ +void _ISR_Dispatch( void ); + +/** + * @brief Checks if an ISR in progress. + * + * This function returns true if the processor is currently servicing + * and interrupt and false otherwise. A return value of true indicates + * that the caller is an interrupt service routine, NOT a thread. + * + * @retval This methods returns true when called from an ISR. + */ +bool _ISR_Is_in_progress( void ); + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/isrlevel.h b/include/rtems/score/isrlevel.h new file mode 100644 index 0000000000..2823df7d27 --- /dev/null +++ b/include/rtems/score/isrlevel.h @@ -0,0 +1,156 @@ +/** + * @file rtems/score/isrlevel.h + * + * @brief ISR Level Type + * + * This include file defines the ISR Level type. It exists to + * simplify include dependencies. It is part of the ISR Handler. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ISR_LEVEL_h +#define _RTEMS_SCORE_ISR_LEVEL_h + +#include <rtems/score/cpu.h> +#include <rtems/score/assert.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreISR ISR Handler + * + * @ingroup Score + * + * @addtogroup ScoreISR ISR Handler + */ +/**@{*/ + +/** + * The following type defines the control block used to manage + * the interrupt level portion of the status register. + */ +typedef uint32_t ISR_Level; + +/** + * @brief Disables interrupts on this processor. + * + * This macro disables all interrupts on this processor so that a critical + * section of code is protected from concurrent access by interrupts of this + * processor. Disabling of interrupts disables thread dispatching on the + * processor as well. + * + * On SMP configurations other processors can enter such sections if not + * protected by other means. + * + * @param[out] _level The argument @a _level will contain the previous + * interrupt mask level. + */ +#define _ISR_Disable( _level ) \ + do { \ + _CPU_ISR_Disable( _level ); \ + _Assert( _Debug_Is_owner_of_giant() ); \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + } while (0) + +/** + * @brief Enables interrupts on this processor. + * + * This macro restores the interrupt status on the processor with the + * interrupt level value obtained by _ISR_Disable(). It is used at the end of + * a critical section of code to enable interrupts so they can be processed + * again. + * + * @param[in] _level The interrupt level previously obtained by + * _ISR_Disable(). + */ +#define _ISR_Enable( _level ) \ + do { \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + _Assert( _Debug_Is_owner_of_giant() ); \ + _CPU_ISR_Enable( _level ); \ + } while (0) + +/** + * @brief Temporarily enables interrupts on this processor. 
+ * + * This macro temporarily enables interrupts to the previous + * interrupt mask level and then disables all interrupts so that + * the caller can continue into the second part of a critical + * section. + * + * This routine is used to temporarily enable interrupts + * during a long critical section. It is used in long sections of + * critical code when a point is reached at which interrupts can + * be temporarily enabled. Deciding where to flash interrupts + * in a long critical section is often difficult and the point + * must be selected with care to ensure that the critical section + * properly protects itself. + * + * @param[in] _level The interrupt level previously obtained by + * _ISR_Disable(). + */ +#define _ISR_Flash( _level ) \ + do { \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + _Assert( _Debug_Is_owner_of_giant() ); \ + _CPU_ISR_Flash( _level ); \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + } while (0) + +/** + * @brief Return current interrupt level. + * + * This routine returns the current interrupt level. + * + * LM32 Specific Information: + * XXX document implementation including references if appropriate + * + * @retval This method returns the current level. + */ +#define _ISR_Get_level() \ + _CPU_ISR_Get_level() + +/** + * @brief Set current interrupt level. + * + * This routine sets the current interrupt level to that specified + * by @a _new_level. The new interrupt level is effective when the + * routine exits. + * + * @param[in] _new_level contains the desired interrupt level. 
+ */ +#define _ISR_Set_level( _new_level ) \ + do { \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + _CPU_ISR_Set_level( _new_level ); \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + } while (0) + +#define _ISR_Disable_without_giant( _level ) \ + do { \ + _CPU_ISR_Disable( _level ); \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + } while (0) + +#define _ISR_Enable_without_giant( _level ) \ + do { \ + RTEMS_COMPILER_MEMORY_BARRIER(); \ + _CPU_ISR_Enable( _level ); \ + } while (0) + +/**@}*/ + +#ifdef __cplusplus +} +#endif +#endif diff --git a/include/rtems/score/isrlock.h b/include/rtems/score/isrlock.h new file mode 100644 index 0000000000..994eb48ed0 --- /dev/null +++ b/include/rtems/score/isrlock.h @@ -0,0 +1,368 @@ +/** + * @file + * + * @ingroup ScoreISRLocks + * + * @brief ISR Locks + */ + +/* + * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_ISR_LOCK_H +#define _RTEMS_SCORE_ISR_LOCK_H + +#include <rtems/score/isrlevel.h> +#include <rtems/score/smplock.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreISRLocks ISR Locks + * + * @ingroup ScoreISR + * + * @brief Low-level lock to protect critical sections accessed by threads and + * interrupt service routines. + * + * On single processor configurations the ISR locks degrade to simple ISR + * disable/enable sequences. No additional storage or objects are required. + * + * This synchronization primitive is supported on SMP configurations. Here SMP + * locks are used. + * + * @{ + */ + +/** + * @brief ISR lock control. + * + * @warning Empty structures are implementation-defined in C. GCC gives them a + * size of zero. In C++ empty structures have a non-zero size. 
+ */ +typedef struct { +#if defined( RTEMS_SMP ) + SMP_lock_Control Lock; +#endif +} ISR_lock_Control; + +/** + * @brief Local ISR lock context for acquire and release pairs. + */ +typedef struct { +#if defined( RTEMS_SMP ) + SMP_lock_Context Lock_context; +#else + ISR_Level isr_level; +#endif +#if defined( RTEMS_PROFILING ) + /** + * @brief The last interrupt disable instant in CPU counter ticks. + */ + CPU_Counter_ticks ISR_disable_instant; +#endif +} ISR_lock_Context; + +/** + * @brief Defines an ISR lock member. + * + * Do not add a ';' after this macro. + * + * @param _designator The designator for the interrupt lock. + */ +#if defined( RTEMS_SMP ) + #define ISR_LOCK_MEMBER( _designator ) ISR_lock_Control _designator; +#else + #define ISR_LOCK_MEMBER( _designator ) +#endif + +/** + * @brief Declares an ISR lock variable. + * + * Do not add a ';' after this macro. + * + * @param _qualifier The qualifier for the interrupt lock, e.g. extern. + * @param _designator The designator for the interrupt lock. + */ +#if defined( RTEMS_SMP ) + #define ISR_LOCK_DECLARE( _qualifier, _designator ) \ + _qualifier ISR_lock_Control _designator; +#else + #define ISR_LOCK_DECLARE( _qualifier, _designator ) +#endif + +/** + * @brief Defines an ISR lock variable. + * + * Do not add a ';' after this macro. + * + * @param _qualifier The qualifier for the interrupt lock, e.g. static. + * @param _designator The designator for the interrupt lock. + * @param _name The name for the interrupt lock. It must be a string. The + * name is only used if profiling is enabled. + */ +#if defined( RTEMS_SMP ) + #define ISR_LOCK_DEFINE( _qualifier, _designator, _name ) \ + _qualifier ISR_lock_Control _designator = { SMP_LOCK_INITIALIZER( _name ) }; +#else + #define ISR_LOCK_DEFINE( _qualifier, _designator, _name ) +#endif + +/** + * @brief Defines an ISR lock variable reference. + * + * Do not add a ';' after this macro. + * + * @param _designator The designator for the interrupt lock reference. 
+ * @param _target The target for the interrupt lock reference. + */ +#if defined( RTEMS_SMP ) + #define ISR_LOCK_REFERENCE( _designator, _target ) \ + ISR_lock_Control *_designator = _target; +#else + #define ISR_LOCK_REFERENCE( _designator, _target ) +#endif + +/** + * @brief Initializer for static initialization of ISR locks. + * + * @param _name The name for the interrupt lock. It must be a string. The + * name is only used if profiling is enabled. + */ +#if defined( RTEMS_SMP ) + #define ISR_LOCK_INITIALIZER( _name ) \ + { SMP_LOCK_INITIALIZER( _name ) } +#else + #define ISR_LOCK_INITIALIZER( _name ) \ + { } +#endif + +/** + * @brief Initializes an ISR lock. + * + * Concurrent initialization leads to unpredictable results. + * + * @param[in] _lock The ISR lock control. + * @param[in] _name The name for the ISR lock. This name must be a + * string persistent throughout the life time of this lock. The name is only + * used if profiling is enabled. + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Initialize( _lock, _name ) \ + _SMP_lock_Initialize( &( _lock )->Lock, _name ) +#else + #define _ISR_lock_Initialize( _lock, _name ) +#endif + +/** + * @brief Destroys an ISR lock. + * + * Concurrent destruction leads to unpredictable results. + * + * @param[in] _lock The ISR lock control. + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Destroy( _lock ) \ + _SMP_lock_Destroy( &( _lock )->Lock ) +#else + #define _ISR_lock_Destroy( _lock ) +#endif + +/** + * @brief Acquires an ISR lock. + * + * Interrupts will be disabled. On SMP configurations this function acquires + * an SMP lock. + * + * This function can be used in thread and interrupt context. + * + * @param[in] _lock The ISR lock control. + * @param[in] _context The local ISR lock context for an acquire and release + * pair. + * + * @see _ISR_lock_Release_and_ISR_enable(). 
+ */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_ISR_disable_and_acquire( _lock, _context ) \ + _SMP_lock_ISR_disable_and_acquire( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ) +#else + #define _ISR_lock_ISR_disable_and_acquire( _lock, _context ) \ + _ISR_Disable( ( _context )->isr_level ) +#endif + +/** + * @brief Releases an ISR lock. + * + * The interrupt status will be restored. On SMP configurations this function + * releases an SMP lock. + * + * This function can be used in thread and interrupt context. + * + * @param[in] _lock The ISR lock control. + * @param[in] _context The local ISR lock context for an acquire and release + * pair. + * + * @see _ISR_lock_ISR_disable_and_acquire(). + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Release_and_ISR_enable( _lock, _context ) \ + _SMP_lock_Release_and_ISR_enable( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ) +#else + #define _ISR_lock_Release_and_ISR_enable( _lock, _context ) \ + _ISR_Enable( ( _context )->isr_level ) +#endif + +/** + * @brief Acquires an ISR lock inside an ISR disabled section. + * + * The interrupt status will remain unchanged. On SMP configurations this + * function acquires an SMP lock. + * + * In case the executing context can be interrupted by higher priority + * interrupts and these interrupts enter the critical section protected by this + * lock, then the result is unpredictable. + * + * @param[in] _lock The ISR lock control. + * @param[in] _context The local ISR lock context for an acquire and release + * pair. + * + * @see _ISR_lock_Release(). + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Acquire( _lock, _context ) \ + _SMP_lock_Acquire( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ) +#else + #define _ISR_lock_Acquire( _lock, _context ) +#endif + +/** + * @brief Releases an ISR lock inside an ISR disabled section. + * + * The interrupt status will remain unchanged. On SMP configurations this + * function releases an SMP lock. 
+ * + * @param[in] _lock The ISR lock control. + * @param[in] _context The local ISR lock context for an acquire and release + * pair. + * + * @see _ISR_lock_Acquire(). + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Release( _lock, _context ) \ + _SMP_lock_Release( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ) +#else + #define _ISR_lock_Release( _lock, _context ) +#endif + +/** + * @brief Flashes an ISR lock. + * + * On uni-processor configurations this a simple _ISR_Flash(). On SMP + * configurations this function releases an SMP lock, restores the interrupt + * status, then disables interrupts and acquires the SMP lock again. + * + * This function can be used in thread and interrupt context. + * + * @param[in] _lock The ISR lock control. + * @param[in] _context The local ISR lock context for an acquire and release + * pair. + * + * @see _ISR_lock_ISR_disable_and_acquire() and + * _ISR_lock_Release_and_ISR_enable(). + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_Flash( _lock, _context ) \ + _SMP_lock_Release_and_ISR_enable( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ); \ + _SMP_lock_ISR_disable_and_acquire( \ + &( _lock )->Lock, \ + &( _context )->Lock_context \ + ) +#else + #define _ISR_lock_Flash( _lock, _context ) \ + _ISR_Flash( ( _context )->isr_level ) +#endif + +#if defined( RTEMS_PROFILING ) + #define _ISR_lock_ISR_disable_profile( _context ) \ + ( _context )->ISR_disable_instant = _CPU_Counter_read(); +#else + #define _ISR_lock_ISR_disable_profile( _context ) +#endif + +/** + * @brief Disables interrupts and saves the previous interrupt state in the ISR + * lock context. + * + * This function can be used in thread and interrupt context. + * + * @param[in] _context The local ISR lock context to store the interrupt state. + * + * @see _ISR_lock_ISR_enable(). 
+ */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_ISR_disable( _context ) \ + do { \ + _ISR_Disable_without_giant( ( _context )->Lock_context.isr_level ); \ + _ISR_lock_ISR_disable_profile( _context ) \ + } while ( 0 ) +#else + #define _ISR_lock_ISR_disable( _context ) \ + do { \ + _ISR_Disable( ( _context )->isr_level ); \ + _ISR_lock_ISR_disable_profile( _context ) \ + } while ( 0 ) +#endif + +/** + * @brief Restores the saved interrupt state of the ISR lock context. + * + * This function can be used in thread and interrupt context. + * + * @param[in] _context The local ISR lock context containing the saved + * interrupt state. + * + * @see _ISR_lock_ISR_disable(). + */ +#if defined( RTEMS_SMP ) + #define _ISR_lock_ISR_enable( _context ) \ + _ISR_Enable_without_giant( ( _context )->Lock_context.isr_level ) +#else + #define _ISR_lock_ISR_enable( _context ) \ + _ISR_Enable( ( _context )->isr_level ) +#endif + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_ISR_LOCK_H */ diff --git a/include/rtems/score/lm32.h b/include/rtems/score/lm32.h new file mode 100644 index 0000000000..e8fecdf2f6 --- /dev/null +++ b/include/rtems/score/lm32.h @@ -0,0 +1,112 @@ +/** + * @file + * + * @brief LM32 Set up Basic CPU Dependency Settings Based on Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the NO CPU port. + */ +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ * + * Jukka Pietarinen <jukka.pietarinen@mrf.fi>, 2008, + * Micro-Research Finland Oy + */ + +#ifndef _RTEMS_SCORE_LM32_H +#define _RTEMS_SCORE_LM32_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NO CPU family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +#if defined(rtems_multilib) +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. + */ + +#define CPU_MODEL_NAME "rtems_multilib" +#define LM32_HAS_FPU 0 + +#elif defined(__lm32__) + +#define CPU_MODEL_NAME "lm32" +#define LM32_HAS_FPU 0 + +#else + +#error "Unsupported CPU Model" + +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "LM32" + +#ifdef __cplusplus +} +#endif + +#define lm32_read_interrupts( _ip) \ + __asm__ volatile ("rcsr %0, ip":"=r"(_ip)); + +#define lm32_disable_interrupts( _level ) \ + do { register uint32_t ie; \ + __asm__ volatile ("rcsr %0,ie":"=r"(ie)); \ + (_level) = ie; \ + ie &= (~0x0001); \ + __asm__ volatile ("wcsr ie,%0"::"r"(ie)); \ + } while (0) + +#define lm32_enable_interrupts( _level ) \ + __asm__ volatile ("wcsr ie,%0"::"r"(_level)); + +#define lm32_flash_interrupts( _level ) \ + do { register uint32_t ie; \ + __asm__ volatile ("wcsr ie,%0"::"r"(_level)); \ + ie = (_level) & (~0x0001); \ + __asm__ volatile ("wcsr ie,%0"::"r"(ie)); \ + } while (0) + +#define lm32_interrupt_unmask( _mask ) \ + do { register uint32_t im; \ + __asm__ volatile ("rcsr %0,im":"=r"(im)); \ + im |= _mask; \ + __asm__ volatile ("wcsr im,%0"::"r"(im)); \ + } while (0) + +#define lm32_interrupt_mask( _mask ) \ + do { register uint32_t im; \ + __asm__ volatile ("rcsr %0,im":"=r"(im)); \ + im &= ~(_mask); \ 
+ __asm__ volatile ("wcsr im,%0"::"r"(im)); \ + } while (0) + +#define lm32_interrupt_ack( _mask ) \ + do { register uint32_t ip = _mask; \ + __asm__ volatile ("wcsr ip,%0"::"r"(ip)); \ + } while (0) + +#endif /* _RTEMS_SCORE_LM32_H */ diff --git a/include/rtems/score/m32c.h b/include/rtems/score/m32c.h new file mode 100644 index 0000000000..e1936fdcfb --- /dev/null +++ b/include/rtems/score/m32c.h @@ -0,0 +1,76 @@ +/** + * @file + * + * @brief M32C Set up Basic CPU Dependency Settings Based on Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the NO CPU port. + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_NO_CPU_H +#define _RTEMS_SCORE_NO_CPU_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NO CPU family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +#if defined(rtems_multilib) +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. + */ + +#define CPU_MODEL_NAME "rtems_multilib" +#define NOCPU_HAS_FPU 1 + +#elif defined(__m32c__) + +#define CPU_MODEL_NAME "m32c" +#define M32C_HAS_FPU 0 + +#else + +#error "Unsupported CPU Model" + +#endif + +/* + * Define the name of the CPU family. 
+ */ + +#define CPU_NAME "m32c" + +#define m32c_get_flg( _flg ) \ + __asm__ volatile( "stc flg, %0" : "=r" (_flg)) + +#define m32c_set_flg( _flg ) \ + __asm__ volatile( "ldc %1, flg" : "=r" (_flg) : "r" (_flg) ) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_NO_CPU_H */ diff --git a/include/rtems/score/m32r.h b/include/rtems/score/m32r.h new file mode 100644 index 0000000000..cd9113460c --- /dev/null +++ b/include/rtems/score/m32r.h @@ -0,0 +1,70 @@ +/** + * @file + * + * @brief Set up Basic CPU Dependency Settings Based on Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the NO CPU port. + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_NO_CPU_H +#define _RTEMS_SCORE_NO_CPU_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NO CPU family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +#if defined(rtems_multilib) +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. + */ + +#define CPU_MODEL_NAME "rtems_multilib" +#define NOCPU_HAS_FPU 1 + +#elif defined(__m32r__) + +#define CPU_MODEL_NAME "m32r" +#define NOCPU_HAS_FPU 1 + +#else + +#error "Unsupported CPU Model" + +#endif + +/* + * Define the name of the CPU family. 
+ */ + +#define CPU_NAME "NO CPU" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_NO_CPU_H */ diff --git a/include/rtems/score/m68k.h b/include/rtems/score/m68k.h new file mode 100644 index 0000000000..818578c078 --- /dev/null +++ b/include/rtems/score/m68k.h @@ -0,0 +1,502 @@ +/** + * @file + * + * @brief Motorola M68K CPU Dependent Source + * + * This include file contains information pertaining to the Motorola + * m68xxx processor family. + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_M68K_H +#define _RTEMS_SCORE_M68K_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This section contains the information required to build + * RTEMS for a particular member of the Motorola MC68xxx + * family. It does this by setting variables to indicate + * which implementation dependent features are present in + * a particular member of the family. + * + * Currently recognized: + * -m68000 + * -m68000 -msoft-float + * -m68020 + * -m68020 -msoft-float + * -m68030 + * -m68040 -msoft-float + * -m68040 + * -m68040 -msoft-float + * -m68060 + * -m68060 -msoft-float + * -m68302 (no FP) (deprecated, use -m68000) + * -m68332 (no FP) (deprecated, use -mcpu32) + * -mcpu32 (no FP) + * -m5200 (no FP) + * -m528x (no FP, ISA A+) + * + * As of gcc 2.8.1 and egcs 1.1, there is no distinction made between + * the CPU32 and CPU32+. The option -mcpu32 generates code which can + * be run on either core. RTEMS distinguishes between these two cores + * because they have different alignment rules which impact performance. + * If you are using a CPU32+, then the symbol RTEMS__mcpu32p__ should + * be defined in your custom file (see make/custom/gen68360.cfg for an + * example of how to do this. 
If gcc ever distinguishes between these + * two cores, then RTEMS__mcpu32p__ usage will be replaced with the + * appropriate compiler defined predefine. + * + * Here is some information on the 040 variants (courtesy of Doug McBride, + * mcbride@rodin.colorado.edu): + * + * "The 68040 is a superset of the 68EC040 and the 68LC040. The + * 68EC040 and 68LC040 do not have FPU's. The 68LC040 and the + * 68EC040 have renamed the DLE pin as JS0 which must be tied to + * Gnd or Vcc. The 68EC040 has renamed the MDIS pin as JS1. The + * 68EC040 has access control units instead of memory management units. + * The 68EC040 should not have the PFLUSH or PTEST instructions executed + * (cause an indeterminate result). The 68EC040 and 68LC040 do not + * implement the DLE or multiplexed bus modes. The 68EC040 does not + * implement the output buffer impedance selection mode of operation." + * + * M68K_HAS_EXTB_L is used to enable/disable usage of the extb.l instruction + * which is not available for 68000 or 68ec000 cores (68000, 68001, 68008, + * 68010, 68302, 68306, 68307). This instruction is available on the 68020 + * up and the cpu32 based models. + * + * M68K_HAS_MISALIGNED is non-zero if the CPU allows byte-misaligned + * data access (68020, 68030, 68040, 68060, CPU32+). + * + * NOTE: + * Eventually it would be nice to evaluate doing a lot of this section + * by having each model specify which core it uses and then go from there. + */ + +/* + * Handle the Coldfire family based on the instruction set. 
+ */ +#if defined(__mcoldfire__) + +# define CPU_NAME "Motorola ColdFire" + +# if defined(__mcfisaa__) +/* Motorola ColdFire ISA A */ +# define CPU_MODEL_NAME "mcfisaa" +# define M68K_HAS_VBR 1 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_PREINDEXING 0 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 + +# elif defined(__mcfisaaplus__) +/* Motorola ColdFire ISA A+ */ +# define CPU_MODEL_NAME "mcfisaaplus" +# define M68K_HAS_VBR 1 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_PREINDEXING 0 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 + +# elif defined(__mcfisab__) +/* Motorola ColdFire ISA B */ +# define CPU_MODEL_NAME "mcfisab" +# define M68K_HAS_VBR 1 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_PREINDEXING 0 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 + +# else +# error "Unsupported Coldfire ISA -- Please notify RTEMS" +# endif + +/* + * Assume the FPU support is independent. I think it is just the ISA B + * instruction set. + */ +# if defined (__mcffpu__) +# define M68K_HAS_FPU 1 + /* + * td: can we be sure that all CFs with FPU also have an EMAC? + */ +# define M68K_HAS_EMAC 1 +# define M68K_HAS_FPSP_PACKAGE 0 +# else +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 +# endif + +/* + * Tiny RTEMS support. Small stack and limited priorities. + * + * These CPUs have very limited on-CPU memory which cannot + * be expanded. We have to be gentle with them or nothing + * will every run. + */ +# if (defined(__mcf_cpu_52221) || \ + defined(__mcf_cpu_52223) || \ + defined(__mcf_cpu_52230) || \ + defined(__mcf_cpu_52231) || \ + defined(__mcf_cpu_52232) || \ + defined(__mcf_cpu_52233) || \ + defined(__mcf_cpu_52234) || \ + defined(__mcf_cpu_52235) || \ + defined(__mcf_cpu_52225) || \ + defined(__mcf_cpu_52235)) + #define M68K_CPU_STACK_MINIMUM_SIZE 1024 + /* Define the lowest priority. 
Based from 0 to this is 16 levels. */ + #define M68K_CPU_PRIORITY_MAXIMUM 15 +# else + #define M68K_CPU_STACK_MINIMUM_SIZE 4096 + /* Use the default number of priorities */ + #define M68K_CPU_PRIORITY_MAXIMUM 255 +# endif + +#else + +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. Notice the only exception to this is that + * gcc does not distinguish between CPU32 and CPU32+. This + * feature selection logic is setup such that if RTEMS__mcpu32p__ + * is defined, then CPU32+ rules are used. Otherwise, the safe + * but less efficient CPU32 rules are used for the CPU32+. + */ + +# define CPU_NAME "Motorola MC68xxx" + +/* + * One stack size fits all 68000 processors. + */ +# define M68K_CPU_STACK_MINIMUM_SIZE 4096 + +# if (defined(__mc68020__) && !defined(__mcpu32__)) + +# define CPU_MODEL_NAME "m68020" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 1 +# define M68K_HAS_BFFFO 1 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 +# if defined (__HAVE_68881__) +# define M68K_HAS_FPU 1 +# define M68K_HAS_FPSP_PACKAGE 0 +# else +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 +# endif + +# elif defined(__mc68030__) + +# define CPU_MODEL_NAME "m68030" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 1 +# define M68K_HAS_BFFFO 1 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 +# if defined (__HAVE_68881__) +# define M68K_HAS_FPU 1 +# define M68K_HAS_FPSP_PACKAGE 0 +# else +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 +# endif + +# elif defined(__mc68040__) + +# define CPU_MODEL_NAME "m68040" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 1 +# define M68K_HAS_BFFFO 1 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 +# if defined (__HAVE_68881__) +# define M68K_HAS_FPU 1 +# define M68K_HAS_FPSP_PACKAGE 1 +# else +# define M68K_HAS_FPU 0 +# define 
M68K_HAS_FPSP_PACKAGE 0 +# endif + +# elif defined(__mc68060__) + +# define CPU_MODEL_NAME "m68060" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_BFFFO 1 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 +# if defined (__HAVE_68881__) +# define M68K_HAS_FPU 1 +# define M68K_HAS_FPSP_PACKAGE 0 +# else +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 +# endif + +# elif defined(__mc68302__) + +# define CPU_MODEL_NAME "m68302" +# define M68K_HAS_VBR 0 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_PREINDEXING 0 +# define M68K_HAS_EXTB_L 0 +# define M68K_HAS_MISALIGNED 0 +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 + + /* gcc and egcs do not distinguish between CPU32 and CPU32+ */ +# elif defined(RTEMS__mcpu32p__) + +# define CPU_MODEL_NAME "mcpu32+" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 1 +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 + +# elif defined(__mcpu32__) + +# define CPU_MODEL_NAME "mcpu32" +# define M68K_HAS_VBR 1 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_PREINDEXING 1 +# define M68K_HAS_EXTB_L 1 +# define M68K_HAS_MISALIGNED 0 +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 + +# elif defined(__mc68000__) + +# define CPU_MODEL_NAME "m68000" +# define M68K_HAS_VBR 0 +# define M68K_HAS_SEPARATE_STACKS 0 +# define M68K_HAS_BFFFO 0 +# define M68K_HAS_PREINDEXING 0 +# define M68K_HAS_EXTB_L 0 +# define M68K_HAS_MISALIGNED 0 +# if defined (__HAVE_68881__) +# define M68K_HAS_FPU 1 +# define M68K_HAS_FPSP_PACKAGE 0 +# else +# define M68K_HAS_FPU 0 +# define M68K_HAS_FPSP_PACKAGE 0 +# endif + +# else + +# error "Unsupported 68000 CPU model -- are you sure you're running a 68k compiler?" 
+ +# endif + +/* + * No Tiny RTEMS support on the standard 68000 family. + */ +# define M68K_CPU_STACK_MINIMUM_SIZE 4096 +# define M68K_CPU_PRIORITY_MAXIMUM 255 + +#endif + +/* + * OBSOLETE: Backward compatibility only - Don't use. + * Use __mcoldfire__ instead. + */ +#if defined(__mcoldfire__) +#define M68K_COLDFIRE_ARCH 1 +#else +#define M68K_COLDFIRE_ARCH 0 +#endif + +#ifndef ASM + +#if ( defined(__mcoldfire__) ) +#define m68k_disable_interrupts( _level ) \ + do { register uint32_t _tmpsr = 0x0700; \ + __asm__ volatile ( "move.w %%sr,%0\n\t" \ + "or.l %0,%1\n\t" \ + "move.w %1,%%sr" \ + : "=d" (_level), "=d"(_tmpsr) : "1"(_tmpsr) \ + : "cc" ); \ + } while( 0 ) +#else +#define m68k_disable_interrupts( _level ) \ + __asm__ volatile ( "move.w %%sr,%0\n\t" \ + "or.w #0x0700,%%sr" \ + : "=d" (_level) \ + : : "cc" ) +#endif + +#define m68k_enable_interrupts( _level ) \ + __asm__ volatile ( "move.w %0,%%sr " : : "d" (_level) : "cc"); + +#if ( defined(__mcoldfire__) ) +#define m68k_flash_interrupts( _level ) \ + do { register uint32_t _tmpsr = 0x0700; \ + asm volatile ( "move.w %2,%%sr\n\t" \ + "or.l %2,%1\n\t" \ + "move.w %1,%%sr" \ + : "=d"(_tmpsr) : "0"(_tmpsr), "d"(_level) \ + : "cc"); \ + } while( 0 ) +#else +#define m68k_flash_interrupts( _level ) \ + __asm__ volatile ( "move.w %0,%%sr\n\t" \ + "or.w #0x0700,%%sr" \ + : : "d" (_level) \ + : "cc" ) +#endif + +#define m68k_get_interrupt_level( _level ) \ + do { \ + register uint32_t _tmpsr; \ + \ + __asm__ volatile( "move.w %%sr,%0" : "=d" (_tmpsr)); \ + _level = (_tmpsr & 0x0700) >> 8; \ + } while (0) + +#define m68k_set_interrupt_level( _newlevel ) \ + do { \ + register uint32_t _tmpsr; \ + \ + __asm__ volatile( "move.w %%sr,%0" : "=d" (_tmpsr)); \ + _tmpsr = (_tmpsr & 0xf8ff) | ((_newlevel) << 8); \ + __asm__ volatile( "move.w %0,%%sr" : : "d" (_tmpsr)); \ + } while (0) + +#if ( M68K_HAS_VBR == 1 && !defined(__mcoldfire__) ) +#define m68k_get_vbr( vbr ) \ + __asm__ volatile ( "movec %%vbr,%0 " : "=r" (vbr)) + 
+#define m68k_set_vbr( vbr ) \ + __asm__ volatile ( "movec %0,%%vbr " : : "r" (vbr)) + +#elif ( defined(__mcoldfire__) ) +extern void* _VBR; +#define m68k_get_vbr( _vbr ) _vbr = &_VBR + +#define m68k_set_vbr( _vbr ) \ + do { \ + __asm__ volatile ( "movec %0,%%vbr " : : "r" (_vbr)); \ + _VBR = (void *)_vbr; \ + } while(0) + +#else +#define m68k_get_vbr( _vbr ) _vbr = (void *)_VBR +#define m68k_set_vbr( _vbr ) +#endif + +/* + * Access Control Registers + */ +#define m68k_set_cacr(_cacr) __asm__ volatile ("movec %0,%%cacr" : : "d" (_cacr)) +#define m68k_set_acr0(_acr0) __asm__ volatile ("movec %0,%%acr0" : : "d" (_acr0)) +#define m68k_set_acr1(_acr1) __asm__ volatile ("movec %0,%%acr1" : : "d" (_acr1)) + +/* + * The following routine swaps the endian format of an unsigned int. + * It must be static because it is referenced indirectly. + */ +#if ( defined(__mcoldfire__) ) + +/* There are no rotate commands in Coldfire architecture. We will use + * generic implementation of endian swapping for Coldfire. 
+ */ +static inline uint32_t m68k_swap_u32( + uint32_t value + ) +{ + uint32_t byte1, byte2, byte3, byte4, swapped; + + byte4 = (value >> 24) & 0xff; + byte3 = (value >> 16) & 0xff; + byte2 = (value >> 8) & 0xff; + byte1 = value & 0xff; + + swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4; + return( swapped ); +} + +static inline uint16_t m68k_swap_u16( + uint16_t value +) +{ + return (((value & 0xff) << 8) | ((value >> 8) & 0xff)); +} + +#else + +static inline uint32_t m68k_swap_u32( + uint32_t value +) +{ + uint32_t swapped = value; + + __asm__ volatile( "rorw #8,%0" : "=d" (swapped) : "0" (swapped) ); + __asm__ volatile( "swap %0" : "=d" (swapped) : "0" (swapped) ); + __asm__ volatile( "rorw #8,%0" : "=d" (swapped) : "0" (swapped) ); + + return( swapped ); +} + +static inline uint16_t m68k_swap_u16( + uint16_t value +) +{ + uint16_t swapped = value; + + __asm__ volatile( "rorw #8,%0" : "=d" (swapped) : "0" (swapped) ); + + return( swapped ); +} +#endif + +#define CPU_swap_u32( value ) m68k_swap_u32( value ) +#define CPU_swap_u16( value ) m68k_swap_u16( value ) + + +/* + * _CPU_virtual_to_physical + * + * This function is used to map virtual addresses to physical + * addresses. + * + * FIXME: ASSUMES THAT VIRTUAL ADDRESSES ARE THE SAME AS THE + * PHYSICAL ADDRESSES + */ +static inline void * _CPU_virtual_to_physical ( + const void * d_addr ) +{ + return (void *) d_addr; +} + + +#endif /* !ASM */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_M68K_H */ diff --git a/include/rtems/score/mips.h b/include/rtems/score/mips.h new file mode 100644 index 0000000000..4c2c53fa6d --- /dev/null +++ b/include/rtems/score/mips.h @@ -0,0 +1,299 @@ +/** + * @file rtems/score/mips.h + * + * @brief Information to build RTEMS for a "no cpu" while in protected mode. + * + * This file contains the information required to build + * RTEMS for a particular member of the "no cpu" + * family when executing in protected mode. 
It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + */ + +/* + * COPYRIGHT (c) 1989-2001. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_MIPS_H +#define _RTEMS_SCORE_MIPS_H + +/** + * @defgroup ScoreMips RTEMS no cpu Build Information + * + * @ingroup Score + * + */ +/**@{*/ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ASM +#include <rtems/mips/idtcpu.h> +#endif + +/* + * SR bits that enable/disable interrupts + * + * NOTE: XXX what about SR_ERL? + */ + +#if (__mips == 3) || (__mips == 32) +#ifdef ASM +#define SR_INTERRUPT_ENABLE_BITS 0x01 +#else +#define SR_INTERRUPT_ENABLE_BITS SR_IE +#endif + +#elif __mips == 1 +#define SR_INTERRUPT_ENABLE_BITS SR_IEC + +#else +#error "mips interrupt enable bits: unknown architecture level!" +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the "no cpu" + * family when executing in protected mode. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + */ + +#if defined(__mips_soft_float) +#define MIPS_HAS_FPU 0 +#else +#define MIPS_HAS_FPU 1 +#endif + + +#if (__mips == 1) +#define CPU_MODEL_NAME "ISA Level 1 or 2" +#elif (__mips == 3) || (__mips == 32) +#if defined(__mips64) +#define CPU_MODEL_NAME "ISA Level 4" +#else +#define CPU_MODEL_NAME "ISA Level 3" +#endif +#else +#error "Unknown MIPS ISA level" +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "MIPS" + +/* + * RTEMS Vector numbers for exception conditions. This is a direct + * map to the causes. 
+ */ + +#define MIPS_EXCEPTION_BASE 0 + +#define MIPS_EXCEPTION_INT MIPS_EXCEPTION_BASE+0 +#define MIPS_EXCEPTION_MOD MIPS_EXCEPTION_BASE+1 +#define MIPS_EXCEPTION_TLBL MIPS_EXCEPTION_BASE+2 +#define MIPS_EXCEPTION_TLBS MIPS_EXCEPTION_BASE+3 +#define MIPS_EXCEPTION_ADEL MIPS_EXCEPTION_BASE+4 +#define MIPS_EXCEPTION_ADES MIPS_EXCEPTION_BASE+5 +#define MIPS_EXCEPTION_IBE MIPS_EXCEPTION_BASE+6 +#define MIPS_EXCEPTION_DBE MIPS_EXCEPTION_BASE+7 +#define MIPS_EXCEPTION_SYSCALL MIPS_EXCEPTION_BASE+8 +#define MIPS_EXCEPTION_BREAK MIPS_EXCEPTION_BASE+9 +#define MIPS_EXCEPTION_RI MIPS_EXCEPTION_BASE+10 +#define MIPS_EXCEPTION_CPU MIPS_EXCEPTION_BASE+11 +#define MIPS_EXCEPTION_OVERFLOW MIPS_EXCEPTION_BASE+12 +#define MIPS_EXCEPTION_TRAP MIPS_EXCEPTION_BASE+13 +#define MIPS_EXCEPTION_VCEI MIPS_EXCEPTION_BASE+14 +/* FPE only on mips2 and higher */ +#define MIPS_EXCEPTION_FPE MIPS_EXCEPTION_BASE+15 +#define MIPS_EXCEPTION_C2E MIPS_EXCEPTION_BASE+16 +/* 17-22 reserved */ +#define MIPS_EXCEPTION_WATCH MIPS_EXCEPTION_BASE+23 +/* 24-30 reserved */ +#define MIPS_EXCEPTION_VCED MIPS_EXCEPTION_BASE+31 + +#define MIPS_INTERRUPT_BASE MIPS_EXCEPTION_BASE+32 + +/* + * Some macros to access registers + */ + +#define mips_get_sr( _x ) \ + do { \ + __asm__ volatile( "mfc0 %0, $12; nop" : "=r" (_x) : ); \ + } while (0) + +#define mips_set_sr( _x ) \ + do { \ + register unsigned int __x = (_x); \ + __asm__ volatile( "mtc0 %0, $12; nop" : : "r" (__x) ); \ + } while (0) + + +/* + * Access the Cause register + */ + +#define mips_get_cause( _x ) \ + do { \ + __asm__ volatile( "mfc0 %0, $13; nop" : "=r" (_x) : ); \ + } while (0) + + +#define mips_set_cause( _x ) \ + do { \ + register unsigned int __x = (_x); \ + __asm__ volatile( "mtc0 %0, $13; nop" : : "r" (__x) ); \ + } while (0) + + + + +/* + * Access the Debug Cache Invalidate Control register + */ + +#define mips_get_dcic( _x ) \ + do { \ + __asm__ volatile( "mfc0 %0, $7; nop" : "=r" (_x) : ); \ + } while (0) + + +#define mips_set_dcic( _x ) \ 
+ do { \ + register unsigned int __x = (_x); \ + __asm__ volatile( "mtc0 %0, $7; nop" : : "r" (__x) ); \ + } while (0) + + + + +/* + * Access the Breakpoint Program Counter & Mask registers + * (_x for BPC, _y for mask) + */ + +#define mips_get_bpcrm( _x, _y ) \ + do { \ + __asm__ volatile( "mfc0 %0, $3; nop" : "=r" (_x) : ); \ + __asm__ volatile( "mfc0 %0, $11; nop" : "=r" (_y) : ); \ + } while (0) + + +#define mips_set_bpcrm( _x, _y ) \ + do { \ + register unsigned int __x = (_x); \ + register unsigned int __y = (_y); \ + __asm__ volatile( "mtc0 %0, $11; nop" : : "r" (__y) ); \ + __asm__ volatile( "mtc0 %0, $3; nop" : : "r" (__x) ); \ + } while (0) + + + + + + +/* + * Access the Breakpoint Data Address & Mask registers + * (_x for BDA, _y for mask) + */ + +#define mips_get_bdarm( _x, _y ) \ + do { \ + __asm__ volatile( "mfc0 %0, $5; nop" : "=r" (_x) : ); \ + __asm__ volatile( "mfc0 %0, $9; nop" : "=r" (_y) : ); \ + } while (0) + + +#define mips_set_bdarm( _x, _y ) \ + do { \ + register unsigned int __x = (_x); \ + register unsigned int __y = (_y); \ + __asm__ volatile( "mtc0 %0, $9; nop" : : "r" (__y) ); \ + __asm__ volatile( "mtc0 %0, $5; nop" : : "r" (__x) ); \ + } while (0) + + + + + + + +/* + * Access FCR31 + */ + +#if ( MIPS_HAS_FPU == 1 ) + +#define mips_get_fcr31( _x ) \ + do { \ + __asm__ volatile( "cfc1 %0, $31; nop" : "=r" (_x) : ); \ + } while(0) + + +#define mips_set_fcr31( _x ) \ + do { \ + register unsigned int __x = (_x); \ + __asm__ volatile( "ctc1 %0, $31; nop" : : "r" (__x) ); \ + } while(0) + +#else + +#define mips_get_fcr31( _x ) +#define mips_set_fcr31( _x ) + +#endif + +/* + * Manipulate interrupt mask + * + * mips_unmask_interrupt( _mask) + * enables interrupts - mask is positioned so it only needs to be or'ed + * into the status reg. This also does some other things !!!! Caution + * should be used if invoking this while in the middle of a debugging + * session where the client may have nested interrupts. 
+ * + * mips_mask_interrupt( _mask ) + * disable the interrupt - mask is the complement of the bits to be + * cleared - i.e. to clear ext int 5 the mask would be - 0xffff7fff + * + * + * NOTE: mips_mask_interrupt() used to be disable_int(). + * mips_unmask_interrupt() used to be enable_int(). + * + */ + +#define mips_enable_in_interrupt_mask( _mask ) \ + do { \ + unsigned int _sr; \ + mips_get_sr( _sr ); \ + _sr |= (_mask); \ + mips_set_sr( _sr ); \ + } while (0) + +#define mips_disable_in_interrupt_mask( _mask ) \ + do { \ + unsigned int _sr; \ + mips_get_sr( _sr ); \ + _sr &= ~(_mask); \ + mips_set_sr( _sr ); \ + } while (0) + +#ifdef __cplusplus +} +#endif + +/**@}*/ +#endif /* _RTEMS_SCORE_MIPS_H */ +/* end of include file */ diff --git a/include/rtems/score/moxie.h b/include/rtems/score/moxie.h new file mode 100644 index 0000000000..66f771aadf --- /dev/null +++ b/include/rtems/score/moxie.h @@ -0,0 +1,43 @@ +/** + * @file rtems/score/moxie.h + */ + +/* + * This file contains information pertaining to the Moxie processor. + * + * COPYRIGHT (c) 2011, 2013 + * Anthony Green + * + * Based on code with the following copyright... + * COPYRIGHT (c) 1989-1999, 2010. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_MOXIE_H +#define _RTEMS_SCORE_MOXIE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the "moxie" + * family when executing in protected mode. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. 
+ */ + +#define CPU_NAME "Moxie" +#define CPU_MODEL_NAME "MoxieLite" + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/mrsp.h b/include/rtems/score/mrsp.h new file mode 100644 index 0000000000..08f96ac4ac --- /dev/null +++ b/include/rtems/score/mrsp.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_MRSP_H +#define _RTEMS_SCORE_MRSP_H + +#include <rtems/score/cpuopts.h> + +#if defined(RTEMS_SMP) + +#include <rtems/score/chain.h> +#include <rtems/score/isrlock.h> +#include <rtems/score/scheduler.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreMRSP Multiprocessor Resource Sharing Protocol Handler + * + * @ingroup Score + * + * @brief Multiprocessor Resource Sharing Protocol (MrsP). + * + * The Multiprocessor Resource Sharing Protocol (MrsP) is defined in A. Burns + * and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource + * Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on + * Real-Time Systems (ECRTS 2013), July 2013. It is a generalization of the + * Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a + * ceiling priority per scheduler instance. A task obtaining or owning a MrsP + * semaphore will execute with the ceiling priority for its scheduler instance + * as specified by the MrsP semaphore object. Tasks waiting to get ownership + * of a MrsP semaphore will not relinquish the processor voluntarily. 
In case + * the owner of a MrsP semaphore gets preempted it can ask all tasks waiting + * for this semaphore to help out and temporarily borrow the right to execute + * on one of their assigned processors. + * + * @{ + */ + +/** + * @brief MrsP status code. + * + * The values are chosen to directly map to RTEMS status codes. In case this + * implementation is used for other APIs, then for example the errno values can + * be added with a bit shift. + */ +typedef enum { + MRSP_SUCCESSFUL = 0, + MRSP_TIMEOUT = 6, + MRSP_INVALID_NUMBER = 10, + MRSP_RESOUCE_IN_USE = 12, + MRSP_UNSATISFIED = 13, + MRSP_INCORRECT_STATE = 14, + MRSP_INVALID_PRIORITY = 19, + MRSP_NOT_OWNER_OF_RESOURCE = 23, + MRSP_NO_MEMORY = 26, + + /** + * @brief Internal state used for MRSP_Rival::status to indicate that this + * rival waits for resource ownership. + */ + MRSP_WAIT_FOR_OWNERSHIP = 255 +} MRSP_Status; + +typedef struct MRSP_Control MRSP_Control; + +/** + * @brief MrsP rival. + * + * The rivals are used by threads waiting for resource ownership. They are + * registered in the MrsP control block. + */ +typedef struct { + /** + * @brief The node for registration in the MrsP rival chain. + * + * The chain operations are protected by the MrsP control lock. + * + * @see MRSP_Control::Rivals. + */ + Chain_Node Node; + + /** + * @brief The corresponding MrsP control block. + */ + MRSP_Control *resource; + + /** + * @brief Identification of the rival thread. + */ + Thread_Control *thread; + + /** + * @brief The initial priority of the thread at the begin of the resource + * obtain sequence. + * + * Used to restore the priority after a release of this resource or timeout. + */ + Priority_Control initial_priority; + + /** + * @brief The initial help state of the thread at the begin of the resource + * obtain sequence. + * + * Used to restore this state after a timeout. + */ + Scheduler_Help_state initial_help_state; + + /** + * @brief The rival status. 
+ * + * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP. The rival will + * busy wait until a status change happens. This can be MRSP_SUCCESSFUL or + * MRSP_TIMEOUT. State changes are protected by the MrsP control lock. + */ + volatile MRSP_Status status; +} MRSP_Rival; + +/** + * @brief MrsP control block. + */ +struct MRSP_Control { + /** + * @brief Basic resource control. + */ + Resource_Control Resource; + + /** + * @brief A chain of MrsP rivals waiting for resource ownership. + * + * @see MRSP_Rival::Node. + */ + Chain_Control Rivals; + + /** + * @brief Lock to protect the resource dependency tree. + */ + ISR_LOCK_MEMBER( Lock ) + + /** + * @brief The initial priority of the owner before it was elevated to the + * ceiling priority. + */ + Priority_Control initial_priority_of_owner; + + /** + * @brief One ceiling priority per scheduler instance. + */ + Priority_Control *ceiling_priorities; +}; + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* RTEMS_SMP */ + +#endif /* _RTEMS_SCORE_MRSP_H */ diff --git a/include/rtems/score/mrspimpl.h b/include/rtems/score/mrspimpl.h new file mode 100644 index 0000000000..bc9ed4b511 --- /dev/null +++ b/include/rtems/score/mrspimpl.h @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2014-2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_MRSPIMPL_H +#define _RTEMS_SCORE_MRSPIMPL_H + +#include <rtems/score/mrsp.h> + +#if defined(RTEMS_SMP) + +#include <rtems/score/assert.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/resourceimpl.h> +#include <rtems/score/schedulerimpl.h> +#include <rtems/score/watchdogimpl.h> +#include <rtems/score/wkspace.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup ScoreMRSP + * + * @{ + */ + +/* + * FIXME: Operations with the resource dependency tree are protected by the + * global scheduler lock. Since the scheduler lock should be scheduler + * instance specific in the future this will only work temporarily. A more + * sophisticated locking strategy is necessary. + */ + +RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context ) +{ + _ISR_lock_Acquire( &_Scheduler_Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context ) +{ + _ISR_lock_Release( &_Scheduler_Lock, lock_context ); +} + +RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter( + Thread_Control *thread, + Priority_Control *new_priority, + void *arg +) +{ + *new_priority = _Thread_Priority_highest( + thread->real_priority, + *new_priority + ); + + return *new_priority != thread->current_priority; +} + +RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority( + Thread_Control *thread, + Priority_Control initial_priority +) +{ + /* + * The Thread_Control::resource_count is used by the normal priority ceiling + * or priority inheritance semaphores. 
+ */ + if ( thread->resource_count == 0 ) { + _Thread_Change_priority( + thread, + initial_priority, + NULL, + _MRSP_Restore_priority_filter, + true + ); + } +} + +RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership( + MRSP_Control *mrsp, + Thread_Control *new_owner, + Priority_Control initial_priority, + Priority_Control ceiling_priority, + ISR_lock_Context *lock_context +) +{ + Per_CPU_Control *cpu_self; + + _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource ); + _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node ); + mrsp->initial_priority_of_owner = initial_priority; + _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _Thread_Raise_priority( new_owner, ceiling_priority ); + + _Thread_Dispatch_enable( cpu_self ); +} + +RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize( + MRSP_Control *mrsp, + Priority_Control ceiling_priority, + Thread_Control *executing, + bool initially_locked +) +{ + uint32_t scheduler_count = _Scheduler_Count; + uint32_t i; + + if ( initially_locked ) { + return MRSP_INVALID_NUMBER; + } + + mrsp->ceiling_priorities = _Workspace_Allocate( + sizeof( *mrsp->ceiling_priorities ) * scheduler_count + ); + if ( mrsp->ceiling_priorities == NULL ) { + return MRSP_NO_MEMORY; + } + + for ( i = 0 ; i < scheduler_count ; ++i ) { + mrsp->ceiling_priorities[ i ] = ceiling_priority; + } + + _Resource_Initialize( &mrsp->Resource ); + _Chain_Initialize_empty( &mrsp->Rivals ); + _ISR_lock_Initialize( &mrsp->Lock, "MrsP" ); + + return MRSP_SUCCESSFUL; +} + +RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_ceiling_priority( + MRSP_Control *mrsp, + uint32_t scheduler_index +) +{ + return mrsp->ceiling_priorities[ scheduler_index ]; +} + +RTEMS_INLINE_ROUTINE void _MRSP_Set_ceiling_priority( + MRSP_Control *mrsp, + uint32_t scheduler_index, + Priority_Control 
ceiling_priority +) +{ + mrsp->ceiling_priorities[ scheduler_index ] = ceiling_priority; +} + +RTEMS_INLINE_ROUTINE void _MRSP_Timeout( + Objects_Id id, + void *arg +) +{ + MRSP_Rival *rival = arg; + MRSP_Control *mrsp = rival->resource; + Thread_Control *thread = rival->thread; + ISR_lock_Context lock_context; + + (void) id; + + _ISR_lock_ISR_disable_and_acquire( &mrsp->Lock, &lock_context ); + + if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) { + ISR_lock_Context giant_lock_context; + + _MRSP_Giant_acquire( &giant_lock_context ); + + _Chain_Extract_unprotected( &rival->Node ); + _Resource_Node_extract( &thread->Resource_node ); + _Resource_Node_set_dependency( &thread->Resource_node, NULL ); + _Scheduler_Thread_change_help_state( thread, rival->initial_help_state ); + _Scheduler_Thread_change_resource_root( thread, thread ); + + _MRSP_Giant_release( &giant_lock_context ); + + rival->status = MRSP_TIMEOUT; + + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context ); + } else { + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context ); + } +} + +RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership( + MRSP_Control *mrsp, + Resource_Node *owner, + Thread_Control *executing, + Priority_Control initial_priority, + Priority_Control ceiling_priority, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +) +{ + MRSP_Status status; + MRSP_Rival rival; + bool initial_life_protection; + Per_CPU_Control *cpu_self; + ISR_lock_Context giant_lock_context; + + rival.thread = executing; + rival.resource = mrsp; + rival.initial_priority = initial_priority; + + _MRSP_Giant_acquire( &giant_lock_context ); + + rival.initial_help_state = + _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL ); + rival.status = MRSP_WAIT_FOR_OWNERSHIP; + + _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node ); + _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node ); + _Resource_Node_set_dependency( &executing->Resource_node, 
&mrsp->Resource ); + _Scheduler_Thread_change_resource_root( + executing, + THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) ) + ); + + _MRSP_Giant_release( &giant_lock_context ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _Thread_Raise_priority( executing, ceiling_priority ); + + if ( timeout > 0 ) { + _Watchdog_Initialize( + &executing->Timer, + _MRSP_Timeout, + 0, + &rival + ); + _Watchdog_Insert_ticks( &executing->Timer, timeout ); + } + + initial_life_protection = _Thread_Set_life_protection( true ); + _Thread_Dispatch_enable( cpu_self ); + + _Assert( _Debug_Is_thread_dispatching_allowed() ); + + /* Wait for state change */ + do { + status = rival.status; + } while ( status == MRSP_WAIT_FOR_OWNERSHIP ); + + _Thread_Set_life_protection( initial_life_protection ); + + if ( timeout > 0 ) { + _Watchdog_Remove_ticks( &executing->Timer ); + + if ( status == MRSP_TIMEOUT ) { + _MRSP_Restore_priority( executing, initial_priority ); + } + } + + return status; +} + +RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain( + MRSP_Control *mrsp, + Thread_Control *executing, + bool wait, + Watchdog_Interval timeout, + ISR_lock_Context *lock_context +) +{ + MRSP_Status status; + const Scheduler_Control *scheduler = _Scheduler_Get_own( executing ); + uint32_t scheduler_index = _Scheduler_Get_index( scheduler ); + Priority_Control initial_priority = executing->current_priority; + Priority_Control ceiling_priority = + _MRSP_Get_ceiling_priority( mrsp, scheduler_index ); + bool priority_ok = !_Thread_Priority_less_than( + ceiling_priority, + initial_priority + ); + Resource_Node *owner; + + if ( !priority_ok) { + _ISR_lock_ISR_enable( lock_context ); + return MRSP_INVALID_PRIORITY; + } + + _ISR_lock_Acquire( &mrsp->Lock, lock_context ); + owner = _Resource_Get_owner( &mrsp->Resource ); + if ( owner == NULL ) { + _MRSP_Claim_ownership( + mrsp, + executing, + initial_priority, + 
ceiling_priority, + lock_context + ); + status = MRSP_SUCCESSFUL; + } else if ( + wait + && _Resource_Node_get_root( owner ) != &executing->Resource_node + ) { + status = _MRSP_Wait_for_ownership( + mrsp, + owner, + executing, + initial_priority, + ceiling_priority, + timeout, + lock_context + ); + } else { + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + /* Not available, nested access or deadlock */ + status = MRSP_UNSATISFIED; + } + + return status; +} + +RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release( + MRSP_Control *mrsp, + Thread_Control *executing, + ISR_lock_Context *lock_context +) +{ + Priority_Control initial_priority; + Per_CPU_Control *cpu_self; + ISR_lock_Context giant_lock_context; + + if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) { + _ISR_lock_ISR_enable( lock_context ); + return MRSP_NOT_OWNER_OF_RESOURCE; + } + + if ( + !_Resource_Is_most_recently_obtained( + &mrsp->Resource, + &executing->Resource_node + ) + ) { + _ISR_lock_ISR_enable( lock_context ); + return MRSP_INCORRECT_STATE; + } + + initial_priority = mrsp->initial_priority_of_owner; + + _ISR_lock_Acquire( &mrsp->Lock, lock_context ); + + _MRSP_Giant_acquire( &giant_lock_context ); + + _Resource_Extract( &mrsp->Resource ); + + if ( _Chain_Is_empty( &mrsp->Rivals ) ) { + _Resource_Set_owner( &mrsp->Resource, NULL ); + } else { + MRSP_Rival *rival = (MRSP_Rival *) + _Chain_Get_first_unprotected( &mrsp->Rivals ); + Thread_Control *new_owner; + + /* + * This must be inside the critical section since the status prevents a + * potential double extraction in _MRSP_Timeout(). 
+ */ + rival->status = MRSP_SUCCESSFUL; + + new_owner = rival->thread; + mrsp->initial_priority_of_owner = rival->initial_priority; + _Resource_Node_extract( &new_owner->Resource_node ); + _Resource_Node_set_dependency( &new_owner->Resource_node, NULL ); + _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource ); + _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node ); + _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER ); + _Scheduler_Thread_change_resource_root( new_owner, new_owner ); + } + + if ( !_Resource_Node_owns_resources( &executing->Resource_node ) ) { + _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF ); + } + + _MRSP_Giant_release( &giant_lock_context ); + + cpu_self = _Thread_Dispatch_disable_critical( lock_context ); + _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context ); + + _MRSP_Restore_priority( executing, initial_priority ); + + _Thread_Dispatch_enable( cpu_self ); + + return MRSP_SUCCESSFUL; +} + +RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Destroy( MRSP_Control *mrsp ) +{ + if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) { + return MRSP_RESOUCE_IN_USE; + } + + _ISR_lock_Destroy( &mrsp->Lock ); + _Workspace_Free( mrsp->ceiling_priorities ); + + return MRSP_SUCCESSFUL; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* RTEMS_SMP */ + +#endif /* _RTEMS_SCORE_MRSPIMPL_H */ diff --git a/include/rtems/score/nios2-count-zeros.h b/include/rtems/score/nios2-count-zeros.h new file mode 100644 index 0000000000..0ec259aa0c --- /dev/null +++ b/include/rtems/score/nios2-count-zeros.h @@ -0,0 +1,70 @@ +/* + * Author: Jeffrey O. Hill + * + * Copyright 2012. Los Alamos National Security, LLC. + * This material was produced under U.S. Government contract + * DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL), + * which is operated by Los Alamos National Security, LLC for + * the U.S. Department of Energy. The U.S. 
Government has rights + * to use, reproduce, and distribute this software. NEITHER THE + * GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY + * WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR + * THE USE OF THIS SOFTWARE. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _NIOS2_COUNT_ZEROS_H +#define _NIOS2_COUNT_ZEROS_H + +#include <stdint.h> + +#include <rtems/score/bitfield.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* + * This implementation is currently much more efficient than + * the GCC provided __builtin_clz + */ +static inline unsigned _Nios2_Count_leading_zeros( uint32_t p ) +{ + unsigned bitIdx; + + if ( p <= 0xffffu ) { + if ( p < 0x100u ) { + bitIdx = __log2table[ p ] + 24u; + } else { + bitIdx = __log2table[ p >> 8u ] + 16u; + } + } else { + p >>= 16u; + + if ( p < 0x100u ) { + bitIdx = __log2table[ p ] + 8u; + } else { + bitIdx = __log2table[ p >> 8u ]; + } + } + + return bitIdx; +} + +/* + * This implementation is currently much more efficient than + * the GCC provided __builtin_ctz + */ +static inline unsigned _Nios2_Count_trailing_zeros( uint32_t p ) +{ + return 31u - _Nios2_Count_leading_zeros( p & ( -p ) ); +} + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _NIOS2_COUNT_ZEROS_H */ diff --git a/include/rtems/score/nios2-utility.h b/include/rtems/score/nios2-utility.h new file mode 100644 index 0000000000..d5eb4b3597 --- /dev/null +++ b/include/rtems/score/nios2-utility.h @@ -0,0 +1,516 @@ +/** + * @file + * + * @brief NIOS II Utility + */ +/* + * Copyright (c) 2011 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Obere Lagerstr. 
30 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_NIOS2_UTILITY_H +#define _RTEMS_SCORE_NIOS2_UTILITY_H + +#define NIOS2_CTLREG_INDEX_STATUS 0 +#define NIOS2_CTLREG_INDEX_ESTATUS 1 +#define NIOS2_CTLREG_INDEX_BSTATUS 2 +#define NIOS2_CTLREG_INDEX_IENABLE 3 +#define NIOS2_CTLREG_INDEX_IPENDING 4 +#define NIOS2_CTLREG_INDEX_CPUID 5 +#define NIOS2_CTLREG_INDEX_EXCEPTION 7 +#define NIOS2_CTLREG_INDEX_PTEADDR 8 +#define NIOS2_CTLREG_INDEX_TLBACC 9 +#define NIOS2_CTLREG_INDEX_TLBMISC 10 +#define NIOS2_CTLREG_INDEX_BADADDR 12 +#define NIOS2_CTLREG_INDEX_CONFIG 13 +#define NIOS2_CTLREG_INDEX_MPUBASE 14 +#define NIOS2_CTLREG_INDEX_MPUACC 15 + +#define NIOS2_CONTEXT_OFFSET_R16 0 +#define NIOS2_CONTEXT_OFFSET_R17 4 +#define NIOS2_CONTEXT_OFFSET_R18 8 +#define NIOS2_CONTEXT_OFFSET_R19 12 +#define NIOS2_CONTEXT_OFFSET_R20 16 +#define NIOS2_CONTEXT_OFFSET_R21 20 +#define NIOS2_CONTEXT_OFFSET_R22 24 +#define NIOS2_CONTEXT_OFFSET_R23 28 +#define NIOS2_CONTEXT_OFFSET_FP 32 +#define NIOS2_CONTEXT_OFFSET_STATUS 36 +#define NIOS2_CONTEXT_OFFSET_SP 40 +#define NIOS2_CONTEXT_OFFSET_RA 44 +#define NIOS2_CONTEXT_OFFSET_THREAD_DISPATCH_DISABLED 48 +#define NIOS2_CONTEXT_OFFSET_STACK_MPUBASE 52 +#define NIOS2_CONTEXT_OFFSET_STACK_MPUACC 56 + +#define NIOS2_ISR_STATUS_MASK_IIC 0xfffffffe +#define NIOS2_ISR_STATUS_BITS_IIC 0x00000000 + +#define NIOS2_ISR_STATUS_MASK_EIC_IL 0xfffffc0f +#define NIOS2_ISR_STATUS_BITS_EIC_IL 0x000003f0 + +#define NIOS2_ISR_STATUS_MASK_EIC_RSIE 0xf7ffffff +#define NIOS2_ISR_STATUS_BITS_EIC_RSIE 0x00000000 + +#define NIOS2_STATUS_RSIE (1 << 23) +#define NIOS2_STATUS_NMI (1 << 22) +#define NIOS2_STATUS_PRS_OFFSET 16 +#define NIOS2_STATUS_PRS_MASK (0x3f << NIOS2_STATUS_PRS_OFFSET) +#define NIOS2_STATUS_CRS_OFFSET 10 +#define NIOS2_STATUS_CRS_MASK (0x3f << 
NIOS2_STATUS_CRS_OFFSET) +#define NIOS2_STATUS_IL_OFFSET 4 +#define NIOS2_STATUS_IL_MASK (0x3f << NIOS2_STATUS_IL_OFFSET) +#define NIOS2_STATUS_IH (1 << 3) +#define NIOS2_STATUS_EH (1 << 2) +#define NIOS2_STATUS_U (1 << 1) +#define NIOS2_STATUS_PIE (1 << 0) + +#define NIOS2_EXCEPTION_CAUSE_OFFSET 2 +#define NIOS2_EXCEPTION_CAUSE_MASK (0x1f << NIOS2_EXCEPTION_CAUSE_OFFSET) + +#define NIOS2_PTEADDR_PTBASE_OFFSET 22 +#define NIOS2_PTEADDR_PTBASE_MASK (0x3ff << NIOS2_PTEADDR_PTBASE_OFFSET) +#define NIOS2_PTEADDR_VPN_OFFSET 2 +#define NIOS2_PTEADDR_VPN_MASK (0xfffff << NIOS2_PTEADDR_VPN_OFFSET) + +#define NIOS2_TLBACC_IG_OFFSET 25 +#define NIOS2_TLBACC_IG_MASK (0x3ff << NIOS2_TLBACC_IG_OFFSET) +#define NIOS2_TLBACC_C (1 << 24) +#define NIOS2_TLBACC_R (1 << 23) +#define NIOS2_TLBACC_W (1 << 22) +#define NIOS2_TLBACC_X (1 << 21) +#define NIOS2_TLBACC_G (1 << 20) +#define NIOS2_TLBACC_PFN_OFFSET 2 +#define NIOS2_TLBACC_PFN_MASK (0xfffff << NIOS2_TLBACC_PFN_OFFSET) + +#define NIOS2_TLBMISC_WAY_OFFSET 20 +#define NIOS2_TLBMISC_WAY_MASK (0xf << NIOS2_TLBMISC_WAY_OFFSET) +#define NIOS2_TLBMISC_RD (1 << 19) +#define NIOS2_TLBMISC_WE (1 << 18) +#define NIOS2_TLBMISC_PID_OFFSET 5 +#define NIOS2_TLBMISC_PID_MASK (0x3fff << NIOS2_TLBMISC_PID_OFFSET) +#define NIOS2_TLBMISC_DBL (1 << 3) +#define NIOS2_TLBMISC_BAD (1 << 2) +#define NIOS2_TLBMISC_PERM (1 << 1) +#define NIOS2_TLBMISC_D (1 << 0) + +#define NIOS2_CONFIG_ANI (1 << 1) +#define NIOS2_CONFIG_PE (1 << 0) + +#define NIOS2_MPUBASE_BASE_OFFSET 6 +#define NIOS2_MPUBASE_BASE_MASK (0x1ffffff << NIOS2_MPUBASE_BASE_OFFSET) +#define NIOS2_MPUBASE_INDEX_OFFSET 1 + +/* Avoid redefines with Altera HAL */ +#define NIOS2_MPUBASE_INDEX_MASK (0x0000003e) + +#define NIOS2_MPUBASE_D (1 << 0) + +#define NIOS2_MPUACC_MASK_OFFSET 6 + +/* Avoid redefines with Altera HAL */ +#define NIOS2_MPUACC_MASK_MASK (0x7fffffc0) + +#define NIOS2_MPUACC_LIMIT_OFFSET 6 + +/* Avoid redefines with Altera HAL */ +#define NIOS2_MPUACC_LIMIT_MASK (0xffffffc0) + 
+#define NIOS2_MPUACC_C (1 << 5) +#define NIOS2_MPUACC_PERM_OFFSET 2 + +/* Avoid redefines with Altera HAL */ +#define NIOS2_MPUACC_PERM_MASK (0x0000001c) + +#define NIOS2_MPUACC_RD (1 << 1) +#define NIOS2_MPUACC_WR (1 << 0) + +#ifndef ASM + +#include <stddef.h> +#include <stdint.h> +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @brief Nios II specific thread dispatch disabled indicator. + * + * This global variable is used by the interrupt dispatch support for the + * external interrupt controller (EIC) with shadow registers. This makes it + * possible to do the thread dispatch after an interrupt without disabled + * interrupts and thus probably reduce the maximum interrupt latency. Its + * purpose is to prevent unbounded stack usage of the interrupted thread. + */ +extern uint32_t _Nios2_Thread_dispatch_disabled; + +/** + * @brief This global symbol specifies the status register mask used to disable + * interrupts. + * + * The board support package must provide a global symbol with this name to + * specify the status register mask used in _CPU_ISR_Disable(). + */ +extern char _Nios2_ISR_Status_mask []; + +/** + * @brief This symbol specifies the status register bits used to disable + * interrupts. + * + * The board support package must provide a global symbol with this name to + * specify the status register bits used in _CPU_ISR_Disable(). 
+ */ +extern char _Nios2_ISR_Status_bits []; + +static inline void _Nios2_Flush_pipeline( void ) +{ + __asm__ volatile ("flushp"); +} + +static inline uint32_t _Nios2_Get_ctlreg_status( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_STATUS ); +} + +static inline void _Nios2_Set_ctlreg_status( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_STATUS, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_estatus( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_ESTATUS ); +} + +static inline void _Nios2_Set_ctlreg_estatus( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_ESTATUS, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_bstatus( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_BSTATUS ); +} + +static inline void _Nios2_Set_ctlreg_bstatus( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_BSTATUS, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_ienable( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_IENABLE ); +} + +static inline void _Nios2_Set_ctlreg_ienable( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_IENABLE, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_ipending( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_IPENDING ); +} + +static inline uint32_t _Nios2_Get_ctlreg_cpuid( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_CPUID ); +} + +static inline uint32_t _Nios2_Get_ctlreg_exception( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_EXCEPTION ); +} + +static inline uint32_t _Nios2_Get_ctlreg_pteaddr( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_PTEADDR ); +} + +static inline void _Nios2_Set_ctlreg_pteaddr( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_PTEADDR, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_tlbacc( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_TLBACC ); +} + 
+static inline void _Nios2_Set_ctlreg_tlbacc( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_TLBACC, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_tlbmisc( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_TLBMISC ); +} + +static inline void _Nios2_Set_ctlreg_tlbmisc( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_TLBMISC, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_badaddr( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_BADADDR ); +} + +static inline uint32_t _Nios2_Get_ctlreg_config( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_CONFIG ); +} + +static inline void _Nios2_Set_ctlreg_config( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_CONFIG, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_mpubase( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_MPUBASE ); +} + +static inline void _Nios2_Set_ctlreg_mpubase( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_MPUBASE, (int) value ); +} + +static inline uint32_t _Nios2_Get_ctlreg_mpuacc( void ) +{ + return (uint32_t) __builtin_rdctl( NIOS2_CTLREG_INDEX_MPUACC ); +} + +static inline void _Nios2_Set_ctlreg_mpuacc( uint32_t value ) +{ + __builtin_wrctl( NIOS2_CTLREG_INDEX_MPUACC, (int) value ); +} + +static inline uint32_t _Nios2_ISR_Get_status_mask( void ) +{ + return (uint32_t) &_Nios2_ISR_Status_mask [0]; +} + +static inline uint32_t _Nios2_ISR_Get_status_bits( void ) +{ + return (uint32_t) &_Nios2_ISR_Status_bits [0]; +} + +static inline bool _Nios2_Has_internal_interrupt_controller( void ) +{ + return _Nios2_ISR_Get_status_mask() == NIOS2_ISR_STATUS_MASK_IIC; +} + +uint32_t _Nios2_ISR_Set_level( uint32_t new_level, uint32_t status ); + +typedef struct { + int data_address_width; + int instruction_address_width; + int data_region_size_log2; + int instruction_region_size_log2; + int data_region_count; + int instruction_region_count; + int 
data_index_for_stack_protection; + bool region_uses_limit; + bool enable_data_cache_for_stack; +} Nios2_MPU_Configuration; + +void _Nios2_MPU_Set_configuration( const Nios2_MPU_Configuration *config ); + +const Nios2_MPU_Configuration *_Nios2_MPU_Get_configuration( void ); + +typedef enum { + NIOS2_MPU_INST_PERM_SVR_NONE_USER_NONE = 0, + NIOS2_MPU_INST_PERM_SVR_EXECUTE_USER_NONE, + NIOS2_MPU_INST_PERM_SVR_EXECUTE_USER_EXECUTE, + NIOS2_MPU_DATA_PERM_SVR_NONE_USER_NONE = 0, + NIOS2_MPU_DATA_PERM_SVR_READONLY_USER_NONE, + NIOS2_MPU_DATA_PERM_SVR_READONLY_USER_READONLY, + NIOS2_MPU_DATA_PERM_SVR_READWRITE_USER_NONE = 4, + NIOS2_MPU_DATA_PERM_SVR_READWRITE_USER_READONLY, + NIOS2_MPU_DATA_PERM_SVR_READWRITE_USER_READWRITE +} Nios2_MPU_Region_permissions; + +typedef struct { + int index; + const void *base; + const void *end; + Nios2_MPU_Region_permissions perm; + bool data; + bool cacheable; + bool read; + bool write; +} Nios2_MPU_Region_descriptor; + +#define NIOS2_MPU_REGION_DESC_INST( index, base, end ) \ + { \ + (index), (base), (end), NIOS2_MPU_INST_PERM_SVR_EXECUTE_USER_NONE, \ + false, false, false, true \ + } + +#define NIOS2_MPU_REGION_DESC_DATA_RO( index, base, end ) \ + { \ + (index), (base), (end), NIOS2_MPU_DATA_PERM_SVR_READONLY_USER_NONE, \ + true, true, false, true \ + } + +#define NIOS2_MPU_REGION_DESC_DATA_RW( index, base, end ) \ + { \ + (index), (base), (end), NIOS2_MPU_DATA_PERM_SVR_READWRITE_USER_NONE, \ + true, true, false, true \ + } + +#define NIOS2_MPU_REGION_DESC_DATA_IO( index, base, end ) \ + { \ + (index), (base), (end), NIOS2_MPU_DATA_PERM_SVR_READWRITE_USER_NONE, \ + true, false, false, true \ + } + +static inline int _Nios2_MPU_Get_region_count( + const Nios2_MPU_Configuration *config, + bool data +) +{ + return data ? 
+ config->data_region_count + : config->instruction_region_count; +} + +static inline bool _Nios2_MPU_Is_valid_index( + const Nios2_MPU_Configuration *config, + int index, + bool data +) +{ + return 0 <= index + && index < _Nios2_MPU_Get_region_count( config, data ); +} + +bool _Nios2_MPU_Setup_region_registers( + const Nios2_MPU_Configuration *config, + const Nios2_MPU_Region_descriptor *desc, + uint32_t *mpubase, + uint32_t *mpuacc +); + +bool _Nios2_MPU_Get_region_descriptor( + const Nios2_MPU_Configuration *config, + int index, + bool data, + Nios2_MPU_Region_descriptor *desc +); + +/** + * @brief Searches the region table part for a disabled region. + * + * The table will be searched between indices @a begin and @a end. The @a end + * index is not part of the search range. If @a end is negative, then the + * region count will be used. Thus a @a begin of 0 and a @a end of -1 will + * specify the complete table. + * + * @retval -1 No disabled region is available. + * @retval other Index of disabled region. + */ +int _Nios2_MPU_Get_disabled_region_index( + const Nios2_MPU_Configuration *config, + bool data, + int begin, + int end +); + +/** + * @brief Adds a region according to region descriptor @a desc. + * + * If @a force is true, then an enabled region will be overwritten. + * + * @retval true Successful operation. + * @retval false Invalid region descriptor or region already in use. + */ +bool _Nios2_MPU_Add_region( + const Nios2_MPU_Configuration *config, + const Nios2_MPU_Region_descriptor *desc, + bool force +); + +static inline void _Nios2_MPU_Get_region_registers( + int index, + bool data, + uint32_t *mpubase, + uint32_t *mpuacc +) +{ + uint32_t base = (uint32_t) + (((index << NIOS2_MPUBASE_INDEX_OFFSET) & NIOS2_MPUBASE_INDEX_MASK) + | (data ? 
NIOS2_MPUBASE_D : 0)); + + _Nios2_Set_ctlreg_mpubase( base ); + _Nios2_Set_ctlreg_mpuacc( NIOS2_MPUACC_RD ); + _Nios2_Flush_pipeline(); + *mpubase = _Nios2_Get_ctlreg_mpubase() | base; + *mpuacc = _Nios2_Get_ctlreg_mpuacc(); +} + +static inline void _Nios2_MPU_Set_region_registers( + uint32_t mpubase, + uint32_t mpuacc +) +{ + _Nios2_Set_ctlreg_mpubase( mpubase ); + _Nios2_Set_ctlreg_mpuacc( mpuacc ); + _Nios2_Flush_pipeline(); +} + +static inline void _Nios2_MPU_Enable( void ) +{ + uint32_t config = _Nios2_Get_ctlreg_config(); + + _Nios2_Set_ctlreg_config( config | NIOS2_CONFIG_PE ); +} + +static inline uint32_t _Nios2_MPU_Disable( void ) +{ + uint32_t config = _Nios2_Get_ctlreg_config(); + uint32_t config_pe = NIOS2_CONFIG_PE; + + _Nios2_Set_ctlreg_config( config & ~config_pe ); + + return config; +} + +static inline void _Nios2_MPU_Restore( uint32_t config ) +{ + _Nios2_Set_ctlreg_config( config ); +} + +uint32_t _Nios2_MPU_Disable_protected( void ); + +void _Nios2_MPU_Reset( const Nios2_MPU_Configuration *config ); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#else /* ASM */ + + .macro NIOS2_ASM_DISABLE_INTERRUPTS new_status, current_status + movhi \new_status, %hiadj(_Nios2_ISR_Status_mask) + addi \new_status, \new_status, %lo(_Nios2_ISR_Status_mask) + and \new_status, \current_status, \new_status + ori \new_status, \new_status, %lo(_Nios2_ISR_Status_bits) + wrctl status, \new_status + .endm + +#endif /* ASM */ + +#endif /* _RTEMS_SCORE_NIOS2_UTILITY_H */ diff --git a/include/rtems/score/nios2.h b/include/rtems/score/nios2.h new file mode 100644 index 0000000000..26d76bcbfa --- /dev/null +++ b/include/rtems/score/nios2.h @@ -0,0 +1,65 @@ +/** + * @file + * + * @brief NIOS II Set up Basic CPU Dependency Settings Based on + * Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. 
This particular implementation + * is specific to the NIOS2 port. + */ + +/* + * COPYRIGHT (c) 1989-1999. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + */ + +#ifndef _RTEMS_SCORE_NIOS2_H +#define _RTEMS_SCORE_NIOS2_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NIOS2 family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +/* + * Define the name of the CPU family and specific model. + */ + +#define CPU_NAME "NIOS2" +#define CPU_MODEL_NAME "nios2" + +/* + * See also nios2-rtems-gcc -print-multi-lib for all valid combinations of + * + * -mno-hw-mul + * -mhw-mulx + * -mstack-check + * -pg + * -EB + * -mcustom-fpu-cfg=60-1 + * -mcustom-fpu-cfg=60-2 + */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_NIOS2_H */ diff --git a/include/rtems/score/object.h b/include/rtems/score/object.h new file mode 100644 index 0000000000..70e5fe630c --- /dev/null +++ b/include/rtems/score/object.h @@ -0,0 +1,441 @@ +/** + * @file rtems/score/object.h + * + * @brief Constants and Structures Associated with the Object Handler + * + * This include file contains all the constants and structures associated + * with the Object Handler. This Handler provides mechanisms which + * can be used to initialize and manipulate all objects which have ids. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_OBJECT_H +#define _RTEMS_SCORE_OBJECT_H + +#include <rtems/score/basedefs.h> +#include <rtems/score/cpu.h> +#include <rtems/score/chain.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup Score SuperCore + * + * @brief Provides services for all APIs. + */ +/**@{*/ + +#if defined(RTEMS_POSIX_API) + /** + * This macro is defined when an API is enabled that requires the + * use of strings for object names. Since the Classic API uses + * 32-bit unsigned integers and not strings, this allows us to + * disable this in the smallest RTEMS configuratinos. + */ + #define RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES +#endif + +/** + * @defgroup ScoreCPU CPU Architecture Support + * + * @ingroup Score + * + * @brief Provides CPU architecture dependent services. + */ +/**@{*/ + +/** + * @defgroup ScoreObject Object Handler + * + * @ingroup Score + */ +/**@{*/ + +/** + * The following type defines the control block used to manage + * object names. + */ +typedef union { + #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES) + /** This is a pointer to a string name. */ + const char *name_p; + #endif + /** This is the actual 32-bit "raw" integer name. */ + uint32_t name_u32; +} Objects_Name; + +#if defined(RTEMS_USE_16_BIT_OBJECT) +/** + * The following type defines the control block used to manage + * object IDs. The format is as follows (0=LSB): + * + * Bits 0 .. 7 = index (up to 254 objects of a type) + * Bits 8 .. 10 = API (up to 7 API classes) + * Bits 11 .. 15 = class (up to 31 object types per API) + */ +typedef uint16_t Objects_Id; + +/** + * This type is used to store the maximum number of allowed objects + * of each type. 
+ */ +typedef uint8_t Objects_Maximum; + +#define OBJECTS_INDEX_START_BIT 0U +#define OBJECTS_API_START_BIT 8U +#define OBJECTS_CLASS_START_BIT 11U + +#define OBJECTS_INDEX_MASK (Objects_Id)0x00ffU +#define OBJECTS_API_MASK (Objects_Id)0x0700U +#define OBJECTS_CLASS_MASK (Objects_Id)0xF800U + +#define OBJECTS_INDEX_VALID_BITS (Objects_Id)0x00ffU +#define OBJECTS_API_VALID_BITS (Objects_Id)0x0007U +/* OBJECTS_NODE_VALID_BITS should not be used with 16 bit Ids */ +#define OBJECTS_CLASS_VALID_BITS (Objects_Id)0x001fU + +#define OBJECTS_UNLIMITED_OBJECTS 0x8000U + +#define OBJECTS_ID_INITIAL_INDEX (0) +#define OBJECTS_ID_FINAL_INDEX (0xff) + +#else +/** + * The following type defines the control block used to manage + * object IDs. The format is as follows (0=LSB): + * + * Bits 0 .. 15 = index (up to 65535 objects of a type) + * Bits 16 .. 23 = node (up to 255 nodes) + * Bits 24 .. 26 = API (up to 7 API classes) + * Bits 27 .. 31 = class (up to 31 object types per API) + */ +typedef uint32_t Objects_Id; + +/** + * This type is used to store the maximum number of allowed objects + * of each type. + */ +typedef uint16_t Objects_Maximum; + +/** + * This is the bit position of the starting bit of the index portion of + * the object Id. + */ +#define OBJECTS_INDEX_START_BIT 0U +/** + * This is the bit position of the starting bit of the node portion of + * the object Id. + */ +#define OBJECTS_NODE_START_BIT 16U + +/** + * This is the bit position of the starting bit of the API portion of + * the object Id. + */ +#define OBJECTS_API_START_BIT 24U + +/** + * This is the bit position of the starting bit of the class portion of + * the object Id. + */ +#define OBJECTS_CLASS_START_BIT 27U + +/** + * This mask is used to extract the index portion of an object Id. + */ +#define OBJECTS_INDEX_MASK (Objects_Id)0x0000ffffU + +/** + * This mask is used to extract the node portion of an object Id. 
+ */ +#define OBJECTS_NODE_MASK (Objects_Id)0x00ff0000U + +/** + * This mask is used to extract the API portion of an object Id. + */ +#define OBJECTS_API_MASK (Objects_Id)0x07000000U + +/** + * This mask is used to extract the class portion of an object Id. + */ +#define OBJECTS_CLASS_MASK (Objects_Id)0xf8000000U + +/** + * This mask represents the bits that is used to ensure no extra bits + * are set after shifting to extract the index portion of an object Id. + */ +#define OBJECTS_INDEX_VALID_BITS (Objects_Id)0x0000ffffU + +/** + * This mask represents the bits that is used to ensure no extra bits + * are set after shifting to extract the node portion of an object Id. + */ +#define OBJECTS_NODE_VALID_BITS (Objects_Id)0x000000ffU + +/** + * This mask represents the bits that is used to ensure no extra bits + * are set after shifting to extract the API portion of an object Id. + */ +#define OBJECTS_API_VALID_BITS (Objects_Id)0x00000007U + +/** + * This mask represents the bits that is used to ensure no extra bits + * are set after shifting to extract the class portion of an object Id. + */ +#define OBJECTS_CLASS_VALID_BITS (Objects_Id)0x0000001fU + +/** + * Mask to enable unlimited objects. This is used in the configuration + * table when specifying the number of configured objects. + */ +#define OBJECTS_UNLIMITED_OBJECTS 0x80000000U + +/** + * This is the lowest value for the index portion of an object Id. + */ +#define OBJECTS_ID_INITIAL_INDEX (0) + +/** + * This is the highest value for the index portion of an object Id. + */ +#define OBJECTS_ID_FINAL_INDEX (0xffffU) +#endif + +/** + * This enumerated type is used in the class field of the object ID. + */ +typedef enum { + OBJECTS_NO_API = 0, + OBJECTS_INTERNAL_API = 1, + OBJECTS_CLASSIC_API = 2, + OBJECTS_POSIX_API = 3, + OBJECTS_FAKE_OBJECTS_API = 7 +} Objects_APIs; + +/** This macro is used to generically specify the last API index. 
*/ +#define OBJECTS_APIS_LAST OBJECTS_POSIX_API + +/** + * The following defines the Object Control Block used to manage + * each object local to this node. + */ +typedef struct { + /** This is the chain node portion of an object. */ + Chain_Node Node; + /** This is the object's ID. */ + Objects_Id id; + /** This is the object's name. */ + Objects_Name name; +} Objects_Control; + +#if defined( RTEMS_MULTIPROCESSING ) +/** + * This defines the Global Object Control Block used to manage + * objects resident on other nodes. It is derived from Object. + */ +typedef struct { + /** This is an object control structure. */ + Objects_Control Object; + /** This is the name of the object. Using an unsigned thirty two + * bit value is broken but works. If any API is MP with variable + * length names .. BOOM!!!! + */ + uint32_t name; +} Objects_MP_Control; +#endif + +/** + * No object can have this ID. + */ +#define OBJECTS_ID_NONE 0 + +/** + * The following defines the constant which may be used + * with _Objects_Get to manipulate the calling task. + */ +#define OBJECTS_ID_OF_SELF ((Objects_Id) 0) + +/** + * The following constant is used to specify that a name to ID search + * should search through all nodes. + */ +#define OBJECTS_SEARCH_ALL_NODES 0 + +/** + * The following constant is used to specify that a name to ID search + * should search through all nodes except the current node. + */ +#define OBJECTS_SEARCH_OTHER_NODES 0x7FFFFFFE + +/** + * The following constant is used to specify that a name to ID search + * should search only on this node. + */ +#define OBJECTS_SEARCH_LOCAL_NODE 0x7FFFFFFF + +/** + * The following constant is used to specify that a name to ID search + * is being asked for the ID of the currently executing task. + */ +#define OBJECTS_WHO_AM_I 0 + +/** + * This macros calculates the lowest ID for the specified api, class, + * and node. 
+ */ +#define OBJECTS_ID_INITIAL(_api, _class, _node) \ + _Objects_Build_id( (_api), (_class), (_node), OBJECTS_ID_INITIAL_INDEX ) + +/** + * This macro specifies the highest object ID value + */ +#define OBJECTS_ID_FINAL ((Objects_Id)~0) + +/** + * This macro is used to build a thirty-two bit style name from + * four characters. The most significant byte will be the + * character @a _C1. + * + * @param[in] _C1 is the first character of the name + * @param[in] _C2 is the second character of the name + * @param[in] _C3 is the third character of the name + * @param[in] _C4 is the fourth character of the name + */ +#define _Objects_Build_name( _C1, _C2, _C3, _C4 ) \ + ( (uint32_t)(_C1) << 24 | \ + (uint32_t)(_C2) << 16 | \ + (uint32_t)(_C3) << 8 | \ + (uint32_t)(_C4) ) + +/** + * This function returns the API portion of the ID. + * + * @param[in] id is the object Id to be processed. + * + * @return This method returns an object Id constructed from the arguments. + */ +RTEMS_INLINE_ROUTINE Objects_APIs _Objects_Get_API( + Objects_Id id +) +{ + return (Objects_APIs) ((id >> OBJECTS_API_START_BIT) & OBJECTS_API_VALID_BITS); +} + +/** + * This function returns the class portion of the ID. + * + * @param[in] id is the object Id to be processed + */ +RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_class( + Objects_Id id +) +{ + return (uint32_t) + ((id >> OBJECTS_CLASS_START_BIT) & OBJECTS_CLASS_VALID_BITS); +} + +/** + * This function returns the node portion of the ID. + * + * @param[in] id is the object Id to be processed + * + * @return This method returns the node portion of an object ID. + */ +RTEMS_INLINE_ROUTINE uint32_t _Objects_Get_node( + Objects_Id id +) +{ + /* + * If using 16-bit Ids, then there is no node field and it MUST + * be a single processor system. + */ + #if defined(RTEMS_USE_16_BIT_OBJECT) + return 1; + #else + return (id >> OBJECTS_NODE_START_BIT) & OBJECTS_NODE_VALID_BITS; + #endif +} + +/** + * This function returns the index portion of the ID. 
+ * + * @param[in] id is the Id to be processed + * + * @return This method returns the class portion of the specified object ID. + */ +RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Get_index( + Objects_Id id +) +{ + return + (Objects_Maximum)((id >> OBJECTS_INDEX_START_BIT) & + OBJECTS_INDEX_VALID_BITS); +} + +/** + * This function builds an object's id from the processor node and index + * values specified. + * + * @param[in] the_api indicates the API associated with this Id. + * @param[in] the_class indicates the class of object. + * It is specific to @a the_api. + * @param[in] node is the node where this object resides. + * @param[in] index is the instance number of this object. + * + * @return This method returns an object Id constructed from the arguments. + */ +RTEMS_INLINE_ROUTINE Objects_Id _Objects_Build_id( + Objects_APIs the_api, + uint16_t the_class, + uint8_t node, + uint16_t index +) +{ + return (( (Objects_Id) the_api ) << OBJECTS_API_START_BIT) | + (( (Objects_Id) the_class ) << OBJECTS_CLASS_START_BIT) | + #if !defined(RTEMS_USE_16_BIT_OBJECT) + (( (Objects_Id) node ) << OBJECTS_NODE_START_BIT) | + #endif + (( (Objects_Id) index ) << OBJECTS_INDEX_START_BIT); +} + +/** + * Returns if the object maximum specifies unlimited objects. + * + * @param[in] maximum The object maximum specification. + * + * @retval true Unlimited objects are available. + * @retval false The object count is fixed. + */ +RTEMS_INLINE_ROUTINE bool _Objects_Is_unlimited( uint32_t maximum ) +{ + return (maximum & OBJECTS_UNLIMITED_OBJECTS) != 0; +} + +/* + * We cannot use an inline function for this since it may be evaluated at + * compile time. 
+ */ +#define _Objects_Maximum_per_allocation( maximum ) \ + ((Objects_Maximum) ((maximum) & ~OBJECTS_UNLIMITED_OBJECTS)) + +/**@}*/ +/**@}*/ +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/objectimpl.h b/include/rtems/score/objectimpl.h new file mode 100644 index 0000000000..80c50a5b56 --- /dev/null +++ b/include/rtems/score/objectimpl.h @@ -0,0 +1,1043 @@ +/** + * @file + * + * @brief Inlined Routines in the Object Handler + * + * This include file contains the static inline implementation of all + * of the inlined routines in the Object Handler. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_OBJECTIMPL_H +#define _RTEMS_SCORE_OBJECTIMPL_H + +#include <rtems/score/object.h> +#include <rtems/score/apimutex.h> +#include <rtems/score/isrlock.h> +#include <rtems/score/threaddispatch.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreObject + * + * @{ + */ + +/** + * Functions which compare names are prototyped like this. + */ +typedef bool (*Objects_Name_comparators)( + void * /* name_1 */, + void * /* name_2 */, + uint16_t /* length */ +); + +/** + * This enumerated type is used in the class field of the object ID + * for RTEMS internal object classes. + */ +typedef enum { + OBJECTS_INTERNAL_NO_CLASS = 0, + OBJECTS_INTERNAL_THREADS = 1, + OBJECTS_INTERNAL_MUTEXES = 2 +} Objects_Internal_API; + +/** This macro is used to generically specify the last API index. */ +#define OBJECTS_INTERNAL_CLASSES_LAST OBJECTS_INTERNAL_MUTEXES + +/** + * This enumerated type is used in the class field of the object ID + * for the RTEMS Classic API. 
+ */ +typedef enum { + OBJECTS_CLASSIC_NO_CLASS = 0, + OBJECTS_RTEMS_TASKS = 1, + OBJECTS_RTEMS_TIMERS = 2, + OBJECTS_RTEMS_SEMAPHORES = 3, + OBJECTS_RTEMS_MESSAGE_QUEUES = 4, + OBJECTS_RTEMS_PARTITIONS = 5, + OBJECTS_RTEMS_REGIONS = 6, + OBJECTS_RTEMS_PORTS = 7, + OBJECTS_RTEMS_PERIODS = 8, + OBJECTS_RTEMS_EXTENSIONS = 9, + OBJECTS_RTEMS_BARRIERS = 10 +} Objects_Classic_API; + +/** This macro is used to generically specify the last API index. */ +#define OBJECTS_RTEMS_CLASSES_LAST OBJECTS_RTEMS_BARRIERS + +/** + * This enumerated type is used in the class field of the object ID + * for the POSIX API. + */ +typedef enum { + OBJECTS_POSIX_NO_CLASS = 0, + OBJECTS_POSIX_THREADS = 1, + OBJECTS_POSIX_KEYS = 2, + OBJECTS_POSIX_INTERRUPTS = 3, + OBJECTS_POSIX_MESSAGE_QUEUE_FDS = 4, + OBJECTS_POSIX_MESSAGE_QUEUES = 5, + OBJECTS_POSIX_MUTEXES = 6, + OBJECTS_POSIX_SEMAPHORES = 7, + OBJECTS_POSIX_CONDITION_VARIABLES = 8, + OBJECTS_POSIX_TIMERS = 9, + OBJECTS_POSIX_BARRIERS = 10, + OBJECTS_POSIX_SPINLOCKS = 11, + OBJECTS_POSIX_RWLOCKS = 12 +} Objects_POSIX_API; + +/** This macro is used to generically specify the last API index. */ +#define OBJECTS_POSIX_CLASSES_LAST OBJECTS_POSIX_RWLOCKS + +/* + * For fake objects, which have an object identifier, but no objects + * information block. + */ +typedef enum { + OBJECTS_FAKE_OBJECTS_NO_CLASS = 0, + OBJECTS_FAKE_OBJECTS_SCHEDULERS = 1 +} Objects_Fake_objects_API; + +/** + * This enumerated type lists the locations which may be returned + * by _Objects_Get. These codes indicate the success of locating + * an object with the specified ID. + */ +typedef enum { +#if defined(RTEMS_MULTIPROCESSING) + OBJECTS_REMOTE = 2, /* object is remote */ +#endif + OBJECTS_LOCAL = 0, /* object is local */ + OBJECTS_ERROR = 1 /* id was invalid */ +} Objects_Locations; + +/** + * The following type defines the callout used when a local task + * is extracted from a remote thread queue (i.e. it's proxy must + * extracted from the remote queue). 
+ */ +typedef void ( *Objects_Thread_queue_Extract_callout )( void * ); + +/** + * The following defines the structure for the information used to + * manage each class of objects. + */ +typedef struct { + /** This field indicates the API of this object class. */ + Objects_APIs the_api; + /** This is the class of this object set. */ + uint16_t the_class; + /** This is the minimum valid id of this object class. */ + Objects_Id minimum_id; + /** This is the maximum valid id of this object class. */ + Objects_Id maximum_id; + /** This is the maximum number of objects in this class. */ + Objects_Maximum maximum; + /** This is the true if unlimited objects in this class. */ + bool auto_extend; + /** This is the number of objects in a block. */ + Objects_Maximum allocation_size; + /** This is the size in bytes of each object instance. */ + size_t size; + /** This points to the table of local objects. */ + Objects_Control **local_table; + /** This is the chain of inactive control blocks. */ + Chain_Control Inactive; + /** This is the number of objects on the Inactive list. */ + Objects_Maximum inactive; + /** This is the number of inactive objects per block. */ + uint32_t *inactive_per_block; + /** This is a table to the chain of inactive object memory blocks. */ + void **object_blocks; + #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES) + /** This is true if names are strings. */ + bool is_string; + #endif + /** This is the maximum length of names. */ + uint16_t name_length; + #if defined(RTEMS_MULTIPROCESSING) + /** This is this object class' method called when extracting a thread. */ + Objects_Thread_queue_Extract_callout extract; + /** This is this object class' pointer to the global name table */ + Chain_Control *global_table; + #endif +} Objects_Information; + +/** + * The following is referenced to the node number of the local node. 
+ */ +#if defined(RTEMS_MULTIPROCESSING) +SCORE_EXTERN uint16_t _Objects_Local_node; +#else +#define _Objects_Local_node ((uint16_t)1) +#endif + +/** + * The following is referenced to the number of nodes in the system. + */ +#if defined(RTEMS_MULTIPROCESSING) +SCORE_EXTERN uint16_t _Objects_Maximum_nodes; +#else +#define _Objects_Maximum_nodes 1 +#endif + +/** + * The following is the list of information blocks per API for each object + * class. From the ID, we can go to one of these information blocks, + * and obtain a pointer to the appropriate object control block. + */ +SCORE_EXTERN Objects_Information + **_Objects_Information_table[OBJECTS_APIS_LAST + 1]; + +/** + * This function extends an object class information record. + * + * @param[in] information points to an object class information block. + */ +void _Objects_Extend_information( + Objects_Information *information +); + +/** + * @brief Shrink an object class information record + * + * This function shrink an object class information record. + * The object's name and object space are released. The local_table + * etc block does not shrink. The InActive list needs to be scanned + * to find the objects are remove them. + * + * @param[in] information points to an object class information block. + */ +void _Objects_Shrink_information( + Objects_Information *information +); + +/** + * @brief Initialize object Information + * + * This function initializes an object class information record. + * SUPPORTS_GLOBAL is true if the object class supports global + * objects, and false otherwise. Maximum indicates the number + * of objects required in this class and size indicates the size + * in bytes of each control block for this object class. The + * name length and string designator are also set. In addition, + * the class may be a task, therefore this information is also included. + * + * @param[in] information points to an object class information block. 
+ * @param[in] the_api indicates the API associated with this information block. + * @param[in] the_class indicates the class of object being managed + * by this information block. It is specific to @a the_api. + * @param[in] maximum is the maximum number of instances of this object + * class which may be concurrently active. + * @param[in] size is the size of the data structure for this class. + * @param[in] is_string is true if this object uses string style names. + * @param[in] maximum_name_length is the maximum length of object names. + */ +void _Objects_Initialize_information ( + Objects_Information *information, + Objects_APIs the_api, + uint16_t the_class, + uint32_t maximum, + uint16_t size, + bool is_string, + uint32_t maximum_name_length +#if defined(RTEMS_MULTIPROCESSING) + , + bool supports_global, + Objects_Thread_queue_Extract_callout extract +#endif +); + +/** + * @brief Object API Maximum Class + * + * This function returns the highest numeric value of a valid + * API for the specified @a api. + * + * @param[in] api is the API of interest + * + * @retval A positive integer on success and 0 otherwise. + */ +unsigned int _Objects_API_maximum_class( + uint32_t api +); + +/** + * @brief Allocates an object without locking the allocator mutex. + * + * This function can be called in two contexts + * - the executing thread is the owner of the object allocator mutex, or + * - in case the system state is not up, e.g. during sequential system + * initialization. + * + * @param[in] information The object information block. + * + * @retval NULL No object available. + * @retval object The allocated object. + * + * @see _Objects_Allocate() and _Objects_Free(). + */ +Objects_Control *_Objects_Allocate_unprotected( + Objects_Information *information +); + +/** + * @brief Allocates an object. + * + * This function locks the object allocator mutex via + * _Objects_Allocator_lock(). 
The caller must later unlock the object + * allocator mutex via _Objects_Allocator_unlock(). The caller must unlock the + * mutex in any case, even if the allocation failed due to resource shortage. + * + * A typical object allocation code looks like this: + * @code + * rtems_status_code some_create( rtems_id *id ) + * { + * rtems_status_code sc; + * Some_Control *some; + * + * // The object allocator mutex protects the executing thread from + * // asynchronous thread restart and deletion. + * some = (Some_Control *) _Objects_Allocate( &_Some_Information ); + * + * if ( some != NULL ) { + * _Some_Initialize( some ); + * sc = RTEMS_SUCCESSFUL; + * } else { + * sc = RTEMS_TOO_MANY; + * } + * + * _Objects_Allocator_unlock(); + * + * return sc; + * } + * @endcode + * + * @param[in] information The object information block. + * + * @retval NULL No object available. + * @retval object The allocated object. + * + * @see _Objects_Free(). + */ +Objects_Control *_Objects_Allocate( Objects_Information *information ); + +/** + * @brief Frees an object. + * + * Appends the object to the chain of inactive objects. + * + * @param[in] information The object information block. + * @param[in] the_object The object to free. + * + * @see _Objects_Allocate(). + * + * A typical object deletion code looks like this: + * @code + * rtems_status_code some_delete( rtems_id id ) + * { + * rtems_status_code sc; + * Some_Control *some; + * Objects_Locations location; + * + * // The object allocator mutex protects the executing thread from + * // asynchronous thread restart and deletion. + * _Objects_Allocator_lock(); + * + * // This will disable thread dispatching, so this starts a thread dispatch + * // critical section. + * some = (Semaphore_Control *) + * _Objects_Get( &_Some_Information, id, &location ); + * + * switch ( location ) { + * case OBJECTS_LOCAL: + * // After the object close an object get with this identifier will + * // fail. 
+ * _Objects_Close( &_Some_Information, &some->Object ); + * + * _Some_Delete( some ); + * + * // This enables thread dispatching, so the thread dispatch critical + * // section ends here. + * _Objects_Put( &some->Object ); + * + * // Thread dispatching is enabled. The object free is only protected + * // by the object allocator mutex. + * _Objects_Free( &_Some_Information, &some->Object ); + * + * sc = RTEMS_SUCCESSFUL; + * break; + * default: + * sc = RTEMS_INVALID_ID; + * break; + * } + * + * _Objects_Allocator_unlock(); + * + * return sc; + * } + * @endcode + */ +void _Objects_Free( + Objects_Information *information, + Objects_Control *the_object +); + +/** + * This function implements the common portion of the object + * identification directives. This directive returns the object + * id associated with name. If more than one object of this class + * is named name, then the object to which the id belongs is + * arbitrary. Node indicates the extent of the search for the + * id of the object named name. If the object class supports global + * objects, then the search can be limited to a particular node + * or allowed to encompass all nodes. + */ +typedef enum { + OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL, + OBJECTS_INVALID_NAME, + OBJECTS_INVALID_ADDRESS, + OBJECTS_INVALID_ID, + OBJECTS_INVALID_NODE +} Objects_Name_or_id_lookup_errors; + +/** + * This macro defines the first entry in the + * @ref Objects_Name_or_id_lookup_errors enumerated list. + */ +#define OBJECTS_NAME_ERRORS_FIRST OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL + +/** + * This macro defines the last entry in the + * @ref Objects_Name_or_id_lookup_errors enumerated list. + */ +#define OBJECTS_NAME_ERRORS_LAST OBJECTS_INVALID_NODE + +/** + * @brief Converts an object name to an Id. + * + * This method converts an object name to an Id. It performs a look up + * using the object information block for this object class. + * + * @param[in] information points to an object class information block. 
+ * @param[in] name is the name of the object to find. + * @param[in] node is the set of nodes to search. + * @param[in] id will contain the Id if the search is successful. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * successful or failure. On success @a id will contain the Id of + * the requested object. + */ +Objects_Name_or_id_lookup_errors _Objects_Name_to_id_u32( + Objects_Information *information, + uint32_t name, + uint32_t node, + Objects_Id *id +); + +#if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES) +/** + * @brief Converts an object name to an Id. + * + * This method converts an object name to an Id. It performs a look up + * using the object information block for this object class. + * + * @param[in] information points to an object class information block. + * @param[in] name is the name of the object to find. + * @param[in] id will contain the Id if the search is successful. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * successful or failure. On success @a id will contain the Id of + * the requested object. + */ +Objects_Name_or_id_lookup_errors _Objects_Name_to_id_string( + Objects_Information *information, + const char *name, + Objects_Id *id +); +#endif + +/** + * @brief Implements the common portion of the object Id to name directives. + * + * This function implements the common portion of the object Id + * to name directives. This function returns the name + * associated with object id. + * + * @param[in] id is the Id of the object whose name we are locating. + * @param[in] name will contain the name of the object, if found. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * successful or failure. On success @a name will contain the name of + * the requested object. 
+ * + * @note This function currently does not support string names. + */ +Objects_Name_or_id_lookup_errors _Objects_Id_to_name ( + Objects_Id id, + Objects_Name *name +); + +/** + * @brief Maps object ids to object control blocks. + * + * This function maps object ids to object control blocks. + * If id corresponds to a local object, then it returns + * the_object control pointer which maps to id and location + * is set to OBJECTS_LOCAL. If the object class supports global + * objects and the object id is global and resides on a remote + * node, then location is set to OBJECTS_REMOTE, and the_object + * is undefined. Otherwise, location is set to OBJECTS_ERROR + * and the_object is undefined. + * + * @param[in] information points to an object class information block. + * @param[in] id is the Id of the object whose name we are locating. + * @param[in] location will contain an indication of success or failure. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * successful or failure. On success @a id will contain the Id of + * the requested object. + * + * @note _Objects_Get returns with dispatching disabled for + * local and remote objects. _Objects_Get_isr_disable returns with + * dispatching disabled for remote objects and interrupts for local + * objects. + */ +Objects_Control *_Objects_Get ( + Objects_Information *information, + Objects_Id id, + Objects_Locations *location +); + +/** + * @brief Maps object ids to object control blocks. + * + * This function maps object ids to object control blocks. + * If id corresponds to a local object, then it returns + * the_object control pointer which maps to id and location + * is set to OBJECTS_LOCAL. If the object class supports global + * objects and the object id is global and resides on a remote + * node, then location is set to OBJECTS_REMOTE, and the_object + * is undefined. 
Otherwise, location is set to OBJECTS_ERROR + * and the_object is undefined. + * + * @param[in] information points to an object class information block. + * @param[in] id is the Id of the object whose name we are locating. + * @param[in] location will contain an indication of success or failure. + * @param[in] lock_context is the interrupt lock context in which the + * previous interrupt state is saved for a later restore. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * success or failure. On success @a name will contain the name of + * the requested object. + * + * @note _Objects_Get returns with dispatching disabled for + * local and remote objects. _Objects_Get_isr_disable returns with + * dispatching disabled for remote objects and interrupts for local + * objects. + */ +Objects_Control *_Objects_Get_isr_disable( + Objects_Information *information, + Objects_Id id, + Objects_Locations *location, + ISR_lock_Context *lock_context +); + +/** + * @brief Maps object ids to object control blocks. + * + * This function maps object ids to object control blocks. + * If id corresponds to a local object, then it returns + * the_object control pointer which maps to id and location + * is set to OBJECTS_LOCAL. If the object class supports global + * objects and the object id is global and resides on a remote + * node, then location is set to OBJECTS_REMOTE, and the_object + * is undefined. Otherwise, location is set to OBJECTS_ERROR + * and the_object is undefined. + * + * @param[in] information points to an object class information block. + * @param[in] id is the Id of the object whose name we are locating. + * @param[in] location will contain an indication of success or failure. + * + * @retval This method returns one of the values from the + * @ref Objects_Name_or_id_lookup_errors enumeration to indicate + * success or failure. On success @a id will contain the Id of + * the requested object. 
+ * + * @note _Objects_Get returns with dispatching disabled for + * local and remote objects. _Objects_Get_isr_disable returns with + * dispatching disabled for remote objects and interrupts for local + * objects. + */ +Objects_Control *_Objects_Get_no_protection( + Objects_Information *information, + Objects_Id id, + Objects_Locations *location +); + +/** + * Like @ref _Objects_Get, but is used to find "next" open object. + * + * @param[in] information points to an object class information block. + * @param[in] id is the Id of the object whose name we are locating. + * @param[in] location_p will contain an indication of success or failure. + * @param[in] next_id_p is the Id of the next object we will look at. + * + * @retval This method returns the pointer to the object located or + * NULL on error. + */ +Objects_Control *_Objects_Get_next( + Objects_Information *information, + Objects_Id id, + Objects_Locations *location_p, + Objects_Id *next_id_p +); + +/** + * @brief Get object information. + * + * This function return the information structure given + * an the API and Class. This can be done independent of + * the existence of any objects created by the API. + * + * @param[in] the_api indicates the API for the information we want + * @param[in] the_class indicates the Class for the information we want + * + * @retval This method returns a pointer to the Object Information Table + * for the class of objects which corresponds to this object ID. + */ +Objects_Information *_Objects_Get_information( + Objects_APIs the_api, + uint16_t the_class +); + +/** + * @brief Get information of an object from an ID. + * + * This function return the information structure given + * an @a id of an object. + * + * @param[in] id is the object ID to get the information from + * + * @retval This method returns a pointer to the Object Information Table + * for the class of objects which corresponds to this object ID. 
+ */ +Objects_Information *_Objects_Get_information_id( + Objects_Id id +); + +/** + * @brief Gets object name in the form of a C string. + * + * This method objects the name of an object and returns its name + * in the form of a C string. It attempts to be careful about + * overflowing the user's string and about returning unprintable characters. + * + * @param[in] id is the object to obtain the name of + * @param[in] length indicates the length of the caller's buffer + * @param[in] name points a string which will be filled in. + * + * @retval This method returns @a name or NULL on error. @a *name will + * contain the name if successful. + */ +char *_Objects_Get_name_as_string( + Objects_Id id, + size_t length, + char *name +); + +/** + * @brief Set objects name. + * + * This method sets the object name to either a copy of a string + * or up to the first four characters of the string based upon + * whether this object class uses strings for names. + * + * @param[in] information points to the object information structure + * @param[in] the_object is the object to operate upon + * @param[in] name is a pointer to the name to use + * + * @retval If successful, true is returned. Otherwise false is returned. + */ +bool _Objects_Set_name( + Objects_Information *information, + Objects_Control *the_object, + const char *name +); + +/** + * @brief Removes object from namespace. + * + * This function removes @a the_object from the namespace. + * + * @param[in] information points to an Object Information Table. + * @param[in] the_object is a pointer to an object. + */ +void _Objects_Namespace_remove( + Objects_Information *information, + Objects_Control *the_object +); + +/** + * @brief Close object. + * + * This function removes the_object control pointer and object name + * in the Local Pointer and Local Name Tables. 
+ * + * @param[in] information points to an Object Information Table + * @param[in] the_object is a pointer to an object + */ +void _Objects_Close( + Objects_Information *information, + Objects_Control *the_object +); + +/** + * @brief Returns the count of active objects. + * + * @param[in] information The object information table. + * + * @retval The count of active objects. + */ +Objects_Maximum _Objects_Active_count( + const Objects_Information *information +); + +RTEMS_INLINE_ROUTINE Objects_Maximum _Objects_Extend_size( + const Objects_Information *information +) +{ + return information->auto_extend ? information->allocation_size : 0; +} + +/** + * This function returns true if the api is valid. + * + * @param[in] the_api is the api portion of an object ID. + * + * @return This method returns true if the specified api value is valid + * and false otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Objects_Is_api_valid( + uint32_t the_api +) +{ + if ( !the_api || the_api > OBJECTS_APIS_LAST ) + return false; + return true; +} + +/** + * This function returns true if the node is of the local object, and + * false otherwise. + * + * @param[in] node is the node number and corresponds to the node number + * portion of an object ID. + * + * @return This method returns true if the specified node is the local node + * and false otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Objects_Is_local_node( + uint32_t node +) +{ + return ( node == _Objects_Local_node ); +} + +/** + * This function returns true if the id is of a local object, and + * false otherwise. + * + * @param[in] id is an object ID + * + * @return This method returns true if the specified object Id is local + * and false otherwise. + * + * @note On a single processor configuration, this always returns true. 
+ */ +RTEMS_INLINE_ROUTINE bool _Objects_Is_local_id( +#if defined(RTEMS_MULTIPROCESSING) + Objects_Id id +#else + Objects_Id id RTEMS_UNUSED +#endif +) +{ +#if defined(RTEMS_MULTIPROCESSING) + return _Objects_Is_local_node( _Objects_Get_node(id) ); +#else + return true; +#endif +} + +/** + * This function returns true if left and right are equal, + * and false otherwise. + * + * @param[in] left is the Id on the left hand side of the comparison + * @param[in] right is the Id on the right hand side of the comparison + * + * @return This method returns true if the specified object IDs are equal + * and false otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Objects_Are_ids_equal( + Objects_Id left, + Objects_Id right +) +{ + return ( left == right ); +} + +/** + * This function returns a pointer to the local_table object + * referenced by the index. + * + * @param[in] information points to an Object Information Table + * @param[in] index is the index of the object the caller wants to access + * + * @return This method returns a pointer to a local object or NULL if the + * index is invalid and RTEMS_DEBUG is enabled. + */ +RTEMS_INLINE_ROUTINE Objects_Control *_Objects_Get_local_object( + Objects_Information *information, + uint16_t index +) +{ + /* + * This routine is ONLY to be called from places in the code + * where the Id is known to be good. Therefore, this should NOT + * occur in normal situations. + */ + #if defined(RTEMS_DEBUG) + if ( index > information->maximum ) + return NULL; + #endif + return information->local_table[ index ]; +} + +/** + * This function sets the pointer to the local_table object + * referenced by the index. + * + * @param[in] information points to an Object Information Table + * @param[in] index is the index of the object the caller wants to access + * @param[in] the_object is the local object pointer + * + * @note This routine is ONLY to be called in places where the + * index portion of the Id is known to be good. 
This is + * OK since it is normally called from object create/init + * or delete/destroy operations. + */ + +RTEMS_INLINE_ROUTINE void _Objects_Set_local_object( + Objects_Information *information, + uint32_t index, + Objects_Control *the_object +) +{ + /* + * This routine is ONLY to be called from places in the code + * where the Id is known to be good. Therefore, this should NOT + * occur in normal situations. + */ + #if defined(RTEMS_DEBUG) + if ( index > information->maximum ) + return; + #endif + + information->local_table[ index ] = the_object; +} + +/** + * This function sets the pointer to the local_table object + * referenced by the index to a NULL so the object Id is invalid + * after this call. + * + * @param[in] information points to an Object Information Table + * @param[in] the_object is the local object pointer + * + * @note This routine is ONLY to be called in places where the + * index portion of the Id is known to be good. This is + * OK since it is normally called from object create/init + * or delete/destroy operations. + */ + +RTEMS_INLINE_ROUTINE void _Objects_Invalidate_Id( + Objects_Information *information, + Objects_Control *the_object +) +{ + _Assert( information != NULL ); + _Assert( the_object != NULL ); + + _Objects_Set_local_object( + information, + _Objects_Get_index( the_object->id ), + NULL + ); +} + +/** + * This function places the_object control pointer and object name + * in the Local Pointer and Local Name Tables, respectively. 
+ * + * @param[in] information points to an Object Information Table + * @param[in] the_object is a pointer to an object + * @param[in] name is the name of the object to make accessible + */ +RTEMS_INLINE_ROUTINE void _Objects_Open( + Objects_Information *information, + Objects_Control *the_object, + Objects_Name name +) +{ + _Assert( information != NULL ); + _Assert( the_object != NULL ); + + the_object->name = name; + + _Objects_Set_local_object( + information, + _Objects_Get_index( the_object->id ), + the_object + ); +} + +/** + * This function places the_object control pointer and object name + * in the Local Pointer and Local Name Tables, respectively. + * + * @param[in] information points to an Object Information Table + * @param[in] the_object is a pointer to an object + * @param[in] name is the name of the object to make accessible + */ +RTEMS_INLINE_ROUTINE void _Objects_Open_u32( + Objects_Information *information, + Objects_Control *the_object, + uint32_t name +) +{ + /* ASSERT: information->is_string == false */ + the_object->name.name_u32 = name; + + _Objects_Set_local_object( + information, + _Objects_Get_index( the_object->id ), + the_object + ); +} + +/** + * This function places the_object control pointer and object name + * in the Local Pointer and Local Name Tables, respectively. + * + * @param[in] information points to an Object Information Table + * @param[in] the_object is a pointer to an object + * @param[in] name is the name of the object to make accessible + */ +RTEMS_INLINE_ROUTINE void _Objects_Open_string( + Objects_Information *information, + Objects_Control *the_object, + const char *name +) +{ + #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES) + /* ASSERT: information->is_string */ + the_object->name.name_p = name; + #endif + + _Objects_Set_local_object( + information, + _Objects_Get_index( the_object->id ), + the_object + ); +} + +/** + * @brief Puts back an object obtained with _Objects_Get(). 
+ * + * This function decrements the thread dispatch disable level. The + * _Thread_Dispatch() is called if the level reaches zero. + */ +RTEMS_INLINE_ROUTINE void _Objects_Put( + Objects_Control *the_object +) +{ + (void) the_object; + _Thread_Enable_dispatch(); +} + +/** + * @brief Puts back an object obtained with _Objects_Get(). + * + * This function decrements the thread dispatch disable level. The + * _Thread_Dispatch() is not called if the level reaches zero, thus a thread + * dispatch will not take place immediately on the current processor. + */ +RTEMS_INLINE_ROUTINE void _Objects_Put_without_thread_dispatch( + Objects_Control *the_object +) +{ + (void) the_object; + _Thread_Unnest_dispatch(); +} + +/** + * @brief Locks the object allocator mutex. + * + * While holding the allocator mutex the executing thread is protected from + * asynchronous thread restart and deletion. + * + * The usage of the object allocator mutex with the thread life protection + * makes it possible to allocate and free objects without thread dispatching + * disabled. The usage of a unified workspace and unlimited objects may lead + * to heap fragmentation. Thus the execution time of the _Objects_Allocate() + * function may increase during system run-time. + * + * @see _Objects_Allocator_unlock() and _Objects_Allocate(). + */ +RTEMS_INLINE_ROUTINE void _Objects_Allocator_lock( void ) +{ + _RTEMS_Lock_allocator(); +} + +/** + * @brief Unlocks the object allocator mutex. + * + * In case the mutex is fully unlocked, then this function restores the + * previous thread life protection state and thus may not return if the + * executing thread was restarted or deleted in the mean-time. 
+ */ +RTEMS_INLINE_ROUTINE void _Objects_Allocator_unlock( void ) +{ + _RTEMS_Unlock_allocator(); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#if defined(RTEMS_MULTIPROCESSING) +#include <rtems/score/objectmp.h> +#endif + + +#endif +/* end of include file */ diff --git a/include/rtems/score/onceimpl.h b/include/rtems/score/onceimpl.h new file mode 100644 index 0000000000..d81f1d0b31 --- /dev/null +++ b/include/rtems/score/onceimpl.h @@ -0,0 +1,48 @@ +/** + * @file + * + * @ingroup ScoreOnce + * + * @brief Once API + */ + +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_ONCE_H +#define _RTEMS_ONCE_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreOnce Once Functions. + * + * @ingroup Score + * + * @brief The _Once() function for pthread_once() and rtems_gxx_once(). + * + * @{ + */ + +int _Once( int *once_state, void (*init_routine)(void) ); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_ONCE_H */ diff --git a/include/rtems/score/or1k-utility.h b/include/rtems/score/or1k-utility.h new file mode 100644 index 0000000000..98bbe41b00 --- /dev/null +++ b/include/rtems/score/or1k-utility.h @@ -0,0 +1,396 @@ +/** + * @file + * + * @brief OR1K utility + */ +/* + * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_OR1K_UTILITY_H +#define _RTEMS_SCORE_OR1K_UTILITY_H + +/* SPR groups definitions */ +#define SPR_GRP_SHAMT 11 +#define SPR_GRP0_SYS_CTRL (0 << SPR_GRP_SHAMT) +#define SPR_GRP1_DMMU (1 << SPR_GRP_SHAMT) +#define SPR_GRP2_IMMU (2 << SPR_GRP_SHAMT) +#define SPR_GRP3_DC (3 << SPR_GRP_SHAMT) +#define SPR_GRP4_IC (4 << SPR_GRP_SHAMT) +#define SPR_GRP5_MAC (5 << SPR_GRP_SHAMT) +#define SPR_GRP6_DEBUG (6 << SPR_GRP_SHAMT) +#define SPR_GRP7_PERF_CTR (7 << SPR_GRP_SHAMT) +#define SPR_GRP8_PWR_MNG (8 << SPR_GRP_SHAMT) +#define SPR_GRP9_PIC (9 << SPR_GRP_SHAMT) +#define SPR_GPR10_TICK_TMR (10 << SPR_GRP_SHAMT) +#define SPR_GPR11_FPU (11 << SPR_GRP_SHAMT) + +/* SPR registers definitions */ + +/* Group 0: System control registers */ +#define CPU_OR1K_SPR_VR (SPR_GRP0_SYS_CTRL + 0) +#define CPU_OR1K_SPR_UPR (SPR_GRP0_SYS_CTRL + 1) +#define CPU_OR1K_SPR_CPUCFGR (SPR_GRP0_SYS_CTRL + 2) +#define CPU_OR1K_SPR_DMMUCFGR (SPR_GRP0_SYS_CTRL + 3) +#define CPU_OR1K_SPR_IMMUCFGR (SPR_GRP0_SYS_CTRL + 4) +#define CPU_OR1K_SPR_DCCFGR (SPR_GRP0_SYS_CTRL + 5) +#define CPU_OR1K_SPR_ICCFGR (SPR_GRP0_SYS_CTRL + 6) +#define CPU_OR1K_SPR_DCFGR (SPR_GRP0_SYS_CTRL + 7) +#define CPU_OR1K_SPR_PCCFGR (SPR_GRP0_SYS_CTRL + 8) +#define CPU_OR1K_SPR_VR2 (SPR_GRP0_SYS_CTRL + 9) +#define CPU_OR1K_SPR_AVR (SPR_GRP0_SYS_CTRL + 10) +#define CPU_OR1K_SPR_EVBAR (SPR_GRP0_SYS_CTRL + 11) +#define CPU_OR1K_SPR_AECR (SPR_GRP0_SYS_CTRL + 12) +#define CPU_OR1K_SPR_AESR (SPR_GRP0_SYS_CTRL + 13) +#define CPU_OR1K_SPR_NPC (SPR_GRP0_SYS_CTRL + 16) +#define CPU_OR1K_SPR_SR (SPR_GRP0_SYS_CTRL + 17) +#define CPU_OR1K_SPR_PPC (SPR_GRP0_SYS_CTRL + 18) +#define CPU_OR1K_SPR_FPCSR (SPR_GRP0_SYS_CTRL + 20) +#define CPU_OR1K_SPR_EPCR0 (SPR_GRP0_SYS_CTRL + 32) +#define CPU_OR1K_SPR_EPCR1 (SPR_GRP0_SYS_CTRL + 33) +#define CPU_OR1K_SPR_EPCR2 (SPR_GRP0_SYS_CTRL + 34) +#define CPU_OR1K_SPR_EPCR3 (SPR_GRP0_SYS_CTRL + 35) +#define CPU_OR1K_SPR_EPCR4 (SPR_GRP0_SYS_CTRL + 36) +#define CPU_OR1K_SPR_EPCR5 
(SPR_GRP0_SYS_CTRL + 37) +#define CPU_OR1K_SPR_EPCR6 (SPR_GRP0_SYS_CTRL + 38) +#define CPU_OR1K_SPR_EPCR7 (SPR_GRP0_SYS_CTRL + 39) +#define CPU_OR1K_SPR_EPCR8 (SPR_GRP0_SYS_CTRL + 40) +#define CPU_OR1K_SPR_EPCR9 (SPR_GRP0_SYS_CTRL + 41) +#define CPU_OR1K_SPR_EPCR10 (SPR_GRP0_SYS_CTRL + 42) +#define CPU_OR1K_SPR_EPCR11 (SPR_GRP0_SYS_CTRL + 43) +#define CPU_OR1K_SPR_EPCR12 (SPR_GRP0_SYS_CTRL + 44) +#define CPU_OR1K_SPR_EPCR13 (SPR_GRP0_SYS_CTRL + 45) +#define CPU_OR1K_SPR_EPCR14 (SPR_GRP0_SYS_CTRL + 46) +#define CPU_OR1K_SPR_EPCR15 (SPR_GRP0_SYS_CTRL + 47) +#define CPU_OR1K_SPR_EEAR0 (SPR_GRP0_SYS_CTRL + 48) +#define CPU_OR1K_SPR_EEAR1 (SPR_GRP0_SYS_CTRL + 49) +#define CPU_OR1K_SPR_EEAR2 (SPR_GRP0_SYS_CTRL + 50) +#define CPU_OR1K_SPR_EEAR3 (SPR_GRP0_SYS_CTRL + 51) +#define CPU_OR1K_SPR_EEAR4 (SPR_GRP0_SYS_CTRL + 52) +#define CPU_OR1K_SPR_EEAR5 (SPR_GRP0_SYS_CTRL + 53) +#define CPU_OR1K_SPR_EEAR6 (SPR_GRP0_SYS_CTRL + 54) +#define CPU_OR1K_SPR_EEAR7 (SPR_GRP0_SYS_CTRL + 55) +#define CPU_OR1K_SPR_EEAR8 (SPR_GRP0_SYS_CTRL + 56) +#define CPU_OR1K_SPR_EEAR9 (SPR_GRP0_SYS_CTRL + 57) +#define CPU_OR1K_SPR_EEAR10 (SPR_GRP0_SYS_CTRL + 58) +#define CPU_OR1K_SPR_EEAR11 (SPR_GRP0_SYS_CTRL + 59) +#define CPU_OR1K_SPR_EEAR12 (SPR_GRP0_SYS_CTRL + 60) +#define CPU_OR1K_SPR_EEAR13 (SPR_GRP0_SYS_CTRL + 61) +#define CPU_OR1K_SPR_EEAR14 (SPR_GRP0_SYS_CTRL + 62) +#define CPU_OR1K_SPR_EEAR15 (SPR_GRP0_SYS_CTRL + 63) +#define CPU_OR1K_SPR_ESR0 (SPR_GRP0_SYS_CTRL + 64) +#define CPU_OR1K_SPR_ESR1 (SPR_GRP0_SYS_CTRL + 65) +#define CPU_OR1K_SPR_ESR2 (SPR_GRP0_SYS_CTRL + 66) +#define CPU_OR1K_SPR_ESR3 (SPR_GRP0_SYS_CTRL + 67) +#define CPU_OR1K_SPR_ESR4 (SPR_GRP0_SYS_CTRL + 68) +#define CPU_OR1K_SPR_ESR5 (SPR_GRP0_SYS_CTRL + 69) +#define CPU_OR1K_SPR_ESR6 (SPR_GRP0_SYS_CTRL + 70) +#define CPU_OR1K_SPR_ESR7 (SPR_GRP0_SYS_CTRL + 71) +#define CPU_OR1K_SPR_ESR8 (SPR_GRP0_SYS_CTRL + 72) +#define CPU_OR1K_SPR_ESR9 (SPR_GRP0_SYS_CTRL + 73) +#define CPU_OR1K_SPR_ESR10 (SPR_GRP0_SYS_CTRL + 74) +#define 
CPU_OR1K_SPR_ESR11 (SPR_GRP0_SYS_CTRL + 75) +#define CPU_OR1K_SPR_ESR12 (SPR_GRP0_SYS_CTRL + 76) +#define CPU_OR1K_SPR_ESR13 (SPR_GRP0_SYS_CTRL + 77) +#define CPU_OR1K_SPR_ESR14 (SPR_GRP0_SYS_CTRL + 78) +#define CPU_OR1K_SPR_ESR15 (SPR_GRP0_SYS_CTRL + 79) + +/* Shadow registers base */ +#define CPU_OR1K_SPR_GPR32 (SPR_GRP0_SYS_CTRL + 1024) + +/* Group1: Data MMU registers */ +#define CPU_OR1K_SPR_DMMUCR (SPR_GRP1_DMMU + 0) +#define CPU_OR1K_SPR_DMMUPR (SPR_GRP1_DMMU + 1) +#define CPU_OR1K_SPR_DTLBEIR (SPR_GRP1_DMMU + 2) +#define CPU_OR1K_SPR_DATBMR0 (SPR_GRP1_DMMU + 4) +#define CPU_OR1K_SPR_DATBMR1 (SPR_GRP1_DMMU + 5) +#define CPU_OR1K_SPR_DATBMR2 (SPR_GRP1_DMMU + 6) +#define CPU_OR1K_SPR_DATBMR3 (SPR_GRP1_DMMU + 7) +#define CPU_OR1K_SPR_DATBTR0 (SPR_GRP1_DMMU + 8) +#define CPU_OR1K_SPR_DATBTR1 (SPR_GRP1_DMMU + 9) +#define CPU_OR1K_SPR_DATBTR2 (SPR_GRP1_DMMU + 10) +#define CPU_OR1K_SPR_DATBTR3 (SPR_GRP1_DMMU + 11) + +/* Group2: Instruction MMU registers */ +#define CPU_OR1K_SPR_IMMUCR (SPR_GRP2_IMMU + 0) +#define CPU_OR1K_SPR_IMMUPR (SPR_GRP2_IMMU + 1) +#define CPU_OR1K_SPR_ITLBEIR (SPR_GRP2_IMMU + 2) +#define CPU_OR1K_SPR_IATBMR0 (SPR_GRP2_IMMU + 4) +#define CPU_OR1K_SPR_IATBMR1 (SPR_GRP2_IMMU + 5) +#define CPU_OR1K_SPR_IATBMR2 (SPR_GRP2_IMMU + 6) +#define CPU_OR1K_SPR_IATBMR3 (SPR_GRP2_IMMU + 7) +#define CPU_OR1K_SPR_IATBTR0 (SPR_GRP2_IMMU + 8) +#define CPU_OR1K_SPR_IATBTR1 (SPR_GRP2_IMMU + 9) +#define CPU_OR1K_SPR_IATBTR2 (SPR_GRP2_IMMU + 10) +#define CPU_OR1K_SPR_IATBTR3 (SPR_GRP2_IMMU + 11) + +/* Group3: Data Cache registers */ +#define CPU_OR1K_SPR_DCCR (SPR_GRP3_DC + 0) +#define CPU_OR1K_SPR_DCBPR (SPR_GRP3_DC + 1) +#define CPU_OR1K_SPR_DCBFR (SPR_GRP3_DC + 2) +#define CPU_OR1K_SPR_DCBIR (SPR_GRP3_DC + 3) +#define CPU_OR1K_SPR_DCBWR (SPR_GRP3_DC + 4) +#define CPU_OR1K_SPR_DCBLR (SPR_GRP3_DC + 5) + +/* Group4: Instruction Cache registers */ +#define CPU_OR1K_SPR_ICCR (SPR_GRP4_IC + 0) +#define CPU_OR1K_SPR_ICBPR (SPR_GRP4_IC + 1) +#define CPU_OR1K_SPR_ICBIR 
(SPR_GRP4_IC + 2) +#define CPU_OR1K_SPR_ICBLR (SPR_GRP4_IC + 3) + +/* Group5: MAC registers */ +#define CPU_OR1K_SPR_MACLO (SPR_GRP5_MAC + 1) +#define CPU_OR1K_SPR_MACHI (SPR_GRP5_MAC + 2) + +/* Group6: Debug registers */ +#define CPU_OR1K_SPR_DVR0 (SPR_GRP6_DEBUG + 0) +#define CPU_OR1K_SPR_DVR1 (SPR_GRP6_DEBUG + 1) +#define CPU_OR1K_SPR_DVR2 (SPR_GRP6_DEBUG + 2) +#define CPU_OR1K_SPR_DVR3 (SPR_GRP6_DEBUG + 3) +#define CPU_OR1K_SPR_DVR4 (SPR_GRP6_DEBUG + 4) +#define CPU_OR1K_SPR_DVR5 (SPR_GRP6_DEBUG + 5) +#define CPU_OR1K_SPR_DVR6 (SPR_GRP6_DEBUG + 6) +#define CPU_OR1K_SPR_DVR7 (SPR_GRP6_DEBUG + 7) +#define CPU_OR1K_SPR_DCR0 (SPR_GRP6_DEBUG + 8) +#define CPU_OR1K_SPR_DCR1 (SPR_GRP6_DEBUG + 9) +#define CPU_OR1K_SPR_DCR2 (SPR_GRP6_DEBUG + 10) +#define CPU_OR1K_SPR_DCR3 (SPR_GRP6_DEBUG + 11) +#define CPU_OR1K_SPR_DCR4 (SPR_GRP6_DEBUG + 12) +#define CPU_OR1K_SPR_DCR5 (SPR_GRP6_DEBUG + 13) +#define CPU_OR1K_SPR_DCR6 (SPR_GRP6_DEBUG + 14) +#define CPU_OR1K_SPR_DCR7 (SPR_GRP6_DEBUG + 15) +#define CPU_OR1K_SPR_DMR1 (SPR_GRP6_DEBUG + 16) +#define CPU_OR1K_SPR_DMR2 (SPR_GRP6_DEBUG + 17) +#define CPU_OR1K_SPR_DCWR0 (SPR_GRP6_DEBUG + 18) +#define CPU_OR1K_SPR_DCWR1 (SPR_GRP6_DEBUG + 19) +#define CPU_OR1K_SPR_DSR (SPR_GRP6_DEBUG + 20) +#define CPU_OR1K_SPR_DRR (SPR_GRP6_DEBUG + 21) + +/* Group7: Performance counters registers */ +#define CPU_OR1K_SPR_PCCR0 (SPR_GRP7_PERF_CTR + 0) +#define CPU_OR1K_SPR_PCCR1 (SPR_GRP7_PERF_CTR + 1) +#define CPU_OR1K_SPR_PCCR2 (SPR_GRP7_PERF_CTR + 2) +#define CPU_OR1K_SPR_PCCR3 (SPR_GRP7_PERF_CTR + 3) +#define CPU_OR1K_SPR_PCCR4 (SPR_GRP7_PERF_CTR + 4) +#define CPU_OR1K_SPR_PCCR5 (SPR_GRP7_PERF_CTR + 5) +#define CPU_OR1K_SPR_PCCR6 (SPR_GRP7_PERF_CTR + 6) +#define CPU_OR1K_SPR_PCCR7 (SPR_GRP7_PERF_CTR + 7) +#define CPU_OR1K_SPR_PCMR0 (SPR_GRP7_PERF_CTR + 8) +#define CPU_OR1K_SPR_PCMR1 (SPR_GRP7_PERF_CTR + 9) +#define CPU_OR1K_SPR_PCMR2 (SPR_GRP7_PERF_CTR + 10) +#define CPU_OR1K_SPR_PCMR3 (SPR_GRP7_PERF_CTR + 11) +#define CPU_OR1K_SPR_PCMR4 
(SPR_GRP7_PERF_CTR + 12) +#define CPU_OR1K_SPR_PCMR5 (SPR_GRP7_PERF_CTR + 13) +#define CPU_OR1K_SPR_PCMR6 (SPR_GRP7_PERF_CTR + 14) +#define CPU_OR1K_SPR_PCMR7 (SPR_GRP7_PERF_CTR + 15) + +/* Group8: Power management register */ +#define CPU_OR1K_SPR_PMR (SPR_GRP8_PWR_MNG + 0) + +/* Group9: PIC registers */ +#define CPU_OR1K_SPR_PICMR (SPR_GRP9_PIC + 0) +#define CPU_OR1K_SPR_PICSR (SPR_GRP9_PIC + 2) + +/* Group10: Tick Timer registers */ +#define CPU_OR1K_SPR_TTMR (SPR_GPR10_TICK_TMR + 0) +#define CPU_OR1K_SPR_TTCR (SPR_GPR10_TICK_TMR + 1) + + /* Shift amount macros for bits position in Supervision Register */ +#define CPU_OR1K_SPR_SR_SHAMT_SM (0) +#define CPU_OR1K_SPR_SR_SHAMT_TEE (1) +#define CPU_OR1K_SPR_SR_SHAMT_IEE (2) +#define CPU_OR1K_SPR_SR_SHAMT_DCE (3) +#define CPU_OR1K_SPR_SR_SHAMT_ICE (4) +#define CPU_OR1K_SPR_SR_SHAMT_DME (5) +#define CPU_OR1K_SPR_SR_SHAMT_IME (6) +#define CPU_OR1K_SPR_SR_SHAMT_LEE (7) +#define CPU_OR1K_SPR_SR_SHAMT_CE (8) +#define CPU_OR1K_SPR_SR_SHAMT_F (9) +#define CPU_OR1K_SPR_SR_SHAMT_CY (10) +#define CPU_OR1K_SPR_SR_SHAMT_OV (11) +#define CPU_OR1K_SPR_SR_SHAMT_OVE (12) +#define CPU_OR1K_SPR_SR_SHAMT_DSX (13) +#define CPU_OR1K_SPR_SR_SHAMT_EPH (14) +#define CPU_OR1K_SPR_SR_SHAMT_FO (15) +#define CPU_OR1K_SPR_SR_SHAMT_SUMRA (16) +#define CPU_OR1K_SPR_SR_SHAMT_CID (28) + +/* Supervision Mode Register. 
@see OpenRISC architecture manual*/ + + /* Supervisor Mode */ +#define CPU_OR1K_SPR_SR_SM (1 << CPU_OR1K_SPR_SR_SHAMT_SM) +/* Tick Timer Exception Enabled */ +#define CPU_OR1K_SPR_SR_TEE (1 << CPU_OR1K_SPR_SR_SHAMT_TEE) +/* Interrupt Exception Enabled */ +#define CPU_OR1K_SPR_SR_IEE (1 << CPU_OR1K_SPR_SR_SHAMT_IEE) +/* Data Cache Enable */ +#define CPU_OR1K_SPR_SR_DCE (1 << CPU_OR1K_SPR_SR_SHAMT_DCE) +/* Instruction Cache Enable */ +#define CPU_OR1K_SPR_SR_ICE (1 << CPU_OR1K_SPR_SR_SHAMT_ICE) +/* Data MMU Enable */ +#define CPU_OR1K_SPR_SR_DME (1 << CPU_OR1K_SPR_SR_SHAMT_DME) +/* Instruction MMU Enable */ +#define CPU_OR1K_SPR_SR_IME (1 << CPU_OR1K_SPR_SR_SHAMT_IME) +/* Little Endian Enable */ +#define CPU_OR1K_SPR_SR_LEE (1 << CPU_OR1K_SPR_SR_SHAMT_LEE) +/* CID Enable */ +#define CPU_OR1K_SPR_SR_CE (1 << CPU_OR1K_SPR_SR_SHAMT_CE) +/* Conditional branch flag */ +#define CPU_OR1K_SPR_SR_F (1 << CPU_OR1K_SPR_SR_SHAMT_F) +/* Carry flag */ +#define CPU_OR1K_SPR_SR_CY (1 << CPU_OR1K_SPR_SR_SHAMT_CY) +/* Overflow flag */ +#define CPU_OR1K_SPR_SR_OV (1 << CPU_OR1K_SPR_SR_SHAMT_OV) +/* Overflow flag Exception */ +#define CPU_OR1K_SPR_SR_OVE (1 << CPU_OR1K_SPR_SR_SHAMT_OVE) +/* Delay Slot Exception */ +#define CPU_OR1K_SPR_SR_DSX (1 << CPU_OR1K_SPR_SR_SHAMT_DSX) + /* Exception Prefix High */ +#define CPU_OR1K_SPR_SR_EPH (1 << CPU_OR1K_SPR_SR_SHAMT_EPH) +/* Fixed One */ +#define CPU_OR1K_SPR_SR_FO (1 << CPU_OR1K_SPR_SR_SHAMT_FO) +/* SPRs User Mode Read Access */ +#define CPU_OR1K_SPR_SR_SUMRA (1 << CPU_OR1K_SPR_SR_SHAMT_SUMRA) +/* Context ID (Fast Context Switching): 4-bit field at SR[31:28], hence 0xF mask */ +#define CPU_OR1K_SPR_SR_CID (0xF << CPU_OR1K_SPR_SR_SHAMT_CID) + +/* Tick timer configuration bits */ +#define CPU_OR1K_SPR_TTMR_SHAMT_IP 28 +#define CPU_OR1K_SPR_TTMR_SHAMT_IE 29 +#define CPU_OR1K_SPR_TTMR_SHAMT_MODE 30 + +#define CPU_OR1K_SPR_TTMR_TP_MASK (0x0FFFFFFF) +#define CPU_OR1K_SPR_TTMR_IP (1 << CPU_OR1K_SPR_TTMR_SHAMT_IP) +#define CPU_OR1K_SPR_TTMR_IE (1 << CPU_OR1K_SPR_TTMR_SHAMT_IE) +#define 
CPU_OR1K_SPR_TTMR_MODE_RESTART (1 << CPU_OR1K_SPR_TTMR_SHAMT_MODE) +#define CPU_OR1K_SPR_TTMR_MODE_ONE_SHOT (2 << CPU_OR1K_SPR_TTMR_SHAMT_MODE) +#define CPU_OR1K_SPR_TTMR_MODE_CONT (3 << CPU_OR1K_SPR_TTMR_SHAMT_MODE) + +/* Power management register bits */ + +/* Shift amount macros for bit positions in Power Management register */ +#define CPU_OR1K_SPR_PMR_SHAMT_SDF 0 +#define CPU_OR1K_SPR_PMR_SHAMT_DME 4 +#define CPU_OR1K_SPR_PMR_SHAMT_SME 5 +#define CPU_OR1K_SPR_PMR_SHAMT_DCGE 6 +#define CPU_OR1K_SPR_PMR_SHAMT_SUME 7 + +#define CPU_OR1K_SPR_PMR_SDF (0xF << CPU_OR1K_SPR_PMR_SHAMT_SDF) +#define CPU_OR1K_SPR_PMR_DME (1 << CPU_OR1K_SPR_PMR_SHAMT_DME) +#define CPU_OR1K_SPR_PMR_SME (1 << CPU_OR1K_SPR_PMR_SHAMT_SME) +#define CPU_OR1K_SPR_PMR_DCGE (1 << CPU_OR1K_SPR_PMR_SHAMT_DCGE) +#define CPU_OR1K_SPR_PMR_SUME (1 << CPU_OR1K_SPR_PMR_SHAMT_SUME) + +#ifndef ASM + +#include <stddef.h> +#include <stdint.h> +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @brief Supervision Mode registers definitions. + * + * @see OpenRISC architecture manual - revision 0. 
+ */ +typedef enum { + OR1K_EXCEPTION_RESET = 1, + OR1K_EXCEPTION_BUS_ERR = 2, + OR1K_EXCEPTION_D_PF = 3, /* Data Page Fault */ + OR1K_EXCEPTION_I_PF = 4, /* Instruction Page Fault */ + OR1K_EXCEPTION_TICK_TIMER = 5, + OR1K_EXCEPTION_ALIGNMENT = 6, + OR1K_EXCEPTION_I_UNDEF= 7, /* Undefiend instruction */ + OR1K_EXCEPTION_IRQ = 8, /* External interrupt */ + OR1K_EXCPETION_D_TLB = 9, /* Data TLB miss */ + OR1K_EXCPETION_I_TLB = 10, /* Instruction TLB miss */ + OR1K_EXCPETION_RANGE = 11, /* Range exception */ + OR1K_EXCPETION_SYS_CALL = 12, + OR1K_EXCPETION_FP = 13, /* Floating point exception */ + OR1K_EXCPETION_TRAP = 14, /* Caused by l.trap instruction or by debug unit */ + OR1K_EXCPETION_RESERVED1 = 15, + OR1K_EXCPETION_RESERVED2 = 16, + OR1K_EXCPETION_RESERVED3 = 17, + MAX_EXCEPTIONS = 17, + OR1K_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff +} OR1K_Symbolic_exception_name; + +static inline uint32_t _OR1K_mfspr(uint32_t reg) +{ + uint32_t spr_value; + + asm volatile ( + "l.mfspr %0, %1, 0;\n\t" + : "=r" (spr_value) : "r" (reg)); + + return spr_value; +} + +static inline void _OR1K_mtspr(uint32_t reg, uint32_t value) +{ + asm volatile ( + "l.mtspr %1, %0, 0;\n\t" + :: "r" (value), "r" (reg) + ); +} + +/** + * @brief The slow down feature takes advantage of the low-power + * dividers in external clock generation circuitry to enable full + * functionality, but at a lower frequency so that power consumption + * is reduced. @see OpenRISC architecture manual, power management section. + * + * @param[in] value is 4 bit value to be written in PMR[SDF]. + * A lower value specifies higher expected performance from the processor core. 
+ * + */ +#define _OR1K_CPU_SlowDown(value) \ + _OR1K_mtspr(CPU_OR1K_SPR_PMR, (value & CPU_OR1K_SPR_PMR_SDF)) + + +/* Enter doze mode by setting PMR[DME]. */ +#define _OR1K_CPU_Doze() \ + _OR1K_mtspr(CPU_OR1K_SPR_PMR, CPU_OR1K_SPR_PMR_DME) + + +/* Enter sleep mode by setting PMR[SME]. */ +#define _OR1K_CPU_Sleep() \ + _OR1K_mtspr(CPU_OR1K_SPR_PMR, CPU_OR1K_SPR_PMR_SME) + +/* Enter suspend mode by setting PMR[SUME] (previously wrote PMR[SME], + * which made this macro a duplicate of _OR1K_CPU_Sleep()). */ +#define _OR1K_CPU_Suspend() \ + _OR1K_mtspr(CPU_OR1K_SPR_PMR, CPU_OR1K_SPR_PMR_SUME) + +static inline void _OR1K_Sync_mem( void ) +{ + asm volatile("l.msync"); +} + +static inline void _OR1K_Sync_pipeline( void ) +{ + asm volatile("l.psync"); +} + +/** + * @brief or1ksim simulator can be sent a halt signal from RTEMS to tell + * the running or1ksim process on the host machine to exit. The following + * implementation has no effect on QEMU or hardware implementation and will + * be treated as normal l.nop. + * + */ +#define _OR1KSIM_CPU_Halt() \ + asm volatile ("l.nop 0xc") + +#ifdef __cplusplus +} +#endif + +#else /* ASM */ + +#endif /* ASM */ + +#endif /* _RTEMS_SCORE_OR1K_UTILITY_H */ diff --git a/include/rtems/score/or1k.h b/include/rtems/score/or1k.h new file mode 100644 index 0000000000..e1a3ddce42 --- /dev/null +++ b/include/rtems/score/or1k.h @@ -0,0 +1,49 @@ +/** + * @file rtems/score/or1k.h + */ + +/* + * This file contains information pertaining to the OR1K processor. + * + * COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com> + * + * Based on code with the following copyright... + * COPYRIGHT (c) 1989-1999, 2010. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_OR1K_H +#define _RTEMS_SCORE_OR1K_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the OR1K family. 
+ * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + + /* + * Define the name of the CPU family and specific model. + */ + +#define CPU_NAME "OR1K" +#define CPU_MODEL_NAME "OR1200" + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_OR1K_H */ diff --git a/include/rtems/score/percpu.h b/include/rtems/score/percpu.h new file mode 100644 index 0000000000..806c290b7c --- /dev/null +++ b/include/rtems/score/percpu.h @@ -0,0 +1,688 @@ +/** + * @file rtems/score/percpu.h + * + * This include file defines the per CPU information required + * by RTEMS. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_PERCPU_H +#define _RTEMS_PERCPU_H + +#include <rtems/score/cpu.h> + +#if defined( ASM ) + #include <rtems/asm.h> +#else + #include <rtems/score/assert.h> + #include <rtems/score/isrlevel.h> + #include <rtems/score/smp.h> + #include <rtems/score/smplock.h> + #include <rtems/score/timestamp.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined( RTEMS_SMP ) + /* + * This ensures that on SMP configurations the individual per-CPU controls + * are on different cache lines to prevent false sharing. This define can be + * used in assembler code to easily get the per-CPU control for a particular + * processor. 
+ */ + #if defined( RTEMS_PROFILING ) + #define PER_CPU_CONTROL_SIZE_LOG2 8 + #else + #define PER_CPU_CONTROL_SIZE_LOG2 7 + #endif + + #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 ) +#endif + +#if !defined( ASM ) + +struct _Thread_Control; + +struct Scheduler_Context; + +/** + * @defgroup PerCPU RTEMS Per CPU Information + * + * @ingroup Score + * + * This defines the per CPU state information required by RTEMS + * and the BSP. In an SMP configuration, there will be multiple + * instances of this data structure -- one per CPU -- and the + * current CPU number will be used as the index. + */ + +/**@{*/ + +#if defined( RTEMS_SMP ) + +/** + * @brief State of a processor. + * + * The processor state controls the life cycle of processors at the lowest + * level. No multi-threading or other high-level concepts matter here. + * + * State changes must be initiated via _Per_CPU_State_change(). This function + * may not return in case someone requested a shutdown. The + * _SMP_Send_message() function will be used to notify other processors about + * state changes if the other processor is in the up state. + * + * Due to the sequential nature of the basic system initialization one + * processor has a special role. It is the processor executing the boot_card() + * function. This processor is called the boot processor. All other + * processors are called secondary. 
+ * + * @dot + * digraph states { + * i [label="PER_CPU_STATE_INITIAL"]; + * rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"]; + * reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"]; + * u [label="PER_CPU_STATE_UP"]; + * s [label="PER_CPU_STATE_SHUTDOWN"]; + * i -> rdy [label="processor\ncompleted initialization"]; + * rdy -> reqsm [label="boot processor\ncompleted initialization"]; + * reqsm -> u [label="processor\nstarts multitasking"]; + * i -> s; + * rdy -> s; + * reqsm -> s; + * u -> s; + * } + * @enddot + */ +typedef enum { + /** + * @brief The per CPU controls are initialized to zero. + * + * The boot processor executes the sequential boot code in this state. The + * secondary processors should perform their basic initialization now and + * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this + * is complete. + */ + PER_CPU_STATE_INITIAL, + + /** + * @brief Processor is ready to start multitasking. + * + * The secondary processor performed its basic initialization and is ready to + * receive inter-processor interrupts. Interrupt delivery must be disabled + * in this state, but requested inter-processor interrupts must be recorded + * and must be delivered once the secondary processor enables interrupts for + * the first time. The boot processor will wait for all secondary processors + * to change into this state. In case a secondary processor does not reach + * this state the system will not start. The secondary processors wait now + * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set + * by the boot processor once all secondary processors reached the + * PER_CPU_STATE_READY_TO_START_MULTITASKING state. + */ + PER_CPU_STATE_READY_TO_START_MULTITASKING, + + /** + * @brief Multitasking start of processor is requested. + * + * The boot processor completed system initialization and is about to perform + * a context switch to its heir thread. 
Secondary processors should now + * issue a context switch to the heir thread. This normally enables + * interrupts on the processor for the first time. + */ + PER_CPU_STATE_REQUEST_START_MULTITASKING, + + /** + * @brief Normal multitasking state. + */ + PER_CPU_STATE_UP, + + /** + * @brief This is the terminal state. + */ + PER_CPU_STATE_SHUTDOWN +} Per_CPU_State; + +#endif /* defined( RTEMS_SMP ) */ + +/** + * @brief Per-CPU statistics. + */ +typedef struct { +#if defined( RTEMS_PROFILING ) + /** + * @brief The thread dispatch disabled begin instant in CPU counter ticks. + * + * This value is used to measure the time of disabled thread dispatching. + */ + CPU_Counter_ticks thread_dispatch_disabled_instant; + + /** + * @brief The maximum time of disabled thread dispatching in CPU counter + * ticks. + */ + CPU_Counter_ticks max_thread_dispatch_disabled_time; + + /** + * @brief The maximum time spent to process a single sequence of nested + * interrupts in CPU counter ticks. + * + * This is the time interval between the change of the interrupt nest level + * from zero to one and the change back from one to zero. + */ + CPU_Counter_ticks max_interrupt_time; + + /** + * @brief The maximum interrupt delay in CPU counter ticks if supported by + * the hardware. + */ + CPU_Counter_ticks max_interrupt_delay; + + /** + * @brief Count of times when the thread dispatch disable level changes from + * zero to one in thread context. + * + * This value may overflow. + */ + uint64_t thread_dispatch_disabled_count; + + /** + * @brief Total time of disabled thread dispatching in CPU counter ticks. + * + * The average time of disabled thread dispatching is the total time of + * disabled thread dispatching divided by the thread dispatch disabled + * count. + * + * This value may overflow. + */ + uint64_t total_thread_dispatch_disabled_time; + + /** + * @brief Count of times when the interrupt nest level changes from zero to + * one. + * + * This value may overflow. 
+ */ + uint64_t interrupt_count; + + /** + * @brief Total time of interrupt processing in CPU counter ticks. + * + * The average time of interrupt processing is the total time of interrupt + * processing divided by the interrupt count. + * + * This value may overflow. + */ + uint64_t total_interrupt_time; +#endif /* defined( RTEMS_PROFILING ) */ +} Per_CPU_Stats; + +/** + * @brief Per CPU Core Structure + * + * This structure is used to hold per core state information. + */ +typedef struct Per_CPU_Control { + /** + * @brief CPU port specific control. + */ + CPU_Per_CPU_control cpu_per_cpu; + + #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \ + (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE) + /** + * This contains a pointer to the lower range of the interrupt stack for + * this CPU. This is the address allocated and freed. + */ + void *interrupt_stack_low; + + /** + * This contains a pointer to the interrupt stack pointer for this CPU. + * It will be loaded at the beginning on an ISR. + */ + void *interrupt_stack_high; + #endif + + /** + * This contains the current interrupt nesting level on this + * CPU. + */ + uint32_t isr_nest_level; + + /** + * @brief The thread dispatch critical section nesting counter which is used + * to prevent context switches at inopportune moments. + */ + volatile uint32_t thread_dispatch_disable_level; + + /** + * @brief This is the thread executing on this processor. + * + * This field is not protected by a lock. The only writer is this processor. + * + * On SMP configurations a thread may be registered as executing on more than + * one processor in case a thread migration is in progress. On SMP + * configurations use _Thread_Is_executing_on_a_processor() to figure out if + * a thread context is executing on a processor. + */ + struct _Thread_Control *executing; + + /** + * @brief This is the heir thread for this processor. + * + * This field is not protected by a lock. 
The only writer after multitasking + * start is the scheduler owning this processor. It is assumed that stores + * to pointers are atomic on all supported SMP architectures. The CPU port + * specific code (inter-processor interrupt handling and + * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the + * last value written. + * + * A thread can be a heir on at most one processor in the system. + * + * @see _Thread_Get_heir_and_make_it_executing(). + */ + struct _Thread_Control *heir; + + /** + * @brief This is set to true when this processor needs to run the thread + * dispatcher. + * + * It is volatile since interrupts may alter this flag. + * + * This field is not protected by a lock and must be accessed only by this + * processor. Code (e.g. scheduler and post-switch action requests) running + * on another processors must use an inter-processor interrupt to set the + * thread dispatch necessary indicator to true. + * + * @see _Thread_Get_heir_and_make_it_executing(). + */ + volatile bool dispatch_necessary; + + /** This is the time of the last context switch on this CPU. */ + Timestamp_Control time_of_last_context_switch; + + #if defined( RTEMS_SMP ) + /** + * @brief This lock protects some parts of the low-level thread dispatching. + * + * We must use a ticket lock here since we cannot transport a local context + * through the context switch. + * + * @see _Thread_Dispatch(). + */ + SMP_ticket_lock_Control Lock; + + #if defined( RTEMS_PROFILING ) + /** + * @brief Lock statistics for the per-CPU lock. + */ + SMP_lock_Stats Lock_stats; + + /** + * @brief Lock statistics context for the per-CPU lock. + */ + SMP_lock_Stats_context Lock_stats_context; + #endif + + /** + * @brief Context for the Giant lock acquire and release pair of this + * processor. + */ + SMP_lock_Context Giant_lock_context; + + /** + * @brief Bit field for SMP messages. + * + * This bit field is not protected locks. 
Atomic operations are used to + * set and get the message bits. + */ + Atomic_Ulong message; + + /** + * @brief The scheduler context of the scheduler owning this processor. + */ + const struct Scheduler_Context *scheduler_context; + + /** + * @brief Indicates the current state of the CPU. + * + * This field is protected by the _Per_CPU_State_lock lock. + * + * @see _Per_CPU_State_change(). + */ + Per_CPU_State state; + + /** + * @brief Indicates if the processor has been successfully started via + * _CPU_SMP_Start_processor(). + */ + bool started; + #endif + + Per_CPU_Stats Stats; +} Per_CPU_Control; + +#if defined( RTEMS_SMP ) +typedef struct { + Per_CPU_Control per_cpu; + char unused_space_for_cache_line_alignment + [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ]; +} Per_CPU_Control_envelope; +#else +typedef struct { + Per_CPU_Control per_cpu; +} Per_CPU_Control_envelope; +#endif + +/** + * @brief Set of Per CPU Core Information + * + * This is an array of per CPU core information. 
+ */ +extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT; + +#if defined( RTEMS_SMP ) +#define _Per_CPU_Acquire( cpu ) \ + _SMP_ticket_lock_Acquire( \ + &( cpu )->Lock, \ + &( cpu )->Lock_stats, \ + &( cpu )->Lock_stats_context \ + ) +#else +#define _Per_CPU_Acquire( cpu ) \ + do { \ + (void) ( cpu ); \ + } while ( 0 ) +#endif + +#if defined( RTEMS_SMP ) +#define _Per_CPU_Release( cpu ) \ + _SMP_ticket_lock_Release( \ + &( cpu )->Lock, \ + &( cpu )->Lock_stats_context \ + ) +#else +#define _Per_CPU_Release( cpu ) \ + do { \ + (void) ( cpu ); \ + } while ( 0 ) +#endif + +#if defined( RTEMS_SMP ) +#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \ + do { \ + _ISR_Disable_without_giant( isr_cookie ); \ + _Per_CPU_Acquire( cpu ); \ + } while ( 0 ) +#else +#define _Per_CPU_ISR_disable_and_acquire( cpu, isr_cookie ) \ + do { \ + _ISR_Disable( isr_cookie ); \ + (void) ( cpu ); \ + } while ( 0 ) +#endif + +#if defined( RTEMS_SMP ) +#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \ + do { \ + _Per_CPU_Release( cpu ); \ + _ISR_Enable_without_giant( isr_cookie ); \ + } while ( 0 ) +#else +#define _Per_CPU_Release_and_ISR_enable( cpu, isr_cookie ) \ + do { \ + (void) ( cpu ); \ + _ISR_Enable( isr_cookie ); \ + } while ( 0 ) +#endif + +#if defined( RTEMS_SMP ) +#define _Per_CPU_Acquire_all( isr_cookie ) \ + do { \ + uint32_t ncpus = _SMP_Get_processor_count(); \ + uint32_t cpu; \ + _ISR_Disable( isr_cookie ); \ + for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \ + _Per_CPU_Acquire( _Per_CPU_Get_by_index( cpu ) ); \ + } \ + } while ( 0 ) +#else +#define _Per_CPU_Acquire_all( isr_cookie ) \ + _ISR_Disable( isr_cookie ) +#endif + +#if defined( RTEMS_SMP ) +#define _Per_CPU_Release_all( isr_cookie ) \ + do { \ + uint32_t ncpus = _SMP_Get_processor_count(); \ + uint32_t cpu; \ + for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { \ + _Per_CPU_Release( _Per_CPU_Get_by_index( cpu ) ); \ + } \ + _ISR_Enable( isr_cookie ); \ + } while ( 0 ) +#else +#define 
_Per_CPU_Release_all( isr_cookie ) \ + _ISR_Enable( isr_cookie ) +#endif + +/* + * If we get the current processor index in a context which allows thread + * dispatching, then we may already run on another processor right after the + * read instruction. There are very few cases in which this makes sense (here + * we can use _Per_CPU_Get_snapshot()). All other places must use + * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG. + */ +#if defined( _CPU_Get_current_per_CPU_control ) + #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control() +#else + #define _Per_CPU_Get_snapshot() \ + ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu ) +#endif + +#if defined( RTEMS_SMP ) +static inline Per_CPU_Control *_Per_CPU_Get( void ) +{ + Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot(); + + _Assert( + cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0 + ); + + return cpu_self; +} +#else +#define _Per_CPU_Get() _Per_CPU_Get_snapshot() +#endif + +static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index ) +{ + return &_Per_CPU_Information[ index ].per_cpu; +} + +static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu ) +{ + const Per_CPU_Control_envelope *per_cpu_envelope = + ( const Per_CPU_Control_envelope * ) cpu; + + return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] ); +} + +static inline bool _Per_CPU_Is_processor_started( + const Per_CPU_Control *cpu +) +{ +#if defined( RTEMS_SMP ) + return cpu->started; +#else + (void) cpu; + + return true; +#endif +} + +#if defined( RTEMS_SMP ) + +static inline void _Per_CPU_Send_interrupt( const Per_CPU_Control *cpu ) +{ + _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu ) ); +} + +/** + * @brief Allocate and Initialize Per CPU Structures + * + * This method allocates and initialize the per CPU structure. 
+ */ +void _Per_CPU_Initialize(void); + +void _Per_CPU_State_change( + Per_CPU_Control *cpu, + Per_CPU_State new_state +); + +/** + * @brief Waits for a processor to change into a non-initial state. + * + * This function should be called only in _CPU_SMP_Start_processor() if + * required by the CPU port or BSP. + * + * @code + * bool _CPU_SMP_Start_processor(uint32_t cpu_index) + * { + * uint32_t timeout = 123456; + * + * start_the_processor(cpu_index); + * + * return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout); + * } + * @endcode + * + * @param[in] cpu_index The processor index. + * @param[in] timeout_in_ns The timeout in nanoseconds. Use a value of zero to + * wait forever if necessary. + * + * @retval true The processor is in a non-initial state. + * @retval false The timeout expired before the processor reached a non-initial + * state. + */ +bool _Per_CPU_State_wait_for_non_initial_state( + uint32_t cpu_index, + uint32_t timeout_in_ns +); + +#endif /* defined( RTEMS_SMP ) */ + +/* + * On a non SMP system, the _SMP_Get_current_processor() is defined to 0. + * Thus when built for non-SMP, there should be no performance penalty. + */ +#define _Thread_Dispatch_disable_level \ + _Per_CPU_Get()->thread_dispatch_disable_level +#define _Thread_Heir \ + _Per_CPU_Get()->heir +#define _Thread_Executing \ + _Per_CPU_Get()->executing +#define _ISR_Nest_level \ + _Per_CPU_Get()->isr_nest_level +#define _CPU_Interrupt_stack_low \ + _Per_CPU_Get()->interrupt_stack_low +#define _CPU_Interrupt_stack_high \ + _Per_CPU_Get()->interrupt_stack_high +#define _Thread_Dispatch_necessary \ + _Per_CPU_Get()->dispatch_necessary +#define _Thread_Time_of_last_context_switch \ + _Per_CPU_Get()->time_of_last_context_switch + +/** + * @brief Returns the thread control block of the executing thread. + * + * This function can be called in any context. On SMP configurations + * interrupts are disabled to ensure that the processor index is used + * consistently. 
+ * + * @return The thread control block of the executing thread. + */ +RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void ) +{ + struct _Thread_Control *executing; + + #if defined( RTEMS_SMP ) + ISR_Level level; + + _ISR_Disable_without_giant( level ); + #endif + + executing = _Thread_Executing; + + #if defined( RTEMS_SMP ) + _ISR_Enable_without_giant( level ); + #endif + + return executing; +} + +/**@}*/ + +#endif /* !defined( ASM ) */ + +#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) + +#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \ + (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE) + /* + * If this CPU target lets RTEMS allocates the interrupt stack, then + * we need to have places in the per CPU table to hold them. + */ + #define PER_CPU_INTERRUPT_STACK_LOW \ + CPU_PER_CPU_CONTROL_SIZE + #define PER_CPU_INTERRUPT_STACK_HIGH \ + PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER + #define PER_CPU_END_STACK \ + PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER + + #define INTERRUPT_STACK_LOW \ + (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW) + #define INTERRUPT_STACK_HIGH \ + (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH) +#else + #define PER_CPU_END_STACK \ + CPU_PER_CPU_CONTROL_SIZE +#endif + +/* + * These are the offsets of the required elements in the per CPU table. 
+ */ +#define PER_CPU_ISR_NEST_LEVEL \ + PER_CPU_END_STACK +#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \ + PER_CPU_ISR_NEST_LEVEL + 4 +#define PER_CPU_OFFSET_EXECUTING \ + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4 +#define PER_CPU_OFFSET_HEIR \ + PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER +#define PER_CPU_DISPATCH_NEEDED \ + PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER + +#define THREAD_DISPATCH_DISABLE_LEVEL \ + (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL) +#define ISR_NEST_LEVEL \ + (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL) +#define DISPATCH_NEEDED \ + (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED) + +#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/powerpc.h b/include/rtems/score/powerpc.h new file mode 100644 index 0000000000..c640b2f783 --- /dev/null +++ b/include/rtems/score/powerpc.h @@ -0,0 +1,177 @@ +/** + * @file + * + * @brief IBM/Motorola Power Pc Definitions + * + * This file contains definitions for the IBM/Motorola PowerPC + * family members. + */ + +/* + * Author: Andrew Bray <andy@i-cubed.co.uk> + * + * COPYRIGHT (c) 1995 by i-cubed ltd. + * + * MPC860 support code was added by Jay Monkman <jmonkman@frasca.com> + * MPC8260 support added by Andy Dachs <a.dachs@sstl.co.uk> + * Surrey Satellite Technology Limited + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of i-cubed limited not be used in + * advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. 
+ * i-cubed limited makes no representations about the suitability + * of this software for any purpose. + * + * Derived from c/src/exec/cpu/no_cpu/no_cpu.h: + * + * COPYRIGHT (c) 1989-1997. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + * + * + * Note: + * This file is included by both C and assembler code ( -DASM ) + */ + + +#ifndef _RTEMS_SCORE_POWERPC_H +#define _RTEMS_SCORE_POWERPC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rtems/score/types.h> + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "PowerPC" + +/* + * This file contains the information required to build + * RTEMS for the PowerPC family. + */ + +/* Generic ppc */ + +#ifdef _SOFT_FLOAT +#define CPU_MODEL_NAME "Generic (no FPU)" +#elif defined(__NO_FPRS__) || defined(__SPE__) +#define CPU_MODEL_NAME "Generic (E500/float-gprs/SPE)" +#else +#define CPU_MODEL_NAME "Generic (classic FPU)" +#endif + +#define PPC_ALIGNMENT 8 + +#ifdef __PPC_CPU_E6500__ +#define PPC_DEFAULT_CACHE_LINE_POWER 6 +#else +#define PPC_DEFAULT_CACHE_LINE_POWER 5 +#endif + +#define PPC_DEFAULT_CACHE_LINE_SIZE (1 << PPC_DEFAULT_CACHE_LINE_POWER) + +#define PPC_STRUCTURE_ALIGNMENT PPC_DEFAULT_CACHE_LINE_SIZE + +/* + * Application binary interfaces. + * + * PPC_ABI MUST be defined as one of these. + * Only big endian is currently supported. + */ + +/* + * SVR4 ABI + */ +#define PPC_ABI_SVR4 2 +/* + * Embedded ABI + */ +#define PPC_ABI_EABI 3 + +/* + * Default to the EABI used by current GNU tools + */ + +#ifndef PPC_ABI +#define PPC_ABI PPC_ABI_EABI +#endif + +/* + * Use worst case stack alignment. For the EABI an 8-byte alignment would be + * sufficient. + */ + +#define PPC_STACK_ALIGN_POWER 4 +#define PPC_STACK_ALIGNMENT (1 << PPC_STACK_ALIGN_POWER) + +/* + * Assume PPC_HAS_FPU to be a synonym for _SOFT_FLOAT. 
+ */ + +#if defined(_SOFT_FLOAT) \ + || defined(__NO_FPRS__) /* e500 has unified integer/FP registers */ \ + || defined(__PPC_CPU_E6500__) +#define PPC_HAS_FPU 0 +#else +#define PPC_HAS_FPU 1 +#endif + +#if defined(__PPC_CPU_E6500__) && defined(__ALTIVEC__) +#define PPC_MULTILIB_ALTIVEC +#endif + +#if defined(__PPC_CPU_E6500__) && !defined(_SOFT_FLOAT) +#define PPC_MULTILIB_FPU +#endif + +/* + * Unless specified above, If the model has FP support, it is assumed to + * support doubles (8-byte floating point numbers). + * + * If the model does NOT have FP support, then the model does + * NOT have double length FP registers. + */ + +#if (PPC_HAS_FPU) +#define PPC_HAS_DOUBLE 1 +#else +#define PPC_HAS_DOUBLE 0 +#endif + +/* + * Assemblers. + * PPC_ASM MUST be defined as one of these. + * + * PPC_ASM_ELF: ELF assembler. Currently used for all ABIs. + * + * NOTE: Only PPC_ABI_ELF is currently fully supported. + * + * Also NOTE: cpukit doesn't need this but asm.h which is defined + * in cpukit for consistency with other ports does. + */ + +#define PPC_ASM_ELF 0 + +/* + * Default to the assembler format used by the current GNU tools. + */ +#define PPC_ASM PPC_ASM_ELF + + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_POWERPC_H */ diff --git a/include/rtems/score/priority.h b/include/rtems/score/priority.h new file mode 100644 index 0000000000..0a772f62a2 --- /dev/null +++ b/include/rtems/score/priority.h @@ -0,0 +1,90 @@ +/** + * @file rtems/score/priority.h + * + * @brief Thread Priority Manipulation Routines + * + * This include file contains all thread priority manipulation routines. + * This Handler provides mechanisms which can be used to + * initialize and manipulate thread priorities. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_PRIORITY_H +#define _RTEMS_SCORE_PRIORITY_H + +/** + * @defgroup ScorePriority Priority Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which is used to manage + * thread priorities. At the SuperCore level 256 priority levels + * are supported with lower numbers representing logically more important + * threads. The priority level 0 is reserved for internal RTEMS use. + * Typically it is assigned to threads which defer internal RTEMS + * actions from an interrupt to thread level to improve interrupt response. + * Priority level 255 is assigned to the IDLE thread and really should not + * be used by application threads. The default IDLE thread implementation + * is an infinite "branch to self" loop which never yields to other threads + * at the same priority. + */ +/**@{*/ + +/* + * Processor specific information. + */ +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * The following type defines the control block used to manage + * thread priorities. + * + * @note Priority 0 is reserved for internal threads only. + */ +typedef uint32_t Priority_Control; + +/** This defines the highest (most important) thread priority. */ +#define PRIORITY_MINIMUM 0 + +/** + * @brief This defines the priority of pseudo-ISR threads. + * + * Examples are the MPCI and timer server threads. + */ +#define PRIORITY_PSEUDO_ISR PRIORITY_MINIMUM + +/** This defines the default lowest (least important) thread priority. */ +#if defined (CPU_PRIORITY_MAXIMUM) + #define PRIORITY_DEFAULT_MAXIMUM CPU_PRIORITY_MAXIMUM +#else + #define PRIORITY_DEFAULT_MAXIMUM 255 +#endif + +/** This defines the lowest (least important) thread priority. 
*/ +#define PRIORITY_MAXIMUM rtems_maximum_priority + +/** + * This variable contains the configured number of priorities + */ +extern uint8_t rtems_maximum_priority; + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/prioritybitmap.h b/include/rtems/score/prioritybitmap.h new file mode 100644 index 0000000000..f363fe4257 --- /dev/null +++ b/include/rtems/score/prioritybitmap.h @@ -0,0 +1,82 @@ +/** + * @file rtems/score/prioritybitmap.h + * + * @brief Manipulation Routines for the Bitmap Priority Queue Implementation + * + * This include file contains all thread priority manipulation routines for + * the bit map priority queue implementation. + */ + +/* + * COPYRIGHT (c) 1989-2010. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_PRIORITYBITMAP_H +#define _RTEMS_SCORE_PRIORITYBITMAP_H + +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScorePriorityBitmap Bitmap Priority Thread Routines + * + * @ingroup Score + */ +/**@{*/ + +/* + * The definition of the Priority_bit_map_Word type is CPU dependent. + * + */ + +typedef struct { + /** + * @brief Each sixteen bit entry in this word is associated with one of the + * sixteen entries in the bit map. + */ + Priority_bit_map_Word major_bit_map; + + /** + * @brief Each bit in the bit map indicates whether or not there are threads + * ready at a particular priority. + * + * The mapping of individual priority levels to particular bits is processor + * dependent as is the value of each bit used to indicate that threads are + * ready at that priority. 
+ */ + Priority_bit_map_Word bit_map[ 16 ]; +} Priority_bit_map_Control; + +/** + * The following record defines the information associated with + * each thread to manage its interaction with the priority bit maps. + */ +typedef struct { + /** This is the address of minor bit map slot. */ + Priority_bit_map_Word *minor; + /** This is the priority bit map ready mask. */ + Priority_bit_map_Word ready_major; + /** This is the priority bit map ready mask. */ + Priority_bit_map_Word ready_minor; + /** This is the priority bit map block mask. */ + Priority_bit_map_Word block_major; + /** This is the priority bit map block mask. */ + Priority_bit_map_Word block_minor; +} Priority_bit_map_Information; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/prioritybitmapimpl.h b/include/rtems/score/prioritybitmapimpl.h new file mode 100644 index 0000000000..de90ef77ae --- /dev/null +++ b/include/rtems/score/prioritybitmapimpl.h @@ -0,0 +1,263 @@ +/** + * @file + * + * @brief Inlined Routines in the Priority Handler Bit Map Implementation + * + * This file contains the static inline implementation of all inlined + * routines in the Priority Handler bit map implementation + */ + +/* + * COPYRIGHT (c) 1989-2010. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_PRIORITYBITMAPIMPL_H +#define _RTEMS_SCORE_PRIORITYBITMAPIMPL_H + +#include <rtems/score/prioritybitmap.h> +#include <rtems/score/priority.h> + +#include <string.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScorePriority + */ +/**@{**/ + +#if ( CPU_USE_GENERIC_BITFIELD_DATA == TRUE ) + +/** + * This table is used by the generic bitfield routines to perform + * a highly optimized bit scan without the use of special CPU + * instructions. 
 */
extern const unsigned char __log2table[256];

#endif

/**
 * @brief Gets the @a _bit_number of the first bit set in the specified value.
 *
 * This routine returns the @a _bit_number of the first bit set
 * in the specified value.  The correspondence between @a _bit_number
 * and actual bit position is processor dependent.  The search for
 * the first bit set may run from most to least significant bit
 * or vice-versa.
 *
 * @param[in] _value is the value to bit scan.
 * @param[in] _bit_number is the position of the first bit set.
 *
 * @note This routine is used when the executing thread is removed
 * from the ready state and, as a result, its performance has a
 * significant impact on the performance of the executive as a whole.
 *
 * @note This routine must be a macro because if a CPU specific version
 * is used it will most likely use inline assembly.
 *
 * @note In the generic variant below, bit number 0 corresponds to the
 * most significant bit of the sixteen bit word (see the generic
 * _Priority_Mask(), which computes 0x8000 >> bit_number).  NOTE(review):
 * __log2table[] presumably maps a byte value to the index, counted from
 * the most significant bit, of its first set bit — confirm against the
 * table definition.  A value below 0x100 has all set bits in the low
 * byte, i.e. at positions 8..15 of the word, hence the "+ 8".
 */
#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
#define _Bitfield_Find_first_bit( _value, _bit_number ) \
        _CPU_Bitfield_Find_first_bit( _value, _bit_number )
#else
#define _Bitfield_Find_first_bit( _value, _bit_number ) \
  { \
    register uint32_t __value = (uint32_t) (_value); \
    register const unsigned char *__p = __log2table; \
    \
    if ( __value < 0x100 ) \
      (_bit_number) = (Priority_bit_map_Word)( __p[ __value ] + 8 ); \
    else \
      (_bit_number) = (Priority_bit_map_Word)( __p[ __value >> 8 ] ); \
  }
#endif

#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
/**
 * This method returns the priority bit mask for the specified major
 * or minor bit number.
 *
 * @param[in] _bit_number is the bit number for which we need a mask
 *
 * @retval the priority bit mask
 *
 * @note This may simply be a pass through to a CPU dependent implementation.
 */
#define _Priority_Mask( _bit_number ) \
  _CPU_Priority_Mask( _bit_number )
#endif

#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
/**
 * This method returns the bit index position for the specified priority.
+ * + * @param[in] _priority is the priority for which we need the index. + * + * @retval This method returns the array index into the priority bit map. + * + * @note This may simply be a pass through to a CPU dependent implementation. + */ +#define _Priority_Bits_index( _priority ) \ + _CPU_Priority_bits_index( _priority ) +#endif + +/** + * This function returns the major portion of the_priority. + */ + +RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Major ( + Priority_Control the_priority +) +{ + return (Priority_bit_map_Word)( the_priority / 16 ); +} + +/** + * This function returns the minor portion of the_priority. + */ + +RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Minor ( + Priority_Control the_priority +) +{ + return (Priority_bit_map_Word)( the_priority % 16 ); +} + +#if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) + +/** + * This function returns the mask associated with the major or minor + * number passed to it. + */ + +RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Mask ( + uint32_t bit_number +) +{ + return (Priority_bit_map_Word)(0x8000u >> bit_number); +} + +/** + * This function returns the mask bit inverted. + */ + +RTEMS_INLINE_ROUTINE Priority_bit_map_Word _Priority_Mask_invert ( + uint32_t mask +) +{ + return (Priority_bit_map_Word)(~mask); +} + +/** + * This function translates the bit numbers returned by the bit scan + * of a priority bit field into something suitable for use as + * a major or minor component of a priority. 
 */

RTEMS_INLINE_ROUTINE uint32_t _Priority_Bits_index (
  uint32_t bit_number
)
{
  /* The generic bit scan already yields a usable array index. */
  return bit_number;
}

#endif

/**
 * @brief Clears the major bit map and all sixteen minor bit map words.
 */
RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize(
  Priority_bit_map_Control *bit_map
)
{
  memset( bit_map, 0, sizeof( *bit_map ) );
}

/**
 * Priority Queue implemented by bit map
 */

/**
 * @brief Marks the priority described by @a bit_map_info as ready.
 *
 * Sets the pre-computed bit in the minor word and the corresponding
 * bit in the major bit map (see
 * _Priority_bit_map_Initialize_information()).
 */
RTEMS_INLINE_ROUTINE void _Priority_bit_map_Add (
  Priority_bit_map_Control *bit_map,
  Priority_bit_map_Information *bit_map_info
)
{
  *bit_map_info->minor |= bit_map_info->ready_minor;
  bit_map->major_bit_map |= bit_map_info->ready_major;
}

/**
 * @brief Marks the priority described by @a bit_map_info as no longer ready.
 *
 * Clears the minor bit; the major bit is cleared only when no other
 * priority sharing the same minor word remains ready.
 */
RTEMS_INLINE_ROUTINE void _Priority_bit_map_Remove (
  Priority_bit_map_Control *bit_map,
  Priority_bit_map_Information *bit_map_info
)
{
  *bit_map_info->minor &= bit_map_info->block_minor;
  if ( *bit_map_info->minor == 0 )
    bit_map->major_bit_map &= bit_map_info->block_major;
}

/**
 * @brief Returns the highest ready priority recorded in the bit map.
 *
 * Scans the major bit map for its first set bit, then the selected
 * minor word, and combines the two indices as (major << 4) + minor.
 *
 * NOTE(review): assumes at least one priority is ready; the result of
 * scanning an empty bit map depends on _Bitfield_Find_first_bit() —
 * callers should check _Priority_bit_map_Is_empty() first.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Priority_bit_map_Get_highest(
  const Priority_bit_map_Control *bit_map
)
{
  Priority_bit_map_Word minor;
  Priority_bit_map_Word major;

  /* Avoid problems with some inline ASM statements */
  Priority_bit_map_Word tmp;

  tmp = bit_map->major_bit_map;
  _Bitfield_Find_first_bit( tmp, major );

  tmp = bit_map->bit_map[ major ];
  _Bitfield_Find_first_bit( tmp, minor );

  return (_Priority_Bits_index( major ) << 4) +
          _Priority_Bits_index( minor );
}

/**
 * @brief Returns true if no priority is ready.
 *
 * The major bit map has a bit set whenever any minor word is non-zero
 * (see _Priority_bit_map_Add() and _Priority_bit_map_Remove()), so
 * checking it alone suffices.
 */
RTEMS_INLINE_ROUTINE bool _Priority_bit_map_Is_empty(
  const Priority_bit_map_Control *bit_map
)
{
  return bit_map->major_bit_map == 0;
}

/**
 * @brief Pre-computes the masks and minor word address for
 * @a new_priority.
 *
 * The ready masks are used by _Priority_bit_map_Add() and the block
 * (complement) masks by _Priority_bit_map_Remove().
 */
RTEMS_INLINE_ROUTINE void _Priority_bit_map_Initialize_information(
  Priority_bit_map_Control *bit_map,
  Priority_bit_map_Information *bit_map_info,
  Priority_Control new_priority
)
{
  Priority_bit_map_Word major;
  Priority_bit_map_Word minor;
  Priority_bit_map_Word mask;

  major = _Priority_Major( new_priority );
  minor = _Priority_Minor( new_priority );

  bit_map_info->minor =
    &bit_map->bit_map[ _Priority_Bits_index( major ) ];

  mask = _Priority_Mask(
major ); + bit_map_info->ready_major = mask; + /* Add _Priority_Mask_invert to non-generic bitfield then change this code. */ + bit_map_info->block_major = (Priority_bit_map_Word)(~((uint32_t)mask)); + + mask = _Priority_Mask( minor ); + bit_map_info->ready_minor = mask; + /* Add _Priority_Mask_invert to non-generic bitfield then change this code. */ + bit_map_info->block_minor = (Priority_bit_map_Word)(~((uint32_t)mask)); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/profiling.h b/include/rtems/score/profiling.h new file mode 100644 index 0000000000..a6ab283ae6 --- /dev/null +++ b/include/rtems/score/profiling.h @@ -0,0 +1,134 @@ +/** + * @file + * + * @ingroup ScoreProfiling + * + * @brief Profiling Support API + */ + +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_PROFILING +#define _RTEMS_SCORE_PROFILING + +#include <rtems/score/percpu.h> +#include <rtems/score/isrlock.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreProfiling Profiling Support + * + * @brief Profiling support. 
 *
 * @{
 */

/**
 * @brief Starts a thread dispatch disabled time measurement.
 *
 * Records the current CPU counter value and counts the section, but only
 * on the transition of the disable level from zero to one.  Compiles to a
 * no-op unless RTEMS_PROFILING is defined.
 *
 * @param[in] cpu The per-CPU control whose statistics are updated.
 * @param[in] previous_thread_dispatch_disable_level The disable level
 *   before the increment.
 */
static inline void _Profiling_Thread_dispatch_disable(
  Per_CPU_Control *cpu,
  uint32_t previous_thread_dispatch_disable_level
)
{
#if defined( RTEMS_PROFILING )
  if ( previous_thread_dispatch_disable_level == 0 ) {
    Per_CPU_Stats *stats = &cpu->Stats;

    stats->thread_dispatch_disabled_instant = _CPU_Counter_read();
    ++stats->thread_dispatch_disabled_count;
  }
#else
  (void) cpu;
  (void) previous_thread_dispatch_disable_level;
#endif
}

/**
 * @brief Variant of _Profiling_Thread_dispatch_disable() for use inside an
 * ISR locked critical section.
 *
 * Instead of reading the counter again, the measurement starts at the
 * instant stored in the lock context when interrupts were disabled
 * (@a lock_context->ISR_disable_instant).
 *
 * @param[in] cpu The per-CPU control whose statistics are updated.
 * @param[in] previous_thread_dispatch_disable_level The disable level
 *   before the increment.
 * @param[in] lock_context The lock context carrying the ISR disable instant.
 */
static inline void _Profiling_Thread_dispatch_disable_critical(
  Per_CPU_Control *cpu,
  uint32_t previous_thread_dispatch_disable_level,
  const ISR_lock_Context *lock_context
)
{
#if defined( RTEMS_PROFILING )
  if ( previous_thread_dispatch_disable_level == 0 ) {
    Per_CPU_Stats *stats = &cpu->Stats;

    stats->thread_dispatch_disabled_instant = lock_context->ISR_disable_instant;
    ++stats->thread_dispatch_disabled_count;
  }
#else
  (void) cpu;
  (void) previous_thread_dispatch_disable_level;
  (void) lock_context;
#endif
}

/**
 * @brief Finishes a thread dispatch disabled time measurement.
 *
 * On the transition of the disable level back to zero, accumulates the
 * elapsed counter ticks since the matching disable into the total and
 * updates the recorded maximum.  No-op unless RTEMS_PROFILING is defined.
 *
 * @param[in] cpu The per-CPU control whose statistics are updated.
 * @param[in] new_thread_dispatch_disable_level The disable level after
 *   the decrement.
 */
static inline void _Profiling_Thread_dispatch_enable(
  Per_CPU_Control *cpu,
  uint32_t new_thread_dispatch_disable_level
)
{
#if defined( RTEMS_PROFILING )
  if ( new_thread_dispatch_disable_level == 0 ) {
    Per_CPU_Stats *stats = &cpu->Stats;
    CPU_Counter_ticks now = _CPU_Counter_read();
    CPU_Counter_ticks delta = _CPU_Counter_difference(
      now,
      stats->thread_dispatch_disabled_instant
    );

    stats->total_thread_dispatch_disabled_time += delta;

    if ( stats->max_thread_dispatch_disabled_time < delta ) {
      stats->max_thread_dispatch_disabled_time = delta;
    }
  }
#else
  (void) cpu;
  (void) new_thread_dispatch_disable_level;
#endif
}

/**
 * @brief Updates the per-CPU maximum interrupt delay if @a interrupt_delay
 * exceeds the current maximum.
 *
 * @param[in] cpu The per-CPU control whose statistics are updated.
 * @param[in] interrupt_delay The measured interrupt delay in counter ticks.
 */
static inline void _Profiling_Update_max_interrupt_delay(
  Per_CPU_Control *cpu,
  CPU_Counter_ticks interrupt_delay
)
{
#if defined( RTEMS_PROFILING )
  Per_CPU_Stats *stats = &cpu->Stats;

  if ( stats->max_interrupt_delay < interrupt_delay ) {
    stats->max_interrupt_delay = interrupt_delay;
  }
#else
(void) cpu; + (void) interrupt_delay; +#endif +} + +void _Profiling_Outer_most_interrupt_entry_and_exit( + Per_CPU_Control *cpu, + CPU_Counter_ticks interrupt_entry_instant, + CPU_Counter_ticks interrupt_exit_instant +); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_PROFILING */ diff --git a/include/rtems/score/protectedheap.h b/include/rtems/score/protectedheap.h new file mode 100644 index 0000000000..a08fa36cf3 --- /dev/null +++ b/include/rtems/score/protectedheap.h @@ -0,0 +1,172 @@ +/** + * @file + * + * @ingroup ScoreProtHeap + * + * @brief Protected Heap Handler API + */ + +/* + * COPYRIGHT (c) 1989-2007. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_PROTECTED_HEAP_H +#define _RTEMS_SCORE_PROTECTED_HEAP_H + +#include <rtems/score/heapimpl.h> +#include <rtems/score/apimutex.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreProtHeap Protected Heap Handler + * + * @ingroup ScoreHeap + * + * @brief Provides protected heap services. + * + * The @ref ScoreAllocatorMutex is used to protect the heap accesses. + * + */ +/**@{**/ + +/** + * @brief See _Heap_Initialize(). + */ +RTEMS_INLINE_ROUTINE uintptr_t _Protected_heap_Initialize( + Heap_Control *heap, + void *area_begin, + uintptr_t area_size, + uintptr_t page_size +) +{ + return _Heap_Initialize( heap, area_begin, area_size, page_size ); +} + +/** + * @brief See _Heap_Extend(). + * + * Returns @a true in case of success, and @a false otherwise. + */ +bool _Protected_heap_Extend( + Heap_Control *heap, + void *area_begin, + uintptr_t area_size +); + +/** + * @brief See _Heap_Allocate_aligned_with_boundary(). 
+ */ +void *_Protected_heap_Allocate_aligned_with_boundary( + Heap_Control *heap, + uintptr_t size, + uintptr_t alignment, + uintptr_t boundary +); + +/** + * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero. + */ +RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate_aligned( + Heap_Control *heap, + uintptr_t size, + uintptr_t alignment +) +{ + return + _Protected_heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 ); +} + +/** + * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and + * boundary equals zero. + */ +RTEMS_INLINE_ROUTINE void *_Protected_heap_Allocate( + Heap_Control *heap, + uintptr_t size +) +{ + return _Protected_heap_Allocate_aligned_with_boundary( heap, size, 0, 0 ); +} + +/** + * @brief See _Heap_Size_of_alloc_area(). + */ +bool _Protected_heap_Get_block_size( + Heap_Control *heap, + void *addr, + uintptr_t *size +); + +/** + * @brief See _Heap_Resize_block(). + * + * Returns @a true in case of success, and @a false otherwise. + */ +bool _Protected_heap_Resize_block( + Heap_Control *heap, + void *addr, + uintptr_t size +); + +/** + * @brief See _Heap_Free(). + * + * Returns @a true in case of success, and @a false otherwise. + */ +bool _Protected_heap_Free( Heap_Control *heap, void *addr ); + +/** + * @brief See _Heap_Walk(). + */ +bool _Protected_heap_Walk( Heap_Control *heap, int source, bool dump ); + +/** + * @brief See _Heap_Iterate(). + */ +void _Protected_heap_Iterate( + Heap_Control *heap, + Heap_Block_visitor visitor, + void *visitor_arg +); + +/** + * @brief See _Heap_Get_information(). + * + * Returns @a true in case of success, and @a false otherwise. + */ +bool _Protected_heap_Get_information( + Heap_Control *heap, + Heap_Information_block *info +); + +/** + * @brief See _Heap_Get_free_information(). + * + * Returns @a true in case of success, and @a false otherwise. 
+ */ +bool _Protected_heap_Get_free_information( + Heap_Control *heap, + Heap_Information *info +); + +/** + * @brief See _Heap_Get_size(). + */ +uintptr_t _Protected_heap_Get_size( Heap_Control *heap ); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/rbtree.h b/include/rtems/score/rbtree.h new file mode 100644 index 0000000000..7e41c7a4c5 --- /dev/null +++ b/include/rtems/score/rbtree.h @@ -0,0 +1,493 @@ +/** + * @file rtems/score/rbtree.h + * + * @brief Constants and Structures Associated with the Red-Black Tree Handler + * + * This include file contains all the constants and structures associated + * with the Red-Black Tree Handler. + */ + +/* + * Copyright (c) 2010 Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_RBTREE_H +#define _RTEMS_SCORE_RBTREE_H + +#include <sys/tree.h> +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreRBTree Red-Black Tree Handler + * + * @ingroup Score + * + * The Red-Black Tree Handler is used to manage sets of entities. This handler + * provides two data structures. The rbtree Node data structure is included + * as the first part of every data structure that will be placed on + * a RBTree. The second data structure is rbtree Control which is used + * to manage a set of rbtree Nodes. + */ +/**@{*/ + +/** + * @brief Red-black tree node. + * + * This is used to manage each node (element) which is placed on a red-black + * tree. + */ +typedef struct RBTree_Node { + RB_ENTRY(RBTree_Node) Node; +} RBTree_Node; + +/** + * @brief Red-black tree control. + * + * This is used to manage a red-black tree. A red-black tree consists of a + * tree of zero or more nodes. 
+ */ +typedef RB_HEAD(RBTree_Control, RBTree_Node) RBTree_Control; + +/** + * @brief Integer type for compare results. + * + * The type is large enough to represent pointers and 32-bit signed integers. + * + * @see RBTree_Compare. + */ +typedef long RBTree_Compare_result; + +/** + * @brief Compares two red-black tree nodes. + * + * @param[in] first The first node. + * @param[in] second The second node. + * + * @retval positive The key value of the first node is greater than the one of + * the second node. + * @retval 0 The key value of the first node is equal to the one of the second + * node. + * @retval negative The key value of the first node is less than the one of the + * second node. + */ +typedef RBTree_Compare_result ( *RBTree_Compare )( + const RBTree_Node *first, + const RBTree_Node *second +); + +/** + * @brief Initializer for an empty red-black tree with designator @a name. + */ +#define RBTREE_INITIALIZER_EMPTY( name ) \ + RB_INITIALIZER( name ) + +/** + * @brief Definition for an empty red-black tree with designator @a name. + */ +#define RBTREE_DEFINE_EMPTY( name ) \ + RBTree_Control name = RBTREE_INITIALIZER_EMPTY( name ) + +/** + * @brief Tries to find a node for the specified key in the tree. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] the_node A node specifying the key. + * @param[in] compare The node compare function. + * @param[in] is_unique If true, then return the first node with a key equal to + * the one of the node specified if it exits, else return the last node if it + * exists. + * + * @retval node A node corresponding to the key. If the tree is not unique + * and contains duplicate keys, the set of duplicate keys acts as FIFO. + * @retval NULL No node exists in the tree for the key. + */ +RBTree_Node *_RBTree_Find( + const RBTree_Control *the_rbtree, + const RBTree_Node *the_node, + RBTree_Compare compare, + bool is_unique +); + +/** + * @brief Inserts the node into the red-black tree. 
+ * + * In case the node is already a node of a tree, then this function yields + * unpredictable results. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] the_node The node to insert. + * @param[in] compare The node compare function. + * @param[in] is_unique If true, then reject nodes with a duplicate key, else + * insert nodes in FIFO order in case the key value is equal to existing nodes. + * + * @retval NULL Successfully inserted. + * @retval existing_node This is a unique insert and there exists a node with + * an equal key in the tree already. + */ +RBTree_Node *_RBTree_Insert( + RBTree_Control *the_rbtree, + RBTree_Node *the_node, + RBTree_Compare compare, + bool is_unique +); + +/** + * @brief Rebalances the red-black tree after insertion of the node. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] the_node The most recently inserted node. + */ +void _RBTree_Insert_color( + RBTree_Control *the_rbtree, + RBTree_Node *the_node +); + +/** + * @brief Adds a child node to a parent node. + * + * @param[in] child The child node. + * @param[in] parent The parent node. + * @param[in] link The child node link of the parent node. + */ +RTEMS_INLINE_ROUTINE void _RBTree_Add_child( + RBTree_Node *child, + RBTree_Node *parent, + RBTree_Node **link +) +{ + RB_SET( child, parent, Node ); + *link = child; +} + +/** + * @brief Inserts the node into the red-black tree using the specified parent + * node and link. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] the_node The node to insert. + * @param[in] parent The parent node. + * @param[in] link The child node link of the parent node. 
+ * + * @code + * #include <rtems/score/rbtree.h> + * + * typedef struct { + * int value; + * RBTree_Node Node; + * } Some_Node; + * + * bool _Some_Less( + * const RBTree_Node *a, + * const RBTree_Node *b + * ) + * { + * const Some_Node *aa = RTEMS_CONTAINER_OF( a, Some_Node, Node ); + * const Some_Node *bb = RTEMS_CONTAINER_OF( b, Some_Node, Node ); + * + * return aa->value < bb->value; + * } + * + * void _Some_Insert( + * RBTree_Control *the_rbtree, + * Some_Node *the_node + * ) + * { + * RBTree_Node **link = _RBTree_Root_reference( the_rbtree ); + * RBTree_Node *parent = NULL; + * + * while ( *link != NULL ) { + * parent = *link; + * + * if ( _Some_Less( &the_node->Node, parent ) ) { + * link = _RBTree_Left_reference( parent ); + * } else { + * link = _RBTree_Right_reference( parent ); + * } + * } + * + * _RBTree_Insert_with_parent( the_rbtree, &the_node->Node, parent, link ); + * } + * @endcode + */ +RTEMS_INLINE_ROUTINE void _RBTree_Insert_with_parent( + RBTree_Control *the_rbtree, + RBTree_Node *the_node, + RBTree_Node *parent, + RBTree_Node **link +) +{ + _RBTree_Add_child( the_node, parent, link ); + _RBTree_Insert_color( the_rbtree, the_node ); +} + +/** + * @brief Extracts (removes) the node from the red-black tree. + * + * This function does not set the node off-tree. In case this is desired, then + * call _RBTree_Set_off_tree() after the extraction. + * + * In case the node to extract is not a node of the tree, then this function + * yields unpredictable results. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] the_node The node to extract. + */ +void _RBTree_Extract( + RBTree_Control *the_rbtree, + RBTree_Node *the_node +); + +/** + * @brief Sets a red-black tree node as off-tree. + * + * Do not use this function on nodes which are a part of a tree. + * + * @param[in] the_node The node to set off-tree. + * + * @see _RBTree_Is_node_off_tree(). 
 */
RTEMS_INLINE_ROUTINE void _RBTree_Set_off_tree( RBTree_Node *the_node )
{
  /* NOTE(review): -1 is presumably a color value that <sys/tree.h> never
     assigns to a linked node, so it can serve as the off-tree marker
     tested by _RBTree_Is_node_off_tree() — confirm against sys/tree.h. */
  RB_COLOR( the_node, Node ) = -1;
}

/**
 * @brief Returns true, if this red-black tree node is off-tree, and false
 * otherwise.
 *
 * @param[in] the_node The node to test.
 *
 * @retval true The node is not a part of a tree (off-tree).
 * @retval false Otherwise.
 *
 * @see _RBTree_Set_off_tree().
 */
RTEMS_INLINE_ROUTINE bool _RBTree_Is_node_off_tree(
  const RBTree_Node *the_node
)
{
  return RB_COLOR( the_node, Node ) == -1;
}

/**
 * @brief Returns a pointer to root node of the red-black tree.
 *
 * The root node may change after insert or extract operations.
 *
 * @param[in] the_rbtree The red-black tree control.
 *
 * @retval NULL The tree is empty.
 * @retval root The root node.
 *
 * @see _RBTree_Is_root().
 */
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Root(
  const RBTree_Control *the_rbtree
)
{
  return RB_ROOT( the_rbtree );
}

/**
 * @brief Returns a reference to the root pointer of the red-black tree.
 *
 * The reference may be used as the starting child link for a manual
 * insertion, see the usage example at _RBTree_Insert_with_parent().
 */
RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Root_reference(
  RBTree_Control *the_rbtree
)
{
  return &RB_ROOT( the_rbtree );
}

/**
 * @brief Returns a pointer to the parent of this node.
 *
 * The node must have a parent, thus it is invalid to use this function for the
 * root node or a node that is not part of a tree.  To test for the root node
 * compare with _RBTree_Root() or use _RBTree_Is_root().
 *
 * @param[in] the_node The node of interest.
 *
 * @retval parent The parent of this node.
 * @retval undefined The node is the root node or not part of a tree.
 */
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Parent(
  const RBTree_Node *the_node
)
{
  return RB_PARENT( the_node, Node );
}

/**
 * @brief Return pointer to the left of this node.
 *
 * This function returns a pointer to the left node of this node.
 *
 * @param[in] the_node is the node to be operated upon.
 *
 * @return This method returns the left node on the rbtree.
 */
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Left(
  const RBTree_Node *the_node
)
{
  return RB_LEFT( the_node, Node );
}

/**
 * @brief Returns a reference to the left child pointer of the red-black tree
 * node.
 *
 * @note The reference may be used as a child link for _RBTree_Add_child()
 * or _RBTree_Insert_with_parent(), see the usage example there.
 */
RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Left_reference(
  RBTree_Node *the_node
)
{
  return &RB_LEFT( the_node, Node );
}

/**
 * @brief Return pointer to the right of this node.
 *
 * This function returns a pointer to the right node of this node.
 *
 * @param[in] the_node is the node to be operated upon.
 *
 * @return This method returns the right node on the rbtree.
 */
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Right(
  const RBTree_Node *the_node
)
{
  return RB_RIGHT( the_node, Node );
}

/**
 * @brief Returns a reference to the right child pointer of the red-black tree
 * node.
 *
 * @note The reference may be used as a child link for _RBTree_Add_child()
 * or _RBTree_Insert_with_parent(), see the usage example there.
 */
RTEMS_INLINE_ROUTINE RBTree_Node **_RBTree_Right_reference(
  RBTree_Node *the_node
)
{
  return &RB_RIGHT( the_node, Node );
}

/**
 * @brief Is the RBTree empty.
 *
 * This function returns true if there are no nodes on @a the_rbtree and
 * false otherwise.
 *
 * @param[in] the_rbtree is the rbtree to be operated upon.
 *
 * @retval true There are no nodes on @a the_rbtree.
 * @retval false There are nodes on @a the_rbtree.
 */
RTEMS_INLINE_ROUTINE bool _RBTree_Is_empty(
  const RBTree_Control *the_rbtree
)
{
  return RB_EMPTY( the_rbtree );
}

/**
 * @brief Returns true if this node is the root node of a red-black tree, and
 * false otherwise.
 *
 * The root node may change after insert or extract operations.  In case the
 * node is not a node of a tree, then this function yields unpredictable
 * results.
 *
 * @param[in] the_node The node of interest.
 *
 * @retval true The node is the root node.
 * @retval false Otherwise.
 *
 * @see _RBTree_Root().
+ */ +RTEMS_INLINE_ROUTINE bool _RBTree_Is_root( + const RBTree_Node *the_node +) +{ + return _RBTree_Parent( the_node ) == NULL; +} + +/** + * @brief Initialize this RBTree as empty. + * + * This routine initializes @a the_rbtree to contain zero nodes. + */ +RTEMS_INLINE_ROUTINE void _RBTree_Initialize_empty( + RBTree_Control *the_rbtree +) +{ + RB_INIT( the_rbtree ); +} + +/** + * @brief Returns the minimum node of the red-black tree. + * + * @param[in] the_rbtree The red-black tree control. + * + * @retval NULL The red-black tree is empty. + * @retval node The minimum node. + */ +RBTree_Node *_RBTree_Minimum( const RBTree_Control *the_rbtree ); + +/** + * @brief Returns the maximum node of the red-black tree. + * + * @param[in] the_rbtree The red-black tree control. + * + * @retval NULL The red-black tree is empty. + * @retval node The maximum node. + */ +RBTree_Node *_RBTree_Maximum( const RBTree_Control *the_rbtree ); + +/** + * @brief Returns the predecessor of a node. + * + * @param[in] node is the node. + * + * @retval NULL The predecessor does not exist. Otherwise it returns + * the predecessor node. + */ +RBTree_Node *_RBTree_Predecessor( const RBTree_Node *node ); + +/** + * @brief Returns the successor of a node. + * + * @param[in] node is the node. + * + * @retval NULL The successor does not exist. Otherwise the successor node. + */ +RBTree_Node *_RBTree_Successor( const RBTree_Node *node ); + +/** + * @brief Replaces a node in the red-black tree without a rebalance. + * + * @param[in] the_rbtree The red-black tree control. + * @param[in] victim The victim node. + * @param[in] replacement The replacement node. 
+ */ +void _RBTree_Replace_node( + RBTree_Control *the_rbtree, + RBTree_Node *victim, + RBTree_Node *replacement +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/rbtreeimpl.h b/include/rtems/score/rbtreeimpl.h new file mode 100644 index 0000000000..9c748eb01c --- /dev/null +++ b/include/rtems/score/rbtreeimpl.h @@ -0,0 +1,93 @@ +/** + * @file + * + * @brief Inlined Routines Associated with Red-Black Trees + * + * This include file contains the bodies of the routines which are + * associated with Red-Black Trees and inlined. + * + * @note The routines in this file are ordered from simple + * to complex. No other RBTree Handler routine is referenced + * unless it has already been defined. + */ + +/* + * Copyright (c) 2010-2012 Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_RBTREEIMPL_H +#define _RTEMS_SCORE_RBTREEIMPL_H + +#include <rtems/score/rbtree.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreRBTree + */ +/**@{**/ + +/** + * @brief Red-black tree visitor. + * + * @param[in] node The node. + * @param[in] visitor_arg The visitor argument. + * + * @retval true Stop the iteration. + * @retval false Continue the iteration. + * + * @see _RBTree_Iterate(). + */ +typedef bool (*RBTree_Visitor)( + const RBTree_Node *node, + void *visitor_arg +); + +/** + * @brief Red-black tree iteration. + * + * @param[in] rbtree The red-black tree. + * @param[in] visitor The visitor. + * @param[in] visitor_arg The visitor argument. 
+ */ +void _RBTree_Iterate( + const RBTree_Control *rbtree, + RBTree_Visitor visitor, + void *visitor_arg +); + +RTEMS_INLINE_ROUTINE bool _RBTree_Is_equal( + RBTree_Compare_result compare_result +) +{ + return compare_result == 0; +} + +RTEMS_INLINE_ROUTINE bool _RBTree_Is_greater( + RBTree_Compare_result compare_result +) +{ + return compare_result > 0; +} + +RTEMS_INLINE_ROUTINE bool _RBTree_Is_lesser( + RBTree_Compare_result compare_result +) +{ + return compare_result < 0; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/registers.h b/include/rtems/score/registers.h new file mode 100644 index 0000000000..254fef701b --- /dev/null +++ b/include/rtems/score/registers.h @@ -0,0 +1,72 @@ +/** + * @file + * + * @brief Intel CPU Constants and Definitions + * + * This file contains definition and constants related to Intel Cpu + */ + +/* + * COPYRIGHT (c) 1998 valette@crf.canon.fr + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_REGISTERS_H +#define _RTEMS_SCORE_REGISTERS_H + +/* + * definition related to EFLAGS + */ +#define EFLAGS_CARRY 0x1 +#define EFLAGS_PARITY 0x4 + +#define EFLAGS_AUX_CARRY 0x10 +#define EFLAGS_ZERO 0x40 +#define EFLAGS_SIGN 0x80 + +#define EFLAGS_TRAP 0x100 +#define EFLAGS_INTR_ENABLE 0x200 +#define EFLAGS_DIRECTION 0x400 +#define EFLAGS_OVERFLOW 0x800 + +#define EFLAGS_IOPL_MASK 0x3000 +#define EFLAGS_NESTED_TASK 0x8000 + +#define EFLAGS_RESUME 0x10000 +#define EFLAGS_VIRTUAL_MODE 0x20000 +#define EFLAGS_ALIGN_CHECK 0x40000 +#define EFLAGS_VIRTUAL_INTR 0x80000 + +#define EFLAGS_VIRTUAL_INTR_PEND 0x100000 +#define EFLAGS_ID 0x200000 + +/* + * definitions related to CR0 + */ +#define CR0_PROTECTION_ENABLE 0x1 +#define CR0_MONITOR_COPROC 0x2 +#define CR0_COPROC_SOFT_EMUL 0x4 +#define CR0_FLOATING_INSTR_EXCEPTION 0x8 + +#define CR0_EXTENSION_TYPE 0x10 +#define CR0_NUMERIC_ERROR 0x20 + +#define CR0_WRITE_PROTECT 0x10000 +#define CR0_ALIGMENT_MASK 0x40000 + +#define CR0_NO_WRITE_THROUGH 0x20000000 +#define CR0_PAGE_LEVEL_CACHE_DISABLE 0x40000000 +#define CR0_PAGING 0x80000000 + +/* + * definitions related to CR3 + */ + +#define CR3_PAGE_CACHE_DISABLE 0x10 +#define CR3_PAGE_WRITE_THROUGH 0x8 +#define CR3_PAGE_DIRECTORY_MASK 0xFFFFF000 + +#endif diff --git a/include/rtems/score/resource.h b/include/rtems/score/resource.h new file mode 100644 index 0000000000..ecf84a7275 --- /dev/null +++ b/include/rtems/score/resource.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_RESOURCE_H +#define _RTEMS_SCORE_RESOURCE_H + +#include <rtems/score/basedefs.h> +#include <rtems/score/chain.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreResource Resource Handler + * + * @ingroup Score + * + * @brief Support for resource dependency management. + * + * A resource is something that has at most one owner at a time and may have + * multiple rivals in case an owner is present. The owner and rivals are + * impersonated via resource nodes. A resource is represented via the resource + * control structure. The resource controls and nodes are organized as trees. + * It is possible to detect deadlocks via such a resource tree. The + * _Resource_Iterate() function can be used to iterate through such a resource + * tree starting at a top node. + * + * The following diagram shows an example resource tree with sixteen resource + * nodes n0 up to n15 and sixteen resources r0 up to r15. The root of this + * tree is n0. As a use case threads can be associated with resource nodes. + * In this case a thread represented by node n0 owns resources r0, r1, r2, r3, + * r6, r11 and r12 and is in the ready state. The threads represented by nodes + * n1 up to n15 wait directly or indirectly via resources owned by n0 and are + * in a blocked state. 
+ * + * @dot + * digraph { + * n0 [style=filled, fillcolor=green]; + * n0 -> r0; + * subgraph { + * rank=same; + * n1 [style=filled, fillcolor=green]; + * r0 -> n1; + * n2 [style=filled, fillcolor=green]; + * n1 -> n2; + * n4 [style=filled, fillcolor=green]; + * n2 -> n4; + * n6 [style=filled, fillcolor=green]; + * n4 -> n6; + * n8 [style=filled, fillcolor=green]; + * n6 -> n8; + * n15 [style=filled, fillcolor=green]; + * n8 -> n15; + * } + * n1 -> r5; + * subgraph { + * rank=same; + * n3 [style=filled, fillcolor=green]; + * r5 -> n3; + * n12 [style=filled, fillcolor=green]; + * n3 -> n12; + * } + * n3 -> r10; + * r10 -> r13; + * r13 -> r15; + * subgraph { + * rank=same; + * n10 [style=filled, fillcolor=green]; + * r15 -> n10; + * } + * r5 -> r7; + * subgraph { + * rank=same; + * n11 [style=filled, fillcolor=green]; + * r7 -> n11; + * n14 [style=filled, fillcolor=green]; + * n11 -> n14; + * } + * n14 -> r4; + * r7 -> r8; + * subgraph { + * rank=same; + * n13 [style=filled, fillcolor=green]; + * r8 -> n13; + * } + * r8 -> r9; + * n8 -> r14; + * r0 -> r1; + * subgraph { + * rank=same; + * n7 [style=filled, fillcolor=green]; + * r1 -> n7; + * } + * r1 -> r2; + * r2 -> r3; + * r3 -> r6; + * r6 -> r11; + * r11 -> r12; + * subgraph { + * rank=same; + * n5 [style=filled, fillcolor=green]; + * r12 -> n5; + * n9 [style=filled, fillcolor=green]; + * n5 -> n9; + * } + * } + * @enddot + * + * The following example illustrates a deadlock situation. The root of the + * tree tries to get ownership of a resource owned by one of its children. 
+ * + * @dot + * digraph { + * n0 [style=filled, fillcolor=green]; + * n0 -> r0; + * subgraph { + * rank=same; + * n1 [style=filled, fillcolor=green]; + * r0 -> n1; + * } + * n1 -> r1; + * n0 -> r1 [label=deadlock, style=dotted]; + * } + * @enddot + * + * @{ + */ + +typedef struct Resource_Node Resource_Node; + +typedef struct Resource_Control Resource_Control; + +/** + * @brief Resource node to reflect ownership of resources and a dependency on a + * resource. + */ +struct Resource_Node { + /** + * @brief Node to build a chain of rivals depending on a resource. + * + * @see Resource_Control::Rivals. + */ + Chain_Node Node; + + /** + * @brief A chain of resources owned by this node. + * + * @see Resource_Control::Node. + */ + Chain_Control Resources; + + /** + * @brief Reference to a resource in case this node has to wait for ownership + * of this resource. + * + * It is @c NULL in case this node has no open resource dependency. + */ + Resource_Control *dependency; + + /** + * @brief Reference to the root of the resource tree. + * + * The root references itself. + */ + Resource_Node *root; +}; + +/** + * @brief Resource control to manage ownership and rival nodes depending on a + * resource. + */ +struct Resource_Control { + /** + * @brief Node to build a chain of resources owned by a resource node. + * + * @see Resource_Node::Resources. + */ + Chain_Node Node; + + /** + * @brief A chain of rivals waiting for resource ownership. + * + * @see Resource_Node::Node. + */ + Chain_Control Rivals; + + /** + * @brief The owner of this resource. + */ + Resource_Node *owner; +}; + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_RESOURCE_H */ diff --git a/include/rtems/score/resourceimpl.h b/include/rtems/score/resourceimpl.h new file mode 100644 index 0000000000..69e9a3c5f8 --- /dev/null +++ b/include/rtems/score/resourceimpl.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. 
+ * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_RESOURCEIMPL_H +#define _RTEMS_SCORE_RESOURCEIMPL_H + +#include <rtems/score/resource.h> +#include <rtems/score/chainimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup ScoreResource + * + * @{ + */ + +/** + * @brief Visitor function for resource node iteration. + * + * The visitor is allowed to extract the node. + * + * @param[in] node The current resource node. + * @param[in] arg The argument passed to _Resource_Iterate(). + * + * @retval true Stop the iteration. + * @retval false Continue the iteration. + */ +typedef bool (*Resource_Node_visitor)( Resource_Node *node, void *arg ); + +/** + * @brief Iterates over all nodes of a resource dependency tree. + * + * @param[in] top The top node to start the iteration. The visitor function is + * not invoked for the top node. + * @param[in] visitor The visitor function. + * @param[in] arg The argument for the visitor function. 
+ */ +void _Resource_Iterate( + Resource_Node *top, + Resource_Node_visitor visitor, + void *arg +); + +RTEMS_INLINE_ROUTINE void _Resource_Node_initialize( Resource_Node *node ) +{ + node->dependency = NULL; + node->root = node; + _Chain_Initialize_empty( &node->Resources ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Node_set_dependency( + Resource_Node *node, + Resource_Control *dependency +) +{ + node->dependency = dependency; +} + +RTEMS_INLINE_ROUTINE Resource_Node *_Resource_Node_get_root( + const Resource_Node *node +) +{ + return node->root; +} + +RTEMS_INLINE_ROUTINE void _Resource_Node_set_root( + Resource_Node *node, + Resource_Node *root +) +{ + node->root = root; +} + +RTEMS_INLINE_ROUTINE bool _Resource_Node_owns_resources( const Resource_Node *node ) +{ + return !_Chain_Is_empty( &node->Resources ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Node_add_resource( + Resource_Node *node, + Resource_Control *resource +) +{ + _Chain_Prepend_unprotected( &node->Resources, &resource->Node ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Node_extract( Resource_Node *node ) +{ + _Chain_Extract_unprotected( &node->Node ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Initialize( Resource_Control *resource ) +{ + resource->owner = NULL; + _Chain_Initialize_empty( &resource->Rivals ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Add_rival( + Resource_Control *resource, + Resource_Node *node +) +{ + _Chain_Append_unprotected( &resource->Rivals, &node->Node ); +} + +RTEMS_INLINE_ROUTINE void _Resource_Extract( Resource_Control *resource ) +{ + _Chain_Extract_unprotected( &resource->Node ); +} + +RTEMS_INLINE_ROUTINE Resource_Node *_Resource_Get_owner( + const Resource_Control *resource +) +{ + return resource->owner; +} + +RTEMS_INLINE_ROUTINE void _Resource_Set_owner( + Resource_Control *resource, + Resource_Node *owner +) +{ + resource->owner = owner; +} + +/** + * @brief Returns true if this is the most recently obtained resource of the + * node, and false otherwise. 
+ * + * Resources are organized in last in first out order (LIFO). + * + * @param[in] resource The resource in question. + * @param[in] node The node that obtained the resource. + */ +RTEMS_INLINE_ROUTINE bool _Resource_Is_most_recently_obtained( + const Resource_Control *resource, + const Resource_Node *node +) +{ + return &resource->Node == _Chain_Immutable_first( &node->Resources ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_RESOURCEIMPL_H */ diff --git a/include/rtems/score/scheduler.h b/include/rtems/score/scheduler.h new file mode 100644 index 0000000000..5296644aeb --- /dev/null +++ b/include/rtems/score/scheduler.h @@ -0,0 +1,565 @@ +/** + * @file rtems/score/scheduler.h + * + * @brief Constants and Structures Associated with the Scheduler + * + * This include file contains all the constants and structures associated + * with the scheduler. + */ + +/* + * Copyright (C) 2010 Gedare Bloom. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULER_H +#define _RTEMS_SCORE_SCHEDULER_H + +#include <rtems/score/priority.h> +#include <rtems/score/thread.h> +#if defined(__RTEMS_HAVE_SYS_CPUSET_H__) && defined(RTEMS_SMP) + #include <sys/cpuset.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +struct Per_CPU_Control; + +/** + * @defgroup ScoreScheduler Scheduler Handler + * + * @ingroup Score + * + * This handler encapsulates functionality related to managing sets of threads + * that are ready for execution. 
+ */ +/**@{*/ + +typedef struct Scheduler_Control Scheduler_Control; + +typedef struct Scheduler_Node Scheduler_Node; + +#if defined(RTEMS_SMP) + typedef Thread_Control * Scheduler_Void_or_thread; + + #define SCHEDULER_RETURN_VOID_OR_NULL return NULL +#else + typedef void Scheduler_Void_or_thread; + + #define SCHEDULER_RETURN_VOID_OR_NULL return +#endif + +/** + * @brief The scheduler operations. + */ +typedef struct { + /** @see _Scheduler_Handler_initialization() */ + void ( *initialize )( const Scheduler_Control * ); + + /** @see _Scheduler_Schedule() */ + void ( *schedule )( const Scheduler_Control *, Thread_Control *); + + /** @see _Scheduler_Yield() */ + Scheduler_Void_or_thread ( *yield )( + const Scheduler_Control *, + Thread_Control * + ); + + /** @see _Scheduler_Block() */ + void ( *block )( + const Scheduler_Control *, + Thread_Control * + ); + + /** @see _Scheduler_Unblock() */ + Scheduler_Void_or_thread ( *unblock )( + const Scheduler_Control *, + Thread_Control * + ); + + /** @see _Scheduler_Change_priority() */ + Scheduler_Void_or_thread ( *change_priority )( + const Scheduler_Control *, + Thread_Control *, + Priority_Control, + bool + ); + +#if defined(RTEMS_SMP) + /** + * Ask for help operation. + * + * @param[in] scheduler The scheduler of the thread offering help. + * @param[in] offers_help The thread offering help. + * @param[in] needs_help The thread needing help. + * + * @retval needs_help It was not possible to schedule the thread needing + * help, so it is returned to continue the search for help. + * @retval next_needs_help It was possible to schedule the thread needing + * help, but this displaced another thread eligible to ask for help. So + * this thread is returned to start a new search for help. + * @retval NULL It was possible to schedule the thread needing help, and no + * other thread needs help as a result. + * + * @see _Scheduler_Ask_for_help(). 
+ */ + Thread_Control *( *ask_for_help )( + const Scheduler_Control *scheduler, + Thread_Control *offers_help, + Thread_Control *needs_help + ); +#endif + + /** @see _Scheduler_Node_initialize() */ + void ( *node_initialize )( const Scheduler_Control *, Thread_Control * ); + + /** @see _Scheduler_Node_destroy() */ + void ( *node_destroy )( const Scheduler_Control *, Thread_Control * ); + + /** @see _Scheduler_Update_priority() */ + void ( *update_priority )( + const Scheduler_Control *, + Thread_Control *, + Priority_Control + ); + + /** @see _Scheduler_Priority_compare() */ + int ( *priority_compare )( + Priority_Control, + Priority_Control + ); + + /** @see _Scheduler_Release_job() */ + void ( *release_job ) ( + const Scheduler_Control *, + Thread_Control *, + uint32_t + ); + + /** @see _Scheduler_Tick() */ + void ( *tick )( const Scheduler_Control *, Thread_Control * ); + + /** @see _Scheduler_Start_idle() */ + void ( *start_idle )( + const Scheduler_Control *, + Thread_Control *, + struct Per_CPU_Control * + ); + +#if defined(__RTEMS_HAVE_SYS_CPUSET_H__) && defined(RTEMS_SMP) + /** @see _Scheduler_Get_affinity() */ + bool ( *get_affinity )( + const Scheduler_Control *, + Thread_Control *, + size_t, + cpu_set_t * + ); + + /** @see _Scheduler_Set_affinity() */ + bool ( *set_affinity )( + const Scheduler_Control *, + Thread_Control *, + size_t, + const cpu_set_t * + ); +#endif +} Scheduler_Operations; + +/** + * @brief Scheduler context. + * + * The scheduler context of a particular scheduler implementation must place + * this structure at the begin of its context structure. + */ +typedef struct Scheduler_Context { +#if defined(RTEMS_SMP) + /** + * @brief Count of processors owned by this scheduler instance. + */ + uint32_t processor_count; +#endif +} Scheduler_Context; + +/** + * @brief Scheduler control. + */ +struct Scheduler_Control { + /** + * @brief Reference to a statically allocated scheduler context. 
+ */ + Scheduler_Context *context; + + /** + * @brief The scheduler operations. + */ + Scheduler_Operations Operations; + + /** + * @brief The scheduler name. + */ + uint32_t name; +}; + +#if defined(RTEMS_SMP) +/** + * @brief State to indicate potential help for other threads. + * + * @dot + * digraph state { + * y [label="HELP YOURSELF"]; + * ao [label="HELP ACTIVE OWNER"]; + * ar [label="HELP ACTIVE RIVAL"]; + * + * y -> ao [label="obtain"]; + * y -> ar [label="wait for obtain"]; + * ao -> y [label="last release"]; + * ao -> r [label="wait for obtain"]; + * ar -> r [label="timeout"]; + * ar -> ao [label="timeout"]; + * } + * @enddot + */ +typedef enum { + /** + * @brief This scheduler node is solely used by the owner thread. + * + * This thread owns no resources using a helping protocol and thus does not + * take part in the scheduler helping protocol. No help will be provided for + * other thread. + */ + SCHEDULER_HELP_YOURSELF, + + /** + * @brief This scheduler node is owned by a thread actively owning a resource. + * + * This scheduler node can be used to help out threads. + * + * In case this scheduler node changes its state from ready to scheduled and + * the thread executes using another node, then an idle thread will be + * provided as a user of this node to temporarily execute on behalf of the + * owner thread. Thus lower priority threads are denied access to the + * processors of this scheduler instance. + * + * In case a thread actively owning a resource performs a blocking operation, + * then an idle thread will be used also in case this node is in the + * scheduled state. + */ + SCHEDULER_HELP_ACTIVE_OWNER, + + /** + * @brief This scheduler node is owned by a thread actively obtaining a + * resource currently owned by another thread. + * + * This scheduler node can be used to help out threads. + * + * The thread owning this node is ready and will give away its processor in + * case the thread owning the resource asks for help. 
+ */ + SCHEDULER_HELP_ACTIVE_RIVAL, + + /** + * @brief This scheduler node is owned by a thread obtaining a + * resource currently owned by another thread. + * + * This scheduler node can be used to help out threads. + * + * The thread owning this node is blocked. + */ + SCHEDULER_HELP_PASSIVE +} Scheduler_Help_state; +#endif + +/** + * @brief Scheduler node for per-thread data. + */ +struct Scheduler_Node { +#if defined(RTEMS_SMP) + /** + * @brief Chain node for usage in various scheduler data structures. + * + * Strictly this is the wrong place for this field since the data structures + * to manage scheduler nodes belong to the particular scheduler + * implementation. Currently all SMP scheduler implementations use chains. + * The node is here to simplify things, just like the object node in the + * thread control block. It may be replaced with a union to add a red-black + * tree node in the future. + */ + Chain_Node Node; + + /** + * @brief The thread using this node. + */ + Thread_Control *user; + + /** + * @brief The help state of this node. + */ + Scheduler_Help_state help_state; + + /** + * @brief The thread owning this node. + */ + Thread_Control *owner; + + /** + * @brief The idle thread claimed by this node in case the help state is + * SCHEDULER_HELP_ACTIVE_OWNER. + * + * Active owners will lend their own node to an idle thread in case they + * execute currently using another node or in case they perform a blocking + * operation. This is necessary to ensure the priority ceiling protocols + * work across scheduler boundaries. + */ + Thread_Control *idle; + + /** + * @brief The thread accepting help by this node in case the help state is + * not SCHEDULER_HELP_YOURSELF. + */ + Thread_Control *accepts_help; +#endif +}; + +/** + * @brief Registered schedulers. + * + * Application provided via <rtems/confdefs.h>. + * + * @see _Scheduler_Count. + */ +extern const Scheduler_Control _Scheduler_Table[]; + +/** + * @brief Count of registered schedulers. 
+ * + * Application provided via <rtems/confdefs.h> on SMP configurations. + * + * It is very important that this is a compile-time constant on uni-processor + * configurations (in this case RTEMS_SMP is not defined) so that the compiler + * can optimize the some loops away + * + * @see _Scheduler_Table. + */ +#if defined(RTEMS_SMP) + extern const size_t _Scheduler_Count; +#else + #define _Scheduler_Count ( (size_t) 1 ) +#endif + +#if defined(RTEMS_SMP) + /** + * @brief The scheduler assignment default attributes. + */ + #define SCHEDULER_ASSIGN_DEFAULT UINT32_C(0x0) + + /** + * @brief The presence of this processor is optional. + */ + #define SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL SCHEDULER_ASSIGN_DEFAULT + + /** + * @brief The presence of this processor is mandatory. + */ + #define SCHEDULER_ASSIGN_PROCESSOR_MANDATORY UINT32_C(0x1) + + /** + * @brief Scheduler assignment. + */ + typedef struct { + /** + * @brief The scheduler for this processor. + */ + const Scheduler_Control *scheduler; + + /** + * @brief The scheduler assignment attributes. + * + * Use @ref SCHEDULER_ASSIGN_DEFAULT to select default attributes. + * + * The presence of a processor can be + * - @ref SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL, or + * - @ref SCHEDULER_ASSIGN_PROCESSOR_MANDATORY. + */ + uint32_t attributes; + } Scheduler_Assignment; + + /** + * @brief The scheduler assignments. + * + * The length of this array must be equal to the maximum processors. + * + * Application provided via <rtems/confdefs.h>. + * + * @see _Scheduler_Table and rtems_configuration_get_maximum_processors(). + */ + extern const Scheduler_Assignment _Scheduler_Assignments[]; +#endif + +#if defined(RTEMS_SMP) + /** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] offers_help Unused. + * @param[in] needs_help Unused. + * + * @retval NULL Always. 
+ */ + Thread_Control *_Scheduler_default_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *offers_help, + Thread_Control *needs_help + ); + + #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \ + _Scheduler_default_Ask_for_help, +#else + #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP +#endif + +/** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + */ +void _Scheduler_default_Schedule( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + */ +void _Scheduler_default_Node_initialize( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + */ +void _Scheduler_default_Node_destroy( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + * @param[in] new_priority Unused. + */ +void _Scheduler_default_Update_priority( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Priority_Control new_priority +); + +/** + * @brief Does nothing. + * + * @param[in] scheduler Unused. + * @param[in] the_thread Unused. + * @param[in] deadline Unused. + */ +void _Scheduler_default_Release_job( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + uint32_t deadline +); + +/** + * @brief Performs tick operations depending on the CPU budget algorithm for + * each executing thread. + * + * This routine is invoked as part of processing each clock tick. + * + * @param[in] scheduler The scheduler. + * @param[in] executing An executing thread. + */ +void _Scheduler_default_Tick( + const Scheduler_Control *scheduler, + Thread_Control *executing +); + +/** + * @brief Starts an idle thread. + * + * @param[in] scheduler The scheduler. 
+ * @param[in] the_thread An idle thread. + * @param[in] cpu This parameter is unused. + */ +void _Scheduler_default_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + struct Per_CPU_Control *cpu +); + +#if defined(__RTEMS_HAVE_SYS_CPUSET_H__) && defined(RTEMS_SMP) + /** + * @brief Get affinity for the default scheduler. + * + * @param[in] scheduler The scheduler instance. + * @param[in] thread The associated thread. + * @param[in] cpusetsize The size of the cpuset. + * @param[out] cpuset Affinity set containing all CPUs. + * + * @retval 0 Successfully got cpuset + * @retval -1 The cpusetsize is invalid for the system + */ + bool _Scheduler_default_Get_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + size_t cpusetsize, + cpu_set_t *cpuset + ); + + /** + * @brief Set affinity for the default scheduler. + * + * @param[in] scheduler The scheduler instance. + * @param[in] thread The associated thread. + * @param[in] cpusetsize The size of the cpuset. + * @param[in] cpuset Affinity new affinity set. + * + * @retval 0 Successful + * + * This method always returns successful and does not save + * the cpuset. 
+ */ + bool _Scheduler_default_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + size_t cpusetsize, + const cpu_set_t *cpuset + ); + + #define SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + , _Scheduler_default_Get_affinity \ + , _Scheduler_default_Set_affinity +#else + #define SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY +#endif + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulercbs.h b/include/rtems/score/schedulercbs.h new file mode 100644 index 0000000000..5558d6e919 --- /dev/null +++ b/include/rtems/score/schedulercbs.h @@ -0,0 +1,356 @@ +/** + * @file rtems/score/schedulercbs.h + * + * @brief Thread manipulation for the CBS scheduler + * + * This include file contains all the constants and structures associated + * with the manipulation of threads for the CBS scheduler. + */ + +/* + * Copryight (c) 2011 Petr Benes. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULERCBS_H +#define _RTEMS_SCORE_SCHEDULERCBS_H + +#include <rtems/score/chain.h> +#include <rtems/score/priority.h> +#include <rtems/score/scheduler.h> +#include <rtems/score/rbtree.h> +#include <rtems/score/scheduleredf.h> +#include <rtems/rtems/signal.h> +#include <rtems/rtems/timer.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSchedulerCBS CBS Scheduler + * + * @ingroup ScoreScheduler + */ +/**@{*/ + +/** + * Entry points for the Constant Bandwidth Server Scheduler. + * + * @note: The CBS scheduler is an enhancement of EDF scheduler, + * therefor some routines are similar. 
+ */ +#define SCHEDULER_CBS_ENTRY_POINTS \ + { \ + _Scheduler_EDF_Initialize, /* initialize entry point */ \ + _Scheduler_EDF_Schedule, /* schedule entry point */ \ + _Scheduler_EDF_Yield, /* yield entry point */ \ + _Scheduler_EDF_Block, /* block entry point */ \ + _Scheduler_CBS_Unblock, /* unblock entry point */ \ + _Scheduler_EDF_Change_priority, /* change priority entry point */ \ + SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \ + _Scheduler_CBS_Node_initialize, /* node initialize entry point */ \ + _Scheduler_default_Node_destroy, /* node destroy entry point */ \ + _Scheduler_EDF_Update_priority, /* update priority entry point */ \ + _Scheduler_EDF_Priority_compare, /* compares two priorities */ \ + _Scheduler_CBS_Release_job, /* new period of task */ \ + _Scheduler_default_Tick, /* tick entry point */ \ + _Scheduler_default_Start_idle /* start idle entry point */ \ + SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + } + +/* Return values for CBS server. */ +#define SCHEDULER_CBS_OK 0 +#define SCHEDULER_CBS_ERROR_GENERIC -16 +#define SCHEDULER_CBS_ERROR_NO_MEMORY -17 +#define SCHEDULER_CBS_ERROR_INVALID_PARAMETER -18 +#define SCHEDULER_CBS_ERROR_UNAUTHORIZED -19 +#define SCHEDULER_CBS_ERROR_UNIMPLEMENTED -20 +#define SCHEDULER_CBS_ERROR_MISSING_COMPONENT -21 +#define SCHEDULER_CBS_ERROR_INCONSISTENT_STATE -22 +#define SCHEDULER_CBS_ERROR_SYSTEM_OVERLOAD -23 +#define SCHEDULER_CBS_ERROR_INTERNAL_ERROR -24 +#define SCHEDULER_CBS_ERROR_NOT_FOUND -25 +#define SCHEDULER_CBS_ERROR_FULL -26 +#define SCHEDULER_CBS_ERROR_EMPTY -27 +#define SCHEDULER_CBS_ERROR_NOSERVER SCHEDULER_CBS_ERROR_NOT_FOUND + +/** Maximum number of simultaneous servers. */ +extern const uint32_t _Scheduler_CBS_Maximum_servers; + +/** Server id. */ +typedef uint32_t Scheduler_CBS_Server_id; + +/** Callback function invoked when a budget overrun of a task occurs. 
*/ +typedef void (*Scheduler_CBS_Budget_overrun)( + Scheduler_CBS_Server_id server_id +); + +/** + * This structure handles server parameters. + */ +typedef struct { + /** Relative deadline of the server. */ + time_t deadline; + /** Budget (computation time) of the server. */ + time_t budget; +} Scheduler_CBS_Parameters; + +/** + * This structure represents a time server. + */ +typedef struct { + /** + * Task id. + * + * @note: The current implementation of CBS handles only one task per server. + */ + rtems_id task_id; + /** Server paramenters. */ + Scheduler_CBS_Parameters parameters; + /** Callback function invoked when a budget overrun occurs. */ + Scheduler_CBS_Budget_overrun cbs_budget_overrun; + + /** + * @brief Indicates if this CBS server is initialized. + * + * @see _Scheduler_CBS_Create_server() and _Scheduler_CBS_Destroy_server(). + */ + bool initialized; +} Scheduler_CBS_Server; + +/** + * This structure handles CBS specific data of a thread. + */ +typedef struct { + /** EDF scheduler specific data of a task. */ + Scheduler_EDF_Node Base; + /** CBS server specific data of a task. */ + Scheduler_CBS_Server *cbs_server; +} Scheduler_CBS_Node; + + +/** + * List of servers. The @a Scheduler_CBS_Server is the index to the array + * of pointers to @a _Scheduler_CBS_Server_list. + */ +extern Scheduler_CBS_Server _Scheduler_CBS_Server_list[]; + +/** + * @brief Unblocks a thread from the queue. + * + * This routine adds @a the_thread to the scheduling decision, that is, + * adds it to the ready queue and updates any appropriate scheduling + * variables, for example the heir thread. It is checked whether the + * remaining budget is sufficient. If not, the thread continues as a + * new job in order to protect concurrent threads. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread will be unblocked. + * + * @note This has to be asessed as missed deadline of the current job. 
+ */ +Scheduler_Void_or_thread _Scheduler_CBS_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Called when a new job of task is released. + * + * This routine is called when a new job of task is released. + * It is called only from Rate Monotonic manager in the beginning + * of new period. Deadline has to be shifted and budget replenished. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread is the owner of the job. + * @param[in] length of the new job from now. If equal to 0, + * the job was cancelled or deleted. + */ + +void _Scheduler_CBS_Release_job ( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + uint32_t length +); + +/** + * @brief _Scheduler_CBS_Initialize + * + * Initializes the CBS library. + * + * @retval status code. + */ +int _Scheduler_CBS_Initialize(void); + +/** + * @brief Attach a task to an already existing server. + * + * Attach a task to an already existing server. + * + * @retval status code. + */ +int _Scheduler_CBS_Attach_thread ( + Scheduler_CBS_Server_id server_id, + rtems_id task_id +); + +/** + * @brief Detach from the CBS Server. + * + * Detach from the CBS Server. + * + * @retval status code. + */ +int _Scheduler_CBS_Detach_thread ( + Scheduler_CBS_Server_id server_id, + rtems_id task_id +); + +/** + * @brief Cleanup resources associated to the CBS Library. + * + * Cleanup resources associated to the CBS Library. + * + * @retval status code. + */ +int _Scheduler_CBS_Cleanup (void); + +/** + * @brief Create a new server with specified parameters. + * + * Create a new server with specified parameters. + * + * @retval status code. + */ +int _Scheduler_CBS_Create_server ( + Scheduler_CBS_Parameters *params, + Scheduler_CBS_Budget_overrun budget_overrun_callback, + rtems_id *server_id +); + +/** + * @brief Detach all tasks from a server and destroy it. + * + * Detach all tasks from a server and destroy it. 
+ * + * @param[in] server_id is the ID of the server + * + * @retval status code. + */ +int _Scheduler_CBS_Destroy_server ( + Scheduler_CBS_Server_id server_id +); + +/** + * @brief Retrieve the approved budget. + * + * Retrieve the budget that has been approved for the subsequent + * server instances. + * + * @retval status code. + */ +int _Scheduler_CBS_Get_approved_budget ( + Scheduler_CBS_Server_id server_id, + time_t *approved_budget +); + +/** + * @brief Retrieve remaining budget for the current server instance. + * + * Retrieve remaining budget for the current server instance. + * + * @retval status code. + */ +int _Scheduler_CBS_Get_remaining_budget ( + Scheduler_CBS_Server_id server_id, + time_t *remaining_budget +); + +/** + * @brief Get relative time info. + * + * Retrieve time info relative to @a server_id. The server status code is returned. + * + * @param[in] server_id is the server to get the status code from. + * @param[in] exec_time is the execution time. + * @param[in] abs_time is not apparently used. + * + * @retval status code. + */ +int _Scheduler_CBS_Get_execution_time ( + Scheduler_CBS_Server_id server_id, + time_t *exec_time, + time_t *abs_time +); + +/** + * @brief Retrieve CBS scheduling parameters. + * + * Retrieve CBS scheduling parameters. + * + * @retval status code. + */ +int _Scheduler_CBS_Get_parameters ( + Scheduler_CBS_Server_id server_id, + Scheduler_CBS_Parameters *params +); + +/** + * @brief Get a thread server id. + * + * Get a thread server id, or SCHEDULER_CBS_ERROR_NOT_FOUND if it is not + * attached to any server. + * + * @retval status code. + */ +int _Scheduler_CBS_Get_server_id ( + rtems_id task_id, + Scheduler_CBS_Server_id *server_id +); + +/** + * @brief Set parameters for CBS scheduling. + * + * Change CBS scheduling parameters. + * + * @param[in] server_id is the ID of the server. + * @param[in] parameters are the parameters to set. + * + * @retval status code. 
+ */ +int _Scheduler_CBS_Set_parameters ( + Scheduler_CBS_Server_id server_id, + Scheduler_CBS_Parameters *parameters +); + +/** + * @brief Invoked when a limited time quantum is exceeded. + * + * This routine is invoked when a limited time quantum is exceeded. + */ +void _Scheduler_CBS_Budget_callout( + Thread_Control *the_thread +); + +/** + * @brief Initializes a CBS specific scheduler node of @a the_thread. + */ +void _Scheduler_CBS_Node_initialize( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulercbsimpl.h b/include/rtems/score/schedulercbsimpl.h new file mode 100644 index 0000000000..ddc79fe6da --- /dev/null +++ b/include/rtems/score/schedulercbsimpl.h @@ -0,0 +1,52 @@ +/** + * @file + * + * @brief CBS Scheduler Implementation + * + * @ingroup ScoreSchedulerCBS + */ + +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SCHEDULERCBSIMPL_H +#define _RTEMS_SCORE_SCHEDULERCBSIMPL_H + +#include <rtems/score/schedulercbs.h> +#include <rtems/score/schedulerimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup ScoreSchedulerCBS + * + * @{ + */ + +RTEMS_INLINE_ROUTINE Scheduler_CBS_Node *_Scheduler_CBS_Thread_get_node( + Thread_Control *the_thread +) +{ + return (Scheduler_CBS_Node *) _Scheduler_Thread_get_node( the_thread ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SCHEDULERCBSIMPL_H */ diff --git a/include/rtems/score/scheduleredf.h b/include/rtems/score/scheduleredf.h new file mode 100644 index 0000000000..9b0d1b4099 --- /dev/null +++ b/include/rtems/score/scheduleredf.h @@ -0,0 +1,265 @@ +/** + * @file rtems/score/scheduleredf.h + * + * @brief Data Related to the Manipulation of Threads for the EDF Scheduler + * + * This include file contains all the constants and structures associated + * with the manipulation of threads for the EDF scheduler. + */ + +/* + * Copryight (c) 2011 Petr Benes. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULEREDF_H +#define _RTEMS_SCORE_SCHEDULEREDF_H + +#include <rtems/score/priority.h> +#include <rtems/score/scheduler.h> +#include <rtems/score/schedulerpriority.h> +#include <rtems/score/rbtree.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSchedulerEDF EDF Scheduler + * + * @ingroup ScoreScheduler + */ +/**@{*/ + +/** + * Entry points for the Earliest Deadline First Scheduler. 
+ */ +#define SCHEDULER_EDF_ENTRY_POINTS \ + { \ + _Scheduler_EDF_Initialize, /* initialize entry point */ \ + _Scheduler_EDF_Schedule, /* schedule entry point */ \ + _Scheduler_EDF_Yield, /* yield entry point */ \ + _Scheduler_EDF_Block, /* block entry point */ \ + _Scheduler_EDF_Unblock, /* unblock entry point */ \ + _Scheduler_EDF_Change_priority, /* change priority entry point */ \ + SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \ + _Scheduler_EDF_Node_initialize, /* node initialize entry point */ \ + _Scheduler_default_Node_destroy, /* node destroy entry point */ \ + _Scheduler_EDF_Update_priority, /* update priority entry point */ \ + _Scheduler_EDF_Priority_compare, /* compares two priorities */ \ + _Scheduler_EDF_Release_job, /* new period of task */ \ + _Scheduler_default_Tick, /* tick entry point */ \ + _Scheduler_default_Start_idle /* start idle entry point */ \ + SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + } + +/** + * This is just a most significant bit of Priority_Control type. It + * distinguishes threads which are deadline driven (priority + * represented by a lower number than @a SCHEDULER_EDF_PRIO_MSB) from those + * ones who do not have any deadlines and thus are considered background + * tasks. + */ +#define SCHEDULER_EDF_PRIO_MSB 0x80000000 + +typedef struct { + /** + * @brief Basic scheduler context. + */ + Scheduler_Context Base; + + /** + * Top of the ready queue. + */ + RBTree_Control Ready; +} Scheduler_EDF_Context; + +/** + * @typedef Scheduler_EDF_Queue_state + * + * This enumeration distiguishes state of a thread with respect to the + * ready queue. + */ +typedef enum { + SCHEDULER_EDF_QUEUE_STATE_NOT_PRESENTLY, + SCHEDULER_EDF_QUEUE_STATE_YES, + SCHEDULER_EDF_QUEUE_STATE_NEVER_HAS_BEEN +} Scheduler_EDF_Queue_state; + +/** + * @brief Scheduler node specialization for EDF schedulers. + */ +typedef struct { + /** + * @brief Basic scheduler node. + */ + Scheduler_Node Base; + + /** + * Pointer to corresponding Thread Control Block. 
+ */ + Thread_Control *thread; + /** + * Rbtree node related to this thread. + */ + RBTree_Node Node; + /** + * State of the thread with respect to ready queue. + */ + Scheduler_EDF_Queue_state queue_state; +} Scheduler_EDF_Node; + +/** + * @brief Initialize EDF scheduler. + * + * This routine initializes the EDF scheduler. + * + * @param[in] scheduler The scheduler instance. + */ +void _Scheduler_EDF_Initialize( const Scheduler_Control *scheduler ); + +/** + * @brief Removes thread from ready queue. + * + * This routine removes @a the_thread from the scheduling decision, + * that is, removes it from the ready queue. It performs + * any necessary scheduling operations including the selection of + * a new heir thread. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread is the thread to be blocked. + */ +void _Scheduler_EDF_Block( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Sets the heir thread to be the next ready thread + * in the rbtree ready queue. + * + * This kernel routine sets the heir thread to be the next ready thread + * in the rbtree ready queue. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread being scheduled. + */ +void _Scheduler_EDF_Schedule( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Initializes an EDF specific scheduler node of @a the_thread. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread being initialized. + */ +void _Scheduler_EDF_Node_initialize( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Updates position in the ready queue of @a the_thread. + * + * This routine updates position in the ready queue of @a the_thread. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread will have its scheduler specific information + * structure updated. + * @param[in] new_priority is the desired new priority. 
+ */ +void _Scheduler_EDF_Update_priority( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Priority_Control new_priority +); + +/** + * @brief Adds @a the_thread to the scheduling decision. + * + * This routine adds @a the_thread to the scheduling decision, that is, + * adds it to the ready queue and updates any appropriate scheduling + * variables, for example the heir thread. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread will be unblocked. + */ +Scheduler_Void_or_thread _Scheduler_EDF_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +Scheduler_Void_or_thread _Scheduler_EDF_Change_priority( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Priority_Control new_priority, + bool prepend_it +); + +/** + * @brief invoked when a thread wishes to voluntarily + * transfer control of the processor to another thread + * with equal deadline. + * + * This routine is invoked when a thread wishes to voluntarily + * transfer control of the processor to another thread in the queue with + * equal deadline. This does not have to happen very often. + * + * This routine will remove the specified THREAD from the ready queue + * and place it back. The rbtree ready queue is responsible for FIFO ordering + * in such a case. + * + * @param[in] scheduler The scheduler instance. + * @param[in,out] the_thread The yielding thread. + */ +Scheduler_Void_or_thread _Scheduler_EDF_Yield( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Explicitly compare absolute dedlines (priorities) of threads. + * + * This routine explicitly compares absolute dedlines (priorities) of threads. + * In case of EDF scheduling time overflow is taken into account. + * + * @retval >0 for p1 > p2; 0 for p1 == p2; <0 for p1 < p2. 
+ */ +int _Scheduler_EDF_Priority_compare ( + Priority_Control p1, + Priority_Control p2 +); + +/** + * @brief Called when a new job of task is released. + * + * This routine is called when a new job of task is released. + * It is called only from Rate Monotonic manager in the beginning + * of new period. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread is the owner of the job. + * @param[in] deadline of the new job from now. If equal to 0, + * the job was cancelled or deleted, thus a running task + * has to be suspended. + */ +void _Scheduler_EDF_Release_job ( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + uint32_t deadline +); + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/scheduleredfimpl.h b/include/rtems/score/scheduleredfimpl.h new file mode 100644 index 0000000000..4feea71e51 --- /dev/null +++ b/include/rtems/score/scheduleredfimpl.h @@ -0,0 +1,107 @@ +/** + * @file + * + * @ingroup ScoreSchedulerEDF + * + * @brief EDF Scheduler Implementation + */ + +/* + * Copryight (c) 2011 Petr Benes. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SCHEDULEREDFIMPL_H +#define _RTEMS_SCORE_SCHEDULEREDFIMPL_H + +#include <rtems/score/scheduleredf.h> +#include <rtems/score/schedulerimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreSchedulerEDF + * + * @{ + */ + +RTEMS_INLINE_ROUTINE Scheduler_EDF_Context * + _Scheduler_EDF_Get_context( const Scheduler_Control *scheduler ) +{ + return (Scheduler_EDF_Context *) _Scheduler_Get_context( scheduler ); +} + +RTEMS_INLINE_ROUTINE Scheduler_EDF_Node *_Scheduler_EDF_Thread_get_node( + Thread_Control *the_thread +) +{ + return (Scheduler_EDF_Node *) _Scheduler_Thread_get_node( the_thread ); +} + +RBTree_Compare_result _Scheduler_EDF_Compare( + const RBTree_Node* n1, + const RBTree_Node* n2 +); + +RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Enqueue( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + Scheduler_EDF_Context *context = + _Scheduler_EDF_Get_context( scheduler ); + Scheduler_EDF_Node *node = _Scheduler_EDF_Thread_get_node( the_thread ); + + _RBTree_Insert( + &context->Ready, + &node->Node, + _Scheduler_EDF_Compare, + false + ); + node->queue_state = SCHEDULER_EDF_QUEUE_STATE_YES; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Extract( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + Scheduler_EDF_Context *context = + _Scheduler_EDF_Get_context( scheduler ); + Scheduler_EDF_Node *node = _Scheduler_EDF_Thread_get_node( the_thread ); + + _RBTree_Extract( &context->Ready, &node->Node ); +} + +RTEMS_INLINE_ROUTINE void _Scheduler_EDF_Schedule_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + bool force_dispatch +) +{ + Scheduler_EDF_Context *context = + _Scheduler_EDF_Get_context( scheduler ); + RBTree_Node *first = _RBTree_Minimum( &context->Ready ); + Scheduler_EDF_Node *node = + RTEMS_CONTAINER_OF( first, Scheduler_EDF_Node, Node ); + Thread_Control *heir = node->thread; + + ( void ) the_thread; + + _Scheduler_Update_heir( heir, 
force_dispatch ); +} + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulerimpl.h b/include/rtems/score/schedulerimpl.h new file mode 100644 index 0000000000..cadebfd02f --- /dev/null +++ b/include/rtems/score/schedulerimpl.h @@ -0,0 +1,1402 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the Manipulation of the Scheduler + * + * This inline file contains all of the inlined routines associated with + * the manipulation of the scheduler. + */ + +/* + * Copyright (C) 2010 Gedare Bloom. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * Copyright (c) 2014-2015 embedded brains GmbH + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H +#define _RTEMS_SCORE_SCHEDULERIMPL_H + +#include <rtems/score/scheduler.h> +#include <rtems/score/cpusetimpl.h> +#include <rtems/score/smpimpl.h> +#include <rtems/score/threadimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreScheduler + */ +/**@{**/ + +/** + * @brief Initializes the scheduler to the policy chosen by the user. + * + * This routine initializes the scheduler to the policy chosen by the user + * through confdefs, or to the priority scheduler with ready chains by + * default. 
+ */ +void _Scheduler_Handler_initialization( void ); + +RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context( + const Scheduler_Control *scheduler +) +{ + return scheduler->context; +} + +RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get( + const Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + return the_thread->Scheduler.control; +#else + (void) the_thread; + + return &_Scheduler_Table[ 0 ]; +#endif +} + +RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own( + const Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + return the_thread->Scheduler.own_control; +#else + (void) the_thread; + + return &_Scheduler_Table[ 0 ]; +#endif +} + +RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index( + uint32_t cpu_index +) +{ +#if defined(RTEMS_SMP) + return _Scheduler_Assignments[ cpu_index ].scheduler; +#else + (void) cpu_index; + + return &_Scheduler_Table[ 0 ]; +#endif +} + +RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU( + const Per_CPU_Control *cpu +) +{ + uint32_t cpu_index = _Per_CPU_Get_index( cpu ); + + return _Scheduler_Get_by_CPU_index( cpu_index ); +} + +RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node( + const Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + return the_thread->Scheduler.own_node; +#else + return the_thread->Scheduler.node; +#endif +} + +#if defined(RTEMS_SMP) +RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user( + const Scheduler_Node *node +) +{ + return node->user; +} +#endif + +/** + * The preferred method to add a new scheduler is to define the jump table + * entries and add a case to the _Scheduler_Initialize routine. + * + * Generic scheduling implementations that rely on the ready queue only can + * be found in the _Scheduler_queue_XXX functions. 
+ */ + +/* + * Passing the Scheduler_Control* to these functions allows for multiple + * scheduler's to exist simultaneously, which could be useful on an SMP + * system. Then remote Schedulers may be accessible. How to protect such + * accesses remains an open problem. + */ + +/** + * @brief General scheduling decision. + * + * This kernel routine implements the scheduling decision logic for + * the scheduler. It does NOT dispatch. + * + * @param[in] the_thread The thread which state changed previously. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread ) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); + + ( *scheduler->Operations.schedule )( scheduler, the_thread ); +} + +#if defined(RTEMS_SMP) +typedef struct { + Thread_Control *needs_help; + Thread_Control *next_needs_help; +} Scheduler_Ask_for_help_context ; + +RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor( + Resource_Node *resource_node, + void *arg +) +{ + bool done; + Scheduler_Ask_for_help_context *help_context = arg; + Thread_Control *previous_needs_help = help_context->needs_help; + Thread_Control *next_needs_help; + Thread_Control *offers_help = + THREAD_RESOURCE_NODE_TO_THREAD( resource_node ); + const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help ); + + next_needs_help = ( *scheduler->Operations.ask_for_help )( + scheduler, + offers_help, + previous_needs_help + ); + + done = next_needs_help != previous_needs_help; + + if ( done ) { + help_context->next_needs_help = next_needs_help; + } + + return done; +} + +/** + * @brief Ask threads depending on resources owned by the thread for help. + * + * A thread is in need for help if it lost its assigned processor due to + * pre-emption by a higher priority thread or it was not possible to assign it + * a processor since its priority is to low on its current scheduler instance. 
+ * + * The run-time of this function depends on the size of the resource tree of + * the thread needing help and other resource trees in case threads in need for + * help are produced during this operation. + * + * @param[in] needs_help The thread needing help. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( + Thread_Control *needs_help +) +{ + do { + const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help ); + + needs_help = ( *scheduler->Operations.ask_for_help )( + scheduler, + needs_help, + needs_help + ); + + if ( needs_help != NULL ) { + Scheduler_Ask_for_help_context help_context = { needs_help, NULL }; + + _Resource_Iterate( + &needs_help->Resource_node, + _Scheduler_Ask_for_help_visitor, + &help_context + ); + + needs_help = help_context.next_needs_help; + } + } while ( needs_help != NULL ); +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary( + Thread_Control *needs_help +) +{ + if ( + needs_help != NULL + && _Resource_Node_owns_resources( &needs_help->Resource_node ) + ) { + Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help ); + + if ( + node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL + || _Scheduler_Node_get_user( node ) != needs_help + ) { + _Scheduler_Ask_for_help( needs_help ); + } + } +} +#endif + +/** + * @brief Scheduler yield with a particular thread. + * + * This routine is invoked when a thread wishes to voluntarily transfer control + * of the processor to another thread. + * + * @param[in] the_thread The yielding thread. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread ) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); +#if defined(RTEMS_SMP) + Thread_Control *needs_help; + + needs_help = +#endif + ( *scheduler->Operations.yield )( scheduler, the_thread ); + +#if defined(RTEMS_SMP) + _Scheduler_Ask_for_help_if_necessary( needs_help ); +#endif +} + +/** + * @brief Blocks a thread with respect to the scheduler. 
+ * + * This routine removes @a the_thread from the scheduling decision for + * the scheduler. The primary task is to remove the thread from the + * ready queue. It performs any necessary schedulering operations + * including the selection of a new heir thread. + * + * @param[in] the_thread The thread. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread ) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); + + ( *scheduler->Operations.block )( scheduler, the_thread ); +} + +/** + * @brief Unblocks a thread with respect to the scheduler. + * + * This routine adds @a the_thread to the scheduling decision for + * the scheduler. The primary task is to add the thread to the + * ready queue per the schedulering policy and update any appropriate + * scheduling variables, for example the heir thread. + * + * @param[in] the_thread The thread. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread ) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); +#if defined(RTEMS_SMP) + Thread_Control *needs_help; + + needs_help = +#endif + ( *scheduler->Operations.unblock )( scheduler, the_thread ); + +#if defined(RTEMS_SMP) + _Scheduler_Ask_for_help_if_necessary( needs_help ); +#endif +} + +/** + * @brief Propagates a priority change of a thread to the scheduler. + * + * The caller must ensure that the thread is in the ready state. The caller + * must ensure that the priority value actually changed and is not equal to the + * current priority value. + * + * The operation must update the heir and thread dispatch necessary variables + * in case the set of scheduled threads changes. + * + * @param[in] the_thread The thread changing its priority. + * @param[in] new_priority The new thread priority. + * @param[in] prepend_it In case this is true, then enqueue the thread as the + * first of its priority group, otherwise enqueue the thread as the last of its + * priority group. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + bool prepend_it +) +{ + const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread ); +#if defined(RTEMS_SMP) + Thread_Control *needs_help; + + needs_help = +#endif + ( *scheduler->Operations.change_priority )( + scheduler, + the_thread, + new_priority, + prepend_it + ); + +#if defined(RTEMS_SMP) + _Scheduler_Ask_for_help_if_necessary( needs_help ); +#endif +} + +/** + * @brief Initializes a scheduler node. + * + * The scheduler node contains arbitrary data on function entry. The caller + * must ensure that _Scheduler_Node_destroy() will be called after a + * _Scheduler_Node_initialize() before the memory of the scheduler node is + * destroyed. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread The thread containing the scheduler node. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + return ( *scheduler->Operations.node_initialize )( scheduler, the_thread ); +} + +/** + * @brief Destroys a scheduler node. + * + * The caller must ensure that _Scheduler_Node_destroy() will be called only + * after a corresponding _Scheduler_Node_initialize(). + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread The thread containing the scheduler node. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + ( *scheduler->Operations.node_destroy )( scheduler, the_thread ); +} + +/** + * @brief Updates the scheduler about a priority change of a not ready thread. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( + Thread_Control *the_thread, + Priority_Control new_priority +) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); + + ( *scheduler->Operations.update_priority )( + scheduler, + the_thread, + new_priority + ); +} + +/** + * @brief Compares two priority values. + * + * @param[in] scheduler The scheduler instance. + * @param[in] p1 The first priority value. + * @param[in] p2 The second priority value. + * + * @retval negative The value @a p1 encodes a lower priority than @a p2 in the + * intuitive sense of priority. + * @retval 0 The priorities @a p1 and @a p2 are equal. + * @retval positive The value @a p1 encodes a higher priority than @a p2 in the + * intuitive sense of priority. + * + * @see _Scheduler_Is_priority_lower_than() and + * _Scheduler_Is_priority_higher_than(). + */ +RTEMS_INLINE_ROUTINE int _Scheduler_Priority_compare( + const Scheduler_Control *scheduler, + Priority_Control p1, + Priority_Control p2 +) +{ + return ( *scheduler->Operations.priority_compare )( p1, p2 ); +} + +/** + * @brief Releases a job of a thread with respect to the scheduler. + * + * @param[in] the_thread The thread. + * @param[in] length The period length. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Release_job( + Thread_Control *the_thread, + uint32_t length +) +{ + const Scheduler_Control *scheduler = _Scheduler_Get( the_thread ); + + ( *scheduler->Operations.release_job )( scheduler, the_thread, length ); +} + +/** + * @brief Scheduler method invoked at each clock tick. + * + * This method is invoked at each clock tick to allow the scheduler + * implementation to perform any activities required. For the + * scheduler which support standard RTEMS features, this includes + * time-slicing management. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_Tick( void ) +{ + uint32_t cpu_count = _SMP_Get_processor_count(); + uint32_t cpu_index; + + for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) { + const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu ); + Thread_Control *executing = cpu->executing; + + if ( scheduler != NULL && executing != NULL ) { + ( *scheduler->Operations.tick )( scheduler, executing ); + } + } +} + +/** + * @brief Starts the idle thread for a particular processor. + * + * @param[in] scheduler The scheduler instance. + * @param[in,out] the_thread The idle thread for the processor. + * @param[in,out] cpu The processor for the idle thread. + * + * @see _Thread_Create_idle(). + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Per_CPU_Control *cpu +) +{ + ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu ); +} + +#if defined(RTEMS_SMP) +RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment( + uint32_t cpu_index +) +{ + return &_Scheduler_Assignments[ cpu_index ]; +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor( + const Scheduler_Assignment *assignment +) +{ + return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0; +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor( + const Scheduler_Assignment *assignment +) +{ + return assignment->scheduler != NULL; +} +#endif /* defined(RTEMS_SMP) */ + +RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership( + const Scheduler_Control *scheduler, + uint32_t cpu_index +) +{ +#if defined(RTEMS_SMP) + const Scheduler_Assignment *assignment = + _Scheduler_Get_assignment( cpu_index ); + + return assignment->scheduler == scheduler; +#else + (void) scheduler; + (void) cpu_index; + + return true; +#endif +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Set( + const 
Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread ); + + if ( current_scheduler != scheduler ) { + _Thread_Set_state( the_thread, STATES_MIGRATING ); + _Scheduler_Node_destroy( current_scheduler, the_thread ); + the_thread->Scheduler.own_control = scheduler; + the_thread->Scheduler.control = scheduler; + _Scheduler_Node_initialize( scheduler, the_thread ); + _Scheduler_Update_priority( the_thread, the_thread->current_priority ); + _Thread_Clear_state( the_thread, STATES_MIGRATING ); + } +#else + (void) scheduler; +#endif +} + +#if defined(__RTEMS_HAVE_SYS_CPUSET_H__) + +RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set( + const Scheduler_Control *scheduler, + size_t cpusetsize, + cpu_set_t *cpuset +) +{ + uint32_t cpu_count = _SMP_Get_processor_count(); + uint32_t cpu_index; + + CPU_ZERO_S( cpusetsize, cpuset ); + + for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) { +#if defined(RTEMS_SMP) + if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) { + CPU_SET_S( (int) cpu_index, cpusetsize, cpuset ); + } +#else + (void) scheduler; + + CPU_SET_S( (int) cpu_index, cpusetsize, cpuset ); +#endif + } +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + size_t cpusetsize, + cpu_set_t *cpuset +) +{ + (void) the_thread; + + _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset ); + + return true; +} + +bool _Scheduler_Get_affinity( + Thread_Control *the_thread, + size_t cpusetsize, + cpu_set_t *cpuset +); + +RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + size_t cpusetsize, + const cpu_set_t *cpuset +) +{ + uint32_t cpu_count = _SMP_Get_processor_count(); + uint32_t cpu_index; + bool ok = true; + + for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) { 
+#if defined(RTEMS_SMP) + const Scheduler_Control *scheduler_of_cpu = + _Scheduler_Get_by_CPU_index( cpu_index ); + + ok = ok + && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset ) + || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset ) + && scheduler != scheduler_of_cpu ) ); +#else + (void) scheduler; + + ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset ); +#endif + } + + return ok; +} + +bool _Scheduler_Set_affinity( + Thread_Control *the_thread, + size_t cpusetsize, + const cpu_set_t *cpuset +); + +#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */ + +RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir( + Thread_Control *new_heir, + bool force_dispatch +) +{ + Thread_Control *heir = _Thread_Heir; + + if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) { + _Thread_Heir = new_heir; + _Thread_Dispatch_necessary = true; + } +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + void ( *extract )( + const Scheduler_Control *, + Thread_Control * ), + void ( *schedule )( + const Scheduler_Control *, + Thread_Control *, + bool ) +) +{ + ( *extract )( scheduler, the_thread ); + + /* TODO: flash critical section? */ + + if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) { + ( *schedule )( scheduler, the_thread, true ); + } +} + +/** + * @brief Returns true if @a p1 encodes a lower priority than @a p2 in the + * intuitive sense of priority. + */ +RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_lower_than( + const Scheduler_Control *scheduler, + Priority_Control p1, + Priority_Control p2 +) +{ + return _Scheduler_Priority_compare( scheduler, p1, p2 ) < 0; +} + +/** + * @brief Returns true if @a p1 encodes a higher priority than @a p2 in the + * intuitive sense of priority. 
+ */ +RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_higher_than( + const Scheduler_Control *scheduler, + Priority_Control p1, + Priority_Control p2 +) +{ + return _Scheduler_Priority_compare( scheduler, p1, p2 ) > 0; +} + +RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count( + const Scheduler_Control *scheduler +) +{ +#if defined(RTEMS_SMP) + return _Scheduler_Get_context( scheduler )->processor_count; +#else + (void) scheduler; + + return 1; +#endif +} + +RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index ) +{ + return _Objects_Build_id( + OBJECTS_FAKE_OBJECTS_API, + OBJECTS_FAKE_OBJECTS_SCHEDULERS, + _Objects_Local_node, + scheduler_index + 1 + ); +} + +RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id ) +{ + uint32_t minimum_id = _Scheduler_Build_id( 0 ); + + return id - minimum_id; +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id( + Objects_Id id, + const Scheduler_Control **scheduler_p +) +{ + uint32_t index = _Scheduler_Get_index_by_id( id ); + const Scheduler_Control *scheduler = &_Scheduler_Table[ index ]; + + *scheduler_p = scheduler; + + return index < _Scheduler_Count + && _Scheduler_Get_processor_count( scheduler ) > 0; +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id ) +{ + const Scheduler_Control *scheduler; + bool ok = _Scheduler_Get_by_id( id, &scheduler ); + + (void) scheduler; + + return ok; +} + +RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index( + const Scheduler_Control *scheduler +) +{ + return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]); +} + +RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node( + const Thread_Control *the_thread +) +{ + return the_thread->Scheduler.node; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize( + Scheduler_Node *node, + Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + node->user = the_thread; + node->help_state = SCHEDULER_HELP_YOURSELF; + node->owner = the_thread; + node->idle = NULL; + 
node->accepts_help = the_thread; +#else + (void) node; + (void) the_thread; +#endif +} + +#if defined(RTEMS_SMP) +/** + * @brief Gets an idle thread from the scheduler instance. + * + * @param[in] context The scheduler instance context. + * + * @retval idle An idle thread for use. This function must always return an + * idle thread. If none is available, then this is a fatal error. + */ +typedef Thread_Control *( *Scheduler_Get_idle_thread )( + Scheduler_Context *context +); + +/** + * @brief Releases an idle thread to the scheduler instance for reuse. + * + * @param[in] context The scheduler instance context. + * @param[in] idle The idle thread to release + */ +typedef void ( *Scheduler_Release_idle_thread )( + Scheduler_Context *context, + Thread_Control *idle +); + +RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner( + const Scheduler_Node *node +) +{ + return node->owner; +} + +RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle( + const Scheduler_Node *node +) +{ + return node->idle; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user( + Scheduler_Node *node, + Thread_Control *user +) +{ + node->user = user; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node( + Thread_Control *the_thread, + Scheduler_Node *node +) +{ + the_thread->Scheduler.node = node; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node( + Thread_Control *the_thread, + Scheduler_Node *node, + const Thread_Control *previous_user_of_node +) +{ + const Scheduler_Control *scheduler = + _Scheduler_Get_own( previous_user_of_node ); + + the_thread->Scheduler.control = scheduler; + _Scheduler_Thread_set_node( the_thread, node ); +} + +extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ]; + +RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state( + Thread_Control *the_thread, + Thread_Scheduler_state new_state +) +{ + _Assert( + _Scheduler_Thread_state_valid_state_changes + [ the_thread->Scheduler.state ][ new_state ] + ); + 
+ the_thread->Scheduler.state = new_state; +} + +/** + * @brief Changes the scheduler help state of a thread. + * + * @param[in] the_thread The thread. + * @param[in] new_help_state The new help state. + * + * @return The previous help state. + */ +RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state( + Thread_Control *the_thread, + Scheduler_Help_state new_help_state +) +{ + Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread ); + Scheduler_Help_state previous_help_state = node->help_state; + + node->help_state = new_help_state; + + return previous_help_state; +} + +/** + * @brief Changes the resource tree root of a thread. + * + * For each node of the resource sub-tree specified by the top thread the + * scheduler asks for help. So the root thread gains access to all scheduler + * nodes corresponding to the resource sub-tree. In case a thread previously + * granted help is displaced by this operation, then the scheduler asks for + * help using its remaining resource tree. + * + * The run-time of this function depends on the size of the resource sub-tree + * and other resource trees in case threads in need for help are produced + * during this operation. + * + * @param[in] top The thread specifying the resource sub-tree top. + * @param[in] root The thread specifying the new resource sub-tree root. 
 */
void _Scheduler_Thread_change_resource_root(
  Thread_Control *top,
  Thread_Control *root
);

/**
 * @brief Sets the idle thread of this scheduler node.
 *
 * The node must be in the SCHEDULER_HELP_ACTIVE_OWNER or
 * SCHEDULER_HELP_ACTIVE_RIVAL helping state, must not already have an idle
 * thread, and its owner must still be its user (enforced by assertions).
 *
 * @param[in] node The node which gets the idle thread.
 * @param[in] idle The idle thread to use for this node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread assigned to @a node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Scheduler_Get_idle_thread get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

/**
 * @brief Scheduling decisions returned by _Scheduler_Try_to_schedule_node().
 */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled after an exchange of the idle thread provided by the caller.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled
 *   and should be blocked.
+ */ +RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action +_Scheduler_Try_to_schedule_node( + Scheduler_Context *context, + Scheduler_Node *node, + Thread_Control *idle, + Scheduler_Get_idle_thread get_idle_thread +) +{ + Scheduler_Try_to_schedule_action action; + Thread_Control *owner; + Thread_Control *user; + + action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE; + + if ( node->help_state == SCHEDULER_HELP_YOURSELF ) { + return action; + } + + owner = _Scheduler_Node_get_owner( node ); + user = _Scheduler_Node_get_user( node ); + + if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) { + if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) { + _Scheduler_Thread_set_scheduler_and_node( user, node, owner ); + } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) { + if ( idle != NULL ) { + action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE; + } else { + _Scheduler_Use_idle_thread( context, node, get_idle_thread ); + } + } else { + _Scheduler_Node_set_user( node, owner ); + } + } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) { + if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) { + _Scheduler_Thread_set_scheduler_and_node( user, node, owner ); + } else if ( idle != NULL ) { + action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE; + } else { + _Scheduler_Use_idle_thread( context, node, get_idle_thread ); + } + } else { + _Assert( node->help_state == SCHEDULER_HELP_PASSIVE ); + + if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) { + _Scheduler_Thread_set_scheduler_and_node( user, node, owner ); + } else { + action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK; + } + } + + return action; +} + +/** + * @brief Release an idle thread using this scheduler node. + * + * @param[in] context The scheduler instance context. + * @param[in] node The node which may have an idle thread as user. + * @param[in] release_idle_thread Function to release an idle thread. + * + * @retval idle The idle thread which used this node. 
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Scheduler_Release_idle_thread release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    /* Reset the node to be used by its owner again. */
    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

/**
 * @brief Exchanges the idle thread of one scheduler node with another.
 *
 * The idle thread currently used by @a uses_idle is handed over to
 * @a needs_idle.  The user of @a uses_idle is reset to its owner.
 *
 * @param[in] needs_idle The node which receives the idle thread.
 * @param[in] uses_idle The node which currently uses the idle thread.
 * @param[in] idle The idle thread used by @a uses_idle.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  bool is_scheduled,
  Scheduler_Get_idle_thread get_idle_thread
)
{
  Thread_Control *old_user;
  Thread_Control *new_user;

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    /* No helping protocol in use, continue with the normal block operation. */
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          /* Hand the node back to its ready owner. */
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  /*
   * new_user is only non-NULL on paths which also assigned old_user, so
   * old_user is initialized whenever it is read below.
   */
  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return false;
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
+ * + * @retval true Continue with the unblocking operation. + * @retval false Otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node( + Scheduler_Context *context, + Thread_Control *the_thread, + Scheduler_Node *node, + bool is_scheduled, + Scheduler_Release_idle_thread release_idle_thread +) +{ + bool unblock; + + if ( is_scheduled ) { + Thread_Control *old_user = _Scheduler_Node_get_user( node ); + Per_CPU_Control *cpu = _Thread_Get_CPU( old_user ); + Thread_Control *idle = _Scheduler_Release_idle_thread( + context, + node, + release_idle_thread + ); + Thread_Control *owner = _Scheduler_Node_get_owner( node ); + Thread_Control *new_user; + + if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) { + _Assert( idle != NULL ); + new_user = the_thread; + } else if ( idle != NULL ) { + _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); + new_user = the_thread; + } else if ( the_thread != owner ) { + _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); + _Assert( old_user != the_thread ); + _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY ); + new_user = the_thread; + _Scheduler_Node_set_user( node, new_user ); + } else { + _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); + _Assert( old_user != the_thread ); + _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY ); + new_user = NULL; + } + + if ( new_user != NULL ) { + _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED ); + _Thread_Set_CPU( new_user, cpu ); + _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user ); + } + + unblock = false; + } else { + _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY ); + + unblock = true; + } + + return unblock; +} + +/** + * @brief Asks a ready scheduler node for help. + * + * @param[in] node The ready node offering help. + * @param[in] needs_help The thread needing help. + * + * @retval needs_help The thread needing help. 
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Thread_Control *offers_help,
  Thread_Control *needs_help,
  Thread_Control *previous_accepts_help,
  Scheduler_Release_idle_thread release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  /* old_user and new_user are either both NULL or both non-NULL here. */
  if ( new_user != old_user ) {
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Scheduler_Thread_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The blocked node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
+ */ +RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help( + Scheduler_Context *context, + Scheduler_Node *node, + Thread_Control *offers_help, + Thread_Control *needs_help +) +{ + bool enqueue; + + _Assert( node->help_state == SCHEDULER_HELP_PASSIVE ); + + if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) { + _Scheduler_Node_set_user( node, needs_help ); + _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help ); + + enqueue = true; + } else { + enqueue = false; + } + + return enqueue; +} +#endif + +ISR_LOCK_DECLARE( extern, _Scheduler_Lock ) + +/** + * @brief Acquires the scheduler instance of the thread. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context for _Scheduler_Release(). + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Acquire( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + (void) the_thread; + _ISR_lock_ISR_disable_and_acquire( &_Scheduler_Lock, lock_context ); +} + +/** + * @brief Releases the scheduler instance of the thread. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context used for _Scheduler_Acquire(). + */ +RTEMS_INLINE_ROUTINE void _Scheduler_Release( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + (void) the_thread; + _ISR_lock_Release_and_ISR_enable( &_Scheduler_Lock, lock_context ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulerpriority.h b/include/rtems/score/schedulerpriority.h new file mode 100644 index 0000000000..5859af12f3 --- /dev/null +++ b/include/rtems/score/schedulerpriority.h @@ -0,0 +1,211 @@ +/** + * @file rtems/score/schedulerpriority.h + * + * @brief Thread Manipulation with the Priority-Based Scheduler + * + * This include file contains all the constants and structures associated + * with the manipulation of threads for the priority-based scheduler. 
 */

/*
 * Copyright (c) 2010 Gedare Bloom.
 * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERPRIORITY_H
#define _RTEMS_SCORE_SCHEDULERPRIORITY_H

#include <rtems/score/chain.h>
#include <rtems/score/prioritybitmap.h>
#include <rtems/score/scheduler.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup ScoreSchedulerDPS Deterministic Priority Scheduler
 *
 * @ingroup ScoreScheduler
 */
/**@{*/

/**
 * Entry points for the Deterministic Priority Based Scheduler.
 */
#define SCHEDULER_PRIORITY_ENTRY_POINTS \
  { \
    _Scheduler_priority_Initialize,       /* initialize entry point */ \
    _Scheduler_priority_Schedule,         /* schedule entry point */ \
    _Scheduler_priority_Yield,            /* yield entry point */ \
    _Scheduler_priority_Block,            /* block entry point */ \
    _Scheduler_priority_Unblock,          /* unblock entry point */ \
    _Scheduler_priority_Change_priority,  /* change priority entry point */ \
    SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
    _Scheduler_default_Node_initialize,   /* node initialize entry point */ \
    _Scheduler_default_Node_destroy,      /* node destroy entry point */ \
    _Scheduler_priority_Update_priority,  /* update priority entry point */ \
    _Scheduler_priority_Priority_compare, /* compares two priorities */ \
    _Scheduler_default_Release_job,       /* new period of task */ \
    _Scheduler_default_Tick,              /* tick entry point */ \
    _Scheduler_default_Start_idle         /* start idle entry point */ \
    SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
  }

typedef struct {
  /**
   * @brief Basic scheduler context.
   */
  Scheduler_Context Base;

  /**
   * @brief Bit map to indicate non-empty ready queues.
   */
  Priority_bit_map_Control Bit_map;

  /**
   * @brief One ready queue per priority level.
+ */ + Chain_Control Ready[ 0 ]; +} Scheduler_priority_Context; + +/** + * @brief Data for ready queue operations. + */ +typedef struct { + /** This field points to the Ready FIFO for this thread's priority. */ + Chain_Control *ready_chain; + + /** This field contains precalculated priority map indices. */ + Priority_bit_map_Information Priority_map; +} Scheduler_priority_Ready_queue; + +/** + * @brief Scheduler node specialization for Deterministic Priority schedulers. + */ +typedef struct { + /** + * @brief Basic scheduler node. + */ + Scheduler_Node Base; + + /** + * @brief The associated ready queue of this node. + */ + Scheduler_priority_Ready_queue Ready_queue; +} Scheduler_priority_Node; + +/** + * @brief Initializes the priority scheduler. + * This routine initializes the priority scheduler. + */ +void _Scheduler_priority_Initialize( const Scheduler_Control *scheduler ); + +/** + * @brief Removes @a the_thread from the scheduling decision. + * + * This routine removes @a the_thread from the scheduling decision, + * that is, removes it from the ready queue. It performs + * any necessary scheduling operations including the selection of + * a new heir thread. + * + * @param[in] scheduler The scheduler instance. + * @param[in] the_thread is the thread to be blocked + */ +void _Scheduler_priority_Block( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Sets the heir thread to be the next ready thread. + * + * This kernel routine sets the heir thread to be the next ready thread + * by invoking the_scheduler->ready_queue->operations->first(). + */ +void _Scheduler_priority_Schedule( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +/** + * @brief Updates the scheduler node to reflect the new priority of the + * thread. 
 */
void _Scheduler_priority_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Priority_Control new_priority
);

/**
 * @brief Add @a the_thread to the scheduling decision.
 *
 * This routine adds @a the_thread to the scheduling decision,
 * that is, adds it to the ready queue and
 * updates any appropriate scheduling variables, for example the heir thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to be unblocked.
 */
Scheduler_Void_or_thread _Scheduler_priority_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread
);

/**
 * @brief Changes the priority of @a the_thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[in] prepend_it If true, the thread is inserted as the first element
 *   of its priority group, otherwise as the last.
 */
Scheduler_Void_or_thread _Scheduler_priority_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread,
  Priority_Control new_priority,
  bool prepend_it
);

/**
 * @brief The specified THREAD yields.
 *
 * This routine is invoked when a thread wishes to voluntarily
 * transfer control of the processor to another thread in the queue.
 *
 * This routine will remove the specified THREAD from the ready queue
 * and place it immediately at the rear of this chain.  Reset timeslice
 * and yield the processor functions both use this routine, therefore if
 * reset is true and this is the only thread on the queue then the
 * timeslice counter is reset.  The heir THREAD will be updated if the
 * running thread is also currently the heir.
 *
 * - INTERRUPT LATENCY:
 *   + ready chain
 *   + select heir
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The yielding thread.
 */
Scheduler_Void_or_thread _Scheduler_priority_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control *the_thread
);

/**
 * @brief Compare two priorities.
 *
 * This routine compares two priorities.
 *
 * @retval >0 for p1 > p2; 0 for p1 == p2; <0 for p1 < p2.
+ */ +int _Scheduler_priority_Priority_compare( + Priority_Control p1, + Priority_Control p2 +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulerpriorityimpl.h b/include/rtems/score/schedulerpriorityimpl.h new file mode 100644 index 0000000000..ab5abdc65f --- /dev/null +++ b/include/rtems/score/schedulerpriorityimpl.h @@ -0,0 +1,245 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the Manipulation of the + * Priority-Based Scheduling Structures + * + * This inline file contains all of the inlined routines associated with + * the manipulation of the priority-based scheduling structures. + */ + +/* + * Copyright (C) 2010 Gedare Bloom. + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYIMPL_H +#define _RTEMS_SCORE_SCHEDULERPRIORITYIMPL_H + +#include <rtems/score/schedulerpriority.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/prioritybitmapimpl.h> +#include <rtems/score/schedulerimpl.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreSchedulerDPS + */ +/**@{**/ + +RTEMS_INLINE_ROUTINE Scheduler_priority_Context * + _Scheduler_priority_Get_context( const Scheduler_Control *scheduler ) +{ + return (Scheduler_priority_Context *) _Scheduler_Get_context( scheduler ); +} + +RTEMS_INLINE_ROUTINE Scheduler_priority_Node *_Scheduler_priority_Thread_get_node( + Thread_Control *the_thread +) +{ + return (Scheduler_priority_Node *) _Scheduler_Thread_get_node( the_thread ); +} + +/** + * @brief Ready queue initialization. + * + * This routine initializes @a ready_queues for priority-based scheduling. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_initialize( + Chain_Control *ready_queues +) +{ + size_t index; + + /* initialize ready queue structures */ + for( index=0; index <= PRIORITY_MAXIMUM; index++) + _Chain_Initialize_empty( &ready_queues[index] ); +} + +/** + * @brief Enqueues a node on the specified ready queue. + * + * The node is placed as the last element of its priority group. + * + * @param[in] node The node to enqueue. + * @param[in] ready_queue The ready queue. + * @param[in] bit_map The priority bit map of the scheduler instance. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue( + Chain_Node *node, + Scheduler_priority_Ready_queue *ready_queue, + Priority_bit_map_Control *bit_map +) +{ + Chain_Control *ready_chain = ready_queue->ready_chain; + + _Chain_Append_unprotected( ready_chain, node ); + _Priority_bit_map_Add( bit_map, &ready_queue->Priority_map ); +} + +/** + * @brief Enqueues a node on the specified ready queue as first. + * + * The node is placed as the first element of its priority group. + * + * @param[in] node The node to enqueue as first. + * @param[in] ready_queue The ready queue. + * @param[in] bit_map The priority bit map of the scheduler instance. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_enqueue_first( + Chain_Node *node, + Scheduler_priority_Ready_queue *ready_queue, + Priority_bit_map_Control *bit_map +) +{ + Chain_Control *ready_chain = ready_queue->ready_chain; + + _Chain_Prepend_unprotected( ready_chain, node ); + _Priority_bit_map_Add( bit_map, &ready_queue->Priority_map ); +} + +/** + * @brief Extracts a node from the specified ready queue. + * + * @param[in] node The node to extract. + * @param[in] ready_queue The ready queue. + * @param[in] bit_map The priority bit map of the scheduler instance. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_extract( + Chain_Node *node, + Scheduler_priority_Ready_queue *ready_queue, + Priority_bit_map_Control *bit_map +) +{ + Chain_Control *ready_chain = ready_queue->ready_chain; + + if ( _Chain_Has_only_one_node( ready_chain ) ) { + _Chain_Initialize_empty( ready_chain ); + _Priority_bit_map_Remove( bit_map, &ready_queue->Priority_map ); + } else { + _Chain_Extract_unprotected( node ); + } +} + +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Extract_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + Scheduler_priority_Context *context = + _Scheduler_priority_Get_context( scheduler ); + Scheduler_priority_Node *node = _Scheduler_priority_Thread_get_node( the_thread ); + + _Scheduler_priority_Ready_queue_extract( + &the_thread->Object.Node, + &node->Ready_queue, + &context->Bit_map + ); +} + +/** + * @brief Return a pointer to the first node. + * + * This routines returns a pointer to the first node on @a ready_queues. + * + * @param[in] bit_map The priority bit map of the scheduler instance. + * @param[in] ready_queues The ready queues of the scheduler instance. + * + * @return This method returns the first node. + */ +RTEMS_INLINE_ROUTINE Chain_Node *_Scheduler_priority_Ready_queue_first( + Priority_bit_map_Control *bit_map, + Chain_Control *ready_queues +) +{ + Priority_Control index = _Priority_bit_map_Get_highest( bit_map ); + Chain_Node *first = _Chain_First( &ready_queues[ index ] ); + + _Assert( first != _Chain_Tail( &ready_queues[ index ] ) ); + + return first; +} + +/** + * @brief Scheduling decision logic. + * + * This kernel routine implements scheduling decision logic + * for priority-based scheduling. 
+ */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Schedule_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + bool force_dispatch +) +{ + Scheduler_priority_Context *context = + _Scheduler_priority_Get_context( scheduler ); + Thread_Control *heir = (Thread_Control *) + _Scheduler_priority_Ready_queue_first( + &context->Bit_map, + &context->Ready[ 0 ] + ); + + ( void ) the_thread; + + _Scheduler_Update_heir( heir, force_dispatch ); +} + +/** + * @brief Updates the specified ready queue data according to the new priority + * value. + * + * @param[in] ready_queue The ready queue. + * @param[in] new_priority The new priority. + * @param[in] bit_map The priority bit map of the scheduler instance. + * @param[in] ready_queues The ready queues of the scheduler instance. + */ +RTEMS_INLINE_ROUTINE void _Scheduler_priority_Ready_queue_update( + Scheduler_priority_Ready_queue *ready_queue, + Priority_Control new_priority, + Priority_bit_map_Control *bit_map, + Chain_Control *ready_queues +) +{ + ready_queue->ready_chain = &ready_queues[ new_priority ]; + + _Priority_bit_map_Initialize_information( + bit_map, + &ready_queue->Priority_map, + new_priority + ); +} + +/** + * @brief Priority comparison. + * + * This routine implements priority comparison for priority-based + * scheduling. + * + * @return >0 for higher priority, 0 for equal and <0 for lower priority. + */ +RTEMS_INLINE_ROUTINE int _Scheduler_priority_Priority_compare_body( + Priority_Control p1, + Priority_Control p2 +) +{ + /* High priority in priority scheduler is represented by low numbers. 
*/ + return ( p2 - p1 ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulerprioritysmp.h b/include/rtems/score/schedulerprioritysmp.h new file mode 100644 index 0000000000..d8ce7dc1dd --- /dev/null +++ b/include/rtems/score/schedulerprioritysmp.h @@ -0,0 +1,145 @@ +/** + * @file + * + * @ingroup ScoreSchedulerPrioritySMP + * + * @brief Deterministic Priority SMP Scheduler API + */ + +/* + * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H +#define _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H + +#include <rtems/score/scheduler.h> +#include <rtems/score/schedulerpriority.h> +#include <rtems/score/schedulersmp.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreSchedulerPrioritySMP Deterministic Priority SMP Scheduler + * + * @ingroup ScoreSchedulerSMP + * + * This is an implementation of the global fixed priority scheduler (G-FP). It + * uses one ready chain per priority to ensure constant time insert operations. + * The scheduled chain uses linear insert operations and has at most processor + * count entries. Since the processor and priority count are constants all + * scheduler operations complete in a bounded execution time. + * + * The thread preempt mode will be ignored. + * + * @{ + */ + +/** + * @brief Scheduler context specialization for Deterministic Priority SMP + * schedulers. 
+ */ +typedef struct { + Scheduler_SMP_Context Base; + Priority_bit_map_Control Bit_map; + Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ]; +} Scheduler_priority_SMP_Context; + +/** + * @brief Scheduler node specialization for Deterministic Priority SMP + * schedulers. + */ +typedef struct { + /** + * @brief SMP scheduler node. + */ + Scheduler_SMP_Node Base; + + /** + * @brief The associated ready queue of this node. + */ + Scheduler_priority_Ready_queue Ready_queue; +} Scheduler_priority_SMP_Node; + +/** + * @brief Entry points for the Priority SMP Scheduler. + */ +#define SCHEDULER_PRIORITY_SMP_ENTRY_POINTS \ + { \ + _Scheduler_priority_SMP_Initialize, \ + _Scheduler_default_Schedule, \ + _Scheduler_priority_SMP_Yield, \ + _Scheduler_priority_SMP_Block, \ + _Scheduler_priority_SMP_Unblock, \ + _Scheduler_priority_SMP_Change_priority, \ + _Scheduler_priority_SMP_Ask_for_help, \ + _Scheduler_priority_SMP_Node_initialize, \ + _Scheduler_default_Node_destroy, \ + _Scheduler_priority_SMP_Update_priority, \ + _Scheduler_priority_Priority_compare, \ + _Scheduler_default_Release_job, \ + _Scheduler_default_Tick, \ + _Scheduler_SMP_Start_idle \ + SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + } + +void _Scheduler_priority_SMP_Initialize( const Scheduler_Control *scheduler ); + +void _Scheduler_priority_SMP_Node_initialize( + const Scheduler_Control *scheduler, + Thread_Control *thread +); + +void _Scheduler_priority_SMP_Block( + const Scheduler_Control *scheduler, + Thread_Control *thread +); + +Thread_Control *_Scheduler_priority_SMP_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *thread +); + +Thread_Control *_Scheduler_priority_SMP_Change_priority( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Priority_Control new_priority, + bool prepend_it +); + +Thread_Control *_Scheduler_priority_SMP_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *needs_help, + Thread_Control *offers_help +); + +void 
_Scheduler_priority_SMP_Update_priority( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Priority_Control new_priority +); + +Thread_Control *_Scheduler_priority_SMP_Yield( + const Scheduler_Control *scheduler, + Thread_Control *thread +); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H */ diff --git a/include/rtems/score/schedulersimple.h b/include/rtems/score/schedulersimple.h new file mode 100644 index 0000000000..201c1b6628 --- /dev/null +++ b/include/rtems/score/schedulersimple.h @@ -0,0 +1,159 @@ +/** + * @file rtems/score/schedulersimple.h + * + * @brief Manipulation of Threads Simple-Priority-Based Ready Queue + * + * This include file contains all the constants and structures associated + * with the manipulation of threads on a simple-priority-based ready queue. + */ + +/* + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SCHEDULERSIMPLE_H +#define _RTEMS_SCORE_SCHEDULERSIMPLE_H + +#include <rtems/score/scheduler.h> +#include <rtems/score/schedulerpriority.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSchedulerSimple Simple Priority Scheduler + * + * @ingroup ScoreScheduler + */ +/**@{*/ + +/** + * Entry points for Scheduler Simple + */ +#define SCHEDULER_SIMPLE_ENTRY_POINTS \ + { \ + _Scheduler_simple_Initialize, /* initialize entry point */ \ + _Scheduler_simple_Schedule, /* schedule entry point */ \ + _Scheduler_simple_Yield, /* yield entry point */ \ + _Scheduler_simple_Block, /* block entry point */ \ + _Scheduler_simple_Unblock, /* unblock entry point */ \ + _Scheduler_simple_Change_priority, /* change priority entry point */ \ + SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \ + _Scheduler_default_Node_initialize, /* node initialize entry point */ \ + _Scheduler_default_Node_destroy, /* node destroy entry point */ \ + _Scheduler_default_Update_priority, /* update priority entry point */ \ + _Scheduler_priority_Priority_compare, /* compares two priorities */ \ + _Scheduler_default_Release_job, /* new period of task */ \ + _Scheduler_default_Tick, /* tick entry point */ \ + _Scheduler_default_Start_idle /* start idle entry point */ \ + SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + } + +/** + * @brief Simple scheduler context. + */ +typedef struct { + /** + * @brief Basic scheduler context. + */ + Scheduler_Context Base; + + /** + * @brief One ready queue for all ready threads. + */ + Chain_Control Ready; +} Scheduler_simple_Context; + +/** + * @brief Initialize simple scheduler. + * + * This routine initializes the simple scheduler. + */ +void _Scheduler_simple_Initialize( const Scheduler_Control *scheduler ); + +/** + * This routine sets the heir thread to be the next ready thread + * on the ready queue by getting the first node in the scheduler + * information. + * + * @param[in] scheduler The scheduler instance. 
+ * @param[in] the_thread causing the scheduling operation.
+ */
+void _Scheduler_simple_Schedule(
+  const Scheduler_Control *scheduler,
+  Thread_Control *the_thread
+);
+
+/**
+ * @brief Invoked when a thread wishes to voluntarily
+ * transfer control of the processor to another thread in the queue.
+ *
+ * This routine is invoked when a thread wishes to voluntarily
+ * transfer control of the processor to another thread in the queue.
+ * It will remove the specified THREAD from the scheduler information
+ * (where the ready queue is stored) and place it immediately
+ * between the last entry of its priority and the next priority thread.
+ * Reset timeslice and yield the processor functions both use this routine,
+ * therefore if reset is true and this is the only thread on the queue then
+ * the timeslice counter is reset.  The heir THREAD will be updated if the
+ * running thread is also currently the heir.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in,out] the_thread The yielding thread.
+ */
+Scheduler_Void_or_thread _Scheduler_simple_Yield(
+  const Scheduler_Control *scheduler,
+  Thread_Control *the_thread
+);
+
+/**
+ * @brief Remove a simple-priority-based thread from the queue.
+ *
+ * This routine removes @a the_thread from the scheduling decision,
+ * that is, removes it from the ready queue.  It performs
+ * any necessary scheduling operations including the selection of
+ * a new heir thread.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] the_thread is the thread that is to be blocked
+ */
+void _Scheduler_simple_Block(
+  const Scheduler_Control *scheduler,
+  Thread_Control *the_thread
+);
+
+/**
+ * @brief Unblock a simple-priority-based thread.
+ *
+ * This routine adds @a the_thread to the scheduling decision,
+ * that is, adds it to the ready queue and
+ * updates any appropriate scheduling variables, for example the heir thread.
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] the_thread is the thread that is to be unblocked + */ +Scheduler_Void_or_thread _Scheduler_simple_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +); + +Scheduler_Void_or_thread _Scheduler_simple_Change_priority( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Priority_Control new_priority, + bool prepend_it +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulersimpleimpl.h b/include/rtems/score/schedulersimpleimpl.h new file mode 100644 index 0000000000..b73a1b2c78 --- /dev/null +++ b/include/rtems/score/schedulersimpleimpl.h @@ -0,0 +1,119 @@ +/** + * @file + * + * @brief Inlined Routines Associated with the Manipulation of the + * Priority-Based Scheduling Structures + * + * This inline file contains all of the inlined routines associated with + * the manipulation of the priority-based scheduling structures. + */ + +/* + * Copyright (C) 2011 On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SCHEDULERSIMPLEIMPL_H +#define _RTEMS_SCORE_SCHEDULERSIMPLEIMPL_H + +#include <rtems/score/schedulersimple.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/schedulerimpl.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreSchedulerSimple + */ +/**@{**/ + +RTEMS_INLINE_ROUTINE Scheduler_simple_Context * + _Scheduler_simple_Get_context( const Scheduler_Control *scheduler ) +{ + return (Scheduler_simple_Context *) _Scheduler_Get_context( scheduler ); +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Insert_priority_lifo_order( + const Chain_Node *to_insert, + const Chain_Node *next +) +{ + const Thread_Control *thread_to_insert = (const Thread_Control *) to_insert; + const Thread_Control *thread_next = (const Thread_Control *) next; + + return thread_to_insert->current_priority <= thread_next->current_priority; +} + +RTEMS_INLINE_ROUTINE bool _Scheduler_simple_Insert_priority_fifo_order( + const Chain_Node *to_insert, + const Chain_Node *next +) +{ + const Thread_Control *thread_to_insert = (const Thread_Control *) to_insert; + const Thread_Control *thread_next = (const Thread_Control *) next; + + return thread_to_insert->current_priority < thread_next->current_priority; +} + +RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert_priority_lifo( + Chain_Control *chain, + Thread_Control *to_insert +) +{ + _Chain_Insert_ordered_unprotected( + chain, + &to_insert->Object.Node, + _Scheduler_simple_Insert_priority_lifo_order + ); +} + +RTEMS_INLINE_ROUTINE void _Scheduler_simple_Insert_priority_fifo( + Chain_Control *chain, + Thread_Control *to_insert +) +{ + _Chain_Insert_ordered_unprotected( + chain, + &to_insert->Object.Node, + _Scheduler_simple_Insert_priority_fifo_order + ); +} + +RTEMS_INLINE_ROUTINE void _Scheduler_simple_Extract( + const Scheduler_Control *scheduler, + Thread_Control *the_thread +) +{ + (void) scheduler; + + _Chain_Extract_unprotected( &the_thread->Object.Node ); +} + 
+RTEMS_INLINE_ROUTINE void _Scheduler_simple_Schedule_body( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + bool force_dispatch +) +{ + Scheduler_simple_Context *context = + _Scheduler_simple_Get_context( scheduler ); + Thread_Control *heir = (Thread_Control *) _Chain_First( &context->Ready ); + + ( void ) the_thread; + + _Scheduler_Update_heir( heir, force_dispatch ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/schedulersmp.h b/include/rtems/score/schedulersmp.h new file mode 100644 index 0000000000..c85445dd7a --- /dev/null +++ b/include/rtems/score/schedulersmp.h @@ -0,0 +1,127 @@ +/** + * @file + * + * @brief SMP Scheduler API + * + * @ingroup ScoreSchedulerSMP + */ + +/* + * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SCHEDULERSMP_H +#define _RTEMS_SCORE_SCHEDULERSMP_H + +#include <rtems/score/chain.h> +#include <rtems/score/scheduler.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreSchedulerSMP SMP Scheduler + * + * @ingroup ScoreScheduler + * + * @{ + */ + +/** + * @brief Scheduler context specialization for SMP schedulers. + */ +typedef struct { + /** + * @brief Basic scheduler context. + */ + Scheduler_Context Base; + + /** + * @brief The chain of scheduled nodes. + */ + Chain_Control Scheduled; + + /** + * @brief Chain of the available idle threads. + * + * Idle threads are used for the scheduler helping protocol. It is crucial + * that the idle threads preserve their relative order. This is the case for + * this priority based scheduler. 
+ */ + Chain_Control Idle_threads; +} Scheduler_SMP_Context; + +/** + * @brief SMP scheduler node states. + */ +typedef enum { + /** + * @brief This scheduler node is blocked. + * + * A scheduler node is blocked if the corresponding thread is not ready. + */ + SCHEDULER_SMP_NODE_BLOCKED, + + /** + * @brief The scheduler node is scheduled. + * + * A scheduler node is scheduled if the corresponding thread is ready and the + * scheduler allocated a processor for it. A scheduled node is assigned to + * exactly one processor. The count of scheduled nodes in this scheduler + * instance equals the processor count owned by the scheduler instance. + */ + SCHEDULER_SMP_NODE_SCHEDULED, + + /** + * @brief This scheduler node is ready. + * + * A scheduler node is ready if the corresponding thread is ready and the + * scheduler did not allocate a processor for it. + */ + SCHEDULER_SMP_NODE_READY +} Scheduler_SMP_Node_state; + +/** + * @brief Scheduler node specialization for SMP schedulers. + */ +typedef struct { + /** + * @brief Basic scheduler node. + */ + Scheduler_Node Base; + + /** + * @brief The state of this node. + */ + Scheduler_SMP_Node_state state; + + /** + * @brief The current priority of thread owning this node. + */ + Priority_Control priority; +} Scheduler_SMP_Node; + +void _Scheduler_SMP_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *thread, + struct Per_CPU_Control *cpu +); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SCHEDULERSMP_H */ diff --git a/include/rtems/score/schedulersmpimpl.h b/include/rtems/score/schedulersmpimpl.h new file mode 100644 index 0000000000..a395f2c0ba --- /dev/null +++ b/include/rtems/score/schedulersmpimpl.h @@ -0,0 +1,1089 @@ +/** + * @file + * + * @brief SMP Scheduler Implementation + * + * @ingroup ScoreSchedulerSMP + */ + +/* + * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 
4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
+#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
+
+#include <rtems/score/schedulersmp.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/schedulersimpleimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSchedulerSMP
+ *
+ * The scheduler nodes can be in three states
+ * - @ref SCHEDULER_SMP_NODE_BLOCKED,
+ * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
+ * - @ref SCHEDULER_SMP_NODE_READY.
+ *
+ * State transitions are triggered via basic operations
+ * - _Scheduler_SMP_Enqueue_ordered(),
+ * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
+ * - _Scheduler_SMP_Block().
+ *
+ * @dot
+ * digraph {
+ *   node [style="filled"];
+ *
+ *   bs [label="BLOCKED"];
+ *   ss [label="SCHEDULED", fillcolor="green"];
+ *   rs [label="READY", fillcolor="red"];
+ *
+ *   edge [label="enqueue"];
+ *   edge [fontcolor="darkgreen", color="darkgreen"];
+ *
+ *   bs -> ss;
+ *
+ *   edge [fontcolor="red", color="red"];
+ *
+ *   bs -> rs;
+ *
+ *   edge [label="enqueue other"];
+ *
+ *   ss -> rs;
+ *
+ *   edge [label="block"];
+ *   edge [fontcolor="black", color="black"];
+ *
+ *   ss -> bs;
+ *   rs -> bs;
+ *
+ *   edge [label="block other"];
+ *   edge [fontcolor="darkgreen", color="darkgreen"];
+ *
+ *   rs -> ss;
+ * }
+ * @enddot
+ *
+ * During system initialization each processor of the scheduler instance starts
+ * with an idle thread assigned to it.  Let's have a look at an example with two
+ * idle threads I and J with priority 5.  We also have blocked threads A, B and
+ * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
+ * with respect to the thread priority from left to right in the below
+ * diagrams.
The highest priority node (lowest priority number) is the + * leftmost node. Since the processor assignment is independent of the thread + * priority the processor indices may move from one state to the other. + * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * subgraph { + * rank = same; + * + * i [label="I (5)", fillcolor="green"]; + * j [label="J (5)", fillcolor="green"]; + * a [label="A (1)"]; + * b [label="B (2)"]; + * c [label="C (3)"]; + * i -> j; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * i -> p0; + * j -> p1; + * } + * @enddot + * + * Lets start A. For this an enqueue operation is performed. + * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * + * subgraph { + * rank = same; + * + * i [label="I (5)", fillcolor="green"]; + * j [label="J (5)", fillcolor="red"]; + * a [label="A (1)", fillcolor="green"]; + * b [label="B (2)"]; + * c [label="C (3)"]; + * a -> i; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * i -> p0; + * a -> p1; + * } + * @enddot + * + * Lets start C. + * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * + * subgraph { + * rank = same; + * + * a [label="A (1)", fillcolor="green"]; + * c [label="C (3)", fillcolor="green"]; + * i [label="I (5)", fillcolor="red"]; + * j [label="J (5)", fillcolor="red"]; + * b [label="B (2)"]; + * a -> c; + * i -> j; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * c -> p0; + * a -> p1; + * } + * @enddot + * + * Lets start B. 
+ * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * + * subgraph { + * rank = same; + * + * a [label="A (1)", fillcolor="green"]; + * b [label="B (2)", fillcolor="green"]; + * c [label="C (3)", fillcolor="red"]; + * i [label="I (5)", fillcolor="red"]; + * j [label="J (5)", fillcolor="red"]; + * a -> b; + * c -> i -> j; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * b -> p0; + * a -> p1; + * } + * @enddot + * + * Lets change the priority of thread A to 4. + * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * + * subgraph { + * rank = same; + * + * b [label="B (2)", fillcolor="green"]; + * c [label="C (3)", fillcolor="green"]; + * a [label="A (4)", fillcolor="red"]; + * i [label="I (5)", fillcolor="red"]; + * j [label="J (5)", fillcolor="red"]; + * b -> c; + * a -> i -> j; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * b -> p0; + * c -> p1; + * } + * @enddot + * + * Now perform a blocking operation with thread B. Please note that thread A + * migrated now from processor 0 to processor 1 and thread C still executes on + * processor 1. 
+ * + * @dot + * digraph { + * node [style="filled"]; + * edge [dir="none"]; + * + * subgraph { + * rank = same; + * + * c [label="C (3)", fillcolor="green"]; + * a [label="A (4)", fillcolor="green"]; + * i [label="I (5)", fillcolor="red"]; + * j [label="J (5)", fillcolor="red"]; + * b [label="B (2)"]; + * c -> a; + * i -> j; + * } + * + * subgraph { + * rank = same; + * + * p0 [label="PROCESSOR 0", shape="box"]; + * p1 [label="PROCESSOR 1", shape="box"]; + * } + * + * a -> p0; + * c -> p1; + * } + * @enddot + * + * @{ + */ + +typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )( + Scheduler_Context *context, + Scheduler_Node *node +); + +typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )( + Scheduler_Context *context, + Scheduler_Node *filter, + Chain_Node_order order +); + +typedef void ( *Scheduler_SMP_Extract )( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +); + +typedef void ( *Scheduler_SMP_Insert )( + Scheduler_Context *context, + Scheduler_Node *node_to_insert +); + +typedef void ( *Scheduler_SMP_Move )( + Scheduler_Context *context, + Scheduler_Node *node_to_move +); + +typedef void ( *Scheduler_SMP_Update )( + Scheduler_Context *context, + Scheduler_Node *node_to_update, + Priority_Control new_priority +); + +typedef Thread_Control *( *Scheduler_SMP_Enqueue )( + Scheduler_Context *context, + Scheduler_Node *node_to_enqueue, + Thread_Control *needs_help +); + +typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )( + Scheduler_Context *context, + Scheduler_Node *node_to_enqueue +); + +typedef void ( *Scheduler_SMP_Allocate_processor )( + Scheduler_Context *context, + Thread_Control *scheduled, + Thread_Control *victim +); + +static inline bool _Scheduler_SMP_Insert_priority_lifo_order( + const Chain_Node *to_insert, + const Chain_Node *next +) +{ + const Scheduler_SMP_Node *node_to_insert = + (const Scheduler_SMP_Node *) to_insert; + const Scheduler_SMP_Node *node_next = + (const Scheduler_SMP_Node *) next; + + 
return node_to_insert->priority <= node_next->priority; +} + +static inline bool _Scheduler_SMP_Insert_priority_fifo_order( + const Chain_Node *to_insert, + const Chain_Node *next +) +{ + const Scheduler_SMP_Node *node_to_insert = + (const Scheduler_SMP_Node *) to_insert; + const Scheduler_SMP_Node *node_next = + (const Scheduler_SMP_Node *) next; + + return node_to_insert->priority < node_next->priority; +} + +static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self( + Scheduler_Context *context +) +{ + return (Scheduler_SMP_Context *) context; +} + +static inline void _Scheduler_SMP_Initialize( + Scheduler_SMP_Context *self +) +{ + _Chain_Initialize_empty( &self->Scheduled ); + _Chain_Initialize_empty( &self->Idle_threads ); +} + +static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node( + Thread_Control *thread +) +{ + return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread ); +} + +static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node( + Thread_Control *thread +) +{ + return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread ); +} + +static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast( + Scheduler_Node *node +) +{ + return (Scheduler_SMP_Node *) node; +} + +static inline void _Scheduler_SMP_Node_initialize( + Scheduler_SMP_Node *node, + Thread_Control *thread +) +{ + _Scheduler_Node_do_initialize( &node->Base, thread ); + node->state = SCHEDULER_SMP_NODE_BLOCKED; +} + +static inline void _Scheduler_SMP_Node_update_priority( + Scheduler_SMP_Node *node, + Priority_Control new_priority +) +{ + node->priority = new_priority; +} + +extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ]; + +static inline void _Scheduler_SMP_Node_change_state( + Scheduler_SMP_Node *node, + Scheduler_SMP_Node_state new_state +) +{ + _Assert( + _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ] + ); + + node->state = new_state; +} + +static inline bool _Scheduler_SMP_Is_processor_owned_by_us( + const 
Scheduler_Context *context, + const Per_CPU_Control *cpu +) +{ + return cpu->scheduler_context == context; +} + +static inline Thread_Control *_Scheduler_SMP_Get_idle_thread( + Scheduler_Context *context +) +{ + Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); + Thread_Control *idle = (Thread_Control *) + _Chain_Get_first_unprotected( &self->Idle_threads ); + + _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) ); + + return idle; +} + +static inline void _Scheduler_SMP_Release_idle_thread( + Scheduler_Context *context, + Thread_Control *idle +) +{ + Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); + + _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node ); +} + +static inline void _Scheduler_SMP_Allocate_processor_lazy( + Scheduler_Context *context, + Thread_Control *scheduled_thread, + Thread_Control *victim_thread +) +{ + Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread ); + Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread ); + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + Thread_Control *heir; + + _Assert( _ISR_Get_level() != 0 ); + + if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) { + if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) { + heir = scheduled_cpu->heir; + _Thread_Dispatch_update_heir( + cpu_self, + scheduled_cpu, + scheduled_thread + ); + } else { + /* We have to force a migration to our processor set */ + _Assert( + scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread + ); + heir = scheduled_thread; + } + } else { + heir = scheduled_thread; + } + + if ( heir != victim_thread ) { + _Thread_Set_CPU( heir, victim_cpu ); + _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir ); + } +} + +static inline void _Scheduler_SMP_Allocate_processor( + Scheduler_Context *context, + Scheduler_Node *scheduled, + Scheduler_Node *victim, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Thread_Control 
*scheduled_thread = _Scheduler_Node_get_user( scheduled ); + Thread_Control *victim_thread = _Scheduler_Node_get_user( victim ); + + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( scheduled ), + SCHEDULER_SMP_NODE_SCHEDULED + ); + _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED ); + + ( *allocate_processor )( context, scheduled_thread, victim_thread ); +} + +static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled( + Scheduler_Context *context, + Scheduler_Node *filter, + Chain_Node_order order +) +{ + Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); + Chain_Control *scheduled = &self->Scheduled; + Scheduler_Node *lowest_scheduled = + (Scheduler_Node *) _Chain_Last( scheduled ); + + (void) filter; + (void) order; + + _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) ); + + return lowest_scheduled; +} + +static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *node, + Scheduler_Node *lowest_scheduled, + Scheduler_SMP_Insert insert_scheduled, + Scheduler_SMP_Move move_from_scheduled_to_ready, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Thread_Control *needs_help; + Scheduler_Try_to_schedule_action action; + + action = _Scheduler_Try_to_schedule_node( + context, + node, + _Scheduler_Node_get_idle( lowest_scheduled ), + _Scheduler_SMP_Get_idle_thread + ); + + if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) { + Thread_Control *lowest_scheduled_user = + _Scheduler_Node_get_user( lowest_scheduled ); + Thread_Control *idle; + + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( lowest_scheduled ), + SCHEDULER_SMP_NODE_READY + ); + _Scheduler_Thread_change_state( + lowest_scheduled_user, + THREAD_SCHEDULER_READY + ); + + _Scheduler_SMP_Allocate_processor( + context, + node, + lowest_scheduled, + allocate_processor + ); + + ( *insert_scheduled )( context, node ); + ( 
*move_from_scheduled_to_ready )( context, lowest_scheduled ); + + idle = _Scheduler_Release_idle_thread( + context, + lowest_scheduled, + _Scheduler_SMP_Release_idle_thread + ); + if ( idle == NULL ) { + needs_help = lowest_scheduled_user; + } else { + needs_help = NULL; + } + } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) { + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( lowest_scheduled ), + SCHEDULER_SMP_NODE_READY + ); + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( node ), + SCHEDULER_SMP_NODE_SCHEDULED + ); + + ( *insert_scheduled )( context, node ); + ( *move_from_scheduled_to_ready )( context, lowest_scheduled ); + + _Scheduler_Exchange_idle_thread( + node, + lowest_scheduled, + _Scheduler_Node_get_idle( lowest_scheduled ) + ); + + needs_help = NULL; + } else { + _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK ); + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( node ), + SCHEDULER_SMP_NODE_BLOCKED + ); + needs_help = NULL; + } + + return needs_help; +} + +/** + * @brief Enqueues a node according to the specified order function. + * + * The node must not be in the scheduled state. + * + * @param[in] context The scheduler instance context. + * @param[in] node The node to enqueue. + * @param[in] needs_help The thread needing help in case the node cannot be + * scheduled. + * @param[in] order The order function. + * @param[in] insert_ready Function to insert a node into the set of ready + * nodes. + * @param[in] insert_scheduled Function to insert a node into the set of + * scheduled nodes. + * @param[in] move_from_scheduled_to_ready Function to move a node from the set + * of scheduled nodes to the set of ready nodes. + * @param[in] get_lowest_scheduled Function to select the node from the + * scheduled nodes to replace. 
It may not be possible to find one, in this + * case a pointer must be returned so that the order functions returns false + * if this pointer is passed as the second argument to the order function. + * @param[in] allocate_processor Function to allocate a processor to a node + * based on the rules of the scheduler. + */ +static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered( + Scheduler_Context *context, + Scheduler_Node *node, + Thread_Control *needs_help, + Chain_Node_order order, + Scheduler_SMP_Insert insert_ready, + Scheduler_SMP_Insert insert_scheduled, + Scheduler_SMP_Move move_from_scheduled_to_ready, + Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Scheduler_Node *lowest_scheduled = + ( *get_lowest_scheduled )( context, node, order ); + + if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) { + needs_help = _Scheduler_SMP_Enqueue_to_scheduled( + context, + node, + lowest_scheduled, + insert_scheduled, + move_from_scheduled_to_ready, + allocate_processor + ); + } else { + ( *insert_ready )( context, node ); + } + + return needs_help; +} + +/** + * @brief Enqueues a scheduled node according to the specified order + * function. + * + * @param[in] context The scheduler instance context. + * @param[in] node The node to enqueue. + * @param[in] order The order function. + * @param[in] extract_from_ready Function to extract a node from the set of + * ready nodes. + * @param[in] get_highest_ready Function to get the highest ready node. + * @param[in] insert_ready Function to insert a node into the set of ready + * nodes. + * @param[in] insert_scheduled Function to insert a node into the set of + * scheduled nodes. + * @param[in] move_from_ready_to_scheduled Function to move a node from the set + * of ready nodes to the set of scheduled nodes. + * @param[in] allocate_processor Function to allocate a processor to a node + * based on the rules of the scheduler. 
+ */ +static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered( + Scheduler_Context *context, + Scheduler_Node *node, + Chain_Node_order order, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Get_highest_ready get_highest_ready, + Scheduler_SMP_Insert insert_ready, + Scheduler_SMP_Insert insert_scheduled, + Scheduler_SMP_Move move_from_ready_to_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Thread_Control *needs_help; + + do { + Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node ); + + /* + * The node has been extracted from the scheduled chain. We have to place + * it now on the scheduled or ready set. + */ + if ( ( *order )( &node->Node, &highest_ready->Node ) ) { + ( *insert_scheduled )( context, node ); + + needs_help = NULL; + } else { + Scheduler_Try_to_schedule_action action; + + action = _Scheduler_Try_to_schedule_node( + context, + highest_ready, + _Scheduler_Node_get_idle( node ), + _Scheduler_SMP_Get_idle_thread + ); + + if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) { + Thread_Control *user = _Scheduler_Node_get_user( node ); + Thread_Control *idle; + + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( node ), + SCHEDULER_SMP_NODE_READY + ); + _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY ); + + _Scheduler_SMP_Allocate_processor( + context, + highest_ready, + node, + allocate_processor + ); + + ( *insert_ready )( context, node ); + ( *move_from_ready_to_scheduled )( context, highest_ready ); + + idle = _Scheduler_Release_idle_thread( + context, + node, + _Scheduler_SMP_Release_idle_thread + ); + if ( idle == NULL ) { + needs_help = user; + } else { + needs_help = NULL; + } + } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) { + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( node ), + SCHEDULER_SMP_NODE_READY + ); + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( highest_ready ), + 
SCHEDULER_SMP_NODE_SCHEDULED + ); + + ( *insert_ready )( context, node ); + ( *move_from_ready_to_scheduled )( context, highest_ready ); + + _Scheduler_Exchange_idle_thread( + highest_ready, + node, + _Scheduler_Node_get_idle( node ) + ); + + needs_help = NULL; + } else { + _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK ); + + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( highest_ready ), + SCHEDULER_SMP_NODE_BLOCKED + ); + + ( *extract_from_ready )( context, highest_ready ); + + continue; + } + } + } while ( false ); + + return needs_help; +} + +static inline void _Scheduler_SMP_Extract_from_scheduled( + Scheduler_Node *node +) +{ + _Chain_Extract_unprotected( &node->Node ); +} + +static inline void _Scheduler_SMP_Schedule_highest_ready( + Scheduler_Context *context, + Scheduler_Node *victim, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Get_highest_ready get_highest_ready, + Scheduler_SMP_Move move_from_ready_to_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + do { + Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim ); + Scheduler_Try_to_schedule_action action; + + action = _Scheduler_Try_to_schedule_node( + context, + highest_ready, + NULL, + _Scheduler_SMP_Get_idle_thread + ); + + if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) { + _Scheduler_SMP_Allocate_processor( + context, + highest_ready, + victim, + allocate_processor + ); + + ( *move_from_ready_to_scheduled )( context, highest_ready ); + } else { + _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK ); + + _Scheduler_SMP_Node_change_state( + _Scheduler_SMP_Node_downcast( highest_ready ), + SCHEDULER_SMP_NODE_BLOCKED + ); + + ( *extract_from_ready )( context, highest_ready ); + + continue; + } + } while ( false ); +} + +/** + * @brief Blocks a thread. + * + * @param[in] context The scheduler instance context. + * @param[in] thread The thread of the scheduling operation. 
+ * @param[in] extract_from_ready Function to extract a node from the set of + * ready nodes. + * @param[in] get_highest_ready Function to get the highest ready node. + * @param[in] move_from_ready_to_scheduled Function to move a node from the set + * of ready nodes to the set of scheduled nodes. + */ +static inline void _Scheduler_SMP_Block( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Get_highest_ready get_highest_ready, + Scheduler_SMP_Move move_from_ready_to_scheduled, + Scheduler_SMP_Allocate_processor allocate_processor +) +{ + Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); + bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED; + bool block; + + _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY ); + + block = _Scheduler_Block_node( + context, + thread, + &node->Base, + is_scheduled, + _Scheduler_SMP_Get_idle_thread + ); + if ( block ) { + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED ); + + if ( is_scheduled ) { + _Scheduler_SMP_Extract_from_scheduled( &node->Base ); + + _Scheduler_SMP_Schedule_highest_ready( + context, + &node->Base, + extract_from_ready, + get_highest_ready, + move_from_ready_to_scheduled, + allocate_processor + ); + } else { + ( *extract_from_ready )( context, &node->Base ); + } + } +} + +static inline Thread_Control *_Scheduler_SMP_Unblock( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_SMP_Enqueue enqueue_fifo +) +{ + Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); + bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED; + bool unblock = _Scheduler_Unblock_node( + context, + thread, + &node->Base, + is_scheduled, + _Scheduler_SMP_Release_idle_thread + ); + Thread_Control *needs_help; + + if ( unblock ) { + if ( node->state == SCHEDULER_SMP_NODE_BLOCKED ) { + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY ); + + needs_help = ( 
*enqueue_fifo )( context, &node->Base, thread ); + } else { + _Assert( node->state == SCHEDULER_SMP_NODE_READY ); + _Assert( + node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER + || node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL + ); + _Assert( node->Base.idle == NULL ); + + if ( node->Base.accepts_help == thread ) { + needs_help = thread; + } else { + needs_help = NULL; + } + } + } else { + needs_help = NULL; + } + + return needs_help; +} + +static inline Thread_Control *_Scheduler_SMP_Change_priority( + Scheduler_Context *context, + Thread_Control *thread, + Priority_Control new_priority, + bool prepend_it, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Update update, + Scheduler_SMP_Enqueue enqueue_fifo, + Scheduler_SMP_Enqueue enqueue_lifo, + Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo, + Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo +) +{ + Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread ); + Thread_Control *needs_help; + + if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) { + _Scheduler_SMP_Extract_from_scheduled( &node->Base ); + + ( *update )( context, &node->Base, new_priority ); + + if ( prepend_it ) { + needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base ); + } else { + needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base ); + } + } else if ( node->state == SCHEDULER_SMP_NODE_READY ) { + ( *extract_from_ready )( context, &node->Base ); + + ( *update )( context, &node->Base, new_priority ); + + if ( prepend_it ) { + needs_help = ( *enqueue_lifo )( context, &node->Base, NULL ); + } else { + needs_help = ( *enqueue_fifo )( context, &node->Base, NULL ); + } + } else { + ( *update )( context, &node->Base, new_priority ); + + needs_help = NULL; + } + + return needs_help; +} + +static inline Thread_Control *_Scheduler_SMP_Ask_for_help( + Scheduler_Context *context, + Thread_Control *offers_help, + Thread_Control *needs_help, + Scheduler_SMP_Enqueue enqueue_fifo +) +{ + 
Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help ); + Thread_Control *next_needs_help = NULL; + Thread_Control *previous_accepts_help; + + previous_accepts_help = node->Base.accepts_help; + node->Base.accepts_help = needs_help; + + switch ( node->state ) { + case SCHEDULER_SMP_NODE_READY: + next_needs_help = + _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help ); + break; + case SCHEDULER_SMP_NODE_SCHEDULED: + next_needs_help = _Scheduler_Ask_scheduled_node_for_help( + context, + &node->Base, + offers_help, + needs_help, + previous_accepts_help, + _Scheduler_SMP_Release_idle_thread + ); + break; + case SCHEDULER_SMP_NODE_BLOCKED: + if ( + _Scheduler_Ask_blocked_node_for_help( + context, + &node->Base, + offers_help, + needs_help + ) + ) { + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY ); + + next_needs_help = ( *enqueue_fifo )( + context, + &node->Base, + needs_help + ); + } + break; + } + + return next_needs_help; +} + +static inline Thread_Control *_Scheduler_SMP_Yield( + Scheduler_Context *context, + Thread_Control *thread, + Scheduler_SMP_Extract extract_from_ready, + Scheduler_SMP_Enqueue enqueue_fifo, + Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo +) +{ + Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); + Thread_Control *needs_help; + + if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) { + _Scheduler_SMP_Extract_from_scheduled( &node->Base ); + + needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base ); + } else { + ( *extract_from_ready )( context, &node->Base ); + + needs_help = ( *enqueue_fifo )( context, &node->Base, NULL ); + } + + return needs_help; +} + +static inline void _Scheduler_SMP_Insert_scheduled_lifo( + Scheduler_Context *context, + Scheduler_Node *node_to_insert +) +{ + Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); + + _Chain_Insert_ordered_unprotected( + &self->Scheduled, + &node_to_insert->Node, + 
_Scheduler_SMP_Insert_priority_lifo_order + ); +} + +static inline void _Scheduler_SMP_Insert_scheduled_fifo( + Scheduler_Context *context, + Scheduler_Node *node_to_insert +) +{ + Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); + + _Chain_Insert_ordered_unprotected( + &self->Scheduled, + &node_to_insert->Node, + _Scheduler_SMP_Insert_priority_fifo_order + ); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */ diff --git a/include/rtems/score/sh.h b/include/rtems/score/sh.h new file mode 100644 index 0000000000..4e26478b8b --- /dev/null +++ b/include/rtems/score/sh.h @@ -0,0 +1,277 @@ +/** + * @file + * + * @brief Hitachi SH CPU Department Source + * + * This include file contains information pertaining to the Hitachi SH + * processor. + */ + +/* + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1997-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE + * + * + * COPYRIGHT (c) 1998-2001. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SH_H +#define _RTEMS_SCORE_SH_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the "SH" family. + * + * It does this by setting variables to indicate which implementation + * dependent features are present in a particular member of the family. + */ + +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. 
+ */ + +#if defined(__SH2E__) || defined(__SH3E__) + +/* FIXME: SH-DSP context not currently supported */ +#define SH_HAS_FPU 0 + +#elif defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) + +/* + * Define this if you want to use XD-registers. + * Then this registers will be saved/restored on context switch. + * ! They will not be saved/restored on interrupts! + */ +#define SH4_USE_X_REGISTERS 0 + +#if defined(__LITTLE_ENDIAN__) +#define SH_HAS_FPU 1 +#else +/* FIXME: Context_Control_fp does not support big endian */ +#warning FPU not supported +#define SH_HAS_FPU 0 +#endif + +#elif defined(__sh1__) || defined(__sh2__) || defined(__sh3__) +#define SH_HAS_FPU 0 +#else +#warning Cannot detect FPU support, assuming no FPU +#define SH_HAS_FPU 0 +#endif + +/* this should not be here */ +#ifndef CPU_MODEL_NAME +#define CPU_MODEL_NAME "SH-Multilib" +#endif + +/* + * If the following macro is set to 0 there will be no software irq stack + */ + +#ifndef SH_HAS_SEPARATE_STACKS +#define SH_HAS_SEPARATE_STACKS 1 +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "Hitachi SH" + +#ifndef ASM + +#if defined(__sh1__) || defined(__sh2__) + +/* + * Mask for disabling interrupts + */ +#define SH_IRQDIS_VALUE 0xf0 + +#define sh_disable_interrupts( _level ) \ + __asm__ volatile ( \ + "stc sr,%0\n\t" \ + "ldc %1,sr\n\t"\ + : "=&r" (_level ) \ + : "r" (SH_IRQDIS_VALUE) ); + +#define sh_enable_interrupts( _level ) \ + __asm__ volatile( "ldc %0,sr\n\t" \ + "nop\n\t" \ + :: "r" (_level) ); + +/* + * This temporarily restores the interrupt to _level before immediately + * disabling them again. This is used to divide long RTEMS critical + * sections into two or more parts. The parameter _level is not + * modified. 
+ */ + +#define sh_flash_interrupts( _level ) \ + __asm__ volatile( \ + "ldc %1,sr\n\t" \ + "nop\n\t" \ + "ldc %0,sr\n\t" \ + "nop\n\t" \ + : : "r" (SH_IRQDIS_VALUE), "r" (_level) ); + +#else + +#define SH_IRQDIS_MASK 0xf0 + +#define sh_disable_interrupts( _level ) \ + __asm__ volatile ( \ + "stc sr,%0\n\t" \ + "mov %0,r5\n\t" \ + "or %1,r5\n\t" \ + "ldc r5,sr\n\t"\ + : "=&r" (_level ) \ + : "r" (SH_IRQDIS_MASK) \ + : "r5" ); + +#define sh_enable_interrupts( _level ) \ + __asm__ volatile( "ldc %0,sr\n\t" \ + "nop\n\t" \ + :: "r" (_level) ); + +/* + * This temporarily restores the interrupt to _level before immediately + * disabling them again. This is used to divide long RTEMS critical + * sections into two or more parts. The parameter _level is not + * modified. + */ + +#define sh_flash_interrupts( _level ) \ + __asm__ volatile( \ + "stc sr,r5\n\t" \ + "ldc %1,sr\n\t" \ + "nop\n\t" \ + "or %0,r5\n\t" \ + "ldc r5,sr\n\t" \ + "nop\n\t" \ + : : "r" (SH_IRQDIS_MASK), "r" (_level) : "r5"); + +#endif + +#define sh_get_interrupt_level( _level ) \ +{ \ + register uint32_t _tmpsr ; \ + \ + __asm__ volatile( "stc sr, %0" : "=r" (_tmpsr) ); \ + _level = (_tmpsr & 0xf0) >> 4 ; \ +} + +#define sh_set_interrupt_level( _newlevel ) \ +{ \ + register uint32_t _tmpsr; \ + \ + __asm__ volatile ( "stc sr, %0" : "=r" (_tmpsr) ); \ + _tmpsr = ( _tmpsr & ~0xf0 ) | ((_newlevel) << 4) ; \ + __asm__ volatile( "ldc %0,sr" :: "r" (_tmpsr) ); \ +} + +/* + * The following routine swaps the endian format of an unsigned int. + * It must be static because it is referenced indirectly. 
+ */ + +static inline uint32_t sh_swap_u32( + uint32_t value +) +{ + register uint32_t swapped; + + __asm__ volatile ( + "swap.b %1,%0; " + "swap.w %0,%0; " + "swap.b %0,%0" + : "=r" (swapped) + : "r" (value) ); + + return( swapped ); +} + +static inline uint16_t sh_swap_u16( + uint16_t value +) +{ + register uint16_t swapped ; + + __asm__ volatile ( "swap.b %1,%0" : "=r" (swapped) : "r" (value) ); + + return( swapped ); +} + +#define CPU_swap_u32( value ) sh_swap_u32( value ) +#define CPU_swap_u16( value ) sh_swap_u16( value ) + +extern unsigned int sh_set_irq_priority( + unsigned int irq, + unsigned int prio ); + +#endif /* !ASM */ + +/* + * Bits on SH-4 registers. + * See SH-4 Programming manual for more details. + * + * Added by Alexandra Kossovsky <sasha@oktet.ru> + */ + +#if defined(__SH4__) +#define SH4_SR_MD 0x40000000 /* Priveleged mode */ +#define SH4_SR_RB 0x20000000 /* General register bank specifier */ +#define SH4_SR_BL 0x10000000 /* Exeption/interrupt masking bit */ +#define SH4_SR_FD 0x00008000 /* FPU disable bit */ +#define SH4_SR_M 0x00000200 /* For signed division: + divisor (module) is negative */ +#define SH4_SR_Q 0x00000100 /* For signed division: + dividend (and quotient) is negative */ +#define SH4_SR_IMASK 0x000000f0 /* Interrupt mask level */ +#define SH4_SR_IMASK_S 4 +#define SH4_SR_S 0x00000002 /* Saturation for MAC instruction: + if set, data in MACH/L register + is restricted to 48/32 bits + for MAC.W/L instructions */ +#define SH4_SR_T 0x00000001 /* 1 if last condiyion was true */ +#define SH4_SR_RESERV 0x8fff7d0d /* Reserved bits, read/write as 0 */ + +/* FPSCR -- FPU Status/Control Register */ +#define SH4_FPSCR_FR 0x00200000 /* FPU register bank specifier */ +#define SH4_FPSCR_SZ 0x00100000 /* FMOV 64-bit transfer mode */ +#define SH4_FPSCR_PR 0x00080000 /* Double-percision floating-point + operations flag */ + /* SH4_FPSCR_SZ & SH4_FPSCR_PR != 1 */ +#define SH4_FPSCR_DN 0x00040000 /* Treat denormalized number as zero */ +#define 
SH4_FPSCR_CAUSE 0x0003f000 /* FPU exeption cause field */ +#define SH4_FPSCR_CAUSE_S 12 +#define SH4_FPSCR_ENABLE 0x00000f80 /* FPU exeption enable field */ +#define SH4_FPSCR_ENABLE_s 7 +#define SH4_FPSCR_FLAG 0x0000007d /* FPU exeption flag field */ +#define SH4_FPSCR_FLAG_S 2 +#define SH4_FPSCR_RM 0x00000001 /* Rounding mode: + 1/0 -- round to zero/nearest */ +#define SH4_FPSCR_RESERV 0xffd00000 /* Reserved bits, read/write as 0 */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/rtems/score/sh4_regs.h b/include/rtems/score/sh4_regs.h new file mode 100644 index 0000000000..074dc6d7a1 --- /dev/null +++ b/include/rtems/score/sh4_regs.h @@ -0,0 +1,51 @@ +/* + * Bits on SH-4 registers. + * See SH-4 Programming manual for more details. + * + * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia + * Author: Alexandra Kossovsky <sasha@oktet.ru> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef __SH4_REGS_H__ +#define __SH4_REGS_H__ + +/* SR -- Status Register */ +#define SH4_SR_MD 0x40000000 /* Priveleged mode */ +#define SH4_SR_RB 0x20000000 /* General register bank specifier */ +#define SH4_SR_BL 0x10000000 /* Exeption/interrupt masking bit */ +#define SH4_SR_FD 0x00008000 /* FPU disable bit */ +#define SH4_SR_M 0x00000200 /* For signed division: + divisor (module) is negative */ +#define SH4_SR_Q 0x00000100 /* For signed division: + dividend (and quotient) is negative */ +#define SH4_SR_IMASK 0x000000f0 /* Interrupt mask level */ +#define SH4_SR_IMASK_S 4 +#define SH4_SR_S 0x00000002 /* Saturation for MAC instruction: + if set, data in MACH/L register + is restricted to 48/32 bits + for MAC.W/L instructions */ +#define SH4_SR_T 0x00000001 /* 1 if last condiyion was true */ +#define SH4_SR_RESERV 0x8fff7d0d /* Reserved bits, read/write as 0 */ + +/* FPSCR -- FPU Starus/Control Register */ +#define SH4_FPSCR_FR 0x00200000 /* FPU register bank specifier */ +#define SH4_FPSCR_SZ 0x00100000 /* FMOV 64-bit transfer mode */ +#define SH4_FPSCR_PR 0x00080000 /* Double-percision floating-point + operations flag */ + /* SH4_FPSCR_SZ & SH4_FPSCR_PR != 1 */ +#define SH4_FPSCR_DN 0x00040000 /* Treat denormalized number as zero */ +#define SH4_FPSCR_CAUSE 0x0003f000 /* FPU exeption cause field */ +#define SH4_FPSCR_CAUSE_S 12 +#define SH4_FPSCR_ENABLE 0x00000f80 /* FPU exeption enable field */ +#define SH4_FPSCR_ENABLE_s 7 +#define SH4_FPSCR_FLAG 0x0000007d /* FPU exeption flag field */ +#define SH4_FPSCR_FLAG_S 2 +#define SH4_FPSCR_RM 0x00000001 /* Rounding mode: + 1/0 -- round to zero/nearest */ +#define SH4_FPSCR_RESERV 0xffd00000 /* Reserved bits, read/write as 0 */ + +#endif diff --git a/include/rtems/score/sh7750_regs.h b/include/rtems/score/sh7750_regs.h new file mode 100644 index 0000000000..b65f9b6e51 --- /dev/null +++ b/include/rtems/score/sh7750_regs.h @@ -0,0 +1,1613 @@ +/* + * SH-7750 memory-mapped registers + * This file based on 
information provided in the following document: + * "Hitachi SuperH (tm) RISC engine. SH7750 Series (SH7750, SH7750S) + * Hardware Manual" + * Document Number ADE-602-124C, Rev. 4.0, 4/21/00, Hitachi Ltd. + * + * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia + * Author: Alexandra Kossovsky <sasha@oktet.ru> + * Victor V. Vengerov <vvv@oktet.ru> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef __SH7750_REGS_H__ +#define __SH7750_REGS_H__ + +/* + * All register has 2 addresses: in 0xff000000 - 0xffffffff (P4 address) and + * in 0x1f000000 - 0x1fffffff (area 7 address) + */ +#define SH7750_P4_BASE 0xff000000 /* Accessable only in + priveleged mode */ +#define SH7750_A7_BASE 0x1f000000 /* Accessable only using TLB */ + +#define SH7750_P4_REG32(ofs) (SH7750_P4_BASE + (ofs)) +#define SH7750_A7_REG32(ofs) (SH7750_A7_BASE + (ofs)) + +/* + * MMU Registers + */ + +/* Page Table Entry High register - PTEH */ +#define SH7750_PTEH_REGOFS 0x000000 /* offset */ +#define SH7750_PTEH SH7750_P4_REG32(SH7750_PTEH_REGOFS) +#define SH7750_PTEH_A7 SH7750_A7_REG32(SH7750_PTEH_REGOFS) +#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */ +#define SH7750_PTEH_VPN_S 10 +#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */ +#define SH7750_PTEH_ASID_S 0 + +/* Page Table Entry Low register - PTEL */ +#define SH7750_PTEL_REGOFS 0x000004 /* offset */ +#define SH7750_PTEL SH7750_P4_REG32(SH7750_PTEL_REGOFS) +#define SH7750_PTEL_A7 SH7750_A7_REG32(SH7750_PTEL_REGOFS) +#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */ +#define SH7750_PTEL_PPN_S 10 +#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */ +#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */ +#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */ +#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */ +#define SH7750_PTEL_SZ_4KB 0x00000010 /* 
4-kbyte page */ +#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */ +#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */ +#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */ +#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */ +#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */ +#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode*/ +#define SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user mode*/ +#define SH7750_PTEL_C 0x00000008 /* Cacheability + (0 - page not cacheable) */ +#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been + performed to a page) */ +#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - page are + shared by processes) */ +#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the + cache write mode: + 0 - Copy-back mode + 1 - Write-through mode */ + +/* Page Table Entry Assistance register - PTEA */ +#define SH7750_PTEA_REGOFS 0x000034 /* offset */ +#define SH7750_PTEA SH7750_P4_REG32(SH7750_PTEA_REGOFS) +#define SH7750_PTEA_A7 SH7750_A7_REG32(SH7750_PTEA_REGOFS) +#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit + 0 - use area 5 wait states + 1 - use area 6 wait states */ +#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */ +#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */ +#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */ +#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */ +#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */ +#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space*/ +#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space*/ +#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */ +#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */ + + +/* Translation table base register */ +#define SH7750_TTB_REGOFS 0x000008 /* offset */ +#define SH7750_TTB 
SH7750_P4_REG32(SH7750_TTB_REGOFS) +#define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS) + +/* TLB exeption address register - TEA */ +#define SH7750_TEA_REGOFS 0x00000c /* offset */ +#define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS) +#define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS) + +/* MMU control register - MMUCR */ +#define SH7750_MMUCR_REGOFS 0x000010 /* offset */ +#define SH7750_MMUCR SH7750_P4_REG32(SH7750_MMUCR_REGOFS) +#define SH7750_MMUCR_A7 SH7750_A7_REG32(SH7750_MMUCR_REGOFS) +#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */ +#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */ +#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual Mode bit */ +#define SH7750_MMUCR_SQMD 0x00000200 /* Store Queue Mode bit */ +#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */ +#define SH7750_MMUCR_URC_S 10 +#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */ +#define SH7750_MMUCR_URB_S 18 +#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */ +#define SH7750_MMUCR_LRUI_S 26 + + + + +/* + * Cache registers + * IC -- instructions cache + * OC -- operand cache + */ + +/* Cache Control Register - CCR */ +#define SH7750_CCR_REGOFS 0x00001c /* offset */ +#define SH7750_CCR SH7750_P4_REG32(SH7750_CCR_REGOFS) +#define SH7750_CCR_A7 SH7750_A7_REG32(SH7750_CCR_REGOFS) + +#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */ +#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: + set it to clear IC */ +#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */ +#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */ +#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit + if you set OCE = 0, + you should set ORA = 0 */ +#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */ +#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */ +#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */ +#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */ + +/* Queue address control 
register 0 - QACR0 */ +#define SH7750_QACR0_REGOFS 0x000038 /* offset */ +#define SH7750_QACR0 SH7750_P4_REG32(SH7750_QACR0_REGOFS) +#define SH7750_QACR0_A7 SH7750_A7_REG32(SH7750_QACR0_REGOFS) + +/* Queue address control register 1 - QACR1 */ +#define SH7750_QACR1_REGOFS 0x00003c /* offset */ +#define SH7750_QACR1 SH7750_P4_REG32(SH7750_QACR1_REGOFS) +#define SH7750_QACR1_A7 SH7750_A7_REG32(SH7750_QACR1_REGOFS) + + +/* + * Exeption-related registers + */ + +/* Immediate data for TRAPA instuction - TRA */ +#define SH7750_TRA_REGOFS 0x000020 /* offset */ +#define SH7750_TRA SH7750_P4_REG32(SH7750_TRA_REGOFS) +#define SH7750_TRA_A7 SH7750_A7_REG32(SH7750_TRA_REGOFS) + +#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */ +#define SH7750_TRA_IMM_S 2 + +/* Exeption event register - EXPEVT */ +#define SH7750_EXPEVT_REGOFS 0x000024 +#define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS) +#define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS) + +#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */ +#define SH7750_EXPEVT_EX_S 0 + +/* Interrupt event register */ +#define SH7750_INTEVT_REGOFS 0x000028 +#define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS) +#define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS) +#define SH7750_INTEVT_EX 0x00000fff /* Exeption code */ +#define SH7750_INTEVT_EX_S 0 + +/* + * Exception/interrupt codes + */ +#define SH7750_EVT_TO_NUM(evt) ((evt) >> 5) + +/* Reset exception category */ +#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */ +#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */ +#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */ + +/* General exception category */ +#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */ +#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */ +#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / + DTLB miss exception (read) */ +#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation / + DTLB protection 
violation (read)*/ +#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction + exception */ +#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction + exception */ +#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception*/ +#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */ +#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */ +#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */ +#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */ +#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation + exception (write) */ +#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */ +#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */ +#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */ + +/* Interrupt exception category */ +#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */ +#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */ +#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */ +#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */ +#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */ +#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */ +#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */ +#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */ +#define SH7750_EVT_IRQ7 0x2E0 /* External Interrupt 7 */ +#define SH7750_EVT_IRQ8 0x300 /* External Interrupt 8 */ +#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */ +#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */ +#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */ +#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */ +#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */ +#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */ + +/* Peripheral Module Interrupts - Timer Unit (TMU) */ +#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */ +#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow 
Interrupt 1 */ +#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */ +#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2*/ + +/* Peripheral Module Interrupts - Real-Time Clock (RTC) */ +#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */ +#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */ +#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */ + +/* Peripheral Module Interrupts - Serial Communication Interface (SCI) */ +#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */ +#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */ +#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */ +#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */ + +/* Peripheral Module Interrupts - Watchdog Timer (WDT) */ +#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt + (used when WDT operates in + interval timer mode) */ + +/* Peripheral Module Interrupts - Memory Refresh Unit (REF) */ +#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */ +#define SH7750_EVT_REF_ROVI 0x5A0 /* Refresh Counter Overflow + interrupt */ + +/* Peripheral Module Interrupts - Hitachi User Debug Interface (H-UDI) */ +#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */ + +/* Peripheral Module Interrupts - General-Purpose I/O (GPIO) */ +#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */ + +/* Peripheral Module Interrupts - DMA Controller (DMAC) */ +#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt*/ +#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt*/ +#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt*/ +#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt*/ +#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */ + +/* Peripheral Module Interrupts - Serial Communication Interface with FIFO */ +/* (SCIF) */ +#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */ +#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or + 
Receive Data ready interrupt */ +#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */ +#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */ + +/* + * Power Management + */ +#define SH7750_STBCR_REGOFS 0xC00004 /* offset */ +#define SH7750_STBCR SH7750_P4_REG32(SH7750_STBCR_REGOFS) +#define SH7750_STBCR_A7 SH7750_A7_REG32(SH7750_STBCR_REGOFS) + +#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: + 0 - Transition to SLEEP mode on SLEEP + 1 - Transition to STANDBY mode on SLEEP*/ +#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in + standby mode: + 0 - normal state + 1 - high-impendance state */ + +#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls*/ +#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */ +#define SH7750_STBCR_DMAC_STP SH7750_STBCR_MSTP4 +#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */ +#define SH7750_STBCR_SCIF_STP SH7750_STBCR_MSTP3 +#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */ +#define SH7750_STBCR_TMU_STP SH7750_STBCR_MSTP2 +#define SH7750_STBCR_MSTP1 0x02 /* Stopping the clock supply to RTC */ +#define SH7750_STBCR_RTC_STP SH7750_STBCR_MSTP1 +#define SH7750_STBCR_MSPT0 0x01 /* Stopping the clock supply to SCI */ +#define SH7750_STBCR_SCI_STP SH7750_STBCR_MSTP0 + +#define SH7750_STBCR_STBY 0x80 + + +#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */ +#define SH7750_STBCR2 SH7750_P4_REG32(SH7750_STBCR2_REGOFS) +#define SH7750_STBCR2_A7 SH7750_A7_REG32(SH7750_STBCR2_REGOFS) + +#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode: + 0 - transition to sleep or standby mode + as it is specified in STBY bit + 1 - transition to deep sleep mode on + execution of SLEEP instruction */ +#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to Store Queue + in the cache controller */ +#define SH7750_STBCR2_SQ_STP SH7750_STBCR2_MSTP6 +#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the 
clock supply to the User + Break Controller (UBC) */ +#define SH7750_STBCR2_UBC_STP SH7750_STBCR2_MSTP5 + +/* + * Clock Pulse Generator (CPG) + */ +#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ +#define SH7750_FRQCR SH7750_P4_REG32(SH7750_FRQCR_REGOFS) +#define SH7750_FRQCR_A7 SH7750_A7_REG32(SH7750_FRQCR_REGOFS) + +#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable + 0 - CKIO pin goes to HiZ/pullup + 1 - Clock is output from CKIO */ +#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ +#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ + +#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ +#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ +#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ +#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ +#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ +#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ + +#define SH7750_FRQCR_BFC 0x0038 /* Bus clock frequency division ratio: */ +#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ +#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ +#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ +#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ +#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 1/8 */ + +#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency + division ratio: */ +#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ +#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ +#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ +#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ +#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ + +/* + * Watchdog Timer (WDT) + */ + +/* Watchdog Timer Counter register - WTCNT */ +#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ +#define SH7750_WTCNT SH7750_P4_REG32(SH7750_WTCNT_REGOFS) +#define SH7750_WTCNT_A7 SH7750_A7_REG32(SH7750_WTCNT_REGOFS) +#define 
SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, + you have to set the upper byte to + 0x5A */ + +/* Watchdog Timer Control/Status register - WTCSR */ +#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ +#define SH7750_WTCSR SH7750_P4_REG32(SH7750_WTCSR_REGOFS) +#define SH7750_WTCSR_A7 SH7750_A7_REG32(SH7750_WTCSR_REGOFS) +#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, + you have to set the upper byte to + 0xA5 */ +#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ +#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ +#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ +#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ +#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ +#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ +#define SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ +#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ +#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ +#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ +#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ +#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ +#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ +#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ +#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ +#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ +#define SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ +#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ + +/* + * Real-Time Clock (RTC) + */ +/* 64-Hz Counter Register (byte, read-only) - R64CNT */ +#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ +#define SH7750_R64CNT SH7750_P4_REG32(SH7750_R64CNT_REGOFS) +#define SH7750_R64CNT_A7 SH7750_A7_REG32(SH7750_R64CNT_REGOFS) + +/* Second Counter Register (byte, BCD-coded) - RSECCNT */ +#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ +#define SH7750_RSECCNT SH7750_P4_REG32(SH7750_RSECCNT_REGOFS) +#define SH7750_RSECCNT_A7 SH7750_A7_REG32(SH7750_RSECCNT_REGOFS) + +/* 
Minute Counter Register (byte, BCD-coded) - RMINCNT */ +#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ +#define SH7750_RMINCNT SH7750_P4_REG32(SH7750_RMINCNT_REGOFS) +#define SH7750_RMINCNT_A7 SH7750_A7_REG32(SH7750_RMINCNT_REGOFS) + +/* Hour Counter Register (byte, BCD-coded) - RHRCNT */ +#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ +#define SH7750_RHRCNT SH7750_P4_REG32(SH7750_RHRCNT_REGOFS) +#define SH7750_RHRCNT_A7 SH7750_A7_REG32(SH7750_RHRCNT_REGOFS) + +/* Day-of-Week Counter Register (byte) - RWKCNT */ +#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ +#define SH7750_RWKCNT SH7750_P4_REG32(SH7750_RWKCNT_REGOFS) +#define SH7750_RWKCNT_A7 SH7750_A7_REG32(SH7750_RWKCNT_REGOFS) + +#define SH7750_RWKCNT_SUN 0 /* Sunday */ +#define SH7750_RWKCNT_MON 1 /* Monday */ +#define SH7750_RWKCNT_TUE 2 /* Tuesday */ +#define SH7750_RWKCNT_WED 3 /* Wednesday */ +#define SH7750_RWKCNT_THU 4 /* Thursday */ +#define SH7750_RWKCNT_FRI 5 /* Friday */ +#define SH7750_RWKCNT_SAT 6 /* Saturday */ + +/* Day Counter Register (byte, BCD-coded) - RDAYCNT */ +#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ +#define SH7750_RDAYCNT SH7750_P4_REG32(SH7750_RDAYCNT_REGOFS) +#define SH7750_RDAYCNT_A7 SH7750_A7_REG32(SH7750_RDAYCNT_REGOFS) + +/* Month Counter Register (byte, BCD-coded) - RMONCNT */ +#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ +#define SH7750_RMONCNT SH7750_P4_REG32(SH7750_RMONCNT_REGOFS) +#define SH7750_RMONCNT_A7 SH7750_A7_REG32(SH7750_RMONCNT_REGOFS) + +/* Year Counter Register (half, BCD-coded) - RYRCNT */ +#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ +#define SH7750_RYRCNT SH7750_P4_REG32(SH7750_RYRCNT_REGOFS) +#define SH7750_RYRCNT_A7 SH7750_A7_REG32(SH7750_RYRCNT_REGOFS) + +/* Second Alarm Register (byte, BCD-coded) - RSECAR */ +#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ +#define SH7750_RSECAR SH7750_P4_REG32(SH7750_RSECAR_REGOFS) +#define SH7750_RSECAR_A7 SH7750_A7_REG32(SH7750_RSECAR_REGOFS) +#define SH7750_RSECAR_ENB 0x80 /* 
Second Alarm Enable */ + +/* Minute Alarm Register (byte, BCD-coded) - RMINAR */ +#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ +#define SH7750_RMINAR SH7750_P4_REG32(SH7750_RMINAR_REGOFS) +#define SH7750_RMINAR_A7 SH7750_A7_REG32(SH7750_RMINAR_REGOFS) +#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ + +/* Hour Alarm Register (byte, BCD-coded) - RHRAR */ +#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ +#define SH7750_RHRAR SH7750_P4_REG32(SH7750_RHRAR_REGOFS) +#define SH7750_RHRAR_A7 SH7750_A7_REG32(SH7750_RHRAR_REGOFS) +#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ + +/* Day-of-Week Alarm Register (byte) - RWKAR */ +#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ +#define SH7750_RWKAR SH7750_P4_REG32(SH7750_RWKAR_REGOFS) +#define SH7750_RWKAR_A7 SH7750_A7_REG32(SH7750_RWKAR_REGOFS) +#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ + +#define SH7750_RWKAR_SUN 0 /* Sunday */ +#define SH7750_RWKAR_MON 1 /* Monday */ +#define SH7750_RWKAR_TUE 2 /* Tuesday */ +#define SH7750_RWKAR_WED 3 /* Wednesday */ +#define SH7750_RWKAR_THU 4 /* Thursday */ +#define SH7750_RWKAR_FRI 5 /* Friday */ +#define SH7750_RWKAR_SAT 6 /* Saturday */ + +/* Day Alarm Register (byte, BCD-coded) - RDAYAR */ +#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */ +#define SH7750_RDAYAR SH7750_P4_REG32(SH7750_RDAYAR_REGOFS) +#define SH7750_RDAYAR_A7 SH7750_A7_REG32(SH7750_RDAYAR_REGOFS) +#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */ + +/* Month Counter Register (byte, BCD-coded) - RMONAR */ +#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */ +#define SH7750_RMONAR SH7750_P4_REG32(SH7750_RMONAR_REGOFS) +#define SH7750_RMONAR_A7 SH7750_A7_REG32(SH7750_RMONAR_REGOFS) +#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */ + +/* RTC Control Register 1 (byte) - RCR1 */ +#define SH7750_RCR1_REGOFS 0xC80038 /* offset */ +#define SH7750_RCR1 SH7750_P4_REG32(SH7750_RCR1_REGOFS) +#define SH7750_RCR1_A7 SH7750_A7_REG32(SH7750_RCR1_REGOFS) +#define SH7750_RCR1_CF 
0x80 /* Carry Flag */ +#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt Enable */ +#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt Enable */ +#define SH7750_RCR1_AF 0x01 /* Alarm Flag */ + +/* RTC Control Register 2 (byte) - RCR2 */ +#define SH7750_RCR2_REGOFS 0xC8003C /* offset */ +#define SH7750_RCR2 SH7750_P4_REG32(SH7750_RCR2_REGOFS) +#define SH7750_RCR2_A7 SH7750_A7_REG32(SH7750_RCR2_REGOFS) +#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */ +#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */ +#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */ +#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */ +#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */ +#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */ +#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */ +#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */ +#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */ +#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */ +#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */ +#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjastment */ +#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset*/ +#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, + year counters are stopped + 1 - sec, min, hr, day-of-week, month, + year counters operate normally */ + + +/* + * Timer Unit (TMU) + */ +/* Timer Output Control Register (byte) - TOCR */ +#define SH7750_TOCR_REGOFS 0xD80000 /* offset */ +#define SH7750_TOCR SH7750_P4_REG32(SH7750_TOCR_REGOFS) +#define SH7750_TOCR_A7 SH7750_A7_REG32(SH7750_TOCR_REGOFS) +#define SH7750_TOCR_TCOE 0x01 /* Timer Clock Pin Control: + 0 - TCLK is used as external clock + input or input capture control + 1 - TCLK is used as on-chip RTC + output clock pin */ + +/* Timer Start Register (byte) - TSTR */ +#define SH7750_TSTR_REGOFS 0xD80004 /* offset */ 
+#define SH7750_TSTR SH7750_P4_REG32(SH7750_TSTR_REGOFS) +#define SH7750_TSTR_A7 SH7750_A7_REG32(SH7750_TSTR_REGOFS) +#define SH7750_TSTR_STR2 0x04 /* TCNT2 performs count operations */ +#define SH7750_TSTR_STR1 0x02 /* TCNT1 performs count operations */ +#define SH7750_TSTR_STR0 0x01 /* TCNT0 performs count operations */ +#define SH7750_TSTR_STR(n) (1 << (n)) + +/* Timer Constant Register - TCOR0, TCOR1, TCOR2 */ +#define SH7750_TCOR_REGOFS(n) (0xD80008 + ((n)*12)) /* offset */ +#define SH7750_TCOR(n) SH7750_P4_REG32(SH7750_TCOR_REGOFS(n)) +#define SH7750_TCOR_A7(n) SH7750_A7_REG32(SH7750_TCOR_REGOFS(n)) +#define SH7750_TCOR0 SH7750_TCOR(0) +#define SH7750_TCOR1 SH7750_TCOR(1) +#define SH7750_TCOR2 SH7750_TCOR(2) +#define SH7750_TCOR0_A7 SH7750_TCOR_A7(0) +#define SH7750_TCOR1_A7 SH7750_TCOR_A7(1) +#define SH7750_TCOR2_A7 SH7750_TCOR_A7(2) + +/* Timer Counter Register - TCNT0, TCNT1, TCNT2 */ +#define SH7750_TCNT_REGOFS(n) (0xD8000C + ((n)*12)) /* offset */ +#define SH7750_TCNT(n) SH7750_P4_REG32(SH7750_TCNT_REGOFS(n)) +#define SH7750_TCNT_A7(n) SH7750_A7_REG32(SH7750_TCNT_REGOFS(n)) +#define SH7750_TCNT0 SH7750_TCNT(0) +#define SH7750_TCNT1 SH7750_TCNT(1) +#define SH7750_TCNT2 SH7750_TCNT(2) +#define SH7750_TCNT0_A7 SH7750_TCNT_A7(0) +#define SH7750_TCNT1_A7 SH7750_TCNT_A7(1) +#define SH7750_TCNT2_A7 SH7750_TCNT_A7(2) + +/* Timer Control Register (half) - TCR0, TCR1, TCR2 */ +#define SH7750_TCR_REGOFS(n) (0xD80010 + ((n)*12)) /* offset */ +#define SH7750_TCR(n) SH7750_P4_REG32(SH7750_TCR_REGOFS(n)) +#define SH7750_TCR_A7(n) SH7750_A7_REG32(SH7750_TCR_REGOFS(n)) +#define SH7750_TCR0 SH7750_TCR(0) +#define SH7750_TCR1 SH7750_TCR(1) +#define SH7750_TCR2 SH7750_TCR(2) +#define SH7750_TCR0_A7 SH7750_TCR_A7(0) +#define SH7750_TCR1_A7 SH7750_TCR_A7(1) +#define SH7750_TCR2_A7 SH7750_TCR_A7(2) + +#define SH7750_TCR2_ICPF 0x200 /* Input Capture Interrupt Flag + (1 - input capture has occured) */ +#define SH7750_TCR_UNF 0x100 /* Underflow flag */ +#define SH7750_TCR2_ICPE 
0x0C0 /* Input Capture Control: */ +#define SH7750_TCR2_ICPE_DIS 0x000 /* Input Capture function is not used*/ +#define SH7750_TCR2_ICPE_NOINT 0x080 /* Input Capture function is used, but + input capture interrupt is not + enabled */ +#define SH7750_TCR2_ICPE_INT 0x0C0 /* Input Capture function is used, + input capture interrupt enabled */ +#define SH7750_TCR_UNIE 0x020 /* Underflow Interrupt Control + (1 - underflow interrupt enabled) */ +#define SH7750_TCR_CKEG 0x018 /* Clock Edge selection: */ +#define SH7750_TCR_CKEG_RAISE 0x000 /* Count/capture on rising edge */ +#define SH7750_TCR_CKEG_FALL 0x008 /* Count/capture on falling edge */ +#define SH7750_TCR_CKEG_BOTH 0x018 /* Count/capture on both rising and + falling edges */ +#define SH7750_TCR_TPSC 0x007 /* Timer prescaler */ +#define SH7750_TCR_TPSC_DIV4 0x000 /* Counts on peripheral clock/4 */ +#define SH7750_TCR_TPSC_DIV16 0x001 /* Counts on peripheral clock/16 */ +#define SH7750_TCR_TPSC_DIV64 0x002 /* Counts on peripheral clock/64 */ +#define SH7750_TCR_TPSC_DIV256 0x003 /* Counts on peripheral clock/256 */ +#define SH7750_TCR_TPSC_DIV1024 0x004 /* Counts on peripheral clock/1024 */ +#define SH7750_TCR_TPSC_RTC 0x006 /* Counts on on-chip RTC output clk*/ +#define SH7750_TCR_TPSC_EXT 0x007 /* Counts on external clock */ + +/* Input Capture Register (read-only) - TCPR2 */ +#define SH7750_TCPR2_REGOFS 0xD8002C /* offset */ +#define SH7750_TCPR2 SH7750_P4_REG32(SH7750_TCPR2_REGOFS) +#define SH7750_TCPR2_A7 SH7750_A7_REG32(SH7750_TCPR2_REGOFS) + +/* + * Bus State Controller - BSC + */ +/* Bus Control Register 1 - BCR1 */ +#define SH7750_BCR1_REGOFS 0x800000 /* offset */ +#define SH7750_BCR1 SH7750_P4_REG32(SH7750_BCR1_REGOFS) +#define SH7750_BCR1_A7 SH7750_A7_REG32(SH7750_BCR1_REGOFS) +#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */ +#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */ +#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX)*/ 
+#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: + 0 - pull-up resistor is on for + control input pins + 1 - pull-up resistor is off */ +#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: + 0 - pull-up resistor is on for + control output pins + 1 - pull-up resistor is off */ +#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: + 0 - Area 1 SRAM is set to + normal mode + 1 - Area 1 SRAM is set to byte + control mode */ +#define SH7750_BCR1_A4MBC 0x00100000 /* Area 4 SRAM Byte Control Mode: + 0 - Area 4 SRAM is set to + normal mode + 1 - Area 4 SRAM is set to byte + control mode */ +#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: + 0 - External requests are not + accepted + 1 - External requests are + accepted */ +#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: + 0 - Master Mode + 1 - Partial-sharing Mode */ +#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: + 0 - SRAM/burst ROM interface + 1 - MPX interface */ +#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impendance Control. Specifies + the state of A[25:0], BS\, CSn\, + RD/WR\, CE2A\, CE2B\ in standby + mode and when bus is released: + 0 - signals go to High-Z mode + 1 - signals driven */ +#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impendance Control. 
Specifies + the state of the RAS\, RAS2\, WEn\, + CASn\, DQMn, RD\, CASS\, FRAME\, + RD2\ signals in standby mode and + when bus is released: + 0 - signals go to High-Z mode + 1 - signals driven */ +#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */ +#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */ +#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM + interface, 4 cosequtive access*/ +#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM + interface, 8 cosequtive access*/ +#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM + interface, 16 cosequtive access*/ +#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM + interface, 32 cosequtive access*/ + +#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */ +#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */ +#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM + interface, 4 cosequtive access*/ +#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM + interface, 8 cosequtive access*/ +#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM + interface, 16 cosequtive access*/ +#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM + interface, 32 cosequtive access*/ + +#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */ +#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */ +#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM + interface, 4 cosequtive access*/ +#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM + interface, 8 cosequtive access*/ +#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM + interface, 16 cosequtive access*/ +#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM + interface, 32 cosequtive access*/ + +#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */ +#define 
SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or MPX + interface. */ +#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 - + synchronous DRAM */ +#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are synchronous + DRAM interface */ +#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 - + DRAM interface */ +#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM + interface */ + +#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: + 0 - SRAM interface + 1 - PCMCIA interface */ + +/* Bus Control Register 2 (half) - BCR2 */ +#define SH7750_BCR2_REGOFS 0x800004 /* offset */ +#define SH7750_BCR2 SH7750_P4_REG32(SH7750_BCR2_REGOFS) +#define SH7750_BCR2_A7 SH7750_A7_REG32(SH7750_BCR2_REGOFS) + +#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ +#define SH7750_BCR2_A0SZ_S 14 +#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ +#define SH7750_BCR2_A6SZ_S 12 +#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ +#define SH7750_BCR2_A5SZ_S 10 +#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ +#define SH7750_BCR2_A4SZ_S 8 +#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ +#define SH7750_BCR2_A3SZ_S 6 +#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ +#define SH7750_BCR2_A2SZ_S 4 +#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ +#define SH7750_BCR2_A1SZ_S 2 +#define SH7750_BCR2_SZ_64 0 /* 64 bits */ +#define SH7750_BCR2_SZ_8 1 /* 8 bits */ +#define SH7750_BCR2_SZ_16 2 /* 16 bits */ +#define SH7750_BCR2_SZ_32 3 /* 32 bits */ +#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable : + 0 - D51-D32 are not used as a port + 1 - D51-D32 are used as a port */ + +/* Wait Control Register 1 - WCR1 */ +#define SH7750_WCR1_REGOFS 0x800008 /* offset */ +#define SH7750_WCR1 SH7750_P4_REG32(SH7750_WCR1_REGOFS) +#define SH7750_WCR1_A7 SH7750_A7_REG32(SH7750_WCR1_REGOFS) +#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle + 
specification */ +#define SH7750_WCR1_DMAIW_S 28 +#define SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A6IW_S 24 +#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A5IW_S 20 +#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A4IW_S 16 +#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A3IW_S 12 +#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A2IW_S 8 +#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A1IW_S 4 +#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A0IW_S 0 + +/* Wait Control Register 2 - WCR2 */ +#define SH7750_WCR2_REGOFS 0x80000C /* offset */ +#define SH7750_WCR2 SH7750_P4_REG32(SH7750_WCR2_REGOFS) +#define SH7750_WCR2_A7 SH7750_A7_REG32(SH7750_WCR2_REGOFS) + +#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */ +#define SH7750_WCR2_A6W_S 29 +#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */ +#define SH7750_WCR2_A6B_S 26 +#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */ +#define SH7750_WCR2_A5W_S 23 +#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */ +#define SH7750_WCR2_A5B_S 20 +#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */ +#define SH7750_WCR2_A4W_S 17 +#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */ +#define SH7750_WCR2_A3W_S 13 +#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */ +#define SH7750_WCR2_A2W_S 9 +#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */ +#define SH7750_WCR2_A1W_S 6 +#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */ +#define SH7750_WCR2_A0W_S 3 +#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */ +#define SH7750_WCR2_A0B_S 0 + +#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */ +#define SH7750_WCR2_WS1 1 /* 1 wait states 
inserted */ +#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */ +#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */ +#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */ +#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */ +#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */ +#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */ + +#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS1 1 /* 1 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */ + +/* DRAM CAS\ Assertion Delay (area 3,2) */ +#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */ +#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */ + +/* SDRAM CAS\ Latency Cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */ +#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */ + +/* Wait Control Register 3 - WCR3 */ +#define SH7750_WCR3_REGOFS 0x800010 /* offset */ +#define SH7750_WCR3 SH7750_P4_REG32(SH7750_WCR3_REGOFS) +#define SH7750_WCR3_A7 SH7750_A7_REG32(SH7750_WCR3_REGOFS) + +#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */ 
+#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */ +#define SH7750_WCR3_A6H_S 24 +#define SH7750_WCR3_A5S 0x00400000 /* Area 5 Write Strobe Setup time */ +#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */ +#define SH7750_WCR3_A5H_S 20 +#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */ +#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */ +#define SH7750_WCR3_A4H_S 16 +#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */ +#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */ +#define SH7750_WCR3_A3H_S 12 +#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */ +#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */ +#define SH7750_WCR3_A2H_S 8 +#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */ +#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */ +#define SH7750_WCR3_A1H_S 4 +#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */ +#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */ +#define SH7750_WCR3_A0H_S 0 + +#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */ +#define SH7750_WCR3_DHWS_1 1 /* 1 wait states data hold time */ +#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */ +#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */ + +#define SH7750_MCR_REGOFS 0x800014 /* offset */ +#define SH7750_MCR SH7750_P4_REG32(SH7750_MCR_REGOFS) +#define SH7750_MCR_A7 SH7750_A7_REG32(SH7750_MCR_REGOFS) + +#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */ +#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */ +#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. 
Mode */ +#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of + Refresh: */ +#define SH7750_MCR_TRC_0 0x00000000 /* 0 */ +#define SH7750_MCR_TRC_3 0x08000000 /* 3 */ +#define SH7750_MCR_TRC_6 0x10000000 /* 6 */ +#define SH7750_MCR_TRC_9 0x18000000 /* 9 */ +#define SH7750_MCR_TRC_12 0x20000000 /* 12 */ +#define SH7750_MCR_TRC_15 0x28000000 /* 15 */ +#define SH7750_MCR_TRC_18 0x30000000 /* 18 */ +#define SH7750_MCR_TRC_21 0x38000000 /* 21 */ + +#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */ +#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */ +#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */ + +#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period + SDRAM: minimum number of cycles + until the next bank active cmd + is output after precharging */ +#define SH7750_MCR_TPC_S 19 +#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */ +#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */ +#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */ +#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */ +#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */ +#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */ +#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */ +#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */ + +#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay time + SDRAM: bank active-read/write cmd + delay time */ +#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */ +#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */ +#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* DRAM delay 4 clocks */ + +#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */ +#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */ 
+#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */ +#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */ +#define SH7750_MCR_TRWL_4 0x00006000 /* 4 */ +#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */ + +#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS + asserting period + SDRAM: Command interval after + synchronous DRAM refresh */ +#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */ +#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */ +#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */ +#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */ +#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */ +#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */ +#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */ +#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */ + +#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */ + +#define SH7750_MCR_BE 0x00000200 /* Burst Enable */ +#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */ +#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */ +#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */ +#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */ + +#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */ +#define SH7750_MCR_AMX_S 3 +#define SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */ +#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */ +#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */ +#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */ +#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */ +/* See SH7750 
Hardware Manual for SDRAM address multiplexor selection */ + +#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */ +#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */ +#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */ +#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */ +#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */ + +/* SDRAM Mode Set address */ +#define SH7750_SDRAM_MODE_A2_BASE 0xFF900000 +#define SH7750_SDRAM_MODE_A3_BASE 0xFF940000 +#define SH7750_SDRAM_MODE_A2_32BIT(x) (SH7750_SDRAM_MODE_A2_BASE + ((x) << 2)) +#define SH7750_SDRAM_MODE_A3_32BIT(x) (SH7750_SDRAM_MODE_A3_BASE + ((x) << 2)) +#define SH7750_SDRAM_MODE_A2_64BIT(x) (SH7750_SDRAM_MODE_A2_BASE + ((x) << 3)) +#define SH7750_SDRAM_MODE_A3_64BIT(x) (SH7750_SDRAM_MODE_A3_BASE + ((x) << 3)) + + +/* PCMCIA Control Register (half) - PCR */ +#define SH7750_PCR_REGOFS 0x800018 /* offset */ +#define SH7750_PCR SH7750_P4_REG32(SH7750_PCR_REGOFS) +#define SH7750_PCR_A7 SH7750_A7_REG32(SH7750_PCR_REGOFS) + +#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait + states to be added to the number of + waits specified by WCR2 in a low-speed + PCMCIA wait cycle */ +#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */ +#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */ +#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */ + +#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait + states to be added to the number of + waits specified by WCR2 in a low-speed + PCMCIA wait cycle */ +#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */ +#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */ +#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */ + +#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Address-OE\/WE\ Assertion Delay, + delay time from address output to + OE\/WE\ assertion on the 
connected + PCMCIA interface */ +#define SH7750_PCR_A5TED_S 9 +#define SH7750_PCR_A6TED 0x01C0 /* Area 6 Address-OE\/WE\ Assertion Delay*/ +#define SH7750_PCR_A6TED_S 6 + +#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TED_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */ + +#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Address delay, + address hold delay time from OE\/WE\ + negation in a write on the connected + PCMCIA interface */ +#define SH7750_PCR_A5TEH_S 3 + +#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay*/ +#define SH7750_PCR_A6TEH_S 0 + +#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TEH_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */ + +/* Refresh Timer Control/Status Register (half) - RTSCR */ +#define SH7750_RTCSR_REGOFS 0x80001C /* offset */ +#define SH7750_RTCSR SH7750_P4_REG32(SH7750_RTCSR_REGOFS) +#define SH7750_RTCSR_A7 SH7750_A7_REG32(SH7750_RTCSR_REGOFS) + +#define SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */ +#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a + match between the refresh timer + counter and refresh time constant) */ +#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */ +#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */ +#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */ +#define 
SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */ +#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */ +#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */ +#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */ +#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */ +#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */ +#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */ + +#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */ +#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt + Enable */ +#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */ +#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */ +#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */ + +/* Refresh Timer Counter (half) - RTCNT */ +#define SH7750_RTCNT_REGOFS 0x800020 /* offset */ +#define SH7750_RTCNT SH7750_P4_REG32(SH7750_RTCNT_REGOFS) +#define SH7750_RTCNT_A7 SH7750_A7_REG32(SH7750_RTCNT_REGOFS) + +#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */ + +/* Refresh Time Constant Register (half) - RTCOR */ +#define SH7750_RTCOR_REGOFS 0x800024 /* offset */ +#define SH7750_RTCOR SH7750_P4_REG32(SH7750_RTCOR_REGOFS) +#define SH7750_RTCOR_A7 SH7750_A7_REG32(SH7750_RTCOR_REGOFS) + +#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */ + +/* Refresh Count Register (half) - RFCR */ +#define SH7750_RFCR_REGOFS 0x800028 /* offset */ +#define SH7750_RFCR SH7750_P4_REG32(SH7750_RFCR_REGOFS) +#define SH7750_RFCR_A7 SH7750_A7_REG32(SH7750_RFCR_REGOFS) + +#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */ + +/* + * Direct Memory Access Controller (DMAC) + */ + +/* DMA Source Address Register - SAR0, SAR1, SAR2, SAR3 */ +#define SH7750_SAR_REGOFS(n) (0xA00000 + ((n)*16)) /* offset */ +#define SH7750_SAR(n) SH7750_P4_REG32(SH7750_SAR_REGOFS(n)) +#define SH7750_SAR_A7(n) SH7750_A7_REG32(SH7750_SAR_REGOFS(n)) +#define SH7750_SAR0 SH7750_SAR(0) +#define 
SH7750_SAR1 SH7750_SAR(1) +#define SH7750_SAR2 SH7750_SAR(2) +#define SH7750_SAR3 SH7750_SAR(3) +#define SH7750_SAR0_A7 SH7750_SAR_A7(0) +#define SH7750_SAR1_A7 SH7750_SAR_A7(1) +#define SH7750_SAR2_A7 SH7750_SAR_A7(2) +#define SH7750_SAR3_A7 SH7750_SAR_A7(3) + +/* DMA Destination Address Register - DAR0, DAR1, DAR2, DAR3 */ +#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n)*16)) /* offset */ +#define SH7750_DAR(n) SH7750_P4_REG32(SH7750_DAR_REGOFS(n)) +#define SH7750_DAR_A7(n) SH7750_A7_REG32(SH7750_DAR_REGOFS(n)) +#define SH7750_DAR0 SH7750_DAR(0) +#define SH7750_DAR1 SH7750_DAR(1) +#define SH7750_DAR2 SH7750_DAR(2) +#define SH7750_DAR3 SH7750_DAR(3) +#define SH7750_DAR0_A7 SH7750_DAR_A7(0) +#define SH7750_DAR1_A7 SH7750_DAR_A7(1) +#define SH7750_DAR2_A7 SH7750_DAR_A7(2) +#define SH7750_DAR3_A7 SH7750_DAR_A7(3) + +/* DMA Transfer Count Register - DMATCR0, DMATCR1, DMATCR2, DMATCR3 */ +#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n)*16)) /* offset */ +#define SH7750_DMATCR(n) SH7750_P4_REG32(SH7750_DMATCR_REGOFS(n)) +#define SH7750_DMATCR_A7(n) SH7750_A7_REG32(SH7750_DMATCR_REGOFS(n)) +#define SH7750_DMATCR0_P4 SH7750_DMATCR(0) +#define SH7750_DMATCR1_P4 SH7750_DMATCR(1) +#define SH7750_DMATCR2_P4 SH7750_DMATCR(2) +#define SH7750_DMATCR3_P4 SH7750_DMATCR(3) +#define SH7750_DMATCR0_A7 SH7750_DMATCR_A7(0) +#define SH7750_DMATCR1_A7 SH7750_DMATCR_A7(1) +#define SH7750_DMATCR2_A7 SH7750_DMATCR_A7(2) +#define SH7750_DMATCR3_A7 SH7750_DMATCR_A7(3) + +/* DMA Channel Control Register - CHCR0, CHCR1, CHCR2, CHCR3 */ +#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n)*16)) /* offset */ +#define SH7750_CHCR(n) SH7750_P4_REG32(SH7750_CHCR_REGOFS(n)) +#define SH7750_CHCR_A7(n) SH7750_A7_REG32(SH7750_CHCR_REGOFS(n)) +#define SH7750_CHCR0 SH7750_CHCR(0) +#define SH7750_CHCR1 SH7750_CHCR(1) +#define SH7750_CHCR2 SH7750_CHCR(2) +#define SH7750_CHCR3 SH7750_CHCR(3) +#define SH7750_CHCR0_A7 SH7750_CHCR_A7(0) +#define SH7750_CHCR1_A7 SH7750_CHCR_A7(1) +#define SH7750_CHCR2_A7 
SH7750_CHCR_A7(2) +#define SH7750_CHCR3_A7 SH7750_CHCR_A7(3) + +#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */ +#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */ +#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */ +#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */ +#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */ + +#define SH7750_CHCR_STC 0x10000000 /* Source Address Wait Control Select, + specifies CS5 or CS6 space wait + control for PCMCIA access */ + +#define SH7750_CHCR_DSA 0x0E000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */ +#define SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */ +#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */ +#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */ +#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */ + +#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control + Select, specifies CS5 or CS6 + space wait control for PCMCIA + access */ + +#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */ +#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */ +#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */ + +#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */ +#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high 
out */ +#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */ + +#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */ +#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */ +#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle*/ + +#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */ +#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */ +#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */ + +#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */ +#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */ +#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */ +#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */ + +#define SH7750_CHCR_SM 0x00003000 /* Source Address Mode: */ +#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */ +#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */ +#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */ + +#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */ +#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Address + Mode (External Addr Space-> + External Addr Space) */ +#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single + Address Mode (External Addr + Space -> External Device) */ +#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single + Address Mode, (External + Device -> External Addr + Space)*/ +#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr + Space -> External Addr Space)*/ + +#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr + Space -> On-chip Peripheral + Module) */ +#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip + Peripheral Module -> + External Addr Space */ +#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr + transfer request (external + address space -> SCTDR1) */ 
+#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr + transfer request (SCRDR1 -> + External Addr Space) */ +#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF Transmit-Data-Empty intr + transfer request (external + address space -> SCFTDR1) */ +#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr + transfer request (SCFRDR2 -> + External Addr Space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture + interrupt), (external address + space -> external address + space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture + interrupt), (external address + space -> on-chip peripheral + module) */ +#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture + interrupt), (on-chip + peripheral module -> external + address space) */ + +#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */ +#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */ +#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */ + +#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */ +#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */ +#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */ +#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */ +#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */ +#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */ + +#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */ +#define SH7750_CHCR_TE 0x00000002 /* Transfer End */ +#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */ + +/* DMA Operation Register - DMAOR */ +#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */ +#define SH7750_DMAOR SH7750_P4_REG32(SH7750_DMAOR_REGOFS) +#define SH7750_DMAOR_A7 SH7750_A7_REG32(SH7750_DMAOR_REGOFS) + +#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */ + +#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */ +#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 
> CH2 > CH3 */ +#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */ +#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */ +#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */ + +#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */ +#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */ +#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */ +#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */ + +/* + * Serial Communication Interface - SCI + * Serial Communication Interface with FIFO - SCIF + */ +/* SCI Receive Data Register (byte, read-only) - SCRDR1, SCFRDR2 */ +#define SH7750_SCRDR_REGOFS(n) ((n) == 1 ? 0xE00014 : 0xE80014) /* offset */ +#define SH7750_SCRDR(n) SH7750_P4_REG32(SH7750_SCRDR_REGOFS(n)) +#define SH7750_SCRDR1 SH7750_SCRDR(1) +#define SH7750_SCRDR2 SH7750_SCRDR(2) +#define SH7750_SCRDR_A7(n) SH7750_A7_REG32(SH7750_SCRDR_REGOFS(n)) +#define SH7750_SCRDR1_A7 SH7750_SCRDR_A7(1) +#define SH7750_SCRDR2_A7 SH7750_SCRDR_A7(2) + +/* SCI Transmit Data Register (byte) - SCTDR1, SCFTDR2 */ +#define SH7750_SCTDR_REGOFS(n) ((n) == 1 ? 0xE0000C : 0xE8000C) /* offset */ +#define SH7750_SCTDR(n) SH7750_P4_REG32(SH7750_SCTDR_REGOFS(n)) +#define SH7750_SCTDR1 SH7750_SCTDR(1) +#define SH7750_SCTDR2 SH7750_SCTDR(2) +#define SH7750_SCTDR_A7(n) SH7750_A7_REG32(SH7750_SCTDR_REGOFS(n)) +#define SH7750_SCTDR1_A7 SH7750_SCTDR_A7(1) +#define SH7750_SCTDR2_A7 SH7750_SCTDR_A7(2) + +/* SCI Serial Mode Register - SCSMR1(byte), SCSMR2(half) */ +#define SH7750_SCSMR_REGOFS(n) ((n) == 1 ? 
0xE00000 : 0xE80000) /* offset */ +#define SH7750_SCSMR(n) SH7750_P4_REG32(SH7750_SCSMR_REGOFS(n)) +#define SH7750_SCSMR1 SH7750_SCSMR(1) +#define SH7750_SCSMR2 SH7750_SCSMR(2) +#define SH7750_SCSMR_A7(n) SH7750_A7_REG32(SH7750_SCSMR_REGOFS(n)) +#define SH7750_SCSMR1_A7 SH7750_SCSMR_A7(1) +#define SH7750_SCSMR2_A7 SH7750_SCSMR_A7(2) + +#define SH7750_SCSMR1_CA 0x80 /* Communication Mode (C/A\): */ +#define SH7750_SCSMR1_CA_ASYNC 0x00 /* Asynchronous Mode */ +#define SH7750_SCSMR1_CA_SYNC 0x80 /* Synchronous Mode */ +#define SH7750_SCSMR_CHR 0x40 /* Character Length: */ +#define SH7750_SCSMR_CHR_8 0x00 /* 8-bit data */ +#define SH7750_SCSMR_CHR_7 0x40 /* 7-bit data */ +#define SH7750_SCSMR_PE 0x20 /* Parity Enable */ +#define SH7750_SCSMR_PM 0x10 /* Parity Mode: */ +#define SH7750_SCSMR_PM_EVEN 0x00 /* Even Parity */ +#define SH7750_SCSMR_PM_ODD 0x10 /* Odd Parity */ +#define SH7750_SCSMR_STOP 0x08 /* Stop Bit Length: */ +#define SH7750_SCSMR_STOP_1 0x00 /* 1 stop bit */ +#define SH7750_SCSMR_STOP_2 0x08 /* 2 stop bit */ +#define SH7750_SCSMR1_MP 0x04 /* Multiprocessor Mode */ +#define SH7750_SCSMR_CKS 0x03 /* Clock Select */ +#define SH7750_SCSMR_CKS_S 0 +#define SH7750_SCSMR_CKS_DIV1 0x00 /* Periph clock */ +#define SH7750_SCSMR_CKS_DIV4 0x01 /* Periph clock / 4 */ +#define SH7750_SCSMR_CKS_DIV16 0x02 /* Periph clock / 16 */ +#define SH7750_SCSMR_CKS_DIV64 0x03 /* Periph clock / 64 */ + +/* SCI Serial Control Register - SCSCR1(byte), SCSCR2(half) */ +#define SH7750_SCSCR_REGOFS(n) ((n) == 1 ? 
0xE00008 : 0xE80008) /* offset */ +#define SH7750_SCSCR(n) SH7750_P4_REG32(SH7750_SCSCR_REGOFS(n)) +#define SH7750_SCSCR1 SH7750_SCSCR(1) +#define SH7750_SCSCR2 SH7750_SCSCR(2) +#define SH7750_SCSCR_A7(n) SH7750_A7_REG32(SH7750_SCSCR_REGOFS(n)) +#define SH7750_SCSCR1_A7 SH7750_SCSCR_A7(1) +#define SH7750_SCSCR2_A7 SH7750_SCSCR_A7(2) + +#define SH7750_SCSCR_TIE 0x80 /* Transmit Interrupt Enable */ +#define SH7750_SCSCR_RIE 0x40 /* Receive Interrupt Enable */ +#define SH7750_SCSCR_TE 0x20 /* Transmit Enable */ +#define SH7750_SCSCR_RE 0x10 /* Receive Enable */ +#define SH7750_SCSCR1_MPIE 0x08 /* Multiprocessor Interrupt Enable */ +#define SH7750_SCSCR2_REIE 0x08 /* Receive Error Interrupt Enable */ +#define SH7750_SCSCR1_TEIE 0x04 /* Transmit End Interrupt Enable */ +#define SH7750_SCSCR1_CKE 0x03 /* Clock Enable: */ +#define SH7750_SCSCR_CKE_INTCLK 0x00 /* Use Internal Clock */ +#define SH7750_SCSCR_CKE_EXTCLK 0x02 /* Use External Clock from SCK*/ +#define SH7750_SCSCR1_CKE_ASYNC_SCK_CLKOUT 0x01 /* Use SCK as a clock output + in asynchronous mode */ + +/* SCI Serial Status Register - SCSSR1(byte), SCSSR2(half) */ +#define SH7750_SCSSR_REGOFS(n) ((n) == 1 ? 
0xE00010 : 0xE80010) /* offset */ +#define SH7750_SCSSR(n) SH7750_P4_REG32(SH7750_SCSSR_REGOFS(n)) +#define SH7750_SCSSR1 SH7750_SCSSR(1) +#define SH7750_SCSSR2 SH7750_SCSSR(2) +#define SH7750_SCSSR_A7(n) SH7750_A7_REG32(SH7750_SCSSR_REGOFS(n)) +#define SH7750_SCSSR1_A7 SH7750_SCSSR_A7(1) +#define SH7750_SCSSR2_A7 SH7750_SCSSR_A7(2) + +#define SH7750_SCSSR1_TDRE 0x80 /* Transmit Data Register Empty */ +#define SH7750_SCSSR1_RDRF 0x40 /* Receive Data Register Full */ +#define SH7750_SCSSR1_ORER 0x20 /* Overrun Error */ +#define SH7750_SCSSR1_FER 0x10 /* Framing Error */ +#define SH7750_SCSSR1_PER 0x08 /* Parity Error */ +#define SH7750_SCSSR1_TEND 0x04 /* Transmit End */ +#define SH7750_SCSSR1_MPB 0x02 /* Multiprocessor Bit */ +#define SH7750_SCSSR1_MPBT 0x01 /* Multiprocessor Bit Transfer */ + +#define SH7750_SCSSR2_PERN 0xF000 /* Number of Parity Errors */ +#define SH7750_SCSSR2_PERN_S 12 +#define SH7750_SCSSR2_FERN 0x0F00 /* Number of Framing Errors */ +#define SH7750_SCSSR2_FERN_S 8 +#define SH7750_SCSSR2_ER 0x0080 /* Receive Error */ +#define SH7750_SCSSR2_TEND 0x0040 /* Transmit End */ +#define SH7750_SCSSR2_TDFE 0x0020 /* Transmit FIFO Data Empty */ +#define SH7750_SCSSR2_BRK 0x0010 /* Break Detect */ +#define SH7750_SCSSR2_FER 0x0008 /* Framing Error */ +#define SH7750_SCSSR2_PER 0x0004 /* Parity Error */ +#define SH7750_SCSSR2_RDF 0x0002 /* Receive FIFO Data Full */ +#define SH7750_SCSSR2_DR 0x0001 /* Receive Data Ready */ + +/* SCI Serial Port Register - SCSPTR1(byte) */ +#define SH7750_SCSPTR1_REGOFS 0xE0001C /* offset */ +#define SH7750_SCSPTR1 SH7750_P4_REG32(SH7750_SCSPTR1_REGOFS) +#define SH7750_SCSPTR1_A7 SH7750_A7_REG32(SH7750_SCSPTR1_REGOFS) + +#define SH7750_SCSPTR1_EIO 0x80 /* Error Interrupt Only */ +#define SH7750_SCSPTR1_SPB1IO 0x08 /* 1: Output SPB1DT bit to SCK pin */ +#define SH7750_SCSPTR1_SPB1DT 0x04 /* Serial Port Clock Port Data */ +#define SH7750_SCSPTR1_SPB0IO 0x02 /* 1: Output SPB0DT bit to TxD pin */ +#define SH7750_SCSPTR1_SPB0DT 
0x01 /* Serial Port Break Data */ + +/* SCIF Serial Port Register - SCSPTR2(half) */ +#define SH7750_SCSPTR2_REGOFS 0xE80020 /* offset */ +#define SH7750_SCSPTR2 SH7750_P4_REG32(SH7750_SCSPTR2_REGOFS) +#define SH7750_SCSPTR2_A7 SH7750_A7_REG32(SH7750_SCSPTR2_REGOFS) + +#define SH7750_SCSPTR2_RTSIO 0x80 /* 1: Output RTSDT bit to RTS2\ pin */ +#define SH7750_SCSPTR2_RTSDT 0x40 /* RTS Port Data */ +#define SH7750_SCSPTR2_CTSIO 0x20 /* 1: Output CTSDT bit to CTS2\ pin */ +#define SH7750_SCSPTR2_CTSDT 0x10 /* CTS Port Data */ +#define SH7750_SCSPTR2_SPB2IO 0x02 /* 1: Output SPBDT bit to TxD2 pin */ +#define SH7750_SCSPTR2_SPB2DT 0x01 /* Serial Port Break Data */ + +/* SCI Bit Rate Register - SCBRR1(byte), SCBRR2(byte) */ +#define SH7750_SCBRR_REGOFS(n) ((n) == 1 ? 0xE00004 : 0xE80004) /* offset */ +#define SH7750_SCBRR(n) SH7750_P4_REG32(SH7750_SCBRR_REGOFS(n)) +#define SH7750_SCBRR1 SH7750_SCBRR_P4(1) +#define SH7750_SCBRR2 SH7750_SCBRR_P4(2) +#define SH7750_SCBRR_A7(n) SH7750_A7_REG32(SH7750_SCBRR_REGOFS(n)) +#define SH7750_SCBRR1_A7 SH7750_SCBRR(1) +#define SH7750_SCBRR2_A7 SH7750_SCBRR(2) + +/* SCIF FIFO Control Register - SCFCR2(half) */ +#define SH7750_SCFCR2_REGOFS 0xE80018 /* offset */ +#define SH7750_SCFCR2 SH7750_P4_REG32(SH7750_SCFCR2_REGOFS) +#define SH7750_SCFCR2_A7 SH7750_A7_REG32(SH7750_SCFCR2_REGOFS) + +#define SH7750_SCFCR2_RSTRG 0x700 /* RTS2\ Output Active Trigger; RTS2\ + signal goes to high level when the + number of received data stored in + FIFO exceeds the trigger number */ +#define SH7750_SCFCR2_RSTRG_15 0x000 /* 15 bytes */ +#define SH7750_SCFCR2_RSTRG_1 0x000 /* 1 byte */ +#define SH7750_SCFCR2_RSTRG_4 0x000 /* 4 bytes */ +#define SH7750_SCFCR2_RSTRG_6 0x000 /* 6 bytes */ +#define SH7750_SCFCR2_RSTRG_8 0x000 /* 8 bytes */ +#define SH7750_SCFCR2_RSTRG_10 0x000 /* 10 bytes */ +#define SH7750_SCFCR2_RSTRG_14 0x000 /* 14 bytes */ + +#define SH7750_SCFCR2_RTRG 0x0C0 /* Receive FIFO Data Number Trigger, + Receive Data Full (RDF) Flag sets + when 
number of receive data bytes is + equal or greater than the trigger + number */ +#define SH7750_SCFCR2_RTRG_1 0x000 /* 1 byte */ +#define SH7750_SCFCR2_RTRG_4 0x040 /* 4 bytes */ +#define SH7750_SCFCR2_RTRG_8 0x080 /* 8 bytes */ +#define SH7750_SCFCR2_RTRG_14 0x0C0 /* 14 bytes */ + +#define SH7750_SCFCR2_TTRG 0x030 /* Transmit FIFO Data Number Trigger, + Transmit FIFO Data Register Empty (TDFE) + flag sets when the number of remaining + transmit data bytes is equal or less + than the trigger number */ +#define SH7750_SCFCR2_TTRG_8 0x000 /* 8 bytes */ +#define SH7750_SCFCR2_TTRG_4 0x010 /* 4 bytes */ +#define SH7750_SCFCR2_TTRG_2 0x020 /* 2 bytes */ +#define SH7750_SCFCR2_TTRG_1 0x030 /* 1 byte */ + +#define SH7750_SCFCR2_MCE 0x008 /* Modem Control Enable */ +#define SH7750_SCFCR2_TFRST 0x004 /* Transmit FIFO Data Register Reset, + invalidates the transmit data in the + transmit FIFO */ +#define SH7750_SCFCR2_RFRST 0x002 /* Receive FIFO Data Register Reset, + invalidates the receive data in the + receive FIFO data register and resets + it to the empty state */ +#define SH7750_SCFCR2_LOOP 0x001 /* Loopback Test */ + +/* SCIF FIFO Data Count Register - SCFDR2(half, read-only) */ +#define SH7750_SCFDR2_REGOFS 0xE8001C /* offset */ +#define SH7750_SCFDR2 SH7750_P4_REG32(SH7750_SCFDR2_REGOFS) +#define SH7750_SCFDR2_A7 SH7750_A7_REG32(SH7750_SCFDR2_REGOFS) + +#define SH7750_SCFDR2_T 0x1F00 /* Number of untransmitted data bytes + in transmit FIFO */ +#define SH7750_SCFDR2_T_S 8 +#define SH7750_SCFDR2_R 0x001F /* Number of received data bytes in + receive FIFO */ +#define SH7750_SCFDR2_R_S 0 + +/* SCIF Line Status Register - SCLSR2(half, read-only) */ +#define SH7750_SCLSR2_REGOFS 0xE80024 /* offset */ +#define SH7750_SCLSR2 SH7750_P4_REG32(SH7750_SCLSR2_REGOFS) +#define SH7750_SCLSR2_A7 SH7750_A7_REG32(SH7750_SCLSR2_REGOFS) + +#define SH7750_SCLSR2_ORER 0x0001 /* Overrun Error */ + +/* + * SCI-based Smart Card Interface + */ +/* Smart Card Mode Register - SCSCMR1(byte) */ 
+#define SH7750_SCSCMR1_REGOFS 0xE00018 /* offset */ +#define SH7750_SCSCMR1 SH7750_P4_REG32(SH7750_SCSCMR1_REGOFS) +#define SH7750_SCSCMR1_A7 SH7750_A7_REG32(SH7750_SCSCMR1_REGOFS) + +#define SH7750_SCSCMR1_SDIR 0x08 /* Smart Card Data Transfer Direction: */ +#define SH7750_SCSCMR1_SDIR_LSBF 0x00 /* LSB-first */ +#define SH7750_SCSCMR1_SDIR_MSBF 0x08 /* MSB-first */ + +#define SH7750_SCSCMR1_SINV 0x04 /* Smart Card Data Inversion */ +#define SH7750_SCSCMR1_SMIF 0x01 /* Smart Card Interface Mode Select */ + +/* Smart-card specific bits in other registers */ +/* SCSMR1: */ +#define SH7750_SCSMR1_GSM 0x80 /* GSM mode select */ + +/* SCSSR1: */ +#define SH7750_SCSSR1_ERS 0x10 /* Error Signal Status */ + +/* + * I/O Ports + */ +/* Port Control Register A - PCTRA */ +#define SH7750_PCTRA_REGOFS 0x80002C /* offset */ +#define SH7750_PCTRA SH7750_P4_REG32(SH7750_PCTRA_REGOFS) +#define SH7750_PCTRA_A7 SH7750_A7_REG32(SH7750_PCTRA_REGOFS) + +#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRA_PBNPUP(n) (1 << ((n)*2+1)) /* Bit n is not pulled up */ +#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRA_PBOUT(n) (1 << ((n)*2)) /* Bit n is an output */ + +/* Port Data Register A - PDTRA(half) */ +#define SH7750_PDTRA_REGOFS 0x800030 /* offset */ +#define SH7750_PDTRA SH7750_P4_REG32(SH7750_PDTRA_REGOFS) +#define SH7750_PDTRA_A7 SH7750_A7_REG32(SH7750_PDTRA_REGOFS) + +#define SH7750_PDTRA_BIT(n) (1 << (n)) + +/* Port Control Register B - PCTRB */ +#define SH7750_PCTRB_REGOFS 0x800040 /* offset */ +#define SH7750_PCTRB SH7750_P4_REG32(SH7750_PCTRB_REGOFS) +#define SH7750_PCTRB_A7 SH7750_A7_REG32(SH7750_PCTRB_REGOFS) + +#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRB_PBNPUP(n) (1 << ((n-16)*2+1)) /* Bit n is not pulled up */ +#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRB_PBOUT(n) (1 << ((n-16)*2)) /* Bit n is an output */ + +/* Port Data Register B - PDTRB(half) */ 
+#define SH7750_PDTRB_REGOFS 0x800044 /* offset */ +#define SH7750_PDTRB SH7750_P4_REG32(SH7750_PDTRB_REGOFS) +#define SH7750_PDTRB_A7 SH7750_A7_REG32(SH7750_PDTRB_REGOFS) + +#define SH7750_PDTRB_BIT(n) (1 << ((n)-16)) + +/* GPIO Interrupt Control Register - GPIOIC(half) */ +#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */ +#define SH7750_GPIOIC SH7750_P4_REG32(SH7750_GPIOIC_REGOFS) +#define SH7750_GPIOIC_A7 SH7750_A7_REG32(SH7750_GPIOIC_REGOFS) + +#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */ + +/* + * Interrupt Controller - INTC + */ +/* Interrupt Control Register - ICR (half) */ +#define SH7750_ICR_REGOFS 0xD00000 /* offset */ +#define SH7750_ICR SH7750_P4_REG32(SH7750_ICR_REGOFS) +#define SH7750_ICR_A7 SH7750_A7_REG32(SH7750_ICR_REGOFS) + +#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */ +#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */ + +#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */ +#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while + SR.BL bit is set to 1 */ +#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL bit + set to 1 */ + +#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */ +#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on falling + edge of NMI input */ +#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on rising + edge of NMI input */ + +#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */ +#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as a level-encoded + interrupt requests */ +#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as a four independent + interrupt requests */ + +/* Interrupt Priority Register A - IPRA (half) */ +#define SH7750_IPRA_REGOFS 0xD00004 /* offset */ +#define SH7750_IPRA SH7750_P4_REG32(SH7750_IPRA_REGOFS) +#define SH7750_IPRA_A7 SH7750_A7_REG32(SH7750_IPRA_REGOFS) + +#define SH7750_IPRA_TMU0 0xF000 /* TMU0 interrupt priority */ +#define SH7750_IPRA_TMU0_S 12 +#define SH7750_IPRA_TMU1 0x0F00 
/* TMU1 interrupt priority */ +#define SH7750_IPRA_TMU1_S 8 +#define SH7750_IPRA_TMU2 0x00F0 /* TMU2 interrupt priority */ +#define SH7750_IPRA_TMU2_S 4 +#define SH7750_IPRA_RTC 0x000F /* RTC interrupt priority */ +#define SH7750_IPRA_RTC_S 0 + +/* Interrupt Priority Register B - IPRB (half) */ +#define SH7750_IPRB_REGOFS 0xD00008 /* offset */ +#define SH7750_IPRB SH7750_P4_REG32(SH7750_IPRB_REGOFS) +#define SH7750_IPRB_A7 SH7750_A7_REG32(SH7750_IPRB_REGOFS) + +#define SH7750_IPRB_WDT 0xF000 /* WDT interrupt priority */ +#define SH7750_IPRB_WDT_S 12 +#define SH7750_IPRB_REF 0x0F00 /* Memory Refresh unit interrupt + priority */ +#define SH7750_IPRB_REF_S 8 +#define SH7750_IPRB_SCI1 0x00F0 /* SCI1 interrupt priority */ +#define SH7750_IPRB_SCI1_S 4 + +/* Interrupt Priority Register C - IPRC (half) */ +#define SH7750_IPRC_REGOFS 0xD00004 /* offset */ +#define SH7750_IPRC SH7750_P4_REG32(SH7750_IPRC_REGOFS) +#define SH7750_IPRC_A7 SH7750_A7_REG32(SH7750_IPRC_REGOFS) + +#define SH7750_IPRC_GPIO 0xF000 /* GPIO interrupt priority */ +#define SH7750_IPRC_GPIO_S 12 +#define SH7750_IPRC_DMAC 0x0F00 /* DMAC interrupt priority */ +#define SH7750_IPRC_DMAC_S 8 +#define SH7750_IPRC_SCIF 0x00F0 /* SCIF interrupt priority */ +#define SH7750_IPRC_SCIF_S 4 +#define SH7750_IPRC_HUDI 0x000F /* H-UDI interrupt priority */ +#define SH7750_IPRC_HUDI_S 0 + + +/* + * User Break Controller registers + */ +#define SH7750_BARA 0x200000 /* Break address register A */ +#define SH7750_BAMRA 0x200004 /* Break address mask register A */ +#define SH7750_BBRA 0x200008 /* Break bus cycle register A */ +#define SH7750_BARB 0x20000c /* Break address register B */ +#define SH7750_BAMRB 0x200010 /* Break address mask register B */ +#define SH7750_BBRB 0x200014 /* Break bus cycle register B */ +#define SH7750_BASRB 0x000018 /* Break ASID register B */ +#define SH7750_BDRB 0x200018 /* Break data register B */ +#define SH7750_BDMRB 0x20001c /* Break data mask register B */ +#define SH7750_BRCR 0x200020 /* Break 
control register */ + +#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */ + +#endif diff --git a/include/rtems/score/sh_io.h b/include/rtems/score/sh_io.h new file mode 100644 index 0000000000..8d81965f78 --- /dev/null +++ b/include/rtems/score/sh_io.h @@ -0,0 +1,51 @@ +/** + * @file + * + * @brief Macros to Access Memory Mapped Devices on the SH7000-Architecture + * + * These are some macros to access memory mapped devices + * on the SH7000-architecture. + */ + +/* + * Inspired from the linux kernel's include/asm/io.h + * + * Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and + * Bernd Becker (becker@faw.uni-ulm.de) + * + * COPYRIGHT (c) 1996-1998, FAW Ulm, Germany + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * + * COPYRIGHT (c) 1998-2001. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SH_IO_H +#define _RTEMS_SCORE_SH_IO_H + +#define readb(addr) (*(volatile unsigned char *) (addr)) +#define readw(addr) (*(volatile unsigned short *) (addr)) +#define readl(addr) (*(volatile unsigned int *) (addr)) +#define read8(addr) (*(volatile uint8_t *) (addr)) +#define read16(addr) (*(volatile uint16_t *) (addr)) +#define read32(addr) (*(volatile uint32_t *) (addr)) + +#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b)) +#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b)) +#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b)) +#define write8(b,addr) ((*(volatile uint8_t *) (addr)) = (b)) +#define write16(b,addr) ((*(volatile uint16_t *) (addr)) = (b)) +#define write32(b,addr) ((*(volatile uint32_t *) (addr)) = (b)) + +#define inb(addr) readb(addr) +#define outb(b,addr) writeb(b,addr) + +#endif diff --git a/include/rtems/score/smp.h b/include/rtems/score/smp.h new file mode 100644 index 0000000000..8886a57dc6 --- /dev/null +++ b/include/rtems/score/smp.h @@ -0,0 +1,64 @@ +/** + * @file + * + * @ingroup ScoreSMP + * + * @brief SuperCore SMP Support API + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SMP_H +#define _RTEMS_SCORE_SMP_H + +#include <rtems/score/cpu.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSMP SMP Support + * + * @ingroup Score + * + * This defines the interface of the SuperCore SMP support. 
+ * + * @{ + */ + +#if defined( RTEMS_SMP ) + SCORE_EXTERN uint32_t _SMP_Processor_count; + + static inline uint32_t _SMP_Get_processor_count( void ) + { + return _SMP_Processor_count; + } +#else + #define _SMP_Get_processor_count() UINT32_C(1) +#endif + +#if defined( RTEMS_SMP ) + static inline uint32_t _SMP_Get_current_processor( void ) + { + return _CPU_SMP_Get_current_processor(); + } +#else + #define _SMP_Get_current_processor() UINT32_C(0) +#endif + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/smpbarrier.h b/include/rtems/score/smpbarrier.h new file mode 100644 index 0000000000..8225450b09 --- /dev/null +++ b/include/rtems/score/smpbarrier.h @@ -0,0 +1,122 @@ +/** + * @file + * + * @ingroup ScoreSMPBarrier + * + * @brief SMP Barrier API + */ + +/* + * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SMPBARRIER_H +#define _RTEMS_SCORE_SMPBARRIER_H + +#include <rtems/score/cpuopts.h> +#include <rtems/score/atomic.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreSMPBarrier SMP Barriers + * + * @ingroup Score + * + * @brief The SMP barrier provides barrier synchronization for SMP systems at + * the lowest level. + * + * The SMP barrier is implemented as a sense barrier, see also Herlihy and + * Shavit, "The Art of Multiprocessor Programming", 17.3 Sense-Reversing + * Barrier. + * + * @{ + */ + +/** + * @brief SMP barrier control. + */ +typedef struct { + Atomic_Uint value; + Atomic_Uint sense; +} SMP_barrier_Control; + +/** + * @brief SMP barrier per-thread state. + * + * Each user of the barrier must provide this per-thread state. 
+ */ +typedef struct { + unsigned int sense; +} SMP_barrier_State; + +/** + * @brief SMP barrier control initializer for static initialization. + */ +#define SMP_BARRIER_CONTROL_INITIALIZER \ + { ATOMIC_INITIALIZER_UINT( 0U ), ATOMIC_INITIALIZER_UINT( 0U ) } + +/** + * @brief SMP barrier per-thread state initializer for static initialization. + */ +#define SMP_BARRIER_STATE_INITIALIZER { 0U } + +/** + * @brief Initializes a SMP barrier control. + * + * Concurrent initialization leads to unpredictable results. + * + * @param[out] control The SMP barrier control. + */ +static inline void _SMP_barrier_Control_initialize( + SMP_barrier_Control *control +) +{ + _Atomic_Init_uint( &control->value, 0U ); + _Atomic_Init_uint( &control->sense, 0U ); +} + +/** + * @brief Initializes a SMP barrier per-thread state. + * + * @param[out] state The SMP barrier control. + */ +static inline void _SMP_barrier_State_initialize( + SMP_barrier_State *state +) +{ + state->sense = 0U; +} + +/** + * @brief Waits on the SMP barrier until count threads rendezvoused. + * + * @param[in, out] control The SMP barrier control. + * @param[in, out] state The SMP barrier per-thread state. + * @param[in] count The thread count bound to rendezvous. + */ +void _SMP_barrier_Wait( + SMP_barrier_Control *control, + SMP_barrier_State *state, + unsigned int count +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SMPBARRIER_H */ diff --git a/include/rtems/score/smpimpl.h b/include/rtems/score/smpimpl.h new file mode 100644 index 0000000000..09c47ecf16 --- /dev/null +++ b/include/rtems/score/smpimpl.h @@ -0,0 +1,280 @@ +/** + * @file + * + * @ingroup ScoreSMPImpl + * + * @brief SuperCore SMP Implementation + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SMPIMPL_H +#define _RTEMS_SCORE_SMPIMPL_H + +#include <rtems/score/smp.h> +#include <rtems/score/percpu.h> +#include <rtems/fatal.h> +#include <rtems/rtems/cache.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreSMP SMP Support + * + * @ingroup Score + * + * This defines the interface of the SuperCore SMP support. + * + * @{ + */ + +/** + * @brief SMP message to request a processor shutdown. + * + * @see _SMP_Send_message(). + */ +#define SMP_MESSAGE_SHUTDOWN 0x1UL + +/** + * @brief SMP message to request a test handler invocation. + * + * @see _SMP_Send_message(). + */ +#define SMP_MESSAGE_TEST 0x2UL + +/** + * @brief SMP message to request a multicast action. + * + * @see _SMP_Send_message(). + */ +#define SMP_MESSAGE_MULTICAST_ACTION 0x4UL + +/** + * @brief SMP fatal codes. + */ +typedef enum { + SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER, + SMP_FATAL_MANDATORY_PROCESSOR_NOT_PRESENT, + SMP_FATAL_MULTITASKING_START_ON_INVALID_PROCESSOR, + SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR, + SMP_FATAL_SHUTDOWN, + SMP_FATAL_SHUTDOWN_RESPONSE, + SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED +} SMP_Fatal_code; + +static inline void _SMP_Fatal( SMP_Fatal_code code ) +{ + _Terminate( RTEMS_FATAL_SOURCE_SMP, false, code ); +} + +/** + * @brief Initialize SMP Handler + * + * This method initialize the SMP Handler. + */ +#if defined( RTEMS_SMP ) + void _SMP_Handler_initialize( void ); +#else + #define _SMP_Handler_initialize() \ + do { } while ( 0 ) +#endif + +#if defined( RTEMS_SMP ) + +/** + * @brief Performs high-level initialization of a secondary processor and runs + * the application threads. + * + * The low-level initialization code must call this function to hand over the + * control of this processor to RTEMS. Interrupts must be disabled. It must + * be possible to send inter-processor interrupts to this processor. 
Since + * interrupts are disabled the inter-processor interrupt delivery is postponed + * until interrupts are enabled the first time. Interrupts are enabled during + * the execution begin of threads in case they have interrupt level zero (this + * is the default). + * + * The pre-requisites for the call to this function are + * - disabled interrupts, + * - delivery of inter-processor interrupts is possible, + * - a valid stack pointer and enough stack space, + * - a valid code memory, and + * - a valid BSS section. + * + * This function must not be called by the main processor. The main processor + * uses _Thread_Start_multitasking() instead. + * + * This function does not return to the caller. + */ +void _SMP_Start_multitasking_on_secondary_processor( void ) + RTEMS_NO_RETURN; + +typedef void ( *SMP_Test_message_handler )( Per_CPU_Control *cpu_self ); + +extern SMP_Test_message_handler _SMP_Test_message_handler; + +/** + * @brief Sets the handler for test messages. + * + * This handler can be used to test the inter-processor interrupt + * implementation. + */ +static inline void _SMP_Set_test_message_handler( + SMP_Test_message_handler handler +) +{ + _SMP_Test_message_handler = handler; +} + +/** + * @brief Processes all pending multicast actions. + */ +void _SMP_Multicast_actions_process( void ); + +/** + * @brief Interrupt handler for inter-processor interrupts. + */ +static inline void _SMP_Inter_processor_interrupt_handler( void ) +{ + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + /* + * In the common case the inter-processor interrupt is issued to carry out a + * thread dispatch. 
+ */ + cpu_self->dispatch_necessary = true; + + if ( _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ) != 0 ) { + unsigned long message = _Atomic_Exchange_ulong( + &cpu_self->message, + 0UL, + ATOMIC_ORDER_RELAXED + ); + + if ( ( message & SMP_MESSAGE_SHUTDOWN ) != 0 ) { + _SMP_Fatal( SMP_FATAL_SHUTDOWN_RESPONSE ); + /* does not continue past here */ + } + + if ( ( message & SMP_MESSAGE_TEST ) != 0 ) { + ( *_SMP_Test_message_handler )( cpu_self ); + } + + if ( ( message & SMP_MESSAGE_MULTICAST_ACTION ) != 0 ) { + _SMP_Multicast_actions_process(); + } + } +} + +/** + * @brief Returns true, if the processor with the specified index should be + * started. + * + * @param[in] cpu_index The processor index. + * + * @retval true The processor should be started. + * @retval false Otherwise. + */ +bool _SMP_Should_start_processor( uint32_t cpu_index ); + +/** + * @brief Sends a SMP message to a processor. + * + * The target processor may be the sending processor. + * + * @param[in] cpu_index The target processor of the message. + * @param[in] message The message. + */ +void _SMP_Send_message( uint32_t cpu_index, unsigned long message ); + +/** + * @brief Request of others CPUs. + * + * This method is invoked by RTEMS when it needs to make a request + * of the other CPUs. It should be implemented using some type of + * interprocessor interrupt. CPUs not including the originating + * CPU should receive the message. + * + * @param [in] message is message to send + */ +void _SMP_Send_message_broadcast( + unsigned long message +); + +/** + * @brief Sends a SMP message to a set of processors. + * + * The sending processor may be part of the set. + * + * @param[in] setsize The size of the set of target processors of the message. + * @param[in] cpus The set of target processors of the message. + * @param[in] message The message. 
+ */ +void _SMP_Send_message_multicast( + const size_t setsize, + const cpu_set_t *cpus, + unsigned long message +); + +typedef void ( *SMP_Multicast_action_handler )( void *arg ); + +/** + * @brief Initiates a SMP multicast action to a set of processors. + * + * The current processor may be part of the set. + * + * @param[in] setsize The size of the set of target processors of the message. + * @param[in] cpus The set of target processors of the message. + * @param[in] handler The multicast action handler. + * @param[in] arg The multicast action argument. + */ +void _SMP_Multicast_action( + const size_t setsize, + const cpu_set_t *cpus, + SMP_Multicast_action_handler handler, + void *arg +); + +#endif /* defined( RTEMS_SMP ) */ + +/** + * @brief Requests a multitasking start on all configured and available + * processors. + */ +#if defined( RTEMS_SMP ) + void _SMP_Request_start_multitasking( void ); +#else + #define _SMP_Request_start_multitasking() \ + do { } while ( 0 ) +#endif + +/** + * @brief Requests a shutdown of all processors. + * + * This function is a part of the system termination procedure. + * + * @see _Terminate(). + */ +#if defined( RTEMS_SMP ) + void _SMP_Request_shutdown( void ); +#else + #define _SMP_Request_shutdown() \ + do { } while ( 0 ) +#endif + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/smplock.h b/include/rtems/score/smplock.h new file mode 100644 index 0000000000..f98f38a6a3 --- /dev/null +++ b/include/rtems/score/smplock.h @@ -0,0 +1,716 @@ +/** + * @file + * + * @ingroup ScoreSMPLock + * + * @brief SMP Lock API + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * Copyright (c) 2013-2015 embedded brains GmbH + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SMPLOCK_H +#define _RTEMS_SCORE_SMPLOCK_H + +#include <rtems/score/cpuopts.h> + +#if defined( RTEMS_SMP ) + +#include <rtems/score/atomic.h> +#include <rtems/score/isrlevel.h> + +#if defined( RTEMS_PROFILING ) +#include <rtems/score/chainimpl.h> +#include <string.h> +#endif + +#if defined( RTEMS_PROFILING ) +#define RTEMS_SMP_LOCK_DO_NOT_INLINE +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreSMPLock SMP Locks + * + * @ingroup Score + * + * @brief The SMP lock provides mutual exclusion for SMP systems at the lowest + * level. + * + * The SMP lock is implemented as a ticket lock. This provides fairness in + * case of concurrent lock attempts. + * + * This SMP lock API uses a local context for acquire and release pairs. Such + * a context may be used to implement for example the Mellor-Crummey and Scott + * (MCS) locks in the future. + * + * @{ + */ + +#if defined( RTEMS_PROFILING ) + +/** + * @brief Count of lock contention counters for lock statistics. + */ +#define SMP_LOCK_STATS_CONTENTION_COUNTS 4 + +/** + * @brief SMP lock statistics. + * + * The lock acquire attempt instant is the point in time right after the + * interrupt disable action in the lock acquire sequence. + * + * The lock acquire instant is the point in time right after the lock + * acquisition. This is the begin of the critical section code execution. + * + * The lock release instant is the point in time right before the interrupt + * enable action in the lock release sequence. + * + * The lock section time is the time elapsed between the lock acquire instant + * and the lock release instant. + * + * The lock acquire time is the time elapsed between the lock acquire attempt + * instant and the lock acquire instant. + */ +typedef struct { + /** + * @brief Node for SMP lock statistics chain. + */ + Chain_Node Node; + + /** + * @brief The maximum lock acquire time in CPU counter ticks. 
+ */ + CPU_Counter_ticks max_acquire_time; + + /** + * @brief The maximum lock section time in CPU counter ticks. + */ + CPU_Counter_ticks max_section_time; + + /** + * @brief The count of lock uses. + * + * This value may overflow. + */ + uint64_t usage_count; + + /** + * @brief Total lock acquire time in nanoseconds. + * + * The average lock acquire time is the total acquire time divided by the + * lock usage count. The ration of the total section and total acquire times + * gives a measure for the lock contention. + * + * This value may overflow. + */ + uint64_t total_acquire_time; + + /** + * @brief The counts of lock acquire operations by contention. + * + * The contention count for index N corresponds to a lock acquire attempt + * with an initial queue length of N. The last index corresponds to all + * lock acquire attempts with an initial queue length greater than or equal + * to SMP_LOCK_STATS_CONTENTION_COUNTS minus one. + * + * The values may overflow. + */ + uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS]; + + /** + * @brief Total lock section time in CPU counter ticks. + * + * The average lock section time is the total section time divided by the + * lock usage count. + * + * This value may overflow. + */ + uint64_t total_section_time; + + /** + * @brief The lock name. + */ + const char *name; +} SMP_lock_Stats; + +/** + * @brief Local context for SMP lock statistics. + */ +typedef struct { + /** + * @brief The last lock acquire instant in CPU counter ticks. + * + * This value is used to measure the lock section time. + */ + CPU_Counter_ticks acquire_instant; + + /** + * @brief The lock stats used for the last lock acquire. + */ + SMP_lock_Stats *stats; +} SMP_lock_Stats_context; + +/** + * @brief SMP lock statistics initializer for static initialization. + */ +#define SMP_LOCK_STATS_INITIALIZER( name ) \ + { { NULL, NULL }, 0, 0, 0, 0, { 0, 0, 0, 0 }, 0, name } + +/** + * @brief Initializes an SMP lock statistics block. 
+ * + * @param[in, out] stats The SMP lock statistics block. + * @param[in] name The name for the SMP lock statistics. This name must be + * persistent throughout the life time of this statistics block. + */ +static inline void _SMP_lock_Stats_initialize( + SMP_lock_Stats *stats, + const char *name +) +{ + SMP_lock_Stats init = SMP_LOCK_STATS_INITIALIZER( name ); + + *stats = init; +} + +/** + * @brief Destroys an SMP lock statistics block. + * + * @param[in] stats The SMP lock statistics block. + */ +static inline void _SMP_lock_Stats_destroy( SMP_lock_Stats *stats ); + +/** + * @brief Updates an SMP lock statistics block during a lock release. + * + * @param[in] stats_context The SMP lock statistics context. + */ +static inline void _SMP_lock_Stats_release_update( + const SMP_lock_Stats_context *stats_context +); + +#else /* RTEMS_PROFILING */ + +#define _SMP_lock_Stats_initialize( stats, name ) do { } while ( 0 ) + +#define _SMP_lock_Stats_destroy( stats ) do { } while ( 0 ) + +#endif /* RTEMS_PROFILING */ + +/** + * @brief SMP ticket lock control. + */ +typedef struct { + Atomic_Uint next_ticket; + Atomic_Uint now_serving; +} SMP_ticket_lock_Control; + +/** + * @brief SMP ticket lock control initializer for static initialization. + */ +#define SMP_TICKET_LOCK_INITIALIZER \ + { \ + ATOMIC_INITIALIZER_UINT( 0U ), \ + ATOMIC_INITIALIZER_UINT( 0U ) \ + } + +/** + * @brief Initializes an SMP ticket lock. + * + * Concurrent initialization leads to unpredictable results. + * + * @param[in] lock The SMP ticket lock control. + * @param[in] name The name for the SMP ticket lock. This name must be + * persistent throughout the life time of this lock. + */ +static inline void _SMP_ticket_lock_Initialize( + SMP_ticket_lock_Control *lock +) +{ + _Atomic_Init_uint( &lock->next_ticket, 0U ); + _Atomic_Init_uint( &lock->now_serving, 0U ); +} + +/** + * @brief Destroys an SMP ticket lock. + * + * Concurrent destruction leads to unpredictable results. 
+ * + * @param[in] lock The SMP ticket lock control. + */ +static inline void _SMP_ticket_lock_Destroy( SMP_ticket_lock_Control *lock ) +{ + (void) lock; +} + +static inline void _SMP_ticket_lock_Do_acquire( + SMP_ticket_lock_Control *lock +#if defined( RTEMS_PROFILING ) + , + SMP_lock_Stats *stats, + SMP_lock_Stats_context *stats_context +#endif +) +{ + unsigned int my_ticket; + unsigned int now_serving; + +#if defined( RTEMS_PROFILING ) + CPU_Counter_ticks first; + CPU_Counter_ticks second; + CPU_Counter_ticks delta; + unsigned int initial_queue_length; + + first = _CPU_Counter_read(); +#endif + + my_ticket = + _Atomic_Fetch_add_uint( &lock->next_ticket, 1U, ATOMIC_ORDER_RELAXED ); + +#if defined( RTEMS_PROFILING ) + now_serving = + _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_ACQUIRE ); + initial_queue_length = my_ticket - now_serving; + + if ( initial_queue_length > 0 ) { +#endif + + do { + now_serving = + _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_ACQUIRE ); + } while ( now_serving != my_ticket ); + +#if defined( RTEMS_PROFILING ) + } + + second = _CPU_Counter_read(); + stats_context->acquire_instant = second; + delta = _CPU_Counter_difference( second, first ); + + ++stats->usage_count; + + stats->total_acquire_time += delta; + + if ( stats->max_acquire_time < delta ) { + stats->max_acquire_time = delta; + } + + if ( initial_queue_length >= SMP_LOCK_STATS_CONTENTION_COUNTS ) { + initial_queue_length = SMP_LOCK_STATS_CONTENTION_COUNTS - 1; + } + ++stats->contention_counts[initial_queue_length]; + + stats_context->stats = stats; +#endif +} + +/** + * @brief Acquires an SMP ticket lock. + * + * This function will not disable interrupts. The caller must ensure that the + * current thread of execution is not interrupted indefinite once it obtained + * the SMP ticket lock. + * + * @param[in] lock The SMP ticket lock control. + * @param[in] stats The SMP lock statistics. + * @param[out] stats_context The SMP lock statistics context. 
+ */ +#if defined( RTEMS_PROFILING ) + #define _SMP_ticket_lock_Acquire( lock, stats, stats_context ) \ + _SMP_ticket_lock_Do_acquire( lock, stats, stats_context ) +#else + #define _SMP_ticket_lock_Acquire( lock, stats, stats_context ) \ + _SMP_ticket_lock_Do_acquire( lock ) +#endif + +static inline void _SMP_ticket_lock_Do_release( + SMP_ticket_lock_Control *lock +#if defined( RTEMS_PROFILING ) + , + const SMP_lock_Stats_context *stats_context +#endif +) +{ + unsigned int current_ticket = + _Atomic_Load_uint( &lock->now_serving, ATOMIC_ORDER_RELAXED ); + unsigned int next_ticket = current_ticket + 1U; + +#if defined( RTEMS_PROFILING ) + _SMP_lock_Stats_release_update( stats_context ); +#endif + + _Atomic_Store_uint( &lock->now_serving, next_ticket, ATOMIC_ORDER_RELEASE ); +} + +/** + * @brief Releases an SMP ticket lock. + * + * @param[in] lock The SMP ticket lock control. + * @param[in] stats_context The SMP lock statistics context. + */ +#if defined( RTEMS_PROFILING ) + #define _SMP_ticket_lock_Release( lock, stats_context ) \ + _SMP_ticket_lock_Do_release( lock, stats_context ) +#else + #define _SMP_ticket_lock_Release( lock, stats_context ) \ + _SMP_ticket_lock_Do_release( lock ) +#endif + +/** + * @brief SMP lock control. + */ +typedef struct { + SMP_ticket_lock_Control Ticket_lock; +#if defined( RTEMS_PROFILING ) + SMP_lock_Stats Stats; +#endif +} SMP_lock_Control; + +/** + * @brief Local SMP lock context for acquire and release pairs. + */ +typedef struct { + ISR_Level isr_level; +#if defined( RTEMS_PROFILING ) + SMP_lock_Stats_context Stats_context; +#endif +} SMP_lock_Context; + +/** + * @brief SMP lock control initializer for static initialization. + */ +#if defined( RTEMS_PROFILING ) + #define SMP_LOCK_INITIALIZER( name ) \ + { SMP_TICKET_LOCK_INITIALIZER, SMP_LOCK_STATS_INITIALIZER( name ) } +#else + #define SMP_LOCK_INITIALIZER( name ) { SMP_TICKET_LOCK_INITIALIZER } +#endif + +/** + * @brief Initializes an SMP lock. 
+ * + * Concurrent initialization leads to unpredictable results. + * + * @param[in] lock The SMP lock control. + * @param[in] name The name for the SMP lock statistics. This name must be + * persistent throughout the life time of this statistics block. + */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Initialize( + SMP_lock_Control *lock, + const char *name +); + +static inline void _SMP_lock_Initialize_body( +#else +static inline void _SMP_lock_Initialize( +#endif + SMP_lock_Control *lock, + const char *name +) +{ + _SMP_ticket_lock_Initialize( &lock->Ticket_lock ); +#if defined( RTEMS_PROFILING ) + _SMP_lock_Stats_initialize( &lock->Stats, name ); +#else + (void) name; +#endif +} + +/** + * @brief Destroys an SMP lock. + * + * Concurrent destruction leads to unpredictable results. + * + * @param[in] lock The SMP lock control. + */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Destroy( SMP_lock_Control *lock ); + +static inline void _SMP_lock_Destroy_body( SMP_lock_Control *lock ) +#else +static inline void _SMP_lock_Destroy( SMP_lock_Control *lock ) +#endif +{ + _SMP_ticket_lock_Destroy( &lock->Ticket_lock ); + _SMP_lock_Stats_destroy( &lock->Stats ); +} + +/** + * @brief Acquires an SMP lock. + * + * This function will not disable interrupts. The caller must ensure that the + * current thread of execution is not interrupted indefinite once it obtained + * the SMP lock. + * + * @param[in] lock The SMP lock control. + * @param[in] context The local SMP lock context for an acquire and release + * pair. 
+ */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Acquire_body( +#else +static inline void _SMP_lock_Acquire( +#endif + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + (void) context; + _SMP_ticket_lock_Acquire( + &lock->Ticket_lock, + &lock->Stats, + &context->Stats_context + ); +} + +/** + * @brief Releases an SMP lock. + * + * @param[in] lock The SMP lock control. + * @param[in] context The local SMP lock context for an acquire and release + * pair. + */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Release( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Release_body( +#else +static inline void _SMP_lock_Release( +#endif + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + (void) context; + _SMP_ticket_lock_Release( + &lock->Ticket_lock, + &context->Stats_context + ); +} + +/** + * @brief Disables interrupts and acquires the SMP lock. + * + * @param[in] lock The SMP lock control. + * @param[in] context The local SMP lock context for an acquire and release + * pair. + */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_ISR_disable_and_acquire( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_ISR_disable_and_acquire_body( +#else +static inline void _SMP_lock_ISR_disable_and_acquire( +#endif + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _ISR_Disable_without_giant( context->isr_level ); + _SMP_lock_Acquire( lock, context ); +} + +/** + * @brief Releases the SMP lock and enables interrupts. + * + * @param[in] lock The SMP lock control. + * @param[in] context The local SMP lock context for an acquire and release + * pair. 
+ */ +#if defined( RTEMS_SMP_LOCK_DO_NOT_INLINE ) +void _SMP_lock_Release_and_ISR_enable( + SMP_lock_Control *lock, + SMP_lock_Context *context +); + +static inline void _SMP_lock_Release_and_ISR_enable_body( +#else +static inline void _SMP_lock_Release_and_ISR_enable( +#endif + SMP_lock_Control *lock, + SMP_lock_Context *context +) +{ + _SMP_lock_Release( lock, context ); + _ISR_Enable_without_giant( context->isr_level ); +} + +#if defined( RTEMS_PROFILING ) + +typedef struct { + SMP_lock_Control Lock; + Chain_Control Stats_chain; + Chain_Control Iterator_chain; +} SMP_lock_Stats_control; + +typedef struct { + Chain_Node Node; + SMP_lock_Stats *current; +} SMP_lock_Stats_iteration_context; + +extern SMP_lock_Stats_control _SMP_lock_Stats_control; + +static inline void _SMP_lock_Stats_iteration_start( + SMP_lock_Stats_iteration_context *iteration_context +) +{ + SMP_lock_Stats_control *control = &_SMP_lock_Stats_control; + SMP_lock_Context lock_context; + + _SMP_lock_ISR_disable_and_acquire( &control->Lock, &lock_context ); + + _Chain_Append_unprotected( + &control->Iterator_chain, + &iteration_context->Node + ); + iteration_context->current = + (SMP_lock_Stats *) _Chain_First( &control->Stats_chain ); + + _SMP_lock_Release_and_ISR_enable( &control->Lock, &lock_context ); +} + +static inline bool _SMP_lock_Stats_iteration_next( + SMP_lock_Stats_iteration_context *iteration_context, + SMP_lock_Stats *snapshot, + char *name, + size_t name_size +) +{ + SMP_lock_Stats_control *control = &_SMP_lock_Stats_control; + SMP_lock_Context lock_context; + SMP_lock_Stats *current; + bool valid; + + _SMP_lock_ISR_disable_and_acquire( &control->Lock, &lock_context ); + + current = iteration_context->current; + if ( !_Chain_Is_tail( &control->Stats_chain, ¤t->Node ) ) { + size_t name_len = current->name != NULL ? 
strlen(current->name) : 0; + + valid = true; + + iteration_context->current = (SMP_lock_Stats *) + _Chain_Next( ¤t->Node ); + + *snapshot = *current; + snapshot->name = name; + + if ( name_len >= name_size ) { + name_len = name_size - 1; + } + + name[name_len] = '\0'; + memcpy(name, current->name, name_len); + } else { + valid = false; + } + + _SMP_lock_Release_and_ISR_enable( &control->Lock, &lock_context ); + + return valid; +} + +static inline void _SMP_lock_Stats_iteration_stop( + SMP_lock_Stats_iteration_context *iteration_context +) +{ + SMP_lock_Stats_control *control = &_SMP_lock_Stats_control; + SMP_lock_Context lock_context; + + _SMP_lock_ISR_disable_and_acquire( &control->Lock, &lock_context ); + _Chain_Extract_unprotected( &iteration_context->Node ); + _SMP_lock_Release_and_ISR_enable( &control->Lock, &lock_context ); +} + +static inline void _SMP_lock_Stats_destroy( SMP_lock_Stats *stats ) +{ + if ( !_Chain_Is_node_off_chain( &stats->Node ) ) { + SMP_lock_Stats_control *control = &_SMP_lock_Stats_control; + SMP_lock_Context lock_context; + SMP_lock_Stats_iteration_context *iteration_context; + SMP_lock_Stats_iteration_context *iteration_context_tail; + SMP_lock_Stats *next_stats; + + _SMP_lock_ISR_disable_and_acquire( &control->Lock, &lock_context ); + + next_stats = (SMP_lock_Stats *) _Chain_Next( &stats->Node ); + _Chain_Extract_unprotected( &stats->Node ); + + iteration_context = (SMP_lock_Stats_iteration_context *) + _Chain_First( &control->Iterator_chain ); + iteration_context_tail = (SMP_lock_Stats_iteration_context *) + _Chain_Tail( &control->Iterator_chain ); + + while ( iteration_context != iteration_context_tail ) { + if ( iteration_context->current == stats ) { + iteration_context->current = next_stats; + } + + iteration_context = (SMP_lock_Stats_iteration_context *) + _Chain_Next( &iteration_context->Node ); + } + + _SMP_lock_Release_and_ISR_enable( &control->Lock, &lock_context ); + } +} + +static inline void 
_SMP_lock_Stats_release_update( + const SMP_lock_Stats_context *stats_context +) +{ + SMP_lock_Stats *stats = stats_context->stats; + CPU_Counter_ticks first = stats_context->acquire_instant; + CPU_Counter_ticks second = _CPU_Counter_read(); + CPU_Counter_ticks delta = _CPU_Counter_difference( second, first ); + + stats->total_section_time += delta; + + if ( stats->max_section_time < delta ) { + stats->max_section_time = delta; + + if ( _Chain_Is_node_off_chain( &stats->Node ) ) { + SMP_lock_Stats_control *control = &_SMP_lock_Stats_control; + SMP_lock_Context lock_context; + + _SMP_lock_ISR_disable_and_acquire( &control->Lock, &lock_context ); + _Chain_Append_unprotected( &control->Stats_chain, &stats->Node ); + _SMP_lock_Release_and_ISR_enable( &control->Lock, &lock_context ); + } + } +} + +#endif /* RTEMS_PROFILING */ + +/**@}*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* defined( RTEMS_SMP ) */ + +#endif /* _RTEMS_SCORE_SMPLOCK_H */ diff --git a/include/rtems/score/sparc.h b/include/rtems/score/sparc.h new file mode 100644 index 0000000000..ecac74de3c --- /dev/null +++ b/include/rtems/score/sparc.h @@ -0,0 +1,405 @@ +/** + * @file + * + * @brief Information Required to Build RTEMS for a Particular Member + * of the SPARC Family + * + * This file contains the information required to build + * RTEMS for a particular member of the SPARC family. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + */ + +/* + * COPYRIGHT (c) 1989-2011. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_SPARC_H +#define _RTEMS_SCORE_SPARC_H + +#include <rtems/score/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * + * Currently recognized feature flags: + * + * + SPARC_HAS_FPU + * 0 - no HW FPU + * 1 - has HW FPU (assumed to be compatible w/90C602) + * + * + SPARC_HAS_BITSCAN + * 0 - does not have scan instructions + * 1 - has scan instruction (not currently implemented) + * + * + SPARC_NUMBER_OF_REGISTER_WINDOWS + * 8 is the most common number supported by SPARC implementations. + * SPARC_PSR_CWP_MASK is derived from this value. + */ + +/** + * Some higher end SPARCs have a bitscan instructions. It would + * be nice to take advantage of them. Right now, there is no + * port to a CPU model with this feature and no (untested) code + * that is based on this feature flag. + */ +#define SPARC_HAS_BITSCAN 0 + +/** + * This should be OK until a port to a higher end SPARC processor + * is made that has more than 8 register windows. If this cannot + * be determined based on multilib settings (v7/v8/v9), then the + * cpu_asm.S code that depends on this will have to move to libcpu. + */ +#define SPARC_NUMBER_OF_REGISTER_WINDOWS 8 + +/** + * This macro indicates whether this multilib variation has hardware + * floating point or not. We use the gcc cpp predefine _SOFT_FLOAT + * to determine that. + */ +#if defined(_SOFT_FLOAT) + #define SPARC_HAS_FPU 0 +#else + #define SPARC_HAS_FPU 1 +#endif + +/** + * This macro contains a string describing the multilib variant being + * build. + */ +#if SPARC_HAS_FPU + #define CPU_MODEL_NAME "w/FPU" +#else + #define CPU_MODEL_NAME "w/soft-float" +#endif + +/** + * Define the name of the CPU family. + */ +#define CPU_NAME "SPARC" + +/* + * Miscellaneous constants + */ + +/** + * PSR masks and starting bit positions + * + * NOTE: Reserved bits are ignored. 
+ */ +#if (SPARC_NUMBER_OF_REGISTER_WINDOWS == 8) + #define SPARC_PSR_CWP_MASK 0x07 /* bits 0 - 4 */ +#elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 16) + #define SPARC_PSR_CWP_MASK 0x0F /* bits 0 - 4 */ +#elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 32) + #define SPARC_PSR_CWP_MASK 0x1F /* bits 0 - 4 */ +#else + #error "Unsupported number of register windows for this cpu" +#endif + +/** This constant is a mask for the ET bits in the PSR. */ +#define SPARC_PSR_ET_MASK 0x00000020 /* bit 5 */ +/** This constant is a mask for the PS bits in the PSR. */ +#define SPARC_PSR_PS_MASK 0x00000040 /* bit 6 */ +/** This constant is a mask for the S bits in the PSR. */ +#define SPARC_PSR_S_MASK 0x00000080 /* bit 7 */ +/** This constant is a mask for the PIL bits in the PSR. */ +#define SPARC_PSR_PIL_MASK 0x00000F00 /* bits 8 - 11 */ +/** This constant is a mask for the EF bits in the PSR. */ +#define SPARC_PSR_EF_MASK 0x00001000 /* bit 12 */ +/** This constant is a mask for the EC bits in the PSR. */ +#define SPARC_PSR_EC_MASK 0x00002000 /* bit 13 */ +/** This constant is a mask for the ICC bits in the PSR. */ +#define SPARC_PSR_ICC_MASK 0x00F00000 /* bits 20 - 23 */ +/** This constant is a mask for the VER bits in the PSR. */ +#define SPARC_PSR_VER_MASK 0x0F000000 /* bits 24 - 27 */ +/** This constant is a mask for the IMPL bits in the PSR. */ +#define SPARC_PSR_IMPL_MASK 0xF0000000 /* bits 28 - 31 */ + +/** This constant is the starting bit position of the CWP in the PSR. */ +#define SPARC_PSR_CWP_BIT_POSITION 0 /* bits 0 - 4 */ +/** This constant is the starting bit position of the ET in the PSR. */ +#define SPARC_PSR_ET_BIT_POSITION 5 /* bit 5 */ +/** This constant is the starting bit position of the PS in the PSR. */ +#define SPARC_PSR_PS_BIT_POSITION 6 /* bit 6 */ +/** This constant is the starting bit position of the S in the PSR. */ +#define SPARC_PSR_S_BIT_POSITION 7 /* bit 7 */ +/** This constant is the starting bit position of the PIL in the PSR. 
*/ +#define SPARC_PSR_PIL_BIT_POSITION 8 /* bits 8 - 11 */ +/** This constant is the starting bit position of the EF in the PSR. */ +#define SPARC_PSR_EF_BIT_POSITION 12 /* bit 12 */ +/** This constant is the starting bit position of the EC in the PSR. */ +#define SPARC_PSR_EC_BIT_POSITION 13 /* bit 13 */ +/** This constant is the starting bit position of the ICC in the PSR. */ +#define SPARC_PSR_ICC_BIT_POSITION 20 /* bits 20 - 23 */ +/** This constant is the starting bit position of the VER in the PSR. */ +#define SPARC_PSR_VER_BIT_POSITION 24 /* bits 24 - 27 */ +/** This constant is the starting bit position of the IMPL in the PSR. */ +#define SPARC_PSR_IMPL_BIT_POSITION 28 /* bits 28 - 31 */ + +#define LEON3_ASR17_PROCESSOR_INDEX_SHIFT 28 + +/* SPARC Software Trap number definitions */ +#define SPARC_SWTRAP_SYSCALL 0 +#define SPARC_SWTRAP_IRQDIS 9 +#define SPARC_SWTRAP_IRQEN 10 + +#ifndef ASM + +/** + * This macro is a standard nop instruction. + */ +#define nop() \ + do { \ + __asm__ volatile ( "nop" ); \ + } while ( 0 ) + +/** + * @brief Macro to obtain the PSR. + * + * This macro returns the current contents of the PSR register in @a _psr. + */ +#if defined(RTEMS_PARAVIRT) + +uint32_t _SPARC_Get_PSR( void ); + +#define sparc_get_psr( _psr ) \ + (_psr) = _SPARC_Get_PSR() + +#else /* RTEMS_PARAVIRT */ + +#define sparc_get_psr( _psr ) \ + do { \ + (_psr) = 0; \ + __asm__ volatile( "rd %%psr, %0" : "=r" (_psr) : "0" (_psr) ); \ + } while ( 0 ) + +#endif /* RTEMS_PARAVIRT */ + +/** + * @brief Macro to set the PSR. + * + * This macro sets the PSR register to the value in @a _psr. 
+ */ +#if defined(RTEMS_PARAVIRT) + +void _SPARC_Set_PSR( uint32_t new_psr ); + +#define sparc_set_psr( _psr ) \ + _SPARC_Set_PSR( _psr ) + +#else /* RTEMS_PARAVIRT */ + +#define sparc_set_psr( _psr ) \ + do { \ + __asm__ volatile ( "mov %0, %%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \ + nop(); \ + nop(); \ + nop(); \ + } while ( 0 ) + +#endif /* RTEMS_PARAVIRT */ + +/** + * @brief Macro to obtain the TBR. + * + * This macro returns the current contents of the TBR register in @a _tbr. + */ +#if defined(RTEMS_PARAVIRT) + +uint32_t _SPARC_Get_TBR( void ); + +#define sparc_get_tbr( _tbr ) \ + (_tbr) = _SPARC_Get_TBR() + +#else /* RTEMS_PARAVIRT */ + +#define sparc_get_tbr( _tbr ) \ + do { \ + (_tbr) = 0; /* to avoid unitialized warnings */ \ + __asm__ volatile( "rd %%tbr, %0" : "=r" (_tbr) : "0" (_tbr) ); \ + } while ( 0 ) + +#endif /* RTEMS_PARAVIRT */ + +/** + * @brief Macro to set the TBR. + * + * This macro sets the TBR register to the value in @a _tbr. + */ +#if defined(RTEMS_PARAVIRT) + +void _SPARC_Set_TBR( uint32_t new_tbr ); + +#define sparc_set_tbr( _tbr ) \ + _SPARC_Set_TBR((_tbr)) + +#else /* RTEMS_PARAVIRT */ + +#define sparc_set_tbr( _tbr ) \ + do { \ + __asm__ volatile( "wr %0, 0, %%tbr" : "=r" (_tbr) : "0" (_tbr) ); \ + } while ( 0 ) + +#endif /* RTEMS_PARAVIRT */ + +/** + * @brief Macro to obtain the WIM. + * + * This macro returns the current contents of the WIM field in @a _wim. + */ +#define sparc_get_wim( _wim ) \ + do { \ + __asm__ volatile( "rd %%wim, %0" : "=r" (_wim) : "0" (_wim) ); \ + } while ( 0 ) + +/** + * @brief Macro to set the WIM. + * + * This macro sets the WIM field to the value in @a _wim. + */ +#define sparc_set_wim( _wim ) \ + do { \ + __asm__ volatile( "wr %0, %%wim" : "=r" (_wim) : "0" (_wim) ); \ + nop(); \ + nop(); \ + nop(); \ + } while ( 0 ) + +/** + * @brief Macro to obtain the Y register. + * + * This macro returns the current contents of the Y register in @a _y. 
+ */ +#define sparc_get_y( _y ) \ + do { \ + __asm__ volatile( "rd %%y, %0" : "=r" (_y) : "0" (_y) ); \ + } while ( 0 ) + +/** + * @brief Macro to set the Y register. + * + * This macro sets the Y register to the value in @a _y. + */ +#define sparc_set_y( _y ) \ + do { \ + __asm__ volatile( "wr %0, %%y" : "=r" (_y) : "0" (_y) ); \ + } while ( 0 ) + +/** + * @brief SPARC disable processor interrupts. + * + * This method is invoked to disable all maskable interrupts. + * + * @return This method returns the entire PSR contents. + */ +static inline uint32_t sparc_disable_interrupts(void) +{ + register uint32_t psr __asm__("g1"); /* return value of trap handler */ + __asm__ volatile ( "ta %1\n\t" : "=r" (psr) : "i" (SPARC_SWTRAP_IRQDIS)); + return psr; +} + +/** + * @brief SPARC enable processor interrupts. + * + * This method is invoked to enable all maskable interrupts. + * + * @param[in] psr is the PSR returned by @ref sparc_disable_interrupts. + */ +static inline void sparc_enable_interrupts(uint32_t psr) +{ + register uint32_t _psr __asm__("g1") = psr; /* input to trap handler */ + + /* + * The trap instruction has a higher trap priority than the interrupts + * according to "The SPARC Architecture Manual: Version 8", Table 7-1 + * "Exception and Interrupt Request Priority and tt Values". Add a nop to + * prevent a trap instruction right after the interrupt enable trap. + */ + __asm__ volatile ( "ta %0\nnop\n" :: "i" (SPARC_SWTRAP_IRQEN), "r" (_psr)); +} + +/** + * @brief SPARC exit through system call 1 + * + * This method is invoked to go into system error halt. The optional + * arguments can be given to hypervisor, hardware debugger, simulator or + * similar. + * + * System error mode is entered when taking a trap when traps have been + * disabled. What happens when error mode is entered depends on the motherboard. + * In a typical development systems the CPU relingish control to the debugger, + * simulator, hypervisor or similar. 
The following steps are taken: + * + * 1. Going into system error mode by Software Trap 0 + * 2. %g1=1 (syscall 1 - Exit) + * 3. %g2=Primary exit code + * 4. %g3=Secondary exit code. Depends on %g2 exit type. + * + * This function never returns. + * + * @param[in] exitcode1 Primary exit code stored in CPU g2 register after exit + * @param[in] exitcode2 Secondary exit code stored in CPU g3 register after exit + */ +void sparc_syscall_exit(uint32_t exitcode1, uint32_t exitcode2) + RTEMS_NO_RETURN; + +/** + * @brief SPARC flash processor interrupts. + * + * This method is invoked to temporarily enable all maskable interrupts. + * + * @param[in] _psr is the PSR returned by @ref sparc_disable_interrupts. + */ +#define sparc_flash_interrupts( _psr ) \ + do { \ + sparc_enable_interrupts( (_psr) ); \ + _psr = sparc_disable_interrupts(); \ + } while ( 0 ) + +/** + * @brief SPARC obtain interrupt level. + * + * This method is invoked to obtain the current interrupt disable level. + * + * @param[out] _level is set to the current interrupt disable level (the + * PIL field extracted from the PSR). + */ +#define sparc_get_interrupt_level( _level ) \ + do { \ + register uint32_t _psr_level = 0; \ + \ + sparc_get_psr( _psr_level ); \ + (_level) = \ + (_psr_level & SPARC_PSR_PIL_MASK) >> SPARC_PSR_PIL_BIT_POSITION; \ + } while ( 0 ) + +static inline uint32_t _LEON3_Get_current_processor( void ) +{ + uint32_t asr17; + + __asm__ volatile ( + "rd %%asr17, %0" + : "=&r" (asr17) + ); + + return asr17 >> LEON3_ASR17_PROCESSOR_INDEX_SHIFT; +} + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_SPARC_H */ diff --git a/include/rtems/score/sparc64.h b/include/rtems/score/sparc64.h new file mode 100644 index 0000000000..d20c2ff509 --- /dev/null +++ b/include/rtems/score/sparc64.h @@ -0,0 +1,342 @@ +/** + * @file + * + * @brief Information Required to Build RTEMS for a Particular Member + * of the SPARC Family + * + * This include file contains information pertaining to the SPARC + * processor family. 
+ */ + +/* + * COPYRIGHT (c) 1989-1999. On-Line Applications Research Corporation (OAR). + * + * This file is based on the SPARC sparc.h file. Modifications are made + * to support the SPARC64 processor. + * COPYRIGHT (c) 2010. Gedare Bloom. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_SPARC_H +#define _RTEMS_SCORE_SPARC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the "sparc" family. It does + * this by setting variables to indicate which implementation + * dependent features are present in a particular member + * of the family. + * + * Currently recognized feature flags: + * + * + SPARC_HAS_FPU + * 0 - no HW FPU + * 1 - has HW FPU (assumed to be compatible w/90C602) + * + * + SPARC_HAS_BITSCAN + * 0 - does not have scan instructions + * 1 - has scan instruction (not currently implemented) + * + * + SPARC_NUMBER_OF_REGISTER_WINDOWS + * 8 is the most common number supported by SPARC implementations. + * SPARC_PSR_CWP_MASK is derived from this value. + */ + +/* + * Some higher end SPARCs have a bitscan instructions. It would + * be nice to take advantage of them. Right now, there is no + * port to a CPU model with this feature and no (untested) code + * that is based on this feature flag. + */ + +#define SPARC_HAS_BITSCAN 0 + +/* + * This should be OK until a port to a higher end SPARC processor + * is made that has more than 8 register windows. If this cannot + * be determined based on multilib settings (v7/v8/v9), then the + * cpu_asm.S code that depends on this will have to move to libcpu. + * + * SPARC v9 supports from 3 to 32 register windows. + * N_REG_WINDOWS = 8 on UltraSPARC T1 (impl. dep. #2-V8). 
+ */ + +#define SPARC_NUMBER_OF_REGISTER_WINDOWS 8 + +/* + * This should be determined based on some soft float derived + * cpp predefine but gcc does not currently give us that information. + */ + + +#if defined(_SOFT_FLOAT) +#define SPARC_HAS_FPU 0 +#else +#define SPARC_HAS_FPU 1 +#endif + +#if SPARC_HAS_FPU +#define CPU_MODEL_NAME "w/FPU" +#else +#define CPU_MODEL_NAME "w/soft-float" +#endif + +/* + * Define the name of the CPU family. + */ + +#define CPU_NAME "SPARC" + +/* + * Miscellaneous constants + */ + +/* + * The PSR is deprecated and deleted. + * + * The following registers represent fields of the PSR: + * PIL - Processor Interrupt Level register + * CWP - Current Window Pointer register + * VER - Version register + * CCR - Condition Codes Register + * PSTATE - Processor State register + */ + +/* + * PSTATE masks and starting bit positions + * + * NOTE: Reserved bits are ignored. + */ + +#define SPARC_PSTATE_AG_MASK 0x00000001 /* bit 0 */ +#define SPARC_PSTATE_IE_MASK 0x00000002 /* bit 1 */ +#define SPARC_PSTATE_PRIV_MASK 0x00000004 /* bit 2 */ +#define SPARC_PSTATE_AM_MASK 0x00000008 /* bit 3 */ +#define SPARC_PSTATE_PEF_MASK 0x00000010 /* bit 4 */ +#define SPARC_PSTATE_MM_MASK 0x00000040 /* bit 6 */ +#define SPARC_PSTATE_TLE_MASK 0x00000100 /* bit 8 */ +#define SPARC_PSTATE_CLE_MASK 0x00000200 /* bit 9 */ + +#define SPARC_PSTATE_AG_BIT_POSITION 0 /* bit 0 */ +#define SPARC_PSTATE_IE_BIT_POSITION 1 /* bit 1 */ +#define SPARC_PSTATE_PRIV_BIT_POSITION 2 /* bit 2 */ +#define SPARC_PSTATE_AM_BIT_POSITION 3 /* bit 3 */ +#define SPARC_PSTATE_PEF_BIT_POSITION 4 /* bit 4 */ +#define SPARC_PSTATE_MM_BIT_POSITION 6 /* bit 6 */ +#define SPARC_PSTATE_TLE_BIT_POSITION 8 /* bit 8 */ +#define SPARC_PSTATE_CLE_BIT_POSITION 9 /* bit 9 */ + +#define SPARC_FPRS_FEF_MASK 0x0100 /* bit 2 */ +#define SPARC_FPRS_FEF_BIT_POSITION 2 /* bit 2 */ + +#define SPARC_TSTATE_IE_MASK 0x00000200 /* bit 9 */ + +#define SPARC_SOFTINT_TM_MASK 0x00000001 /* bit 0 */ +#define 
SPARC_SOFTINT_SM_MASK 0x00010000 /* bit 16 */ +/* NOTE(review): the two *_BIT_POSITION values below (1 and 17) disagree with + * their masks (bit 0 and bit 16); shifting by these constants does not + * isolate the TM/SM bits -- verify before using them as shift amounts. + */ +#define SPARC_SOFTINT_TM_BIT_POSITION 1 /* bit 0 */ +#define SPARC_SOFTINT_SM_BIT_POSITION 17 /* bit 16 */ + +#define STACK_BIAS (2047) + +#ifdef ASM + +/* + * To enable the FPU we need to set both PSTATE.pef and FPRS.fef + */ + +#define sparc64_enable_FPU(rtmp1) \ + rdpr %pstate, rtmp1; \ + or rtmp1, SPARC_PSTATE_PEF_MASK, rtmp1; \ + wrpr %g0, rtmp1, %pstate; \ + rd %fprs, rtmp1; \ + or rtmp1, SPARC_FPRS_FEF_MASK, rtmp1; \ + wr %g0, rtmp1, %fprs + + +#endif + +#ifndef ASM + +/* + * Standard nop + */ + +#define nop() \ + do { \ + __asm__ volatile ( "nop" ); \ + } while ( 0 ) + +/* + * Get and set the pstate + */ + +#define sparc64_get_pstate( _pstate ) \ + do { \ + (_pstate) = 0; \ + __asm__ volatile( "rdpr %%pstate, %0" : "=r" (_pstate) : "0" (_pstate) ); \ + } while ( 0 ) + +#define sparc64_set_pstate( _pstate ) \ + do { \ + __asm__ volatile ( \ + "wrpr %%g0, %0, %%pstate " : "=r" ((_pstate)) : "0" ((_pstate)) ); \ + } while ( 0 ) + +/* + * Get and set the PIL + */ + +#define sparc64_get_pil( _pil ) \ + do { \ + (_pil) = 0; \ + __asm__ volatile( "rdpr %%pil, %0" : "=r" (_pil) : "0" (_pil) ); \ + } while ( 0 ) + +#define sparc64_set_pil( _pil ) \ + do { \ + __asm__ volatile ( "wrpr %%g0, %0, %%pil " : "=r" ((_pil)) : "0" ((_pil)) ); \ + } while ( 0 ) + + +/* + * Get and set the TBA + */ + +#define sparc64_get_tba( _tba ) \ + do { \ + (_tba) = 0; /* to avoid uninitialized warnings */ \ + __asm__ volatile( "rdpr %%tba, %0" : "=r" (_tba) : "0" (_tba) ); \ + } while ( 0 ) + +#define sparc64_set_tba( _tba ) \ + do { \ + __asm__ volatile( "wrpr %%g0, %0, %%tba" : "=r" (_tba) : "0" (_tba) ); \ + } while ( 0 ) + +/* + * Get and set the TL (trap level) + */ + +#define sparc64_get_tl( _tl ) \ + do { \ + (_tl) = 0; /* to avoid uninitialized warnings */ \ + __asm__ volatile( "rdpr %%tl, %0" : "=r" (_tl) : "0" (_tl) ); \ + } while ( 0 ) + +#define sparc64_set_tl( _tl ) \ + do { \ + __asm__ volatile( "wrpr %%g0, %0, %%tl" : "=r" (_tl) 
: "0" (_tl) ); \ + } while ( 0 ) + + +/* + * read the stick register + * + * Note: + * stick asr=24, mnemonic=stick + * Note: stick does not appear to be a valid ASR for US3, although it is + * implemented in US3i. + */ +#define sparc64_read_stick( _stick ) \ + do { \ + (_stick) = 0; \ + __asm__ volatile( "rd %%stick, %0" : "=r" (_stick) : "0" (_stick) ); \ + } while ( 0 ) + +/* + * write the stick_cmpr register + * + * Note: + * stick_cmpr asr=25, mnemonic=stick_cmpr + * Note: stick_cmpr does not appear to be a valid ASR for US3, although it is + * implemented in US3i. + */ +#define sparc64_write_stick_cmpr( _stick_cmpr ) \ + do { \ + __asm__ volatile( "wr %%g0, %0, %%stick_cmpr" : "=r" (_stick_cmpr) \ + : "0" (_stick_cmpr) ); \ + } while ( 0 ) + +/* + * read the Tick register + */ +#define sparc64_read_tick( _tick ) \ + do { \ + (_tick) = 0; \ + __asm__ volatile( "rd %%tick, %0" : "=r" (_tick) : "0" (_tick) ); \ + } while ( 0 ) + +/* + * write the tick_cmpr register + */ +#define sparc64_write_tick_cmpr( _tick_cmpr ) \ + do { \ + __asm__ volatile( "wr %%g0, %0, %%tick_cmpr" : "=r" (_tick_cmpr) \ + : "0" (_tick_cmpr) ); \ + } while ( 0 ) + +/* + * Clear the softint register. 
+ * + * sun4u and sun4v: softint_clr asr = 21, with mnemonic clear_softint + */ +#define sparc64_clear_interrupt_bits( _bit_mask ) \ + do { \ + __asm__ volatile( "wr %%g0, %0, %%clear_softint" : "=r" (_bit_mask) \ + : "0" (_bit_mask)); \ + } while ( 0 ) + +/************* DEPRECATED ****************/ +/* Note: Although the y register is deprecated, gcc still uses it */ +/* + * Get and set the Y + */ + +#define sparc_get_y( _y ) \ + do { \ + __asm__ volatile( "rd %%y, %0" : "=r" (_y) : "0" (_y) ); \ + } while ( 0 ) + +#define sparc_set_y( _y ) \ + do { \ + __asm__ volatile( "wr %0, %%y" : "=r" (_y) : "0" (_y) ); \ + } while ( 0 ) + +/************* /DEPRECATED ****************/ + +/* + * Manipulate the interrupt level in the pstate + */ + +uint32_t sparc_disable_interrupts(void); +void sparc_enable_interrupts(uint32_t); + +#define sparc_flash_interrupts( _level ) \ + do { \ + uint32_t _ignored; \ + \ + sparc_enable_interrupts( (_level) ); \ + _ignored = sparc_disable_interrupts(); \ + (void) _ignored; \ + } while ( 0 ) + +#define sparc64_get_interrupt_level( _level ) \ + do { \ + _level = 0; \ + sparc64_get_pil( _level ); \ + } while ( 0 ) + +#endif /* !ASM */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_SPARC_H */ diff --git a/include/rtems/score/stack.h b/include/rtems/score/stack.h new file mode 100644 index 0000000000..9622495092 --- /dev/null +++ b/include/rtems/score/stack.h @@ -0,0 +1,69 @@ +/** + * @file rtems/score/stack.h + * + * @brief Information About the Thread Stack Handler + * + * This include file contains all information about the thread + * Stack Handler. This Handler provides mechanisms which can be used to + * initialize and utilize stacks. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_STACK_H +#define _RTEMS_SCORE_STACK_H + +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreStack Stack Handler + * + * @ingroup Score + * + * This handler encapsulates functionality which is used in the management + * of thread stacks. + */ +/**@{*/ + +/** + * The following constant defines the minimum stack size which every + * thread must exceed. + */ +#define STACK_MINIMUM_SIZE CPU_STACK_MINIMUM_SIZE + +/** + * The following defines the control block used to manage each stack. + */ +typedef struct { + /** This is the stack size in bytes. */ + size_t size; + /** This is the low memory address of the stack. */ + void *area; +} Stack_Control; + +/** + * This variable contains the minimum stack size. + * + * @note It is instantiated and set by User Configuration via confdefs.h. + */ +extern uint32_t rtems_minimum_stack_size; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/stackimpl.h b/include/rtems/score/stackimpl.h new file mode 100644 index 0000000000..4c622345ff --- /dev/null +++ b/include/rtems/score/stackimpl.h @@ -0,0 +1,99 @@ +/** + * @file + * + * @brief Inlined Routines from the Stack Handler + * + * This file contains the static inline implementation of the inlined + * routines from the Stack Handler. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_STACKIMPL_H +#define _RTEMS_SCORE_STACKIMPL_H + +#include <rtems/score/stack.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreStack + */ +/**@{**/ + +/** + * This routine initializes the_stack record to indicate that + * size bytes of memory starting at starting_address have been + * reserved for a stack. 
+ */ +RTEMS_INLINE_ROUTINE void _Stack_Initialize ( + Stack_Control *the_stack, + void *starting_address, + size_t size +) +{ + the_stack->area = starting_address; + the_stack->size = size; +} + +/** + * This function returns the minimum stack size configured + * for this application. + * + * @return This method returns the minimum stack size; + */ +RTEMS_INLINE_ROUTINE uint32_t _Stack_Minimum (void) +{ + return rtems_minimum_stack_size; +} + +/** + * This function returns true if size bytes is enough memory for + * a valid stack area on this processor, and false otherwise. + * + * @param[in] size is the stack size to check + * + * @return This method returns true if the stack is large enough. + */ +RTEMS_INLINE_ROUTINE bool _Stack_Is_enough ( + size_t size +) +{ + return ( size >= _Stack_Minimum() ); +} + +/** + * This function returns the appropriate stack size given the requested + * size. If the requested size is below the minimum, then the minimum + * configured stack size is returned. + * + * @param[in] size is the stack size to check + * + * @return This method returns the appropriate stack size. + */ +RTEMS_INLINE_ROUTINE size_t _Stack_Ensure_minimum ( + size_t size +) +{ + if ( size >= _Stack_Minimum() ) + return size; + return _Stack_Minimum(); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/states.h b/include/rtems/score/states.h new file mode 100644 index 0000000000..ba59af6907 --- /dev/null +++ b/include/rtems/score/states.h @@ -0,0 +1,50 @@ +/** + * @file rtems/score/states.h + * + * @brief Thread Execution State Information + * + * This include file defines thread execution state information. + */ + +/* + * COPYRIGHT (c) 1989-2006. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_STATES_H +#define _RTEMS_SCORE_STATES_H + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreStates SuperCore Thread States + * + * @ingroup Score + * + * This handler encapsulates functionality which relates to the management of + * the state bitmap associated with each thread. + */ +/**@{*/ + +/** + * The following type defines the control block used to manage a + * thread's state. + */ +typedef uint32_t States_Control; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/statesimpl.h b/include/rtems/score/statesimpl.h new file mode 100644 index 0000000000..97cadb2888 --- /dev/null +++ b/include/rtems/score/statesimpl.h @@ -0,0 +1,464 @@ +/** + * @file + * + * @brief Inlined Routines Associated with Thread State Information + * + * This file contains the static inline implementation of the inlined + * routines associated with thread state information. + */ + +/* + * COPYRIGHT (c) 1989-2012. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_STATESIMPL_H +#define _RTEMS_SCORE_STATESIMPL_H + +#include <rtems/score/states.h> +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreStates + */ +/**@{**/ + +/* + * The following constants define the individual states which may be + * be used to compose and manipulate a thread's state. + */ + +/** This macro corresponds to a task being ready. */ +#define STATES_READY 0x00000 +/** This macro corresponds to a task being created but not yet started. */ +#define STATES_DORMANT 0x00001 +/** This macro corresponds to a task being suspended. */ +#define STATES_SUSPENDED 0x00002 +/** This macro corresponds to a task which is waiting for a timeout. 
*/ +#define STATES_DELAYING 0x00008 +/** This macro corresponds to a task waiting until a specific TOD. */ +#define STATES_WAITING_FOR_TIME 0x00010 +/** This macro corresponds to a task waiting for a variable length buffer. */ +#define STATES_WAITING_FOR_BUFFER 0x00020 +/** This macro corresponds to a task waiting for a fixed size segment. */ +#define STATES_WAITING_FOR_SEGMENT 0x00040 +/** This macro corresponds to a task waiting for a message. */ +#define STATES_WAITING_FOR_MESSAGE 0x00080 +/** This macro corresponds to a task waiting for an event. */ +#define STATES_WAITING_FOR_EVENT 0x00100 +/** This macro corresponds to a task waiting for a semaphore. */ +#define STATES_WAITING_FOR_SEMAPHORE 0x00200 +/** This macro corresponds to a task waiting for a mutex. */ +#define STATES_WAITING_FOR_MUTEX 0x00400 +/** This macro corresponds to a task waiting for a condition variable. */ +#define STATES_WAITING_FOR_CONDITION_VARIABLE 0x00800 +/** This macro corresponds to a task waiting for a join while exiting. */ +#define STATES_WAITING_FOR_JOIN_AT_EXIT 0x01000 +/** This macro corresponds to a task waiting for a reply to an MPCI request. */ +#define STATES_WAITING_FOR_RPC_REPLY 0x02000 +/** This macro corresponds to a task waiting for a period. */ +#define STATES_WAITING_FOR_PERIOD 0x04000 +/** This macro corresponds to a task waiting for a signal. */ +#define STATES_WAITING_FOR_SIGNAL 0x08000 +/** This macro corresponds to a task waiting for a barrier. */ +#define STATES_WAITING_FOR_BARRIER 0x10000 +/** This macro corresponds to a task waiting for a RWLock. */ +#define STATES_WAITING_FOR_RWLOCK 0x20000 +/** This macro corresponds to a task waiting for a system event. */ +#define STATES_WAITING_FOR_SYSTEM_EVENT 0x40000 +/** This macro corresponds to a task waiting for BSD wakeup. */ +#define STATES_WAITING_FOR_BSD_WAKEUP 0x80000 +/** This macro corresponds to a task waiting for a task termination. 
*/ +#define STATES_WAITING_FOR_TERMINATION 0x100000 +/** This macro corresponds to a task being a zombie. */ +#define STATES_ZOMBIE 0x200000 +/** This macro corresponds to a task migrating to another scheduler. */ +#define STATES_MIGRATING 0x400000 +/** This macro corresponds to a task restarting. */ +#define STATES_RESTARTING 0x800000 +/** This macro corresponds to a task waiting for a join. */ +#define STATES_WAITING_FOR_JOIN 0x1000000 +/** This macro corresponds to a task waiting for a <sys/lock.h> mutex. */ +#define STATES_WAITING_FOR_SYS_LOCK_MUTEX 0x2000000 +/** This macro corresponds to a task waiting for a <sys/lock.h> semaphore. */ +#define STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE 0x4000000 +/** This macro corresponds to a task waiting for a <sys/lock.h> futex. */ +#define STATES_WAITING_FOR_SYS_LOCK_FUTEX 0x8000000 + +/** This macro corresponds to a task which is in an interruptible + * blocking state. + */ +#define STATES_INTERRUPTIBLE_BY_SIGNAL 0x10000000 +/** This macro corresponds to a task waiting for a <sys/lock.h> condition. */ +#define STATES_WAITING_FOR_SYS_LOCK_CONDITION 0x20000000 + +/** This macro corresponds to a task waiting for a local object operation. */ +#define STATES_LOCALLY_BLOCKED ( STATES_WAITING_FOR_BUFFER | \ + STATES_WAITING_FOR_SEGMENT | \ + STATES_WAITING_FOR_MESSAGE | \ + STATES_WAITING_FOR_SEMAPHORE | \ + STATES_WAITING_FOR_MUTEX | \ + STATES_WAITING_FOR_CONDITION_VARIABLE | \ + STATES_WAITING_FOR_JOIN | \ + STATES_WAITING_FOR_SIGNAL | \ + STATES_WAITING_FOR_BARRIER | \ + STATES_WAITING_FOR_BSD_WAKEUP | \ + STATES_WAITING_FOR_SYS_LOCK_MUTEX | \ + STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE | \ + STATES_WAITING_FOR_SYS_LOCK_FUTEX | \ + STATES_WAITING_FOR_SYS_LOCK_CONDITION | \ + STATES_WAITING_FOR_RWLOCK ) + +/** This macro corresponds to a task waiting which is blocked. 
*/ +#define STATES_BLOCKED ( STATES_DELAYING | \ + STATES_LOCALLY_BLOCKED | \ + STATES_WAITING_FOR_TIME | \ + STATES_WAITING_FOR_PERIOD | \ + STATES_WAITING_FOR_EVENT | \ + STATES_WAITING_FOR_RPC_REPLY | \ + STATES_WAITING_FOR_SYSTEM_EVENT | \ + STATES_INTERRUPTIBLE_BY_SIGNAL ) + +/** All state bits set to one (provided for _Thread_Ready()) */ +#define STATES_ALL_SET 0xffffffff + +/** + * This function sets the given states_to_set into the current_state + * passed in. The result is returned to the user in current_state. + * + * @param[in] states_to_set is the state bits to set + * @param[in] current_state is the state set to add them to + * + * @return This method returns the updated states value. + */ +RTEMS_INLINE_ROUTINE States_Control _States_Set ( + States_Control states_to_set, + States_Control current_state +) +{ + return (current_state | states_to_set); +} + +/** + * This function clears the given states_to_clear into the current_state + * passed in. The result is returned to the user in current_state. + * + * @param[in] states_to_clear is the state bits to clean + * @param[in] current_state is the state set to remove them from + * + * @return This method returns the updated states value. + */ +RTEMS_INLINE_ROUTINE States_Control _States_Clear ( + States_Control states_to_clear, + States_Control current_state +) +{ + return (current_state & ~states_to_clear); +} + +/** + * This function returns true if the_states indicates that the + * state is READY, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_ready ( + States_Control the_states +) +{ + return (the_states == STATES_READY); +} + +/** + * This function returns true if the DORMANT state is the ONLY state + * set in the_states, and false otherwise. 
+ * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_only_dormant ( + States_Control the_states +) +{ + return (the_states == STATES_DORMANT); +} + +/** + * This function returns true if the DORMANT state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_dormant ( + States_Control the_states +) +{ + return (the_states & STATES_DORMANT); +} + +/** + * This function returns true if the SUSPENDED state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_suspended ( + States_Control the_states +) +{ + return (the_states & STATES_SUSPENDED); +} + +/** + * This function returns true if the DELAYING state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_delaying ( + States_Control the_states +) +{ + return (the_states & STATES_DELAYING); +} + +/** + * This function returns true if the WAITING_FOR_BUFFER state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_buffer ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_BUFFER); +} + +/** + * This function returns true if the WAITING_FOR_SEGMENT state is set in + * the_states, and false otherwise. 
+ * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_segment ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_SEGMENT); +} + +/** + * This function returns true if the WAITING_FOR_MESSAGE state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_message ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_MESSAGE); +} + +/** + * This function returns true if the WAITING_FOR_EVENT state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_event ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_EVENT); +} + +/** + * This function returns true if the WAITING_FOR_SYSTEM_EVENT state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_system_event ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_SYSTEM_EVENT); +} + +/** + * This function returns true if the WAITING_FOR_MUTEX state + * is set in the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. 
+ */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_mutex ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_MUTEX); +} + +/** + * This function returns true if the WAITING_FOR_SEMAPHORE state + * is set in the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_semaphore ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_SEMAPHORE); +} + +/** + * This function returns true if the WAITING_FOR_TIME state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_time ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_TIME); +} + +/** + * This function returns true if the WAITING_FOR_TIME state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_rpc_reply ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_RPC_REPLY); +} + +/** + * This function returns true if the WAITING_FOR_PERIOD state is set in + * the_states, and false otherwise. + * + * @param[in] the_states is the task state set to test + * + * @return This method returns true if the desired state condition is set. + */ +RTEMS_INLINE_ROUTINE bool _States_Is_waiting_for_period ( + States_Control the_states +) +{ + return (the_states & STATES_WAITING_FOR_PERIOD); +} + +/** + * This function returns true if the task's state is set in + * way that allows it to be interrupted by a signal. 
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_interruptible_by_signal (
+  States_Control the_states
+)
+{
+   return (the_states & STATES_INTERRUPTIBLE_BY_SIGNAL);
+
+}
+/**
+ * This function returns true if one of the states which indicates
+ * that a task is blocked waiting for a local resource is set in
+ * the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the desired state condition is set.
+ */
+
+RTEMS_INLINE_ROUTINE bool _States_Is_locally_blocked (
+  States_Control the_states
+)
+{
+   return (the_states & STATES_LOCALLY_BLOCKED);
+}
+
+/**
+ * This function returns true if one of the states which indicates
+ * that a task is blocked is set in the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ *
+ * @return This method returns true if the state indicates that the
+ * associated thread is blocked.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Is_blocked (
+  States_Control the_states
+)
+{
+   return (the_states & STATES_BLOCKED);
+}
+
+/**
+ * This function returns true if any of the states in the mask
+ * are set in the_states, and false otherwise.
+ *
+ * @param[in] the_states is the task state set to test
+ * @param[in] mask is the state bits to test for
+ *
+ * @return This method returns true if the indicated state condition is set.
+ */
+RTEMS_INLINE_ROUTINE bool _States_Are_set (
+  States_Control the_states,
+  States_Control mask
+)
+{
+   return ( (the_states & mask) != STATES_READY);
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/include/rtems/score/sysstate.h b/include/rtems/score/sysstate.h
new file mode 100644
index 0000000000..02ac99584d
--- /dev/null
+++ b/include/rtems/score/sysstate.h
@@ -0,0 +1,119 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSysState
+ *
+ * @brief System State Handler API
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2011.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SYSSTATE_H
+#define _RTEMS_SCORE_SYSSTATE_H
+
+#include <rtems/score/basedefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSysState System State Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Management of the internal system state of RTEMS.
+ */
+/**@{**/
+
+/**
+ * @brief System states.
+ */
+typedef enum {
+  /**
+   * @brief The system is before the end of the first phase of initialization.
+   */
+  SYSTEM_STATE_BEFORE_INITIALIZATION,
+
+  /**
+   * @brief The system is between the end of the first phase of initialization
+   * but before multitasking is started.
+   */
+  SYSTEM_STATE_BEFORE_MULTITASKING,
+
+  /**
+   * @brief The system is up and operating normally.
+   */
+  SYSTEM_STATE_UP,
+
+  /**
+   * @brief The system reached its terminal state.
+ */ + SYSTEM_STATE_TERMINATED +} System_state_Codes; + +#define SYSTEM_STATE_CODES_FIRST SYSTEM_STATE_BEFORE_INITIALIZATION + +#define SYSTEM_STATE_CODES_LAST SYSTEM_STATE_TERMINATED + +#if defined(RTEMS_MULTIPROCESSING) +SCORE_EXTERN bool _System_state_Is_multiprocessing; +#endif + +extern System_state_Codes _System_state_Current; + +RTEMS_INLINE_ROUTINE void _System_state_Set ( + System_state_Codes state +) +{ + _System_state_Current = state; +} + +RTEMS_INLINE_ROUTINE System_state_Codes _System_state_Get ( void ) +{ + return _System_state_Current; +} + +RTEMS_INLINE_ROUTINE bool _System_state_Is_before_initialization ( + System_state_Codes state +) +{ + return (state == SYSTEM_STATE_BEFORE_INITIALIZATION); +} + +RTEMS_INLINE_ROUTINE bool _System_state_Is_before_multitasking ( + System_state_Codes state +) +{ + return (state == SYSTEM_STATE_BEFORE_MULTITASKING); +} + +RTEMS_INLINE_ROUTINE bool _System_state_Is_up ( + System_state_Codes state +) +{ + return (state == SYSTEM_STATE_UP); +} + +RTEMS_INLINE_ROUTINE bool _System_state_Is_terminated ( + System_state_Codes state +) +{ + return (state == SYSTEM_STATE_TERMINATED); +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/thread.h b/include/rtems/score/thread.h new file mode 100644 index 0000000000..94ce31ed50 --- /dev/null +++ b/include/rtems/score/thread.h @@ -0,0 +1,945 @@ +/** + * @file rtems/score/thread.h + * + * @brief Constants and Structures Related with the Thread Control Block + * + * This include file contains all constants and structures associated + * with the thread control block. + */ + +/* + * COPYRIGHT (c) 1989-2014. + * On-Line Applications Research Corporation (OAR). + * + * Copyright (c) 2014 embedded brains GmbH. + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_THREAD_H +#define _RTEMS_SCORE_THREAD_H + +#include <rtems/score/atomic.h> +#include <rtems/score/context.h> +#if defined(RTEMS_MULTIPROCESSING) +#include <rtems/score/mppkt.h> +#endif +#include <rtems/score/isrlock.h> +#include <rtems/score/object.h> +#include <rtems/score/priority.h> +#include <rtems/score/resource.h> +#include <rtems/score/stack.h> +#include <rtems/score/states.h> +#include <rtems/score/threadq.h> +#include <rtems/score/watchdog.h> + +#if defined(RTEMS_SMP) + #include <rtems/score/cpuset.h> +#endif + +struct Per_CPU_Control; + +struct Scheduler_Control; + +struct Scheduler_Node; + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreThread Thread Handler + * + * @ingroup Score + * + * This handler encapsulates functionality related to the management of + * threads. This includes the creation, deletion, and scheduling of threads. + * + * The following variables are maintained as part of the per cpu data + * structure. + * + * + Idle thread pointer + * + Executing thread pointer + * + Heir thread pointer + */ +/**@{*/ + +#if defined(RTEMS_POSIX_API) + #define RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE +#endif + +/* + * With the addition of the Constant Block Scheduler (CBS), + * this feature is needed even when POSIX is disabled. + */ +#define RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT + +#if defined(RTEMS_POSIX_API) + #define RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API +#endif + +/* + * The user can define this at configure time and go back to ticks + * resolution. + */ +#include <rtems/score/timestamp.h> + +typedef Timestamp_Control Thread_CPU_usage_t; + +/** + * The following defines the "return type" of a thread. + * + * @note This cannot always be right. Some APIs have void + * tasks/threads, others return pointers, others may + * return a numeric value. Hopefully a pointer is + * always at least as big as an uint32_t . 
:)
+ */
+typedef void *Thread;
+
+/**
+ * @brief Type of the numeric argument of a thread entry function with at
+ * least one numeric argument.
+ *
+ * This numeric argument type designates an unsigned integer type with the
+ * property that any valid pointer to void can be converted to this type and
+ * then converted back to a pointer to void.  The result will compare equal to
+ * the original pointer.
+ */
+typedef CPU_Uint32ptr Thread_Entry_numeric_type;
+
+/**
+ * The following defines the ways in which the entry point for a
+ * thread can be invoked.  Basically, it can be passed any
+ * combination/permutation of a pointer and an uint32_t value.
+ *
+ * @note For now, we are ignoring the return type.
+ */
+typedef enum {
+  THREAD_START_NUMERIC,
+  THREAD_START_POINTER,
+  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
+    THREAD_START_BOTH_POINTER_FIRST,
+    THREAD_START_BOTH_NUMERIC_FIRST
+  #endif
+} Thread_Start_types;
+
+/** This type corresponds to a very simple style thread entry point. */
+typedef Thread ( *Thread_Entry )( void );   /* basic type */
+
+/** This type corresponds to a thread entry point which takes a single
+ * unsigned thirty-two bit integer as an argument.
+ */
+typedef Thread ( *Thread_Entry_numeric )( Thread_Entry_numeric_type );
+
+/** This type corresponds to a thread entry point which takes a single
+ * untyped pointer as an argument.
+ */
+typedef Thread ( *Thread_Entry_pointer )( void * );
+
+/** This type corresponds to a thread entry point which takes an
+ * untyped pointer and an unsigned thirty-two bit integer as arguments.
+ */
+typedef Thread ( *Thread_Entry_both_pointer_first )( void *, Thread_Entry_numeric_type );
+
+/** This type corresponds to a thread entry point which takes an
+ * unsigned thirty-two bit integer and an untyped pointer
+ * as arguments.
+ */
+typedef Thread ( *Thread_Entry_both_numeric_first )( Thread_Entry_numeric_type, void * );
+
+/**
+ * The following lists the algorithms used to manage the thread cpu budget.
+ *
+ * Reset Timeslice: At each context switch, reset the time quantum.
+ * Exhaust Timeslice: Only reset the quantum once it is consumed.
+ * Callout: Execute routine when budget is consumed.
+ */
+typedef enum {
+  THREAD_CPU_BUDGET_ALGORITHM_NONE,
+  THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE,
+  #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
+    THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE,
+  #endif
+  #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
+    THREAD_CPU_BUDGET_ALGORITHM_CALLOUT
+  #endif
+} Thread_CPU_budget_algorithms;
+
+/** This defines the entry point for the thread specific timeslice
+ * budget management algorithm.
+ */
+typedef void (*Thread_CPU_budget_algorithm_callout )( Thread_Control * );
+
+#if !defined(RTEMS_SMP)
+/**
+ * @brief Forward reference to the per task variable structure.
+ *
+ * Forward reference to the per task variable structure.
+ */
+struct rtems_task_variable_tt;
+
+/**
+ * @brief Internal structure used to manage per task variables.
+ *
+ * This is the internal structure used to manage per Task Variables.
+ */
+typedef struct {
+  /** This field points to the next per task variable for this task. */
+  struct rtems_task_variable_tt *next;
+  /** This field points to the physical memory location of this per
+   * task variable.
+   */
+  void **ptr;
+  /** This field is the global value for this per task variable. */
+  void *gval;
+  /** This field is this thread's value for this per task variable. */
+  void *tval;
+  /** This field points to the destructor for this per task variable. */
+  void (*dtor)(void *);
+} rtems_task_variable_t;
+#endif
+
+/**
+ * The following structure contains the information which defines
+ * the starting state of a thread.
+ */ +typedef struct { + /** This field is the starting address for the thread. */ + Thread_Entry entry_point; + /** This field indicates the how task is invoked. */ + Thread_Start_types prototype; + /** This field is the pointer argument passed at thread start. */ + void *pointer_argument; + /** This field is the numeric argument passed at thread start. */ + Thread_Entry_numeric_type numeric_argument; + /*-------------- initial execution modes ----------------- */ + /** This field indicates whether the thread was preemptible when + * it started. + */ + bool is_preemptible; + /** This field indicates the CPU budget algorith. */ + Thread_CPU_budget_algorithms budget_algorithm; + /** This field is the routine to invoke when the CPU allotment is + * consumed. + */ + Thread_CPU_budget_algorithm_callout budget_callout; + /** This field is the initial ISR disable level of this thread. */ + uint32_t isr_level; + /** This field is the initial priority. */ + Priority_Control initial_priority; + #if defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API) + /** This field indicates whether the SuperCore allocated the stack. */ + bool core_allocated_stack; + #endif + /** This field is the stack information. */ + Stack_Control Initial_stack; + #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) + /** This field is the initial FP context area address. */ + Context_Control_fp *fp_context; + #endif + /** This field is the initial stack area address. */ + void *stack; + /** The thread-local storage (TLS) area */ + void *tls_area; +} Thread_Start_information; + +/** + * @brief Union type to hold a pointer to an immutable or a mutable object. + * + * The main purpose is to enable passing of pointers to read-only send buffers + * in the message passing subsystem. This approach is somewhat fragile since + * it prevents the compiler to check if the operations on objects are valid + * with respect to the constant qualifier. 
An alternative would be to add a + * third pointer argument for immutable objects, but this would increase the + * structure size. + */ +typedef union { + void *mutable_object; + const void *immutable_object; +} Thread_Wait_information_Object_argument_type; + +/** + * @brief This type is able to contain several flags used to control the wait + * class and state of a thread. + * + * The mutually exclusive wait class flags are + * - @ref THREAD_WAIT_CLASS_EVENT, + * - @ref THREAD_WAIT_CLASS_SYSTEM_EVENT, and + * - @ref THREAD_WAIT_CLASS_OBJECT. + * + * The mutually exclusive wait state flags are + * - @ref THREAD_WAIT_STATE_INTEND_TO_BLOCK, + * - @ref THREAD_WAIT_STATE_BLOCKED, and + * - @ref THREAD_WAIT_STATE_READY_AGAIN. + */ +typedef unsigned int Thread_Wait_flags; + +/** + * @brief Information required to manage a thread while it is blocked. + * + * This contains the information required to manage a thread while it is + * blocked and to return information to it. + */ +typedef struct { + /** + * @brief Node for thread queues. + */ + union { + /** + * @brief A node for chains. + */ + Chain_Node Chain; + + /** + * @brief A node for red-black trees. + */ + RBTree_Node RBTree; + } Node; + + /** This field is the Id of the object this thread is waiting upon. */ + Objects_Id id; + /** This field is used to return an integer while when blocked. */ + uint32_t count; + /** This field is for a pointer to a user return argument. */ + void *return_argument; + /** This field is for a pointer to a second user return argument. */ + Thread_Wait_information_Object_argument_type + return_argument_second; + /** This field contains any options in effect on this blocking operation. */ + uint32_t option; + /** This field will contain the return status from a blocking operation. + * + * @note The following assumes that all API return codes can be + * treated as an uint32_t. + */ + uint32_t return_code; + + /** + * @brief Code to set the timeout return code in _Thread_Timeout(). 
+ */ + uint32_t timeout_code; + + /** + * @brief The current thread queue. + * + * In case this field is @c NULL, then the thread is not blocked on a thread + * queue. This field is protected by the thread lock. + * + * @see _Thread_Lock_set() and _Thread_Wait_set_queue(). + */ + Thread_queue_Queue *queue; + + /** + * @brief This field contains several flags used to control the wait class + * and state of a thread in case fine-grained locking is used. + */ +#if defined(RTEMS_SMP) + Atomic_Uint flags; +#else + Thread_Wait_flags flags; +#endif + + /** + * @brief The current thread queue operations. + * + * This field is protected by the thread lock. + * + * @see _Thread_Lock_set() and _Thread_Wait_set_operations(). + */ + const Thread_queue_Operations *operations; + + Thread_queue_Heads *spare_heads; +} Thread_Wait_information; + +/** + * The following defines the control block used to manage + * each thread proxy. + * + * @note It is critical that proxies and threads have identical + * memory images for the shared part. + */ +typedef struct { + /** This field is the object management structure for each proxy. */ + Objects_Control Object; + /** This field is the current execution state of this proxy. */ + States_Control current_state; + + /** + * @brief This field is the current priority state of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ + Priority_Control current_priority; + + /** + * @brief This field is the base priority of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ + Priority_Control real_priority; + + /** + * @brief Generation of the current priority value. + * + * It is used in _Thread_Change_priority() to serialize the update of + * priority related data structures. 
+ */ + uint32_t priority_generation; + + /** + * @brief Hints if a priority restore is necessary once the resource count + * changes from one to zero. + * + * This is an optimization to speed up the mutex surrender sequence in case + * no attempt to change the priority was made during the mutex ownership. On + * SMP configurations atomic fences must synchronize writes to + * Thread_Control::priority_restore_hint and Thread_Control::resource_count. + */ + bool priority_restore_hint; + + /** This field is the number of mutexes currently held by this proxy. */ + uint32_t resource_count; + + /** This field is the blocking information for this proxy. */ + Thread_Wait_information Wait; + /** This field is the Watchdog used to manage proxy delays and timeouts. */ + Watchdog_Control Timer; +#if defined(RTEMS_MULTIPROCESSING) + /** This field is the received response packet in an MP system. */ + MP_packet_Prefix *receive_packet; +#endif + /****************** end of common block ********************/ + /** This field is used to manage the set of proxies in the system. */ + Chain_Node Active; +} Thread_Proxy_control; + +/** + * The following record defines the control block used + * to manage each thread. + * + * @note It is critical that proxies and threads have identical + * memory images for the shared part. + */ +typedef enum { + /** This value is for the Classic RTEMS API. */ + THREAD_API_RTEMS, + /** This value is for the POSIX API. */ + THREAD_API_POSIX +} Thread_APIs; + +/** This macro defines the first API which has threads. */ +#define THREAD_API_FIRST THREAD_API_RTEMS + +/** This macro defines the last API which has threads. */ +#define THREAD_API_LAST THREAD_API_POSIX + +typedef struct Thread_Action Thread_Action; + +/** + * @brief Thread action handler. + * + * The thread action handler will be called with interrupts disabled and the + * thread action lock acquired. The handler must release the thread action + * lock with _Thread_Action_release_and_ISR_enable(). 
So the thread action + * lock can be used to protect private data fields of the particular action. + * + * Since the action is passed to the handler private data fields can be added + * below the common thread action fields. + * + * @param[in] thread The thread performing the action. + * @param[in] action The thread action. + * @param[in] cpu The processor of the thread. + * @param[in] level The ISR level for _Thread_Action_release_and_ISR_enable(). + */ +typedef void ( *Thread_Action_handler )( + Thread_Control *thread, + Thread_Action *action, + struct Per_CPU_Control *cpu, + ISR_Level level +); + +/** + * @brief Thread action. + * + * Thread actions can be chained together to trigger a set of actions on + * particular events like for example a thread post-switch. Use + * _Thread_Action_initialize() to initialize this structure. + * + * Thread actions are the building block for efficient implementation of + * - Classic signals delivery, + * - POSIX signals delivery, + * - thread restart notification, + * - thread delete notification, + * - forced thread migration on SMP configurations, and + * - the Multiprocessor Resource Sharing Protocol (MrsP). + * + * @see _Thread_Run_post_switch_actions(). + */ +struct Thread_Action { + Chain_Node Node; + Thread_Action_handler handler; +}; + +/** + * @brief Control block to manage thread actions. + * + * Use _Thread_Action_control_initialize() to initialize this structure. + */ +typedef struct { + Chain_Control Chain; +} Thread_Action_control; + +/** + * @brief Thread life states. + * + * The thread life states are orthogonal to the thread states used for + * synchronization primitives and blocking operations. They reflect the state + * changes triggered with thread restart and delete requests. 
+ */ +typedef enum { + THREAD_LIFE_NORMAL = 0x0, + THREAD_LIFE_PROTECTED = 0x1, + THREAD_LIFE_RESTARTING = 0x2, + THREAD_LIFE_PROTECTED_RESTARTING = 0x3, + THREAD_LIFE_TERMINATING = 0x4, + THREAD_LIFE_PROTECTED_TERMINATING = 0x5, + THREAD_LIFE_RESTARTING_TERMINATING = 0x6, + THREAD_LIFE_PROTECTED_RESTARTING_TERMINATING = 0x7 +} Thread_Life_state; + +/** + * @brief Thread life control. + */ +typedef struct { + /** + * @brief Thread life action used to react upon thread restart and delete + * requests. + */ + Thread_Action Action; + + /** + * @brief The current thread life state. + */ + Thread_Life_state state; + + /** + * @brief The terminator thread of this thread. + * + * In case the thread is terminated and another thread (the terminator) waits + * for the actual termination completion, then this field references the + * terminator thread. + */ + Thread_Control *terminator; +} Thread_Life_control; + +#if defined(RTEMS_SMP) +/** + * @brief The thread state with respect to the scheduler. + */ +typedef enum { + /** + * @brief This thread is blocked with respect to the scheduler. + * + * This thread uses no scheduler nodes. + */ + THREAD_SCHEDULER_BLOCKED, + + /** + * @brief This thread is scheduled with respect to the scheduler. + * + * This thread executes using one of its scheduler nodes. This could be its + * own scheduler node or in case it owns resources taking part in the + * scheduler helping protocol a scheduler node of another thread. + */ + THREAD_SCHEDULER_SCHEDULED, + + /** + * @brief This thread is ready with respect to the scheduler. + * + * None of the scheduler nodes of this thread is scheduled. + */ + THREAD_SCHEDULER_READY +} Thread_Scheduler_state; +#endif + +/** + * @brief Thread scheduler control. + */ +typedef struct { +#if defined(RTEMS_SMP) + /** + * @brief The current scheduler state of this thread. + */ + Thread_Scheduler_state state; + + /** + * @brief The own scheduler control of this thread. 
+ * + * This field is constant after initialization. + */ + const struct Scheduler_Control *own_control; + + /** + * @brief The scheduler control of this thread. + * + * The scheduler helping protocol may change this field. + */ + const struct Scheduler_Control *control; + + /** + * @brief The own scheduler node of this thread. + * + * This field is constant after initialization. It is used by change + * priority and ask for help operations. + */ + struct Scheduler_Node *own_node; +#endif + + /** + * @brief The scheduler node of this thread. + * + * On uni-processor configurations this field is constant after + * initialization. + * + * On SMP configurations the scheduler helping protocol may change this + * field. + */ + struct Scheduler_Node *node; + +#if defined(RTEMS_SMP) + /** + * @brief The processor assigned by the current scheduler. + */ + struct Per_CPU_Control *cpu; + +#if defined(RTEMS_DEBUG) + /** + * @brief The processor on which this thread executed the last time or is + * executing. + */ + struct Per_CPU_Control *debug_real_cpu; +#endif +#endif +} Thread_Scheduler_control; + +typedef struct { + uint32_t flags; + void * control; +}Thread_Capture_control; + +#if defined(RTEMS_SMP) +/** + * @brief Thread lock control. + * + * The thread lock is either the default lock or the lock of the resource on + * which the thread is currently blocked. The generation number takes care + * that the up to date lock is used. Only resources using fine grained locking + * provide their own lock. + * + * The thread lock protects the following thread variables + * - Thread_Control::current_priority, + * - Thread_Control::Wait::queue, and + * - Thread_Control::Wait::operations. + * + * @see _Thread_Lock_acquire(), _Thread_Lock_release(), _Thread_Lock_set() and + * _Thread_Lock_restore_default(). + */ +typedef struct { + /** + * @brief The current thread lock. + * + * This is a plain ticket lock without SMP lock statistics support. 
This + * enables external libraries to use thread locks since they are independent + * of the actual RTEMS build configuration, e.g. profiling enabled or + * disabled. + */ + SMP_ticket_lock_Control *current; + + /** + * @brief The default thread lock in case the thread is not blocked on a + * resource. + */ + SMP_ticket_lock_Control Default; + +#if defined(RTEMS_PROFILING) + /** + * @brief The thread lock statistics. + * + * These statistics are used by the executing thread in case it acquires a + * thread lock. Thus the statistics are an aggregation of acquire and + * release operations of diffent locks. + */ + SMP_lock_Stats Stats; +#endif + + /** + * @brief Generation number to invalidate stale locks. + */ + Atomic_Uint generation; +} Thread_Lock_control; +#endif + +/** + * This structure defines the Thread Control Block (TCB). + * + * Uses a leading underscore in the structure name to allow forward + * declarations in standard header files provided by Newlib and GCC. + */ +struct _Thread_Control { + /** This field is the object management structure for each thread. */ + Objects_Control Object; + /** This field is the current execution state of this thread. */ + States_Control current_state; + + /** + * @brief This field is the current priority state of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ + Priority_Control current_priority; + + /** + * @brief This field is the base priority of this thread. + * + * Writes to this field are only allowed in _Thread_Initialize() or via + * _Thread_Change_priority(). + */ + Priority_Control real_priority; + + /** + * @brief Generation of the current priority value. + * + * It is used in _Thread_Change_priority() to serialize the update of + * priority related data structures. + */ + uint32_t priority_generation; + + /** + * @brief Hints if a priority restore is necessary once the resource count + * changes from one to zero. 
+ * + * This is an optimization to speed up the mutex surrender sequence in case + * no attempt to change the priority was made during the mutex ownership. On + * SMP configurations atomic fences must synchronize writes to + * Thread_Control::priority_restore_hint and Thread_Control::resource_count. + */ + bool priority_restore_hint; + + /** This field is the number of mutexes currently held by this thread. */ + uint32_t resource_count; + /** This field is the blocking information for this thread. */ + Thread_Wait_information Wait; + /** This field is the Watchdog used to manage thread delays and timeouts. */ + Watchdog_Control Timer; +#if defined(RTEMS_MULTIPROCESSING) + /** This field is the received response packet in an MP system. */ + MP_packet_Prefix *receive_packet; +#endif + /*================= end of common block =================*/ + +#if defined(RTEMS_SMP) + /** + * @brief Thread lock control. + */ + Thread_Lock_control Lock; +#endif + +#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING) + /** + * @brief Potpourri lock statistics. + * + * These SMP lock statistics are used for all lock objects that lack a + * storage space for the statistics. Examples are lock objects used in + * external libraries which are independent of the actual RTEMS build + * configuration. + */ + SMP_lock_Stats Potpourri_stats; +#endif + +#ifdef __RTEMS_STRICT_ORDER_MUTEX__ + /** This field is the head of queue of priority inheritance mutex + * held by the thread. + */ + Chain_Control lock_mutex; +#endif +#if defined(RTEMS_SMP) + /** + * @brief Resource node to build a dependency tree in case this thread owns + * resources or depends on a resource. + */ + Resource_Node Resource_node; +#endif +#if defined(RTEMS_MULTIPROCESSING) + /** This field is true if the thread is offered globally */ + bool is_global; +#endif + /** This field is true if the thread is preemptible. */ + bool is_preemptible; + /** This field is true if the thread uses the floating point unit. 
*/
+  bool is_fp;
+
+  /**
+   * @brief Scheduler related control.
+   */
+  Thread_Scheduler_control Scheduler;
+
+#if __RTEMS_ADA__
+  /** This field is the GNAT self context pointer. */
+  void *rtems_ada_self;
+#endif
+  /** This field is the length of the time quantum that this thread is
+   * allowed to consume. The algorithm used to manage limits on CPU usage
+   * is specified by budget_algorithm.
+   */
+  uint32_t cpu_time_budget;
+  /** This field is the algorithm used to manage this thread's time
+   * quantum. The algorithm may be specified as none, in which case
+   * no limit is in place.
+   */
+  Thread_CPU_budget_algorithms budget_algorithm;
+  /** This field is the method invoked when the budgeted time is consumed. */
+  Thread_CPU_budget_algorithm_callout budget_callout;
+  /** This field is the amount of CPU time consumed by this thread
+   * since it was created.
+   */
+  Thread_CPU_usage_t cpu_time_used;
+
+  /** This field contains information about the starting state of
+   * this thread.
+   */
+  Thread_Start_information Start;
+
+  Thread_Action_control Post_switch_actions;
+
+  /** This field contains the context of this thread. */
+  Context_Control Registers;
+#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
+  /** This field points to the floating point context for this thread.
+   * If NULL, the thread is integer only.
+   */
+  Context_Control_fp *fp_context;
+#endif
+  /** This field points to the newlib reentrancy structure for this thread. */
+  struct _reent *libc_reent;
+  /** This array contains the API extension area pointers. */
+  void *API_Extensions[ THREAD_API_LAST + 1 ];
+
+#if !defined(RTEMS_SMP)
+  /** This field points to the set of per task variables.
*/ + rtems_task_variable_t *task_variables; +#endif + + /** + * This is the thread key value chain's control, which is used + * to track all key value for specific thread, and when thread + * exits, we can remove all key value for specific thread by + * iterating this chain, or we have to search a whole rbtree, + * which is inefficient. + */ + Chain_Control Key_Chain; + + /** + * @brief Thread life-cycle control. + * + * Control state changes triggered by thread restart and delete requests. + */ + Thread_Life_control Life; + + Thread_Capture_control Capture; + + /** + * @brief Variable length array of user extension pointers. + * + * The length is defined by the application via <rtems/confdefs.h>. + */ + void *extensions[ RTEMS_ZERO_LENGTH_ARRAY ]; +}; + +#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE) +/** + * This routine is the body of the system idle thread. + * + * NOTE: This routine is actually instantiated by confdefs.h when needed. + */ +void *_Thread_Idle_body( + uintptr_t ignored +); +#endif + +/** This defines the type for a method which operates on a single thread. + */ +typedef void (*rtems_per_thread_routine)( Thread_Control * ); + +/** + * @brief Iterates over all threads. + * This routine iterates over all threads regardless of API and + * invokes the specified routine. + */ +void rtems_iterate_over_all_threads( + rtems_per_thread_routine routine +); + +/** + * @brief Thread control add-on. + */ +typedef struct { + /** + * @brief Offset of the pointer field in Thread_Control referencing an + * application configuration dependent memory area in the thread control + * block. + */ + size_t destination_offset; + + /** + * @brief Offset relative to the thread control block begin to an application + * configuration dependent memory area. + */ + size_t source_offset; +} Thread_Control_add_on; + +/** + * @brief Thread control add-ons. 
+ * + * The thread control block contains fields that point to application + * configuration dependent memory areas, like the scheduler information, the + * API control blocks, the user extension context table, the RTEMS notepads and + * the Newlib re-entrancy support. Account for these areas in the + * configuration and avoid extra workspace allocations for these areas. + * + * This array is provided via <rtems/confdefs.h>. + * + * @see _Thread_Control_add_on_count and _Thread_Control_size. + */ +extern const Thread_Control_add_on _Thread_Control_add_ons[]; + +/** + * @brief Thread control add-on count. + * + * Count of entries in _Thread_Control_add_ons. + * + * This value is provided via <rtems/confdefs.h>. + */ +extern const size_t _Thread_Control_add_on_count; + +/** + * @brief Size of the thread control block of a particular application. + * + * This value is provided via <rtems/confdefs.h>. + * + * @see _Thread_Control_add_ons. + */ +extern const size_t _Thread_Control_size; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/threaddispatch.h b/include/rtems/score/threaddispatch.h new file mode 100644 index 0000000000..4ef5538f7e --- /dev/null +++ b/include/rtems/score/threaddispatch.h @@ -0,0 +1,380 @@ +/** + * @brief Constants and Structures Related with Thread Dispatch + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_THREADDISPATCH_H +#define _RTEMS_SCORE_THREADDISPATCH_H + +#include <rtems/score/percpu.h> +#include <rtems/score/isrlock.h> +#include <rtems/score/profiling.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if defined(RTEMS_HEAVY_STACK_DEBUG) || \ + defined(RTEMS_HEAVY_MALLOC_DEBUG) + #define __THREAD_DO_NOT_INLINE_DISABLE_DISPATCH__ +#endif + +#if defined(RTEMS_SMP) || \ + (CPU_INLINE_ENABLE_DISPATCH == FALSE) || \ + (__RTEMS_DO_NOT_INLINE_THREAD_ENABLE_DISPATCH__ == 1) + #define __THREAD_DO_NOT_INLINE_ENABLE_DISPATCH__ +#endif + +/** + * @addtogroup ScoreThread + * + * @{ + */ + +/** + * @brief Indicates if the executing thread is inside a thread dispatch + * critical section. + * + * @retval true Thread dispatching is enabled. + * @retval false The executing thread is inside a thread dispatch critical + * section and dispatching is not allowed. + */ +RTEMS_INLINE_ROUTINE bool _Thread_Dispatch_is_enabled(void) +{ + bool enabled; + +#if defined(RTEMS_SMP) + ISR_Level level; + + _ISR_Disable_without_giant( level ); +#endif + + enabled = _Thread_Dispatch_disable_level == 0; + +#if defined(RTEMS_SMP) + _ISR_Enable_without_giant( level ); +#endif + + return enabled; +} + +/** + * @brief Gets thread dispatch disable level. + * + * @return The value of the thread dispatch level. + */ +RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_get_disable_level(void) +{ + return _Thread_Dispatch_disable_level; +} + +/** + * @brief Thread dispatch initialization. + * + * This routine initializes the thread dispatching subsystem. + */ +RTEMS_INLINE_ROUTINE void _Thread_Dispatch_initialization( void ) +{ + _Thread_Dispatch_disable_level = 1; +} + +#if defined(RTEMS_SMP) + /** + * @brief Acquires the giant lock. + * + * The giant lock is a recursive SMP lock protecting nearly all operating + * system services. + * + * This lock is implicitly acquired in + * _Thread_Dispatch_increment_disable_level(). 
Thread dispatching must be disabled before the Giant lock can be acquired
   * and must not be enabled while owning the Giant lock.
+ * + * This rountine increments the thread dispatch level + */ + RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_increment_disable_level(void) + { + uint32_t disable_level = _Thread_Dispatch_disable_level; +#if defined( RTEMS_PROFILING ) + ISR_Level level; + + _ISR_Disable( level ); + _Profiling_Thread_dispatch_disable( _Per_CPU_Get(), disable_level ); +#endif + + ++disable_level; + _Thread_Dispatch_disable_level = disable_level; + +#if defined( RTEMS_PROFILING ) + _ISR_Enable( level ); +#endif + + return disable_level; + } + + /** + * @brief Decrease thread dispatch disable level. + * + * This routine decrements the thread dispatch level. + */ + RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_decrement_disable_level(void) + { + uint32_t disable_level = _Thread_Dispatch_disable_level; +#if defined( RTEMS_PROFILING ) + ISR_Level level; + + _ISR_Disable( level ); +#endif + + --disable_level; + _Thread_Dispatch_disable_level = disable_level; + +#if defined( RTEMS_PROFILING ) + _Profiling_Thread_dispatch_enable( _Per_CPU_Get(), disable_level ); + _ISR_Enable( level ); +#endif + + return disable_level; + } + + RTEMS_INLINE_ROUTINE void _Giant_Acquire( Per_CPU_Control *cpu_self ) + { + (void) cpu_self; + } + + RTEMS_INLINE_ROUTINE void _Giant_Release( Per_CPU_Control *cpu_self ) + { + (void) cpu_self; + } +#endif /* RTEMS_SMP */ + +/** + * @brief Performs a thread dispatch if necessary. + * + * This routine is responsible for transferring control of the processor from + * the executing thread to the heir thread. Once the heir is running an + * attempt is made to run the pending post-switch thread actions. + * + * As part of this process, it is responsible for the following actions + * - update timing information of the executing thread, + * - save the context of the executing thread, + * - invokation of the thread switch user extensions, + * - restore the context of the heir thread, and + * - run of pending post-switch thread actions of the resulting executing + * thread. 
+ * + * On entry the thread dispatch level must be equal to zero. + */ +void _Thread_Dispatch( void ); + +/** + * @brief Performs a thread dispatch on the current processor. + * + * On entry the thread dispatch disable level must be equal to one and + * interrupts must be disabled. + * + * This function assumes that a thread dispatch is necessary. + * + * @param[in] cpu_self The current processor. + * @param[in] level The previous interrupt level. + * + * @see _Thread_Dispatch(). + */ +void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level ); + +/** + * @brief Disables thread dispatching inside a critical section (interrupts + * disabled). + * + * This function does not acquire the Giant lock. + * + * @param[in] lock_context The lock context of the corresponding + * _ISR_lock_ISR_disable() that started the critical section. + * + * @return The current processor. + */ +RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable_critical( + const ISR_lock_Context *lock_context +) +{ + Per_CPU_Control *cpu_self; + uint32_t disable_level; + + cpu_self = _Per_CPU_Get(); + disable_level = cpu_self->thread_dispatch_disable_level; + _Profiling_Thread_dispatch_disable_critical( + cpu_self, + disable_level, + lock_context + ); + cpu_self->thread_dispatch_disable_level = disable_level + 1; + + return cpu_self; +} + +/** + * @brief Disables thread dispatching. + * + * This function does not acquire the Giant lock. + * + * @return The current processor. + */ +RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Dispatch_disable( void ) +{ + Per_CPU_Control *cpu_self; + ISR_lock_Context lock_context; + +#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING ) + _ISR_lock_ISR_disable( &lock_context ); +#endif + + cpu_self = _Thread_Dispatch_disable_critical( &lock_context ); + +#if defined( RTEMS_SMP ) || defined( RTEMS_PROFILING ) + _ISR_lock_ISR_enable( &lock_context ); +#endif + + return cpu_self; +} + +/** + * @brief Enables thread dispatching. 
May perform a thread dispatch if necessary as a side-effect.
Use this
 * function with care.
+ */ +#define THREAD_STATUS_PROXY_BLOCKING 0x1111111 + +/** + * Self for the GNU Ada Run-Time + */ +SCORE_EXTERN void *rtems_ada_self; + +typedef struct { + Objects_Information Objects; + + Freechain_Control Free_thread_queue_heads; +} Thread_Information; + +/** + * The following defines the information control block used to + * manage this class of objects. + */ +SCORE_EXTERN Thread_Information _Thread_Internal_information; + +/** + * The following points to the thread whose floating point + * context is currently loaded. + */ +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) +SCORE_EXTERN Thread_Control *_Thread_Allocated_fp; +#endif + +#define THREAD_CHAIN_NODE_TO_THREAD( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain ) + +#define THREAD_RBTREE_NODE_TO_THREAD( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree ) + +#if defined(RTEMS_SMP) +#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node ) +#endif + +void _Thread_Initialize_information( + Thread_Information *information, + Objects_APIs the_api, + uint16_t the_class, + uint32_t maximum, + bool is_string, + uint32_t maximum_name_length +#if defined(RTEMS_MULTIPROCESSING) + , + bool supports_global +#endif +); + +/** + * @brief Initialize thread handler. + * + * This routine performs the initialization necessary for this handler. + */ +void _Thread_Handler_initialization(void); + +/** + * @brief Create idle thread. + * + * This routine creates the idle thread. + * + * @warning No thread should be created before this one. + */ +void _Thread_Create_idle(void); + +/** + * @brief Start thread multitasking. + * + * This routine initiates multitasking. It is invoked only as + * part of initialization and its invocation is the last act of + * the non-multitasking part of the system initialization. 
+ */ +void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN; + +/** + * @brief Allocate the requested stack space for the thread. + * + * Allocate the requested stack space for the thread. + * Set the Start.stack field to the address of the stack. + * + * @param[in] the_thread is the thread where the stack space is requested + * @param[in] stack_size is the stack space is requested + * + * @retval actual size allocated after any adjustment + * @retval zero if the allocation failed + */ +size_t _Thread_Stack_Allocate( + Thread_Control *the_thread, + size_t stack_size +); + +/** + * @brief Deallocate thread stack. + * + * Deallocate the Thread's stack. + */ +void _Thread_Stack_Free( + Thread_Control *the_thread +); + +/** + * @brief Initialize thread. + * + * This routine initializes the specified the thread. It allocates + * all memory associated with this thread. It completes by adding + * the thread to the local object table so operations on this + * thread id are allowed. + * + * @note If stack_area is NULL, it is allocated from the workspace. + * + * @note If the stack is allocated from the workspace, then it is + * guaranteed to be of at least minimum size. + */ +bool _Thread_Initialize( + Thread_Information *information, + Thread_Control *the_thread, + const struct Scheduler_Control *scheduler, + void *stack_area, + size_t stack_size, + bool is_fp, + Priority_Control priority, + bool is_preemptible, + Thread_CPU_budget_algorithms budget_algorithm, + Thread_CPU_budget_algorithm_callout budget_callout, + uint32_t isr_level, + Objects_Name name +); + +/** + * @brief Initializes thread and executes it. + * + * This routine initializes the executable information for a thread + * and makes it ready to execute. After this routine executes, the + * thread competes with all other threads for CPU time. 
+ * + * @param the_thread is the thread to be initialized + * @param the_prototype + * @param entry_point + * @param pointer_argument + * @param numeric_argument + * @param[in,out] cpu The processor if used to start an idle thread + * during system initialization. Must be set to @c NULL to start a normal + * thread. + */ +bool _Thread_Start( + Thread_Control *the_thread, + Thread_Start_types the_prototype, + void *entry_point, + void *pointer_argument, + Thread_Entry_numeric_type numeric_argument, + Per_CPU_Control *cpu +); + +bool _Thread_Restart( + Thread_Control *the_thread, + Thread_Control *executing, + void *pointer_argument, + Thread_Entry_numeric_type numeric_argument +); + +void _Thread_Yield( Thread_Control *executing ); + +bool _Thread_Set_life_protection( bool protect ); + +void _Thread_Life_action_handler( + Thread_Control *executing, + Thread_Action *action, + Per_CPU_Control *cpu, + ISR_Level level +); + +/** + * @brief Kills all zombie threads in the system. + * + * Threads change into the zombie state as the last step in the thread + * termination sequence right before a context switch to the heir thread is + * initiated. Since the thread stack is still in use during this phase we have + * to postpone the thread stack reclamation until this point. On SMP + * configurations we may have to busy wait for context switch completion here. + */ +void _Thread_Kill_zombies( void ); + +/** + * @brief Closes the thread. + * + * Closes the thread object and starts the thread termination sequence. In + * case the executing thread is not terminated, then this function waits until + * the terminating thread reached the zombie state. + */ +void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing ); + +/** + * @brief Clears the specified thread state. + * + * In case the previous state is a non-ready state and the next state is the + * ready state, then the thread is unblocked by the scheduler. + * + * @param[in] the_thread The thread. 
+ * @param[in] state The state to clear. It must not be zero. + * + * @return The previous state. + */ +States_Control _Thread_Clear_state( + Thread_Control *the_thread, + States_Control state +); + +/** + * @brief Sets the specified thread state. + * + * In case the previous state is the ready state, then the thread is blocked by + * the scheduler. + * + * @param[in] the_thread The thread. + * @param[in] state The state to set. It must not be zero. + * + * @return The previous state. + */ +States_Control _Thread_Set_state( + Thread_Control *the_thread, + States_Control state +); + +/** + * @brief Clears all thread states. + * + * In case the previous state is a non-ready state, then the thread is + * unblocked by the scheduler. + * + * @param[in] the_thread The thread. + */ +RTEMS_INLINE_ROUTINE void _Thread_Ready( + Thread_Control *the_thread +) +{ + _Thread_Clear_state( the_thread, STATES_ALL_SET ); +} + +/** + * @brief Initializes enviroment for a thread. + * + * This routine initializes the context of @a the_thread to its + * appropriate starting state. + * + * @param[in] the_thread is the pointer to the thread control block. + */ +void _Thread_Load_environment( + Thread_Control *the_thread +); + +/** + * @brief Wrapper function for all threads. + * + * This routine is the wrapper function for all threads. It is + * the starting point for all threads. The user provided thread + * entry point is invoked by this routine. Operations + * which must be performed immediately before and after the user's + * thread executes are found here. + * + * @note On entry, it is assumed all interrupts are blocked and that this + * routine needs to set the initial isr level. This may or may not + * actually be needed by the context switch routine and as a result + * interrupts may already be at there proper level. Either way, + * setting the initial isr level properly here is safe. 
+ */ +void _Thread_Handler( void ); + +/** + * @brief Executes the global constructors and then restarts itself as the + * first initialization thread. + * + * The first initialization thread is the first RTEMS initialization task or + * the first POSIX initialization thread in case no RTEMS initialization tasks + * are present. + */ +void *_Thread_Global_construction( void ); + +/** + * @brief Ended the delay of a thread. + * + * This routine is invoked when a thread must be unblocked at the + * end of a time based delay (i.e. wake after or wake when). + * It is called by the watchdog handler. + * + * @param[in] id is the thread id + * @param[in] ignored is not used + */ +void _Thread_Delay_ended( + Objects_Id id, + void *ignored +); + +/** + * @brief Returns true if the left thread priority is less than the right + * thread priority in the intuitive sense of priority and false otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than( + Priority_Control left, + Priority_Control right +) +{ + return left > right; +} + +/** + * @brief Returns the highest priority of the left and right thread priorities + * in the intuitive sense of priority. + */ +RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest( + Priority_Control left, + Priority_Control right +) +{ + return _Thread_Priority_less_than( left, right ) ? right : left; +} + +/** + * @brief Filters a thread priority change. + * + * Called by _Thread_Change_priority() under the protection of the thread lock. + * + * @param[in] the_thread The thread. + * @param[in, out] new_priority The new priority of the thread. The filter may + * alter this value. + * @param[in] arg The argument passed to _Thread_Change_priority(). + * + * @retval true Change the current priority. + * @retval false Otherwise. 
+ */ +typedef bool ( *Thread_Change_priority_filter )( + Thread_Control *the_thread, + Priority_Control *new_priority, + void *arg +); + +/** + * @brief Changes the priority of a thread if allowed by the filter function. + * + * It changes current priority of the thread to the new priority in case the + * filter function returns true. In this case the scheduler is notified of the + * priority change as well. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * @param[in] arg The argument for the filter function. + * @param[in] filter The filter function to determine if a priority change is + * allowed and optionally perform other actions under the protection of the + * thread lock simultaneously with the update of the current priority. + * @param[in] prepend_it In case this is true, then the thread is prepended to + * its priority group in its scheduler instance, otherwise it is appended. + */ +void _Thread_Change_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + void *arg, + Thread_Change_priority_filter filter, + bool prepend_it +); + +/** + * @brief Raises the priority of a thread. + * + * It changes the current priority of the thread to the new priority if the new + * priority is higher than the current priority. In this case the thread is + * appended to its new priority group in its scheduler instance. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * + * @see _Thread_Change_priority(). + */ +void _Thread_Raise_priority( + Thread_Control *the_thread, + Priority_Control new_priority +); + +/** + * @brief Inherit the priority of a thread. + * + * It changes the current priority of the inheritor thread to the current priority + * of the ancestor thread if it is higher than the current priority of the inheritor + * thread. In this case the inheritor thread is appended to its new priority group + * in its scheduler instance. 
+ * + * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in + * case the own schedulers of the inheritor and ancestor thread differ (priority + * boosting). + * + * @param[in] inheritor The thread to inherit the priority. + * @param[in] ancestor The thread to bequeath its priority to the inheritor + * thread. + */ +#if defined(RTEMS_SMP) +void _Thread_Inherit_priority( + Thread_Control *inheritor, + Thread_Control *ancestor +); +#else +RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority( + Thread_Control *inheritor, + Thread_Control *ancestor +) +{ + _Thread_Raise_priority( inheritor, ancestor->current_priority ); +} +#endif + +/** + * @brief Sets the current to the real priority of a thread. + * + * Sets the priority restore hint to false. + */ +void _Thread_Restore_priority( Thread_Control *the_thread ); + +/** + * @brief Sets the priority of a thread. + * + * It sets the real priority of the thread. In addition it changes the current + * priority of the thread if the new priority is higher than the current + * priority or the thread owns no resources. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority of the thread. + * @param[out] old_priority The old real priority of the thread. This pointer + * must not be @c NULL. + * @param[in] prepend_it In case this is true, then the thread is prepended to + * its priority group in its scheduler instance, otherwise it is appended. + * + * @see _Thread_Change_priority(). + */ +void _Thread_Set_priority( + Thread_Control *the_thread, + Priority_Control new_priority, + Priority_Control *old_priority, + bool prepend_it +); + +/** + * @brief Maps thread Id to a TCB pointer. + * + * This function maps thread IDs to thread control + * blocks. If ID corresponds to a local thread, then it + * returns the_thread control pointer which maps to ID + * and @a location is set to OBJECTS_LOCAL. 
If the thread ID is + * global and resides on a remote node, then location is set + * to OBJECTS_REMOTE, and the_thread is undefined. + * Otherwise, location is set to OBJECTS_ERROR and + * the_thread is undefined. + * + * @param[in] id is the id of the thread. + * @param[in] location is the location of the block. + * + * @note The performance of many RTEMS services depends upon + * the quick execution of the "good object" path in this + * routine. If there is a possibility of saving a few + * cycles off the execution time, this routine is worth + * further optimization attention. + */ +Thread_Control *_Thread_Get ( + Objects_Id id, + Objects_Locations *location +); + +/** + * @brief Gets a thread by its identifier. + * + * @see _Objects_Get_isr_disable(). + */ +Thread_Control *_Thread_Get_interrupt_disable( + Objects_Id id, + Objects_Locations *location, + ISR_lock_Context *lock_context +); + +RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU( + const Thread_Control *thread +) +{ +#if defined(RTEMS_SMP) + return thread->Scheduler.cpu; +#else + (void) thread; + + return _Per_CPU_Get(); +#endif +} + +RTEMS_INLINE_ROUTINE void _Thread_Set_CPU( + Thread_Control *thread, + Per_CPU_Control *cpu +) +{ +#if defined(RTEMS_SMP) + thread->Scheduler.cpu = cpu; +#else + (void) thread; + (void) cpu; +#endif +} + +/** + * This function returns true if the_thread is the currently executing + * thread, and false otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Thread_Is_executing ( + const Thread_Control *the_thread +) +{ + return ( the_thread == _Thread_Executing ); +} + +#if defined(RTEMS_SMP) +/** + * @brief Returns @a true in case the thread executes currently on some + * processor in the system, otherwise @a false. + * + * Do not confuse this with _Thread_Is_executing() which checks only the + * current processor. 
+ */ +RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor( + const Thread_Control *the_thread +) +{ + return _CPU_Context_Get_is_executing( &the_thread->Registers ); +} +#endif + +/** + * @brief Returns @a true and sets time_of_context_switch to the + * time of the last context switch when the thread is currently executing + * in the system, otherwise @a false. + */ +RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch( + Thread_Control *the_thread, + Timestamp_Control *time_of_context_switch +) +{ + bool retval = false; + + _Thread_Disable_dispatch(); + #ifndef RTEMS_SMP + if ( _Thread_Executing->Object.id == the_thread->Object.id ) { + *time_of_context_switch = _Thread_Time_of_last_context_switch; + retval = true; + } + #else + if ( _Thread_Is_executing_on_a_processor( the_thread ) ) { + *time_of_context_switch = + _Thread_Get_CPU( the_thread )->time_of_last_context_switch; + retval = true; + } + #endif + _Thread_Enable_dispatch(); + return retval; +} + + +/** + * This function returns true if the_thread is the heir + * thread, and false otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Thread_Is_heir ( + const Thread_Control *the_thread +) +{ + return ( the_thread == _Thread_Heir ); +} + +/** + * This routine clears any blocking state for the_thread. It performs + * any necessary scheduling operations including the selection of + * a new heir thread. + */ + +RTEMS_INLINE_ROUTINE void _Thread_Unblock ( + Thread_Control *the_thread +) +{ + _Thread_Clear_state( the_thread, STATES_BLOCKED ); +} + +/** + * This routine resets the current context of the calling thread + * to that of its initial state. 
+ */ + +RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing ) +{ +#if defined(RTEMS_SMP) + ISR_Level level; + + _Giant_Release( _Per_CPU_Get() ); + + _ISR_Disable_without_giant( level ); + ( void ) level; +#endif + +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) + if ( executing->fp_context != NULL ) + _Context_Restore_fp( &executing->fp_context ); +#endif + + _CPU_Context_Restart_self( &executing->Registers ); +} + +/** + * This function returns true if the floating point context of + * the_thread is currently loaded in the floating point unit, and + * false otherwise. + */ + +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) +RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp ( + const Thread_Control *the_thread +) +{ + return ( the_thread == _Thread_Allocated_fp ); +} +#endif + +/* + * If the CPU has hardware floating point, then we must address saving + * and restoring it as part of the context switch. + * + * The second conditional compilation section selects the algorithm used + * to context switch between floating point tasks. The deferred algorithm + * can be significantly better in a system with few floating point tasks + * because it reduces the total number of save and restore FP context + * operations. However, this algorithm can not be used on all CPUs due + * to unpredictable use of FP registers by some compilers for integer + * operations. 
+ */ + +RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing ) +{ +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) +#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE ) + if ( executing->fp_context != NULL ) + _Context_Save_fp( &executing->fp_context ); +#endif +#endif +} + +RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing ) +{ +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) +#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE ) + if ( (executing->fp_context != NULL) && + !_Thread_Is_allocated_fp( executing ) ) { + if ( _Thread_Allocated_fp != NULL ) + _Context_Save_fp( &_Thread_Allocated_fp->fp_context ); + _Context_Restore_fp( &executing->fp_context ); + _Thread_Allocated_fp = executing; + } +#else + if ( executing->fp_context != NULL ) + _Context_Restore_fp( &executing->fp_context ); +#endif +#endif +} + +/** + * This routine is invoked when the currently loaded floating + * point context is now longer associated with an active thread. + */ + +#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) +RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void ) +{ + _Thread_Allocated_fp = NULL; +} +#endif + +/** + * This function returns true if dispatching is disabled, and false + * otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void ) +{ + return ( _Thread_Dispatch_necessary ); +} + +/** + * This function returns true if the_thread is NULL and false otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Thread_Is_null ( + const Thread_Control *the_thread +) +{ + return ( the_thread == NULL ); +} + +/** + * @brief Is proxy blocking. + * + * status which indicates that a proxy is blocking, and false otherwise. 
+ */ +RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking ( + uint32_t code +) +{ + return (code == THREAD_STATUS_PROXY_BLOCKING); +} + +RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void) +{ + /* Idle threads */ + uint32_t maximum_internal_threads = + rtems_configuration_get_maximum_processors(); + + /* MPCI thread */ +#if defined(RTEMS_MULTIPROCESSING) + if ( _System_state_Is_multiprocessing ) { + ++maximum_internal_threads; + } +#endif + + return maximum_internal_threads; +} + +RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void ) +{ + return (Thread_Control *) + _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects ); +} + +/** + * @brief Gets the heir of the processor and makes it executing. + * + * Must be called with interrupts disabled. The thread dispatch necessary + * indicator is cleared as a side-effect. + * + * @return The heir thread. + * + * @see _Thread_Dispatch(), _Thread_Start_multitasking() and + * _Thread_Dispatch_update_heir(). 
+ */ +RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing( + Per_CPU_Control *cpu_self +) +{ + Thread_Control *heir; + + heir = cpu_self->heir; + cpu_self->dispatch_necessary = false; + cpu_self->executing = heir; + + return heir; +} + +#if defined( RTEMS_SMP ) +RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir( + Per_CPU_Control *cpu_self, + Per_CPU_Control *cpu_for_heir, + Thread_Control *heir +) +{ + cpu_for_heir->heir = heir; + + if ( cpu_for_heir == cpu_self ) { + cpu_self->dispatch_necessary = true; + } else { + _Per_CPU_Send_interrupt( cpu_for_heir ); + } +} +#endif + +RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used( + Thread_Control *executing, + Timestamp_Control *time_of_last_context_switch +) +{ + Timestamp_Control uptime; + Timestamp_Control ran; + + _TOD_Get_uptime( &uptime ); + _Timestamp_Subtract( + time_of_last_context_switch, + &uptime, + &ran + ); + *time_of_last_context_switch = uptime; + _Timestamp_Add_to( &executing->cpu_time_used, &ran ); +} + +RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize( + Thread_Action_control *action_control +) +{ + _Chain_Initialize_empty( &action_control->Chain ); +} + +RTEMS_INLINE_ROUTINE void _Thread_Action_initialize( + Thread_Action *action, + Thread_Action_handler handler +) +{ + action->handler = handler; + _Chain_Set_off_chain( &action->Node ); +} + +RTEMS_INLINE_ROUTINE Per_CPU_Control * + _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level ) +{ + Per_CPU_Control *cpu; + + _ISR_Disable_without_giant( *level ); + cpu = _Per_CPU_Get(); + _Per_CPU_Acquire( cpu ); + + return cpu; +} + +RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire( + Thread_Control *thread, + ISR_Level *level +) +{ + Per_CPU_Control *cpu; + + _ISR_Disable_without_giant( *level ); + cpu = _Thread_Get_CPU( thread ); + _Per_CPU_Acquire( cpu ); + + return cpu; +} + +RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable( + Per_CPU_Control *cpu, 
+ ISR_Level level +) +{ + _Per_CPU_Release_and_ISR_enable( cpu, level ); +} + +RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action( + Thread_Control *thread, + Thread_Action *action +) +{ + Per_CPU_Control *cpu_of_thread; + ISR_Level level; + + cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level ); + +#if defined(RTEMS_SMP) + if ( _Per_CPU_Get() == cpu_of_thread ) { + cpu_of_thread->dispatch_necessary = true; + } else { + _Per_CPU_Send_interrupt( cpu_of_thread ); + } +#else + cpu_of_thread->dispatch_necessary = true; +#endif + + _Chain_Append_if_is_off_chain_unprotected( + &thread->Post_switch_actions.Chain, + &action->Node + ); + + _Thread_Action_release_and_ISR_enable( cpu_of_thread, level ); +} + +RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting( + Thread_Life_state life_state +) +{ + return ( life_state & THREAD_LIFE_RESTARTING ) != 0; +} + +RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating( + Thread_Life_state life_state +) +{ + return ( life_state & THREAD_LIFE_TERMINATING ) != 0; +} + +RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected( + Thread_Life_state life_state +) +{ + return ( life_state & THREAD_LIFE_PROTECTED ) != 0; +} + +RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing( + Thread_Life_state life_state +) +{ + return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0; +} + +/** + * @brief Returns true if the thread owns resources, and false otherwise. + * + * Resources are accounted with the Thread_Control::resource_count resource + * counter. This counter is used by semaphore objects for example. + * + * In addition to the resource counter there is a resource dependency tree + * available on SMP configurations. In case this tree is non-empty, then the + * thread owns resources. + * + * @param[in] the_thread The thread. 
+ */ +RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources( + const Thread_Control *the_thread +) +{ + bool owns_resources = the_thread->resource_count != 0; + +#if defined(RTEMS_SMP) + owns_resources = owns_resources + || _Resource_Node_owns_resources( &the_thread->Resource_node ); +#endif + + return owns_resources; +} + +/** + * @brief Acquires the default thread lock inside a critical section + * (interrupts disabled). + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context used for the corresponding lock + * release. + * + * @see _Thread_Lock_release_default(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + _Assert( _ISR_Get_level() != 0 ); +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Acquire( + &the_thread->Lock.Default, + &_Thread_Executing->Lock.Stats, + &lock_context->Lock_context.Stats_context + ); +#else + (void) the_thread; + (void) lock_context; +#endif +} + +/** + * @brief Acquires the default thread lock and returns the executing thread. + * + * @param[in] lock_context The lock context used for the corresponding lock + * release. + * + * @return The executing thread. + * + * @see _Thread_Lock_release_default(). + */ +RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing( + ISR_lock_Context *lock_context +) +{ + Thread_Control *executing; + + _ISR_lock_ISR_disable( lock_context ); + executing = _Thread_Executing; + _Thread_Lock_acquire_default_critical( executing, lock_context ); + + return executing; +} + +/** + * @brief Acquires the default thread lock. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context used for the corresponding lock + * release. + * + * @see _Thread_Lock_release_default(). 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_ISR_disable( lock_context ); + _Thread_Lock_acquire_default_critical( the_thread, lock_context ); +} + +/** + * @brief Releases the thread lock inside a critical section (interrupts + * disabled). + * + * The previous interrupt status is not restored. + * + * @param[in] lock The lock. + * @param[in] lock_context The lock context used for the corresponding lock + * acquire. + */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical( + void *lock, + ISR_lock_Context *lock_context +) +{ +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Release( + lock, + &lock_context->Lock_context.Stats_context + ); +#else + (void) lock; + (void) lock_context; +#endif +} + +/** + * @brief Releases the thread lock. + * + * @param[in] lock The lock returned by _Thread_Lock_acquire(). + * @param[in] lock_context The lock context used for _Thread_Lock_acquire(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_release( + void *lock, + ISR_lock_Context *lock_context +) +{ + _Thread_Lock_release_critical( lock, lock_context ); + _ISR_lock_ISR_enable( lock_context ); +} + +/** + * @brief Releases the default thread lock inside a critical section + * (interrupts disabled). + * + * The previous interrupt status is not restored. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context used for the corresponding lock + * acquire. + */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + _Thread_Lock_release_critical( +#if defined(RTEMS_SMP) + &the_thread->Lock.Default, +#else + NULL, +#endif + lock_context + ); +} + +/** + * @brief Releases the default thread lock. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context used for the corresponding lock + * acquire. 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ + _Thread_Lock_release_default_critical( the_thread, lock_context ); + _ISR_lock_ISR_enable( lock_context ); +} + +/** + * @brief Acquires the thread lock. + * + * @param[in] the_thread The thread. + * @param[in] lock_context The lock context for _Thread_Lock_release(). + * + * @return The lock required by _Thread_Lock_release(). + */ +RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire( + Thread_Control *the_thread, + ISR_lock_Context *lock_context +) +{ +#if defined(RTEMS_SMP) + SMP_ticket_lock_Control *lock; + + while ( true ) { + unsigned int first_generation; + unsigned int second_generation; + + _ISR_lock_ISR_disable( lock_context ); + + /* + * Ensure that we read our first lock generation before we obtain our + * current lock. See _Thread_Lock_set_unprotected(). + */ + first_generation = _Atomic_Load_uint( + &the_thread->Lock.generation, + ATOMIC_ORDER_ACQUIRE + ); + + lock = the_thread->Lock.current; + _SMP_ticket_lock_Acquire( + lock, + &_Thread_Executing->Lock.Stats, + &lock_context->Lock_context.Stats_context + ); + + /* + * The C11 memory model doesn't guarantee that we read the latest + * generation here. For this a read-modify-write operation would be + * necessary. We read at least the new generation set up by the owner of + * our current thread lock, and so on. + */ + second_generation = _Atomic_Load_uint( + &the_thread->Lock.generation, + ATOMIC_ORDER_ACQUIRE + ); + + if ( first_generation == second_generation ) { + return lock; + } + + _Thread_Lock_release( lock, lock_context ); + } +#else + _ISR_Disable( lock_context->isr_level ); + + return NULL; +#endif +} + +#if defined(RTEMS_SMP) +/* + * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default() + * instead. 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected( + Thread_Control *the_thread, + SMP_ticket_lock_Control *new_lock +) +{ + the_thread->Lock.current = new_lock; + + /* + * The generation release corresponds to the generation acquire in + * _Thread_Lock_acquire() and ensures that the new lock and other fields are + * visible to the next thread lock owner. Otherwise someone would be able to + * read an up to date generation number and an old lock. See + * _Thread_Wait_set_queue() and _Thread_Wait_restore_default_operations(). + * + * Since we set a new lock right before, this increment is not protected by a + * lock and thus must be an atomic operation. + */ + _Atomic_Fetch_add_uint( + &the_thread->Lock.generation, + 1, + ATOMIC_ORDER_RELEASE + ); +} +#endif + +/** + * @brief Sets a new thread lock. + * + * The caller must not be the owner of the default thread lock. The caller + * must be the owner of the new lock. + * + * @param[in] the_thread The thread. + * @param[in] new_lock The new thread lock. + */ +#if defined(RTEMS_SMP) +RTEMS_INLINE_ROUTINE void _Thread_Lock_set( + Thread_Control *the_thread, + SMP_ticket_lock_Control *new_lock +) +{ + ISR_lock_Context lock_context; + + _Thread_Lock_acquire_default_critical( the_thread, &lock_context ); + _Assert( the_thread->Lock.current == &the_thread->Lock.Default ); + _Thread_Lock_set_unprotected( the_thread, new_lock ); + _Thread_Lock_release_default_critical( the_thread, &lock_context ); +} +#else +#define _Thread_Lock_set( the_thread, new_lock ) \ + do { } while ( 0 ) +#endif + +/** + * @brief Restores the default thread lock. + * + * The caller must be the owner of the current thread lock. + * + * @param[in] the_thread The thread. 
+ */
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
+  Thread_Control *the_thread
+)
+{
+  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
+}
+#else
+#define _Thread_Lock_restore_default( the_thread ) \
+  do { } while ( 0 )
+#endif
+
+/**
+ * @brief The initial thread wait flags value set by _Thread_Initialize().
+ */
+#define THREAD_WAIT_FLAGS_INITIAL 0x0U
+
+/**
+ * @brief Mask to get the thread wait state flags.
+ */
+#define THREAD_WAIT_STATE_MASK 0xffU
+
+/**
+ * @brief Indicates that the thread begins with the blocking operation.
+ *
+ * A blocking operation consists of an optional watchdog initialization and the
+ * setting of the appropriate thread blocking state with the corresponding
+ * scheduler block operation.
+ */
+#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
+
+/**
+ * @brief Indicates that the thread completed the blocking operation.
+ */
+#define THREAD_WAIT_STATE_BLOCKED 0x2U
+
+/**
+ * @brief Indicates that a condition to end the thread wait occurred.
+ *
+ * This could be a timeout, a signal, an event or a resource availability.
+ */
+#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
+
+/**
+ * @brief Mask to get the thread wait class flags.
+ */
+#define THREAD_WAIT_CLASS_MASK 0xff00U
+
+/**
+ * @brief Indicates that the thread waits for an event.
+ */
+#define THREAD_WAIT_CLASS_EVENT 0x100U
+
+/**
+ * @brief Indicates that the thread waits for a system event.
+ */
+#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
+
+/**
+ * @brief Indicates that the thread waits for an object.
+ */ +#define THREAD_WAIT_CLASS_OBJECT 0x400U + +RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set( + Thread_Control *the_thread, + Thread_Wait_flags flags +) +{ +#if defined(RTEMS_SMP) + _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED ); +#else + the_thread->Wait.flags = flags; +#endif +} + +RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get( + const Thread_Control *the_thread +) +{ +#if defined(RTEMS_SMP) + return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED ); +#else + return the_thread->Wait.flags; +#endif +} + +/** + * @brief Tries to change the thread wait flags inside a critical section + * (interrupts disabled). + * + * In case the wait flags are equal to the expected wait flags, then the wait + * flags are set to the desired wait flags. + * + * @param[in] the_thread The thread. + * @param[in] expected_flags The expected wait flags. + * @param[in] desired_flags The desired wait flags. + * + * @retval true The wait flags were equal to the expected wait flags. + * @retval false Otherwise. + */ +RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical( + Thread_Control *the_thread, + Thread_Wait_flags expected_flags, + Thread_Wait_flags desired_flags +) +{ +#if defined(RTEMS_SMP) + return _Atomic_Compare_exchange_uint( + &the_thread->Wait.flags, + &expected_flags, + desired_flags, + ATOMIC_ORDER_RELAXED, + ATOMIC_ORDER_RELAXED + ); +#else + bool success = the_thread->Wait.flags == expected_flags; + + if ( success ) { + the_thread->Wait.flags = desired_flags; + } + + return success; +#endif +} + +/** + * @brief Tries to change the thread wait flags. + * + * @see _Thread_Wait_flags_try_change_critical(). 
+ */ +RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change( + Thread_Control *the_thread, + Thread_Wait_flags expected_flags, + Thread_Wait_flags desired_flags +) +{ + bool success; +#if !defined(RTEMS_SMP) + ISR_Level level; + + _ISR_Disable_without_giant( level ); +#endif + + success = _Thread_Wait_flags_try_change_critical( + the_thread, + expected_flags, + desired_flags + ); + +#if !defined(RTEMS_SMP) + _ISR_Enable_without_giant( level ); +#endif + + return success; +} + +/** + * @brief Sets the thread queue. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * @param[in] new_queue The new queue. + * + * @see _Thread_Lock_set(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue( + Thread_Control *the_thread, + Thread_queue_Queue *new_queue +) +{ + the_thread->Wait.queue = new_queue; +} + +/** + * @brief Sets the thread queue operations. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * @param[in] new_operations The new queue operations. + * + * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations( + Thread_Control *the_thread, + const Thread_queue_Operations *new_operations +) +{ + the_thread->Wait.operations = new_operations; +} + +/** + * @brief Restores the default thread queue operations. + * + * The caller must be the owner of the thread lock. + * + * @param[in] the_thread The thread. + * + * @see _Thread_Wait_set_operations(). + */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations( + Thread_Control *the_thread +) +{ + the_thread->Wait.operations = &_Thread_queue_Operations_default; +} + +/** + * @brief Sets the thread wait timeout code. + * + * @param[in] the_thread The thread. + * @param[in] timeout_code The new thread wait timeout code. 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code( + Thread_Control *the_thread, + uint32_t timeout_code +) +{ + the_thread->Wait.timeout_code = timeout_code; +} + +/** + * @brief General purpose thread wait timeout. + * + * @param[in] id Unused. + * @param[in] arg The thread. + */ +void _Thread_Timeout( Objects_Id id, void *arg ); + +RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor( + Thread_Control *the_thread, + Per_CPU_Control *cpu +) +{ +#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG) + the_thread->Scheduler.debug_real_cpu = cpu; +#else + (void) the_thread; + (void) cpu; +#endif +} + +/** @}*/ + +#ifdef __cplusplus +} +#endif + +#if defined(RTEMS_MULTIPROCESSING) +#include <rtems/score/threadmp.h> +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/threadq.h b/include/rtems/score/threadq.h new file mode 100644 index 0000000000..cc07daed57 --- /dev/null +++ b/include/rtems/score/threadq.h @@ -0,0 +1,282 @@ +/** + * @file + * + * @brief Constants and Structures Needed to Declare a Thread Queue + * + * This include file contains all the constants and structures + * needed to declare a thread queue. + */ + +/* + * COPYRIGHT (c) 1989-2014. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_THREADQ_H +#define _RTEMS_SCORE_THREADQ_H + +#include <rtems/score/chain.h> +#include <rtems/score/isrlock.h> +#include <rtems/score/priority.h> +#include <rtems/score/rbtree.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreThreadQueue Thread Queue Handler + * + * @ingroup Score + * + * This handler provides the capability to have threads block in + * ordered sets. The sets may be ordered using the FIFO or priority + * discipline. 
+ */
+/**@{*/
+
+typedef struct _Thread_Control Thread_Control;
+
+/**
+ * @brief Thread priority queue.
+ */
+typedef struct {
+#if defined(RTEMS_SMP)
+  /**
+   * @brief Node to enqueue this queue in the FIFO chain of the corresponding
+   * heads structure.
+   *
+   * @see Thread_queue_Heads::Heads::Fifo.
+   */
+  Chain_Node Node;
+#endif
+
+  /**
+   * @brief The actual thread priority queue.
+   */
+  RBTree_Control Queue;
+} Thread_queue_Priority_queue;
+
+/**
+ * @brief Thread queue heads.
+ *
+ * Each thread is equipped with spare thread queue heads in case it is not
+ * enqueued on a thread queue. The first thread enqueued on a thread queue
+ * will give its spare thread queue heads to that thread queue. The threads
+ * arriving at the queue will add their thread queue heads to the free chain of
+ * the queue heads provided by the first thread enqueued. Once a thread is
+ * dequeued it uses the free chain to get new spare thread queue heads.
+ *
+ * Uses a leading underscore in the structure name to allow forward
+ * declarations in standard header files provided by Newlib and GCC.
+ */
+typedef struct _Thread_queue_Heads {
+  /** This union contains the data structures used to manage the blocked
+   *  set of tasks which varies based upon the discipline.
+   */
+  union {
+    /**
+     * @brief This is the FIFO discipline list.
+     *
+     * On SMP configurations this FIFO is used to enqueue the per scheduler
+     * instance priority queues of this structure. This ensures FIFO fairness
+     * among the highest priority thread of each scheduler instance.
+     */
+    Chain_Control Fifo;
+
+#if !defined(RTEMS_SMP)
+    /**
+     * @brief This is the set of threads for priority discipline waiting.
+     */
+    Thread_queue_Priority_queue Priority;
+#endif
+  } Heads;
+
+  /**
+   * @brief A chain with free thread queue heads providing the spare thread
+   * queue heads for a thread once it is dequeued.
+ */ + Chain_Control Free_chain; + + /** + * @brief A chain node to add these thread queue heads to the free chain of + * the thread queue heads dedicated to the thread queue of an object. + */ + Chain_Node Free_node; + +#if defined(RTEMS_SMP) + /** + * @brief One priority queue per scheduler instance. + */ + Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ]; +#endif +} Thread_queue_Heads; + +#if defined(RTEMS_SMP) + #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \ + ( sizeof( Thread_queue_Heads ) \ + + ( scheduler_count ) * sizeof( Thread_queue_Priority_queue ) ) +#else + #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \ + sizeof( Thread_queue_Heads ) +#endif + +typedef struct { + /** + * @brief The thread queue heads. + * + * This pointer is NULL, if and only if no threads are enqueued. The first + * thread to enqueue will give its spare thread queue heads to this thread + * queue. + */ + Thread_queue_Heads *heads; + + /** + * @brief Lock to protect this thread queue. + * + * It may be used to protect additional state of the object embedding this + * thread queue. + * + * @see _Thread_queue_Acquire(), _Thread_queue_Acquire_critical() and + * _Thread_queue_Release(). + */ +#if defined(RTEMS_SMP) + SMP_ticket_lock_Control Lock; +#endif +} Thread_queue_Queue; + +/** + * @brief Thread queue priority change operation. + * + * @param[in] the_thread The thread. + * @param[in] new_priority The new priority value. + * @param[in] queue The actual thread queue. + * + * @see Thread_queue_Operations. + */ +typedef void ( *Thread_queue_Priority_change_operation )( + Thread_Control *the_thread, + Priority_Control new_priority, + Thread_queue_Queue *queue +); + +/** + * @brief Thread queue enqueue operation. + * + * @param[in] queue The actual thread queue. + * @param[in] the_thread The thread to enqueue on the queue. + * + * @see _Thread_Wait_set_operations(). 
+ */
+typedef void ( *Thread_queue_Enqueue_operation )(
+  Thread_queue_Queue *queue,
+  Thread_Control     *the_thread
+);
+
+/**
+ * @brief Thread queue extract operation.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to extract from the thread queue.
+ *
+ * @see _Thread_Wait_set_operations().
+ */
+typedef void ( *Thread_queue_Extract_operation )(
+  Thread_queue_Queue *queue,
+  Thread_Control     *the_thread
+);
+
+/**
+ * @brief Thread queue first operation.
+ *
+ * @param[in] heads The thread queue heads.
+ *
+ * @retval NULL No thread is present on the thread queue.
+ * @retval first The first thread of the thread queue according to the insert
+ * order. This thread remains on the thread queue.
+ *
+ * @see _Thread_Wait_set_operations().
+ */
+typedef Thread_Control *( *Thread_queue_First_operation )(
+  Thread_queue_Heads *heads
+);
+
+/**
+ * @brief Thread queue operations.
+ *
+ * @see _Thread_Wait_set_operations().
+ */
+typedef struct {
+  /**
+   * @brief Thread queue priority change operation.
+   *
+   * Called by _Thread_Change_priority() to notify a thread about a priority
+   * change. In case this thread waits currently for a resource the handler
+   * may adjust its data structures according to the new priority value. This
+   * handler must not be NULL, instead the default handler
+   * _Thread_Do_nothing_priority_change() should be used in case nothing needs
+   * to be done during a priority change.
+   */
+  Thread_queue_Priority_change_operation priority_change;
+
+  /**
+   * @brief Thread queue enqueue operation.
+   *
+   * Called by object routines to enqueue the thread.
+   */
+  Thread_queue_Enqueue_operation enqueue;
+
+  /**
+   * @brief Thread queue extract operation.
+   *
+   * Called by object routines to extract a thread from a thread queue.
+   */
+  Thread_queue_Extract_operation extract;
+
+  /**
+   * @brief Thread queue first operation.
+ */
+  Thread_queue_First_operation first;
+} Thread_queue_Operations;
+
+/**
+ * The following enumerated type details all of the disciplines
+ * supported by the Thread Queue Handler.
+ */
+typedef enum {
+  THREAD_QUEUE_DISCIPLINE_FIFO,     /* FIFO queue discipline */
+  THREAD_QUEUE_DISCIPLINE_PRIORITY  /* PRIORITY queue discipline */
+} Thread_queue_Disciplines;
+
+/**
+ * This is the structure used to manage sets of tasks which are blocked
+ * waiting to acquire a resource.
+ */
+typedef struct {
+  /**
+   * @brief The actual thread queue.
+   */
+  Thread_queue_Queue Queue;
+
+#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
+  SMP_lock_Stats Lock_stats;
+#endif
+
+  /**
+   * @brief The operations for the actual thread queue.
+   */
+  const Thread_queue_Operations *operations;
+} Thread_queue_Control;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/include/rtems/score/threadqimpl.h b/include/rtems/score/threadqimpl.h
new file mode 100644
index 0000000000..510f886844
--- /dev/null
+++ b/include/rtems/score/threadqimpl.h
@@ -0,0 +1,577 @@
+/**
+ * @file rtems/score/threadqimpl.h
+ *
+ * Constants and Structures Needed to Implement Operations on Thread Queues
+ *
+ * This include file contains all the constants and structures associated
+ * with the manipulation of thread queues.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2014.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */ + +#ifndef _RTEMS_SCORE_THREADQIMPL_H +#define _RTEMS_SCORE_THREADQIMPL_H + +#include <rtems/score/threadq.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/rbtreeimpl.h> +#include <rtems/score/scheduler.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup ScoreThreadQueue + */ +/**@{*/ + +/** + * @brief Thread queue with a layout compatible to struct _Thread_queue_Queue + * defined in Newlib <sys/lock.h>. + */ +typedef struct { + Thread_queue_Queue Queue; + +#if !defined(RTEMS_SMP) + /* + * The struct _Thread_queue_Queue definition is independent of the RTEMS + * build configuration. Thus, the storage space for the SMP lock is always + * present. In SMP configurations, the SMP lock is contained in the + * Thread_queue_Queue. + */ + unsigned int reserved[2]; +#endif +} Thread_queue_Syslock_queue; + +RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize( + Thread_queue_Heads *heads +) +{ +#if defined(RTEMS_SMP) + size_t i; + + for ( i = 0; i < _Scheduler_Count; ++i ) { + _RBTree_Initialize_empty( &heads->Priority[ i ].Queue ); + } +#endif + + _Chain_Initialize_empty( &heads->Free_chain ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize( + Thread_queue_Queue *queue +) +{ + queue->heads = NULL; +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Initialize( &queue->Lock ); +#endif +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical( + Thread_queue_Queue *queue, +#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING) + SMP_lock_Stats *lock_stats, +#endif + ISR_lock_Context *lock_context +) +{ +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Acquire( + &queue->Lock, + lock_stats, + &lock_context->Lock_context.Stats_context + ); +#else + (void) queue; + (void) lock_context; +#endif +} + +#if defined(RTEMS_SMP) && defined( RTEMS_PROFILING ) + #define \ + _Thread_queue_Queue_acquire_critical( queue, lock_stats, lock_context ) \ + _Thread_queue_Queue_do_acquire_critical( queue, lock_stats, 
lock_context ) +#else + #define \ + _Thread_queue_Queue_acquire_critical( queue, lock_stats, lock_context ) \ + _Thread_queue_Queue_do_acquire_critical( queue, lock_context ) +#endif + +RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release( + Thread_queue_Queue *queue, + ISR_lock_Context *lock_context +) +{ +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Release( + &queue->Lock, + &lock_context->Lock_context.Stats_context + ); +#endif + _ISR_lock_ISR_enable( lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire_critical( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _Thread_queue_Queue_acquire_critical( + &the_thread_queue->Queue, + &the_thread_queue->Lock_stats, + lock_context + ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Acquire( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _ISR_lock_ISR_disable( lock_context ); + _Thread_queue_Acquire_critical( the_thread_queue, lock_context ); +} + +RTEMS_INLINE_ROUTINE void _Thread_queue_Release( + Thread_queue_Control *the_thread_queue, + ISR_lock_Context *lock_context +) +{ + _Thread_queue_Queue_release( + &the_thread_queue->Queue, + lock_context + ); +} + +/** + * The following type defines the callout used when a remote task + * is extracted from a local thread queue. + */ +typedef void ( *Thread_queue_Flush_callout )( + Thread_Control * + ); + +/** + * @brief Gets a pointer to a thread waiting on the_thread_queue. + * + * This function returns a pointer to a thread waiting on + * the_thread_queue. The selection of this thread is based on + * the discipline of the_thread_queue. If no threads are waiting + * on the_thread_queue, then NULL is returned. + * + * - INTERRUPT LATENCY: + * + single case + */ +Thread_Control *_Thread_queue_Dequeue( + Thread_queue_Control *the_thread_queue +); + +/** + * @brief Blocks the thread and places it on the thread queue. 
+ * + * This enqueues the thread on the thread queue, blocks the thread, and + * optionally starts the thread timer in case the timeout interval is not + * WATCHDOG_NO_TIMEOUT. + * + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock and register it as the new thread lock. + * Thread dispatching is disabled before the thread queue lock is released. + * Thread dispatching is enabled once the sequence to block the thread is + * complete. The operation to enqueue the thread on the queue is protected by + * the thread queue lock. This makes it possible to use the thread queue lock + * to protect the state of objects embedding the thread queue and directly + * enter _Thread_queue_Enqueue_critical() in case the thread must block. + * + * @code + * #include <rtems/score/threadqimpl.h> + * #include <rtems/score/statesimpl.h> + * + * typedef struct { + * Thread_queue_Control Queue; + * Thread_Control *owner; + * } Mutex; + * + * void _Mutex_Obtain( Mutex *mutex ) + * { + * ISR_lock_Context lock_context; + * Thread_Control *executing; + * + * _Thread_queue_Acquire( &mutex->Queue, &lock_context ); + * + * executing = _Thread_Executing; + * + * if ( mutex->owner == NULL ) { + * mutex->owner = executing; + * _Thread_queue_Release( &mutex->Queue, &lock_context ); + * } else { + * _Thread_queue_Enqueue_critical( + * &mutex->Queue.Queue, + * mutex->Queue.operations, + * executing, + * STATES_WAITING_FOR_MUTEX, + * WATCHDOG_NO_TIMEOUT, + * 0, + * &lock_context + * ); + * } + * } + * @endcode + * + * @param[in] queue The actual thread queue. + * @param[in] operations The thread queue operations. + * @param[in] the_thread The thread to enqueue. + * @param[in] state The new state of the thread. + * @param[in] timeout Interval to wait. Use WATCHDOG_NO_TIMEOUT to block + * potentially forever. + * @param[in] timeout_code The return code in case a timeout occurs. + * @param[in] lock_context The lock context of the lock acquire. 
+ */ +void _Thread_queue_Enqueue_critical( + Thread_queue_Queue *queue, + const Thread_queue_Operations *operations, + Thread_Control *the_thread, + States_Control state, + Watchdog_Interval timeout, + uint32_t timeout_code, + ISR_lock_Context *lock_context +); + +/** + * @brief Acquires the thread queue lock and calls + * _Thread_queue_Enqueue_critical(). + */ +RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue( + Thread_queue_Control *the_thread_queue, + Thread_Control *the_thread, + States_Control state, + Watchdog_Interval timeout, + uint32_t timeout_code +) +{ + ISR_lock_Context lock_context; + + _Thread_queue_Acquire( the_thread_queue, &lock_context ); + _Thread_queue_Enqueue_critical( + &the_thread_queue->Queue, + the_thread_queue->operations, + the_thread, + state, + timeout, + timeout_code, + &lock_context + ); +} + +/** + * @brief Extracts the thread from the thread queue, restores the default wait + * operations and restores the default thread lock. + * + * The caller must be the owner of the thread queue lock. The thread queue + * lock is not released. + * + * @param[in] queue The actual thread queue. + * @param[in] operations The thread queue operations. + * @param[in] the_thread The thread to extract. + * + * @return Returns the unblock indicator for _Thread_queue_Unblock_critical(). + * True indicates, that this thread must be unblocked by the scheduler later in + * _Thread_queue_Unblock_critical(), and false otherwise. In case false is + * returned, then the thread queue enqueue procedure was interrupted. Thus it + * will unblock itself and the thread wait information is no longer accessible, + * since this thread may already block on another resource in an SMP + * configuration. + */ +bool _Thread_queue_Extract_locked( + Thread_queue_Queue *queue, + const Thread_queue_Operations *operations, + Thread_Control *the_thread +); + +/** + * @brief Unblocks the thread which was on the thread queue before. 
+ * + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock. Thread dispatching is disabled before the + * thread queue lock is released and an unblock is necessary. Thread + * dispatching is enabled once the sequence to unblock the thread is complete. + * + * @param[in] unblock The unblock indicator returned by + * _Thread_queue_Extract_locked(). + * @param[in] queue The actual thread queue. + * @param[in] the_thread The thread to extract. + * @param[in] lock_context The lock context of the lock acquire. + */ +void _Thread_queue_Unblock_critical( + bool unblock, + Thread_queue_Queue *queue, + Thread_Control *the_thread, + ISR_lock_Context *lock_context +); + +/** + * @brief Extracts the thread from the thread queue and unblocks it. + * + * The caller must be the owner of the thread queue lock. This function will + * release the thread queue lock and restore the default thread lock. Thread + * dispatching is disabled before the thread queue lock is released and an + * unblock is necessary. Thread dispatching is enabled once the sequence to + * unblock the thread is complete. This makes it possible to use the thread + * queue lock to protect the state of objects embedding the thread queue and + * directly enter _Thread_queue_Extract_critical() to finalize an operation in + * case a waiting thread exists. 
+ * + * @code + * #include <rtems/score/threadqimpl.h> + * + * typedef struct { + * Thread_queue_Control Queue; + * Thread_Control *owner; + * } Mutex; + * + * void _Mutex_Release( Mutex *mutex ) + * { + * ISR_lock_Context lock_context; + * Thread_Control *first; + * + * _Thread_queue_Acquire( &mutex->Queue, &lock_context ); + * + * first = _Thread_queue_First_locked( &mutex->Queue ); + * mutex->owner = first; + * + * if ( first != NULL ) { + * _Thread_queue_Extract_critical( + * &mutex->Queue.Queue, + * mutex->Queue.operations, + * first, + * &lock_context + * ); + * } + * @endcode + * + * @param[in] queue The actual thread queue. + * @param[in] operations The thread queue operations. + * @param[in] the_thread The thread to extract. + * @param[in] lock_context The lock context of the lock acquire. + */ +void _Thread_queue_Extract_critical( + Thread_queue_Queue *queue, + const Thread_queue_Operations *operations, + Thread_Control *the_thread, + ISR_lock_Context *lock_context +); + +/** + * @brief Extracts thread from thread queue. + * + * This routine removes @a the_thread its thread queue + * and cancels any timeouts associated with this blocking. + * + * @param[in] the_thread is the pointer to a thread control block that + * is to be removed + */ +void _Thread_queue_Extract( Thread_Control *the_thread ); + +/** + * @brief Extracts the_thread from the_thread_queue. + * + * This routine extracts the_thread from the_thread_queue + * and ensures that if there is a proxy for this task on + * another node, it is also dealt with. + */ +void _Thread_queue_Extract_with_proxy( + Thread_Control *the_thread +); + +/** + * @brief Returns the first thread on the thread queue if it exists, otherwise + * @c NULL. + * + * The caller must be the owner of the thread queue lock. The thread queue + * lock is not released. + * + * @param[in] the_thread_queue The thread queue. + * + * @retval NULL No thread is present on the thread queue. 
+ * @retval first The first thread on the thread queue according to the enqueue + * order. + */ +RTEMS_INLINE_ROUTINE Thread_Control *_Thread_queue_First_locked( + Thread_queue_Control *the_thread_queue +) +{ + Thread_queue_Heads *heads = the_thread_queue->Queue.heads; + + if ( heads != NULL ) { + return ( *the_thread_queue->operations->first )( heads ); + } else { + return NULL; + } +} + +/** + * @brief Returns the first thread on the thread queue if it exists, otherwise + * @c NULL. + * + * @param[in] the_thread_queue The thread queue. + * + * @retval NULL No thread is present on the thread queue. + * @retval first The first thread on the thread queue according to the enqueue + * order. + */ +Thread_Control *_Thread_queue_First( + Thread_queue_Control *the_thread_queue +); + +/** + * @brief Unblocks all threads blocked on the_thread_queue. + * + * This routine unblocks all threads blocked on the_thread_queue + * and cancels any associated timeouts. + * + * @param[in] the_thread_queue is the pointer to a threadq header + * @param[in] remote_extract_callout points to a method to invoke to + * invoke when a remote thread is unblocked + * @param[in] status is the status which will be returned to + * all unblocked threads + */ +void _Thread_queue_Flush( + Thread_queue_Control *the_thread_queue, + Thread_queue_Flush_callout remote_extract_callout, + uint32_t status +); + +/** + * @brief Initialize the_thread_queue. + * + * This routine initializes the_thread_queue based on the + * discipline indicated in attribute_set. The state set on + * threads which block on the_thread_queue is state. 
+ * + * @param[in] the_thread_queue is the pointer to a threadq header + * @param[in] the_discipline is the queueing discipline + */ +void _Thread_queue_Initialize( + Thread_queue_Control *the_thread_queue, + Thread_queue_Disciplines the_discipline +); + +#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING) + #define THREAD_QUEUE_FIFO_INITIALIZER( designator, name ) { \ + .Queue = { \ + .heads = NULL, \ + .Lock = SMP_TICKET_LOCK_INITIALIZER, \ + }, \ + .Lock_stats = SMP_LOCK_STATS_INITIALIZER( name ), \ + .operations = &_Thread_queue_Operations_FIFO \ + } + + #define THREAD_QUEUE_PRIORITY_INITIALIZER( designator, name ) { \ + .Queue = { \ + .heads = NULL, \ + .Lock = SMP_TICKET_LOCK_INITIALIZER, \ + }, \ + .Lock_stats = SMP_LOCK_STATS_INITIALIZER( name ), \ + .operations = &_Thread_queue_Operations_priority \ + } +#elif defined(RTEMS_SMP) + #define THREAD_QUEUE_FIFO_INITIALIZER( designator, name ) { \ + .Queue = { \ + .heads = NULL, \ + .Lock = SMP_TICKET_LOCK_INITIALIZER, \ + }, \ + .operations = &_Thread_queue_Operations_FIFO \ + } + + #define THREAD_QUEUE_PRIORITY_INITIALIZER( designator, name ) { \ + .Queue = { \ + .heads = NULL, \ + .Lock = SMP_TICKET_LOCK_INITIALIZER, \ + }, \ + .operations = &_Thread_queue_Operations_priority \ + } +#else + #define THREAD_QUEUE_FIFO_INITIALIZER( designator, name ) { \ + .Queue = { .heads = NULL }, \ + .operations = &_Thread_queue_Operations_FIFO \ + } + + #define THREAD_QUEUE_PRIORITY_INITIALIZER( designator, name ) { \ + .Queue = { .heads = NULL }, \ + .operations = &_Thread_queue_Operations_priority \ + } +#endif + +RTEMS_INLINE_ROUTINE void _Thread_queue_Destroy( + Thread_queue_Control *the_thread_queue +) +{ +#if defined(RTEMS_SMP) + _SMP_ticket_lock_Destroy( &the_thread_queue->Queue.Lock ); + _SMP_lock_Stats_destroy( &the_thread_queue->Lock_stats ); +#endif +} + +/** + * @brief Boosts the priority of the thread if threads of another scheduler + * instance are enqueued on the thread queue. 
+ * + * The thread queue must use the priority waiting discipline. + * + * @param[in] queue The actual thread queue. + * @param[in] the_thread The thread to boost the priority if necessary. + */ +#if defined(RTEMS_SMP) +void _Thread_queue_Boost_priority( + Thread_queue_Queue *queue, + Thread_Control *the_thread +); +#else +RTEMS_INLINE_ROUTINE void _Thread_queue_Boost_priority( + Thread_queue_Queue *queue, + Thread_Control *the_thread +) +{ + (void) queue; + (void) the_thread; +} +#endif + +/** + * @brief Compare two thread's priority for RBTree Insertion. + * + * @param[in] left points to the left thread's RBnode + * @param[in] right points to the right thread's RBnode + * + * @retval 1 The @a left node is more important than @a right node. + * @retval 0 The @a left node is of equal importance with @a right node. + * @retval 1 The @a left node is less important than @a right node. + */ +RBTree_Compare_result _Thread_queue_Compare_priority( + const RBTree_Node *left, + const RBTree_Node *right +); + +extern const Thread_queue_Operations _Thread_queue_Operations_default; + +extern const Thread_queue_Operations _Thread_queue_Operations_FIFO; + +extern const Thread_queue_Operations _Thread_queue_Operations_priority; + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/timecounter.h b/include/rtems/score/timecounter.h new file mode 100644 index 0000000000..0d17cc7ce3 --- /dev/null +++ b/include/rtems/score/timecounter.h @@ -0,0 +1,199 @@ +/** + * @file + * + * @ingroup ScoreTimecounter + * + * @brief Timecounter API + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_TIMECOUNTER_H +#define _RTEMS_SCORE_TIMECOUNTER_H + +#include <sys/time.h> +#include <sys/timetc.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreTimecounter Timecounter Handler + * + * @ingroup Score + * + * @{ + */ + +/** + * @brief Returns the wall clock time in the bintime format. + * + * @param[out] bt Returns the wall clock time. + */ +void _Timecounter_Bintime( struct bintime *bt ); + +/** + * @brief Returns the wall clock time in the timespec format. + * + * @param[out] ts Returns the wall clock time. + */ +void _Timecounter_Nanotime( struct timespec *ts ); + +/** + * @brief Returns the wall clock time in the timeval format. + * + * @param[out] tv Returns the wall clock time. + */ +void _Timecounter_Microtime( struct timeval *tv ); + +/** + * @brief Returns the uptime in the bintime format. + * + * @param[out] bt Returns the uptime. + */ +void _Timecounter_Binuptime( struct bintime *bt ); + +/** + * @brief Returns the uptime in the timespec format. + * + * @param[out] ts Returns the uptime. + */ +void _Timecounter_Nanouptime( struct timespec *ts ); + +/** + * @brief Returns the uptime in the timeval format. + * + * @param[out] tv Returns the uptime. + */ +void _Timecounter_Microuptime( struct timeval *tv ); + +/** + * @brief Returns the wall clock time in the bintime format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Bintime() variant. + * + * @param[out] ts Returns the wall clock time. + */ +void _Timecounter_Getbintime( struct bintime *bt ); + +/** + * @brief Returns the wall clock time in the timespec format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Nanotime() variant. + * + * @param[out] ts Returns the wall clock time. + * + * @see _Timecounter_Getbintime(). 
+ */ +void _Timecounter_Getnanotime( struct timespec *ts ); + +/** + * @brief Returns the wall clock time in the timeval format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Microtime() variant. + * + * @param[out] tv Returns the wall clock time. + * + * @see _Timecounter_Getbintime(). + */ +void _Timecounter_Getmicrotime( struct timeval *tv ); + +/** + * @brief Returns the uptime in the bintime format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Binuptime() variant. + * + * @param[out] ts Returns the uptime. + */ +void _Timecounter_Getbinuptime( struct bintime *bt ); + +/** + * @brief Returns the uptime in the timespec format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Nanouptime() variant. + * + * @param[out] ts Returns the uptime. + */ +void _Timecounter_Getnanouptime( struct timespec *ts ); + +/** + * @brief Returns the uptime in the timeval format. + * + * This function obtains the time with a lower overhead and lower accuracy + * compared to the _Timecounter_Microuptime() variant. + * + * @param[out] tv Returns the uptime. + */ +void _Timecounter_Getmicrouptime( struct timeval *tv ); + +/** + * @brief Installs the timecounter. + * + * The timecounter structure must contain valid values in the fields + * tc_get_timecount, tc_counter_mask, tc_frequency and tc_quality. All other + * fields must be zero initialized. + * + * @param[in] tc The timecounter. + */ +void _Timecounter_Install( struct timecounter *tc ); + +/** + * @brief Performs a timecounter tick. + */ +void _Timecounter_Tick( void ); + +/** + * @brief Performs a simple timecounter tick. + * + * This is a special purpose tick function for simple timecounter to support + * legacy clock drivers. 
+ * + * @param[in] delta The time in timecounter ticks elapsed since the last call + * to _Timecounter_Tick_simple(). + * @param[in] offset The current value of the timecounter. + */ +void _Timecounter_Tick_simple( uint32_t delta, uint32_t offset ); + +/** + * @brief The wall clock time in seconds. + */ +extern volatile time_t _Timecounter_Time_second; + +/** + * @brief The uptime in seconds. + * + * For compatibility with the FreeBSD network stack the initial value is one + * second. + */ +extern volatile time_t _Timecounter_Time_uptime; + +/** + * @brief The current timecounter. + */ +extern struct timecounter *_Timecounter; + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_TIMECOUNTER_H */ diff --git a/include/rtems/score/timecounterimpl.h b/include/rtems/score/timecounterimpl.h new file mode 100644 index 0000000000..dd47aacc04 --- /dev/null +++ b/include/rtems/score/timecounterimpl.h @@ -0,0 +1,49 @@ +/** + * @file + * + * @ingroup ScoreTimecounter + * + * @brief Timecounter Implementation + */ + +/* + * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_TIMECOUNTERIMPL_H +#define _RTEMS_SCORE_TIMECOUNTERIMPL_H + +#include <rtems/score/timecounter.h> +#include <sys/timetc.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup ScoreTimecounter + * + * @{ + */ + +void _Timecounter_Initialize( void ); + +void _Timecounter_Set_clock( const struct timespec *ts ); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_TIMECOUNTERIMPL_H */ diff --git a/include/rtems/score/timespec.h b/include/rtems/score/timespec.h new file mode 100644 index 0000000000..72a000177f --- /dev/null +++ b/include/rtems/score/timespec.h @@ -0,0 +1,272 @@ +/** + * @file rtems/score/timespec.h + * + * This include file contains helpers for manipulating timespecs. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TIMESPEC_H +#define _RTEMS_SCORE_TIMESPEC_H + +/** + * @defgroup Timespec Helpers + * + * @ingroup Score + * + * This handler encapsulates functionality related to manipulating + * POSIX struct timespecs. + */ +/**@{*/ + +#include <stdbool.h> /* bool */ +#include <stdint.h> /* uint32_t */ +#include <time.h> /* struct timespec */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Set timespec to seconds nanosecond. + * + * This method sets the timespec to the specified seconds and nanoseconds + * value. + * + * @param[in] _time points to the timespec instance to validate. 
+ * @param[in] _seconds is the seconds portion of the timespec + * @param[in] _nanoseconds is the nanoseconds portion of the timespec + */ +#define _Timespec_Set( _time, _seconds, _nanoseconds ) \ + do { \ + (_time)->tv_sec = (_seconds); \ + (_time)->tv_nsec = (_nanoseconds); \ + } while (0) + +/** + * @brief Sets the Timespec to Zero + * + * This method sets the timespec to zero. + * value. + * + * @param[in] _time points to the timespec instance to zero. + */ +#define _Timespec_Set_to_zero( _time ) \ + do { \ + (_time)->tv_sec = 0; \ + (_time)->tv_nsec = 0; \ + } while (0) + +/** + * @brief Get seconds portion of timespec. + * + * This method returns the seconds portion of the specified timespec + * + * @param[in] _time points to the timespec + * + * @retval The seconds portion of @a _time. + */ +#define _Timespec_Get_seconds( _time ) \ + ((_time)->tv_sec) + +/** + * @brief Get nanoseconds portion of timespec. + * + * This method returns the nanoseconds portion of the specified timespec + * + * @param[in] _time points to the timespec + * + * @retval The nanoseconds portion of @a _time. + */ +#define _Timespec_Get_nanoseconds( _time ) \ + ((_time)->tv_nsec) + +/** + * @brief Get the timestamp as nanoseconds. + * + * This method returns the timestamp as nanoseconds. + * + * @param[in] time points to the timestamp. + * + * @retval The time in nanoseconds. + */ +uint64_t _Timespec_Get_as_nanoseconds( + const struct timespec *time +); + +/** + * @brief Check if timespec is valid. + * + * This method determines the validity of a timespec. + * + * @param[in] time is the timespec instance to validate. + * + * @retval This method returns true if @a time is valid and + * false otherwise. + */ +bool _Timespec_Is_valid( + const struct timespec *time +); + +/** + * @brief The Timespec "less than" operator. + * + * This method is the less than operator for timespecs. 
+ * + * @param[in] lhs is the left hand side timespec + * @param[in] rhs is the right hand side timespec + * + * @retval This method returns true if @a lhs is less than the @a rhs and + * false otherwise. + */ +bool _Timespec_Less_than( + const struct timespec *lhs, + const struct timespec *rhs +); + +/** + * @brief The Timespec "greater than" operator. + * + * This method is the greater than operator for timespecs. + * + * @param[in] _lhs is the left hand side timespec + * @param[in] _rhs is the right hand side timespec + * + * @retval This method returns true if @a lhs is greater than the @a rhs and + * false otherwise. + */ +#define _Timespec_Greater_than( _lhs, _rhs ) \ + _Timespec_Less_than( _rhs, _lhs ) + +/** + * @brief The Timespec "equal to" operator. + * + * This method is the is equal to than operator for timespecs. + * + * @param[in] lhs is the left hand side timespec + * @param[in] rhs is the right hand side timespec + * + * @retval This method returns true if @a lhs is equal to @a rhs and + * false otherwise. + */ +#define _Timespec_Equal_to( lhs, rhs ) \ + ( ((lhs)->tv_sec == (rhs)->tv_sec) && \ + ((lhs)->tv_nsec == (rhs)->tv_nsec) \ + ) + +/** + * @brief Add two timespecs. + * + * This routine adds two timespecs. The second argument is added + * to the first. + * + * @param[in] time is the base time to be added to + * @param[in] add is the timespec to add to the first argument + * + * @retval This method returns the number of seconds @a time increased by. + */ +uint32_t _Timespec_Add_to( + struct timespec *time, + const struct timespec *add +); + +/** + * @brief Convert timespec to number of ticks. + * + * This routine convert the @a time timespec to the corresponding number + * of clock ticks. + * + * @param[in] time is the time to be converted + * + * @retval This method returns the number of ticks computed. + */ +uint32_t _Timespec_To_ticks( + const struct timespec *time +); + +/** + * @brief Convert ticks to timespec. 
+ * + * This routine converts the @a ticks value to the corresponding + * timespec format @a time. + * + * @param[in] time is the timespec format time result + * @param[in] ticks is the number of ticks to convert + */ +void _Timespec_From_ticks( + uint32_t ticks, + struct timespec *time +); + +/** + * @brief Subtract two timespec. + * + * This routine subtracts two timespecs. @a result is set to + * @a end - @a start. + * + * @param[in] start is the starting time + * @param[in] end is the ending time + * @param[in] result is the difference between starting and ending time. + * + * @retval This method fills in @a result. + */ +void _Timespec_Subtract( + const struct timespec *start, + const struct timespec *end, + struct timespec *result +); + +/** + * @brief Divide timespec by an integer. + * + * This routine divides a timespec by an integer value. The expected + * use is to assist in benchmark calculations where you typically + * divide a duration by a number of iterations. + * + * @param[in] time is the total + * @param[in] iterations is the number of iterations + * @param[in] result is the average time. + * + * @retval This method fills in @a result. + */ +void _Timespec_Divide_by_integer( + const struct timespec *time, + uint32_t iterations, + struct timespec *result +); + +/** + * @brief Divide a timespec by anonther timespec. + * + * This routine divides a timespec by another timespec. The + * intended use is for calculating percentages to three decimal points. + * + * @param[in] lhs is the left hand number + * @param[in] rhs is the right hand number + * @param[in] ival_percentage is the integer portion of the average + * @param[in] fval_percentage is the thousandths of percentage + * + * @retval This method fills in @a result. 
+ */ +void _Timespec_Divide( + const struct timespec *lhs, + const struct timespec *rhs, + uint32_t *ival_percentage, + uint32_t *fval_percentage +); + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/timestamp.h b/include/rtems/score/timestamp.h new file mode 100644 index 0000000000..9d25943a8c --- /dev/null +++ b/include/rtems/score/timestamp.h @@ -0,0 +1,345 @@ +/** + * @file rtems/score/timestamp.h + * + * @brief Helpers for Manipulating Timestamps + * + * This include file contains helpers for manipulating timestamps. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TIMESTAMP_H +#define _RTEMS_SCORE_TIMESTAMP_H + +/** + * @defgroup SuperCoreTimeStamp Score Timestamp + * + * @ingroup Score + * + * This handler encapsulates functionality related to manipulating + * SuperCore Timestamps. SuperCore Timestamps may be used to + * represent time of day, uptime, or intervals. + * + * The key attribute of the SuperCore Timestamp handler is that it + * is a completely opaque handler. There can be multiple implementations + * of the required functionality and with a recompile, RTEMS can use + * any implementation. It is intended to be a simple wrapper. + * + * This handler can be implemented as either struct timespec or + * unsigned64 bit numbers. The use of a wrapper class allows the + * the implementation of timestamps to change on a per architecture + * basis. This is an important option as the performance of this + * handler is critical. + */ +/**@{*/ + +#include <sys/time.h> + +#include <rtems/score/basedefs.h> +#include <rtems/score/timespec.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Define the Timestamp control type. 
+ */ +typedef struct bintime Timestamp_Control; + +/** + * @brief Set timestamp to specified seconds and nanoseconds. + * + * This method sets the timestamp to the specified @a _seconds and @a _nanoseconds + * value. + * + * @param[in] _time points to the timestamp instance to validate. + * @param[in] _seconds is the seconds portion of the timestamp + * @param[in] _nanoseconds is the nanoseconds portion of the timestamp + */ +RTEMS_INLINE_ROUTINE void _Timestamp_Set( + Timestamp_Control *_time, + time_t _seconds, + long _nanoseconds +) +{ + struct timespec _ts; + + _ts.tv_sec = _seconds; + _ts.tv_nsec = _nanoseconds; + + timespec2bintime( &_ts, _time ); +} + +/** + * @brief Sets the timestamp to zero. + * + * This method sets the timestamp to zero. + * value. + * + * @param[in] _time points to the timestamp instance to zero. + */ + +RTEMS_INLINE_ROUTINE void _Timestamp_Set_to_zero( + Timestamp_Control *_time +) +{ + _time->sec = 0; + _time->frac = 0; +} + +/** + * @brief Less than operator for timestamps. + * + * This method is the less than operator for timestamps. + * + * @param[in] _lhs points to the left hand side timestamp + * @param[in] _rhs points to the right hand side timestamp + * + * @retval This method returns true if @a _lhs is less than the @a _rhs and + * false otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Timestamp_Less_than( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + if ( _lhs->sec < _rhs->sec ) + return true; + + if ( _lhs->sec > _rhs->sec ) + return false; + + return _lhs->frac < _rhs->frac; +} + +/** + * @brief Greater than operator for timestamps. + * + * This method is the greater than operator for timestamps. + * + * @param[in] _lhs points to the left hand side timestamp + * @param[in] _rhs points to the right hand side timestamp + * + * @retval This method returns true if @a _lhs is greater than the @a _rhs and + * false otherwise. 
+ */ + +RTEMS_INLINE_ROUTINE bool _Timestamp_Greater_than( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + if ( _lhs->sec > _rhs->sec ) + return true; + + if ( _lhs->sec < _rhs->sec ) + return false; + + return _lhs->frac > _rhs->frac; +} + +/** + * @brief Equal to than operator for timestamps. + * + * This method is the is equal to than operator for timestamps. + * + * @param[in] _lhs points to the left hand side timestamp + * @param[in] _rhs points to the right hand side timestamp + * + * @retval This method returns true if @a _lhs is equal to @a _rhs and + * false otherwise. + */ + +RTEMS_INLINE_ROUTINE bool _Timestamp_Equal_to( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs +) +{ + return _lhs->sec == _rhs->sec && _lhs->frac == _rhs->frac; +} + +/** + * @brief Adds two timestamps. + * + * This routine adds two timestamps. The second argument is added + * to the first. + * + * @param[in] _time points to the base time to be added to + * @param[in] _add points to the timestamp to add to the first argument + * + * @retval This method returns the number of seconds @a time increased by. + */ +RTEMS_INLINE_ROUTINE time_t _Timestamp_Add_to( + Timestamp_Control *_time, + const Timestamp_Control *_add +) +{ + time_t seconds = _time->sec; + + bintime_add( _time, _add ); + + return _time->sec - seconds; +} + +/** + * @brief Subtracts two timestamps. + * + * This routine subtracts two timestamps. @a result is set to + * @a end - @a start. + * + * @param[in] _start points to the starting time + * @param[in] _end points to the ending time + * @param[in] _result points to the difference between + * starting and ending time. + * + * @retval This method fills in @a _result. 
+ */ +RTEMS_INLINE_ROUTINE void _Timestamp_Subtract( + const Timestamp_Control *_start, + const Timestamp_Control *_end, + Timestamp_Control *_result +) +{ + _result->sec = _end->sec; + _result->frac = _end->frac; + + bintime_sub( _result, _start ); +} + +/** + * @brief Divides a timestamp by another timestamp. + * + * This routine divides a timestamp by another timestamp. The + * intended use is for calculating percentages to three decimal points. + * + * @param[in] _lhs points to the left hand number + * @param[in] _rhs points to the right hand number + * @param[in] _ival_percentage points to the integer portion of the average + * @param[in] _fval_percentage points to the thousandths of percentage + * + * @retval This method fills in @a result. + */ +RTEMS_INLINE_ROUTINE void _Timestamp_Divide( + const Timestamp_Control *_lhs, + const Timestamp_Control *_rhs, + uint32_t *_ival_percentage, + uint32_t *_fval_percentage +) +{ + struct timespec _ts_left; + struct timespec _ts_right; + + bintime2timespec( _lhs, &_ts_left ); + bintime2timespec( _rhs, &_ts_right ); + + _Timespec_Divide( + &_ts_left, + &_ts_right, + _ival_percentage, + _fval_percentage + ); +} + +/** + * @brief Get seconds portion of timestamp. + * + * This method returns the seconds portion of the specified timestamp + * + * @param[in] _time points to the timestamp + * + * @retval The seconds portion of @a _time. + */ +RTEMS_INLINE_ROUTINE time_t _Timestamp_Get_seconds( + const Timestamp_Control *_time +) +{ + return _time->sec; +} + +/** + * @brief Get nanoseconds portion of timestamp. + * + * This method returns the nanoseconds portion of the specified timestamp + * + * @param[in] _time points to the timestamp + * + * @retval The nanoseconds portion of @a _time. + */ +RTEMS_INLINE_ROUTINE uint32_t _Timestamp_Get_nanoseconds( + const Timestamp_Control *_time +) +{ + struct timespec _ts; + + bintime2timespec( _time, &_ts ); + + return _ts.tv_nsec; +} + +/** + * @brief Get the timestamp as nanoseconds. 
+ * + * This method returns the timestamp as nanoseconds. + * + * @param[in] _time points to the timestamp + * + * @retval The time in nanoseconds. + */ +RTEMS_INLINE_ROUTINE uint64_t _Timestamp_Get_as_nanoseconds( + const Timestamp_Control *_time +) +{ + struct timespec _ts; + + bintime2timespec( _time, &_ts ); + + return _Timespec_Get_as_nanoseconds( &_ts ); +} + +/** + * @brief Convert timestamp to struct timespec. + * + * This method returns the seconds portion of the specified @a _timestamp. + * + * @param[in] _timestamp points to the timestamp + * @param[in] _timespec points to the timespec + */ +RTEMS_INLINE_ROUTINE void _Timestamp_To_timespec( + const Timestamp_Control *_timestamp, + struct timespec *_timespec +) +{ + bintime2timespec( _timestamp, _timespec ); +} + +/** + * @brief Convert timestamp to struct timeval. + * + * @param[in] _timestamp points to the timestamp + * @param[in] _timeval points to the timeval + */ +RTEMS_INLINE_ROUTINE void _Timestamp_To_timeval( + const Timestamp_Control *_timestamp, + struct timeval *_timeval +) +{ + bintime2timeval( _timestamp, _timeval ); +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ + +#endif +/* end of include file */ diff --git a/include/rtems/score/tls.h b/include/rtems/score/tls.h new file mode 100644 index 0000000000..51398a0a35 --- /dev/null +++ b/include/rtems/score/tls.h @@ -0,0 +1,202 @@ +/** + * @file + * + * @ingroup ScoreTLS + * + * @brief Thread-Local Storage (TLS) + */ + +/* + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * <rtems@embedded-brains.de> + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_TLS_H +#define _RTEMS_SCORE_TLS_H + +#include <rtems/score/cpu.h> + +#include <string.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup ScoreTLS Thread-Local Storage (TLS) + * + * @ingroup Score + * + * @brief Thread-local storage (TLS) support. + * + * Variants I and II are according to Ulrich Drepper, "ELF Handling For + * Thread-Local Storage". + * + * @{ + */ + +extern char _TLS_Data_begin[]; + +extern char _TLS_Data_end[]; + +extern char _TLS_Data_size[]; + +extern char _TLS_BSS_begin[]; + +extern char _TLS_BSS_end[]; + +extern char _TLS_BSS_size[]; + +extern char _TLS_Size[]; + +extern char _TLS_Alignment[]; + +typedef struct { + /* + * FIXME: Not sure if the generation number type is correct for all + * architectures. + */ + uint32_t generation_number; + + void *tls_blocks[1]; +} TLS_Dynamic_thread_vector; + +typedef struct { + TLS_Dynamic_thread_vector *dtv; + uintptr_t reserved; +} TLS_Thread_control_block; + +typedef struct { + uintptr_t module; + uintptr_t offset; +} TLS_Index; + +static inline uintptr_t _TLS_Get_size( void ) +{ + /* + * Do not use _TLS_Size here since this will lead GCC to assume that this + * symbol is not 0 and the tests for 0 will be optimized away. + */ + return (uintptr_t) _TLS_BSS_end - (uintptr_t) _TLS_Data_begin; +} + +static inline uintptr_t _TLS_Heap_align_up( uintptr_t val ) +{ + uintptr_t msk = CPU_HEAP_ALIGNMENT - 1; + + return (val + msk) & ~msk; +} + +static inline uintptr_t _TLS_Get_thread_control_block_area_size( + uintptr_t alignment +) +{ + return alignment <= sizeof(TLS_Thread_control_block) ? 
+ sizeof(TLS_Thread_control_block) : alignment; +} + +static inline uintptr_t _TLS_Get_allocation_size( + uintptr_t size, + uintptr_t alignment +) +{ + uintptr_t aligned_size = _TLS_Heap_align_up( size ); + + return _TLS_Get_thread_control_block_area_size( alignment ) + + aligned_size + sizeof(TLS_Dynamic_thread_vector); +} + +static inline void *_TLS_Copy_and_clear( void *tls_area ) +{ + tls_area = memcpy( + tls_area, + _TLS_Data_begin, + (size_t) ((uintptr_t)_TLS_Data_size) + ); + + + memset( + (char *) tls_area + (size_t)((intptr_t) _TLS_BSS_begin) - + (size_t)((intptr_t) _TLS_Data_begin), + 0, + ((size_t) (intptr_t)_TLS_BSS_size) + ); + + return tls_area; +} + +static inline void *_TLS_Initialize( + void *tls_block, + TLS_Thread_control_block *tcb, + TLS_Dynamic_thread_vector *dtv +) +{ + tcb->dtv = dtv; + dtv->generation_number = 1; + dtv->tls_blocks[0] = tls_block; + + return _TLS_Copy_and_clear( tls_block ); +} + +/* Use Variant I, TLS offsets emitted by linker takes the TCB into account */ +static inline void *_TLS_TCB_at_area_begin_initialize( void *tls_area ) +{ + void *tls_block = (char *) tls_area + + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment ); + TLS_Thread_control_block *tcb = tls_area; + uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size ); + TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *) + ((char *) tls_block + aligned_size); + + return _TLS_Initialize( tls_block, tcb, dtv ); +} + +/* Use Variant I, TLS offsets emitted by linker neglects the TCB */ +static inline void *_TLS_TCB_before_TLS_block_initialize( void *tls_area ) +{ + void *tls_block = (char *) tls_area + + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment ); + TLS_Thread_control_block *tcb = (TLS_Thread_control_block *) + ((char *) tls_block - sizeof(*tcb)); + uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size ); + TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *) + ((char *) tls_block 
+ aligned_size); + + return _TLS_Initialize( tls_block, tcb, dtv ); +} + +/* Use Variant II */ +static inline void *_TLS_TCB_after_TLS_block_initialize( void *tls_area ) +{ + uintptr_t size = (uintptr_t) _TLS_Size; + uintptr_t tls_align = (uintptr_t) _TLS_Alignment; + uintptr_t tls_mask = tls_align - 1; + uintptr_t heap_align = _TLS_Heap_align_up( tls_align ); + uintptr_t heap_mask = heap_align - 1; + TLS_Thread_control_block *tcb = (TLS_Thread_control_block *) + ((char *) tls_area + ((size + heap_mask) & ~heap_mask)); + void *tls_block = (char *) tcb - ((size + tls_mask) & ~tls_mask); + TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *) + ((char *) tcb + sizeof(*tcb)); + + _TLS_Initialize( tls_block, tcb, dtv ); + + return tcb; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_TLS_H */ diff --git a/include/rtems/score/tod.h b/include/rtems/score/tod.h new file mode 100644 index 0000000000..c0ab5e795d --- /dev/null +++ b/include/rtems/score/tod.h @@ -0,0 +1,32 @@ +/** + * @file + * + * @ingroup ScoreTOD + * + * @brief Time of Day Handler API + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TOD_H +#define _RTEMS_SCORE_TOD_H + +#include <rtems/score/basedefs.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/todimpl.h b/include/rtems/score/todimpl.h new file mode 100644 index 0000000000..a94b140d92 --- /dev/null +++ b/include/rtems/score/todimpl.h @@ -0,0 +1,388 @@ +/** + * @file + * + * @ingroup ScoreTOD + * + * @brief Time of Day Handler API + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). 
+ * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_TODIMPL_H +#define _RTEMS_SCORE_TODIMPL_H + +#include <rtems/score/tod.h> +#include <rtems/score/timestamp.h> +#include <rtems/score/timecounterimpl.h> +#include <rtems/score/watchdog.h> + +#include <sys/time.h> +#include <time.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreTOD Time of Day Handler + * + * @ingroup Score + * + * The following constants are related to the time of day and are + * independent of RTEMS. + */ +/**@{*/ + +/** + * This constant represents the number of seconds in a minute. + */ +#define TOD_SECONDS_PER_MINUTE (uint32_t)60 + +/** + * This constant represents the number of minutes per hour. + */ +#define TOD_MINUTES_PER_HOUR (uint32_t)60 + +/** + * This constant represents the number of months in a year. + */ +#define TOD_MONTHS_PER_YEAR (uint32_t)12 + +/** + * This constant represents the number of days in a non-leap year. + */ +#define TOD_DAYS_PER_YEAR (uint32_t)365 + +/** + * This constant represents the number of hours per day. + */ +#define TOD_HOURS_PER_DAY (uint32_t)24 + +/** + * This constant represents the number of seconds in a day which does + * not include a leap second. + */ +#define TOD_SECONDS_PER_DAY (uint32_t) (TOD_SECONDS_PER_MINUTE * \ + TOD_MINUTES_PER_HOUR * \ + TOD_HOURS_PER_DAY) + +/** + * This constant represents the number of seconds in a non-leap year. + */ +#define TOD_SECONDS_PER_NON_LEAP_YEAR (365 * TOD_SECONDS_PER_DAY) + +/** + * This constant represents the number of millisecond in a second. + */ +#define TOD_MILLISECONDS_PER_SECOND (uint32_t)1000 + +/** + * This constant represents the number of microseconds in a second. + */ +#define TOD_MICROSECONDS_PER_SECOND (uint32_t)1000000 + +/** + * This constant represents the number of nanoseconds in a second. 
+ */ +#define TOD_NANOSECONDS_PER_SECOND (uint32_t)1000000000 + +/** + * This constant represents the number of nanoseconds in a mircosecond. + */ +#define TOD_NANOSECONDS_PER_MICROSECOND (uint32_t)1000 + +/**@}*/ + +/** + * Seconds from January 1, 1970 to January 1, 1988. Used to account for + * differences between POSIX API and RTEMS core. The timespec format time + * is kept in POSIX compliant form. + */ +#define TOD_SECONDS_1970_THROUGH_1988 \ + (((1987 - 1970 + 1) * TOD_SECONDS_PER_NON_LEAP_YEAR) + \ + (4 * TOD_SECONDS_PER_DAY)) + +/** + * @brief Earliest year to which an time of day can be initialized. + * + * The following constant define the earliest year to which an + * time of day can be initialized. This is considered the + * epoch. + */ +#define TOD_BASE_YEAR 1988 + +/** + * @defgroup ScoreTOD Time Of Day (TOD) Handler + * + * @ingroup Score + * + * This handler encapsulates functionality used to manage time of day. + */ +/**@{*/ + +/** + * @brief TOD control. + */ +typedef struct { + /** + * @brief Time of day seconds trigger. + * + * This value specifies the nanoseconds since the last time of day second. + * It is updated and evaluated in _TOD_Tickle_ticks(). It is set in + * _TOD_Set_with_timestamp(). + */ + uint32_t seconds_trigger; + + /** + * @brief Indicates if the time of day is set. + * + * This is true if the application has set the current + * time of day, and false otherwise. + */ + bool is_set; +} TOD_Control; + +SCORE_EXTERN TOD_Control _TOD; + +/** + * @brief Initializes the time of day handler. + * + * Performs the initialization necessary for the Time Of Day handler. + */ +void _TOD_Handler_initialization(void); + +/** + * @brief Sets the time of day from timestamp. + * + * The @a tod_as_timestamp timestamp represents the time since UNIX epoch. + * The watchdog seconds chain will be adjusted. 
+ * + * @param[in] tod_as_timestamp is the constant of the time of day as a timestamp + */ +void _TOD_Set_with_timestamp( + const Timestamp_Control *tod_as_timestamp +); + +/** + * @brief Sets the time of day from timespec. + * + * The @a tod_as_timestamp timestamp represents the time since UNIX epoch. + * The watchdog seconds chain will be adjusted. + * + * In the process the input given as timespec will be transformed to FreeBSD + * bintime format to guarantee the right format for later setting it with a + * timestamp. + * + * @param[in] tod_as_timespec is the constant of the time of day as a timespec + */ +static inline void _TOD_Set( + const struct timespec *tod_as_timespec +) +{ + Timestamp_Control tod_as_timestamp; + + _Timestamp_Set( + &tod_as_timestamp, + tod_as_timespec->tv_sec, + tod_as_timespec->tv_nsec + ); + _TOD_Set_with_timestamp( &tod_as_timestamp ); +} + +/** + * @brief Gets the current time in the bintime format. + * + * @param[out] time is the value gathered by the bintime request + */ +static inline void _TOD_Get( + Timestamp_Control *time +) +{ + _Timecounter_Bintime(time); +} + +/** + * @brief Gets the current time in the timespec format. + * + * @param[out] time is the value gathered by the nanotime request + */ +static inline void _TOD_Get_as_timespec( + struct timespec *time +) +{ + _Timecounter_Nanotime(time); +} + +/** + * @brief Gets the system uptime with potential accuracy to the nanosecond. + * + * This routine returns the system uptime with potential accuracy + * to the nanosecond. + * + * The initial uptime value is undefined. + * + * @param[in] time is a pointer to the uptime to be returned + */ +static inline void _TOD_Get_uptime( + Timestamp_Control *time +) +{ + _Timecounter_Binuptime( time ); +} + +/** + * @brief Gets the system uptime with potential accuracy to the nanosecond. + * to the nanosecond. + * + * The initial uptime value is zero. 
+ * + * @param[in] time is a pointer to the uptime to be returned + */ +static inline void _TOD_Get_zero_based_uptime( + Timestamp_Control *time +) +{ + _Timecounter_Binuptime( time ); + --time->sec; +} + +/** + * @brief Gets the system uptime with potential accuracy to the nanosecond. + * + * The initial uptime value is zero. + * + * @param[in] time is a pointer to the uptime to be returned + */ +static inline void _TOD_Get_zero_based_uptime_as_timespec( + struct timespec *time +) +{ + _Timecounter_Nanouptime( time ); + --time->tv_sec; +} + +/** + * @brief Number of seconds Since RTEMS epoch. + * + * The following contains the number of seconds from 00:00:00 + * January 1, TOD_BASE_YEAR until the current time of day. + */ +static inline uint32_t _TOD_Seconds_since_epoch( void ) +{ + return (uint32_t) _Timecounter_Time_second; +} + +/** + * @brief Increments time of day at each clock tick. + * + * This routine increments the ticks field of the current time of + * day at each clock tick. + */ +void _TOD_Tickle_ticks( void ); + +/** + * @brief Gets number of ticks in a second. + * + * This method returns the number of ticks in a second. + * + * @note If the clock tick value does not multiply evenly into a second + * then this number of ticks will be slightly shorter than a second. + */ +uint32_t TOD_TICKS_PER_SECOND_method(void); + +/** + * @brief Gets number of ticks in a second. + * + * This method exists to hide the fact that TOD_TICKS_PER_SECOND can not + * be implemented as a macro in a .h file due to visibility issues. + * The Configuration Table is not available to SuperCore .h files but + * is available to their .c files. + */ +#define TOD_TICKS_PER_SECOND TOD_TICKS_PER_SECOND_method() + +/** + * This routine returns a timeval based upon the internal timespec format TOD. 
+ */ + +RTEMS_INLINE_ROUTINE void _TOD_Get_timeval( + struct timeval *time +) +{ + _Timecounter_Microtime( time ); +} + +/** + * @brief Adjust the Time of Time + * + * This method is used to adjust the current time of day by the + * specified amount. + * + * @param[in] delta is the amount to adjust + */ +void _TOD_Adjust( + const Timestamp_Control timestamp +); + +/** + * @brief Check if the TOD is Set + * + * @return TRUE is the time is set. FALSE otherwise. + */ +RTEMS_INLINE_ROUTINE bool _TOD_Is_set( void ) +{ + return _TOD.is_set; +} + +/** + * @brief Absolute timeout conversion results. + * + * This enumeration defines the possible results of converting + * an absolute time used for timeouts to POSIX blocking calls to + * a number of ticks for example. + */ +typedef enum { + /** The timeout is invalid. */ + TOD_ABSOLUTE_TIMEOUT_INVALID, + /** The timeout represents a time that is in the past. */ + TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST, + /** The timeout represents a time that is equal to the current time. */ + TOD_ABSOLUTE_TIMEOUT_IS_NOW, + /** The timeout represents a time that is in the future. */ + TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE, +} TOD_Absolute_timeout_conversion_results; + +/** + * @brief Convert absolute timeout to ticks. + * + * This method takes an absolute time being used as a timeout + * to a blocking directive, validates it and returns the number + * of corresponding clock ticks for use by the SuperCore. + * + * @param[in] abstime is a pointer to the timeout + * @param[out] ticks_out will contain the number of ticks + * + * @return This method returns the number of ticks in @a ticks_out + * and a status value indicating whether the absolute time + * is valid, in the past, equal to the current time or in + * the future as it should be. 
+ */ +TOD_Absolute_timeout_conversion_results _TOD_Absolute_timeout_to_ticks( + const struct timespec *abstime, + Watchdog_Interval *ticks_out +); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/userext.h b/include/rtems/score/userext.h new file mode 100644 index 0000000000..e626f2f97a --- /dev/null +++ b/include/rtems/score/userext.h @@ -0,0 +1,273 @@ +/** + * @file + * + * @ingroup ScoreUserExt + * + * @brief User Extension Handler API + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_USEREXT_H +#define _RTEMS_SCORE_USEREXT_H + +#include <rtems/score/interr.h> +#include <rtems/score/chain.h> +#include <rtems/score/thread.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void User_extensions_routine RTEMS_DEPRECATED; + +/** + * @defgroup ScoreUserExt User Extension Handler + * + * @ingroup Score + * + * @brief The User Extension Handler provides invocation of application + * dependent routines at critical points in the life of each thread and the + * system as a whole. + */ +/**@{**/ + +/** + * @brief Task create extension. + * + * It corresponds to _Thread_Initialize() (used by the rtems_task_create() + * directive and pthread_create()). + * + * It is invoked after the new thread has been completely initialized, but + * before it is placed on a ready chain. + * + * Thread dispatching may be disabled. This depends on the context of the + * _Thread_Initialize() call. Thread dispatch is disabled during the creation + * of the idle thread and the initialization threads. It can be considered as + * an invalid API usage, if the application calls _Thread_Initialize() with + * disabled thread dispatching. 
Disabled thread dispatching is different from + * disabled preemption. + * + * It can be assumed that the executing thread locked the allocator mutex. + * The only exception is the creation of the idle thread. In this case the + * allocator mutex is not locked. Since the allocator mutex allows nesting the + * normal memory allocation routines can be used. + * + * @param[in] executing The executing thread. + * @param[in] created The created thread. + * + * @retval true Successful operation. + * @retval false A thread create user extension will frequently attempt to + * allocate resources. If this allocation fails, then the extension should + * return @a false and the entire thread create operation will fail. + */ +typedef bool ( *User_extensions_thread_create_extension )( + Thread_Control *executing, + Thread_Control *created +); + +/** + * @brief Task delete extension. + * + * It corresponds to _Thread_Close() (used by the rtems_task_delete() + * directive, pthread_exit() and pthread_cancel()). + * + * It is invoked before all resources of the thread are deleted. The executing + * and deleted arguments are never equal. + * + * Thread dispatching is enabled. The executing thread locked the allocator + * mutex. + * + * @param[in] executing The executing thread. + * @param[in] deleted The deleted thread. + */ +typedef void( *User_extensions_thread_delete_extension )( + Thread_Control *executing, + Thread_Control *deleted +); + +/** + * @brief Task start extension. + * + * It corresponds to _Thread_Start() (used by the rtems_task_start() + * directive). + * + * It is invoked after the environment of the thread has been loaded and the + * thread has been made ready. + * + * Thread dispatching is disabled. The executing thread is not the holder of + * the allocator mutex. + * + * @param[in] executing The executing thread. + * @param[in] started The started thread. 
+ */ +typedef void( *User_extensions_thread_start_extension )( + Thread_Control *executing, + Thread_Control *started +); + +/** + * @brief Task restart extension. + * + * It corresponds to _Thread_Restart() (used by the rtems_task_restart() + * directive). + * + * It is invoked in the context of the restarted thread right before the + * execution context is reloaded. The executing and restarted arguments are + * always equal. The thread stack reflects the previous execution context. + * + * Thread dispatching is enabled. The thread is not the holder of the + * allocator mutex. The thread life is protected. Thread restart and delete + * requests issued by restart extensions lead to recursion. + * + * @param[in] executing The executing thread. + * @param[in] restarted The executing thread. Yes, the executing thread. + */ +typedef void( *User_extensions_thread_restart_extension )( + Thread_Control *executing, + Thread_Control *restarted +); + +/** + * @brief Task switch extension. + * + * It corresponds to _Thread_Dispatch(). + * + * It is invoked before the context switch from the executing to the heir + * thread. + * + * Thread dispatching is disabled. The state of the allocator mutex is + * arbitrary. Interrupts are disabled and the per-CPU lock is acquired on SMP + * configurations. + * + * The context switches initiated through _Thread_Start_multitasking() are not + * covered by this extension. + * + * @param[in] executing The executing thread. + * @param[in] heir The heir thread. + */ +typedef void( *User_extensions_thread_switch_extension )( + Thread_Control *executing, + Thread_Control *heir +); + +/** + * @brief Task begin extension. + * + * It corresponds to _Thread_Handler(). + * + * Thread dispatching is disabled. The executing thread is not the holder of + * the allocator mutex. + * + * @param[in] executing The executing thread. 
+ */ +typedef void( *User_extensions_thread_begin_extension )( + Thread_Control *executing +); + +/** + * @brief Task exitted extension. + * + * It corresponds to _Thread_Handler() after a return of the entry function. + * + * Thread dispatching is disabled. The state of the allocator mutex is + * arbitrary. + * + * @param[in] executing The executing thread. + */ +typedef void( *User_extensions_thread_exitted_extension )( + Thread_Control *executing +); + +/** + * @brief Fatal error extension. + * + * It corresponds to _Terminate() (used by the rtems_fatal() directive). + * + * This extension should not call any RTEMS directives. + * + * @param[in] source The fatal source indicating the subsystem the fatal + * condition originated in. + * @param[in] is_internal Indicates if the fatal condition was generated + * internally to the executive. + * @param[in] code The fatal error code. This value must be interpreted with + * respect to the source. + */ +typedef void( *User_extensions_fatal_extension )( + Internal_errors_Source source, + bool is_internal, + Internal_errors_t code +); + +/** + * @brief Task termination extension. + * + * This extension is invoked by _Thread_Life_action_handler() in case a + * termination request is recognized. + * + * It is invoked in the context of the terminated thread right before the + * thread dispatch to the heir thread. The POSIX cleanup and key destructors + * execute in this context. + * + * Thread dispatching is enabled. The thread is not the holder of the + * allocator mutex. The thread life is protected. Thread restart and delete + * requests issued by terminate extensions lead to recursion. + * + * @param[in] terminated The terminated thread. + */ +typedef void( *User_extensions_thread_terminate_extension )( + Thread_Control *terminated +); + +/** + * @brief User extension table. 
+ */ +typedef struct { + User_extensions_thread_create_extension thread_create; + User_extensions_thread_start_extension thread_start; + User_extensions_thread_restart_extension thread_restart; + User_extensions_thread_delete_extension thread_delete; + User_extensions_thread_switch_extension thread_switch; + User_extensions_thread_begin_extension thread_begin; + User_extensions_thread_exitted_extension thread_exitted; + User_extensions_fatal_extension fatal; + User_extensions_thread_terminate_extension thread_terminate; +} User_extensions_Table; + +/** + * @brief Manages the switch callouts. + * + * They are managed separately from other extensions for performance reasons. + */ +typedef struct { + Chain_Node Node; + User_extensions_thread_switch_extension thread_switch; +} User_extensions_Switch_control; + +/** + * @brief Manages each user extension set. + * + * The switch control is part of the extensions control even if not used due to + * the extension not having a switch handler. + */ +typedef struct { + Chain_Node Node; + User_extensions_Switch_control Switch; + User_extensions_Table Callouts; +} User_extensions_Control; + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/userextimpl.h b/include/rtems/score/userextimpl.h new file mode 100644 index 0000000000..8c2a1fac4d --- /dev/null +++ b/include/rtems/score/userextimpl.h @@ -0,0 +1,276 @@ +/** + * @file + * + * @ingroup ScoreUserExt + * + * @brief User Extension Handler API + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ + +#ifndef _RTEMS_SCORE_USEREXTIMPL_H +#define _RTEMS_SCORE_USEREXTIMPL_H + +#include <rtems/score/userext.h> +#include <rtems/score/chainimpl.h> +#include <rtems/score/percpu.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreUserExt User Extension Handler + * + * @ingroup Score + * + * @addtogroup ScoreUserExt + */ +/**@{**/ + +/** + * @brief List of active extensions. + */ +extern Chain_Control _User_extensions_List; + +/** + * @brief List of active task switch extensions. + */ +extern Chain_Control _User_extensions_Switches_list; + +/** + * @name Extension Maintainance + */ +/**@{**/ + +void _User_extensions_Handler_initialization( void ); + +void _User_extensions_Add_set( + User_extensions_Control *extension +); + +RTEMS_INLINE_ROUTINE void _User_extensions_Add_API_set( + User_extensions_Control *extension +) +{ + _User_extensions_Add_set( extension ); +} + +RTEMS_INLINE_ROUTINE void _User_extensions_Add_set_with_table( + User_extensions_Control *extension, + const User_extensions_Table *extension_table +) +{ + extension->Callouts = *extension_table; + + _User_extensions_Add_set( extension ); +} + +void _User_extensions_Remove_set( + User_extensions_Control *extension +); + +/** + * @brief User extension visitor. + * + * @param[in, out] executing The currently executing thread. + * @param[in, out] arg The argument passed to _User_extensions_Iterate(). + * @param[in] callouts The current callouts. 
+ */ +typedef void (*User_extensions_Visitor)( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +typedef struct { + Thread_Control *created; + bool ok; +} User_extensions_Thread_create_context; + +void _User_extensions_Thread_create_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_delete_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_start_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_restart_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_begin_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_exitted_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +typedef struct { + Internal_errors_Source source; + bool is_internal; + Internal_errors_t error; +} User_extensions_Fatal_context; + +void _User_extensions_Fatal_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +void _User_extensions_Thread_terminate_visitor( + Thread_Control *executing, + void *arg, + const User_extensions_Table *callouts +); + +/** + * @brief Iterates through all user extensions and calls the visitor for each. + * + * @param[in, out] arg The argument passed to the visitor. + * @param[in] visitor is the visitor for each extension. 
+ */ +void _User_extensions_Iterate( + void *arg, + User_extensions_Visitor visitor +); + +/** @} */ + +/** + * @name Extension Callout Dispatcher + */ +/**@{**/ + +static inline bool _User_extensions_Thread_create( Thread_Control *created ) +{ + User_extensions_Thread_create_context ctx = { created, true }; + + _User_extensions_Iterate( &ctx, _User_extensions_Thread_create_visitor ); + + return ctx.ok; +} + +static inline void _User_extensions_Thread_delete( Thread_Control *deleted ) +{ + _User_extensions_Iterate( + deleted, + _User_extensions_Thread_delete_visitor + ); +} + +static inline void _User_extensions_Thread_start( Thread_Control *started ) +{ + _User_extensions_Iterate( + started, + _User_extensions_Thread_start_visitor + ); +} + +static inline void _User_extensions_Thread_restart( Thread_Control *restarted ) +{ + _User_extensions_Iterate( + restarted, + _User_extensions_Thread_restart_visitor + ); +} + +static inline void _User_extensions_Thread_begin( Thread_Control *executing ) +{ + _User_extensions_Iterate( + executing, + _User_extensions_Thread_begin_visitor + ); +} + +static inline void _User_extensions_Thread_switch( + Thread_Control *executing, + Thread_Control *heir +) +{ + const Chain_Control *chain = &_User_extensions_Switches_list; + const Chain_Node *tail = _Chain_Immutable_tail( chain ); + const Chain_Node *node = _Chain_Immutable_first( chain ); + + if ( node != tail ) { + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + _Per_CPU_Acquire( cpu_self ); + + while ( node != tail ) { + const User_extensions_Switch_control *extension = + (const User_extensions_Switch_control *) node; + + (*extension->thread_switch)( executing, heir ); + + node = _Chain_Immutable_next( node ); + } + + _Per_CPU_Release( cpu_self ); + } +} + +static inline void _User_extensions_Thread_exitted( Thread_Control *executing ) +{ + _User_extensions_Iterate( + executing, + _User_extensions_Thread_exitted_visitor + ); +} + +static inline void _User_extensions_Fatal( + 
Internal_errors_Source source, + bool is_internal, + Internal_errors_t error +) +{ + User_extensions_Fatal_context ctx = { source, is_internal, error }; + + _User_extensions_Iterate( &ctx, _User_extensions_Fatal_visitor ); +} + +static inline void _User_extensions_Thread_terminate( + Thread_Control *executing +) +{ + _User_extensions_Iterate( + executing, + _User_extensions_Thread_terminate_visitor + ); +} + +/** @} */ + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif +/* end of include file */ diff --git a/include/rtems/score/v850.h b/include/rtems/score/v850.h new file mode 100644 index 0000000000..26ab6c209d --- /dev/null +++ b/include/rtems/score/v850.h @@ -0,0 +1,137 @@ +/** + * @file + * + * @brief V850 Set up Basic CPU Dependency Settings Based on Compiler Settings + * + * This file sets up basic CPU dependency settings based on + * compiler settings. For example, it can determine if + * floating point is available. This particular implementation + * is specified to the Renesas v850 port. + */ + +/* + * COPYRIGHT (c) 1989-2012. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_V850_H +#define _RTEMS_SCORE_V850_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This file contains the information required to build + * RTEMS for a particular member of the NO CPU family. + * It does this by setting variables to indicate which + * implementation dependent features are present in a particular + * member of the family. + * + * This is a good place to list all the known CPU models + * that this port supports and which RTEMS CPU model they correspond + * to. + */ + +#if defined(rtems_multilib) +/* + * Figure out all CPU Model Feature Flags based upon compiler + * predefines. 
+ */ +#define CPU_MODEL_NAME "rtems_multilib" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 0 + +#elif defined(__v850e2v3__) +#define CPU_MODEL_NAME "v850e2v3" +#define V850_HAS_FPU 1 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 1 + +#elif defined(__v850e2__) +#define CPU_MODEL_NAME "v850e2" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 1 + +#elif defined(__v850es__) +#define CPU_MODEL_NAME "v850es" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 1 + +#elif defined(__v850e1__) +#define CPU_MODEL_NAME "v850e1" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 1 + +#elif defined(__v850e__) +#define CPU_MODEL_NAME "v850e" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 1 + +#else +#define CPU_MODEL_NAME "v850" +#define V850_HAS_FPU 0 +#define V850_HAS_BYTE_SWAP_INSTRUCTION 0 + +#endif + +/* + * Define the name of the CPU family. + */ +#define CPU_NAME "v850 CPU" + +/* + * Method to set the Program Status Word (PSW) + */ +#define v850_set_psw( _psw ) \ + __asm__ __volatile__( "ldsr %0, psw" : : "r" (_psw) ) + +/* + * Method to obtain the Program Status Word (PSW) + */ +#define v850_get_psw( _psw ) \ + __asm__ __volatile__( "stsr psw, %0" : "=&r" (_psw) ) + +/* + * Masks and bits in the Program Status Word (PSW) + */ +#define V850_PSW_ZERO_MASK 0x01 +#define V850_PSW_IS_ZERO 0x01 +#define V850_PSW_IS_NOT 0x00 + +#define V850_PSW_SIGN_MASK 0x02 +#define V850_PSW_SIGN_IS_NEGATIVE 0x02 +#define V850_PSW_SIGN_IS_ZERO_OR_POSITIVE 0x00 + +#define V850_PSW_OVERFLOW_MASK 0x02 +#define V850_PSW_OVERFLOW_OCCURRED 0x02 +#define V850_PSW_OVERFLOW_DID_NOT_OCCUR 0x00 + +#define V850_PSW_CARRY_OR_BORROW_MASK 0x04 +#define V850_PSW_CARRY_OR_BORROW_OCCURRED 0x04 +#define V850_PSW_CARRY_OR_BORROW_DID_NOT_OCCUR 0x00 + +#define V850_PSW_SATURATION_MASK 0x10 +#define V850_PSW_SATURATION_OCCURRED 0x10 +#define V850_PSW_SATURATION_DID_NOT_OCCUR 0x00 + +#define V850_PSW_INTERRUPT_DISABLE_MASK 0x20 +#define 
V850_PSW_INTERRUPT_DISABLE 0x20 +#define V850_PSW_INTERRUPT_ENABLE 0x00 + +#define V850_PSW_EXCEPTION_IN_PROCESS_MASK 0x40 +#define V850_PSW_EXCEPTION_IN_PROCESS 0x40 +#define V850_PSW_EXCEPTION_NOT_IN_PROCESS 0x00 + +#define V850_PSW_NMI_IN_PROCESS_MASK 0x80 +#define V850_PSW_NMI_IN_PROCESS 0x80 +#define V850_PSW_NMI_NOT_IN_PROCESS 0x00 + +#ifdef __cplusplus +} +#endif + +#endif /* _RTEMS_SCORE_V850_H */ diff --git a/include/rtems/score/watchdog.h b/include/rtems/score/watchdog.h new file mode 100644 index 0000000000..bad7269051 --- /dev/null +++ b/include/rtems/score/watchdog.h @@ -0,0 +1,137 @@ +/** + * @file rtems/score/watchdog.h + * + * @brief Constants and Structures Associated with Watchdog Timers + * + * This include file contains all the constants and structures associated + * with watchdog timers. This Handler provides mechanisms which can be + * used to initialize and manipulate watchdog timers. + */ + +/* + * COPYRIGHT (c) 1989-2009. + * On-Line Applications Research Corporation (OAR). + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + +#ifndef _RTEMS_SCORE_WATCHDOG_H +#define _RTEMS_SCORE_WATCHDOG_H + +#include <rtems/score/object.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ScoreWatchdog Watchdog Handler + * + * @ingroup Score + * + * This handler encapsulates functionality related to the scheduling of + * watchdog functions to be called at specific times in the future. + * + * @note This handler does not have anything to do with hardware watchdog + * timers. + */ +/**@{*/ + +/** + * @brief Type is used to specify the length of intervals. + * + * This type is used to specify the length of intervals. + */ +typedef uint32_t Watchdog_Interval; + +/** + * @brief Return type from a Watchdog Service Routine. + * + * This type defines the return type from a Watchdog Service Routine. 
+ */ +typedef void Watchdog_Service_routine; + +/** + * @brief Pointer to a watchdog service routine. + * + * This type define a pointer to a watchdog service routine. + */ +typedef Watchdog_Service_routine ( *Watchdog_Service_routine_entry )( + Objects_Id, + void * + ); + +/** + * @brief The constant for indefinite wait. + * + * This is the constant for indefinite wait. It is actually an + * illegal interval. + */ +#define WATCHDOG_NO_TIMEOUT 0 + +/** + * @brief Set of the states which a watchdog timer may be at any given time. + * + * This enumerated type is the set of the states in which a + * watchdog timer may be at any given time. + */ + +typedef enum { + /** This is the state when the watchdog is off all chains */ + WATCHDOG_INACTIVE, + /** This is the state when the watchdog is off all chains, but we are + * currently searching for the insertion point. + */ + WATCHDOG_BEING_INSERTED, + /** This is the state when the watchdog is on a chain, and allowed to fire. */ + WATCHDOG_ACTIVE +} Watchdog_States; + +/** + * @brief The control block used to manage each watchdog timer. + * + * The following record defines the control block used + * to manage each watchdog timer. + */ +typedef struct { + /** This field is a Chain Node structure and allows this to be placed on + * chains for set management. + */ + Chain_Node Node; + /** This field is the state of the watchdog. */ + Watchdog_States state; + /** This field is the initially requested interval. */ + Watchdog_Interval initial; + /** This field is the remaining portion of the interval. */ + Watchdog_Interval delta_interval; + /** This field is the number of system clock ticks when this was scheduled. */ + Watchdog_Interval start_time; + /** This field is the number of system clock ticks when this was suspended. */ + Watchdog_Interval stop_time; + /** This field is the function to invoke. */ + Watchdog_Service_routine_entry routine; + /** This field is the Id to pass as an argument to the routine. 
+   */
+  Objects_Id id;
+  /** This field is an untyped pointer to user data that is passed to the
+   *  watchdog handler routine.
+   */
+  void *user_data;
+} Watchdog_Control;
+
+/**
+ * @brief The watchdog ticks counter.
+ *
+ * With a 1ms watchdog tick, this counter overflows after 50 days since boot.
+ */
+extern volatile Watchdog_Interval _Watchdog_Ticks_since_boot;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/include/rtems/score/watchdogimpl.h b/include/rtems/score/watchdogimpl.h
new file mode 100644
index 0000000000..0e04f64d7b
--- /dev/null
+++ b/include/rtems/score/watchdogimpl.h
@@ -0,0 +1,513 @@
+/**
+ * @file
+ *
+ * @brief Inlined Routines in the Watchdog Handler
+ *
+ * This file contains the static inline implementation of all inlined
+ * routines in the Watchdog Handler.
+ */
+
+/*
+ *  COPYRIGHT (c) 1989-2004.
+ *  On-Line Applications Research Corporation (OAR).
+ *
+ *  The license and distribution terms for this file may be
+ *  found in the file LICENSE in this distribution or at
+ *  http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_WATCHDOGIMPL_H
+#define _RTEMS_SCORE_WATCHDOGIMPL_H
+
+#include <rtems/score/watchdog.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/isrlock.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreWatchdog
+ * @{
+ */
+
+/**
+ * @brief Watchdog initializer for static initialization.
+ *
+ * The watchdog is initialized inactive with the given service routine,
+ * object identifier and user data.
+ *
+ * @see _Watchdog_Initialize().
+ */
+#define WATCHDOG_INITIALIZER( routine, id, user_data ) \
+  { \
+    { NULL, NULL }, \
+    WATCHDOG_INACTIVE, \
+    0, 0, 0, 0, \
+    ( routine ), ( id ), ( user_data ) \
+  }
+
+/**
+ * @brief Iterator item to synchronize concurrent insert, remove and tickle
+ * operations.
+ */
+typedef struct {
+  /**
+   * @brief A node for a Watchdog_Header::Iterators chain.
+   */
+  Chain_Node Node;
+
+  /**
+   * @brief The current delta interval of the new watchdog to insert.
+   */
+  Watchdog_Interval delta_interval;
+
+  /**
+   * @brief The current watchdog of the chain on the way to insert the new
+   * watchdog.
+   */
+  Chain_Node *current;
+} Watchdog_Iterator;
+
+/**
+ * @brief Watchdog header.
+ */
+typedef struct {
+  /**
+   * @brief ISR lock to protect this watchdog chain.
+   */
+  ISR_LOCK_MEMBER( Lock )
+
+  /**
+   * @brief The chain of active or transient watchdogs.
+   */
+  Chain_Control Watchdogs;
+
+  /**
+   * @brief Currently active iterators.
+   *
+   * The iterators are registered in _Watchdog_Insert() and updated in case the
+   * watchdog chain changes.
+   */
+  Chain_Control Iterators;
+} Watchdog_Header;
+
+/**
+ * @brief Watchdog chain which is managed at ticks.
+ *
+ * This is the watchdog chain which is managed at ticks.
+ */
+SCORE_EXTERN Watchdog_Header _Watchdog_Ticks_header;
+
+/**
+ * @brief Watchdog chain which is managed at second boundaries.
+ *
+ * This is the watchdog chain which is managed at second boundaries.
+ */
+SCORE_EXTERN Watchdog_Header _Watchdog_Seconds_header;
+
+/** Disables interrupts and acquires the lock of this watchdog chain. */
+RTEMS_INLINE_ROUTINE void _Watchdog_Acquire(
+  Watchdog_Header  *header,
+  ISR_lock_Context *lock_context
+)
+{
+  _ISR_lock_ISR_disable_and_acquire( &header->Lock, lock_context );
+}
+
+/** Releases the lock of this watchdog chain and enables interrupts. */
+RTEMS_INLINE_ROUTINE void _Watchdog_Release(
+  Watchdog_Header  *header,
+  ISR_lock_Context *lock_context
+)
+{
+  _ISR_lock_Release_and_ISR_enable( &header->Lock, lock_context );
+}
+
+/** Briefly releases and re-acquires the lock of this watchdog chain. */
+RTEMS_INLINE_ROUTINE void _Watchdog_Flash(
+  Watchdog_Header  *header,
+  ISR_lock_Context *lock_context
+)
+{
+  _ISR_lock_Flash( &header->Lock, lock_context );
+}
+
+/**
+ * @brief Initialize the watchdog handler.
+ *
+ * This routine initializes the watchdog handler.  The watchdog
+ * synchronization flag is initialized and the watchdog chains are
+ * initialized and emptied.
+ */
+void _Watchdog_Handler_initialization( void );
+
+/**
+ * @brief Triggers a watchdog tick.
+ *
+ * This routine executes TOD, watchdog and scheduler ticks.
+ */
+void _Watchdog_Tick( void );
+
+/**
+ * @brief Removes @a the_watchdog from the watchdog chain.
+ *
+ * This routine removes @a the_watchdog from the watchdog chain on which
+ * it resides and returns the state @a the_watchdog timer was in.
+ *
+ * @param[in] header The watchdog chain.
+ * @param[in] the_watchdog will be removed
+ * @retval the state in which @a the_watchdog was in when removed
+ */
+Watchdog_States _Watchdog_Remove (
+  Watchdog_Header  *header,
+  Watchdog_Control *the_watchdog
+);
+
+/**
+ * @brief Adjusts the header watchdog chain in the backward direction for
+ * units ticks.
+ *
+ * @param[in] header The watchdog chain.
+ * @param[in] units The units of ticks to adjust.
+ */
+void _Watchdog_Adjust_backward(
+  Watchdog_Header   *header,
+  Watchdog_Interval  units
+);
+
+/**
+ * @brief Adjusts the watchdogs in backward direction in a locked context.
+ *
+ * The caller must be the owner of the watchdog lock and will be the owner
+ * after the call.
+ *
+ * @param[in] header The watchdog header.
+ * @param[in] units The units of ticks to adjust.
+ *
+ * @see _Watchdog_Adjust_backward().
+ */
+void _Watchdog_Adjust_backward_locked(
+  Watchdog_Header   *header,
+  Watchdog_Interval  units
+);
+
+/**
+ * @brief Adjusts the header watchdog chain in the forward direction for units
+ * ticks.
+ *
+ * This may lead to several _Watchdog_Tickle() invocations.
+ *
+ * @param[in] header The watchdog chain.
+ * @param[in] units The units of ticks to adjust.
+ */
+void _Watchdog_Adjust_forward(
+  Watchdog_Header   *header,
+  Watchdog_Interval  units
+);
+
+/**
+ * @brief Adjusts the watchdogs in forward direction in a locked context.
+ *
+ * The caller must be the owner of the watchdog lock and will be the owner
+ * after the call.  This function may release and acquire the watchdog lock
+ * internally.
+ *
+ * @param[in] header The watchdog header.
+ * @param[in] units The units of ticks to adjust.
+ * @param[in] lock_context The lock context.
+ *
+ * @see _Watchdog_Adjust_forward().
+ */
+void _Watchdog_Adjust_forward_locked(
+  Watchdog_Header   *header,
+  Watchdog_Interval  units,
+  ISR_lock_Context  *lock_context
+);
+
+/**
+ * @brief Inserts @a the_watchdog into the @a header watchdog chain
+ * for a time of @a units.
+ *
+ * This routine inserts @a the_watchdog into the @a header watchdog chain
+ * for a time of @a units.
+ * Update the delta interval counters.
+ *
+ * @param[in] header is @a the_watchdog list to insert @a the_watchdog on
+ * @param[in] the_watchdog is the watchdog to insert
+ */
+void _Watchdog_Insert (
+  Watchdog_Header  *header,
+  Watchdog_Control *the_watchdog
+);
+
+/**
+ * @brief Inserts the watchdog in a locked context.
+ *
+ * The caller must be the owner of the watchdog lock and will be the owner
+ * after the call.  This function may release and acquire the watchdog lock
+ * internally.
+ *
+ * @param[in] header The watchdog header.
+ * @param[in] the_watchdog The watchdog.
+ * @param[in] lock_context The lock context.
+ *
+ * @see _Watchdog_Insert().
+ */
+void _Watchdog_Insert_locked(
+  Watchdog_Header  *header,
+  Watchdog_Control *the_watchdog,
+  ISR_lock_Context *lock_context
+);
+
+/**
+ * @brief This routine is invoked at appropriate intervals to update
+ * the @a header watchdog chain.
+ *
+ * This routine is invoked at appropriate intervals to update
+ * the @a header watchdog chain.
+ * This routine decrements the delta counter in response to a tick.
+ *
+ * @param[in] header is the watchdog chain to tickle
+ */
+void _Watchdog_Tickle (
+  Watchdog_Header *header
+);
+
+/**
+ * @brief Pre-initializes a watchdog.
+ *
+ * This routine must be called before a watchdog is used in any way.  The
+ * exception are statically initialized watchdogs via WATCHDOG_INITIALIZER().
+ *
+ * @param[in] the_watchdog The uninitialized watchdog.
+ */
+RTEMS_INLINE_ROUTINE void _Watchdog_Preinitialize(
+  Watchdog_Control *the_watchdog
+)
+{
+  the_watchdog->state = WATCHDOG_INACTIVE;
+#if defined(RTEMS_DEBUG)
+  /* Poison the handler fields so use of an uninitialized watchdog faults. */
+  the_watchdog->routine = NULL;
+  the_watchdog->id = 0;
+  the_watchdog->user_data = NULL;
+#endif
+}
+
+/**
+ * This routine initializes the specified watchdog.  The watchdog is
+ * made inactive, the watchdog id and handler routine are set to the
+ * specified values.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Initialize(
+  Watchdog_Control               *the_watchdog,
+  Watchdog_Service_routine_entry  routine,
+  Objects_Id                      id,
+  void                           *user_data
+)
+{
+  _Assert( the_watchdog->state == WATCHDOG_INACTIVE );
+  the_watchdog->routine   = routine;
+  the_watchdog->id        = id;
+  the_watchdog->user_data = user_data;
+}
+
+/**
+ * This routine returns true if the watchdog timer is in the ACTIVE
+ * state, and false otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_active(
+  Watchdog_Control *the_watchdog
+)
+{
+
+  return ( the_watchdog->state == WATCHDOG_ACTIVE );
+
+}
+
+/**
+ * This routine activates THE_WATCHDOG timer which is already
+ * on a watchdog chain.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Activate(
+  Watchdog_Control *the_watchdog
+)
+{
+
+  the_watchdog->state = WATCHDOG_ACTIVE;
+
+}
+
+/**
+ * This routine is invoked at each clock tick to update the ticks
+ * watchdog chain.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Tickle_ticks( void )
+{
+
+  _Watchdog_Tickle( &_Watchdog_Ticks_header );
+
+}
+
+/**
+ * This routine is invoked at each clock tick to update the seconds
+ * watchdog chain.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Tickle_seconds( void )
+{
+
+  _Watchdog_Tickle( &_Watchdog_Seconds_header );
+
+}
+
+/**
+ * This routine inserts THE_WATCHDOG into the ticks watchdog chain
+ * for a time of UNITS ticks.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Insert_ticks(
+  Watchdog_Control  *the_watchdog,
+  Watchdog_Interval  units
+)
+{
+
+  the_watchdog->initial = units;
+
+  _Watchdog_Insert( &_Watchdog_Ticks_header, the_watchdog );
+
+}
+
+/**
+ * This routine inserts THE_WATCHDOG into the seconds watchdog chain
+ * for a time of UNITS seconds.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Insert_seconds(
+  Watchdog_Control  *the_watchdog,
+  Watchdog_Interval  units
+)
+{
+
+  the_watchdog->initial = units;
+
+  _Watchdog_Insert( &_Watchdog_Seconds_header, the_watchdog );
+
+}
+
+/**
+ * This routine removes THE_WATCHDOG from the ticks watchdog chain and
+ * returns the state the timer was in.
+ */
+RTEMS_INLINE_ROUTINE Watchdog_States _Watchdog_Remove_ticks(
+  Watchdog_Control *the_watchdog
+)
+{
+  return _Watchdog_Remove( &_Watchdog_Ticks_header, the_watchdog );
+}
+
+/**
+ * This routine removes THE_WATCHDOG from the seconds watchdog chain and
+ * returns the state the timer was in.
+ */
+RTEMS_INLINE_ROUTINE Watchdog_States _Watchdog_Remove_seconds(
+  Watchdog_Control *the_watchdog
+)
+{
+  return _Watchdog_Remove( &_Watchdog_Seconds_header, the_watchdog );
+}
+
+/**
+ * This routine resets THE_WATCHDOG timer to its state at INSERT
+ * time.  This routine is valid only on interval watchdog timers
+ * and is used to make an interval watchdog timer fire "every" so
+ * many ticks.
+ */
+
+RTEMS_INLINE_ROUTINE void _Watchdog_Reset_ticks(
+  Watchdog_Control *the_watchdog
+)
+{
+
+  _Watchdog_Remove_ticks( the_watchdog );
+
+  _Watchdog_Insert( &_Watchdog_Ticks_header, the_watchdog );
+
+}
+
+/**
+ * This routine returns a pointer to the watchdog timer following
+ * THE_WATCHDOG on the watchdog chain.
+ */
+
+RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_Next(
+  Watchdog_Control *the_watchdog
+)
+{
+
+  return ( (Watchdog_Control *) the_watchdog->Node.next );
+
+}
+
+/**
+ * This routine returns a pointer to the watchdog timer preceding
+ * THE_WATCHDOG on the watchdog chain.
+ */
+
+RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_Previous(
+  Watchdog_Control *the_watchdog
+)
+{
+
+  return ( (Watchdog_Control *) the_watchdog->Node.previous );
+
+}
+
+/**
+ * This routine returns a pointer to the first watchdog timer
+ * on the watchdog chain HEADER.
+ */
+
+RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_First(
+  Watchdog_Header *header
+)
+{
+
+  return ( (Watchdog_Control *) _Chain_First( &header->Watchdogs ) );
+
+}
+
+/**
+ * This routine returns a pointer to the last watchdog timer
+ * on the watchdog chain HEADER.
+ */
+
+RTEMS_INLINE_ROUTINE Watchdog_Control *_Watchdog_Last(
+  Watchdog_Header *header
+)
+{
+
+  return ( (Watchdog_Control *) _Chain_Last( &header->Watchdogs ) );
+
+}
+
+/** This routine returns true if the watchdog chain HEADER is empty. */
+RTEMS_INLINE_ROUTINE bool _Watchdog_Is_empty(
+  const Watchdog_Header *header
+)
+{
+  return _Chain_Is_empty( &header->Watchdogs );
+}
+
+/** This routine initializes the lock and the chains of the HEADER. */
+RTEMS_INLINE_ROUTINE void _Watchdog_Header_initialize(
+  Watchdog_Header *header
+)
+{
+  _ISR_lock_Initialize( &header->Lock, "Watchdog" );
+  _Chain_Initialize_empty( &header->Watchdogs );
+  _Chain_Initialize_empty( &header->Iterators );
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */
diff --git a/include/rtems/score/wkspace.h b/include/rtems/score/wkspace.h
new file mode 100644
index 0000000000..18b01d3ced
--- /dev/null
+++ b/include/rtems/score/wkspace.h
@@ -0,0 +1,138 @@
+/**
+ * @file rtems/score/wkspace.h
+ *
+ * @brief Information Related to the RAM Workspace
+ *
+ * This include file contains information related to the
+ * RAM Workspace.  This Handler provides mechanisms which can be used to
+ * define, initialize and manipulate the workspace.
+ */
+
+/*
+ *  COPYRIGHT (c) 1989-2009.
+ *  On-Line Applications Research Corporation (OAR).
+ *
+ *  The license and distribution terms for this file may be
+ *  found in the file LICENSE in this distribution or at
+ *  http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_WKSPACE_H
+#define _RTEMS_SCORE_WKSPACE_H
+
+#include <rtems/score/heap.h>
+#include <rtems/score/interr.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreWorkspace Workspace Handler
+ *
+ * @ingroup Score
+ *
+ * This handler encapsulates functionality related to the management of
+ * the RTEMS Executive Workspace.
+ */
+/**@{*/
+
+/**
+ * @brief Executive workspace control.
+ *
+ * This is the heap control structure used to manage the RTEMS Executive
+ * Workspace.
+ */
+SCORE_EXTERN Heap_Control _Workspace_Area;
+
+/**
+ * @brief Initialize workspace handler.
+ *
+ * This routine performs the initialization necessary for this handler.
+ */
+void _Workspace_Handler_initialization(
+  Heap_Area *areas,
+  size_t area_count,
+  Heap_Initialization_or_extend_handler extend
+);
+
+/**
+ * @brief Allocate memory from workspace.
+ *
+ * This routine returns the address of a block of memory of size
+ * bytes.  If a block of the appropriate size cannot be allocated
+ * from the workspace, then NULL is returned.
+ *
+ * @param size is the requested size
+ *
+ * @retval a pointer to the requested memory or NULL.
+ */
+void *_Workspace_Allocate(
+  size_t size
+);
+
+/**
+ * @brief Allocate aligned memory from workspace.
+ *
+ * @param[in] size The size of the requested memory.
+ * @param[in] alignment The alignment of the requested memory.
+ *
+ * @retval NULL Not enough resources.
+ * @retval other The memory area begin.
+ */
+void *_Workspace_Allocate_aligned( size_t size, size_t alignment );
+
+/**
+ * @brief Free memory to the workspace.
+ *
+ * This function frees the specified block of memory.
+ *
+ * @param block is the memory to free
+ *
+ * @note If @a block is equal to NULL, then the request is ignored.
+ *       This allows the caller to not worry about whether or not
+ *       a pointer is NULL.
+ */
+
+void _Workspace_Free(
+  void *block
+);
+
+/**
+ * @brief Workspace allocate or fail with fatal error.
+ *
+ * This routine returns the address of a block of memory of @a size
+ * bytes.  If a block of the appropriate size cannot be allocated
+ * from the workspace, then the internal error handler is invoked.
+ *
+ * @param[in] size is the desired number of bytes to allocate
+ * @retval If successful, the starting address of the allocated memory
+ */
+void *_Workspace_Allocate_or_fatal_error(
+  size_t size
+);
+
+/**
+ * @brief Duplicates string with memory from the workspace.
+ *
+ * @param[in] string is the pointer to a zero terminated string.
+ * @param[in] len is the length of the string (equal to strlen(string)).
+ *
+ * @retval NULL Not enough memory.
+ * @retval other Duplicated string.
+ */
+char *_Workspace_String_duplicate(
+  const char *string,
+  size_t len
+);
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/* end of include file */