path: root/cpukit/score/cpu/arm/include/rtems/score
Diffstat (limited to 'cpukit/score/cpu/arm/include/rtems/score')
-rwxr-xr-x  cpukit/score/cpu/arm/include/rtems/score/arm.h        |  94
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/armv4.h      |  98
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/armv7m.h     | 620
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpu.h        | 702
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h    |  39
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h  |  14
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h    | 111
-rw-r--r--  cpukit/score/cpu/arm/include/rtems/score/types.h      |  51
8 files changed, 1729 insertions, 0 deletions
diff --git a/cpukit/score/cpu/arm/include/rtems/score/arm.h b/cpukit/score/cpu/arm/include/rtems/score/arm.h
new file mode 100755
index 0000000000..f08da1dc57
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/arm.h
@@ -0,0 +1,94 @@
+/**
+ * @file
+ *
+ * @brief ARM Assembler Support API
+ */
+
+/*
+ * COPYRIGHT (c) 2000 Canon Research Centre France SA.
+ * Emmanuel Raguet, mailto:raguet@crf.canon.fr
+ *
+ * Copyright (c) 2002 Advent Networks, Inc.
+ * Jay Monkman <jmonkman@adventnetworks.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#ifndef _RTEMS_SCORE_ARM_H
+#define _RTEMS_SCORE_ARM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreCPU
+ */
+/**@{**/
+
+#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
+ #define CPU_MODEL_NAME "ARMv7M"
+ #define ARM_MULTILIB_ARCH_V7M
+#elif defined(__ARM_ARCH_6M__)
+ #define CPU_MODEL_NAME "ARMv6M"
+ #define ARM_MULTILIB_ARCH_V6M
+#else
+ #define CPU_MODEL_NAME "ARMv4"
+ #define ARM_MULTILIB_ARCH_V4
+#endif
+
+#if defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+ #define ARM_MULTILIB_HAS_WFI
+ #define ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
+ #define ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
+#endif
+
+#if defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__)
+ #define ARM_MULTILIB_HAS_THREAD_ID_REGISTER
+#endif
+
+#if defined(__ARM_ARCH_7A__)
+ #define ARM_MULTILIB_CACHE_LINE_MAX_64
+#endif
+
+#if defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+ #define ARM_MULTILIB_HAS_CPACR
+#endif
+
+#if !defined(__SOFTFP__)
+ #if defined(__ARM_NEON__)
+ #define ARM_MULTILIB_VFP_D32
+ #elif defined(__VFP_FP__)
+ #define ARM_MULTILIB_VFP_D16
+ #else
+ #error "FPU support not implemented"
+ #endif
+#endif
+
+#if defined(ARM_MULTILIB_VFP_D16) \
+ || defined(ARM_MULTILIB_VFP_D32)
+ #define ARM_MULTILIB_VFP
+#endif
+
+/*
+ * Define the name of the CPU family.
+ */
+
+#define CPU_NAME "ARM"
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_ARM_H */
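
Editor's note: the feature macros defined in arm.h above (ARM_MULTILIB_HAS_WFI, ARM_MULTILIB_VFP, and friends) are intended to be tested by port and BSP code. A minimal sketch of such a test follows; it is illustrative only, and the helper name example_cpu_relax() is a made-up assumption, not part of this commit.

/*
 * Illustrative sketch only (not part of the commit): adapt to the
 * multilib feature macros provided by <rtems/score/arm.h>.
 */
#include <rtems/score/arm.h>

static void example_cpu_relax( void )
{
#if defined(ARM_MULTILIB_HAS_WFI)
  /* Wait-for-interrupt is advertised for this architecture variant. */
  __asm__ volatile ( "wfi" : : : "memory" );
#else
  /* No WFI available; fall back to a plain compiler barrier. */
  __asm__ volatile ( "" : : : "memory" );
#endif
}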
diff --git a/cpukit/score/cpu/arm/include/rtems/score/armv4.h b/cpukit/score/cpu/arm/include/rtems/score/armv4.h
new file mode 100644
index 0000000000..caeaa3e553
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/armv4.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef RTEMS_SCORE_ARMV4_H
+#define RTEMS_SCORE_ARMV4_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef ARM_MULTILIB_ARCH_V4
+
+void bsp_interrupt_dispatch( void );
+
+void _ARMV4_Exception_interrupt( void );
+
+typedef void ARMV4_Exception_abort_handler( CPU_Exception_frame *frame );
+
+void _ARMV4_Exception_data_abort_set_handler(
+ ARMV4_Exception_abort_handler handler
+);
+
+void _ARMV4_Exception_data_abort( void );
+
+void _ARMV4_Exception_prefetch_abort_set_handler(
+ ARMV4_Exception_abort_handler handler
+);
+
+void _ARMV4_Exception_prefetch_abort( void );
+
+void _ARMV4_Exception_undef_default( void );
+
+void _ARMV4_Exception_swi_default( void );
+
+void _ARMV4_Exception_data_abort_default( void );
+
+void _ARMV4_Exception_pref_abort_default( void );
+
+void _ARMV4_Exception_reserved_default( void );
+
+void _ARMV4_Exception_irq_default( void );
+
+void _ARMV4_Exception_fiq_default( void );
+
+static inline uint32_t _ARMV4_Status_irq_enable( void )
+{
+ uint32_t arm_switch_reg;
+ uint32_t psr;
+
+ RTEMS_COMPILER_MEMORY_BARRIER();
+
+ __asm__ volatile (
+ ARM_SWITCH_TO_ARM
+ "mrs %[psr], cpsr\n"
+ "bic %[arm_switch_reg], %[psr], #0x80\n"
+ "msr cpsr, %[arm_switch_reg]\n"
+ ARM_SWITCH_BACK
+ : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
+ );
+
+ return psr;
+}
+
+static inline void _ARMV4_Status_restore( uint32_t psr )
+{
+ ARM_SWITCH_REGISTERS;
+
+ __asm__ volatile (
+ ARM_SWITCH_TO_ARM
+ "msr cpsr, %[psr]\n"
+ ARM_SWITCH_BACK
+ : ARM_SWITCH_OUTPUT
+ : [psr] "r" (psr)
+ );
+
+ RTEMS_COMPILER_MEMORY_BARRIER();
+}
+
+#endif /* ARM_MULTILIB_ARCH_V4 */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SCORE_ARMV4_H */
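
Editor's note: _ARMV4_Status_irq_enable() clears the CPSR I bit (unmasking IRQs) and returns the previous CPSR, while _ARMV4_Status_restore() writes that saved value back. A hedged sketch of the intended pairing follows; example_allow_nested_irqs() and long_running_work are hypothetical names introduced only for illustration.

/*
 * Illustrative sketch only (not part of the commit): temporarily unmask
 * IRQs on ARMv4, then restore the previous CPSR.
 */
#include <rtems/score/armv4.h>

#ifdef ARM_MULTILIB_ARCH_V4
static void example_allow_nested_irqs( void (*long_running_work)( void ) )
{
  /* Clears the I bit and returns the CPSR as it was before. */
  uint32_t psr = _ARMV4_Status_irq_enable();

  ( *long_running_work )();

  /* Restore the previous interrupt mask. */
  _ARMV4_Status_restore( psr );
}
#endif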
diff --git a/cpukit/score/cpu/arm/include/rtems/score/armv7m.h b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h
new file mode 100644
index 0000000000..a6cc8a34ac
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/armv7m.h
@@ -0,0 +1,620 @@
+/**
+ * @file
+ *
+ * @brief ARMV7M Architecture Support
+ */
+
+/*
+ * Copyright (c) 2011-2014 Sebastian Huber. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef RTEMS_SCORE_ARMV7M_H
+#define RTEMS_SCORE_ARMV7M_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef ARM_MULTILIB_ARCH_V7M
+
+/* Coprocessor Access Control Register, CPACR */
+#define ARMV7M_CPACR 0xe000ed88
+
+#ifndef ASM
+
+typedef struct {
+ uint32_t reserved_0;
+ uint32_t ictr;
+ uint32_t actlr;
+ uint32_t reserved_1;
+} ARMV7M_ICTAC;
+
+typedef void (*ARMV7M_Exception_handler)(void);
+
+typedef struct {
+ uint32_t register_r0;
+ uint32_t register_r1;
+ uint32_t register_r2;
+ uint32_t register_r3;
+ uint32_t register_r12;
+ void *register_lr;
+ void *register_pc;
+ uint32_t register_xpsr;
+#ifdef ARM_MULTILIB_VFP
+ uint32_t register_s0;
+ uint32_t register_s1;
+ uint32_t register_s2;
+ uint32_t register_s3;
+ uint32_t register_s4;
+ uint32_t register_s5;
+ uint32_t register_s6;
+ uint32_t register_s7;
+ uint32_t register_s8;
+ uint32_t register_s9;
+ uint32_t register_s10;
+ uint32_t register_s11;
+ uint32_t register_s12;
+ uint32_t register_s13;
+ uint32_t register_s14;
+ uint32_t register_s15;
+ uint32_t register_fpscr;
+ uint32_t reserved;
+#endif
+} ARMV7M_Exception_frame;
+
+typedef struct {
+ uint32_t comp;
+ uint32_t mask;
+ uint32_t function;
+ uint32_t reserved;
+} ARMV7M_DWT_comparator;
+
+typedef struct {
+#define ARMV7M_DWT_CTRL_NOCYCCNT (1U << 25)
+#define ARMV7M_DWT_CTRL_CYCCNTENA (1U << 0)
+ uint32_t ctrl;
+ uint32_t cyccnt;
+ uint32_t cpicnt;
+ uint32_t exccnt;
+ uint32_t sleepcnt;
+ uint32_t lsucnt;
+ uint32_t foldcnt;
+ uint32_t pcsr;
+ ARMV7M_DWT_comparator comparator[249];
+#define ARMV7M_DWT_LAR_UNLOCK_MAGIC 0xc5acce55U
+ uint32_t lar;
+ uint32_t lsr;
+} ARMV7M_DWT;
+
+typedef struct {
+ uint32_t cpuid;
+
+#define ARMV7M_SCB_ICSR_NMIPENDSET (1U << 31)
+#define ARMV7M_SCB_ICSR_PENDSVSET (1U << 28)
+#define ARMV7M_SCB_ICSR_PENDSVCLR (1U << 27)
+#define ARMV7M_SCB_ICSR_PENDSTSET (1U << 26)
+#define ARMV7M_SCB_ICSR_PENDSTCLR (1U << 25)
+#define ARMV7M_SCB_ICSR_ISRPREEMPT (1U << 23)
+#define ARMV7M_SCB_ICSR_ISRPENDING (1U << 22)
+#define ARMV7M_SCB_ICSR_VECTPENDING_GET(reg) (((reg) >> 12) & 0x1ffU)
+#define ARMV7M_SCB_ICSR_RETTOBASE (1U << 11)
+#define ARMV7M_SCB_ICSR_VECTACTIVE_GET(reg) ((reg) & 0x1ffU)
+ uint32_t icsr;
+
+ ARMV7M_Exception_handler *vtor;
+
+#define ARMV7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
+#define ARMV7M_SCB_AIRCR_ENDIANESS (1U << 15)
+#define ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT 8
+#define ARMV7M_SCB_AIRCR_PRIGROUP_MASK \
+ ((0x7U) << ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT)
+#define ARMV7M_SCB_AIRCR_PRIGROUP(val) \
+ (((val) << ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT) & ARMV7M_SCB_AIRCR_PRIGROUP_MASK)
+#define ARMV7M_SCB_AIRCR_PRIGROUP_GET(reg) \
+ (((reg) & ARMV7M_SCB_AIRCR_PRIGROUP_MASK) >> ARMV7M_SCB_AIRCR_PRIGROUP_SHIFT)
+#define ARMV7M_SCB_AIRCR_PRIGROUP_SET(reg, val) \
+ (((reg) & ~ARMV7M_SCB_AIRCR_PRIGROUP_MASK) | ARMV7M_SCB_AIRCR_PRIGROUP(val))
+#define ARMV7M_SCB_AIRCR_SYSRESETREQ (1U << 2)
+#define ARMV7M_SCB_AIRCR_VECTCLRACTIVE (1U << 1)
+#define ARMV7M_SCB_AIRCR_VECTRESET (1U << 0)
+ uint32_t aircr;
+
+ uint32_t scr;
+ uint32_t ccr;
+ uint8_t shpr [12];
+
+#define ARMV7M_SCB_SHCSR_USGFAULTENA (1U << 18)
+#define ARMV7M_SCB_SHCSR_BUSFAULTENA (1U << 17)
+#define ARMV7M_SCB_SHCSR_MEMFAULTENA (1U << 16)
+ uint32_t shcsr;
+
+ uint32_t cfsr;
+ uint32_t hfsr;
+ uint32_t dfsr;
+ uint32_t mmfar;
+ uint32_t bfar;
+ uint32_t afsr;
+ uint32_t reserved_e000ed40[18];
+ uint32_t cpacr;
+ uint32_t reserved_e000ed8c[106];
+ uint32_t fpccr;
+ uint32_t fpcar;
+ uint32_t fpdscr;
+ uint32_t mvfr0;
+ uint32_t mvfr1;
+} ARMV7M_SCB;
+
+typedef struct {
+#define ARMV7M_SYSTICK_CSR_COUNTFLAG (1U << 16)
+#define ARMV7M_SYSTICK_CSR_CLKSOURCE (1U << 2)
+#define ARMV7M_SYSTICK_CSR_TICKINT (1U << 1)
+#define ARMV7M_SYSTICK_CSR_ENABLE (1U << 0)
+ uint32_t csr;
+
+ uint32_t rvr;
+ uint32_t cvr;
+
+#define ARMV7M_SYSTICK_CALIB_NOREF (1U << 31)
+#define ARMV7M_SYSTICK_CALIB_SKEW (1U << 30)
+#define ARMV7M_SYSTICK_CALIB_TENMS_GET(reg) ((reg) & 0xffffffU)
+ uint32_t calib;
+} ARMV7M_Systick;
+
+typedef struct {
+ uint32_t iser [8];
+ uint32_t reserved_0 [24];
+ uint32_t icer [8];
+ uint32_t reserved_1 [24];
+ uint32_t ispr [8];
+ uint32_t reserved_2 [24];
+ uint32_t icpr [8];
+ uint32_t reserved_3 [24];
+ uint32_t iabr [8];
+ uint32_t reserved_4 [56];
+ uint8_t ipr [240];
+ uint32_t reserved_5 [644];
+ uint32_t stir;
+} ARMV7M_NVIC;
+
+typedef struct {
+#define ARMV7M_MPU_TYPE_IREGION_GET(reg) (((reg) >> 16) & 0xffU)
+#define ARMV7M_MPU_TYPE_DREGION_GET(reg) (((reg) >> 8) & 0xffU)
+#define ARMV7M_MPU_TYPE_SEPARATE (1U << 0)
+ uint32_t type;
+
+#define ARMV7M_MPU_CTRL_PRIVDEFENA (1U << 2)
+#define ARMV7M_MPU_CTRL_HFNMIENA (1U << 1)
+#define ARMV7M_MPU_CTRL_ENABLE (1U << 0)
+ uint32_t ctrl;
+
+ uint32_t rnr;
+
+#define ARMV7M_MPU_RBAR_ADDR_SHIFT 5
+#define ARMV7M_MPU_RBAR_ADDR_MASK \
+ ((0x7ffffffU) << ARMV7M_MPU_RBAR_ADDR_SHIFT)
+#define ARMV7M_MPU_RBAR_ADDR(val) \
+ (((val) << ARMV7M_MPU_RBAR_ADDR_SHIFT) & ARMV7M_MPU_RBAR_ADDR_MASK)
+#define ARMV7M_MPU_RBAR_ADDR_GET(reg) \
+ (((reg) & ARMV7M_MPU_RBAR_ADDR_MASK) >> ARMV7M_MPU_RBAR_ADDR_SHIFT)
+#define ARMV7M_MPU_RBAR_ADDR_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RBAR_ADDR_MASK) | ARMV7M_MPU_RBAR_ADDR(val))
+#define ARMV7M_MPU_RBAR_VALID (1U << 4)
+#define ARMV7M_MPU_RBAR_REGION_SHIFT 0
+#define ARMV7M_MPU_RBAR_REGION_MASK \
+ ((0xfU) << ARMV7M_MPU_RBAR_REGION_SHIFT)
+#define ARMV7M_MPU_RBAR_REGION(val) \
+ (((val) << ARMV7M_MPU_RBAR_REGION_SHIFT) & ARMV7M_MPU_RBAR_REGION_MASK)
+#define ARMV7M_MPU_RBAR_REGION_GET(reg) \
+ (((reg) & ARMV7M_MPU_RBAR_REGION_MASK) >> ARMV7M_MPU_RBAR_REGION_SHIFT)
+#define ARMV7M_MPU_RBAR_REGION_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RBAR_REGION_MASK) | ARMV7M_MPU_RBAR_REGION(val))
+ uint32_t rbar;
+
+#define ARMV7M_MPU_RASR_XN (1U << 28)
+#define ARMV7M_MPU_RASR_AP_SHIFT 24
+#define ARMV7M_MPU_RASR_AP_MASK \
+ ((0x7U) << ARMV7M_MPU_RASR_AP_SHIFT)
+#define ARMV7M_MPU_RASR_AP(val) \
+ (((val) << ARMV7M_MPU_RASR_AP_SHIFT) & ARMV7M_MPU_RASR_AP_MASK)
+#define ARMV7M_MPU_RASR_AP_GET(reg) \
+ (((reg) & ARMV7M_MPU_RASR_AP_MASK) >> ARMV7M_MPU_RASR_AP_SHIFT)
+#define ARMV7M_MPU_RASR_AP_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RASR_AP_MASK) | ARMV7M_MPU_RASR_AP(val))
+#define ARMV7M_MPU_RASR_TEX_SHIFT 19
+#define ARMV7M_MPU_RASR_TEX_MASK \
+ ((0x7U) << ARMV7M_MPU_RASR_TEX_SHIFT)
+#define ARMV7M_MPU_RASR_TEX(val) \
+ (((val) << ARMV7M_MPU_RASR_TEX_SHIFT) & ARMV7M_MPU_RASR_TEX_MASK)
+#define ARMV7M_MPU_RASR_TEX_GET(reg) \
+ (((reg) & ARMV7M_MPU_RASR_TEX_MASK) >> ARMV7M_MPU_RASR_TEX_SHIFT)
+#define ARMV7M_MPU_RASR_TEX_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RASR_TEX_MASK) | ARMV7M_MPU_RASR_TEX(val))
+#define ARMV7M_MPU_RASR_S (1U << 18)
+#define ARMV7M_MPU_RASR_C (1U << 17)
+#define ARMV7M_MPU_RASR_B (1U << 16)
+#define ARMV7M_MPU_RASR_SRD_SHIFT 8
+#define ARMV7M_MPU_RASR_SRD_MASK \
+ ((0xffU) << ARMV7M_MPU_RASR_SRD_SHIFT)
+#define ARMV7M_MPU_RASR_SRD(val) \
+ (((val) << ARMV7M_MPU_RASR_SRD_SHIFT) & ARMV7M_MPU_RASR_SRD_MASK)
+#define ARMV7M_MPU_RASR_SRD_GET(reg) \
+ (((reg) & ARMV7M_MPU_RASR_SRD_MASK) >> ARMV7M_MPU_RASR_SRD_SHIFT)
+#define ARMV7M_MPU_RASR_SRD_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RASR_SRD_MASK) | ARMV7M_MPU_RASR_SRD(val))
+#define ARMV7M_MPU_RASR_SIZE_SHIFT 1
+#define ARMV7M_MPU_RASR_SIZE_MASK \
+ ((0x1fU) << ARMV7M_MPU_RASR_SIZE_SHIFT)
+#define ARMV7M_MPU_RASR_SIZE(val) \
+ (((val) << ARMV7M_MPU_RASR_SIZE_SHIFT) & ARMV7M_MPU_RASR_SIZE_MASK)
+#define ARMV7M_MPU_RASR_SIZE_GET(reg) \
+ (((reg) & ARMV7M_MPU_RASR_SIZE_MASK) >> ARMV7M_MPU_RASR_SIZE_SHIFT)
+#define ARMV7M_MPU_RASR_SIZE_SET(reg, val) \
+ (((reg) & ~ARMV7M_MPU_RASR_SIZE_MASK) | ARMV7M_MPU_RASR_SIZE(val))
+#define ARMV7M_MPU_RASR_ENABLE (1U << 0)
+ uint32_t rasr;
+
+ uint32_t rbar_a1;
+ uint32_t rasr_a1;
+ uint32_t rbar_a2;
+ uint32_t rasr_a2;
+ uint32_t rbar_a3;
+ uint32_t rasr_a3;
+} ARMV7M_MPU;
+
+typedef enum {
+ ARMV7M_MPU_AP_PRIV_NO_USER_NO,
+ ARMV7M_MPU_AP_PRIV_RW_USER_NO,
+ ARMV7M_MPU_AP_PRIV_RW_USER_RO,
+ ARMV7M_MPU_AP_PRIV_RW_USER_RW,
+ ARMV7M_MPU_AP_PRIV_RO_USER_NO = 0x5,
+ ARMV7M_MPU_AP_PRIV_RO_USER_RO,
+} ARMV7M_MPU_Access_permissions;
+
+typedef enum {
+ ARMV7M_MPU_ATTR_R = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RO_USER_NO)
+ | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_XN,
+ ARMV7M_MPU_ATTR_RW = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO)
+ | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_XN | ARMV7M_MPU_RASR_B,
+ ARMV7M_MPU_ATTR_RWX = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO)
+ | ARMV7M_MPU_RASR_C | ARMV7M_MPU_RASR_B,
+ ARMV7M_MPU_ATTR_X = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_NO_USER_NO)
+ | ARMV7M_MPU_RASR_C,
+ ARMV7M_MPU_ATTR_RX = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RO_USER_NO)
+ | ARMV7M_MPU_RASR_C,
+ ARMV7M_MPU_ATTR_IO = ARMV7M_MPU_RASR_AP(ARMV7M_MPU_AP_PRIV_RW_USER_NO)
+ | ARMV7M_MPU_RASR_XN,
+} ARMV7M_MPU_Attributes;
+
+typedef enum {
+ ARMV7M_MPU_SIZE_32_B = 0x4,
+ ARMV7M_MPU_SIZE_64_B,
+ ARMV7M_MPU_SIZE_128_B,
+ ARMV7M_MPU_SIZE_256_B,
+ ARMV7M_MPU_SIZE_512_B,
+ ARMV7M_MPU_SIZE_1_KB,
+ ARMV7M_MPU_SIZE_2_KB,
+ ARMV7M_MPU_SIZE_4_KB,
+ ARMV7M_MPU_SIZE_8_KB,
+ ARMV7M_MPU_SIZE_16_KB,
+ ARMV7M_MPU_SIZE_32_KB,
+ ARMV7M_MPU_SIZE_64_KB,
+ ARMV7M_MPU_SIZE_128_KB,
+ ARMV7M_MPU_SIZE_256_KB,
+ ARMV7M_MPU_SIZE_512_KB,
+ ARMV7M_MPU_SIZE_1_MB,
+ ARMV7M_MPU_SIZE_2_MB,
+ ARMV7M_MPU_SIZE_4_MB,
+ ARMV7M_MPU_SIZE_8_MB,
+ ARMV7M_MPU_SIZE_16_MB,
+ ARMV7M_MPU_SIZE_32_MB,
+ ARMV7M_MPU_SIZE_64_MB,
+ ARMV7M_MPU_SIZE_128_MB,
+ ARMV7M_MPU_SIZE_256_MB,
+ ARMV7M_MPU_SIZE_512_MB,
+ ARMV7M_MPU_SIZE_1_GB,
+ ARMV7M_MPU_SIZE_2_GB,
+ ARMV7M_MPU_SIZE_4_GB
+} ARMV7M_MPU_Size;
+
+typedef struct {
+ uint32_t rbar;
+ uint32_t rasr;
+} ARMV7M_MPU_Region;
+
+#define ARMV7M_MPU_REGION_INITIALIZER(idx, addr, size, attr) \
+ { \
+ ((addr) & ARMV7M_MPU_RBAR_ADDR_MASK) \
+ | ARMV7M_MPU_RBAR_VALID \
+ | ARMV7M_MPU_RBAR_REGION(idx), \
+ ARMV7M_MPU_RASR_SIZE(size) | (attr) | ARMV7M_MPU_RASR_ENABLE \
+ }
+
+#define ARMV7M_MPU_REGION_DISABLED_INITIALIZER(idx) \
+ { \
+ ARMV7M_MPU_RBAR_VALID | ARMV7M_MPU_RBAR_REGION(idx), \
+ 0 \
+ }
+
+typedef struct {
+ uint32_t dhcsr;
+ uint32_t dcrsr;
+ uint32_t dcrdr;
+#define ARMV7M_DEBUG_DEMCR_VC_CORERESET (1U << 0)
+#define ARMV7M_DEBUG_DEMCR_VC_MMERR (1U << 4)
+#define ARMV7M_DEBUG_DEMCR_VC_NOCPERR (1U << 5)
+#define ARMV7M_DEBUG_DEMCR_VC_CHKERR (1U << 6)
+#define ARMV7M_DEBUG_DEMCR_VC_STATERR (1U << 7)
+#define ARMV7M_DEBUG_DEMCR_VC_BUSERR (1U << 8)
+#define ARMV7M_DEBUG_DEMCR_VC_INTERR (1U << 9)
+#define ARMV7M_DEBUG_DEMCR_VC_HARDERR (1U << 10)
+#define ARMV7M_DEBUG_DEMCR_MON_EN (1U << 16)
+#define ARMV7M_DEBUG_DEMCR_MON_PEND (1U << 17)
+#define ARMV7M_DEBUG_DEMCR_MON_STEP (1U << 18)
+#define ARMV7M_DEBUG_DEMCR_MON_REQ (1U << 19)
+#define ARMV7M_DEBUG_DEMCR_TRCENA (1U << 24)
+ uint32_t demcr;
+} ARMV7M_DEBUG;
+
+#define ARMV7M_DWT_BASE 0xe0001000
+#define ARMV7M_SCS_BASE 0xe000e000
+#define ARMV7M_ICTAC_BASE (ARMV7M_SCS_BASE + 0x0)
+#define ARMV7M_SYSTICK_BASE (ARMV7M_SCS_BASE + 0x10)
+#define ARMV7M_NVIC_BASE (ARMV7M_SCS_BASE + 0x100)
+#define ARMV7M_SCB_BASE (ARMV7M_SCS_BASE + 0xd00)
+#define ARMV7M_MPU_BASE (ARMV7M_SCS_BASE + 0xd90)
+#define ARMV7M_DEBUG_BASE (ARMV7M_SCS_BASE + 0xdf0)
+
+#define _ARMV7M_DWT \
+ ((volatile ARMV7M_DWT *) ARMV7M_DWT_BASE)
+#define _ARMV7M_ICTAC \
+ ((volatile ARMV7M_ICTAC *) ARMV7M_ICTAC_BASE)
+#define _ARMV7M_SCB \
+ ((volatile ARMV7M_SCB *) ARMV7M_SCB_BASE)
+#define _ARMV7M_Systick \
+ ((volatile ARMV7M_Systick *) ARMV7M_SYSTICK_BASE)
+#define _ARMV7M_NVIC \
+ ((volatile ARMV7M_NVIC *) ARMV7M_NVIC_BASE)
+#define _ARMV7M_MPU \
+ ((volatile ARMV7M_MPU *) ARMV7M_MPU_BASE)
+#define _ARMV7M_DEBUG \
+ ((volatile ARMV7M_DEBUG *) ARMV7M_DEBUG_BASE)
+
+#define ARMV7M_VECTOR_MSP 0
+#define ARMV7M_VECTOR_RESET 1
+#define ARMV7M_VECTOR_NMI 2
+#define ARMV7M_VECTOR_HARD_FAULT 3
+#define ARMV7M_VECTOR_MEM_MANAGE 4
+#define ARMV7M_VECTOR_BUS_FAULT 5
+#define ARMV7M_VECTOR_USAGE_FAULT 6
+#define ARMV7M_VECTOR_SVC 11
+#define ARMV7M_VECTOR_DEBUG_MONITOR 12
+#define ARMV7M_VECTOR_PENDSV 14
+#define ARMV7M_VECTOR_SYSTICK 15
+#define ARMV7M_VECTOR_IRQ(n) ((n) + 16)
+#define ARMV7M_IRQ_OF_VECTOR(n) ((n) - 16)
+
+#define ARMV7M_EXCEPTION_PRIORITY_LOWEST 255
+
+static inline bool _ARMV7M_Is_vector_an_irq( int vector )
+{
+ /* External (i.e. non-system) IRQs start after the SysTick vector. */
+ return vector > ARMV7M_VECTOR_SYSTICK;
+}
+
+static inline uint32_t _ARMV7M_Get_basepri(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], basepri\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_basepri(uint32_t val)
+{
+ __asm__ volatile ("msr basepri, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_primask(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], primask\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_primask(uint32_t val)
+{
+ __asm__ volatile ("msr primask, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_faultmask(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], faultmask\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_faultmask(uint32_t val)
+{
+ __asm__ volatile ("msr faultmask, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_control(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], control\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_control(uint32_t val)
+{
+ __asm__ volatile ("msr control, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_MSP(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], msp\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_MSP(uint32_t val)
+{
+ __asm__ volatile ("msr msp, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_PSP(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], psp\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline void _ARMV7M_Set_PSP(uint32_t val)
+{
+ __asm__ volatile ("msr psp, %[val]\n" : : [val] "r" (val));
+}
+
+static inline uint32_t _ARMV7M_Get_XPSR(void)
+{
+ uint32_t val;
+ __asm__ volatile ("mrs %[val], xpsr\n" : [val] "=&r" (val));
+ return val;
+}
+
+static inline bool _ARMV7M_NVIC_Is_enabled( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ return (_ARMV7M_NVIC->iser [index] & bit) != 0;
+}
+
+static inline void _ARMV7M_NVIC_Set_enable( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ _ARMV7M_NVIC->iser [index] = bit;
+}
+
+static inline void _ARMV7M_NVIC_Clear_enable( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ _ARMV7M_NVIC->icer [index] = bit;
+}
+
+static inline bool _ARMV7M_NVIC_Is_pending( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ return (_ARMV7M_NVIC->ispr [index] & bit) != 0;
+}
+
+static inline void _ARMV7M_NVIC_Set_pending( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ _ARMV7M_NVIC->ispr [index] = bit;
+}
+
+static inline void _ARMV7M_NVIC_Clear_pending( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ _ARMV7M_NVIC->icpr [index] = bit;
+}
+
+static inline bool _ARMV7M_NVIC_Is_active( int irq )
+{
+ int index = irq >> 5;
+ uint32_t bit = 1U << (irq & 0x1f);
+
+ return (_ARMV7M_NVIC->iabr [index] & bit) != 0;
+}
+
+static inline void _ARMV7M_NVIC_Set_priority( int irq, int priority )
+{
+ _ARMV7M_NVIC->ipr [irq] = (uint8_t) priority;
+}
+
+static inline int _ARMV7M_NVIC_Get_priority( int irq )
+{
+ return _ARMV7M_NVIC->ipr [irq];
+}
+
+static inline bool _ARMV7M_DWT_Enable_CYCCNT( void )
+{
+ uint32_t demcr;
+ uint32_t dwt_ctrl;
+
+ demcr = _ARMV7M_DEBUG->demcr;
+ _ARMV7M_DEBUG->demcr = demcr | ARMV7M_DEBUG_DEMCR_TRCENA;
+ _ARM_Data_synchronization_barrier();
+
+ dwt_ctrl = _ARMV7M_DWT->ctrl;
+ if ((dwt_ctrl & ARMV7M_DWT_CTRL_NOCYCCNT) == 0) {
+ _ARMV7M_DWT->lar = ARMV7M_DWT_LAR_UNLOCK_MAGIC;
+ _ARM_Data_synchronization_barrier();
+ _ARMV7M_DWT->ctrl = dwt_ctrl | ARMV7M_DWT_CTRL_CYCCNTENA;
+ return true;
+ } else {
+ _ARMV7M_DEBUG->demcr = demcr;
+ return false;
+ }
+}
+
+int _ARMV7M_Get_exception_priority( int vector );
+
+void _ARMV7M_Set_exception_priority( int vector, int priority );
+
+ARMV7M_Exception_handler _ARMV7M_Get_exception_handler( int index );
+
+void _ARMV7M_Set_exception_handler(
+ int index,
+ ARMV7M_Exception_handler handler
+);
+
+/**
+ * @brief ARMV7M set exception priority and handler.
+ */
+void _ARMV7M_Set_exception_priority_and_handler(
+ int index,
+ int priority,
+ ARMV7M_Exception_handler handler
+);
+
+void _ARMV7M_Exception_default( void );
+
+void _ARMV7M_Interrupt_service_enter( void );
+
+void _ARMV7M_Interrupt_service_leave( void );
+
+void _ARMV7M_Pendable_service_call( void );
+
+void _ARMV7M_Supervisor_call( void );
+
+#endif /* ASM */
+
+#endif /* ARM_MULTILIB_ARCH_V7M */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SCORE_ARMV7M_H */
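
Editor's note: the NVIC helpers above operate on one interrupt line at a time through the ISER/ICER/ISPR/ICPR bit banks and the byte-wide IPR priority array. The sketch below shows one plausible enable sequence; the interrupt number, priority value, and function name are illustrative assumptions, not part of this commit.

/*
 * Illustrative sketch only (not part of the commit): enable one external
 * interrupt line via the ARMv7-M NVIC helpers.
 */
#include <rtems/score/armv7m.h>

#ifdef ARM_MULTILIB_ARCH_V7M
static void example_enable_irq( void )
{
  int irq = 5;          /* hypothetical external interrupt line */
  int priority = 0xc0;  /* only the upper implemented bits of the byte matter */

  _ARMV7M_NVIC_Set_priority( irq, priority );
  _ARMV7M_NVIC_Clear_pending( irq );
  _ARMV7M_NVIC_Set_enable( irq );
}
#endif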
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
new file mode 100644
index 0000000000..05e236c75a
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
@@ -0,0 +1,702 @@
+/**
+ * @file
+ *
+ * @brief ARM Architecture Support API
+ */
+
+/*
+ * This include file contains information pertaining to the ARM
+ * processor.
+ *
+ * Copyright (c) 2009, 2017 embedded brains GmbH
+ *
+ * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
+ *
+ * Copyright (c) 2006 OAR Corporation
+ *
+ * Copyright (c) 2002 Advent Networks, Inc.
+ * Jay Monkman <jmonkman@adventnetworks.com>
+ *
+ * COPYRIGHT (c) 2000 Canon Research Centre France SA.
+ * Emmanuel Raguet, mailto:raguet@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#ifndef _RTEMS_SCORE_CPU_H
+#define _RTEMS_SCORE_CPU_H
+
+#include <rtems/score/types.h>
+#include <rtems/score/arm.h>
+
+#if defined(ARM_MULTILIB_ARCH_V4)
+
+/**
+ * @defgroup ScoreCPUARM ARM Specific Support
+ *
+ * @ingroup ScoreCPU
+ *
+ * @brief ARM specific support.
+ */
+/**@{**/
+
+#if defined(__thumb__) && !defined(__thumb2__)
+ #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
+ #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
+ #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
+ #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
+ #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
+#else
+ #define ARM_SWITCH_REGISTERS
+ #define ARM_SWITCH_TO_ARM
+ #define ARM_SWITCH_BACK
+ #define ARM_SWITCH_OUTPUT
+ #define ARM_SWITCH_ADDITIONAL_OUTPUT
+#endif
+
+/**
+ * @name Program Status Register
+ */
+/**@{**/
+
+#define ARM_PSR_N (1 << 31)
+#define ARM_PSR_Z (1 << 30)
+#define ARM_PSR_C (1 << 29)
+#define ARM_PSR_V (1 << 28)
+#define ARM_PSR_Q (1 << 27)
+#define ARM_PSR_J (1 << 24)
+#define ARM_PSR_GE_SHIFT 16
+#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
+#define ARM_PSR_E (1 << 9)
+#define ARM_PSR_A (1 << 8)
+#define ARM_PSR_I (1 << 7)
+#define ARM_PSR_F (1 << 6)
+#define ARM_PSR_T (1 << 5)
+#define ARM_PSR_M_SHIFT 0
+#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
+#define ARM_PSR_M_USR 0x10
+#define ARM_PSR_M_FIQ 0x11
+#define ARM_PSR_M_IRQ 0x12
+#define ARM_PSR_M_SVC 0x13
+#define ARM_PSR_M_ABT 0x17
+#define ARM_PSR_M_HYP 0x1a
+#define ARM_PSR_M_UND 0x1b
+#define ARM_PSR_M_SYS 0x1f
+
+/** @} */
+
+/** @} */
+
+#endif /* defined(ARM_MULTILIB_ARCH_V4) */
+
+/**
+ * @addtogroup ScoreCPU
+ */
+/**@{**/
+
+/*
+ * The ARM uses the PIC interrupt model.
+ */
+#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+#define CPU_ISR_PASSES_FRAME_POINTER FALSE
+
+#define CPU_HARDWARE_FP FALSE
+
+#define CPU_SOFTWARE_FP FALSE
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+#define CPU_USE_DEFERRED_FP_SWITCH FALSE
+
+#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE
+
+#if defined(ARM_MULTILIB_HAS_WFI)
+ #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
+#else
+ #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+#endif
+
+#define CPU_STACK_GROWS_UP FALSE
+
+#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
+ #define CPU_CACHE_LINE_BYTES 64
+#else
+ #define CPU_CACHE_LINE_BYTES 32
+#endif
+
+#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
+
+#define CPU_MODES_INTERRUPT_MASK 0x1
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
+
+#define CPU_STACK_MINIMUM_SIZE (1024 * 4)
+
+/* AAPCS, section 4.1, Fundamental Data Types */
+#define CPU_SIZEOF_POINTER 4
+
+/* AAPCS, section 4.1, Fundamental Data Types */
+#define CPU_ALIGNMENT 8
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+/* AAPCS, section 4.3.1, Aggregates */
+#define CPU_PARTITION_ALIGNMENT 4
+
+/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
+#define CPU_STACK_ALIGNMENT 8
+
+/*
+ * Bitfield handler macros.
+ *
+ * If we had a particularly fast function for finding the first
+ * bit set in a word, it would go here. Since we don't (*), we'll
+ * just use the universal macros.
+ *
+ * (*) On ARM v5 and later, the CLZ instruction could be used to
+ *     implement this much more quickly than the generic macros.
+ */
+
+#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
+
+#define CPU_MAXIMUM_PROCESSORS 32
+
+/** @} */
+
+#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
+ #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
+#endif
+
+#ifdef ARM_MULTILIB_VFP
+ #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
+#endif
+
+#ifdef ARM_MULTILIB_ARCH_V4
+ #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
+#endif
+
+#ifdef RTEMS_SMP
+ #if defined(ARM_MULTILIB_VFP)
+ #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
+ #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
+ #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
+ #else
+ #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
+ #endif
+#endif
+
+#define ARM_EXCEPTION_FRAME_SIZE 80
+
+#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52
+
+#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72
+
+#define ARM_VFP_CONTEXT_SIZE 264
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreCPU
+ */
+/**@{**/
+
+typedef struct {
+#if defined(ARM_MULTILIB_ARCH_V4)
+ uint32_t register_r4;
+ uint32_t register_r5;
+ uint32_t register_r6;
+ uint32_t register_r7;
+ uint32_t register_r8;
+ uint32_t register_r9;
+ uint32_t register_r10;
+ uint32_t register_fp;
+ uint32_t register_sp;
+ uint32_t register_lr;
+ uint32_t isr_dispatch_disable;
+#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
+ uint32_t register_r4;
+ uint32_t register_r5;
+ uint32_t register_r6;
+ uint32_t register_r7;
+ uint32_t register_r8;
+ uint32_t register_r9;
+ uint32_t register_r10;
+ uint32_t register_r11;
+ void *register_lr;
+ void *register_sp;
+ uint32_t isr_nest_level;
+#else
+ void *register_sp;
+#endif
+#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
+ uint32_t thread_id;
+#endif
+#ifdef ARM_MULTILIB_VFP
+ uint64_t register_d8;
+ uint64_t register_d9;
+ uint64_t register_d10;
+ uint64_t register_d11;
+ uint64_t register_d12;
+ uint64_t register_d13;
+ uint64_t register_d14;
+ uint64_t register_d15;
+#endif
+#ifdef RTEMS_SMP
+ volatile bool is_executing;
+#endif
+} Context_Control;
+
+typedef struct {
+ /* Not supported */
+} Context_Control_fp;
+
+static inline void _ARM_Data_memory_barrier( void )
+{
+#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
+ __asm__ volatile ( "dmb" : : : "memory" );
+#else
+ RTEMS_COMPILER_MEMORY_BARRIER();
+#endif
+}
+
+static inline void _ARM_Data_synchronization_barrier( void )
+{
+#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
+ __asm__ volatile ( "dsb" : : : "memory" );
+#else
+ RTEMS_COMPILER_MEMORY_BARRIER();
+#endif
+}
+
+static inline void _ARM_Instruction_synchronization_barrier( void )
+{
+#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
+ __asm__ volatile ( "isb" : : : "memory" );
+#else
+ RTEMS_COMPILER_MEMORY_BARRIER();
+#endif
+}
+
+static inline uint32_t arm_interrupt_disable( void )
+{
+ uint32_t level;
+
+#if defined(ARM_MULTILIB_ARCH_V4)
+ uint32_t arm_switch_reg;
+
+ /*
+ * Disable only normal interrupts (IRQ).
+ *
+ * In order to support fast interrupts (FIQ) such that they can do something
+ * useful, we have to disable the operating system support for FIQs. Having
+ * operating system support for them would require that FIQs are disabled
+ * during critical sections of the operating system and application. At this
+ * level IRQs and FIQs would be equal. It is true that FIQs could interrupt
+ * the non critical sections of IRQs, so here they would have a small
+ * advantage. Without operating system support, the FIQs can execute at any
+ * time (of course not during the service of another FIQ). If someone needs
+ * operating system support for a FIQ, she can trigger a software interrupt and
+ * service the request in a two-step process.
+ */
+ __asm__ volatile (
+ ARM_SWITCH_TO_ARM
+ "mrs %[level], cpsr\n"
+ "orr %[arm_switch_reg], %[level], #0x80\n"
+ "msr cpsr, %[arm_switch_reg]\n"
+ ARM_SWITCH_BACK
+ : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
+ );
+#elif defined(ARM_MULTILIB_ARCH_V7M)
+ uint32_t basepri = 0x80;
+
+ __asm__ volatile (
+ "mrs %[level], basepri\n"
+ "msr basepri_max, %[basepri]\n"
+ : [level] "=&r" (level)
+ : [basepri] "r" (basepri)
+ );
+#endif
+
+ return level;
+}
+
+static inline void arm_interrupt_enable( uint32_t level )
+{
+#if defined(ARM_MULTILIB_ARCH_V4)
+ ARM_SWITCH_REGISTERS;
+
+ __asm__ volatile (
+ ARM_SWITCH_TO_ARM
+ "msr cpsr, %[level]\n"
+ ARM_SWITCH_BACK
+ : ARM_SWITCH_OUTPUT
+ : [level] "r" (level)
+ );
+#elif defined(ARM_MULTILIB_ARCH_V7M)
+ __asm__ volatile (
+ "msr basepri, %[level]\n"
+ :
+ : [level] "r" (level)
+ );
+#endif
+}
+
+static inline void arm_interrupt_flash( uint32_t level )
+{
+#if defined(ARM_MULTILIB_ARCH_V4)
+ uint32_t arm_switch_reg;
+
+ __asm__ volatile (
+ ARM_SWITCH_TO_ARM
+ "mrs %[arm_switch_reg], cpsr\n"
+ "msr cpsr, %[level]\n"
+ "msr cpsr, %[arm_switch_reg]\n"
+ ARM_SWITCH_BACK
+ : [arm_switch_reg] "=&r" (arm_switch_reg)
+ : [level] "r" (level)
+ );
+#elif defined(ARM_MULTILIB_ARCH_V7M)
+ uint32_t basepri;
+
+ __asm__ volatile (
+ "mrs %[basepri], basepri\n"
+ "msr basepri, %[level]\n"
+ "msr basepri, %[basepri]\n"
+ : [basepri] "=&r" (basepri)
+ : [level] "r" (level)
+ );
+#endif
+}
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+ do { \
+ _isr_cookie = arm_interrupt_disable(); \
+ } while (0)
+
+#define _CPU_ISR_Enable( _isr_cookie ) \
+ arm_interrupt_enable( _isr_cookie )
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+ arm_interrupt_flash( _isr_cookie )
+
+RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
+{
+#if defined(ARM_MULTILIB_ARCH_V4)
+ return ( level & 0x80 ) == 0;
+#elif defined(ARM_MULTILIB_ARCH_V7M)
+ return level == 0;
+#endif
+}
+
+void _CPU_ISR_Set_level( uint32_t level );
+
+uint32_t _CPU_ISR_Get_level( void );
+
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ void *stack_area_begin,
+ size_t stack_area_size,
+ uint32_t new_level,
+ void (*entry_point)( void ),
+ bool is_fp,
+ void *tls_area
+);
+
+#define _CPU_Context_Get_SP( _context ) \
+ (_context)->register_sp
+
+#ifdef RTEMS_SMP
+ static inline bool _CPU_Context_Get_is_executing(
+ const Context_Control *context
+ )
+ {
+ return context->is_executing;
+ }
+
+ static inline void _CPU_Context_Set_is_executing(
+ Context_Control *context,
+ bool is_executing
+ )
+ {
+ context->is_executing = is_executing;
+ }
+#endif
+
+#define _CPU_Context_Restart_self( _the_context ) \
+ _CPU_Context_restore( (_the_context) );
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+ do { \
+ *(*(_destination)) = _CPU_Null_fp_context; \
+ } while (0)
+
+#define _CPU_Fatal_halt( _source, _err ) \
+ do { \
+ uint32_t _level; \
+ uint32_t _error = _err; \
+ _CPU_ISR_Disable( _level ); \
+ (void) _level; \
+ __asm__ volatile ("mov r0, %0\n" \
+ : "=r" (_error) \
+ : "0" (_error) \
+ : "r0" ); \
+ while (1); \
+ } while (0);
+
+/**
+ * @brief CPU initialization.
+ */
+void _CPU_Initialize( void );
+
+void _CPU_ISR_install_vector(
+ uint32_t vector,
+ proc_ptr new_handler,
+ proc_ptr *old_handler
+);
+
+/**
+ * @brief CPU switch context.
+ */
+void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
+
+void _CPU_Context_restore( Context_Control *new_context )
+ RTEMS_NO_RETURN;
+
+#if defined(ARM_MULTILIB_ARCH_V7M)
+ void _ARMV7M_Start_multitasking( Context_Control *heir )
+ RTEMS_NO_RETURN;
+ #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
+#endif
+
+void _CPU_Context_volatile_clobber( uintptr_t pattern );
+
+void _CPU_Context_validate( uintptr_t pattern );
+
+#ifdef RTEMS_SMP
+ uint32_t _CPU_SMP_Initialize( void );
+
+ bool _CPU_SMP_Start_processor( uint32_t cpu_index );
+
+ void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
+
+ void _CPU_SMP_Prepare_start_multitasking( void );
+
+ static inline uint32_t _CPU_SMP_Get_current_processor( void )
+ {
+ uint32_t mpidr;
+
+ /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
+ __asm__ volatile (
+ "mrc p15, 0, %[mpidr], c0, c0, 5\n"
+ : [mpidr] "=&r" (mpidr)
+ );
+
+ return mpidr & 0xffU;
+ }
+
+ void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
+
+ static inline void _ARM_Send_event( void )
+ {
+ __asm__ volatile ( "sev" : : : "memory" );
+ }
+
+ static inline void _ARM_Wait_for_event( void )
+ {
+ __asm__ volatile ( "wfe" : : : "memory" );
+ }
+
+ static inline void _CPU_SMP_Processor_event_broadcast( void )
+ {
+ _ARM_Data_synchronization_barrier();
+ _ARM_Send_event();
+ }
+
+ static inline void _CPU_SMP_Processor_event_receive( void )
+ {
+ _ARM_Wait_for_event();
+ _ARM_Data_memory_barrier();
+ }
+#endif
+
+
+static inline uint32_t CPU_swap_u32( uint32_t value )
+{
+#if defined(__thumb2__)
+ __asm__ volatile (
+ "rev %0, %0"
+ : "=r" (value)
+ : "0" (value)
+ );
+ return value;
+#elif defined(__thumb__)
+ uint32_t byte1, byte2, byte3, byte4, swapped;
+
+ byte4 = (value >> 24) & 0xff;
+ byte3 = (value >> 16) & 0xff;
+ byte2 = (value >> 8) & 0xff;
+ byte1 = value & 0xff;
+
+ swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+ return swapped;
+#else
+ uint32_t tmp = value; /* make compiler warnings go away */
+ __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
+ "BIC %1, %1, #0xff0000\n"
+ "MOV %0, %0, ROR #8\n"
+ "EOR %0, %0, %1, LSR #8\n"
+ : "=r" (value), "=r" (tmp)
+ : "0" (value), "1" (tmp));
+ return value;
+#endif
+}
+
+static inline uint16_t CPU_swap_u16( uint16_t value )
+{
+#if defined(__thumb2__)
+ __asm__ volatile (
+ "rev16 %0, %0"
+ : "=r" (value)
+ : "0" (value)
+ );
+ return value;
+#else
+ return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
+#endif
+}
+
+typedef uint32_t CPU_Counter_ticks;
+
+CPU_Counter_ticks _CPU_Counter_read( void );
+
+CPU_Counter_ticks _CPU_Counter_difference(
+ CPU_Counter_ticks second,
+ CPU_Counter_ticks first
+);
+
+#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
+ void *_CPU_Thread_Idle_body( uintptr_t ignored );
+#endif
+
+/** @} */
+
+/**
+ * @addtogroup ScoreCPUARM
+ */
+/**@{**/
+
+#if defined(ARM_MULTILIB_ARCH_V4)
+
+typedef enum {
+ ARM_EXCEPTION_RESET = 0,
+ ARM_EXCEPTION_UNDEF = 1,
+ ARM_EXCEPTION_SWI = 2,
+ ARM_EXCEPTION_PREF_ABORT = 3,
+ ARM_EXCEPTION_DATA_ABORT = 4,
+ ARM_EXCEPTION_RESERVED = 5,
+ ARM_EXCEPTION_IRQ = 6,
+ ARM_EXCEPTION_FIQ = 7,
+ MAX_EXCEPTIONS = 8,
+ ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
+} Arm_symbolic_exception_name;
+
+#endif /* defined(ARM_MULTILIB_ARCH_V4) */
+
+typedef struct {
+ uint32_t register_fpexc;
+ uint32_t register_fpscr;
+ uint64_t register_d0;
+ uint64_t register_d1;
+ uint64_t register_d2;
+ uint64_t register_d3;
+ uint64_t register_d4;
+ uint64_t register_d5;
+ uint64_t register_d6;
+ uint64_t register_d7;
+ uint64_t register_d8;
+ uint64_t register_d9;
+ uint64_t register_d10;
+ uint64_t register_d11;
+ uint64_t register_d12;
+ uint64_t register_d13;
+ uint64_t register_d14;
+ uint64_t register_d15;
+ uint64_t register_d16;
+ uint64_t register_d17;
+ uint64_t register_d18;
+ uint64_t register_d19;
+ uint64_t register_d20;
+ uint64_t register_d21;
+ uint64_t register_d22;
+ uint64_t register_d23;
+ uint64_t register_d24;
+ uint64_t register_d25;
+ uint64_t register_d26;
+ uint64_t register_d27;
+ uint64_t register_d28;
+ uint64_t register_d29;
+ uint64_t register_d30;
+ uint64_t register_d31;
+} ARM_VFP_context;
+
+typedef struct {
+ uint32_t register_r0;
+ uint32_t register_r1;
+ uint32_t register_r2;
+ uint32_t register_r3;
+ uint32_t register_r4;
+ uint32_t register_r5;
+ uint32_t register_r6;
+ uint32_t register_r7;
+ uint32_t register_r8;
+ uint32_t register_r9;
+ uint32_t register_r10;
+ uint32_t register_r11;
+ uint32_t register_r12;
+ uint32_t register_sp;
+ void *register_lr;
+ void *register_pc;
+#if defined(ARM_MULTILIB_ARCH_V4)
+ uint32_t register_cpsr;
+ Arm_symbolic_exception_name vector;
+#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
+ uint32_t register_xpsr;
+ uint32_t vector;
+#endif
+ const ARM_VFP_context *vfp_context;
+ uint32_t reserved_for_stack_alignment;
+} CPU_Exception_frame;
+
+void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+
+void _ARM_Exception_default( CPU_Exception_frame *frame );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ASM */
+
+#endif /* _RTEMS_SCORE_CPU_H */
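
Editor's note: the _CPU_ISR_Disable()/_CPU_ISR_Flash()/_CPU_ISR_Enable() macros defined above bracket critical sections with an opaque level cookie (the saved CPSR on ARMv4, the saved BASEPRI on ARMv7-M). A minimal usage sketch follows; the function and variable names are hypothetical and only serve to show the pairing.

/*
 * Illustrative sketch only (not part of the commit): a critical section
 * built from the ISR control macros of <rtems/score/cpu.h>.
 */
#include <rtems/score/cpu.h>

static volatile uint32_t example_shared_counter; /* hypothetical shared data */

static void example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );
  ++example_shared_counter;   /* protected update */
  _CPU_ISR_Flash( level );    /* briefly re-enable interrupts to bound latency */
  ++example_shared_counter;   /* still protected afterwards */
  _CPU_ISR_Enable( level );
}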
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h
new file mode 100644
index 0000000000..c430911373
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu_asm.h
@@ -0,0 +1,39 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreCPU
+ *
+ * @brief ARM Assembler Support API
+ */
+
+/*
+ * COPYRIGHT (c) 2002 by Advent Networks, Inc.
+ * Jay Monkman <jmonkman@adventnetworks.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ * This file is the include file for cpu_asm.S
+ */
+
+#ifndef _RTEMS_SCORE_CPU_ASM_H
+#define _RTEMS_SCORE_CPU_ASM_H
+
+
+/* Registers saved in context switch: */
+.set REG_CPSR, 0
+.set REG_R4, 4
+.set REG_R5, 8
+.set REG_R6, 12
+.set REG_R7, 16
+.set REG_R8, 20
+.set REG_R9, 24
+.set REG_R10, 28
+.set REG_R11, 32
+.set REG_SP, 36
+.set REG_LR, 40
+.set REG_PC, 44
+.set SIZE_REGS, REG_PC + 4
+
+#endif
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h b/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h
new file mode 100644
index 0000000000..598ee76b20
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpuatomic.h
@@ -0,0 +1,14 @@
+/*
+ * COPYRIGHT (c) 2012-2013 Deng Hengyi.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
+#define _RTEMS_SCORE_ATOMIC_CPU_H
+
+#include <rtems/score/cpustdatomic.h>
+
+#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
new file mode 100644
index 0000000000..0885c2ef39
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpuimpl.h
@@ -0,0 +1,111 @@
+/**
+ * @file
+ *
+ * @brief CPU Port Implementation API
+ */
+
+/*
+ * Copyright (c) 2013, 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_CPUIMPL_H
+#define _RTEMS_SCORE_CPUIMPL_H
+
+#include <rtems/score/cpu.h>
+
+#define CPU_PER_CPU_CONTROL_SIZE 0
+
+#ifdef ARM_MULTILIB_ARCH_V4
+
+#if defined(ARM_MULTILIB_VFP_D32)
+#define CPU_INTERRUPT_FRAME_SIZE 240
+#elif defined(ARM_MULTILIB_VFP)
+#define CPU_INTERRUPT_FRAME_SIZE 112
+#else
+#define CPU_INTERRUPT_FRAME_SIZE 40
+#endif
+
+#endif /* ARM_MULTILIB_ARCH_V4 */
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef ARM_MULTILIB_ARCH_V4
+
+typedef struct {
+#ifdef ARM_MULTILIB_VFP
+ uint32_t fpscr;
+#ifdef ARM_MULTILIB_VFP_D32
+ double d16;
+ double d17;
+ double d18;
+ double d19;
+ double d20;
+ double d21;
+ double d22;
+ double d23;
+ double d24;
+ double d25;
+ double d26;
+ double d27;
+ double d28;
+ double d29;
+ double d30;
+ double d31;
+#endif /* ARM_MULTILIB_VFP_D32 */
+ double d0;
+ double d1;
+ double d2;
+ double d3;
+ double d4;
+ double d5;
+ double d6;
+ double d7;
+#endif /* ARM_MULTILIB_VFP */
+ uint32_t r9;
+ uint32_t lr;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t return_pc;
+ uint32_t return_cpsr;
+ uint32_t r7;
+ uint32_t r12;
+} CPU_Interrupt_frame;
+
+#ifdef RTEMS_SMP
+
+static inline struct Per_CPU_Control *_ARM_Get_current_per_CPU_control( void )
+{
+ struct Per_CPU_Control *cpu_self;
+
+ /* Use PL1 only Thread ID Register (TPIDRPRW) */
+ __asm__ volatile (
+ "mrc p15, 0, %0, c13, c0, 4"
+ : "=r" ( cpu_self )
+ );
+
+ return cpu_self;
+}
+
+#define _CPU_Get_current_per_CPU_control() _ARM_Get_current_per_CPU_control()
+
+#endif /* RTEMS_SMP */
+
+#endif /* ARM_MULTILIB_ARCH_V4 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ASM */
+
+#endif /* _RTEMS_SCORE_CPUIMPL_H */
diff --git a/cpukit/score/cpu/arm/include/rtems/score/types.h b/cpukit/score/cpu/arm/include/rtems/score/types.h
new file mode 100644
index 0000000000..37a56df029
--- /dev/null
+++ b/cpukit/score/cpu/arm/include/rtems/score/types.h
@@ -0,0 +1,51 @@
+/**
+ * @file
+ *
+ * @brief ARM Architecture Types API
+ */
+
+/*
+ * This include file contains type definitions pertaining to the
+ * ARM processor family.
+ *
+ * COPYRIGHT (c) 2000 Canon Research Centre France SA.
+ * Emmanuel Raguet, mailto:raguet@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#ifndef _RTEMS_SCORE_TYPES_H
+#define _RTEMS_SCORE_TYPES_H
+
+#include <rtems/score/basedefs.h>
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreCPU
+ */
+/**@{**/
+
+/*
+ * This section defines the basic types for this processor.
+ */
+
+/** Type that can store a 32-bit integer or a pointer. */
+typedef uintptr_t CPU_Uint32ptr;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !ASM */
+
+#endif