author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2012-11-12 11:53:16 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2012-11-13 09:38:02 +0100
commit     84ff9a471ae9eb1c2c0c73cec40fe847cb07884f (patch)
tree       e0a465ac1cd0d2bbe6079c2febc2766c328bef60 /cpukit/score/include/rtems/score/percpu.h
parent     bsps/sparc: Add BSP_INITIAL_EXTENSION to <bsp.h> (diff)
download   rtems-84ff9a471ae9eb1c2c0c73cec40fe847cb07884f.tar.bz2
score: Fix per CPU member offsets
Offset calculation was wrong for 16-bit and 64-bit pointer targets. Remove unused offsets. Move Per_CPU_Control::dispatch_necessary after Per_CPU_Control::isr_nest_level. Move the SMP members to the end of the structure. All assembler-relevant members are now at the beginning of the structure.
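The root cause, illustrated below with a hypothetical standalone check (not part of the commit): the old assembler offsets stepped through the structure in multiples of __RTEMS_SIZEOF_VOID_P__, i.e. they assumed every member effectively occupies one pointer-sized slot. On a 16-bit pointer target the 4-byte isr_nest_level overflows its 2-byte slot, and on a 64-bit SMP build the 4-byte lock/state/message members fill only half of theirs, so the computed offsets drift away from what offsetof() reports. A minimal sketch of the SMP case, with SMP_lock_spinlock_simple_Control modeled as a plain uint32_t (an assumption):

/* offset_drift.c - hypothetical illustration, not RTEMS code.
 * Models the pre-patch SMP head of Per_CPU_Control and compares the old
 * pointer-sized offset formulas with the offsets the compiler really uses.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t lock;     /* SMP_lock_spinlock_simple_Control modeled as uint32_t (assumption) */
  uint32_t state;
  uint32_t message;
} Old_SMP_head;

/* Old formulas: every member assumed to be pointer-sized. */
#define OLD_PER_CPU_STATE   (1 * sizeof(void *))
#define OLD_PER_CPU_MESSAGE (2 * sizeof(void *))

int main(void)
{
  /* On a 64-bit pointer target this prints 4 vs 8 and 8 vs 16: the formula
   * overshoots because uint32_t is half a pointer wide.  On a 32-bit target
   * both columns agree, which is why the bug stayed hidden there.
   */
  printf("state:   real %zu, formula %zu\n",
         offsetof(Old_SMP_head, state), OLD_PER_CPU_STATE);
  printf("message: real %zu, formula %zu\n",
         offsetof(Old_SMP_head, message), OLD_PER_CPU_MESSAGE);
  return 0;
}

The patch avoids this by exporting only the members the assembler actually needs, placing them at the start of the structure, and advancing by exact member sizes (an explicit 4 bytes past the uint32_t nest level) instead of pointer multiples.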
Diffstat (limited to 'cpukit/score/include/rtems/score/percpu.h')
-rw-r--r--  cpukit/score/include/rtems/score/percpu.h  112
1 file changed, 47 insertions(+), 65 deletions(-)
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index 50dd2e6817..9442b6e7f0 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -97,42 +97,30 @@ typedef enum {
* This structure is used to hold per core state information.
*/
typedef struct {
- #if defined(RTEMS_SMP)
- /** This element is used to lock this structure */
- SMP_lock_spinlock_simple_Control lock;
-
- /** This indicates that the CPU is online. */
- uint32_t state;
+ #if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
+ (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
+ /**
+ * This contains a pointer to the lower range of the interrupt stack for
+ * this CPU. This is the address allocated and freed.
+ */
+ void *interrupt_stack_low;
/**
- * This is the request for the interrupt.
- *
- * @note This may become a chain protected by atomic instructions.
+ * This contains a pointer to the interrupt stack pointer for this CPU.
+ * It will be loaded at the beginning on an ISR.
*/
- uint32_t message;
+ void *interrupt_stack_high;
#endif
-#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
- (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
- /**
- * This contains a pointer to the lower range of the interrupt stack for
- * this CPU. This is the address allocated and freed.
- */
- void *interrupt_stack_low;
-
- /**
- * This contains a pointer to the interrupt stack pointer for this CPU.
- * It will be loaded at the beginning on an ISR.
- */
- void *interrupt_stack_high;
-#endif
-
/**
* This contains the current interrupt nesting level on this
* CPU.
*/
uint32_t isr_nest_level;
+ /** This is set to true when this CPU needs to run the dispatcher. */
+ volatile bool dispatch_necessary;
+
/** This is the thread executing on this CPU. */
Thread_Control *executing;
@@ -142,70 +130,64 @@ typedef struct {
/** This is the idle thread for this CPU. */
Thread_Control *idle;
- /** This is set to true when this CPU needs to run the dispatcher. */
- volatile bool dispatch_necessary;
-
/** This is the time of the last context switch on this CPU. */
Timestamp_Control time_of_last_context_switch;
+
+ #if defined(RTEMS_SMP)
+ /** This element is used to lock this structure */
+ SMP_lock_spinlock_simple_Control lock;
+
+ /** This indicates that the CPU is online. */
+ uint32_t state;
+
+ /**
+ * This is the request for the interrupt.
+ *
+ * @note This may become a chain protected by atomic instructions.
+ */
+ uint32_t message;
+ #endif
} Per_CPU_Control;
#endif
-#ifdef ASM
-#if defined(RTEMS_SMP)
- #define PER_CPU_LOCK 0
- #define PER_CPU_STATE (1 * __RTEMS_SIZEOF_VOID_P__)
- #define PER_CPU_MESSAGE (2 * __RTEMS_SIZEOF_VOID_P__)
- #define PER_CPU_END_SMP (3 * __RTEMS_SIZEOF_VOID_P__)
-#else
- #define PER_CPU_END_SMP 0
-#endif
+#if defined(ASM)
#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
(CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
/*
* If this CPU target lets RTEMS allocates the interrupt stack, then
- * we need to have places in the per cpu table to hold them.
+ * we need to have places in the per CPU table to hold them.
*/
- #define PER_CPU_INTERRUPT_STACK_LOW PER_CPU_END_SMP
- #define PER_CPU_INTERRUPT_STACK_HIGH \
- PER_CPU_INTERRUPT_STACK_LOW + (1 * __RTEMS_SIZEOF_VOID_P__)
+ #define PER_CPU_INTERRUPT_STACK_LOW \
+ 0
+ #define PER_CPU_INTERRUPT_STACK_HIGH \
+ PER_CPU_INTERRUPT_STACK_LOW + __RTEMS_SIZEOF_VOID_P__
#define PER_CPU_END_STACK \
- PER_CPU_INTERRUPT_STACK_HIGH + (1 * __RTEMS_SIZEOF_VOID_P__)
+ PER_CPU_INTERRUPT_STACK_HIGH + __RTEMS_SIZEOF_VOID_P__
+
+ #define INTERRUPT_STACK_LOW \
+ (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
+ #define INTERRUPT_STACK_HIGH \
+ (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
#else
- #define PER_CPU_END_STACK PER_CPU_END_SMP
+ #define PER_CPU_END_STACK \
+ 0
#endif
/*
* These are the offsets of the required elements in the per CPU table.
*/
#define PER_CPU_ISR_NEST_LEVEL \
- PER_CPU_END_STACK + 0
-#define PER_CPU_EXECUTING \
- PER_CPU_END_STACK + (1 * __RTEMS_SIZEOF_VOID_P__)
-#define PER_CPU_HEIR \
- PER_CPU_END_STACK + (2 * __RTEMS_SIZEOF_VOID_P__)
-#define PER_CPU_IDLE \
- PER_CPU_END_STACK + (3 * __RTEMS_SIZEOF_VOID_P__)
+ PER_CPU_END_STACK
#define PER_CPU_DISPATCH_NEEDED \
- PER_CPU_END_STACK + (4 * __RTEMS_SIZEOF_VOID_P__)
+ PER_CPU_ISR_NEST_LEVEL + 4
#define ISR_NEST_LEVEL \
- (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
+ (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
- (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
-
-/*
- * Do not define these offsets if they are not in the table.
- */
-#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
- (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
- #define INTERRUPT_STACK_LOW \
- (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
- #define INTERRUPT_STACK_HIGH \
- (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
-#endif
+ (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
-#endif
+#endif /* defined(ASM) */
#ifndef ASM
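
One way to keep the new assembler offsets from drifting away from the C structure again is a compile-time check against offsetof(). The sketch below is hypothetical and not part of this commit; it mirrors only the assembler-relevant head of the post-patch structure and assumes CPU_ALLOCATE_INTERRUPT_STACK is TRUE, with sizeof(void *) standing in for __RTEMS_SIZEOF_VOID_P__ so it compiles on its own.

/* per_cpu_offset_check.c - hypothetical sketch, not part of this commit. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the assembler-relevant head of the post-patch Per_CPU_Control. */
typedef struct {
  void *interrupt_stack_low;
  void *interrupt_stack_high;
  uint32_t isr_nest_level;
  volatile bool dispatch_necessary;
} Per_CPU_head;

/* The new offset scheme introduced by the patch. */
#define PER_CPU_INTERRUPT_STACK_LOW  0
#define PER_CPU_INTERRUPT_STACK_HIGH (PER_CPU_INTERRUPT_STACK_LOW + sizeof(void *))
#define PER_CPU_END_STACK            (PER_CPU_INTERRUPT_STACK_HIGH + sizeof(void *))
#define PER_CPU_ISR_NEST_LEVEL       PER_CPU_END_STACK
#define PER_CPU_DISPATCH_NEEDED      (PER_CPU_ISR_NEST_LEVEL + 4)

/* The build fails if the C layout and the assembler offsets ever disagree. */
_Static_assert(offsetof(Per_CPU_head, isr_nest_level) == PER_CPU_ISR_NEST_LEVEL,
               "isr_nest_level offset mismatch");
_Static_assert(offsetof(Per_CPU_head, dispatch_necessary) == PER_CPU_DISPATCH_NEEDED,
               "dispatch_necessary offset mismatch");

int main(void) { return 0; }

Compiled as part of a normal build, a check like this would catch a reordering of the structure before any assembler code runs with stale offsets, on 16-, 32-, and 64-bit pointer targets alike.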