summary refs log tree commit diff stats
path: root/cpukit/include/rtems/score
diff options
context:
space:
mode:
author Sebastian Huber <sebastian.huber@embedded-brains.de> 2021-03-03 09:23:20 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de> 2021-03-05 06:58:33 +0100
commit 524839568d8df72bb3d62d64cb1b927bc8dbbbf1 (patch)
tree 1a448dc50f597a98e52f3fb13f705b34bb6ee9ad /cpukit/include/rtems/score
parent CONFIGURE_MAXIMUM_THREAD_LOCAL_STORAGE_SIZE (diff)
download rtems-524839568d8df72bb3d62d64cb1b927bc8dbbbf1.tar.bz2
score: Ensure stack alignment requirement
Make sure that a user-provided stack size is the minimum size allocated for the stack. Make sure we meet the stack alignment requirement also for CPU ports with CPU_STACK_ALIGNMENT > CPU_HEAP_ALIGNMENT.
Diffstat (limited to 'cpukit/include/rtems/score')
-rw-r--r-- cpukit/include/rtems/score/context.h | 3
-rw-r--r-- cpukit/include/rtems/score/stackimpl.h | 24
-rw-r--r-- cpukit/include/rtems/score/tls.h | 16
3 files changed, 30 insertions, 13 deletions
diff --git a/cpukit/include/rtems/score/context.h b/cpukit/include/rtems/score/context.h
index 46e04e9600..b65c15e73b 100644
--- a/cpukit/include/rtems/score/context.h
+++ b/cpukit/include/rtems/score/context.h
@@ -49,8 +49,7 @@ extern "C" {
*/
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#define CONTEXT_FP_SIZE \
- ( ( CPU_CONTEXT_FP_SIZE + CPU_HEAP_ALIGNMENT - 1 ) \
- & ~( CPU_HEAP_ALIGNMENT - 1 ) )
+ RTEMS_ALIGN_UP( CPU_CONTEXT_FP_SIZE, CPU_STACK_ALIGNMENT )
#else
#define CONTEXT_FP_SIZE 0
#endif
diff --git a/cpukit/include/rtems/score/stackimpl.h b/cpukit/include/rtems/score/stackimpl.h
index 43b7c8151e..c15206002c 100644
--- a/cpukit/include/rtems/score/stackimpl.h
+++ b/cpukit/include/rtems/score/stackimpl.h
@@ -135,6 +135,7 @@ RTEMS_INLINE_ROUTINE size_t _Stack_Extend_size(
)
{
size_t extra_size;
+ size_t alignment_overhead;
extra_size = _TLS_Get_allocation_size();
@@ -147,15 +148,32 @@ RTEMS_INLINE_ROUTINE size_t _Stack_Extend_size(
(void) is_fp;
#endif
- stack_size += extra_size;
+ /*
+ * In order to make sure that a user-provided stack size is the minimum which
+ * can be allocated for the stack, we have to align it up to the next stack
+ * boundary.
+ */
+ alignment_overhead = CPU_STACK_ALIGNMENT - 1;
+
+#if CPU_STACK_ALIGNMENT > CPU_HEAP_ALIGNMENT
+ /*
+ * If the heap allocator does not meet the stack alignment requirement, then
+ * we have to do the stack alignment manually in _Thread_Initialize() and
+ * need to allocate extra space for this.
+ */
+ alignment_overhead += CPU_STACK_ALIGNMENT - CPU_HEAP_ALIGNMENT;
+#endif
- if ( stack_size < extra_size ) {
+ if ( stack_size > SIZE_MAX - extra_size - alignment_overhead ) {
/*
* In case of an unsigned integer overflow, saturate at the maximum value.
*/
- stack_size = SIZE_MAX;
+ return SIZE_MAX;
}
+ stack_size += extra_size;
+ stack_size = RTEMS_ALIGN_UP( stack_size, CPU_STACK_ALIGNMENT );
+
return stack_size;
}
diff --git a/cpukit/include/rtems/score/tls.h b/cpukit/include/rtems/score/tls.h
index a32b7164b5..7725a003ca 100644
--- a/cpukit/include/rtems/score/tls.h
+++ b/cpukit/include/rtems/score/tls.h
@@ -122,17 +122,17 @@ static inline uintptr_t _TLS_Get_size( void )
}
/**
- * @brief Returns the value aligned up to the heap alignment.
+ * @brief Returns the value aligned up to the stack alignment.
*
* @param val The value to align.
*
- * @return The value aligned to the heap alignment.
+ * @return The value aligned to the stack alignment.
*/
-static inline uintptr_t _TLS_Heap_align_up( uintptr_t val )
+static inline uintptr_t _TLS_Align_up( uintptr_t val )
{
- uintptr_t msk = CPU_HEAP_ALIGNMENT - 1;
+ uintptr_t alignment = CPU_STACK_ALIGNMENT;
- return (val + msk) & ~msk;
+ return RTEMS_ALIGN_UP( val, alignment );
}
/**
@@ -229,7 +229,7 @@ static inline void *_TLS_TCB_at_area_begin_initialize( void *tls_area )
void *tls_block = (char *) tls_area
+ _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
TLS_Thread_control_block *tcb = tls_area;
- uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ uintptr_t aligned_size = _TLS_Align_up( (uintptr_t) _TLS_Size );
TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
((char *) tls_block + aligned_size);
@@ -253,7 +253,7 @@ static inline void *_TLS_TCB_before_TLS_block_initialize( void *tls_area )
+ _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
((char *) tls_block - sizeof(*tcb));
- uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ uintptr_t aligned_size = _TLS_Align_up( (uintptr_t) _TLS_Size );
TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
((char *) tls_block + aligned_size);
@@ -276,7 +276,7 @@ static inline void *_TLS_TCB_after_TLS_block_initialize( void *tls_area )
uintptr_t size = (uintptr_t) _TLS_Size;
uintptr_t tls_align = (uintptr_t) _TLS_Alignment;
uintptr_t tls_mask = tls_align - 1;
- uintptr_t heap_align = _TLS_Heap_align_up( tls_align );
+ uintptr_t heap_align = _TLS_Align_up( tls_align );
uintptr_t heap_mask = heap_align - 1;
TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
((char *) tls_area + ((size + heap_mask) & ~heap_mask));