author     Chris Johns <chrisj@rtems.org>  2017-12-23 18:18:56 +1100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-25 08:45:26 +0100
commit     2afb22b7e1ebcbe40373ff7e0efae7d207c655a9 (patch)
tree       44759efe9374f13200a97e96d91bd9a2b7e5ce2a /cpukit/include/rtems/score/tls.h
parent     MAINTAINERS: Add myself to Write After Approval. (diff)
Remove make preinstall
A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The header
files were included via the -Bsome/build/tree/path GCC command line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.

* Errors in header files show up in the build tree copy. This makes it hard
  for editors to open the right file to fix the error.

* There is no clear relationship between source and build tree header files.
  This makes an audit of the build process difficult.

* The visibility of all header files in the build tree makes it difficult to
  enforce API barriers. For example it is discouraged to use BSP-specifics
  in the cpukit.

* An introduction of a new build system is difficult.

* Include paths specified by the -B option are system headers. This may
  suppress warnings.

* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header files are
moved to dedicated include directories in the source tree. Let @RTEMS_CPU@
be the target architecture, e.g. arm, powerpc, sparc, etc. Let
@RTEMS_BSP_FAMILIY@ be a BSP family base directory, e.g. erc32, imx, qoriq,
etc.

The new cpukit include directories are:

* cpukit/include
* cpukit/score/cpu/@RTEMS_CPU@/include
* cpukit/libnetworking

The new BSP include directories are:

* bsps/include
* bsps/@RTEMS_CPU@/include
* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILIY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g. it is
not possible to override general header files via the include path order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option should
be used to regenerate the "headers.am" files.

Update #3254.
Diffstat (limited to 'cpukit/include/rtems/score/tls.h')
-rw-r--r--  cpukit/include/rtems/score/tls.h  217
1 file changed, 217 insertions, 0 deletions
diff --git a/cpukit/include/rtems/score/tls.h b/cpukit/include/rtems/score/tls.h
new file mode 100644
index 0000000000..644e54e6f7
--- /dev/null
+++ b/cpukit/include/rtems/score/tls.h
@@ -0,0 +1,217 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreTLS
+ *
+ * @brief Thread-Local Storage (TLS)
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_TLS_H
+#define _RTEMS_SCORE_TLS_H
+
+#include <rtems/score/cpu.h>
+
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreTLS Thread-Local Storage (TLS)
+ *
+ * @ingroup Score
+ *
+ * @brief Thread-local storage (TLS) support.
+ *
+ * Variants I and II are implemented as described in Ulrich Drepper, "ELF
+ * Handling For Thread-Local Storage".
+ *
+ * @{
+ */
+
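+/*
+ * These symbols are defined by the linker script.  They describe the TLS
+ * data (.tdata) and TLS BSS (.tbss) sections: their begin and end addresses,
+ * their sizes, the overall size and the required alignment.  They are
+ * declared as character arrays because only their addresses are meaningful.
+ */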
+extern char _TLS_Data_begin[];
+
+extern char _TLS_Data_end[];
+
+extern char _TLS_Data_size[];
+
+extern char _TLS_BSS_begin[];
+
+extern char _TLS_BSS_end[];
+
+extern char _TLS_BSS_size[];
+
+extern char _TLS_Size[];
+
+extern char _TLS_Alignment[];
+
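+/*
+ * Dynamic thread vector (DTV) as described in Drepper's TLS document: a
+ * generation number followed by one TLS block pointer per module.  A single
+ * block suffices here for the statically linked executable.
+ */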
+typedef struct {
+ /*
+ * FIXME: Not sure if the generation number type is correct for all
+ * architectures.
+ */
+ uint32_t generation_number;
+
+ void *tls_blocks[1];
+} TLS_Dynamic_thread_vector;
+
+typedef struct TLS_Thread_control_block {
+#ifdef __i386__
+ struct TLS_Thread_control_block *tcb;
+#else
+ TLS_Dynamic_thread_vector *dtv;
+ uintptr_t reserved;
+#endif
+} TLS_Thread_control_block;
+
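+/*
+ * Module and offset pair used to locate a thread-local variable; this
+ * corresponds to the tls_index structure passed to __tls_get_addr() in the
+ * ELF TLS ABI.
+ */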
+typedef struct {
+ uintptr_t module;
+ uintptr_t offset;
+} TLS_Index;
+
+static inline uintptr_t _TLS_Get_size( void )
+{
+ /*
+ * Do not use _TLS_Size here since this will lead GCC to assume that this
+ * symbol is not 0 and the tests for 0 will be optimized away.
+ */
+ return (uintptr_t) _TLS_BSS_end - (uintptr_t) _TLS_Data_begin;
+}
+
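+/*
+ * Rounds val up to the next multiple of CPU_HEAP_ALIGNMENT, which is a power
+ * of two.  For example, with CPU_HEAP_ALIGNMENT == 8 a value of 13 becomes
+ * (13 + 7) & ~7 == 16.
+ */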
+static inline uintptr_t _TLS_Heap_align_up( uintptr_t val )
+{
+ uintptr_t msk = CPU_HEAP_ALIGNMENT - 1;
+
+ return (val + msk) & ~msk;
+}
+
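+/*
+ * Returns the size reserved for the TCB: at least the size of
+ * TLS_Thread_control_block and at least the TLS alignment, so that a TLS
+ * block placed directly after the TCB area keeps that alignment.
+ */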
+static inline uintptr_t _TLS_Get_thread_control_block_area_size(
+ uintptr_t alignment
+)
+{
+ return alignment <= sizeof(TLS_Thread_control_block) ?
+ sizeof(TLS_Thread_control_block) : alignment;
+}
+
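+/*
+ * Total size of one thread's TLS area: the TLS data and BSS rounded up to
+ * the heap alignment, plus the TCB area, plus (except on i386) the dynamic
+ * thread vector.
+ */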
+static inline uintptr_t _TLS_Get_allocation_size(
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ uintptr_t allocation_size = 0;
+
+ allocation_size += _TLS_Heap_align_up( size );
+ allocation_size += _TLS_Get_thread_control_block_area_size( alignment );
+
+#ifndef __i386__
+ allocation_size += sizeof(TLS_Dynamic_thread_vector);
+#endif
+
+ return allocation_size;
+}
+
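+/*
+ * Copies the TLS initialization image (.tdata) into the TLS block, clears
+ * the TLS BSS (.tbss) part and returns the TLS block.
+ */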
+static inline void *_TLS_Copy_and_clear( void *tls_area )
+{
+ tls_area = memcpy(
+ tls_area,
+ _TLS_Data_begin,
+ (size_t) ((uintptr_t)_TLS_Data_size)
+ );
+
+ memset(
+ (char *) tls_area + (size_t)((intptr_t) _TLS_BSS_begin) -
+ (size_t)((intptr_t) _TLS_Data_begin),
+ 0,
+ ((size_t) (intptr_t)_TLS_BSS_size)
+ );
+
+ return tls_area;
+}
+
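+/*
+ * Connects the TCB with the DTV and registers the TLS block in the DTV,
+ * except on i386, where the first TCB word is set to point to the TCB
+ * itself.  The block content is then initialized via _TLS_Copy_and_clear().
+ */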
+static inline void *_TLS_Initialize(
+ void *tls_block,
+ TLS_Thread_control_block *tcb,
+ TLS_Dynamic_thread_vector *dtv
+)
+{
+#ifdef __i386__
+ (void) dtv;
+ tcb->tcb = tcb;
+#else
+ tcb->dtv = dtv;
+ dtv->generation_number = 1;
+ dtv->tls_blocks[0] = tls_block;
+#endif
+
+ return _TLS_Copy_and_clear( tls_block );
+}
+
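+/*
+ * The initialization functions below support three TLS area layouts:
+ *
+ * 1. Variant I with the TCB at the area begin: TCB, TLS block, DTV.
+ *
+ * 2. Variant I with the TCB placed directly before the TLS block, for
+ *    targets where the linker-emitted offsets do not account for the TCB.
+ *
+ * 3. Variant II with the TCB placed after the TLS block and the DTV after
+ *    the TCB; the thread pointer refers to the TCB.
+ */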
+/* Use Variant I, the TLS offsets emitted by the linker take the TCB into account */
+static inline void *_TLS_TCB_at_area_begin_initialize( void *tls_area )
+{
+ void *tls_block = (char *) tls_area
+ + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
+ TLS_Thread_control_block *tcb = tls_area;
+ uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tls_block + aligned_size);
+
+ return _TLS_Initialize( tls_block, tcb, dtv );
+}
+
+/* Use Variant I, the TLS offsets emitted by the linker neglect the TCB */
+static inline void *_TLS_TCB_before_TLS_block_initialize( void *tls_area )
+{
+ void *tls_block = (char *) tls_area
+ + _TLS_Get_thread_control_block_area_size( (uintptr_t) _TLS_Alignment );
+ TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
+ ((char *) tls_block - sizeof(*tcb));
+ uintptr_t aligned_size = _TLS_Heap_align_up( (uintptr_t) _TLS_Size );
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tls_block + aligned_size);
+
+ return _TLS_Initialize( tls_block, tcb, dtv );
+}
+
+/* Use Variant II */
+static inline void *_TLS_TCB_after_TLS_block_initialize( void *tls_area )
+{
+ uintptr_t size = (uintptr_t) _TLS_Size;
+ uintptr_t tls_align = (uintptr_t) _TLS_Alignment;
+ uintptr_t tls_mask = tls_align - 1;
+ uintptr_t heap_align = _TLS_Heap_align_up( tls_align );
+ uintptr_t heap_mask = heap_align - 1;
+ TLS_Thread_control_block *tcb = (TLS_Thread_control_block *)
+ ((char *) tls_area + ((size + heap_mask) & ~heap_mask));
+ void *tls_block = (char *) tcb - ((size + tls_mask) & ~tls_mask);
+ TLS_Dynamic_thread_vector *dtv = (TLS_Dynamic_thread_vector *)
+ ((char *) tcb + sizeof(*tcb));
+
+ _TLS_Initialize( tls_block, tcb, dtv );
+
+ return tcb;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_TLS_H */
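
For orientation, here is a minimal usage sketch, assuming the Variant I
layout with the TCB at the area begin. The function name
prepare_thread_tls_area() and the use of malloc() are illustrative only; the
real callers of this API live in the thread initialization and CPU port code.

#include <rtems/score/tls.h>

#include <stdlib.h>

/*
 * Illustrative sketch only: allocate and initialize a TLS area for one
 * thread.  A real allocation must also satisfy the _TLS_Alignment
 * requirement; malloc() is used here merely to keep the example short.
 */
static void *prepare_thread_tls_area( void )
{
  uintptr_t allocation_size = _TLS_Get_allocation_size(
    _TLS_Get_size(),
    (uintptr_t) _TLS_Alignment
  );
  void *tls_area = malloc( allocation_size );

  if ( tls_area == NULL ) {
    return NULL;
  }

  /* Copies .tdata, clears .tbss and returns the TLS block pointer */
  return _TLS_TCB_at_area_begin_initialize( tls_area );
}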