author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2018-08-27 10:36:35 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2018-09-10 10:38:45 +0200
commit     cfc4231d8fe1056fa501508a929c8ccaa1dd11be (patch)
tree       522a0bcab2e11c0dbd71a1f8c66b336608338d7a /cpukit
parent     cpukit/Makefile.am: Cleanup (diff)
download   rtems-cfc4231d8fe1056fa501508a929c8ccaa1dd11be.tar.bz2
score: Add flexible per-CPU data
Update #3507.
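
For context, a minimal usage sketch of the API introduced by this commit (not part of the commit itself; the item name per_cpu_counter and the function increment_local_counter are made up for illustration). A per-CPU item is placed into the _Per_CPU_Data linker set and accessed through PER_CPU_DATA_GET(), which resolves to the processor-specific copy on SMP configurations and to the single linker-set instance otherwise:

#include <rtems/score/percpudata.h>

/* Hypothetical per-CPU item; PER_CPU_DATA_ITEM() places the definition in the
 * _Per_CPU_Data linker set, PER_CPU_DATA_ITEM_DECLARE() would normally live
 * in a header. */
PER_CPU_DATA_ITEM_DECLARE( uint32_t, per_cpu_counter );
PER_CPU_DATA_ITEM( uint32_t, per_cpu_counter );

static void increment_local_counter( void )
{
  Per_CPU_Control *cpu;
  uint32_t        *counter;

  /* Assumes the caller already prevents migration, for example by having
   * thread dispatching disabled. */
  cpu = _Per_CPU_Get();
  counter = PER_CPU_DATA_GET( cpu, uint32_t, per_cpu_counter );
  ++( *counter );
}
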
Diffstat (limited to 'cpukit')
-rw-r--r--  cpukit/headers.am                        |   1
-rw-r--r--  cpukit/include/rtems/score/percpu.h      |   9
-rw-r--r--  cpukit/include/rtems/score/percpudata.h  | 104
-rw-r--r--  cpukit/score/src/wkspace.c               | 128
4 files changed, 220 insertions, 22 deletions
diff --git a/cpukit/headers.am b/cpukit/headers.am
index 303dbc58a7..a848690e3a 100644
--- a/cpukit/headers.am
+++ b/cpukit/headers.am
@@ -375,6 +375,7 @@ include_rtems_score_HEADERS += include/rtems/score/objectimpl.h
include_rtems_score_HEADERS += include/rtems/score/objectmp.h
include_rtems_score_HEADERS += include/rtems/score/onceimpl.h
include_rtems_score_HEADERS += include/rtems/score/percpu.h
+include_rtems_score_HEADERS += include/rtems/score/percpudata.h
include_rtems_score_HEADERS += include/rtems/score/priority.h
include_rtems_score_HEADERS += include/rtems/score/prioritybitmap.h
include_rtems_score_HEADERS += include/rtems/score/prioritybitmapimpl.h
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index f0b155d4c1..712d1cde36 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -9,7 +9,7 @@
* COPYRIGHT (c) 1989-2011.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2012, 2016 embedded brains GmbH
+ * Copyright (c) 2012, 2018 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -477,6 +477,13 @@ typedef struct Per_CPU_Control {
} Scheduler;
/**
+ * @brief Beginning of the per-CPU data area.
+ *
+ * Contains items defined via PER_CPU_DATA_ITEM().
+ */
+ char *data;
+
+ /**
* @brief Indicates the current state of the CPU.
*
* This field is protected by the _Per_CPU_State_lock lock.
diff --git a/cpukit/include/rtems/score/percpudata.h b/cpukit/include/rtems/score/percpudata.h
new file mode 100644
index 0000000000..3de99566ad
--- /dev/null
+++ b/cpukit/include/rtems/score/percpudata.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PERCPUDATA_H
+#define _RTEMS_SCORE_PERCPUDATA_H
+
+#include <rtems/score/percpu.h>
+#include <rtems/linkersets.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup PerCPUData Flexible Per-CPU Data
+ *
+ * @ingroup PerCPU
+ *
+ * Provides the definition of custom per-CPU items. The items are collected in
+ * a special linker set. During system initialization the content of the
+ * linker set is duplicated for all secondary processors using memory allocated
+ * from the workspace. The begin and end of the per-CPU data area is cache
+ * line aligned (CPU_CACHE_LINE_BYTES).
+ *
+ * @{
+ */
+
+RTEMS_LINKER_RWSET_DECLARE( _Per_CPU_Data, char );
+
+/**
+ * @brief Declares a per-CPU item of the specified type.
+ *
+ * Items declared with this macro have external linkage.
+ *
+ * @param type The type of the item.
+ * @param item The designator of the item.
+ */
+#define PER_CPU_DATA_ITEM_DECLARE( type, item ) \
+ RTEMS_LINKER_RWSET_ITEM_DECLARE( _Per_CPU_Data, type, item )
+
+/**
+ * @brief Defines a per-CPU item of the specified type.
+ *
+ * @param type The type of the item.
+ * @param item The designator of the item.
+ */
+#define PER_CPU_DATA_ITEM( type, item ) \
+ RTEMS_LINKER_RWSET_ITEM( _Per_CPU_Data, type, item )
+
+/**
+ * @brief Returns the offset of the per-CPU item relative to the beginning of
+ * the per-CPU data area.
+ *
+ * @param item The designator of the item.
+ */
+#define PER_CPU_DATA_OFFSET( item ) \
+ ( (uintptr_t) &_Linker_set__Per_CPU_Data_##item \
+ - (uintptr_t) RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ) )
+
+/**
+ * @brief Returns a pointer of the specified type to the per-CPU item at the
+ * specified offset for the specified processor.
+ *
+ * @param cpu The processor of the item.
+ * @param type The type of the item.
+ * @param offset The offset of the item.
+ */
+#define PER_CPU_DATA_GET_BY_OFFSET( cpu, type, offset ) \
+ (type *) ( cpu->data + offset )
+
+/**
+ * @brief Returns a pointer of the specified type to the specified per-CPU item
+ * for the specified processor.
+ *
+ * @param cpu The processor of the item.
+ * @param type The type of the item.
+ * @param item The designator of the item.
+ */
+#ifdef RTEMS_SMP
+#define PER_CPU_DATA_GET( cpu, type, item ) \
+ PER_CPU_DATA_GET_BY_OFFSET( cpu, type, PER_CPU_DATA_OFFSET( item ) )
+#else
+#define PER_CPU_DATA_GET( cpu, type, item ) \
+ &_Linker_set__Per_CPU_Data_##item
+#endif
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PERCPUDATA_H */
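
For repeated accesses, the offset of an item can be resolved once with PER_CPU_DATA_OFFSET() and then applied per processor with PER_CPU_DATA_GET_BY_OFFSET(). A hedged sketch, assuming an SMP configuration and the hypothetical per_cpu_counter item from the sketch above; the function sum_all_counters is made up, and the unsynchronized reads are for illustration only:

#include <rtems/score/percpudata.h>
#include <rtems/config.h>

PER_CPU_DATA_ITEM_DECLARE( uint32_t, per_cpu_counter );

static uint32_t sum_all_counters( void )
{
  uintptr_t offset;
  uint32_t  sum;
  uint32_t  cpu_index;
  uint32_t  cpu_max;

  /* Offset of the item relative to the beginning of the per-CPU data area;
   * the same offset is valid for every processor's copy. */
  offset = PER_CPU_DATA_OFFSET( per_cpu_counter );
  sum = 0;
  cpu_max = rtems_configuration_get_maximum_processors();

  for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
    Per_CPU_Control *cpu;
    uint32_t        *counter;

    cpu = _Per_CPU_Get_by_index( cpu_index );
    counter = PER_CPU_DATA_GET_BY_OFFSET( cpu, uint32_t, offset );
    sum += *counter;
  }

  return sum;
}
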
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index 13dfc03706..823e357009 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -19,25 +19,33 @@
#endif
#include <rtems/score/wkspace.h>
+#include <rtems/score/assert.h>
#include <rtems/score/heapimpl.h>
#include <rtems/score/interr.h>
+#include <rtems/score/percpudata.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/tls.h>
#include <rtems/config.h>
-#include <string.h> /* for memset */
+#include <string.h>
/* #define DEBUG_WORKSPACE */
#if defined(DEBUG_WORKSPACE)
#include <rtems/bspIo.h>
#endif
+RTEMS_LINKER_RWSET(
+ _Per_CPU_Data,
+ RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES ) char
+);
+
Heap_Control _Workspace_Area;
-static uint32_t _Get_maximum_thread_count(void)
+static uint32_t _Workspace_Get_maximum_thread_count( void )
{
- uint32_t thread_count = 0;
+ uint32_t thread_count;
+ thread_count = 0;
thread_count += _Thread_Get_maximum_internal_threads();
thread_count += rtems_resource_maximum_per_allocation(
@@ -53,20 +61,12 @@ static uint32_t _Get_maximum_thread_count(void)
return thread_count;
}
-void _Workspace_Handler_initialization(
- Heap_Area *areas,
- size_t area_count,
- Heap_Initialization_or_extend_handler extend
-)
+static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
{
- Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
- uintptr_t remaining = rtems_configuration_get_work_space_size();
- bool do_zero = rtems_configuration_get_do_zero_of_workspace();
- bool unified = rtems_configuration_get_unified_work_area();
- uintptr_t page_size = CPU_HEAP_ALIGNMENT;
- uintptr_t overhead = _Heap_Area_overhead( page_size );
- uintptr_t tls_size = _TLS_Get_size();
- size_t i;
+ uintptr_t tls_size;
+ uintptr_t space;
+
+ tls_size = _TLS_Get_size();
/*
* In case we have a non-zero TLS size, then we need a TLS area for each
@@ -85,14 +85,99 @@ void _Workspace_Handler_initialization(
* of a free block. The last allocation may need one free block of minimum
* size.
*/
- remaining += _Heap_Min_block_size( page_size );
+ space = _Heap_Min_block_size( page_size );
- remaining += _Get_maximum_thread_count()
+ space += _Workspace_Get_maximum_thread_count()
* _Heap_Size_with_overhead( page_size, tls_alloc, tls_align );
+ } else {
+ space = 0;
+ }
+
+ return space;
+}
+
+static uintptr_t _Workspace_Space_for_per_CPU_data( uintptr_t page_size )
+{
+ uintptr_t space;
+
+#ifdef RTEMS_SMP
+ uintptr_t size;
+
+ size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+ _Assert( size % CPU_CACHE_LINE_BYTES == 0 );
+
+ if ( size > 0 ) {
+ /*
+ * Memory allocated with an alignment constraint is allocated from the end of
+ * a free block. The last allocation may need one free block of minimum
+ * size.
+ */
+ space = _Heap_Min_block_size( page_size );
+
+ space += ( rtems_configuration_get_maximum_processors() - 1 )
+ * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
+ } else {
+ space = 0;
}
+#else
+ space = 0;
+#endif
+
+ return space;
+}
+
+static void _Workspace_Allocate_per_CPU_data( void )
+{
+#ifdef RTEMS_SMP
+ Per_CPU_Control *cpu;
+ uintptr_t size;
+ uint32_t cpu_index;
+ uint32_t cpu_max;
+
+ cpu = _Per_CPU_Get_by_index( 0 );
+ cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+
+ size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+ cpu_max = rtems_configuration_get_maximum_processors();
+
+ for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
+ _Assert( cpu->data != NULL );
+ memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
+ }
+#endif
+}
+
+void _Workspace_Handler_initialization(
+ Heap_Area *areas,
+ size_t area_count,
+ Heap_Initialization_or_extend_handler extend
+)
+{
+ Heap_Initialization_or_extend_handler init_or_extend;
+ uintptr_t remaining;
+ bool do_zero;
+ bool unified;
+ uintptr_t page_size;
+ uintptr_t overhead;
+ size_t i;
+
+ page_size = CPU_HEAP_ALIGNMENT;
+
+ remaining = rtems_configuration_get_work_space_size();
+ remaining += _Workspace_Space_for_TLS( page_size );
+ remaining += _Workspace_Space_for_per_CPU_data( page_size );
+
+ init_or_extend = _Heap_Initialize;
+ do_zero = rtems_configuration_get_do_zero_of_workspace();
+ unified = rtems_configuration_get_unified_work_area();
+ overhead = _Heap_Area_overhead( page_size );
+
+ for ( i = 0; i < area_count; ++i ) {
+ Heap_Area *area;
- for (i = 0; i < area_count; ++i) {
- Heap_Area *area = &areas [i];
+ area = &areas[ i ];
if ( do_zero ) {
memset( area->begin, 0, area->size );
@@ -113,7 +198,7 @@ void _Workspace_Handler_initialization(
}
}
- space_available = (*init_or_extend)(
+ space_available = ( *init_or_extend )(
&_Workspace_Area,
area->begin,
size,
@@ -138,6 +223,7 @@ void _Workspace_Handler_initialization(
}
_Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
+ _Workspace_Allocate_per_CPU_data();
}
void *_Workspace_Allocate(
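
Taken together, the workspace changes above reserve room for one cache-line-aligned copy of the _Per_CPU_Data linker set per secondary processor and duplicate the set content into those copies during _Workspace_Handler_initialization(). A sketch of the state this establishes on an SMP system immediately after workspace initialization (illustrative only; the function name per_cpu_data_layout_is_consistent is made up):

#include <rtems/score/percpudata.h>
#include <rtems/config.h>
#include <string.h>
#include <stdbool.h>

static bool per_cpu_data_layout_is_consistent( void )
{
#ifdef RTEMS_SMP
  uintptr_t size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
  uint32_t  cpu_max = rtems_configuration_get_maximum_processors();
  uint32_t  cpu_index;

  /* Processor 0 uses the linker set in place */
  if (
    _Per_CPU_Get_by_index( 0 )->data
      != RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data )
  ) {
    return false;
  }

  for ( cpu_index = 1; cpu_index < cpu_max; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    /* Each secondary processor owns a cache-line-aligned workspace copy */
    if ( (uintptr_t) cpu->data % CPU_CACHE_LINE_BYTES != 0 ) {
      return false;
    }

    /* The copy starts out as a byte-wise duplicate of the linker set */
    if (
      memcmp( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size ) != 0
    ) {
      return false;
    }
  }
#endif

  /* On uniprocessor configurations the linker set is used directly */
  return true;
}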