summary refs log tree commit diff stats
path: root/cpukit/score/src/wkspace.c
diff options
context:
space:
mode:
Diffstat (limited to 'cpukit/score/src/wkspace.c')
-rw-r--r--  cpukit/score/src/wkspace.c  | 128
1 files changed, 107 insertions, 21 deletions
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index 13dfc03706..823e357009 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -19,25 +19,33 @@
#endif
#include <rtems/score/wkspace.h>
+#include <rtems/score/assert.h>
#include <rtems/score/heapimpl.h>
#include <rtems/score/interr.h>
+#include <rtems/score/percpudata.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/tls.h>
#include <rtems/config.h>
-#include <string.h> /* for memset */
+#include <string.h>
/* #define DEBUG_WORKSPACE */
#if defined(DEBUG_WORKSPACE)
#include <rtems/bspIo.h>
#endif
+RTEMS_LINKER_RWSET(
+ _Per_CPU_Data,
+ RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES ) char
+);
+
Heap_Control _Workspace_Area;
-static uint32_t _Get_maximum_thread_count(void)
+static uint32_t _Workspace_Get_maximum_thread_count( void )
{
- uint32_t thread_count = 0;
+ uint32_t thread_count;
+ thread_count = 0;
thread_count += _Thread_Get_maximum_internal_threads();
thread_count += rtems_resource_maximum_per_allocation(
@@ -53,20 +61,12 @@ static uint32_t _Get_maximum_thread_count(void)
return thread_count;
}
-void _Workspace_Handler_initialization(
- Heap_Area *areas,
- size_t area_count,
- Heap_Initialization_or_extend_handler extend
-)
+static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
{
- Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
- uintptr_t remaining = rtems_configuration_get_work_space_size();
- bool do_zero = rtems_configuration_get_do_zero_of_workspace();
- bool unified = rtems_configuration_get_unified_work_area();
- uintptr_t page_size = CPU_HEAP_ALIGNMENT;
- uintptr_t overhead = _Heap_Area_overhead( page_size );
- uintptr_t tls_size = _TLS_Get_size();
- size_t i;
+ uintptr_t tls_size;
+ uintptr_t space;
+
+ tls_size = _TLS_Get_size();
/*
* In case we have a non-zero TLS size, then we need a TLS area for each
@@ -85,14 +85,99 @@ void _Workspace_Handler_initialization(
* of a free block. The last allocation may need one free block of minimum
* size.
*/
- remaining += _Heap_Min_block_size( page_size );
+ space = _Heap_Min_block_size( page_size );
- remaining += _Get_maximum_thread_count()
+ space += _Workspace_Get_maximum_thread_count()
* _Heap_Size_with_overhead( page_size, tls_alloc, tls_align );
+ } else {
+ space = 0;
+ }
+
+ return space;
+}
+
+static uintptr_t _Workspace_Space_for_per_CPU_data( uintptr_t page_size )
+{
+ uintptr_t space;
+
+#ifdef RTEMS_SMP
+ uintptr_t size;
+
+ size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+ _Assert( size % CPU_CACHE_LINE_BYTES == 0 );
+
+ if ( size > 0 ) {
+ /*
+ * Memory allocated with an alignment constraint is allocated from the end of
+ * a free block. The last allocation may need one free block of minimum
+ * size.
+ */
+ space = _Heap_Min_block_size( page_size );
+
+ space += ( rtems_configuration_get_maximum_processors() - 1 )
+ * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
+ } else {
+ space = 0;
}
+#else
+ space = 0;
+#endif
+
+ return space;
+}
+
+static void _Workspace_Allocate_per_CPU_data( void )
+{
+#ifdef RTEMS_SMP
+ Per_CPU_Control *cpu;
+ uintptr_t size;
+ uint32_t cpu_index;
+ uint32_t cpu_max;
+
+ cpu = _Per_CPU_Get_by_index( 0 );
+ cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+
+ size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+ cpu_max = rtems_configuration_get_maximum_processors();
+
+ for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
+ _Assert( cpu->data != NULL );
+ memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
+ }
+#endif
+}
+
+void _Workspace_Handler_initialization(
+ Heap_Area *areas,
+ size_t area_count,
+ Heap_Initialization_or_extend_handler extend
+)
+{
+ Heap_Initialization_or_extend_handler init_or_extend;
+ uintptr_t remaining;
+ bool do_zero;
+ bool unified;
+ uintptr_t page_size;
+ uintptr_t overhead;
+ size_t i;
+
+ page_size = CPU_HEAP_ALIGNMENT;
+
+ remaining = rtems_configuration_get_work_space_size();
+ remaining += _Workspace_Space_for_TLS( page_size );
+ remaining += _Workspace_Space_for_per_CPU_data( page_size );
+
+ init_or_extend = _Heap_Initialize;
+ do_zero = rtems_configuration_get_do_zero_of_workspace();
+ unified = rtems_configuration_get_unified_work_area();
+ overhead = _Heap_Area_overhead( page_size );
+
+ for ( i = 0; i < area_count; ++i ) {
+ Heap_Area *area;
- for (i = 0; i < area_count; ++i) {
- Heap_Area *area = &areas [i];
+ area = &areas[ i ];
if ( do_zero ) {
memset( area->begin, 0, area->size );
@@ -113,7 +198,7 @@ void _Workspace_Handler_initialization(
}
}
- space_available = (*init_or_extend)(
+ space_available = ( *init_or_extend )(
&_Workspace_Area,
area->begin,
size,
@@ -138,6 +223,7 @@ void _Workspace_Handler_initialization(
}
_Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
+ _Workspace_Allocate_per_CPU_data();
}
void *_Workspace_Allocate(