summaryrefslogtreecommitdiffstats
path: root/cpukit/score
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-12-18 08:45:06 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2018-12-18 08:50:08 +0100
commit7c19e50bdd3b6ae234b171992f2e109d4f804cda (patch)
treec967305bf67491cb855cf19077139ad836113d8e /cpukit/score
parentconfig: Remove CONFIGURE_CONFDEFS_DEBUG (diff)
downloadrtems-7c19e50bdd3b6ae234b171992f2e109d4f804cda.tar.bz2
score: Fix per-CPU data allocation
Allocate the per-CPU data for secondary processors directly from the heap areas before heap initialization and not via _Workspace_Allocate_aligned(). This avoids a dependency on the workspace allocator. It also fixes a problem on some platforms (e.g. QorIQ) where, at this early point in the system initialization, the top of the RAM is used by low-level startup code on secondary processors (boot pages). Update #3507.
Diffstat (limited to 'cpukit/score')
-rw-r--r--cpukit/score/src/wkspace.c70
1 file changed, 44 insertions, 26 deletions
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index 95c4127056..9f91ac5a5d 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -77,37 +77,43 @@ static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
return space;
}
-static uintptr_t _Workspace_Space_for_per_CPU_data( uintptr_t page_size )
+#ifdef RTEMS_SMP
+static void *_Workspace_Allocate_from_areas(
+ Heap_Area *areas,
+ size_t area_count,
+ uintptr_t size,
+ uintptr_t alignment
+)
{
- uintptr_t space;
+ size_t i;
-#ifdef RTEMS_SMP
- uintptr_t size;
+ for ( i = 0; i < area_count; ++i ) {
+ Heap_Area *area;
+ uintptr_t alloc_begin;
+ uintptr_t alloc_size;
- size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
- _Assert( size % CPU_CACHE_LINE_BYTES == 0 );
+ area = &areas[ i ];
+ alloc_begin = (uintptr_t) area->begin;
+ alloc_begin = ( alloc_begin + alignment - 1 ) & ~( alignment - 1 );
+ alloc_size = size;
+ alloc_size += alloc_begin - (uintptr_t) area->begin;
- if ( size > 0 ) {
- /*
- * Memory allocated with an alignment constraint is allocated from the end of
- * a free block. The last allocation may need one free block of minimum
- * size.
- */
- space = _Heap_Min_block_size( page_size );
+ if ( area->size >= alloc_size ) {
+ area->begin = (void *) ( alloc_begin + size );
+ area->size -= alloc_size;
- space += ( rtems_configuration_get_maximum_processors() - 1 )
- * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
- } else {
- space = 0;
+ return (void *) alloc_begin;
+ }
}
-#else
- space = 0;
-#endif
- return space;
+ return NULL;
}
+#endif
-static void _Workspace_Allocate_per_CPU_data( void )
+static void _Workspace_Allocate_per_CPU_data(
+ Heap_Area *areas,
+ size_t area_count
+)
{
#ifdef RTEMS_SMP
uintptr_t size;
@@ -126,11 +132,23 @@ static void _Workspace_Allocate_per_CPU_data( void )
for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
cpu = _Per_CPU_Get_by_index( cpu_index );
- cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
- _Assert( cpu->data != NULL );
+ cpu->data = _Workspace_Allocate_from_areas(
+ areas,
+ area_count,
+ size,
+ CPU_CACHE_LINE_BYTES
+ );
+
+ if( cpu->data == NULL ) {
+ _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
+ }
+
memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
}
}
+#else
+ (void) areas;
+ (void) area_count;
#endif
}
@@ -148,11 +166,12 @@ void _Workspace_Handler_initialization(
uintptr_t overhead;
size_t i;
+ _Workspace_Allocate_per_CPU_data( areas, area_count );
+
page_size = CPU_HEAP_ALIGNMENT;
remaining = rtems_configuration_get_work_space_size();
remaining += _Workspace_Space_for_TLS( page_size );
- remaining += _Workspace_Space_for_per_CPU_data( page_size );
init_or_extend = _Heap_Initialize;
do_zero = rtems_configuration_get_do_zero_of_workspace();
@@ -208,7 +227,6 @@ void _Workspace_Handler_initialization(
}
_Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
- _Workspace_Allocate_per_CPU_data();
}
void *_Workspace_Allocate(