summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src/wkspace.c
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2019-12-13 06:18:36 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2020-02-04 06:06:41 +0100
commiteea21eaca117ecd98afea164e1808d6530ef487f (patch)
treeca4e15da28b71a5ecca5fc357798261a39b3f250 /cpukit/score/src/wkspace.c
parentbsps: Remove uses of BSP_GET_WORK_AREA_DEBUG (diff)
downloadrtems-eea21eaca117ecd98afea164e1808d6530ef487f.tar.bz2
bsps: Rework work area initialization
The work area initialization was done by the BSP through bsp_work_area_initialize(). This approach predated the system initialization through the system initialization linker set. The workspace and C program heap were unconditionally initialized. The aim is to support RTEMS application configurations which do not need the workspace and C program heap. In these configurations, the workspace and C program heap should not get initialized. Change all bsp_work_area_initialize() to implement _Memory_Get() instead. Move the dirty memory, sbrk(), per-CPU data, workspace, and malloc() heap initialization into separate system initialization steps. This makes it also easier to test the individual initialization steps. This change adds a dependency on _Heap_Extend() to all BSPs. This dependency will be removed in a follow-up change. Update #3838.
Diffstat (limited to 'cpukit/score/src/wkspace.c')
-rw-r--r--cpukit/score/src/wkspace.c123
1 file changed, 22 insertions, 101 deletions
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index d363b8d3b2..41c6cd3059 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -22,11 +22,11 @@
#include <rtems/score/assert.h>
#include <rtems/score/heapimpl.h>
#include <rtems/score/interr.h>
-#include <rtems/score/percpudata.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/tls.h>
#include <rtems/posix/pthread.h>
#include <rtems/config.h>
+#include <rtems/sysinit.h>
#include <string.h>
@@ -35,18 +35,6 @@
#include <rtems/bspIo.h>
#endif
-RTEMS_LINKER_RWSET(
- _Per_CPU_Data,
-#if defined(RTEMS_SMP)
- /*
- * In SMP configurations, prevent false cache line sharing of per-processor
- * data with a proper alignment.
- */
- RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
-#endif
- char
-);
-
Heap_Control _Workspace_Area;
static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
@@ -84,85 +72,20 @@ static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
return space;
}
-#ifdef RTEMS_SMP
-static void *_Workspace_Allocate_from_areas(
- Heap_Area *areas,
- size_t area_count,
- uintptr_t size,
- uintptr_t alignment
-)
+static void _Workspace_Initialize( void )
{
- size_t i;
-
- for ( i = 0; i < area_count; ++i ) {
- Heap_Area *area;
- uintptr_t alloc_begin;
- uintptr_t alloc_size;
-
- area = &areas[ i ];
- alloc_begin = (uintptr_t) area->begin;
- alloc_begin = ( alloc_begin + alignment - 1 ) & ~( alignment - 1 );
- alloc_size = size;
- alloc_size += alloc_begin - (uintptr_t) area->begin;
-
- if ( area->size >= alloc_size ) {
- area->begin = (void *) ( alloc_begin + size );
- area->size -= alloc_size;
-
- return (void *) alloc_begin;
- }
- }
-
- return NULL;
+ _Workspace_Handler_initialization( _Memory_Get(), _Heap_Extend );
}
-#endif
-
-static void _Workspace_Allocate_per_CPU_data(
- Heap_Area *areas,
- size_t area_count
-)
-{
-#ifdef RTEMS_SMP
- uintptr_t size;
- size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
-
- if ( size > 0 ) {
- Per_CPU_Control *cpu;
- uint32_t cpu_index;
- uint32_t cpu_max;
-
- cpu = _Per_CPU_Get_by_index( 0 );
- cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
-
- cpu_max = rtems_configuration_get_maximum_processors();
-
- for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
- cpu = _Per_CPU_Get_by_index( cpu_index );
- cpu->data = _Workspace_Allocate_from_areas(
- areas,
- area_count,
- size,
- CPU_CACHE_LINE_BYTES
- );
-
- if( cpu->data == NULL ) {
- _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
- }
-
- memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
- }
- }
-#else
- (void) areas;
- (void) area_count;
-#endif
-}
+RTEMS_SYSINIT_ITEM(
+ _Workspace_Initialize,
+ RTEMS_SYSINIT_WORKSPACE,
+ RTEMS_SYSINIT_ORDER_MIDDLE
+);
void _Workspace_Handler_initialization(
- Heap_Area *areas,
- size_t area_count,
- Heap_Initialization_or_extend_handler extend
+ const Memory_Information *mem,
+ Heap_Initialization_or_extend_handler extend
)
{
Heap_Initialization_or_extend_handler init_or_extend;
@@ -173,10 +96,7 @@ void _Workspace_Handler_initialization(
uintptr_t overhead;
size_t i;
- _Workspace_Allocate_per_CPU_data( areas, area_count );
-
page_size = CPU_HEAP_ALIGNMENT;
-
remaining = rtems_configuration_get_work_space_size();
remaining += _Workspace_Space_for_TLS( page_size );
@@ -185,25 +105,27 @@ void _Workspace_Handler_initialization(
unified = rtems_configuration_get_unified_work_area();
overhead = _Heap_Area_overhead( page_size );
- for ( i = 0; i < area_count; ++i ) {
- Heap_Area *area;
+ for ( i = 0; i < _Memory_Get_count( mem ); ++i ) {
+ Memory_Area *area;
+ uintptr_t free_size;
- area = &areas[ i ];
+ area = _Memory_Get_area( mem, i );
+ free_size = _Memory_Get_free_size( area );
if ( do_zero ) {
- memset( area->begin, 0, area->size );
+ memset( _Memory_Get_free_begin( area ), 0, free_size );
}
- if ( area->size > overhead ) {
+ if ( free_size > overhead ) {
uintptr_t space_available;
uintptr_t size;
if ( unified ) {
- size = area->size;
+ size = free_size;
} else {
if ( remaining > 0 ) {
- size = remaining < area->size - overhead ?
- remaining + overhead : area->size;
+ size = remaining < free_size - overhead ?
+ remaining + overhead : free_size;
} else {
size = 0;
}
@@ -211,13 +133,12 @@ void _Workspace_Handler_initialization(
space_available = ( *init_or_extend )(
&_Workspace_Area,
- area->begin,
+ _Memory_Get_free_begin( area ),
size,
page_size
);
- area->begin = (char *) area->begin + size;
- area->size -= size;
+ _Memory_Consume( area, size );
if ( space_available < remaining ) {
remaining -= space_available;