From eea21eaca117ecd98afea164e1808d6530ef487f Mon Sep 17 00:00:00 2001
From: Sebastian Huber
Date: Fri, 13 Dec 2019 06:18:36 +0100
Subject: bsps: Rework work area initialization

The work area initialization was done by the BSP through
bsp_work_area_initialize(). This approach predated the system
initialization through the system initialization linker set. The
workspace and C program heap were unconditionally initialized. The aim
is to support RTEMS application configurations which do not need the
workspace and C program heap. In these configurations, the workspace
and C program heap should not get initialized.

Change all bsp_work_area_initialize() to implement _Memory_Get()
instead. Move the dirty memory, sbrk(), per-CPU data, workspace, and
malloc() heap initialization into separate system initialization
steps. This also makes it easier to test the individual initialization
steps.

This change adds a dependency on _Heap_Extend() to all BSPs. This
dependency will be removed in a follow-up change.

Update #3838.
---
 cpukit/include/rtems/confdefs.h            |  15 ----
 cpukit/include/rtems/malloc.h              |   6 +-
 cpukit/include/rtems/score/wkspace.h       |   9 +--
 cpukit/include/rtems/sysinit.h             |   7 +-
 cpukit/libcsupport/src/malloc_initialize.c |  57 +++++++++----
 cpukit/sapi/src/exinit.c                   |  13 +++
 cpukit/score/src/smp.c                     |  42 ++++++++++
 cpukit/score/src/wkspace.c                 | 123 ++++++-----------------------
 8 files changed, 131 insertions(+), 141 deletions(-)

(limited to 'cpukit')

diff --git a/cpukit/include/rtems/confdefs.h b/cpukit/include/rtems/confdefs.h
index c044f4842c..2605b513f4 100644
--- a/cpukit/include/rtems/confdefs.h
+++ b/cpukit/include/rtems/confdefs.h
@@ -1258,21 +1258,6 @@ extern rtems_initialization_tasks_table Initialization_tasks[];
 /**@{*/
 #include 
 
-#ifdef CONFIGURE_INIT
-  /**
-   * By default, RTEMS uses separate heaps for the RTEMS Workspace and
-   * the C Program Heap. The application can choose optionally to combine
-   * these to provide one larger memory pool. This is particularly
-   * useful in combination with the unlimited objects configuration.
-   */
-  #ifdef CONFIGURE_UNIFIED_WORK_AREAS
-    Heap_Control *RTEMS_Malloc_Heap = &_Workspace_Area;
-  #else
-    Heap_Control RTEMS_Malloc_Area;
-    Heap_Control *RTEMS_Malloc_Heap = &RTEMS_Malloc_Area;
-  #endif
-#endif
-
 #ifdef CONFIGURE_INIT
   /**
    * This configures the sbrk() support for the malloc family.
diff --git a/cpukit/include/rtems/malloc.h b/cpukit/include/rtems/malloc.h
index 7cdce1f94a..34bdbcb91e 100644
--- a/cpukit/include/rtems/malloc.h
+++ b/cpukit/include/rtems/malloc.h
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include  /* for malloc_walk() */
+#include 
 
 #include 
 
@@ -43,9 +44,8 @@ extern "C" {
 extern Heap_Control *RTEMS_Malloc_Heap;
 
 void RTEMS_Malloc_Initialize(
-  const Heap_Area *areas,
-  size_t area_count,
-  Heap_Initialization_or_extend_handler extend
+  const Memory_Information *mem,
+  Heap_Initialization_or_extend_handler extend
 );
 
 extern ptrdiff_t RTEMS_Malloc_Sbrk_amount;
diff --git a/cpukit/include/rtems/score/wkspace.h b/cpukit/include/rtems/score/wkspace.h
index 8428c9f957..8d0d3bc114 100644
--- a/cpukit/include/rtems/score/wkspace.h
+++ b/cpukit/include/rtems/score/wkspace.h
@@ -24,6 +24,7 @@
 
 #include 
 #include 
+#include 
 
 #ifdef __cplusplus
 extern "C" {
@@ -53,14 +54,12 @@ extern Heap_Control _Workspace_Area;
  *
  * This routine performs the initialization necessary for this handler.
  *
- * @param areas The heap area for the new workspace.
- * @param area_count The number of areas for the allocation.
+ * @param mem The memory information
  * @param extend The extension handler for the new workspace.
  */
 void _Workspace_Handler_initialization(
-  Heap_Area *areas,
-  size_t area_count,
-  Heap_Initialization_or_extend_handler extend
+  const Memory_Information *mem,
+  Heap_Initialization_or_extend_handler extend
 );
 
 /**
diff --git a/cpukit/include/rtems/sysinit.h b/cpukit/include/rtems/sysinit.h
index 087de59099..7edd313f6c 100644
--- a/cpukit/include/rtems/sysinit.h
+++ b/cpukit/include/rtems/sysinit.h
@@ -28,8 +28,13 @@ extern "C" {
  */
 #define RTEMS_SYSINIT_RECORD 000100
 #define RTEMS_SYSINIT_BSP_EARLY 000140
+#define RTEMS_SYSINIT_MEMORY 000180
+#define RTEMS_SYSINIT_DIRTY_MEMORY 0001c0
 #define RTEMS_SYSINIT_ISR_STACK 000200
-#define RTEMS_SYSINIT_BSP_WORK_AREAS 000200
+#define RTEMS_SYSINIT_PER_CPU_DATA 000220
+#define RTEMS_SYSINIT_SBRK 000240
+#define RTEMS_SYSINIT_WORKSPACE 000260
+#define RTEMS_SYSINIT_MALLOC 000280
 #define RTEMS_SYSINIT_BSP_START 000300
 #define RTEMS_SYSINIT_CPU_COUNTER 000400
 #define RTEMS_SYSINIT_INITIAL_EXTENSIONS 000500
diff --git a/cpukit/libcsupport/src/malloc_initialize.c b/cpukit/libcsupport/src/malloc_initialize.c
index dc94b489ff..520960d547 100644
--- a/cpukit/libcsupport/src/malloc_initialize.c
+++ b/cpukit/libcsupport/src/malloc_initialize.c
@@ -18,33 +18,59 @@
 #endif
 
 #include 
+#include 
+#include 
 
 #include "malloc_p.h"
 
+Heap_Control *RTEMS_Malloc_Heap;
+
+static void _Malloc_Initialize( void )
+{
+  RTEMS_Malloc_Initialize( _Memory_Get(), _Heap_Extend );
+}
+
+RTEMS_SYSINIT_ITEM(
+  _Malloc_Initialize,
+  RTEMS_SYSINIT_MALLOC,
+  RTEMS_SYSINIT_ORDER_MIDDLE
+);
+
 #ifdef RTEMS_NEWLIB
+static Heap_Control _Malloc_Heap;
+
 void RTEMS_Malloc_Initialize(
-  const Heap_Area *areas,
-  size_t area_count,
-  Heap_Initialization_or_extend_handler extend
+  const Memory_Information *mem,
+  Heap_Initialization_or_extend_handler extend
 )
 {
-  Heap_Control *heap = RTEMS_Malloc_Heap;
+  if ( rtems_configuration_get_unified_work_area() ) {
+    RTEMS_Malloc_Heap = &_Workspace_Area;
+  } else {
+    Heap_Control *heap;
+    Heap_Initialization_or_extend_handler init_or_extend;
+    uintptr_t page_size;
+    size_t i;
+
+    heap = &_Malloc_Heap;
+    RTEMS_Malloc_Heap = heap;
+    init_or_extend = _Heap_Initialize;
+    page_size = CPU_HEAP_ALIGNMENT;
 
-  if ( !rtems_configuration_get_unified_work_area() ) {
-    Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
-    uintptr_t page_size = CPU_HEAP_ALIGNMENT;
-    size_t i;
+    for (i = 0; i < _Memory_Get_count( mem ); ++i) {
+      Memory_Area *area;
+      uintptr_t space_available;
 
-    for (i = 0; i < area_count; ++i) {
-      const Heap_Area *area = &areas [i];
-      uintptr_t space_available = (*init_or_extend)(
+      area = _Memory_Get_area( mem, i );
+      space_available = ( *init_or_extend )(
         heap,
-        area->begin,
-        area->size,
+        _Memory_Get_free_begin( area ),
+        _Memory_Get_free_size( area ),
         page_size
       );
 
       if ( space_available > 0 ) {
+        _Memory_Consume( area, _Memory_Get_free_size( area ) );
         init_or_extend = extend;
       }
     }
@@ -56,9 +82,8 @@ void RTEMS_Malloc_Initialize(
 }
 #else
 void RTEMS_Malloc_Initialize(
-  Heap_Area *areas,
-  size_t area_count,
-  Heap_Initialization_or_extend_handler extend
+  const Memory_Information *mem,
+  Heap_Initialization_or_extend_handler extend
 )
 {
   /* FIXME: Dummy function */
diff --git a/cpukit/sapi/src/exinit.c b/cpukit/sapi/src/exinit.c
index 54e44515d3..196c9be576 100644
--- a/cpukit/sapi/src/exinit.c
+++ b/cpukit/sapi/src/exinit.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -57,6 +58,18 @@ _Objects_Information_table[ OBJECTS_APIS_LAST + 1 ] = {
   &_POSIX_Objects[ 0 ]
 };
 
+RTEMS_LINKER_RWSET(
+  _Per_CPU_Data,
+#if defined(RTEMS_SMP)
+  /*
+   * In SMP configurations, prevent false cache line sharing of per-processor
+   * data with a proper alignment.
+   */
+  RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
+#endif
+  char
+);
+
 static void rtems_initialize_data_structures(void)
 {
   /*
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 306d1ea4b4..b6394a8bdf 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -20,9 +20,14 @@
 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
+#include 
+
+#include 
 
 #if CPU_USE_DEFERRED_FP_SWITCH == TRUE
   #error "deferred FP switch not implemented for SMP"
@@ -252,3 +257,40 @@ void _SMP_Send_message_multicast(
     }
   }
 }
+
+static void _Per_CPU_Data_initialize( void )
+{
+  uintptr_t size;
+
+  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
+
+  if ( size > 0 ) {
+    const Memory_Information *mem;
+    Per_CPU_Control *cpu;
+    uint32_t cpu_index;
+    uint32_t cpu_max;
+
+    mem = _Memory_Get();
+    cpu = _Per_CPU_Get_by_index( 0 );
+    cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
+
+    cpu_max = rtems_configuration_get_maximum_processors();
+
+    for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
+      cpu = _Per_CPU_Get_by_index( cpu_index );
+      cpu->data = _Memory_Allocate( mem, size, CPU_CACHE_LINE_BYTES );
+
+      if( cpu->data == NULL ) {
+        _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
+      }
+
+      memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
+    }
+  }
+}
+
+RTEMS_SYSINIT_ITEM(
+  _Per_CPU_Data_initialize,
+  RTEMS_SYSINIT_PER_CPU_DATA,
+  RTEMS_SYSINIT_ORDER_MIDDLE
+);
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index d363b8d3b2..41c6cd3059 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -22,11 +22,11 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -35,18 +35,6 @@
   #include 
 #endif
 
-RTEMS_LINKER_RWSET(
-  _Per_CPU_Data,
-#if defined(RTEMS_SMP)
-  /*
-   * In SMP configurations, prevent false cache line sharing of per-processor
-   * data with a proper alignment.
-   */
-  RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
-#endif
-  char
-);
-
 Heap_Control _Workspace_Area;
 
 static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
@@ -84,85 +72,20 @@ static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
   return space;
 }
 
-#ifdef RTEMS_SMP
-static void *_Workspace_Allocate_from_areas(
-  Heap_Area *areas,
-  size_t area_count,
-  uintptr_t size,
-  uintptr_t alignment
-)
+static void _Workspace_Initialize( void )
 {
-  size_t i;
-
-  for ( i = 0; i < area_count; ++i ) {
-    Heap_Area *area;
-    uintptr_t alloc_begin;
-    uintptr_t alloc_size;
-
-    area = &areas[ i ];
-    alloc_begin = (uintptr_t) area->begin;
-    alloc_begin = ( alloc_begin + alignment - 1 ) & ~( alignment - 1 );
-    alloc_size = size;
-    alloc_size += alloc_begin - (uintptr_t) area->begin;
-
-    if ( area->size >= alloc_size ) {
-      area->begin = (void *) ( alloc_begin + size );
-      area->size -= alloc_size;
-
-      return (void *) alloc_begin;
-    }
-  }
-
-  return NULL;
+  _Workspace_Handler_initialization( _Memory_Get(), _Heap_Extend );
 }
-#endif
-
-static void _Workspace_Allocate_per_CPU_data(
-  Heap_Area *areas,
-  size_t area_count
-)
-{
-#ifdef RTEMS_SMP
-  uintptr_t size;
-
-  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
-
-  if ( size > 0 ) {
-    Per_CPU_Control *cpu;
-    uint32_t cpu_index;
-    uint32_t cpu_max;
-
-    cpu = _Per_CPU_Get_by_index( 0 );
-    cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );
-
-    cpu_max = rtems_configuration_get_maximum_processors();
-
-    for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
-      cpu = _Per_CPU_Get_by_index( cpu_index );
-      cpu->data = _Workspace_Allocate_from_areas(
-        areas,
-        area_count,
-        size,
-        CPU_CACHE_LINE_BYTES
-      );
-
-      if( cpu->data == NULL ) {
-        _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
-      }
-
-      memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
-    }
-  }
-#else
-  (void) areas;
-  (void) area_count;
-#endif
-}
+
+RTEMS_SYSINIT_ITEM(
+  _Workspace_Initialize,
+  RTEMS_SYSINIT_WORKSPACE,
+  RTEMS_SYSINIT_ORDER_MIDDLE
+);
 
 void _Workspace_Handler_initialization(
-  Heap_Area *areas,
-  size_t area_count,
-  Heap_Initialization_or_extend_handler extend
+  const Memory_Information *mem,
+  Heap_Initialization_or_extend_handler extend
 )
 {
   Heap_Initialization_or_extend_handler init_or_extend;
@@ -173,10 +96,7 @@ void _Workspace_Handler_initialization(
   uintptr_t overhead;
   size_t i;
 
-  _Workspace_Allocate_per_CPU_data( areas, area_count );
-
   page_size = CPU_HEAP_ALIGNMENT;
-
   remaining = rtems_configuration_get_work_space_size();
   remaining += _Workspace_Space_for_TLS( page_size );
 
@@ -185,25 +105,27 @@
   unified = rtems_configuration_get_unified_work_area();
   overhead = _Heap_Area_overhead( page_size );
 
-  for ( i = 0; i < area_count; ++i ) {
-    Heap_Area *area;
+  for ( i = 0; i < _Memory_Get_count( mem ); ++i ) {
+    Memory_Area *area;
+    uintptr_t free_size;
 
-    area = &areas[ i ];
+    area = _Memory_Get_area( mem, i );
+    free_size = _Memory_Get_free_size( area );
 
     if ( do_zero ) {
-      memset( area->begin, 0, area->size );
+      memset( _Memory_Get_free_begin( area ), 0, free_size );
     }
 
-    if ( area->size > overhead ) {
+    if ( free_size > overhead ) {
       uintptr_t space_available;
       uintptr_t size;
 
       if ( unified ) {
-        size = area->size;
+        size = free_size;
       } else {
         if ( remaining > 0 ) {
-          size = remaining < area->size - overhead ?
-            remaining + overhead : area->size;
+          size = remaining < free_size - overhead ?
+            remaining + overhead : free_size;
         } else {
           size = 0;
         }
@@ -211,13 +133,12 @@
 
       space_available = ( *init_or_extend )(
         &_Workspace_Area,
-        area->begin,
+        _Memory_Get_free_begin( area ),
         size,
         page_size
       );
 
-      area->begin = (char *) area->begin + size;
-      area->size -= size;
+      _Memory_Consume( area, size );
 
       if ( space_available < remaining ) {
         remaining -= space_available;
--
cgit v1.2.3
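
Note (illustration only, not part of the change): the BSP-side counterpart is
outside this cpukit-only excerpt. A minimal sketch of how a BSP might publish
its free memory through _Memory_Get() once bsp_work_area_initialize() is gone
could look as follows. The helpers _Memory_Initialize() and
MEMORY_INFORMATION_INITIALIZER() and the linker symbols WorkAreaBase and
WorkAreaEnd are assumptions made for this sketch, not taken from the patch
above.

  /* Hypothetical BSP-side sketch; helper names and linker symbols are assumptions. */
  #include <rtems/score/memory.h>
  #include <rtems/sysinit.h>

  /* Assumed linker symbols that bound the free RAM region. */
  extern char WorkAreaBase[];
  extern char WorkAreaEnd[];

  static Memory_Area _Memory_Areas[ 1 ];

  static const Memory_Information _Memory_Information =
    MEMORY_INFORMATION_INITIALIZER( _Memory_Areas );

  static void bsp_memory_initialize( void )
  {
    /* Publish the free RAM as the one and only memory area. */
    _Memory_Initialize( &_Memory_Areas[ 0 ], WorkAreaBase, WorkAreaEnd );
  }

  RTEMS_SYSINIT_ITEM(
    bsp_memory_initialize,
    RTEMS_SYSINIT_MEMORY,
    RTEMS_SYSINIT_ORDER_MIDDLE
  );

  const Memory_Information *_Memory_Get( void )
  {
    return &_Memory_Information;
  }

The later steps (RTEMS_SYSINIT_PER_CPU_DATA, RTEMS_SYSINIT_WORKSPACE,
RTEMS_SYSINIT_MALLOC) then draw from these areas via _Memory_Allocate() and
_Memory_Consume(), so a configuration that needs neither the workspace nor the
C program heap simply leaves the areas unconsumed.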