summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--c/src/ada/rtems.adb30
-rw-r--r--c/src/ada/rtems.ads8
-rw-r--r--cpukit/ChangeLog63
-rw-r--r--cpukit/ada/rtems.adb30
-rw-r--r--cpukit/ada/rtems.ads8
-rw-r--r--cpukit/libcsupport/src/malloc.c13
-rw-r--r--cpukit/rtems/Makefile.am3
-rw-r--r--cpukit/rtems/include/rtems/rtems/region.h31
-rw-r--r--cpukit/rtems/src/regionprocessqueue.c83
-rw-r--r--cpukit/rtems/src/regionresizesegment.c101
-rw-r--r--cpukit/rtems/src/regionreturnsegment.c37
-rw-r--r--cpukit/score/Makefile.am3
-rw-r--r--cpukit/score/include/rtems/score/heap.h46
-rw-r--r--cpukit/score/src/heap.c86
-rw-r--r--cpukit/score/src/heapallocate.c2
-rw-r--r--cpukit/score/src/heapallocatealigned.c3
-rw-r--r--cpukit/score/src/heapfree.c5
-rw-r--r--cpukit/score/src/heapresizeblock.c170
-rw-r--r--cpukit/score/src/heapsizeofuserarea.c3
-rw-r--r--cpukit/score/src/heapwalk.c8
20 files changed, 642 insertions, 91 deletions
diff --git a/c/src/ada/rtems.adb b/c/src/ada/rtems.adb
index 72ee262e59..3043ad668c 100644
--- a/c/src/ada/rtems.adb
+++ b/c/src/ada/rtems.adb
@@ -1505,10 +1505,38 @@ package body RTEMS is
Segment,
Size_Base'Unchecked_Access
);
- Size := SIZE_Base;
+ Size := Size_Base;
end Region_Get_Segment_Size;
+ procedure Region_Resize_Segment (
+ ID : in RTEMS.ID;
+ Segment : in RTEMS.Address;
+ Size : in RTEMS.Unsigned32;
+ Old_Size : out RTEMS.Unsigned32;
+ Result : out RTEMS.Status_Codes
+ ) is
+ function Region_Resize_Segment_Base (
+ ID : RTEMS.ID;
+ Segment : RTEMS.Address;
+ Size : RTEMS.Unsigned32;
+ Old_Size : access RTEMS.Unsigned32
+ ) return RTEMS.Status_Codes;
+ pragma Import (C, Region_Resize_Segment_Base,
+ "rtems_region_resize_segment");
+ Old_Size_Base : aliased RTEMS.Unsigned32;
+ begin
+
+ Result := Region_Resize_Segment_Base (
+ ID,
+ Segment,
+ Size,
+ Old_Size_Base'Unchecked_Access
+ );
+ Old_Size := Old_Size_Base;
+
+ end Region_Resize_Segment;
+
procedure Region_Return_Segment (
ID : in RTEMS.ID;
Segment : in RTEMS.Address;
diff --git a/c/src/ada/rtems.ads b/c/src/ada/rtems.ads
index d95b2571d9..752a6fe064 100644
--- a/c/src/ada/rtems.ads
+++ b/c/src/ada/rtems.ads
@@ -1299,6 +1299,14 @@ pragma Elaborate_Body (RTEMS);
Result : out RTEMS.Status_Codes
);
+ procedure Region_Resize_Segment (
+ ID : in RTEMS.ID;
+ Segment : in RTEMS.Address;
+ Size : in RTEMS.Unsigned32;
+ Old_Size : out RTEMS.Unsigned32;
+ Result : out RTEMS.Status_Codes
+ );
+
procedure Region_Return_Segment (
ID : in RTEMS.ID;
Segment : in RTEMS.Address;
diff --git a/cpukit/ChangeLog b/cpukit/ChangeLog
index 1e0398fbde..6cc1789bb0 100644
--- a/cpukit/ChangeLog
+++ b/cpukit/ChangeLog
@@ -1,3 +1,66 @@
+2005-05-14 Sergei Organov <osv@topconrd.ru>
+
+ PR 746/rtems
+ Optimize realloc(). The problem is that realloc() can neither grow
+ nor shrink efficiently the current memory region without support
+ from underlying heap/region modules. The patch introduces one new
+ routine for each of heap and region modules, _Heap_Resize_block(),
+ and rtems_region_resize_segment(), respectively, and uses the
+ latter to optimize realloc().
+
+ The implementation of _Heap_Resize_block() lead to changing of the
+ heap allocation strategy: now the heap manager, when splits larger
+ free block into used and new free parts, makes the first part of
+ the block used, not the last one as it was before. Due to this new
+ strategy, _Heap_Resize_block() never needs to change the user
+ pointer.
+
+ Caveat: unlike previous heap implementation, first few bytes of
+ the contents of the memory allocated from the heap are now almost
+ never all zero. This can trigger bugs in client code that have not
+ been visible before this patch.
+
+ * libcsupport/src/malloc.c (realloc): try to resize segment in
+ place using new rtems_region_resize_segment() routine before
+ falling back to the malloc()/free() method.
+ * score/src/heap.c:
+ (_Heap_Initialize): change initial heap layout to reflect new
+ allocation strategy of using of the lower part of a previously
+ free block when splitting it for the purpose of allocation.
+ (_Heap_Block_allocate): when split, make the lower part used, and
+ leave the upper part free. Return type changed from Heap_Block* to
+ uint32_t.
+ * score/include/rtems/score/heap.h:
+ (Heap_Statistics): added 'resizes' field.
+ (Heap_Resize_status): new enum.
+ (_Heap_Resize_block): new routine.
+ (_Heap_Block_allocate): return type changed from Heap_Block* to
+ uint32_t.
+ * score/src/heapwalk.c: reflect new heap layout in checks.
+ * score/src/heapsizeofuserarea.c: more assertions added.
+ * score/src/heapresizeblock.c: new file.
+ (_Heap_Resize_block): new routine.
+ * score/src/heapfree.c: reverse the checks _Heap_Is_block_in() and
+ _Heap_Is_prev_used() on entry to be in this order.
+ * score/src/heapallocate.c, score/src/heapallocatealigned.c:
+ ignore return value of _Heap_Block_allocate().
+ * score/Makefile.am (HEAP_C_FILES): added src/heapresizeblock.c.
+ * rtems/include/rtems/rtems/region.h:
+ (rtems_region_resize_segment): new interface routine.
+ (_Region_Process_queue): new internal routine called from
+ rtems_region_resize_segment() and rtems_region_return_segment().
+ * rtems/src/regionreturnsegment.c: move queue management code into
+ the new internal routine _Region_Process_queue() and call it.
+
+ * rtems/src/regionresizesegment.c: new file.
+ (rtems_region_resize_segment): new interface routine.
+ * rtems/src/regionprocessqueue.c: new file.
+ (_Region_Process_queue): new internal routine containing queue
+ management code factored out from 'regionreturnsegment.c'.
+ * rtems/Makefile.am (REGION_C_FILES): Added
+ src/regionresizesegment.c, and src/regionprocessqueue.c.
+ * ada/rtems.adb, ada/rtems.ads: Added Region_Resize_Segment.
+
2005-05-20 Eric Norum <norume@aps.anl.gov>
PR 793/networking
diff --git a/cpukit/ada/rtems.adb b/cpukit/ada/rtems.adb
index 72ee262e59..3043ad668c 100644
--- a/cpukit/ada/rtems.adb
+++ b/cpukit/ada/rtems.adb
@@ -1505,10 +1505,38 @@ package body RTEMS is
Segment,
Size_Base'Unchecked_Access
);
- Size := SIZE_Base;
+ Size := Size_Base;
end Region_Get_Segment_Size;
+ procedure Region_Resize_Segment (
+ ID : in RTEMS.ID;
+ Segment : in RTEMS.Address;
+ Size : in RTEMS.Unsigned32;
+ Old_Size : out RTEMS.Unsigned32;
+ Result : out RTEMS.Status_Codes
+ ) is
+ function Region_Resize_Segment_Base (
+ ID : RTEMS.ID;
+ Segment : RTEMS.Address;
+ Size : RTEMS.Unsigned32;
+ Old_Size : access RTEMS.Unsigned32
+ ) return RTEMS.Status_Codes;
+ pragma Import (C, Region_Resize_Segment_Base,
+ "rtems_region_resize_segment");
+ Old_Size_Base : aliased RTEMS.Unsigned32;
+ begin
+
+ Result := Region_Resize_Segment_Base (
+ ID,
+ Segment,
+ Size,
+ Old_Size_Base'Unchecked_Access
+ );
+ Old_Size := Old_Size_Base;
+
+ end Region_Resize_Segment;
+
procedure Region_Return_Segment (
ID : in RTEMS.ID;
Segment : in RTEMS.Address;
diff --git a/cpukit/ada/rtems.ads b/cpukit/ada/rtems.ads
index d95b2571d9..752a6fe064 100644
--- a/cpukit/ada/rtems.ads
+++ b/cpukit/ada/rtems.ads
@@ -1299,6 +1299,14 @@ pragma Elaborate_Body (RTEMS);
Result : out RTEMS.Status_Codes
);
+ procedure Region_Resize_Segment (
+ ID : in RTEMS.ID;
+ Segment : in RTEMS.Address;
+ Size : in RTEMS.Unsigned32;
+ Old_Size : out RTEMS.Unsigned32;
+ Result : out RTEMS.Status_Codes
+ );
+
procedure Region_Return_Segment (
ID : in RTEMS.ID;
Segment : in RTEMS.Address;
diff --git a/cpukit/libcsupport/src/malloc.c b/cpukit/libcsupport/src/malloc.c
index 64d3a715e7..91acf92c44 100644
--- a/cpukit/libcsupport/src/malloc.c
+++ b/cpukit/libcsupport/src/malloc.c
@@ -308,7 +308,7 @@ void *realloc(
}
/*
- * Continue with calloc().
+ * Continue with realloc().
*/
if ( !ptr )
return malloc( size );
@@ -318,6 +318,17 @@ void *realloc(
return (void *) 0;
}
+ status =
+ rtems_region_resize_segment( RTEMS_Malloc_Heap, ptr, size, &old_size );
+
+ if( status == RTEMS_SUCCESSFUL ) {
+ return ptr;
+ }
+ else if ( status != RTEMS_UNSATISFIED ) {
+ errno = EINVAL;
+ return (void *) 0;
+ }
+
new_area = malloc( size );
MSBUMP(malloc_calls, -1); /* subtract off the malloc */
diff --git a/cpukit/rtems/Makefile.am b/cpukit/rtems/Makefile.am
index 947cb1a338..9486bedefb 100644
--- a/cpukit/rtems/Makefile.am
+++ b/cpukit/rtems/Makefile.am
@@ -105,7 +105,8 @@ SIGNAL_C_FILES = src/signal.c src/signalcatch.c src/signalsend.c
REGION_C_FILES = src/region.c src/regioncreate.c src/regiondelete.c \
src/regionextend.c src/regiongetsegment.c src/regiongetsegmentsize.c \
src/regionident.c src/regionreturnsegment.c src/regiongetinfo.c \
- src/regiongetfreeinfo.c
+ src/regiongetfreeinfo.c src/regionresizesegment.c \
+ src/regionprocessqueue.c
PARTITION_C_FILES = src/part.c src/partcreate.c src/partdelete.c \
src/partgetbuffer.c src/partident.c src/partreturnbuffer.c
diff --git a/cpukit/rtems/include/rtems/rtems/region.h b/cpukit/rtems/include/rtems/rtems/region.h
index a27dd7879b..9c8a0cffa9 100644
--- a/cpukit/rtems/include/rtems/rtems/region.h
+++ b/cpukit/rtems/include/rtems/rtems/region.h
@@ -232,8 +232,39 @@ rtems_status_code rtems_region_return_segment(
void *segment
);
+/*
+ * rtems_region_resize_segment
+ *
+ * DESCRIPTION:
+ *
+ * This routine implements the rtems_region_resize_segment directive. It
+ * tries to resize segment in the region associated with 'id' to the new size
+ * 'size' in place. The first 'size' or old size bytes of the segment
+ * (whatever is less) are guaranteed to remain unmodified. The segment must
+ * have been previously allocated from the same region. If resizing the
+ * segment results in enough memory being available to satisfy the
+ * rtems_region_get_segment of the first blocked task, then that task and as
+ * many subsequent tasks as possible will be unblocked with their requests
+ * satisfied.
+ * Returns:
+ * RTEMS_SUCCESSFUL - operation successful
+ * RTEMS_UNSATISFIED - the segment can't be resized in place
+ * any other code - failure.
+ * On RTEMS_SUCCESSFUL or RTEMS_UNSATISFIED exit it returns into the
+ * 'old_size' the old size in bytes of the user memory area of the specified
+ * segment.
+ */
+
+rtems_status_code rtems_region_resize_segment(
+ Objects_Id id,
+ void *segment,
+ size_t size,
+ size_t *old_size
+);
+
#ifndef __RTEMS_APPLICATION__
#include <rtems/rtems/region.inl>
+extern void _Region_Process_queue(Region_Control *the_region);
#endif
#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/rtems/regionmp.h>
diff --git a/cpukit/rtems/src/regionprocessqueue.c b/cpukit/rtems/src/regionprocessqueue.c
new file mode 100644
index 0000000000..6fdb216989
--- /dev/null
+++ b/cpukit/rtems/src/regionprocessqueue.c
@@ -0,0 +1,83 @@
+/*
+ * Region Manager
+ *
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#include <rtems/system.h>
+#include <rtems/rtems/status.h>
+#include <rtems/rtems/support.h>
+#include <rtems/score/object.h>
+#include <rtems/rtems/options.h>
+#include <rtems/rtems/region.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/apimutex.h>
+
+/*PAGE
+ *
+ * _Region_Process_queue
+ *
+ * If enough memory is available to satisfy the rtems_region_get_segment of
+ * the first blocked task, then that task and as many subsequent tasks as
+ * possible will be unblocked with their requests satisfied.
+ *
+ * Input parameters:
+ * the_region - the region
+ *
+ * Output parameters:
+ * none
+ */
+
+void _Region_Process_queue(
+ Region_Control *the_region
+)
+{
+ Thread_Control *the_thread;
+ void *the_segment;
+ /*
+ * Switch from using the memory allocation mutex to using a
+ * dispatching disabled critical section. We have to do this
+ * because this thread may unblock one or more threads that were
+ * waiting on memory.
+ *
+ * NOTE: Be sure to disable dispatching before unlocking the mutex
+ * since we do not want to open a window where a context
+ * switch could occur.
+ */
+ _Thread_Disable_dispatch();
+ _RTEMS_Unlock_allocator();
+
+ /*
+ * NOTE: The following loop is O(n) where n is the number of
+ * threads whose memory request is satisfied.
+ */
+ for ( ; ; ) {
+ the_thread = _Thread_queue_First( &the_region->Wait_queue );
+
+ if ( the_thread == NULL )
+ break;
+
+ the_segment = (void **) _Region_Allocate_segment(
+ the_region,
+ the_thread->Wait.count
+ );
+
+ if ( the_segment == NULL )
+ break;
+
+ *(void **)the_thread->Wait.return_argument = the_segment;
+ the_region->number_of_used_blocks += 1;
+ _Thread_queue_Extract( &the_region->Wait_queue, the_thread );
+ the_thread->Wait.return_code = RTEMS_SUCCESSFUL;
+ }
+ _Thread_Enable_dispatch();
+}
diff --git a/cpukit/rtems/src/regionresizesegment.c b/cpukit/rtems/src/regionresizesegment.c
new file mode 100644
index 0000000000..b83ed2c6e6
--- /dev/null
+++ b/cpukit/rtems/src/regionresizesegment.c
@@ -0,0 +1,101 @@
+/*
+ * Region Manager
+ *
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/system.h>
+#include <rtems/rtems/status.h>
+#include <rtems/rtems/support.h>
+#include <rtems/score/object.h>
+#include <rtems/rtems/options.h>
+#include <rtems/rtems/region.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/apimutex.h>
+
+/*PAGE
+ *
+ * rtems_region_resize_segment
+ *
+ * This directive will try to resize segment to the new size 'size'
+ * "in place".
+ *
+ * Input parameters:
+ * id - region id
+ * segment - pointer to segment address
+ * size - new required size
+ *
+ * Output parameters:
+ * RTEMS_SUCCESSFUL - if successful
+ * error code - if unsuccessful
+ */
+
+rtems_status_code rtems_region_resize_segment(
+ Objects_Id id,
+ void *segment,
+ size_t size,
+ size_t *old_size
+)
+{
+ register Region_Control *the_region;
+ Objects_Locations location;
+ Heap_Resize_status status;
+ uint32_t avail_size;
+ uint32_t osize;
+
+ if ( !old_size )
+ return RTEMS_INVALID_ADDRESS;
+
+ _RTEMS_Lock_allocator();
+ the_region = _Region_Get( id, &location );
+ switch ( location ) {
+
+ case OBJECTS_REMOTE: /* this error cannot be returned */
+ _RTEMS_Unlock_allocator();
+ return RTEMS_INTERNAL_ERROR;
+
+ case OBJECTS_ERROR:
+ _RTEMS_Unlock_allocator();
+ return RTEMS_INVALID_ID;
+
+ case OBJECTS_LOCAL:
+
+ _Region_Debug_Walk( the_region, 7 );
+
+ status = _Heap_Resize_block(
+ &the_region->Memory,
+ segment,
+ (uint32_t) size,
+ &osize,
+ &avail_size
+ );
+ *old_size = (uint32_t) osize;
+
+ _Region_Debug_Walk( the_region, 8 );
+
+ if( status == HEAP_RESIZE_SUCCESSFUL && avail_size > 0 )
+ _Region_Process_queue( the_region ); /* unlocks allocator internally */
+ else
+ _RTEMS_Unlock_allocator();
+
+ return
+ (status == HEAP_RESIZE_SUCCESSFUL) ? RTEMS_SUCCESSFUL :
+ (status == HEAP_RESIZE_UNSATISFIED) ? RTEMS_UNSATISFIED :
+ RTEMS_INVALID_ADDRESS;
+ }
+
+ return RTEMS_INTERNAL_ERROR; /* unreached - only to remove warnings */
+}
diff --git a/cpukit/rtems/src/regionreturnsegment.c b/cpukit/rtems/src/regionreturnsegment.c
index ad8037fe6b..dc080e5ed5 100644
--- a/cpukit/rtems/src/regionreturnsegment.c
+++ b/cpukit/rtems/src/regionreturnsegment.c
@@ -55,9 +55,7 @@ rtems_status_code rtems_region_return_segment(
)
{
register Region_Control *the_region;
- Thread_Control *the_thread;
Objects_Locations location;
- void **the_segment;
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
uint32_t size;
#endif
@@ -81,7 +79,7 @@ rtems_status_code rtems_region_return_segment(
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
if ( _Heap_Size_of_user_area( &the_region->Memory, segment, &size ) ) {
- memset(segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size);
+ memset( segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size );
} else {
_RTEMS_Unlock_allocator();
return RTEMS_INVALID_ADDRESS;
@@ -99,38 +97,7 @@ rtems_status_code rtems_region_return_segment(
the_region->number_of_used_blocks -= 1;
- /*
- * Switch from using the memory allocation mutex to using a
- * dispatching disabled critical section. We have to do this
- * because this thread may unblock one or more threads that were
- * waiting on memory.
- *
- * NOTE: The following loop is O(n) where n is the number of
- * threads whose memory request is satisfied.
- */
- _RTEMS_Unlock_allocator();
- _Thread_Disable_dispatch();
-
- for ( ; ; ) {
- the_thread = _Thread_queue_First( &the_region->Wait_queue );
-
- if ( the_thread == NULL )
- break;
-
- the_segment = (void **) _Region_Allocate_segment(
- the_region,
- the_thread->Wait.count
- );
-
- if ( the_segment == NULL )
- break;
-
- *(void **)the_thread->Wait.return_argument = the_segment;
- the_region->number_of_used_blocks += 1;
- _Thread_queue_Extract( &the_region->Wait_queue, the_thread );
- the_thread->Wait.return_code = RTEMS_SUCCESSFUL;
- }
- _Thread_Enable_dispatch();
+ _Region_Process_queue(the_region); /* unlocks allocator internally */
return RTEMS_SUCCESSFUL;
}
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 2febb854e8..30e6394843 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -98,7 +98,8 @@ CORE_SEMAPHORE_C_FILES = src/coresem.c src/coresemflush.c src/coresemseize.c \
HEAP_C_FILES = src/heap.c src/heapallocate.c src/heapextend.c src/heapfree.c \
src/heapsizeofuserarea.c src/heapwalk.c src/heapgetinfo.c \
- src/heapgetfreeinfo.c src/heapallocatealigned.c
+ src/heapgetfreeinfo.c src/heapallocatealigned.c \
+ src/heapresizeblock.c
OBJECT_C_FILES = src/object.c src/objectallocate.c \
src/objectallocatebyindex.c src/objectclearname.c \
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index da0aea361d..4ec7396854 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -165,6 +165,8 @@ typedef struct Heap_Statistics_tag {
uint32_t searches;
/** total number of suceessful calls to free */
uint32_t frees;
+ /** total number of successful resizes */
+ uint32_t resizes;
} Heap_Statistics;
/**
@@ -190,7 +192,7 @@ typedef struct {
} Heap_Control;
/**
- * Status codes for heap_extend
+ * Status codes for _Heap_Extend
*/
typedef enum {
@@ -200,6 +202,16 @@ typedef enum {
} Heap_Extend_status;
/**
+ * Status codes for _Heap_Resize_block
+ */
+
+typedef enum {
+ HEAP_RESIZE_SUCCESSFUL,
+ HEAP_RESIZE_UNSATISFIED,
+ HEAP_RESIZE_FATAL_ERROR
+} Heap_Resize_status;
+
+/**
* Status codes for _Heap_Get_information
*/
@@ -326,6 +338,36 @@ boolean _Heap_Size_of_user_area(
size_t *size
);
+/*
+ * This function tries to resize in place the block that is pointed to by the
+ * @a starting_address to the new @a size.
+ *
+ * @param the_heap (in) is the heap to operate upon
+ * @param starting_address (in) is the starting address of the user block
+ * to be resized
+ * @param size (in) is the new size
+ * @param old_mem_size (in) points to a user area to return the size of the
+ * user memory area of the block before resizing.
+ * @param avail_mem_size (in) points to a user area to return the size of
+ * the user memory area of the free block that has been enlarged or
+ * created due to resizing, 0 if none.
+ * @return HEAP_RESIZE_SUCCESSFUL if successfully able to resize the block,
+ * HEAP_RESIZE_UNSATISFIED if the block can't be resized in place,
+ * HEAP_RESIZE_FATAL_ERROR if failure
+ * @return *old_mem_size filled in with the size of the user memory area of
+ * the block before resizing.
+ * @return *avail_mem_size filled in with the size of the user memory area
+ * of the free block that has been enlarged or created due to
+ * resizing, 0 if none.
+ */
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *the_heap,
+ void *starting_address,
+ uint32_t size,
+ uint32_t *old_mem_size,
+ uint32_t *avail_mem_size
+);
+
/**
* This routine returns the block of memory which begins
* at @a starting_address to @a the_heap. Any coalescing which is
@@ -404,7 +446,7 @@ extern uint32_t _Heap_Calc_block_size(
uint32_t page_size,
uint32_t min_size);
-extern Heap_Block* _Heap_Block_allocate(
+extern uint32_t _Heap_Block_allocate(
Heap_Control* the_heap,
Heap_Block* the_block,
uint32_t alloc_size);
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 00dda0afb0..e6c9cd920c 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -44,7 +44,7 @@ static uint32_t instance = 0;
* | unused space due to alignment |
* | size < page_size |
* 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * | prev_size = page_size |
* 4 +--------------------------------+
* | size = size0 | 1 |
* 8 +---------------------+----------+ <- aligned on page_size
@@ -59,7 +59,7 @@ static uint32_t instance = 0;
* size0 +--------------------------------+ <- last dummy block
* | prev_size = size0 |
* +4 +--------------------------------+
- * | size = 0 (arbitrary) | 0 | <- prev block is free
+ * | size = page_size | 0 | <- prev block is free
* +8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
@@ -74,36 +74,36 @@ static uint32_t instance = 0;
* +--------------------------------+ <- begin = starting_address
* | unused space due to alignment |
* | size < page_size |
- * 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * 0 +--------------------------------+ <- used block
+ * | prev_size = page_size |
* 4 +--------------------------------+
- * | size = S = size0 - BSIZE | 1 |
- * 8 +---------------------+----------+ <- aligned on page_size
- * | next = HEAP_TAIL | |
- * 12 +---------------------+ |
- * | prev = HEAP_HEAD | memory |
- * +---------------------+ |
- * | available |
- * | |
- * | for allocation |
- * | |
- * S +--------------------------------+ <- used block
- * | prev_size = size0 - BSIZE |
- * +4 +--------------------------------+
- * | size = BSIZE | 0 | <- prev block is free
- * +8 +--------------------------------+ <- aligned on page_size
+ * | size = BSIZE | 1 | <- prev block is used
+ * 8 +--------------------------------+ <- aligned on page_size
* | . | Pointer returned to the user
- * | . | is (S+8) for _Heap_Allocate()
+ * | . | is 8 for _Heap_Allocate()
* | . | and is in range
- * S + 8 + | user-accessible | [S+8,S+8+page_size) for
- * page_size+- - - - - -+ _Heap_Allocate_aligned()
+ * 8 + | user-accessible | [8,8+page_size) for
+ * page_size +- - - - - -+ _Heap_Allocate_aligned()
* | area |
* | . |
- * S + BSIZE +- - - - - . - - - - -+ <- last dummy block
+ * BSIZE +- - - - - . - - - - -+ <- free block
* | . |
- * +4 +--------------------------------+
- * | size = 0 (arbitrary) | 1 | <- prev block is used
- * +8 +--------------------------------+ <- aligned on page_size
+ * BSIZE +4 +--------------------------------+
+ * | size = S = size0 - BSIZE | 1 | <- prev block is used
+ * BSIZE +8 +-------------------+------------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * BSIZE +12 +-------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +-------------------+ |
+ * | . available |
+ * | . |
+ * | . for |
+ * | . |
+ * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
+ * | prev_size = S | |
+ * +S+4 +-------------------+------------+
+ * | size = page_size | 0 | <- prev block is free
+ * +S+8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
* +--------------------------------+ <- end = begin + size
@@ -160,7 +160,7 @@ uint32_t _Heap_Initialize(
the_block = (Heap_Block *) aligned_start;
- the_block->prev_size = HEAP_PREV_USED;
+ the_block->prev_size = page_size;
the_block->size = the_size | HEAP_PREV_USED;
the_block->next = _Heap_Tail( the_heap );
the_block->prev = _Heap_Head( the_heap );
@@ -175,7 +175,7 @@ uint32_t _Heap_Initialize(
the_block = _Heap_Block_at( the_block, the_size );
the_heap->final = the_block; /* Permanent final block of the heap */
the_block->prev_size = the_size; /* Previous block is free */
- the_block->size = 0; /* This is the only block with size=0 */
+ the_block->size = page_size;
stats->size = size;
stats->free_size = the_size;
@@ -187,6 +187,7 @@ uint32_t _Heap_Initialize(
stats->allocs = 0;
stats->searches = 0;
stats->frees = 0;
+ stats->resizes = 0;
stats->instance = instance++;
return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
@@ -213,15 +214,17 @@ uint32_t _Heap_Calc_block_size(
uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
_Heap_Align_up(&block_size, page_size);
if(block_size < min_size) block_size = min_size;
+ /* 'block_size' becomes <= 'size' if and only if overflow occured. */
return (block_size > size) ? block_size : 0;
}
/*
* Allocate block of size 'alloc_size' from 'the_block' belonging to
- * 'the_heap'. Either split 'the_block' or allocate it entirely.
- * Return the block allocated.
+ * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
+ * When split, make the lower part used, and leave the upper part free.
+ * Return the size of allocated block.
*/
-Heap_Block* _Heap_Block_allocate(
+unsigned32 _Heap_Block_allocate(
Heap_Control* the_heap,
Heap_Block* the_block,
uint32_t alloc_size)
@@ -233,14 +236,18 @@ Heap_Block* _Heap_Block_allocate(
_HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
_HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
_HAssert(alloc_size <= block_size);
+ _HAssert(_Heap_Is_prev_used(the_block));
if(the_rest >= the_heap->min_block_size) {
- /* Split the block so that lower part is still free, and upper part
- becomes used. */
- the_block->size = the_rest | HEAP_PREV_USED;
- the_block = _Heap_Block_at(the_block, the_rest);
- the_block->prev_size = the_rest;
- the_block->size = alloc_size;
+ /* Split the block so that upper part is still free, and lower part
+ becomes used. This is slightly less optimal than leaving lower part
+ free as it requires replacing block in the free blocks list, but it
+ makes it possible to reuse this code in the _Heap_Resize_block(). */
+ Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
+ _Heap_Block_replace(the_block, next_block);
+ the_block->size = alloc_size | HEAP_PREV_USED;
+ next_block->size = the_rest | HEAP_PREV_USED;
+ _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
}
else {
/* Don't split the block as remainder is either zero or too small to be
@@ -248,14 +255,13 @@ Heap_Block* _Heap_Block_allocate(
block and remove the block from the list of free blocks. */
_Heap_Block_remove(the_block);
alloc_size = block_size;
+ _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
stats->free_blocks -= 1;
}
- /* Mark the block as used (in the next block). */
- _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
/* Update statistics */
stats->free_size -= alloc_size;
if(stats->min_free_size > stats->free_size)
stats->min_free_size = stats->free_size;
stats->used_blocks += 1;
- return the_block;
+ return alloc_size;
}
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index b0f17dca85..e2a7ecdc25 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -62,7 +62,7 @@ void *_Heap_Allocate(
/* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
result of the comparison. */
if(the_block->size >= the_size) {
- the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
+ (void)_Heap_Block_allocate(the_heap, the_block, the_size );
ptr = _Heap_User_area(the_block);
diff --git a/cpukit/score/src/heapallocatealigned.c b/cpukit/score/src/heapallocatealigned.c
index 557c3c8499..741b389e77 100644
--- a/cpukit/score/src/heapallocatealigned.c
+++ b/cpukit/score/src/heapallocatealigned.c
@@ -172,8 +172,7 @@ void *_Heap_Allocate_aligned(
_HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));
- the_block =
- _Heap_Block_allocate(the_heap, the_block, alloc_size);
+ (void)_Heap_Block_allocate(the_heap, the_block, alloc_size);
stats->searches += search_count + 1;
stats->allocs += 1;
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 9702241e32..e0e2e72d85 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -51,18 +51,19 @@ boolean _Heap_Free(
if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
_HAssert(starting_address == NULL);
+ _HAssert(FALSE);
return( FALSE );
}
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_prev_used( next_block ) ) {
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
_HAssert(FALSE);
return( FALSE );
}
- if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+ if ( !_Heap_Is_prev_used( next_block ) ) {
_HAssert(FALSE);
return( FALSE );
}
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
new file mode 100644
index 0000000000..661ef20b51
--- /dev/null
+++ b/cpukit/score/src/heapresizeblock.c
@@ -0,0 +1,170 @@
+/*
+ * Heap Handler
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+
+#include <rtems/system.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/heap.h>
+
+/*
+ * _Heap_Resize_block
+ *
+ * DESCRIPTION:
+ *
+ * This routine tries to resize in place the block that is pointed to by the
+ * 'starting_address' to the new 'size'.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * starting_address - starting address of the memory block
+ * size - new size
+ *
+ * Output parameters:
+ * 'old_mem_size' - the size of the user memory area of the block before
+ * resizing.
+ * 'avail_mem_size' - the size of the user memory area of the free block
+ * that has been enlarged or created due to resizing,
+ * 0 if none.
+ * Returns
+ * HEAP_RESIZE_SUCCESSFUL - if success
+ * HEAP_RESIZE_UNSATISFIED - if the block can't be resized in place
+ * HEAP_RESIZE_FATAL_ERROR - if failure
+ */
+
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *the_heap,
+ void *starting_address,
+ uint32_t size,
+ uint32_t *old_mem_size,
+ uint32_t *avail_mem_size
+)
+{
+ Heap_Block *the_block;
+ Heap_Block *next_block;
+ uint32_t next_block_size;
+ boolean next_is_used;
+ Heap_Block *next_next_block;
+ uint32_t old_block_size;
+ uint32_t old_user_size;
+ uint32_t prev_used_flag;
+ Heap_Statistics *const stats = &the_heap->stats;
+ uint32_t const min_block_size = the_heap->min_block_size;
+ uint32_t const page_size = the_heap->page_size;
+
+ *old_mem_size = 0;
+ *avail_mem_size = 0;
+
+ _Heap_Start_of_block(the_heap, starting_address, &the_block);
+ _HAssert(_Heap_Is_block_in(the_heap, the_block));
+ if (!_Heap_Is_block_in(the_heap, the_block))
+ return HEAP_RESIZE_FATAL_ERROR;
+
+ prev_used_flag = the_block->size & HEAP_PREV_USED;
+ old_block_size = _Heap_Block_size(the_block);
+ next_block = _Heap_Block_at(the_block, old_block_size);
+
+ _HAssert(_Heap_Is_block_in(the_heap, next_block));
+ _HAssert(_Heap_Is_prev_used(next_block));
+ if ( !_Heap_Is_block_in(the_heap, next_block) ||
+ !_Heap_Is_prev_used(next_block))
+ return HEAP_RESIZE_FATAL_ERROR;
+
+ next_block_size = _Heap_Block_size(next_block);
+ next_next_block = _Heap_Block_at(next_block, next_block_size);
+ next_is_used = (next_block == the_heap->final) ||
+ _Heap_Is_prev_used(next_next_block);
+
+ /* See _Heap_Size_of_user_area() source for explanations */
+ old_user_size = _Addresses_Subtract(next_block, starting_address)
+ + HEAP_BLOCK_HEADER_OFFSET;
+
+ *old_mem_size = old_user_size;
+
+ if (size > old_user_size) {
+ /* Need to extend the block: allocate part of the next block and then
+ merge 'the_block' and allocated block together. */
+ if (next_is_used) /* Next block is in use, -- no way to extend */
+ return HEAP_RESIZE_UNSATISFIED;
+ else {
+ uint32_t add_block_size = size - old_user_size;
+ _Heap_Align_up(&add_block_size, page_size);
+ if (add_block_size < min_block_size)
+ add_block_size = min_block_size;
+ if (add_block_size > next_block_size)
+ return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
+ add_block_size =
+ _Heap_Block_allocate(the_heap, next_block, add_block_size);
+ /* Merge two subsequent blocks */
+ the_block->size = (old_block_size + add_block_size) | prev_used_flag;
+ --stats->used_blocks;
+ }
+ } else {
+
+ /* Calculate how much memory we could free */
+ uint32_t free_block_size = old_user_size - size;
+ _Heap_Align_down(&free_block_size, page_size);
+
+ if (free_block_size > 0) {
+
+ /* To free some memory the block should be shortened so that it can
+ can hold 'size' user bytes and still remain not shorter than
+ 'min_block_size'. */
+
+ uint32_t new_block_size = old_block_size - free_block_size;
+
+ if (new_block_size < min_block_size) {
+ uint32_t delta = min_block_size - new_block_size;
+ _HAssert(free_block_size >= delta);
+ free_block_size -= delta;
+ if (free_block_size == 0) {
+ ++stats->resizes;
+ return HEAP_RESIZE_SUCCESSFUL;
+ }
+ new_block_size += delta;
+ }
+
+ _HAssert(new_block_size >= min_block_size);
+ _HAssert(new_block_size + free_block_size == old_block_size);
+ _HAssert(_Heap_Is_aligned(new_block_size, page_size));
+ _HAssert(_Heap_Is_aligned(free_block_size, page_size));
+
+ if (!next_is_used) {
+ /* Extend the next block to the low addresses by 'free_block_size' */
+ Heap_Block *const new_next_block =
+ _Heap_Block_at(the_block, new_block_size);
+ uint32_t const new_next_block_size =
+ next_block_size + free_block_size;
+ _HAssert(_Heap_Is_block_in(the_heap, next_next_block));
+ the_block->size = new_block_size | prev_used_flag;
+ new_next_block->size = new_next_block_size | HEAP_PREV_USED;
+ next_next_block->prev_size = new_next_block_size;
+ _Heap_Block_replace(next_block, new_next_block);
+ the_heap->stats.free_size += free_block_size;
+ *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
+
+ } else if (free_block_size >= min_block_size) {
+ /* Split the block into 2 used parts, then free the second one. */
+ the_block->size = new_block_size | prev_used_flag;
+ next_block = _Heap_Block_at(the_block, new_block_size);
+ next_block->size = free_block_size | HEAP_PREV_USED;
+ ++stats->used_blocks; /* We have created used block */
+ --stats->frees; /* Don't count next call in stats */
+ _Heap_Free(the_heap, _Heap_User_area(next_block));
+ *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
+ }
+ }
+ }
+
+ ++stats->resizes;
+ return HEAP_RESIZE_SUCCESSFUL;
+}
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index 0585d2966d..9b075e5433 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -55,12 +55,15 @@ boolean _Heap_Size_of_user_area(
_Heap_Start_of_block( the_heap, starting_address, &the_block );
+ _HAssert(_Heap_Is_block_in( the_heap, the_block ));
if ( !_Heap_Is_block_in( the_heap, the_block ) )
return( FALSE );
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
+ _HAssert(_Heap_Is_block_in( the_heap, next_block ));
+ _HAssert(_Heap_Is_prev_used( next_block ));
if (
!_Heap_Is_block_in( the_heap, next_block ) ||
!_Heap_Is_prev_used( next_block )
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index 7994e22b27..b85529cf9d 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -87,8 +87,8 @@ boolean _Heap_Walk(
error = 1;
}
- if (the_block->prev_size != HEAP_PREV_USED) {
- printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
+ if (the_block->prev_size != the_heap->page_size) {
+ printf("PASS: %d !prev_size of 1st block isn't page_size\n", source);
error = 1;
}
@@ -162,8 +162,8 @@ boolean _Heap_Walk(
error = 1;
}
- if (_Heap_Block_size(the_block) != 0) {
- printf("PASS: %d !last block's size isn't 0\n", source);
+ if (_Heap_Block_size(the_block) != the_heap->page_size) {
+ printf("PASS: %d !last block's size isn't page_size\n", source);
error = 1;
}