summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src/heap.c
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2005-05-20 19:15:41 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2005-05-20 19:15:41 +0000
commit80f2885b70bca394c002241c119a0fca7add2a42 (patch)
tree59dfc9a04bbf39cd637826ade7fc51e2a5c8a37f /cpukit/score/src/heap.c
parent2005-05-20 Joel Sherrill <joel@OARcorp.com> (diff)
downloadrtems-80f2885b70bca394c002241c119a0fca7add2a42.tar.bz2
2005-05-14 Sergei Organov <osv@topconrd.ru>
PR 746/rtems Optimize realloc(). The problem is that realloc() can neither grow nor shrink efficiently the current memory region without support from underlying heap/region modules. The patch introduces one new routine for each of heap and region modules, _Heap_Resize_block(), and rtems_region_resize_segment(), respectively, and uses the latter to optimize realloc(). The implementation of _Heap_Resize_block() led to a change in the heap allocation strategy: now the heap manager, when it splits a larger free block into used and new free parts, makes the first part of the block used, not the last one as it was before. Due to this new strategy, _Heap_Resize_block() never needs to change the user pointer. Caveat: unlike the previous heap implementation, the first few bytes of the contents of the memory allocated from the heap are now almost never all zero. This can trigger bugs in client code that have not been visible before this patch. * libcsupport/src/malloc.c (realloc): try to resize segment in place using new rtems_region_resize_segment() routine before falling back to the malloc()/free() method. * score/src/heap.c: (_Heap_Initialize): change initial heap layout to reflect new allocation strategy of using the lower part of a previously free block when splitting it for the purpose of allocation. (_Heap_Block_allocate): when split, make the lower part used, and leave the upper part free. Return type changed from Heap_Block* to uint32_t. * score/include/rtems/score/heap.h: (Heap_Statistics): added 'resizes' field. (Heap_Resize_status): new enum. (_Heap_Resize_block): new routine. (_Heap_Block_allocate): return type changed from Heap_Block* to uint32_t. * score/src/heapwalk.c: reflect new heap layout in checks. * score/src/heapsizeofuserarea.c: more assertions added. * score/src/heapresizeblock.c: new file. (_Heap_Resize_block): new routine. * score/src/heapfree.c: reverse the checks _Heap_Is_block_in() and _Heap_Is_prev_used() on entry to be in this order. 
* score/src/heapallocate.c, score/src/heapallocatealigned.c: ignore return value of _Heap_Block_allocate(). * score/Makefile.am (HEAP_C_FILES): added src/heapresizeblock.c. * rtems/include/rtems/rtems/region.h: (rtems_region_resize_segment): new interface routine. (_Region_Process_queue): new internal routine called from rtems_region_resize_segment() and rtems_region_return_segment(). * rtems/src/regionreturnsegment.c: move queue management code into the new internal routine _Region_Process_queue() and call it. * rtems/src/regionresizesegment.c: new file. (rtems_region_resize_segment): new interface routine. * rtems/src/regionprocessqueue.c: new file. (_Region_Process_queue): new internal routine containing queue management code factored out from 'regionreturnsegment.c'. * rtems/Makefile.am (REGION_C_FILES): Added src/regionresizesegment.c, and src/regionprocessqueue.c. * ada/rtems.adb, ada/rtems.ads: Added Region_Resize_Segment.
Diffstat (limited to 'cpukit/score/src/heap.c')
-rw-r--r--cpukit/score/src/heap.c86
1 files changed, 46 insertions, 40 deletions
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 00dda0afb0..e6c9cd920c 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -44,7 +44,7 @@ static uint32_t instance = 0;
* | unused space due to alignment |
* | size < page_size |
* 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * | prev_size = page_size |
* 4 +--------------------------------+
* | size = size0 | 1 |
* 8 +---------------------+----------+ <- aligned on page_size
@@ -59,7 +59,7 @@ static uint32_t instance = 0;
* size0 +--------------------------------+ <- last dummy block
* | prev_size = size0 |
* +4 +--------------------------------+
- * | size = 0 (arbitrary) | 0 | <- prev block is free
+ * | size = page_size | 0 | <- prev block is free
* +8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
@@ -74,36 +74,36 @@ static uint32_t instance = 0;
* +--------------------------------+ <- begin = starting_address
* | unused space due to alignment |
* | size < page_size |
- * 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * 0 +--------------------------------+ <- used block
+ * | prev_size = page_size |
* 4 +--------------------------------+
- * | size = S = size0 - BSIZE | 1 |
- * 8 +---------------------+----------+ <- aligned on page_size
- * | next = HEAP_TAIL | |
- * 12 +---------------------+ |
- * | prev = HEAP_HEAD | memory |
- * +---------------------+ |
- * | available |
- * | |
- * | for allocation |
- * | |
- * S +--------------------------------+ <- used block
- * | prev_size = size0 - BSIZE |
- * +4 +--------------------------------+
- * | size = BSIZE | 0 | <- prev block is free
- * +8 +--------------------------------+ <- aligned on page_size
+ * | size = BSIZE | 1 | <- prev block is used
+ * 8 +--------------------------------+ <- aligned on page_size
* | . | Pointer returned to the user
- * | . | is (S+8) for _Heap_Allocate()
+ * | . | is 8 for _Heap_Allocate()
* | . | and is in range
- * S + 8 + | user-accessible | [S+8,S+8+page_size) for
- * page_size+- - - - - -+ _Heap_Allocate_aligned()
+ * 8 + | user-accessible | [8,8+page_size) for
+ * page_size +- - - - - -+ _Heap_Allocate_aligned()
* | area |
* | . |
- * S + BSIZE +- - - - - . - - - - -+ <- last dummy block
+ * BSIZE +- - - - - . - - - - -+ <- free block
* | . |
- * +4 +--------------------------------+
- * | size = 0 (arbitrary) | 1 | <- prev block is used
- * +8 +--------------------------------+ <- aligned on page_size
+ * BSIZE +4 +--------------------------------+
+ * | size = S = size0 - BSIZE | 1 | <- prev block is used
+ * BSIZE +8 +-------------------+------------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * BSIZE +12 +-------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +-------------------+ |
+ * | . available |
+ * | . |
+ * | . for |
+ * | . |
+ * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
+ * | prev_size = S | |
+ * +S+4 +-------------------+------------+
+ * | size = page_size | 0 | <- prev block is free
+ * +S+8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
* +--------------------------------+ <- end = begin + size
@@ -160,7 +160,7 @@ uint32_t _Heap_Initialize(
the_block = (Heap_Block *) aligned_start;
- the_block->prev_size = HEAP_PREV_USED;
+ the_block->prev_size = page_size;
the_block->size = the_size | HEAP_PREV_USED;
the_block->next = _Heap_Tail( the_heap );
the_block->prev = _Heap_Head( the_heap );
@@ -175,7 +175,7 @@ uint32_t _Heap_Initialize(
the_block = _Heap_Block_at( the_block, the_size );
the_heap->final = the_block; /* Permanent final block of the heap */
the_block->prev_size = the_size; /* Previous block is free */
- the_block->size = 0; /* This is the only block with size=0 */
+ the_block->size = page_size;
stats->size = size;
stats->free_size = the_size;
@@ -187,6 +187,7 @@ uint32_t _Heap_Initialize(
stats->allocs = 0;
stats->searches = 0;
stats->frees = 0;
+ stats->resizes = 0;
stats->instance = instance++;
return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
@@ -213,15 +214,17 @@ uint32_t _Heap_Calc_block_size(
uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
_Heap_Align_up(&block_size, page_size);
if(block_size < min_size) block_size = min_size;
+ /* 'block_size' becomes <= 'size' if and only if overflow occurred. */
return (block_size > size) ? block_size : 0;
}
/*
* Allocate block of size 'alloc_size' from 'the_block' belonging to
- * 'the_heap'. Either split 'the_block' or allocate it entirely.
- * Return the block allocated.
+ * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
+ * When split, make the lower part used, and leave the upper part free.
+ * Return the size of allocated block.
*/
-Heap_Block* _Heap_Block_allocate(
+unsigned32 _Heap_Block_allocate(
Heap_Control* the_heap,
Heap_Block* the_block,
uint32_t alloc_size)
@@ -233,14 +236,18 @@ Heap_Block* _Heap_Block_allocate(
_HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
_HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
_HAssert(alloc_size <= block_size);
+ _HAssert(_Heap_Is_prev_used(the_block));
if(the_rest >= the_heap->min_block_size) {
- /* Split the block so that lower part is still free, and upper part
- becomes used. */
- the_block->size = the_rest | HEAP_PREV_USED;
- the_block = _Heap_Block_at(the_block, the_rest);
- the_block->prev_size = the_rest;
- the_block->size = alloc_size;
+ /* Split the block so that upper part is still free, and lower part
+ becomes used. This is slightly less optimal than leaving lower part
+ free as it requires replacing block in the free blocks list, but it
+ makes it possible to reuse this code in the _Heap_Resize_block(). */
+ Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
+ _Heap_Block_replace(the_block, next_block);
+ the_block->size = alloc_size | HEAP_PREV_USED;
+ next_block->size = the_rest | HEAP_PREV_USED;
+ _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
}
else {
/* Don't split the block as remainder is either zero or too small to be
@@ -248,14 +255,13 @@ Heap_Block* _Heap_Block_allocate(
block and remove the block from the list of free blocks. */
_Heap_Block_remove(the_block);
alloc_size = block_size;
+ _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
stats->free_blocks -= 1;
}
- /* Mark the block as used (in the next block). */
- _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
/* Update statistics */
stats->free_size -= alloc_size;
if(stats->min_free_size > stats->free_size)
stats->min_free_size = stats->free_size;
stats->used_blocks += 1;
- return the_block;
+ return alloc_size;
}