summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src/heapresizeblock.c
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
commitdea3eccb38b556b04552219e00b7abd656587278 (patch)
tree6affcb3026172273e366ee15ed3e8ec70f023a20 /cpukit/score/src/heapresizeblock.c
parentRegenerate. (diff)
downloadrtems-dea3eccb38b556b04552219e00b7abd656587278.tar.bz2
2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* libcsupport/src/free.c, libmisc/stackchk/check.c, rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c, rtems/src/regionextend.c, rtems/src/regiongetinfo.c, rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c, rtems/src/regionresizesegment.c, score/src/pheapallocate.c, score/src/pheapallocatealigned.c, score/src/pheapextend.c, score/src/pheapfree.c, score/src/pheapgetblocksize.c, score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c, score/src/pheapgetsize.c, score/src/pheapinit.c, score/src/pheapresizeblock.c, score/src/pheapwalk.c: Update for heap API changes. * score/include/rtems/score/apimutex.h, score/include/rtems/score/object.h: Documentation. * score/include/rtems/score/heap.h, score/include/rtems/score/protectedheap.h, score/inline/rtems/score/heap.inl, score/src/heap.c, score/src/heapallocate.c, score/src/heapallocatealigned.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c, score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c, score/src/heapwalk.c: Overall cleanup. Added boundary constraint to allocation function. More changes follow.
Diffstat (limited to 'cpukit/score/src/heapresizeblock.c')
-rw-r--r--cpukit/score/src/heapresizeblock.c204
1 files changed, 122 insertions, 82 deletions
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
index 8916bbe12c..2f26589667 100644
--- a/cpukit/score/src/heapresizeblock.c
+++ b/cpukit/score/src/heapresizeblock.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,129 +27,163 @@
Heap_Resize_status _Heap_Resize_block(
Heap_Control *heap,
- void *alloc_area_begin_ptr,
- uintptr_t size,
- uintptr_t *old_mem_size,
- uintptr_t *avail_mem_size
+ void *alloc_begin_ptr,
+ uintptr_t new_alloc_size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
)
{
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
- Heap_Block *block;
- Heap_Block *next_block;
- uintptr_t next_block_size;
- bool next_is_used;
- Heap_Block *next_next_block;
- uintptr_t old_block_size;
- uintptr_t old_user_size;
- uintptr_t prev_used_flag;
Heap_Statistics *const stats = &heap->stats;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const page_size = heap->page_size;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
+ Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
+ Heap_Block *next_block = NULL;
+ Heap_Block *next_next_block = NULL;
+ uintptr_t block_size = 0;
+ uintptr_t block_end = 0;
+ uintptr_t next_block_size = 0;
+  bool next_block_is_used = false;
+ uintptr_t alloc_size = 0;
+ uintptr_t prev_block_used_flag = 0;
+
+ *old_size = 0;
+ *new_size = 0;
+
+ if ( !_Heap_Is_block_in_heap( heap, block ) ) {
+ return HEAP_RESIZE_FATAL_ERROR;
+ }
- *old_mem_size = 0;
- *avail_mem_size = 0;
+ block_size = _Heap_Block_size( block );
+ block_end = (uintptr_t) block + block_size;
+ prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+ next_block = _Heap_Block_at( block, block_size );
- block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size);
- _HAssert(_Heap_Is_block_in_heap(heap, block));
- if (!_Heap_Is_block_in_heap(heap, block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
+ _HAssert( _Heap_Is_prev_used( next_block ) );
- prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
- old_block_size = _Heap_Block_size(block);
- next_block = _Heap_Block_at(block, old_block_size);
+ next_block_size = _Heap_Block_size( next_block );
+ next_next_block = _Heap_Block_at( next_block, next_block_size );
- _HAssert(_Heap_Is_block_in_heap(heap, next_block));
- _HAssert(_Heap_Is_prev_used(next_block));
- if ( !_Heap_Is_block_in_heap(heap, next_block) ||
- !_Heap_Is_prev_used(next_block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert(
+ next_block == heap->last_block
+ || _Heap_Is_block_in_heap( heap, next_next_block )
+ );
- next_block_size = _Heap_Block_size(next_block);
- next_next_block = _Heap_Block_at(next_block, next_block_size);
- next_is_used = (next_block == heap->final) ||
- _Heap_Is_prev_used(next_next_block);
+ next_block_is_used = next_block == heap->last_block
+ || _Heap_Is_prev_used( next_next_block );
- /* See _Heap_Size_of_alloc_area() source for explanations */
- old_user_size = (uintptr_t) next_block - alloc_area_begin
- + HEAP_BLOCK_SIZE_OFFSET;
+ alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
- *old_mem_size = old_user_size;
+ *old_size = alloc_size;
- if (size > old_user_size) {
- /* Need to extend the block: allocate part of the next block and then
- merge 'block' and allocated block together. */
- if (next_is_used) /* Next block is in use, -- no way to extend */
+ if ( new_alloc_size > alloc_size ) {
+ /*
+ * Need to extend the block: allocate part of the next block and then
+ * merge the blocks.
+ */
+ if ( next_block_is_used ) {
return HEAP_RESIZE_UNSATISFIED;
- else {
+ } else {
uintptr_t add_block_size =
- _Heap_Align_up(size - old_user_size, page_size);
- if (add_block_size < min_block_size)
+ _Heap_Align_up( new_alloc_size - alloc_size, page_size );
+
+ if ( add_block_size < min_block_size ) {
add_block_size = min_block_size;
- if (add_block_size > next_block_size)
- return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
- add_block_size =
- _Heap_Block_allocate(heap, next_block, add_block_size);
- /* Merge two subsequent blocks */
- block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
+ }
+
+ if ( add_block_size > next_block_size ) {
+ return HEAP_RESIZE_UNSATISFIED;
+ }
+
+ next_block = _Heap_Block_allocate(
+ heap,
+ next_block,
+ _Heap_Alloc_area_of_block( next_block ),
+ add_block_size - HEAP_BLOCK_HEADER_SIZE
+ );
+
+ /* Merge the blocks */
+ block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
+ | prev_block_used_flag;
+
+ /* Statistics */
--stats->used_blocks;
}
} else {
-
/* Calculate how much memory we could free */
uintptr_t free_block_size =
- _Heap_Align_down(old_user_size - size, page_size);
+ _Heap_Align_down( alloc_size - new_alloc_size, page_size );
- if (free_block_size > 0) {
+ if ( free_block_size > 0 ) {
+ /*
+      * To free some memory the block should be shortened so that it can
+ * hold 'new_alloc_size' user bytes and still remain not shorter than
+ * 'min_block_size'.
+ */
+ uintptr_t new_block_size = block_size - free_block_size;
- /* To free some memory the block should be shortened so that it can
- can hold 'size' user bytes and still remain not shorter than
- 'min_block_size'. */
+ if ( new_block_size < min_block_size ) {
+ uintptr_t const delta = min_block_size - new_block_size;
- uintptr_t new_block_size = old_block_size - free_block_size;
+ _HAssert( free_block_size >= delta );
- if (new_block_size < min_block_size) {
- uintptr_t delta = min_block_size - new_block_size;
- _HAssert(free_block_size >= delta);
free_block_size -= delta;
- if (free_block_size == 0) {
+
+ if ( free_block_size == 0 ) {
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}
+
new_block_size += delta;
}
- _HAssert(new_block_size >= min_block_size);
- _HAssert(new_block_size + free_block_size == old_block_size);
- _HAssert(_Heap_Is_aligned(new_block_size, page_size));
- _HAssert(_Heap_Is_aligned(free_block_size, page_size));
+ _HAssert( new_block_size >= min_block_size );
+ _HAssert( new_block_size + free_block_size == block_size );
+ _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
+ _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
- if (!next_is_used) {
- /* Extend the next block to the low addresses by 'free_block_size' */
+ if ( !next_block_is_used ) {
+ /* Extend the next block */
Heap_Block *const new_next_block =
- _Heap_Block_at(block, new_block_size);
+ _Heap_Block_at( block, new_block_size );
uintptr_t const new_next_block_size =
next_block_size + free_block_size;
- _HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
- block->size_and_flag = new_block_size | prev_used_flag;
- new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
+
+ _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
+
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ new_next_block->size_and_flag =
+ new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
- _Heap_Block_replace_in_free_list(next_block, new_next_block);
- heap->stats.free_size += free_block_size;
- *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
-
- } else if (free_block_size >= min_block_size) {
- /* Split the block into 2 used parts, then free the second one. */
- block->size_and_flag = new_block_size | prev_used_flag;
- next_block = _Heap_Block_at(block, new_block_size);
+
+ _Heap_Free_list_replace( next_block, new_next_block );
+
+ *new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
+ stats->free_size += free_block_size;
+ } else if ( free_block_size >= min_block_size ) {
+ /* Split the block into two used parts, then free the second one */
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ next_block = _Heap_Block_at( block, new_block_size );
next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+
+ _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
+
+ *new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
++stats->used_blocks; /* We have created used block */
- --stats->frees; /* Don't count next call in stats */
- _Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
- *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
+ --stats->frees; /* Do not count next call in stats */
}
}
}
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}