path: root/cpukit/score
author     Joel Sherrill <joel.sherrill@OARcorp.com>   2005-05-20 19:15:41 +0000
committer  Joel Sherrill <joel.sherrill@OARcorp.com>   2005-05-20 19:15:41 +0000
commit     80f2885b70bca394c002241c119a0fca7add2a42 (patch)
tree       59dfc9a04bbf39cd637826ade7fc51e2a5c8a37f /cpukit/score
parent     2005-05-20 Joel Sherrill <joel@OARcorp.com> (diff)
2005-05-14  Sergei Organov <osv@topconrd.ru>

    PR 746/rtems
    Optimize realloc(). The problem is that realloc() can neither grow nor
    shrink the current memory region efficiently without support from the
    underlying heap/region modules. The patch introduces one new routine
    for each of the heap and region modules, _Heap_Resize_block() and
    rtems_region_resize_segment() respectively, and uses the latter to
    optimize realloc().

    The implementation of _Heap_Resize_block() led to a change in the heap
    allocation strategy: when the heap manager splits a larger free block
    into used and free parts, it now makes the first (lower) part of the
    block the used one, not the last one as before. With this strategy
    _Heap_Resize_block() never needs to change the user pointer.

    Caveat: unlike the previous heap implementation, the first few bytes of
    memory allocated from the heap are now almost never all zero. This can
    trigger bugs in client code that were not visible before this patch.

    * libcsupport/src/malloc.c (realloc): Try to resize the segment in
    place using the new rtems_region_resize_segment() routine before
    falling back to the malloc()/free() method.
    * score/src/heap.c (_Heap_Initialize): Change the initial heap layout
    to reflect the new allocation strategy of using the lower part of a
    previously free block when splitting it for the purpose of allocation.
    (_Heap_Block_allocate): When splitting, make the lower part used and
    leave the upper part free. Return type changed from Heap_Block* to
    uint32_t.
    * score/include/rtems/score/heap.h (Heap_Statistics): Added 'resizes'
    field.
    (Heap_Resize_status): New enum.
    (_Heap_Resize_block): New routine.
    (_Heap_Block_allocate): Return type changed from Heap_Block* to
    uint32_t.
    * score/src/heapwalk.c: Reflect the new heap layout in the checks.
    * score/src/heapsizeofuserarea.c: More assertions added.
    * score/src/heapresizeblock.c: New file.
    (_Heap_Resize_block): New routine.
    * score/src/heapfree.c: Reverse the checks _Heap_Is_block_in() and
    _Heap_Is_prev_used() on entry so that they are performed in this order.
    * score/src/heapallocate.c, score/src/heapallocatealigned.c: Ignore
    the return value of _Heap_Block_allocate().
    * score/Makefile.am (HEAP_C_FILES): Added src/heapresizeblock.c.
    * rtems/include/rtems/rtems/region.h (rtems_region_resize_segment):
    New interface routine.
    (_Region_Process_queue): New internal routine called from
    rtems_region_resize_segment() and rtems_region_return_segment().
    * rtems/src/regionreturnsegment.c: Move the queue management code into
    the new internal routine _Region_Process_queue() and call it.
    * rtems/src/regionresizesegment.c: New file.
    (rtems_region_resize_segment): New interface routine.
    * rtems/src/regionprocessqueue.c: New file.
    (_Region_Process_queue): New internal routine containing the queue
    management code factored out from regionreturnsegment.c.
    * rtems/Makefile.am (REGION_C_FILES): Added src/regionresizesegment.c
    and src/regionprocessqueue.c.
    * ada/rtems.adb, ada/rtems.ads: Added Region_Resize_Segment.
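As a rough illustration of the realloc() fast path described above, the flow is
approximately the following. This is a hedged sketch, not the libcsupport code
from this commit: the exact rtems_region_resize_segment() parameter types, the
RTEMS_Malloc_Heap region id, and the behaviour of old_size on an unsatisfied
resize are all assumptions here.

    #include <rtems.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Assumed: the region id that malloc() allocates from. */
    extern rtems_id RTEMS_Malloc_Heap;

    void *realloc_sketch( void *ptr, size_t size )
    {
      uintptr_t         old_size;   /* previous user size, assumed to be
                                       reported even when unsatisfied   */
      rtems_status_code sc;
      void             *new_area;

      if ( ptr == NULL )
        return malloc( size );
      if ( size == 0 ) {
        free( ptr );
        return NULL;
      }

      /* Fast path: ask the region to grow or shrink the segment in place. */
      sc = rtems_region_resize_segment( RTEMS_Malloc_Heap, ptr, size, &old_size );
      if ( sc == RTEMS_SUCCESSFUL )
        return ptr;                 /* user pointer is unchanged */

      /* Fallback: the classic allocate, copy, free sequence. */
      new_area = malloc( size );
      if ( new_area == NULL )
        return NULL;
      memcpy( new_area, ptr, old_size < size ? old_size : size );
      free( ptr );
      return new_area;
    }

The point of the new allocation strategy is visible in the fast path: because
the used part of a split block is always the lower part, a successful in-place
resize never moves the user pointer.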
Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/Makefile.am                 |   3
-rw-r--r--  cpukit/score/include/rtems/score/heap.h  |  46
-rw-r--r--  cpukit/score/src/heap.c                  |  86
-rw-r--r--  cpukit/score/src/heapallocate.c          |   2
-rw-r--r--  cpukit/score/src/heapallocatealigned.c   |   3
-rw-r--r--  cpukit/score/src/heapfree.c              |   5
-rw-r--r--  cpukit/score/src/heapresizeblock.c       | 170
-rw-r--r--  cpukit/score/src/heapsizeofuserarea.c    |   3
-rw-r--r--  cpukit/score/src/heapwalk.c              |   8
9 files changed, 274 insertions, 52 deletions
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 2febb854e8..30e6394843 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -98,7 +98,8 @@ CORE_SEMAPHORE_C_FILES = src/coresem.c src/coresemflush.c src/coresemseize.c \
HEAP_C_FILES = src/heap.c src/heapallocate.c src/heapextend.c src/heapfree.c \
src/heapsizeofuserarea.c src/heapwalk.c src/heapgetinfo.c \
- src/heapgetfreeinfo.c src/heapallocatealigned.c
+ src/heapgetfreeinfo.c src/heapallocatealigned.c \
+ src/heapresizeblock.c
OBJECT_C_FILES = src/object.c src/objectallocate.c \
src/objectallocatebyindex.c src/objectclearname.c \
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index da0aea361d..4ec7396854 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -165,6 +165,8 @@ typedef struct Heap_Statistics_tag {
uint32_t searches;
/** total number of successful calls to free */
uint32_t frees;
+ /** total number of successful resizes */
+ uint32_t resizes;
} Heap_Statistics;
/**
@@ -190,7 +192,7 @@ typedef struct {
} Heap_Control;
/**
- * Status codes for heap_extend
+ * Status codes for _Heap_Extend
*/
typedef enum {
@@ -200,6 +202,16 @@ typedef enum {
} Heap_Extend_status;
/**
+ * Status codes for _Heap_Resize_block
+ */
+
+typedef enum {
+ HEAP_RESIZE_SUCCESSFUL,
+ HEAP_RESIZE_UNSATISFIED,
+ HEAP_RESIZE_FATAL_ERROR
+} Heap_Resize_status;
+
+/**
* Status codes for _Heap_Get_information
*/
@@ -326,6 +338,36 @@ boolean _Heap_Size_of_user_area(
size_t *size
);
+/*
+ * This function tries to resize in place the block that is pointed to by the
+ * @a starting_address to the new @a size.
+ *
+ * @param the_heap (in) is the heap to operate upon
+ * @param starting_address (in) is the starting address of the user block
+ * to be resized
+ * @param size (in) is the new size
+ * @param old_mem_size (in) points to a user area to return the size of the
+ * user memory area of the block before resizing.
+ * @param avail_mem_size (in) points to a user area to return the size of
+ * the user memory area of the free block that has been enlarged or
+ * created due to resizing, 0 if none.
+ * @return HEAP_RESIZE_SUCCESSFUL if successfully able to resize the block,
+ * HEAP_RESIZE_UNSATISFIED if the block can't be resized in place,
+ * HEAP_RESIZE_FATAL_ERROR if failure
+ * @return *old_mem_size filled in with the size of the user memory area of
+ * the block before resizing.
+ * @return *avail_mem_size filled in with the size of the user memory area
+ * of the free block that has been enlarged or created due to
+ * resizing, 0 if none.
+ */
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *the_heap,
+ void *starting_address,
+ uint32_t size,
+ uint32_t *old_mem_size,
+ uint32_t *avail_mem_size
+);
+
/**
* This routine returns the block of memory which begins
* at @a starting_address to @a the_heap. Any coalescing which is
@@ -404,7 +446,7 @@ extern uint32_t _Heap_Calc_block_size(
uint32_t page_size,
uint32_t min_size);
-extern Heap_Block* _Heap_Block_allocate(
+extern uint32_t _Heap_Block_allocate(
Heap_Control* the_heap,
Heap_Block* the_block,
uint32_t alloc_size);
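The declaration above is enough to show the intended calling pattern. The
following is a minimal, hypothetical caller built only from the prototype and
status codes introduced in this hunk; the heap, pointer, and requested size are
made up for illustration.

    #include <rtems/system.h>
    #include <rtems/score/heap.h>

    /* 'my_heap' and 'ptr' are assumed to come from an earlier
       _Heap_Initialize() / _Heap_Allocate() pair. */
    void resize_example( Heap_Control *my_heap, void *ptr )
    {
      uint32_t           old_mem_size;    /* user size before the resize      */
      uint32_t           avail_mem_size;  /* user size of the enlarged/created
                                             free block, 0 if none            */
      Heap_Resize_status status;

      status = _Heap_Resize_block( my_heap, ptr, 256,
                                   &old_mem_size, &avail_mem_size );

      switch ( status ) {
        case HEAP_RESIZE_SUCCESSFUL:
          /* 'ptr' still points to the (now resized) user area. */
          break;
        case HEAP_RESIZE_UNSATISFIED:
          /* The block cannot be resized in place; fall back to an
             allocate-copy-free sequence, as realloc() does. */
          break;
        case HEAP_RESIZE_FATAL_ERROR:
          /* 'ptr' did not identify a valid block of this heap. */
          break;
      }
    }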
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 00dda0afb0..e6c9cd920c 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -44,7 +44,7 @@ static uint32_t instance = 0;
* | unused space due to alignment |
* | size < page_size |
* 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * | prev_size = page_size |
* 4 +--------------------------------+
* | size = size0 | 1 |
* 8 +---------------------+----------+ <- aligned on page_size
@@ -59,7 +59,7 @@ static uint32_t instance = 0;
* size0 +--------------------------------+ <- last dummy block
* | prev_size = size0 |
* +4 +--------------------------------+
- * | size = 0 (arbitrary) | 0 | <- prev block is free
+ * | size = page_size | 0 | <- prev block is free
* +8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
@@ -74,36 +74,36 @@ static uint32_t instance = 0;
* +--------------------------------+ <- begin = starting_address
* | unused space due to alignment |
* | size < page_size |
- * 0 +--------------------------------+ <- first block
- * | prev_size = 1 (arbitrary) |
+ * 0 +--------------------------------+ <- used block
+ * | prev_size = page_size |
* 4 +--------------------------------+
- * | size = S = size0 - BSIZE | 1 |
- * 8 +---------------------+----------+ <- aligned on page_size
- * | next = HEAP_TAIL | |
- * 12 +---------------------+ |
- * | prev = HEAP_HEAD | memory |
- * +---------------------+ |
- * | available |
- * | |
- * | for allocation |
- * | |
- * S +--------------------------------+ <- used block
- * | prev_size = size0 - BSIZE |
- * +4 +--------------------------------+
- * | size = BSIZE | 0 | <- prev block is free
- * +8 +--------------------------------+ <- aligned on page_size
+ * | size = BSIZE | 1 | <- prev block is used
+ * 8 +--------------------------------+ <- aligned on page_size
* | . | Pointer returned to the user
- * | . | is (S+8) for _Heap_Allocate()
+ * | . | is 8 for _Heap_Allocate()
* | . | and is in range
- * S + 8 + | user-accessible | [S+8,S+8+page_size) for
- * page_size+- - - - - -+ _Heap_Allocate_aligned()
+ * 8 + | user-accessible | [8,8+page_size) for
+ * page_size +- - - - - -+ _Heap_Allocate_aligned()
* | area |
* | . |
- * S + BSIZE +- - - - - . - - - - -+ <- last dummy block
+ * BSIZE +- - - - - . - - - - -+ <- free block
* | . |
- * +4 +--------------------------------+
- * | size = 0 (arbitrary) | 1 | <- prev block is used
- * +8 +--------------------------------+ <- aligned on page_size
+ * BSIZE +4 +--------------------------------+
+ * | size = S = size0 - BSIZE | 1 | <- prev block is used
+ * BSIZE +8 +-------------------+------------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * BSIZE +12 +-------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +-------------------+ |
+ * | . available |
+ * | . |
+ * | . for |
+ * | . |
+ * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
+ * | prev_size = S | |
+ * +S+4 +-------------------+------------+
+ * | size = page_size | 0 | <- prev block is free
+ * +S+8 +--------------------------------+ <- aligned on page_size
* | unused space due to alignment |
* | size < page_size |
* +--------------------------------+ <- end = begin + size
@@ -160,7 +160,7 @@ uint32_t _Heap_Initialize(
the_block = (Heap_Block *) aligned_start;
- the_block->prev_size = HEAP_PREV_USED;
+ the_block->prev_size = page_size;
the_block->size = the_size | HEAP_PREV_USED;
the_block->next = _Heap_Tail( the_heap );
the_block->prev = _Heap_Head( the_heap );
@@ -175,7 +175,7 @@ uint32_t _Heap_Initialize(
the_block = _Heap_Block_at( the_block, the_size );
the_heap->final = the_block; /* Permanent final block of the heap */
the_block->prev_size = the_size; /* Previous block is free */
- the_block->size = 0; /* This is the only block with size=0 */
+ the_block->size = page_size;
stats->size = size;
stats->free_size = the_size;
@@ -187,6 +187,7 @@ uint32_t _Heap_Initialize(
stats->allocs = 0;
stats->searches = 0;
stats->frees = 0;
+ stats->resizes = 0;
stats->instance = instance++;
return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
@@ -213,15 +214,17 @@ uint32_t _Heap_Calc_block_size(
uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
_Heap_Align_up(&block_size, page_size);
if(block_size < min_size) block_size = min_size;
+  /* 'block_size' becomes <= 'size' if and only if overflow occurred. */
return (block_size > size) ? block_size : 0;
}
/*
* Allocate block of size 'alloc_size' from 'the_block' belonging to
- * 'the_heap'. Either split 'the_block' or allocate it entirely.
- * Return the block allocated.
+ * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
+ * When split, make the lower part used, and leave the upper part free.
+ * Return the size of allocated block.
*/
-Heap_Block* _Heap_Block_allocate(
+uint32_t _Heap_Block_allocate(
Heap_Control* the_heap,
Heap_Block* the_block,
uint32_t alloc_size)
@@ -233,14 +236,18 @@ Heap_Block* _Heap_Block_allocate(
_HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
_HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
_HAssert(alloc_size <= block_size);
+ _HAssert(_Heap_Is_prev_used(the_block));
if(the_rest >= the_heap->min_block_size) {
- /* Split the block so that lower part is still free, and upper part
- becomes used. */
- the_block->size = the_rest | HEAP_PREV_USED;
- the_block = _Heap_Block_at(the_block, the_rest);
- the_block->prev_size = the_rest;
- the_block->size = alloc_size;
+ /* Split the block so that upper part is still free, and lower part
+ becomes used. This is slightly less optimal than leaving lower part
+ free as it requires replacing block in the free blocks list, but it
+ makes it possible to reuse this code in the _Heap_Resize_block(). */
+ Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
+ _Heap_Block_replace(the_block, next_block);
+ the_block->size = alloc_size | HEAP_PREV_USED;
+ next_block->size = the_rest | HEAP_PREV_USED;
+ _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
}
else {
/* Don't split the block as remainder is either zero or too small to be
@@ -248,14 +255,13 @@ Heap_Block* _Heap_Block_allocate(
block and remove the block from the list of free blocks. */
_Heap_Block_remove(the_block);
alloc_size = block_size;
+ _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
stats->free_blocks -= 1;
}
- /* Mark the block as used (in the next block). */
- _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
/* Update statistics */
stats->free_size -= alloc_size;
if(stats->min_free_size > stats->free_size)
stats->min_free_size = stats->free_size;
stats->used_blocks += 1;
- return the_block;
+ return alloc_size;
}
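The split rule changed by this hunk can be illustrated in isolation. The
fragment below uses simplified standalone types and is not the RTEMS source:
it only mirrors the decision of whether to split and which part becomes used.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of the new split in _Heap_Block_allocate(): the
       requested size is carved out of the low end of the free block and
       the remainder above it, if large enough, stays free. All sizes are
       assumed to be already aligned to the heap page size. */
    static void split_example( uint32_t block_size, uint32_t alloc_size,
                               uint32_t min_block_size )
    {
      uint32_t the_rest = block_size - alloc_size;

      if ( the_rest >= min_block_size ) {
        /* Lower part [0, alloc_size) becomes used; the upper part
           [alloc_size, block_size) remains a free block. */
        printf( "used %" PRIu32 " bytes at offset 0, free %" PRIu32
                " bytes at offset %" PRIu32 "\n",
                alloc_size, the_rest, alloc_size );
      } else {
        /* The remainder is zero or too small to form a block of its
           own, so the whole block is allocated. */
        printf( "used %" PRIu32 " bytes (whole block)\n", block_size );
      }
    }

    int main( void )
    {
      split_example( 64, 16, 16 );   /* splits: 16 used low, 48 free above */
      split_example( 24, 16, 16 );   /* no split: whole 24-byte block used */
      return 0;
    }

This is also why _Heap_Allocate() and _Heap_Allocate_aligned() can now ignore
the return value: the block passed in is the one that becomes used, and only
its allocated size is reported back.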
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index b0f17dca85..e2a7ecdc25 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -62,7 +62,7 @@ void *_Heap_Allocate(
/* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
result of the comparison. */
if(the_block->size >= the_size) {
- the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
+ (void)_Heap_Block_allocate(the_heap, the_block, the_size );
ptr = _Heap_User_area(the_block);
diff --git a/cpukit/score/src/heapallocatealigned.c b/cpukit/score/src/heapallocatealigned.c
index 557c3c8499..741b389e77 100644
--- a/cpukit/score/src/heapallocatealigned.c
+++ b/cpukit/score/src/heapallocatealigned.c
@@ -172,8 +172,7 @@ void *_Heap_Allocate_aligned(
_HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));
- the_block =
- _Heap_Block_allocate(the_heap, the_block, alloc_size);
+ (void)_Heap_Block_allocate(the_heap, the_block, alloc_size);
stats->searches += search_count + 1;
stats->allocs += 1;
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 9702241e32..e0e2e72d85 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -51,18 +51,19 @@ boolean _Heap_Free(
if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
_HAssert(starting_address == NULL);
+ _HAssert(FALSE);
return( FALSE );
}
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_prev_used( next_block ) ) {
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
_HAssert(FALSE);
return( FALSE );
}
- if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+ if ( !_Heap_Is_prev_used( next_block ) ) {
_HAssert(FALSE);
return( FALSE );
}
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
new file mode 100644
index 0000000000..661ef20b51
--- /dev/null
+++ b/cpukit/score/src/heapresizeblock.c
@@ -0,0 +1,170 @@
+/*
+ * Heap Handler
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+
+#include <rtems/system.h>
+#include <rtems/score/sysstate.h>
+#include <rtems/score/heap.h>
+
+/*
+ * _Heap_Resize_block
+ *
+ * DESCRIPTION:
+ *
+ * This routine tries to resize in place the block that is pointed to by the
+ * 'starting_address' to the new 'size'.
+ *
+ * Input parameters:
+ * the_heap - pointer to heap header
+ * starting_address - starting address of the memory block
+ * size - new size
+ *
+ * Output parameters:
+ * 'old_mem_size' - the size of the user memory area of the block before
+ * resizing.
+ * 'avail_mem_size' - the size of the user memory area of the free block
+ * that has been enlarged or created due to resizing,
+ * 0 if none.
+ * Returns
+ * HEAP_RESIZE_SUCCESSFUL - if success
+ * HEAP_RESIZE_UNSATISFIED - if the block can't be resized in place
+ * HEAP_RESIZE_FATAL_ERROR - if failure
+ */
+
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *the_heap,
+ void *starting_address,
+ uint32_t size,
+ uint32_t *old_mem_size,
+ uint32_t *avail_mem_size
+)
+{
+ Heap_Block *the_block;
+ Heap_Block *next_block;
+ uint32_t next_block_size;
+ boolean next_is_used;
+ Heap_Block *next_next_block;
+ uint32_t old_block_size;
+ uint32_t old_user_size;
+ uint32_t prev_used_flag;
+ Heap_Statistics *const stats = &the_heap->stats;
+ uint32_t const min_block_size = the_heap->min_block_size;
+ uint32_t const page_size = the_heap->page_size;
+
+ *old_mem_size = 0;
+ *avail_mem_size = 0;
+
+ _Heap_Start_of_block(the_heap, starting_address, &the_block);
+ _HAssert(_Heap_Is_block_in(the_heap, the_block));
+ if (!_Heap_Is_block_in(the_heap, the_block))
+ return HEAP_RESIZE_FATAL_ERROR;
+
+ prev_used_flag = the_block->size & HEAP_PREV_USED;
+ old_block_size = _Heap_Block_size(the_block);
+ next_block = _Heap_Block_at(the_block, old_block_size);
+
+ _HAssert(_Heap_Is_block_in(the_heap, next_block));
+ _HAssert(_Heap_Is_prev_used(next_block));
+ if ( !_Heap_Is_block_in(the_heap, next_block) ||
+ !_Heap_Is_prev_used(next_block))
+ return HEAP_RESIZE_FATAL_ERROR;
+
+ next_block_size = _Heap_Block_size(next_block);
+ next_next_block = _Heap_Block_at(next_block, next_block_size);
+ next_is_used = (next_block == the_heap->final) ||
+ _Heap_Is_prev_used(next_next_block);
+
+ /* See _Heap_Size_of_user_area() source for explanations */
+ old_user_size = _Addresses_Subtract(next_block, starting_address)
+ + HEAP_BLOCK_HEADER_OFFSET;
+
+ *old_mem_size = old_user_size;
+
+ if (size > old_user_size) {
+ /* Need to extend the block: allocate part of the next block and then
+ merge 'the_block' and allocated block together. */
+ if (next_is_used) /* Next block is in use, -- no way to extend */
+ return HEAP_RESIZE_UNSATISFIED;
+ else {
+ uint32_t add_block_size = size - old_user_size;
+ _Heap_Align_up(&add_block_size, page_size);
+ if (add_block_size < min_block_size)
+ add_block_size = min_block_size;
+ if (add_block_size > next_block_size)
+ return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
+ add_block_size =
+ _Heap_Block_allocate(the_heap, next_block, add_block_size);
+ /* Merge two subsequent blocks */
+ the_block->size = (old_block_size + add_block_size) | prev_used_flag;
+ --stats->used_blocks;
+ }
+ } else {
+
+ /* Calculate how much memory we could free */
+ uint32_t free_block_size = old_user_size - size;
+ _Heap_Align_down(&free_block_size, page_size);
+
+ if (free_block_size > 0) {
+
+    /* To free some memory the block should be shortened so that it
+       can hold 'size' user bytes and still remain not shorter than
+ 'min_block_size'. */
+
+ uint32_t new_block_size = old_block_size - free_block_size;
+
+ if (new_block_size < min_block_size) {
+ uint32_t delta = min_block_size - new_block_size;
+ _HAssert(free_block_size >= delta);
+ free_block_size -= delta;
+ if (free_block_size == 0) {
+ ++stats->resizes;
+ return HEAP_RESIZE_SUCCESSFUL;
+ }
+ new_block_size += delta;
+ }
+
+ _HAssert(new_block_size >= min_block_size);
+ _HAssert(new_block_size + free_block_size == old_block_size);
+ _HAssert(_Heap_Is_aligned(new_block_size, page_size));
+ _HAssert(_Heap_Is_aligned(free_block_size, page_size));
+
+ if (!next_is_used) {
+ /* Extend the next block to the low addresses by 'free_block_size' */
+ Heap_Block *const new_next_block =
+ _Heap_Block_at(the_block, new_block_size);
+ uint32_t const new_next_block_size =
+ next_block_size + free_block_size;
+ _HAssert(_Heap_Is_block_in(the_heap, next_next_block));
+ the_block->size = new_block_size | prev_used_flag;
+ new_next_block->size = new_next_block_size | HEAP_PREV_USED;
+ next_next_block->prev_size = new_next_block_size;
+ _Heap_Block_replace(next_block, new_next_block);
+ the_heap->stats.free_size += free_block_size;
+ *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
+
+ } else if (free_block_size >= min_block_size) {
+ /* Split the block into 2 used parts, then free the second one. */
+ the_block->size = new_block_size | prev_used_flag;
+ next_block = _Heap_Block_at(the_block, new_block_size);
+ next_block->size = free_block_size | HEAP_PREV_USED;
+ ++stats->used_blocks; /* We have created used block */
+ --stats->frees; /* Don't count next call in stats */
+ _Heap_Free(the_heap, _Heap_User_area(next_block));
+ *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
+ }
+ }
+ }
+
+ ++stats->resizes;
+ return HEAP_RESIZE_SUCCESSFUL;
+}
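A hedged numeric walk-through of the shrink path above may help. The numbers
below are invented for illustration (they assume a 16-byte page size and an
8-byte block overhead) and the fragment only repeats the arithmetic; it is not
the RTEMS code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
      uint32_t const page_size      = 16;   /* assumed heap page size     */
      uint32_t const min_block_size = 16;   /* assumed minimum block size */

      uint32_t old_user_size  = 120;  /* user area before resizing        */
      uint32_t new_user_size  = 40;   /* smaller size requested           */
      uint32_t old_block_size = 128;  /* whole block, overhead included   */

      /* How much could be freed, rounded down to the page size. */
      uint32_t free_block_size = old_user_size - new_user_size;
      free_block_size -= free_block_size % page_size;              /* 80 */

      /* The shortened block must stay at least min_block_size long. */
      uint32_t new_block_size = old_block_size - free_block_size;  /* 48 */
      if ( new_block_size < min_block_size ) {
        uint32_t delta = min_block_size - new_block_size;
        free_block_size -= delta;
        new_block_size  += delta;
      }

      printf( "keep a %" PRIu32 "-byte block, give back %" PRIu32 " bytes\n",
              new_block_size, free_block_size );
      return 0;
    }

Whether the bytes given back extend an adjacent free block downwards or become
a new block that is immediately freed depends on whether the next block is in
use, exactly as in the two branches of the routine above.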
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index 0585d2966d..9b075e5433 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -55,12 +55,15 @@ boolean _Heap_Size_of_user_area(
_Heap_Start_of_block( the_heap, starting_address, &the_block );
+ _HAssert(_Heap_Is_block_in( the_heap, the_block ));
if ( !_Heap_Is_block_in( the_heap, the_block ) )
return( FALSE );
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
+ _HAssert(_Heap_Is_block_in( the_heap, next_block ));
+ _HAssert(_Heap_Is_prev_used( next_block ));
if (
!_Heap_Is_block_in( the_heap, next_block ) ||
!_Heap_Is_prev_used( next_block )
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index 7994e22b27..b85529cf9d 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -87,8 +87,8 @@ boolean _Heap_Walk(
error = 1;
}
- if (the_block->prev_size != HEAP_PREV_USED) {
- printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
+ if (the_block->prev_size != the_heap->page_size) {
+ printf("PASS: %d !prev_size of 1st block isn't page_size\n", source);
error = 1;
}
@@ -162,8 +162,8 @@ boolean _Heap_Walk(
error = 1;
}
- if (_Heap_Block_size(the_block) != 0) {
- printf("PASS: %d !last block's size isn't 0\n", source);
+ if (_Heap_Block_size(the_block) != the_heap->page_size) {
+ printf("PASS: %d !last block's size isn't page_size\n", source);
error = 1;
}