summaryrefslogtreecommitdiffstats
path: root/cpukit
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-09 14:58:37 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-09 14:58:37 +0000
commit518c2aeba262cdad2c58bf581c7a49c2966d6569 (patch)
tree8944a4cd0377ca9ed3afd4a81642f9e95105bd63 /cpukit
parentRemove. (diff)
downloadrtems-518c2aeba262cdad2c58bf581c7a49c2966d6569.tar.bz2
2009-09-09 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* score/include/rtems/score/heap.h, score/inline/rtems/score/heap.inl, score/src/heapallocate.c, score/src/heap.c, score/src/heapextend.c, score/src/heapresizeblock.c, score/src/heapwalk.c: Documentation. Simplified block resize. Improved heap walk. Changed heap layout to avoid a special case for _Heap_Is_used() and _Heap_Is_free(). * libmisc/stackchk/check.c: Update for heap API changes.
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/ChangeLog9
-rw-r--r--cpukit/libmisc/stackchk/check.c9
-rw-r--r--cpukit/score/include/rtems/score/heap.h59
-rw-r--r--cpukit/score/inline/rtems/score/heap.inl46
-rw-r--r--cpukit/score/src/heap.c149
-rw-r--r--cpukit/score/src/heapallocate.c7
-rw-r--r--cpukit/score/src/heapextend.c23
-rw-r--r--cpukit/score/src/heapresizeblock.c190
-rw-r--r--cpukit/score/src/heapwalk.c288
9 files changed, 404 insertions, 376 deletions
diff --git a/cpukit/ChangeLog b/cpukit/ChangeLog
index 2cd8aead5f..7ad3566ca7 100644
--- a/cpukit/ChangeLog
+++ b/cpukit/ChangeLog
@@ -1,3 +1,12 @@
+2009-09-09 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
+
+ * score/include/rtems/score/heap.h, score/inline/rtems/score/heap.inl,
+ score/src/heapallocate.c, score/src/heap.c, score/src/heapextend.c,
+ score/src/heapresizeblock.c, score/src/heapwalk.c: Documentation.
+ Simplified block resize. Improved heap walk. Changed heap layout to
+ avoid a special case for _Heap_Is_used() and _Heap_Is_free().
+ * libmisc/stackchk/check.c: Update for heap API changes.
+
2009-09-06 Ralf Corsépius <ralf.corsepius@rtems.org>
* libcsupport/src/issetugid.c: New (relocated from libnetworking).
diff --git a/cpukit/libmisc/stackchk/check.c b/cpukit/libmisc/stackchk/check.c
index 4b6d0a6a68..a51961c073 100644
--- a/cpukit/libmisc/stackchk/check.c
+++ b/cpukit/libmisc/stackchk/check.c
@@ -91,8 +91,15 @@ static inline bool Stack_check_Frame_pointer_in_range(
((_the_stack)->area)
#else
+ /*
+ * We need this magic offset because during a task delete the task stack will
+ * be freed before we enter the task switch extension which checks the stack.
+ * The task stack free operation will write the next and previous pointers
+ * for the free list into this area.
+ */
#define Stack_check_Get_pattern_area( _the_stack ) \
- ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_BLOCK_HEADER_SIZE))
+ ((Stack_check_Control *) ((char *)(_the_stack)->area \
+ + sizeof(Heap_Block) - HEAP_BLOCK_HEADER_SIZE))
#define Stack_check_Calculate_used( _low, _size, _high_water) \
( ((char *)(_low) + (_size)) - (char *)(_high_water) )
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index d07eac3baf..10a267526e 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -91,7 +91,7 @@ extern "C" {
* The heap area after initialization contains two blocks and looks like:
* <table>
* <tr><th>Label</th><th colspan=2>Content</th></tr>
- * <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
+ * <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
* <tr>
* <td>first_block->prev_size</td>
* <td colspan=2>page size (the value is arbitrary)</td>
@@ -108,14 +108,18 @@ extern "C" {
* <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
* <tr><td>...</td></tr>
* <tr>
- * <td>second_block->prev_size</td><td colspan=2>size of first block</td>
+ * <td>last_block->prev_size</td><td colspan=2>size of first block</td>
* </tr>
* <tr>
- * <td>second_block->size</td>
- * <td colspan=2>page size (the value is arbitrary)</td>
+ * <td>last_block->size</td>
+ * <td colspan=2>first block begin address - last block begin address</td>
* </tr>
- * <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
+ * <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
* </table>
+ * The next block of the last block is the first block. Since the first
+ * block indicates that the previous block is used, this ensures that the
+ * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
+ * functions.
*
* @{
*/
@@ -205,17 +209,23 @@ typedef struct {
uint32_t instance;
/**
- * @brief The size of the memory for heap.
+ * @brief Size of the allocatable area in bytes.
+ *
+ * This value is an integral multiple of the page size.
*/
uintptr_t size;
/**
- * @brief Current free size.
+ * @brief Current free size in bytes.
+ *
+ * This value is an integral multiple of the page size.
*/
uintptr_t free_size;
/**
- * @brief Minimum free size ever.
+ * @brief Minimum free size ever in bytes.
+ *
+ * This value is an integral multiple of the page size.
*/
uintptr_t min_free_size;
@@ -240,7 +250,7 @@ typedef struct {
uint32_t max_search;
/**
- * @brief Total number of successful calls to alloc.
+ * @brief Total number of successful allocations.
*/
uint32_t allocs;
@@ -354,7 +364,7 @@ Heap_Extend_status _Heap_Extend(
);
/**
- * @brief Allocates a memory area of size @a size bytes.
+ * @brief Allocates a memory area of size @a size bytes from the heap @a heap.
*
* If the alignment parameter @a alignment is not equal to zero, the allocated
* memory area will begin at an address aligned by this value.
@@ -378,11 +388,26 @@ void *_Heap_Allocate_aligned_with_boundary(
uintptr_t boundary
);
-#define _Heap_Allocate_aligned( heap, size, alignment ) \
- _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 )
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment
+)
+{
+ return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
+}
-#define _Heap_Allocate( heap, size ) \
- _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 )
+/**
+ * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
+ * boundary equals zero.
+ */
+RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
+{
+ return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
+}
/**
* @brief Frees the allocated memory area starting at @a addr in the heap
@@ -473,7 +498,11 @@ Heap_Resize_status _Heap_Resize_block(
* @brief Allocates the memory area starting at @a alloc_begin of size
* @a alloc_size bytes in the block @a block.
*
- * The block may be split up into multiple blocks.
+ * The block may be split up into multiple blocks. The previous and next block
+ * may be used or free. Free block parts which form a valid new block will be
+ * inserted into the free list or merged with an adjacent free block. If the
+ * block is used, they will be inserted after the free list head. If the block
+ * is free, they will be inserted after the previous block in the free list.
*
* Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
* heap.
diff --git a/cpukit/score/inline/rtems/score/heap.inl b/cpukit/score/inline/rtems/score/heap.inl
index 2bcadac385..0f14a0b245 100644
--- a/cpukit/score/inline/rtems/score/heap.inl
+++ b/cpukit/score/inline/rtems/score/heap.inl
@@ -123,13 +123,20 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
* @brief Returns the block which is @a offset away from @a block.
*/
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
- Heap_Block *block,
+ const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
}
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
+ const Heap_Block *block
+)
+{
+ return (Heap_Block *) ((uintptr_t) block - block->prev_size);
+}
+
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
const Heap_Block *block
)
@@ -146,14 +153,41 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
- HEAP_BLOCK_HEADER_SIZE);
}
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
+{
+ return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
+}
+
+RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
+ Heap_Block *block,
+ uintptr_t size
+)
+{
+ uintptr_t flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+
+ block->size_and_flag = size | flag;
+}
+
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
}
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
+RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
+ const Heap_Block *block
+)
{
- return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
+ const Heap_Block *const next_block =
+ _Heap_Block_at( block, _Heap_Block_size( block ) );
+
+ return _Heap_Is_prev_used( next_block );
+}
+
+RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
+ const Heap_Block *block
+)
+{
+ return !_Heap_Is_used( block );
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
@@ -166,11 +200,13 @@ RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
}
/**
- * @brief Returns the heap area size.
+ * @brief Returns the size of the allocatable area in bytes.
+ *
+ * This value is an integral multiple of the page size.
*/
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
{
- return heap->area_end - heap->area_begin;
+ return heap->stats.size;
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 45faac7630..b12bdd8fab 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -140,12 +140,17 @@ uintptr_t _Heap_Initialize(
uintptr_t min_block_size = 0;
uintptr_t overhead = 0;
Heap_Block *first_block = NULL;
- Heap_Block *second_block = NULL;
+ Heap_Block *last_block = NULL;
if ( page_size == 0 ) {
page_size = CPU_ALIGNMENT;
} else {
page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
+
+ if ( page_size < CPU_ALIGNMENT ) {
+ /* Integer overflow */
+ return 0;
+ }
}
min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
@@ -172,10 +177,17 @@ uintptr_t _Heap_Initialize(
first_block->next = _Heap_Free_list_tail( heap );
first_block->prev = _Heap_Free_list_head( heap );
- /* Second and last block */
- second_block = _Heap_Block_at( first_block, first_block_size );
- second_block->prev_size = first_block_size;
- second_block->size_and_flag = page_size;
+ /*
+ * Last block.
+ *
+ * The next block of the last block is the first block. Since the first
+ * block indicates that the previous block is used, this ensures that the
+ * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
+ * functions.
+ */
+ last_block = _Heap_Block_at( first_block, first_block_size );
+ last_block->prev_size = first_block_size;
+ last_block->size_and_flag = first_block_begin - (uintptr_t) last_block;
/* Heap control */
heap->page_size = page_size;
@@ -183,12 +195,12 @@ uintptr_t _Heap_Initialize(
heap->area_begin = heap_area_begin;
heap->area_end = heap_area_end;
heap->first_block = first_block;
- heap->last_block = second_block;
+ heap->last_block = last_block;
_Heap_Free_list_head( heap )->next = first_block;
_Heap_Free_list_tail( heap )->prev = first_block;
/* Statistics */
- stats->size = heap_area_size;
+ stats->size = first_block_size;
stats->free_size = first_block_size;
stats->min_free_size = first_block_size;
stats->free_blocks = 1;
@@ -208,22 +220,21 @@ uintptr_t _Heap_Initialize(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
);
_HAssert(
- _Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
+ _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
);
- if ( !_Heap_Walk( heap, 0, false ) ) {
- _Heap_Walk( heap, 0, true );
- }
-
return alloc_area_size;
}
-static Heap_Block *_Heap_Block_split(
+void _Heap_Block_split(
Heap_Control *heap,
Heap_Block *block,
+ Heap_Block *free_list_anchor,
uintptr_t alloc_size
)
{
+ Heap_Statistics *const stats = &heap->stats;
+
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;
@@ -237,48 +248,54 @@ static Heap_Block *_Heap_Block_split(
uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;
- Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+ Heap_Block *next_block = _Heap_Block_at( block, block_size );
_HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
_HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );
if ( free_size >= free_size_limit ) {
- uintptr_t const free_block_size = block_size - used_block_size;
Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
+ uintptr_t free_block_size = block_size - used_block_size;
_HAssert( used_block_size + free_block_size == block_size );
- block->size_and_flag = used_block_size
- | (block->size_and_flag & HEAP_PREV_BLOCK_USED);
+ _Heap_Block_set_size( block, used_block_size );
+
+ /* Statistics */
+ stats->free_size += free_block_size;
+
+ if ( _Heap_Is_used( next_block ) ) {
+ _Heap_Free_list_insert_after( free_list_anchor, free_block );
+
+ /* Statistics */
+ ++stats->free_blocks;
+ } else {
+ uintptr_t const next_block_size = _Heap_Block_size( next_block );
+
+ _Heap_Free_list_replace( next_block, free_block );
+
+ free_block_size += next_block_size;
+
+ next_block = _Heap_Block_at( free_block, free_block_size );
+ }
+
free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
- next_block->prev_size = free_block_size;
- return free_block;
+ next_block->prev_size = free_block_size;
+ next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
} else {
next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
-
- return NULL;
}
}
static Heap_Block *_Heap_Block_allocate_from_begin(
Heap_Control *heap,
Heap_Block *block,
+ Heap_Block *free_list_anchor,
uintptr_t alloc_size
)
{
- Heap_Block *const free_block = _Heap_Block_split( heap, block, alloc_size );
-
- if ( free_block != NULL ) {
- _Heap_Free_list_replace( block, free_block );
- } else {
- Heap_Statistics *const stats = &heap->stats;
-
- _Heap_Free_list_remove( block );
-
- /* Statistics */
- --stats->free_blocks;
- }
+ _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
return block;
}
@@ -286,11 +303,14 @@ static Heap_Block *_Heap_Block_allocate_from_begin(
static Heap_Block *_Heap_Block_allocate_from_end(
Heap_Control *heap,
Heap_Block *block,
+ Heap_Block *free_list_anchor,
uintptr_t alloc_begin,
uintptr_t alloc_size
)
{
- uintptr_t const block_begin = (uintptr_t) block;
+ Heap_Statistics *const stats = &heap->stats;
+
+ uintptr_t block_begin = (uintptr_t) block;
uintptr_t block_size = _Heap_Block_size( block );
uintptr_t block_end = block_begin + block_size;
@@ -299,22 +319,37 @@ static Heap_Block *_Heap_Block_allocate_from_end(
uintptr_t const new_block_begin = (uintptr_t) new_block;
uintptr_t const new_block_size = block_end - new_block_begin;
- Heap_Block *free_block = NULL;
-
block_end = new_block_begin;
block_size = block_end - block_begin;
_HAssert( block_size >= heap->min_block_size );
_HAssert( new_block_size >= heap->min_block_size );
+ /* Statistics */
+ stats->free_size += block_size;
+
+ if ( _Heap_Is_prev_used( block ) ) {
+ _Heap_Free_list_insert_after( free_list_anchor, block );
+
+ free_list_anchor = block;
+
+ /* Statistics */
+ ++stats->free_blocks;
+ } else {
+ Heap_Block *const prev_block = _Heap_Prev_block( block );
+ uintptr_t const prev_block_size = _Heap_Block_size( prev_block );
+
+ block = prev_block;
+ block_begin = (uintptr_t) block;
+ block_size += prev_block_size;
+ }
+
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
+
new_block->prev_size = block_size;
new_block->size_and_flag = new_block_size;
- free_block = _Heap_Block_split( heap, new_block, alloc_size );
- if ( free_block != NULL ) {
- _Heap_Free_list_insert_after( block, free_block );
- }
+ _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );
return new_block;
}
@@ -327,23 +362,47 @@ Heap_Block *_Heap_Block_allocate(
)
{
Heap_Statistics *const stats = &heap->stats;
+
uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
- _HAssert( _Heap_Is_prev_used( block ) );
+ Heap_Block *free_list_anchor = NULL;
+
_HAssert( alloc_area_begin <= alloc_begin );
+ if ( _Heap_Is_free( block ) ) {
+ free_list_anchor = block->prev;
+
+ _Heap_Free_list_remove( block );
+
+ /* Statistics */
+ --stats->free_blocks;
+ ++stats->used_blocks;
+ stats->free_size -= _Heap_Block_size( block );
+ } else {
+ free_list_anchor = _Heap_Free_list_head( heap );
+ }
+
if ( alloc_area_offset < heap->page_size ) {
alloc_size += alloc_area_offset;
- block = _Heap_Block_allocate_from_begin( heap, block, alloc_size );
+ block = _Heap_Block_allocate_from_begin(
+ heap,
+ block,
+ free_list_anchor,
+ alloc_size
+ );
} else {
- block = _Heap_Block_allocate_from_end( heap, block, alloc_begin, alloc_size );
+ block = _Heap_Block_allocate_from_end(
+ heap,
+ block,
+ free_list_anchor,
+ alloc_begin,
+ alloc_size
+ );
}
/* Statistics */
- ++stats->used_blocks;
- stats->free_size -= _Heap_Block_size( block );
if ( stats->min_free_size > stats->free_size ) {
stats->min_free_size = stats->free_size;
}
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 7b0f51e232..a1c6810eb8 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -48,7 +48,6 @@
uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
- uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
_HAssert( block_size >= min_block_size );
_HAssert( block_begin < block_end );
@@ -206,6 +205,9 @@ void *_Heap_Allocate_aligned_with_boundary(
}
if ( alloc_begin != 0 ) {
+ /* Statistics */
+ stats->searches += search_count;
+
block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
_Heap_Check_allocation(
@@ -216,9 +218,6 @@ void *_Heap_Allocate_aligned_with_boundary(
alignment,
boundary
);
-
- /* Statistics */
- stats->searches += search_count;
}
/* Statistics */
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index 3541bddcc9..02826a82b4 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -38,8 +38,7 @@ Heap_Extend_status _Heap_Extend(
uintptr_t const heap_area_end = heap->area_end;
uintptr_t const new_heap_area_end = heap_area_end + area_size;
uintptr_t extend_size = 0;
- Heap_Block *const old_final = heap->last_block;
- Heap_Block *new_final = NULL;
+ Heap_Block *const last_block = heap->last_block;
/*
* There are five possibilities for the location of starting
@@ -69,24 +68,28 @@ Heap_Extend_status _Heap_Extend(
heap->area_end = new_heap_area_end;
extend_size = new_heap_area_end
- - (uintptr_t) old_final - HEAP_BLOCK_HEADER_SIZE;
+ - (uintptr_t) last_block - HEAP_BLOCK_HEADER_SIZE;
extend_size = _Heap_Align_down( extend_size, heap->page_size );
*amount_extended = extend_size;
if( extend_size >= heap->min_block_size ) {
- old_final->size_and_flag = extend_size
- | (old_final->size_and_flag & HEAP_PREV_BLOCK_USED);
- new_final = _Heap_Block_at( old_final, extend_size );
- new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
+ Heap_Block *const new_last_block = _Heap_Block_at( last_block, extend_size );
- heap->last_block = new_final;
+ _Heap_Block_set_size( last_block, extend_size );
- stats->size += area_size;
+ new_last_block->size_and_flag =
+ ((uintptr_t) heap->first_block - (uintptr_t) new_last_block)
+ | HEAP_PREV_BLOCK_USED;
+
+ heap->last_block = new_last_block;
+
+ /* Statistics */
+ stats->size += extend_size;
++stats->used_blocks;
--stats->frees; /* Do not count subsequent call as actual free() */
- _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( old_final ));
+ _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( last_block ));
}
return HEAP_EXTEND_SUCCESSFUL;
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
index 2f26589667..d684c84c26 100644
--- a/cpukit/score/src/heapresizeblock.c
+++ b/cpukit/score/src/heapresizeblock.c
@@ -10,6 +10,8 @@
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2009 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
@@ -25,165 +27,93 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-Heap_Resize_status _Heap_Resize_block(
+static Heap_Resize_status _Heap_Resize_block_checked(
Heap_Control *heap,
- void *alloc_begin_ptr,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
uintptr_t new_alloc_size,
uintptr_t *old_size,
uintptr_t *new_size
)
{
Heap_Statistics *const stats = &heap->stats;
- uintptr_t const min_block_size = heap->min_block_size;
- uintptr_t const page_size = heap->page_size;
- uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
- Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
- Heap_Block *next_block = NULL;
- Heap_Block *next_next_block = NULL;
- uintptr_t block_size = 0;
- uintptr_t block_end = 0;
- uintptr_t next_block_size = 0;
- bool next_block_is_used = false;;
- uintptr_t alloc_size = 0;
- uintptr_t prev_block_used_flag = 0;
- *old_size = 0;
- *new_size = 0;
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t block_size = _Heap_Block_size( block );
+ uintptr_t block_end = block_begin + block_size;
- if ( !_Heap_Is_block_in_heap( heap, block ) ) {
- return HEAP_RESIZE_FATAL_ERROR;
- }
+ uintptr_t alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
- block_size = _Heap_Block_size( block );
- block_end = (uintptr_t) block + block_size;
- prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
- next_block = _Heap_Block_at( block, block_size );
+ Heap_Block *next_block = _Heap_Block_at( block, block_size );
+ uintptr_t next_block_size = _Heap_Block_size( next_block );
+ bool next_block_is_free = _Heap_Is_free( next_block );;
_HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
_HAssert( _Heap_Is_prev_used( next_block ) );
- next_block_size = _Heap_Block_size( next_block );
- next_next_block = _Heap_Block_at( next_block, next_block_size );
-
- _HAssert(
- next_block == heap->last_block
- || _Heap_Is_block_in_heap( heap, next_next_block )
- );
-
- next_block_is_used = next_block == heap->last_block
- || _Heap_Is_prev_used( next_next_block );
-
- alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
-
*old_size = alloc_size;
- if ( new_alloc_size > alloc_size ) {
- /*
- * Need to extend the block: allocate part of the next block and then
- * merge the blocks.
- */
- if ( next_block_is_used ) {
- return HEAP_RESIZE_UNSATISFIED;
- } else {
- uintptr_t add_block_size =
- _Heap_Align_up( new_alloc_size - alloc_size, page_size );
-
- if ( add_block_size < min_block_size ) {
- add_block_size = min_block_size;
- }
-
- if ( add_block_size > next_block_size ) {
- return HEAP_RESIZE_UNSATISFIED;
- }
-
- next_block = _Heap_Block_allocate(
- heap,
- next_block,
- _Heap_Alloc_area_of_block( next_block ),
- add_block_size - HEAP_BLOCK_HEADER_SIZE
- );
-
- /* Merge the blocks */
- block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
- | prev_block_used_flag;
-
- /* Statistics */
- --stats->used_blocks;
- }
- } else {
- /* Calculate how much memory we could free */
- uintptr_t free_block_size =
- _Heap_Align_down( alloc_size - new_alloc_size, page_size );
-
- if ( free_block_size > 0 ) {
- /*
- * To free some memory the block should be shortened so that it can can
- * hold 'new_alloc_size' user bytes and still remain not shorter than
- * 'min_block_size'.
- */
- uintptr_t new_block_size = block_size - free_block_size;
-
- if ( new_block_size < min_block_size ) {
- uintptr_t const delta = min_block_size - new_block_size;
-
- _HAssert( free_block_size >= delta );
+ if ( next_block_is_free ) {
+ block_size += next_block_size;
+ alloc_size += next_block_size;
+ }
- free_block_size -= delta;
+ if ( new_alloc_size > alloc_size ) {
+ return HEAP_RESIZE_UNSATISFIED;
+ }
- if ( free_block_size == 0 ) {
- /* Statistics */
- ++stats->resizes;
+ if ( next_block_is_free ) {
+ _Heap_Block_set_size( block, block_size );
- return HEAP_RESIZE_SUCCESSFUL;
- }
+ _Heap_Free_list_remove( next_block );
- new_block_size += delta;
- }
+ next_block = _Heap_Block_at( block, block_size );
+ next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
- _HAssert( new_block_size >= min_block_size );
- _HAssert( new_block_size + free_block_size == block_size );
- _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
- _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
+ /* Statistics */
+ --stats->free_blocks;
+ stats->free_size -= next_block_size;
+ }
- if ( !next_block_is_used ) {
- /* Extend the next block */
- Heap_Block *const new_next_block =
- _Heap_Block_at( block, new_block_size );
- uintptr_t const new_next_block_size =
- next_block_size + free_block_size;
+ block = _Heap_Block_allocate( heap, block, alloc_begin, new_alloc_size );
- _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
+ block_size = _Heap_Block_size( block );
+ next_block = _Heap_Block_at( block, block_size );
+ *new_size = (uintptr_t) next_block - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
- block->size_and_flag = new_block_size | prev_block_used_flag;
- new_next_block->size_and_flag =
- new_next_block_size | HEAP_PREV_BLOCK_USED;
- next_next_block->prev_size = new_next_block_size;
+ /* Statistics */
+ ++stats->resizes;
- _Heap_Free_list_replace( next_block, new_next_block );
+ return HEAP_RESIZE_SUCCESSFUL;
+}
- *new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *heap,
+ void *alloc_begin_ptr,
+ uintptr_t new_alloc_size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
+)
+{
+ uintptr_t const page_size = heap->page_size;
- /* Statistics */
- stats->free_size += free_block_size;
- } else if ( free_block_size >= min_block_size ) {
- /* Split the block into two used parts, then free the second one */
- block->size_and_flag = new_block_size | prev_block_used_flag;
- next_block = _Heap_Block_at( block, new_block_size );
- next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
- _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
+ Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
- *new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
+ *old_size = 0;
+ *new_size = 0;
- /* Statistics */
- ++stats->used_blocks; /* We have created used block */
- --stats->frees; /* Do not count next call in stats */
- }
- }
+ if ( _Heap_Is_block_in_heap( heap, block ) ) {
+ return _Heap_Resize_block_checked(
+ heap,
+ block,
+ alloc_begin,
+ new_alloc_size,
+ old_size,
+ new_size
+ );
+ } else {
+ return HEAP_RESIZE_FATAL_ERROR;
}
-
- /* Statistics */
- ++stats->resizes;
-
- return HEAP_RESIZE_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index dd255e1a17..07fd100fe0 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -51,16 +51,13 @@ static bool _Heap_Walk_check_free_list(
Heap_Control *heap
)
{
+ uintptr_t const page_size = heap->page_size;
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ const Heap_Block *prev_block = free_list_tail;
const Heap_Block *free_block = first_free_block;
- uintptr_t const page_size = heap->page_size;
- uintptr_t const loop_limit =
- ((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
- / heap->min_block_size;
- uintptr_t loop_counter = 0;
- while ( free_block != free_list_tail && loop_counter < loop_limit ) {
+ while ( free_block != free_list_tail ) {
if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
_Heap_Walk_printk(
source,
@@ -87,20 +84,33 @@ static bool _Heap_Walk_check_free_list(
return false;
}
- ++loop_counter;
+ if ( _Heap_Is_used( free_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: is used\n",
+ free_block
+ );
- free_block = free_block->next;
- }
+ return false;
+ }
- if ( loop_counter >= loop_limit ) {
- _Heap_Walk_printk(
- source,
- dump,
- true,
- "free list contains a loop\n"
- );
+ if ( free_block->prev != prev_block ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: invalid previous block 0x%08x\n",
+ free_block,
+ free_block->prev
+ );
- return false;
+ return false;
+ }
+
+ prev_block = free_block;
+ free_block = free_block->next;
}
return true;
@@ -132,8 +142,6 @@ static bool _Heap_Walk_check_control(
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
- Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
- Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
Heap_Block *const first_block = heap->first_block;
@@ -184,84 +192,128 @@ static bool _Heap_Walk_check_control(
}
if (
- first_free_block != free_list_head
- && !_Addresses_Is_aligned( first_free_block )
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
) {
_Heap_Walk_printk(
source,
dump,
true,
- "first free block: 0x%08x not CPU aligned\n",
- first_free_block
+ "first block 0x%08x: alloc area not page aligned\n",
+ first_block
);
return false;
}
- if (
- last_free_block != free_list_tail
- && !_Addresses_Is_aligned( last_free_block )
- ) {
+ if ( !_Heap_Is_prev_used( first_block ) ) {
_Heap_Walk_printk(
source,
dump,
true,
- "last free block: 0x%08x not CPU aligned\n",
- last_free_block
+ "first block: HEAP_PREV_BLOCK_USED is cleared\n"
);
-
+
return false;
}
- if (
- !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
- ) {
+ if ( first_block->prev_size != page_size ) {
_Heap_Walk_printk(
source,
dump,
true,
- "first block: 0x%08x not page aligned\n",
- first_block
+ "first block: prev size %u != page size %u\n",
+ first_block->prev_size,
+ page_size
);
return false;
}
- if (
- !_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
- ) {
+ if ( _Heap_Is_free( last_block ) ) {
_Heap_Walk_printk(
source,
dump,
true,
- "last block: 0x%08x not page aligned\n",
- last_block
+ "last block: is free\n"
);
return false;
}
- if ( !_Heap_Is_prev_used( first_block ) ) {
+ return _Heap_Walk_check_free_list( source, dump, heap );
+}
+
+static bool _Heap_Walk_check_free_block(
+ int source,
+ bool dump,
+ Heap_Control *heap,
+ Heap_Block *block
+)
+{
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ bool const prev_used = _Heap_Is_prev_used( block );
+ uintptr_t const block_size = _Heap_Block_size( block );
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+
+ _Heap_Walk_printk(
+ source,
+ dump,
+ false,
+ "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
+ block,
+ block->prev,
+ block->prev == first_free_block ?
+ " (= first)"
+ : (block->prev == free_list_head ? " (= head)" : ""),
+ block->next,
+ block->next == last_free_block ?
+ " (= last)"
+ : (block->next == free_list_tail ? " (= tail)" : "")
+ );
+
+ if ( block_size != next_block->prev_size ) {
_Heap_Walk_printk(
source,
dump,
true,
- "first block: HEAP_PREV_BLOCK_USED is cleared\n"
+ "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
+ block,
+ block_size,
+ next_block->prev_size,
+ next_block
+ );
+
+ return false;
+ }
+
+ if ( !prev_used ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "block 0x%08x: two consecutive blocks are free\n",
+ block
);
+
+ return false;
}
- if ( first_block->prev_size != page_size ) {
+ if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
_Heap_Walk_printk(
source,
dump,
true,
- "first block: prev size %u != page size %u\n",
- first_block->prev_size,
- page_size
+ "block 0x%08x: free block not in free list\n",
+ block
);
+
+ return false;
}
- return _Heap_Walk_check_free_list( source, dump, heap );
+ return true;
}
bool _Heap_Walk(
@@ -272,13 +324,8 @@ bool _Heap_Walk(
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
- Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
- Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
- Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
- Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
Heap_Block *const last_block = heap->last_block;
Heap_Block *block = heap->first_block;
- bool error = false;
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
@@ -288,7 +335,7 @@ bool _Heap_Walk(
return false;
}
- while ( block != last_block && _Addresses_Is_aligned( block ) ) {
+ while ( block != last_block ) {
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
bool const prev_used = _Heap_Is_prev_used( block );
@@ -299,7 +346,7 @@ bool _Heap_Walk(
_Heap_Walk_printk(
source,
dump,
- error,
+ false,
"block 0x%08x: size %u\n",
block,
block_size
@@ -308,7 +355,7 @@ bool _Heap_Walk(
_Heap_Walk_printk(
source,
dump,
- error,
+ false,
"block 0x%08x: size %u, prev_size %u\n",
block,
block_size,
@@ -316,158 +363,67 @@ bool _Heap_Walk(
);
}
- if (
- !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
- ) {
- error = true;
+ if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
_Heap_Walk_printk(
source,
dump,
- error,
- "block 0x%08x: not page (%u) aligned\n",
+ true,
+ "block 0x%08x: next block 0x%08x not in heap\n",
block,
- page_size
+ next_block
);
- break;
+
+ return false;
}
if ( !_Heap_Is_aligned( block_size, page_size ) ) {
- error = true;
_Heap_Walk_printk(
source,
dump,
- error,
- "block 0x%08x: block size %u not page (%u) aligned\n",
+ true,
+ "block 0x%08x: block size %u not page aligned\n",
block,
- block_size,
- page_size
+ block_size
);
- break;
+
+ return false;
}
if ( block_size < min_block_size ) {
- error = true;
_Heap_Walk_printk(
source,
dump,
- error,
+ true,
"block 0x%08x: size %u < min block size %u\n",
block,
block_size,
min_block_size
);
- break;
+
+ return false;
}
if ( next_block_begin <= block_begin ) {
- error = true;
_Heap_Walk_printk(
source,
dump,
- error,
+ true,
"block 0x%08x: next block 0x%08x is not a successor\n",
block,
next_block
);
- break;
+
+ return false;
}
if ( !_Heap_Is_prev_used( next_block ) ) {
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
- block,
- block->prev,
- block->prev == first_free_block ?
- " (= first)"
- : (block->prev == free_list_head ? " (= head)" : ""),
- block->next,
- block->next == last_free_block ?
- " (= last)"
- : (block->next == free_list_tail ? " (= tail)" : "")
- );
-
- if ( block_size != next_block->prev_size ) {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
- block,
- block_size,
- next_block->prev_size,
- next_block
- );
- }
-
- if ( !prev_used ) {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "block 0x%08x: two consecutive blocks are free\n",
- block
- );
- }
-
- if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "block 0x%08x: free block not in free list\n",
- block
- );
+ if ( !_Heap_Walk_check_free_block( source, dump, heap, block ) ) {
+ return false;
}
}
block = next_block;
}
- if ( !_Addresses_Is_aligned( block ) ) {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "block 0x%08x: not CPU aligned\n",
- block
- );
-
- return false;
- }
-
- if ( block == last_block ) {
- uintptr_t const block_size = _Heap_Block_size( block );
-
- if ( block_size != page_size ) {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "last block 0x%08x: size %u != page size %u\n",
- block,
- block_size,
- page_size
- );
- }
- } else {
- error = true;
- _Heap_Walk_printk(
- source,
- dump,
- error,
- "last block 0x%08x != last block 0x%08x\n",
- block,
- last_block
- );
- }
-
- return !error;
+ return true;
}