author    Joel Sherrill <joel.sherrill@OARcorp.com>  2005-01-20 18:22:29 +0000
committer Joel Sherrill <joel.sherrill@OARcorp.com>  2005-01-20 18:22:29 +0000
commit    962e894f4ed0836a5aad472805682bb2f2c90201
tree      2bb745c9bed1353cb59f88e00da9962e649009bf /cpukit/score/src
parent    2005-01-20 Joel Sherrill <joel@OARcorp.com>
2005-01-20  Sergei Organov <osv@topconrd.ru>

        PR 536/rtems
        Heap manager re-implementation to consume less memory and still
        satisfy alignment requirements.
        * score/src/heap.c, score/src/heapallocate.c, score/src/heapextend.c,
        score/src/heapfree.c, score/src/heapgetinfo.c,
        score/src/heapgetfreeinfo.c, score/src/heapsizeofuserarea.c,
        score/src/heapwalk.c, score/macros/rtems/score/heap.inl,
        score/inline/rtems/score/heap.inl, score/include/rtems/score/heap.h:
        Reimplemented.
        * score/src/heapallocatealigned.c: New file.
        * score/Makefile.am: HEAP_C_FILES: add score/src/heapallocatealigned.c.
Diffstat (limited to 'cpukit/score/src')
 -rw-r--r--  cpukit/score/src/heap.c                | 245
 -rw-r--r--  cpukit/score/src/heapallocate.c        |  93
 -rw-r--r--  cpukit/score/src/heapextend.c          |  50
 -rw-r--r--  cpukit/score/src/heapfree.c            | 111
 -rw-r--r--  cpukit/score/src/heapgetfreeinfo.c     |  23
 -rw-r--r--  cpukit/score/src/heapgetinfo.c         |  84
 -rw-r--r--  cpukit/score/src/heapsizeofuserarea.c  |  36
 -rw-r--r--  cpukit/score/src/heapwalk.c            | 168
 8 files changed, 485 insertions(+), 325 deletions(-)
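
The patch replaces the old front/back flag scheme with a boundary-tag header: every block records its own size (whose low bit doubles as the HEAP_PREV_USED flag) and the size of its physical predecessor, and free blocks additionally carry next/prev pointers for a doubly linked free list. A minimal, illustrative sketch of that layout and of the two accessors the new code leans on follows; these are stand-ins for the real declarations in score/include/rtems/score/heap.h, not copies of them.

  #include <stdint.h>

  /* Illustrative stand-in for the real Heap_Block declaration. */
  typedef struct Heap_Block_struct Heap_Block;

  struct Heap_Block_struct {
    uint32_t    prev_size; /* size of the preceding block; meaningful only
                              while that block is free                     */
    uint32_t    size;      /* size of this block; bit 0 is HEAP_PREV_USED  */
    Heap_Block *next;      /* free-list links; present in the header but   */
    Heap_Block *prev;      /* meaningful only while this block is free     */
  };

  #define HEAP_PREV_USED 1u

  /* Block size with the flag bit masked off (cf. _Heap_Block_size()). */
  static inline uint32_t block_size(const Heap_Block *b)
  {
    return b->size & ~HEAP_PREV_USED;
  }

  /* Block located 'offset' bytes after 'b' (cf. _Heap_Block_at()). */
  static inline Heap_Block *block_at(Heap_Block *b, uint32_t offset)
  {
    return (Heap_Block *)((char *)b + offset);
  }

Because a block's "am I free?" state lives in its successor's header, a used block gives its next/prev words (and the successor's prev_size word) back to the user, which is where the memory saving over the old scheme comes from.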
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 6d6873ebed..910ac94c33 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -16,6 +16,8 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
+static uint32_t instance = 0;
+
/*PAGE
*
* _Heap_Initialize
@@ -32,27 +34,77 @@
* returns - maximum memory available if RTEMS_SUCCESSFUL
* 0 - otherwise
*
- * This is what a heap looks like in memory immediately
- * after initialization:
- *
- * +--------------------------------+
- * 0 | size = 0 | status = used | a.k.a. dummy back flag
- * +--------------------------------+
- * 4 | size = size-8 | status = free | a.k.a. front flag
- * +--------------------------------+
- * 8 | next = PERM HEAP_TAIL |
- * +--------------------------------+
- * 12 | previous = PERM HEAP_HEAD |
- * +--------------------------------+
+ * This is what a heap looks like in memory immediately after initialization:
+ *
+ *
+ * +--------------------------------+ <- begin = starting_address
+ * | unused space due to alignment |
+ * | size < page_size |
+ * 0 +--------------------------------+ <- first block
+ * | prev_size = 1 (arbitrary) |
+ * 4 +--------------------------------+
+ * | size = size0 | 1 |
+ * 8 +---------------------+----------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * 12 +---------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +---------------------+ |
+ * | available |
* | |
- * | memory available |
- * | for allocation |
+ * | for allocation |
* | |
- * +--------------------------------+
- * size - 8 | size = size-8 | status = free | a.k.a. back flag
- * +--------------------------------+
- * size - 4 | size = 0 | status = used | a.k.a. dummy front flag
- * +--------------------------------+
+ * size0 +--------------------------------+ <- last dummy block
+ * | prev_size = size0 |
+ * +4 +--------------------------------+
+ * | size = 0 (arbitrary) | 0 | <- prev block is free
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | unused space due to alignment |
+ * | size < page_size |
+ * +--------------------------------+ <- end = begin + size
+ *
+ * This is what the heap looks like after the first allocation of SIZE bytes.
+ * BSIZE stands for SIZE + 4 aligned up to a 'page_size' boundary if the
+ * allocation has been performed by _Heap_Allocate(). If the allocation has
+ * been performed by _Heap_Allocate_aligned(), the block size BSIZE is
+ * defined differently (see 'heapallocatealigned.c' for details).
+ *
+ * +--------------------------------+ <- begin = starting_address
+ * | unused space due to alignment |
+ * | size < page_size |
+ * 0 +--------------------------------+ <- first block
+ * | prev_size = 1 (arbitrary) |
+ * 4 +--------------------------------+
+ * | size = S = size0 - BSIZE | 1 |
+ * 8 +---------------------+----------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * 12 +---------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +---------------------+ |
+ * | available |
+ * | |
+ * | for allocation |
+ * | |
+ * S +--------------------------------+ <- used block
+ * | prev_size = size0 - BSIZE |
+ * +4 +--------------------------------+
+ * | size = BSIZE | 0 | <- prev block is free
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | . | Pointer returned to the user
+ * | . | is (S+8) for _Heap_Allocate()
+ * | . | and is in range
+ * S + 8 + | user-accessible | [S+8,S+8+page_size) for
+ * page_size+- - - - - -+ _Heap_Allocate_aligned()
+ * | area |
+ * | . |
+ * S + BSIZE +- - - - - . - - - - -+ <- last dummy block
+ * | . |
+ * +4 +--------------------------------+
+ * | size = 0 (arbitrary) | 1 | <- prev block is used
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | unused space due to alignment |
+ * | size < page_size |
+ * +--------------------------------+ <- end = begin + size
+ *
*/
uint32_t _Heap_Initialize(
@@ -62,31 +114,148 @@ uint32_t _Heap_Initialize(
uint32_t page_size
)
{
- Heap_Block *the_block;
- uint32_t the_size;
+ Heap_Block *the_block;
+ uint32_t the_size;
+ _H_uptr_t start;
+ _H_uptr_t aligned_start;
+ uint32_t overhead;
+ Heap_Statistics *const stats = &the_heap->stats;
+
+ if(page_size == 0)
+ page_size = CPU_ALIGNMENT;
+ else
+ _Heap_Align_up( &page_size, CPU_ALIGNMENT );
- if ( !_Heap_Is_page_size_valid( page_size ) ||
- (size < HEAP_MINIMUM_SIZE) )
- return 0;
+ /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
+ (value of user pointer) is aligned on 'page_size' boundary. Make sure
+ resulting 'aligned_start' is not below 'starting_address'. */
+ start = _H_p2u(starting_address);
+ aligned_start = start + HEAP_BLOCK_USER_OFFSET;
+ _Heap_Align_up_uptr ( &aligned_start, page_size );
+ aligned_start -= HEAP_BLOCK_USER_OFFSET;
+
+ /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
+ nearest multiple of 'page_size'. */
+ the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
+ _Heap_Align_up ( &the_heap->min_block_size, page_size );
+
+ /* Calculate 'the_size' -- size of the first block so that there is enough
+ space at the end for the permanent last block. It is equal to 'size'
+ minus total overhead aligned down to the nearest multiple of
+ 'page_size'. */
+ overhead = HEAP_OVERHEAD + (aligned_start - start);
+ if ( size < overhead )
+ return 0; /* Too small area for the heap */
+ the_size = size - overhead;
+ _Heap_Align_down ( &the_size, page_size );
+ if ( the_size == 0 )
+ return 0; /* Too small area for the heap */
the_heap->page_size = page_size;
- the_size = size - HEAP_OVERHEAD;
+ the_heap->begin = starting_address;
+ the_heap->end = starting_address + size;
+
+ the_block = (Heap_Block *) aligned_start;
+
+ the_block->prev_size = HEAP_PREV_USED;
+ the_block->size = the_size | HEAP_PREV_USED;
+ the_block->next = _Heap_Tail( the_heap );
+ the_block->prev = _Heap_Head( the_heap );
+ _Heap_Head(the_heap)->next = the_block;
+ _Heap_Tail(the_heap)->prev = the_block;
+ the_heap->start = the_block;
- the_block = (Heap_Block *) starting_address;
- the_block->back_flag = HEAP_DUMMY_FLAG;
- the_block->front_flag = the_size;
- the_block->next = _Heap_Tail( the_heap );
- the_block->previous = _Heap_Head( the_heap );
+ _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
+ _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
+ _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
- the_heap->start = the_block;
- the_heap->first = the_block;
- the_heap->permanent_null = NULL;
- the_heap->last = the_block;
- the_block = _Heap_Next_block( the_block );
- the_block->back_flag = the_size;
- the_block->front_flag = HEAP_DUMMY_FLAG;
- the_heap->final = the_block;
+ the_block = _Heap_Block_at( the_block, the_size );
+ the_heap->final = the_block; /* Permanent final block of the heap */
+ the_block->prev_size = the_size; /* Previous block is free */
+ the_block->size = 0; /* This is the only block with size=0 */
+
+ stats->size = size;
+ stats->free_size = the_size;
+ stats->min_free_size = the_size;
+ stats->free_blocks = 1;
+ stats->max_free_blocks = 1;
+ stats->used_blocks = 0;
+ stats->max_search = 0;
+ stats->allocs = 0;
+ stats->searches = 0;
+ stats->frees = 0;
+ stats->instance = instance++;
return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
}
+
+/*PAGE
+ *
+ * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
+ *
+ * Note: there is no reason to put them into separate files as they are
+ * always required for the heap to be useful.
+ *
+ */
+
+
+/*
+ * Convert the user-requested 'size' of a memory block to the block size.
+ * Return the block size on success, or 0 if an overflow occurred.
+ */
+uint32_t _Heap_Calc_block_size(
+ uint32_t size,
+ uint32_t page_size,
+ uint32_t min_size)
+{
+ uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
+ _Heap_Align_up(&block_size, page_size);
+ if(block_size < min_size) block_size = min_size;
+ return (block_size > size) ? block_size : 0;
+}
+
+
+/*
+ * Allocate block of size 'alloc_size' from 'the_block' belonging to
+ * 'the_heap'. Either split 'the_block' or allocate it entirely.
+ * Return the block allocated.
+ */
+Heap_Block* _Heap_Block_allocate(
+ Heap_Control* the_heap,
+ Heap_Block* the_block,
+ uint32_t alloc_size)
+{
+ Heap_Statistics *const stats = &the_heap->stats;
+ uint32_t const block_size = _Heap_Block_size(the_block);
+ uint32_t const the_rest = block_size - alloc_size;
+
+ _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
+ _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
+ _HAssert(alloc_size <= block_size);
+
+ if(the_rest >= the_heap->min_block_size) {
+ /* Split the block so that the lower part stays free and the upper part
+ becomes used. */
+ the_block->size = the_rest | HEAP_PREV_USED;
+ the_block = _Heap_Block_at(the_block, the_rest);
+ the_block->prev_size = the_rest;
+ the_block->size = alloc_size;
+ }
+ else {
+ /* Don't split the block as the remainder is either zero or too small to be
+ used as a separate free block. Change 'alloc_size' to the size of the
+ block and remove the block from the list of free blocks. */
+ _Heap_Block_remove(the_block);
+ alloc_size = block_size;
+ stats->free_blocks -= 1;
+ }
+ /* Mark the block as used (in the next block). */
+ _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
+ /* Update statistics */
+ stats->free_size -= alloc_size;
+ if(stats->min_free_size > stats->free_size)
+ stats->min_free_size = stats->free_size;
+ stats->used_blocks += 1;
+ return the_block;
+}
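
_Heap_Calc_block_size() above relies on _Heap_Align_up()/_Heap_Align_down() from heap.inl, which fall outside this diffstat-limited view. Below is a standalone sketch of the same rounding and overflow check, with a local align_up() helper standing in for the RTEMS inline routine and the 4-byte per-block overhead taken from the 'SIZE + 4' in the layout comment above.

  #include <stdint.h>

  #define BLOCK_USED_OVERHEAD 4u   /* matches the 'SIZE + 4' in the diagram */

  /* Round 'value' up to the nearest multiple of 'alignment'
     (stand-in for _Heap_Align_up()). */
  static uint32_t align_up(uint32_t value, uint32_t alignment)
  {
    uint32_t excess = value % alignment;
    return excess ? value + (alignment - excess) : value;
  }

  /* User-requested size -> block size, mirroring _Heap_Calc_block_size():
     add the per-block overhead, round up to the page size, enforce the
     minimum block size, and signal 32-bit overflow by returning 0. */
  static uint32_t calc_block_size(uint32_t size,
                                  uint32_t page_size,
                                  uint32_t min_size)
  {
    uint32_t block_size = align_up(size + BLOCK_USED_OVERHEAD, page_size);
    if (block_size < min_size)
      block_size = min_size;
    /* If the arithmetic wrapped, block_size ended up no larger than size. */
    return (block_size > size) ? block_size : 0;
  }

For example, with page_size 8 and min_block_size 16, a 1-byte request becomes a 16-byte block and a 20-byte request becomes a 24-byte block.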
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 54bdd58e45..d49eb62b46 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -36,78 +36,43 @@ void *_Heap_Allocate(
uint32_t size
)
{
- uint32_t excess;
- uint32_t the_size;
+ uint32_t the_size;
+ uint32_t search_count;
Heap_Block *the_block;
- Heap_Block *next_block;
- Heap_Block *temporary_block;
- void *ptr;
- uint32_t offset;
-
- /*
- * Catch the case of a user allocating close to the limit of the
- * uint32_t .
- */
-
- if ( size >= (-1 - HEAP_BLOCK_USED_OVERHEAD) )
- return( NULL );
+ void *ptr = NULL;
+ Heap_Statistics *const stats = &the_heap->stats;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
+
+ the_size =
+ _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
+ if(the_size == 0)
+ return NULL;
+
+ /* Find large enough free block. */
+ for(the_block = _Heap_First(the_heap), search_count = 0;
+ the_block != tail;
+ the_block = the_block->next, ++search_count)
+ {
+ /* As we always coalesce free blocks, prev block must have been used. */
+ _HAssert(_Heap_Is_prev_used(the_block));
- excess = size % the_heap->page_size;
- the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
+ /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
+ result of the comparison. */
+ if(the_block->size >= the_size) {
+ the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
- if ( excess )
- the_size += the_heap->page_size - excess;
+ ptr = _Heap_User_area(the_block);
- if ( the_size < sizeof( Heap_Block ) )
- the_size = sizeof( Heap_Block );
+ stats->allocs += 1;
+ stats->searches += search_count + 1;
- for ( the_block = the_heap->first;
- ;
- the_block = the_block->next ) {
- if ( the_block == _Heap_Tail( the_heap ) )
- return( NULL );
- if ( the_block->front_flag >= the_size )
+ _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
break;
+ }
}
- if ( (the_block->front_flag - the_size) >
- (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
- the_block->front_flag -= the_size;
- next_block = _Heap_Next_block( the_block );
- next_block->back_flag = the_block->front_flag;
-
- temporary_block = _Heap_Block_at( next_block, the_size );
- temporary_block->back_flag =
- next_block->front_flag = _Heap_Build_flag( the_size,
- HEAP_BLOCK_USED );
- ptr = _Heap_Start_of_user_area( next_block );
- } else {
- next_block = _Heap_Next_block( the_block );
- next_block->back_flag = _Heap_Build_flag( the_block->front_flag,
- HEAP_BLOCK_USED );
- the_block->front_flag = next_block->back_flag;
- the_block->next->previous = the_block->previous;
- the_block->previous->next = the_block->next;
- ptr = _Heap_Start_of_user_area( the_block );
- }
-
- /*
- * round ptr up to a multiple of page size
- * Have to save the bump amount in the buffer so that free can figure it out
- */
-
- offset = the_heap->page_size - (((uint32_t ) ptr) & (the_heap->page_size - 1));
- ptr = _Addresses_Add_offset( ptr, offset );
- *(((uint32_t *) ptr) - 1) = offset;
-
-#ifdef RTEMS_DEBUG
- {
- uint32_t ptr_u32;
- ptr_u32 = (uint32_t ) ptr;
- if (ptr_u32 & (the_heap->page_size - 1))
- abort();
- }
-#endif
+ if(stats->max_search < search_count)
+ stats->max_search = search_count;
return ptr;
}
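
The rewritten _Heap_Allocate() is a plain first-fit scan of the circular free list anchored in Heap_Control, with per-call search statistics. A stripped-down sketch of just the search, reusing the illustrative Heap_Block type from the sketch after the diffstat:

  /* First-fit search over the free list: return the first block whose
     size is at least 'wanted', or NULL when the scan reaches 'tail'.
     '*search_count' mirrors the value added to stats->searches. */
  static Heap_Block *first_fit(Heap_Block *first, Heap_Block *tail,
                               uint32_t wanted, uint32_t *search_count)
  {
    Heap_Block *b;
    uint32_t count = 0;

    for (b = first; b != tail; b = b->next, ++count) {
      /* As in _Heap_Allocate(), the HEAP_PREV_USED bit in 'size' is not
         masked out: both sizes are multiples of the page size, so the
         stray low bit cannot change the outcome of the comparison. */
      if (b->size >= wanted) {
        *search_count = count + 1;
        return b;
      }
    }
    *search_count = count;
    return NULL;
  }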
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index 3cb318d70b..8166d99e6f 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -39,8 +39,8 @@ Heap_Extend_status _Heap_Extend(
uint32_t *amount_extended
)
{
- Heap_Block *the_block;
- uint32_t *p;
+ uint32_t the_size;
+ Heap_Statistics *const stats = &the_heap->stats;
/*
* The overhead was taken from the original heap memory.
@@ -62,22 +62,13 @@ Heap_Extend_status _Heap_Extend(
* As noted, this code only supports (4).
*/
- if ( starting_address >= (void *) the_heap->start && /* case 3 */
- starting_address <= (void *) the_heap->final
+ if ( starting_address >= the_heap->begin && /* case 3 */
+ starting_address < the_heap->end
)
return HEAP_EXTEND_ERROR;
- if ( starting_address < (void *) the_heap->start ) { /* cases 1 and 2 */
-
- return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1 and 2 */
-
- } else { /* cases 4 and 5 */
-
- the_block = (Heap_Block *)
- _Addresses_Subtract_offset( starting_address, HEAP_OVERHEAD );
- if ( the_block != the_heap->final )
- return HEAP_EXTEND_NOT_IMPLEMENTED; /* case 5 */
- }
+ if ( starting_address != the_heap->end )
+ return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1, 2, and 5 */
/*
* Currently only case 4 should make it to this point.
@@ -85,27 +76,26 @@ Heap_Extend_status _Heap_Extend(
* block and free it.
*/
+ old_final = the_heap->final;
+ the_heap->end = _Addresses_Add_offset( the_heap->end, size );
+ the_size = _Addresses_Subtract( the_heap->end, old_final ) - HEAP_OVERHEAD;
+ _Heap_Align_down( &the_size, the_heap->page_size );
+
*amount_extended = size;
- old_final = the_heap->final;
- new_final = _Addresses_Add_offset( old_final, size );
- /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
+ if( the_size < the_heap->min_block_size )
+ return HEAP_EXTEND_SUCCESSFUL;
+ old_final->size = the_size | (old_final->size & HEAP_PREV_USED);
+ new_final = _Heap_Block_at( old_final, the_size );
+ new_final->size = HEAP_PREV_USED;
the_heap->final = new_final;
- old_final->front_flag =
- new_final->back_flag = _Heap_Build_flag( size, HEAP_BLOCK_USED );
- new_final->front_flag = HEAP_DUMMY_FLAG;
-
- /*
- * Must pass in address of "user" area
- * So add in the offset field.
- */
+ stats->size += size;
+ stats->used_blocks += 1;
+ stats->frees -= 1; /* Don't count subsequent call as actual free() */
- p = (uint32_t *) &old_final->next;
- *p = sizeof(uint32_t );
- p++;
- _Heap_Free( the_heap, p );
+ _Heap_Free( the_heap, _Heap_User_area( old_final ) );
return HEAP_EXTEND_SUCCESSFUL;
}
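
_Heap_Extend() still implements only the case where the added memory starts exactly at the current heap end; everything else is rejected up front using the begin/end bounds the new code keeps in Heap_Control. The case analysis in isolation, with the status names used above and 'begin'/'end' assumed to be those stored bounds:

  typedef enum {
    HEAP_EXTEND_SUCCESSFUL,
    HEAP_EXTEND_ERROR,
    HEAP_EXTEND_NOT_IMPLEMENTED
  } Heap_Extend_status;

  /* Case analysis only -- the real routine goes on to grow the final
     block, adjust the statistics, and hand the grown block to
     _Heap_Free(). */
  static Heap_Extend_status classify_extend(void *begin, void *end,
                                            void *starting_address)
  {
    if (starting_address >= begin && starting_address < end)
      return HEAP_EXTEND_ERROR;            /* case 3: inside the heap      */
    if (starting_address != end)
      return HEAP_EXTEND_NOT_IMPLEMENTED;  /* cases 1, 2 and 5             */
    return HEAP_EXTEND_SUCCESSFUL;         /* case 4: contiguous after end */
  }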
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 40e56a7b88..0bd413d1dc 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -39,65 +39,90 @@ boolean _Heap_Free(
{
Heap_Block *the_block;
Heap_Block *next_block;
- Heap_Block *new_next_block;
- Heap_Block *previous_block;
- Heap_Block *temporary_block;
- uint32_t the_size;
+ uint32_t the_size;
+ uint32_t next_size;
+ Heap_Statistics *const stats = &the_heap->stats;
+ boolean next_is_free;
- the_block = _Heap_User_block_at( starting_address );
+ _Heap_Start_of_block( the_heap, starting_address, &the_block );
- if ( !_Heap_Is_block_in( the_heap, the_block ) ||
- _Heap_Is_block_free( the_block ) ) {
- return( FALSE );
+ if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
+ _HAssert(starting_address == NULL);
+ return( FALSE );
}
- the_size = _Heap_Block_size( the_block );
+ the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_block_in( the_heap, next_block ) ||
- (the_block->front_flag != next_block->back_flag) ) {
- return( FALSE );
+ if ( !_Heap_Is_prev_used( next_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
+ }
+
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
}
- if ( _Heap_Is_previous_block_free( the_block ) ) {
- previous_block = _Heap_Previous_block( the_block );
+ next_size = _Heap_Block_size( next_block );
+ next_is_free = next_block < the_heap->final &&
+ !_Heap_Is_prev_used(_Heap_Block_at(next_block, next_size));
+
+ if ( !_Heap_Is_prev_used( the_block ) ) {
+ uint32_t const prev_size = the_block->prev_size;
+ Heap_Block *const prev_block = _Heap_Block_at( the_block, -prev_size );
- if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
- return( FALSE );
+ if ( !_Heap_Is_block_in( the_heap, prev_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
}
- if ( _Heap_Is_block_free( next_block ) ) { /* coalesce both */
- previous_block->front_flag += next_block->front_flag + the_size;
- temporary_block = _Heap_Next_block( previous_block );
- temporary_block->back_flag = previous_block->front_flag;
- next_block->next->previous = next_block->previous;
- next_block->previous->next = next_block->next;
+ /* As we always coalesce free blocks, the block that precedes prev_block
+ must have been used. */
+ if ( !_Heap_Is_prev_used ( prev_block) ) {
+ _HAssert(FALSE);
+ return( FALSE );
+ }
+
+ if ( next_is_free ) { /* coalesce both */
+ uint32_t const size = the_size + prev_size + next_size;
+ _Heap_Block_remove( next_block );
+ stats->free_blocks -= 1;
+ prev_block->size = size | HEAP_PREV_USED;
+ next_block = _Heap_Block_at( prev_block, size );
+ _HAssert(!_Heap_Is_prev_used( next_block));
+ next_block->prev_size = size;
}
- else { /* coalesce prev */
- previous_block->front_flag =
- next_block->back_flag = previous_block->front_flag + the_size;
+ else { /* coalesce prev */
+ uint32_t const size = the_size + prev_size;
+ prev_block->size = size | HEAP_PREV_USED;
+ next_block->size &= ~HEAP_PREV_USED;
+ next_block->prev_size = size;
}
}
- else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
- the_block->front_flag = the_size + next_block->front_flag;
- new_next_block = _Heap_Next_block( the_block );
- new_next_block->back_flag = the_block->front_flag;
- the_block->next = next_block->next;
- the_block->previous = next_block->previous;
- next_block->previous->next = the_block;
- next_block->next->previous = the_block;
-
- if (the_heap->first == next_block)
- the_heap->first = the_block;
+ else if ( next_is_free ) { /* coalesce next */
+ uint32_t const size = the_size + next_size;
+ _Heap_Block_replace( next_block, the_block );
+ the_block->size = size | HEAP_PREV_USED;
+ next_block = _Heap_Block_at( the_block, size );
+ next_block->prev_size = size;
}
- else { /* no coalesce */
- next_block->back_flag =
- the_block->front_flag = the_size;
- the_block->previous = _Heap_Head( the_heap );
- the_block->next = the_heap->first;
- the_heap->first = the_block;
- the_block->next->previous = the_block;
+ else { /* no coalesce */
+ /* Add 'the_block' to the head of the free blocks list as it tends to
+ produce less fragmentation than adding to the tail. */
+ _Heap_Block_insert_after( _Heap_Head( the_heap), the_block );
+ the_block->size = the_size | HEAP_PREV_USED;
+ next_block->size &= ~HEAP_PREV_USED;
+ next_block->prev_size = the_size;
+
+ stats->free_blocks += 1;
+ if ( stats->max_free_blocks < stats->free_blocks )
+ stats->max_free_blocks = stats->free_blocks;
}
+ stats->used_blocks -= 1;
+ stats->free_size += the_size;
+ stats->frees += 1;
+
return( TRUE );
}
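
The free path now distinguishes four situations -- coalesce with both neighbours, with the previous block only, with the next block only, or no coalescing -- and the whole decision rests on two bits: the HEAP_PREV_USED flag of the block being freed and the one stored in the block after 'next_block'. A condensed sketch of just that test, reusing block_size()/block_at()/HEAP_PREV_USED from the sketch after the diffstat (validity checks, list surgery and statistics omitted):

  /* Determine which physical neighbours of 'the_block' are free,
     mirroring the tests made by _Heap_Free().  'final' is the heap's
     permanent last block, which is never free. */
  static void neighbour_state(Heap_Block *the_block, Heap_Block *final,
                              int *prev_is_free, int *next_is_free)
  {
    uint32_t    the_size   = block_size(the_block);
    Heap_Block *next_block = block_at(the_block, the_size);
    uint32_t    next_size  = block_size(next_block);

    /* The previous block is free iff our own HEAP_PREV_USED bit is clear. */
    *prev_is_free = !(the_block->size & HEAP_PREV_USED);

    /* The next block is free iff it is not the permanent final block and
       the block after it reports its predecessor as free. */
    *next_is_free = next_block < final &&
                    !(block_at(next_block, next_size)->size & HEAP_PREV_USED);
  }

When the previous block is free, it is located simply as the_block minus the_block->prev_size, which is why prev_size only needs to be valid for a block that follows a free block.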
diff --git a/cpukit/score/src/heapgetfreeinfo.c b/cpukit/score/src/heapgetfreeinfo.c
index 910eda838f..1b8d8598a0 100644
--- a/cpukit/score/src/heapgetfreeinfo.c
+++ b/cpukit/score/src/heapgetfreeinfo.c
@@ -37,21 +37,24 @@ void _Heap_Get_free_information(
)
{
Heap_Block *the_block;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
info->number = 0;
info->largest = 0;
info->total = 0;
- for ( the_block = the_heap->first;
- ;
- the_block = the_block->next ) {
- if ( the_block == _Heap_Tail( the_heap ) )
- return;
-
- info->number++;
- info->total += the_block->front_flag;
+ for(the_block = _Heap_First(the_heap);
+ the_block != tail;
+ the_block = the_block->next)
+ {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+
+ /* As we always coalesce free blocks, prev block must have been used. */
+ _HAssert(_Heap_Is_prev_used(the_block));
- if ( the_block->front_flag >= info->largest )
- info->largest = the_block->front_flag;
+ info->number++;
+ info->total += the_size;
+ if ( info->largest < the_size )
+ info->largest = the_size;
}
}
diff --git a/cpukit/score/src/heapgetinfo.c b/cpukit/score/src/heapgetinfo.c
index 9e71ea7e43..6ba368c7a0 100644
--- a/cpukit/score/src/heapgetinfo.c
+++ b/cpukit/score/src/heapgetinfo.c
@@ -8,6 +8,7 @@
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
+ * $Id$
*/
@@ -37,11 +38,11 @@ Heap_Get_information_status _Heap_Get_information(
Heap_Information_block *the_info
)
{
- Heap_Block *the_block = 0; /* avoid warnings */
- Heap_Block *next_block = 0; /* avoid warnings */
- int notdone = 1;
- uint32_t size;
+ Heap_Block *the_block = the_heap->start;
+ Heap_Block *const end = the_heap->final;
+ _HAssert(the_block->prev_size == HEAP_PREV_USED);
+ _HAssert(_Heap_Is_prev_used(the_block));
the_info->Free.number = 0;
the_info->Free.total = 0;
@@ -50,60 +51,31 @@ Heap_Get_information_status _Heap_Get_information(
the_info->Used.total = 0;
the_info->Used.largest = 0;
- /*
- * We don't want to allow walking the heap until we have
- * transferred control to the user task so we watch the
- * system state.
- */
-
- if ( !_System_state_Is_up( _System_state_Get() ) )
- return HEAP_GET_INFORMATION_SYSTEM_STATE_ERROR;
-
- the_block = the_heap->start;
-
- /*
- * Handle the 1st block
- */
-
- if ( the_block->back_flag != HEAP_DUMMY_FLAG ) {
- return HEAP_GET_INFORMATION_BLOCK_ERROR;
+ while ( the_block != end ) {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+ Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
+
+ if ( _Heap_Is_prev_used(next_block) ) {
+ the_info->Used.number++;
+ the_info->Used.total += the_size;
+ if ( the_info->Used.largest < the_size )
+ the_info->Used.largest = the_size;
+ } else {
+ the_info->Free.number++;
+ the_info->Free.total += the_size;
+ if ( the_info->Free.largest < the_size )
+ the_info->Free.largest = the_size;
+ if ( the_size != next_block->prev_size )
+ return HEAP_GET_INFORMATION_BLOCK_ERROR;
+ }
+
+ the_block = next_block;
}
- while (notdone) {
-
- /*
- * Accumulate size
- */
-
- size = _Heap_Block_size(the_block);
- if ( _Heap_Is_block_free(the_block) ) {
- the_info->Free.number++;
- the_info->Free.total += size;
- if ( size > the_info->Free.largest )
- the_info->Free.largest = size;
- } else {
- the_info->Used.number++;
- the_info->Used.total += size;
- if ( size > the_info->Used.largest )
- the_info->Used.largest = size;
- }
-
- /*
- * Handle the last block
- */
-
- if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
- next_block = _Heap_Next_block(the_block);
- if ( the_block->front_flag != next_block->back_flag ) {
- return HEAP_GET_INFORMATION_BLOCK_ERROR;
- }
- }
-
- if ( the_block->front_flag == HEAP_DUMMY_FLAG )
- notdone = 0;
- else
- the_block = next_block;
- } /* while(notdone) */
+ /* Handle the last dummy block. Don't consider this block to be
+ "used" as the client never allocated it. Make 'Used.total' include this
+ block's overhead though. */
+ the_info->Used.total += HEAP_OVERHEAD;
return HEAP_GET_INFORMATION_SUCCESSFUL;
}
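
Unlike the free-list scans elsewhere, _Heap_Get_information() now walks the heap physically from 'start' to 'final' and classifies each block by the HEAP_PREV_USED bit of its successor, so no per-block "free" flag is needed. A sketch of that traversal with the illustrative helpers from the earlier sketches (the real routine additionally verifies that a free block's size matches its successor's prev_size):

  struct heap_counts {
    uint32_t used_blocks, used_bytes;
    uint32_t free_blocks, free_bytes;
  };

  /* Walk every block from 'first' up to (but excluding) the permanent
     final block; whether a block is in use is recorded in the next
     block's HEAP_PREV_USED bit. */
  static void count_blocks(Heap_Block *first, Heap_Block *final,
                           struct heap_counts *c)
  {
    Heap_Block *b = first;

    c->used_blocks = c->used_bytes = 0;
    c->free_blocks = c->free_bytes = 0;

    while (b != final) {
      uint32_t    size = block_size(b);
      Heap_Block *next = block_at(b, size);

      if (next->size & HEAP_PREV_USED) {
        c->used_blocks += 1;
        c->used_bytes  += size;
      } else {
        c->free_blocks += 1;
        c->free_bytes  += size;
      }
      b = next;
    }
  }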
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index 3321816c4a..002e815bd5 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -20,12 +20,14 @@
*
* _Heap_Size_of_user_area
*
- * This kernel routine returns the size of the memory area
- * given heap block.
+ * This kernel routine sets '*size' to the size of the block of memory
+ * which begins at 'starting_address'.
+ * It returns TRUE if the 'starting_address' is in the heap, and FALSE
+ * otherwise.
*
* Input parameters:
* the_heap - pointer to heap header
- * starting_address - starting address of the memory block to free.
+ * starting_address - starting address of the memory block
* size - pointer to size of area
*
* Output parameters:
@@ -37,7 +39,7 @@
boolean _Heap_Size_of_user_area(
Heap_Control *the_heap,
void *starting_address,
- uint32_t *size
+ size_t *size
)
{
Heap_Block *the_block;
@@ -48,22 +50,32 @@ boolean _Heap_Size_of_user_area(
starting_address, (void *)the_heap->start, (void *)the_heap->final ) )
return( FALSE );
- the_block = _Heap_User_block_at( starting_address );
-
- if ( !_Heap_Is_block_in( the_heap, the_block ) )
- return( FALSE );
+ _Heap_Start_of_block( the_heap, starting_address, &the_block );
- if ( _Heap_Is_block_free( the_block ) )
+ if ( !_Heap_Is_block_in( the_heap, the_block ) )
return( FALSE );
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_block_in( the_heap, next_block ) ||
- (the_block->front_flag != next_block->back_flag) )
+ if (
+ !_Heap_Is_block_in( the_heap, next_block ) ||
+ !_Heap_Is_prev_used( next_block )
+ )
return( FALSE );
- *size = the_size;
+ /* 'starting_address' could be greater than 'the_block' address plus
+ HEAP_BLOCK_USER_OFFSET, as _Heap_Allocate_aligned() may produce such user
+ pointers. To get rid of this offset we calculate the user size as the
+ difference between the end of 'the_block' (= 'next_block') and
+ 'starting_address', and then add a correction equal to the offset of the
+ 'size' field of the 'Heap_Block' structure. The correction accounts for
+ the fact that the 'prev_size' field of the next block is actually usable
+ as part of the user-accessible area of 'the_block'. */
+
+ *size = _Addresses_Subtract ( next_block, starting_address )
+ + HEAP_BLOCK_HEADER_OFFSET;
+
return( TRUE );
}
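
The returned size therefore runs from the (possibly alignment-shifted) user pointer to the end of the block, plus the offset of the 'size' field, because the next block's prev_size word is usable by this block's owner. A tiny restatement with the constants spelled out -- 8 for HEAP_BLOCK_USER_OFFSET and 4 for the offset of 'size', both implied by the layout diagrams in heap.c rather than stated in this diff:

  /* Same arithmetic as _Heap_Size_of_user_area(), with assumed constants:
     the user pointer of a plain allocation is block + 8, and the usable
     area extends through the next block's 4-byte prev_size word. */
  static uint32_t user_area_size(char *block, uint32_t bsize, char *user_ptr)
  {
    char *next_block = block + bsize;
    return (uint32_t)(next_block - user_ptr) + 4;
  }

For a block allocated by _Heap_Allocate() this yields bsize - 4, which is at least the size originally requested.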
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index 4f93ae7be0..a589b3cdf4 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -30,32 +30,32 @@
* Output parameters: NONE
*/
-#ifndef RTEMS_DEBUG
+#if !defined(RTEMS_HEAP_DEBUG)
-void _Heap_Walk(
+boolean _Heap_Walk(
Heap_Control *the_heap,
int source,
boolean do_dump
)
{
+ return TRUE;
}
-#else
+#else /* defined(RTEMS_HEAP_DEBUG) */
#include <stdio.h>
-#include <unistd.h>
-void _Heap_Walk(
+boolean _Heap_Walk(
Heap_Control *the_heap,
int source,
boolean do_dump
)
{
- Heap_Block *the_block = 0; /* avoid warnings */
- Heap_Block *next_block = 0; /* avoid warnings */
- int notdone = 1;
- int error = 0;
- int passes = 0;
+ Heap_Block *the_block = the_heap->start;
+ Heap_Block *const end = the_heap->final;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
+ int error = 0;
+ int passes = 0;
/*
* We don't want to allow walking the heap until we have
@@ -64,86 +64,110 @@ void _Heap_Walk(
*/
if ( !_System_state_Is_up( _System_state_Get() ) )
- return;
+ return FALSE;
- the_block = the_heap->start;
+ if (source < 0)
+ source = the_heap->stats.instance;
- if (do_dump == TRUE) {
- printf("\nPASS: %d start @ 0x%p final 0x%p, first 0x%p last 0x%p\n",
- source, the_heap->start, the_heap->final,
- the_heap->first, the_heap->last
- );
- }
+ if (do_dump == TRUE)
+ printf("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
+ source, the_block, end,
+ _Heap_First(the_heap), _Heap_Last(the_heap),
+ the_heap->begin, the_heap->end);
/*
* Handle the 1st block
*/
- if (the_block->back_flag != HEAP_DUMMY_FLAG) {
- printf("PASS: %d Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
+ if (!_Heap_Is_prev_used(the_block)) {
+ printf("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
error = 1;
}
- while (notdone) {
- passes++;
- if (error && (passes > 10))
- abort();
-
- if (do_dump == TRUE) {
- printf("PASS: %d Block @ 0x%p Back %d, Front %d",
- source, the_block,
- the_block->back_flag, the_block->front_flag);
- if ( _Heap_Is_block_free(the_block) ) {
- printf( " Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
+ if (the_block->prev_size != HEAP_PREV_USED) {
+ printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
+ error = 1;
+ }
+
+ while ( the_block < end ) {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+ Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
+ boolean prev_used = _Heap_Is_prev_used(the_block);
+
+ if (do_dump) {
+ printf("PASS: %d block %p size %d(%c)",
+ source, the_block, the_size, (prev_used ? 'U' : 'F'));
+ if (prev_used)
+ printf(" prev_size %d", the_block->prev_size);
+ else
+ printf(" (prev_size) %d", the_block->prev_size);
}
- /*
- * Handle the last block
- */
+ if (!_Heap_Is_block_in(the_heap, next_block)) {
+ if (do_dump) printf("\n");
+ printf("PASS: %d !block %p is out of heap\n", source, next_block);
+ error = 1;
+ break;
+ }
- if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
- next_block = _Heap_Next_block(the_block);
- if ( the_block->front_flag != next_block->back_flag ) {
+ if (!_Heap_Is_prev_used(next_block)) {
+ if (do_dump)
+ printf( " prev %p next %p", the_block->prev, the_block->next);
+ if (_Heap_Block_size(the_block) != next_block->prev_size) {
+ if (do_dump) printf("\n");
+ printf("PASS: %d !front and back sizes don't match", source);
error = 1;
- printf("PASS: %d Front and back flags don't match\n", source);
- printf(" Current Block (%p): Back - %d, Front - %d",
- the_block, the_block->back_flag, the_block->front_flag);
- if (do_dump == TRUE) {
- if (_Heap_Is_block_free(the_block)) {
- printf(" Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
- } else {
- printf("\n");
- }
- printf(" Next Block (%p): Back - %d, Front - %d",
- next_block, next_block->back_flag, next_block->front_flag);
- if (do_dump == TRUE) {
- if (_Heap_Is_block_free(next_block)) {
- printf(" Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
- } else {
- printf("\n");
+ }
+ if (!prev_used) {
+ if (do_dump || error) printf("\n");
+ printf("PASS: %d !two consecutive blocks are free", source);
+ error = 1;
+ }
+
+ { /* Check if 'the_block' is in the free block list */
+ Heap_Block* block = _Heap_First(the_heap);
+ while(block != the_block && block != tail)
+ block = block->next;
+ if(block != the_block) {
+ if (do_dump || error) printf("\n");
+ printf("PASS: %d !the_block not in the free list", source);
+ error = 1;
}
}
+
}
+ if (do_dump || error) printf("\n");
- if (the_block->front_flag == HEAP_DUMMY_FLAG)
- notdone = 0;
- else
- the_block = next_block;
+ if (the_size < the_heap->min_block_size) {
+ printf("PASS: %d !block size is too small\n", source);
+ error = 1;
+ break;
+ }
+ if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
+ printf("PASS: %d !block size is misaligned\n", source);
+ error = 1;
+ }
+
+ if (++passes > (do_dump ? 10 : 0) && error)
+ break;
+
+ the_block = next_block;
+ }
+
+ if (the_block != end) {
+ printf("PASS: %d !last block address isn't equal to 'final'\n", source);
+ error = 1;
+ }
+
+ if (_Heap_Block_size(the_block) != 0) {
+ printf("PASS: %d !last block's size isn't 0\n", source);
+ error = 1;
}
- if (error)
- abort();
+ if(do_dump && error)
+ abort();
+
+ return error == 0;
+
}
-#endif
+#endif /* defined(RTEMS_HEAP_DEBUG) */
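
Taken together, the checks in _Heap_Walk() assert a small set of invariants: the first block's HEAP_PREV_USED bit is set, every block's size is page-aligned and at least min_block_size, a free block's size matches the prev_size recorded in its successor, no two consecutive blocks are free, every free block is reachable from the free list, and the walk terminates exactly at 'final', whose size is 0. The free-list membership test in isolation, with the illustrative types from the sketch after the diffstat:

  /* Non-zero iff 'the_block' can be reached from the free list -- the
     same linear scan _Heap_Walk() performs for each free block it meets. */
  static int is_on_free_list(Heap_Block *first, Heap_Block *tail,
                             Heap_Block *the_block)
  {
    Heap_Block *b = first;

    while (b != the_block && b != tail)
      b = b->next;
    return b == the_block;
  }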