summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
commitdea3eccb38b556b04552219e00b7abd656587278 (patch)
tree6affcb3026172273e366ee15ed3e8ec70f023a20 /cpukit/score/src
parentRegenerate. (diff)
downloadrtems-dea3eccb38b556b04552219e00b7abd656587278.tar.bz2
2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* libcsupport/src/free.c, libmisc/stackchk/check.c, rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c, rtems/src/regionextend.c, rtems/src/regiongetinfo.c, rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c, rtems/src/regionresizesegment.c, score/src/pheapallocate.c, score/src/pheapallocatealigned.c, score/src/pheapextend.c, score/src/pheapfree.c, score/src/pheapgetblocksize.c, score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c, score/src/pheapgetsize.c, score/src/pheapinit.c, score/src/pheapresizeblock.c, score/src/pheapwalk.c: Update for heap API changes. * score/include/rtems/score/apimutex.h, score/include/rtems/score/object.h: Documentation. * score/include/rtems/score/heap.h, score/include/rtems/score/protectedheap.h, score/inline/rtems/score/heap.inl, score/src/heap.c, score/src/heapallocate.c, score/src/heapallocatealigned.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c, score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c, score/src/heapwalk.c: Overall cleanup. Added boundary constraint to allocation function. More changes follow.
Diffstat (limited to 'cpukit/score/src')
-rw-r--r--cpukit/score/src/heap.c240
-rw-r--r--cpukit/score/src/heapallocate.c214
-rw-r--r--cpukit/score/src/heapallocatealigned.c12
-rw-r--r--cpukit/score/src/heapextend.c22
-rw-r--r--cpukit/score/src/heapfree.c32
-rw-r--r--cpukit/score/src/heapgetfreeinfo.c27
-rw-r--r--cpukit/score/src/heapgetinfo.c36
-rw-r--r--cpukit/score/src/heapresizeblock.c204
-rw-r--r--cpukit/score/src/heapsizeofuserarea.c46
-rw-r--r--cpukit/score/src/heapwalk.c581
-rw-r--r--cpukit/score/src/pheapallocate.c10
-rw-r--r--cpukit/score/src/pheapallocatealigned.c12
-rw-r--r--cpukit/score/src/pheapextend.c12
-rw-r--r--cpukit/score/src/pheapfree.c8
-rw-r--r--cpukit/score/src/pheapgetblocksize.c12
-rw-r--r--cpukit/score/src/pheapgetfreeinfo.c8
-rw-r--r--cpukit/score/src/pheapgetinfo.c17
-rw-r--r--cpukit/score/src/pheapgetsize.c10
-rw-r--r--cpukit/score/src/pheapinit.c8
-rw-r--r--cpukit/score/src/pheapresizeblock.c14
-rw-r--r--cpukit/score/src/pheapwalk.c8
21 files changed, 1068 insertions, 465 deletions
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 208197d5a3..45faac7630 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -1,9 +1,17 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2009 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
@@ -18,6 +26,10 @@
#include <rtems/system.h>
#include <rtems/score/heap.h>
+#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 4 != 0
+ #error "invalid CPU_ALIGNMENT value"
+#endif
+
static uint32_t instance = 0;
/*PAGE
@@ -113,16 +125,19 @@ static uint32_t instance = 0;
uintptr_t _Heap_Initialize(
Heap_Control *heap,
- void *area_begin,
- uintptr_t area_size,
+ void *heap_area_begin_ptr,
+ uintptr_t heap_area_size,
uintptr_t page_size
)
{
- Heap_Statistics * const stats = &heap->stats;
- uintptr_t heap_area_begin = (uintptr_t) area_begin;
- uintptr_t heap_area_end = heap_area_begin + area_size;
- uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
+ uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
+ uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
uintptr_t alloc_area_size = 0;
+ uintptr_t first_block_begin = 0;
+ uintptr_t first_block_size = 0;
+ uintptr_t min_block_size = 0;
uintptr_t overhead = 0;
Heap_Block *first_block = NULL;
Heap_Block *second_block = NULL;
@@ -132,47 +147,50 @@ uintptr_t _Heap_Initialize(
} else {
page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
}
-
- heap->min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
+ min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
- overhead = HEAP_LAST_BLOCK_OVERHEAD
- + (alloc_area_begin - HEAP_BLOCK_ALLOC_AREA_OFFSET - heap_area_begin);
- alloc_area_size = _Heap_Align_down ( area_size - overhead, page_size );
+ first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
+ overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
+ first_block_size = heap_area_size - overhead;
+ first_block_size = _Heap_Align_down ( first_block_size, page_size );
+ alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;
if (
heap_area_end < heap_area_begin
- || area_size < overhead
- || alloc_area_size == 0
+ || heap_area_size <= overhead
+ || first_block_size < min_block_size
) {
/* Invalid area or area too small */
return 0;
}
- heap->page_size = page_size;
- heap->begin = heap_area_begin;
- heap->end = heap_area_end;
-
/* First block */
- first_block = _Heap_Block_of_alloc_area( alloc_area_begin, page_size );
+ first_block = (Heap_Block *) first_block_begin;
first_block->prev_size = page_size;
- first_block->size_and_flag = alloc_area_size | HEAP_PREV_BLOCK_USED;
+ first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
first_block->next = _Heap_Free_list_tail( heap );
first_block->prev = _Heap_Free_list_head( heap );
- _Heap_Free_list_head( heap )->next = first_block;
- _Heap_Free_list_tail( heap )->prev = first_block;
- heap->start = first_block;
/* Second and last block */
- second_block = _Heap_Block_at( first_block, alloc_area_size );
- second_block->prev_size = alloc_area_size;
- second_block->size_and_flag = page_size | HEAP_PREV_BLOCK_FREE;
- heap->final = second_block;
+ second_block = _Heap_Block_at( first_block, first_block_size );
+ second_block->prev_size = first_block_size;
+ second_block->size_and_flag = page_size;
+
+ /* Heap control */
+ heap->page_size = page_size;
+ heap->min_block_size = min_block_size;
+ heap->area_begin = heap_area_begin;
+ heap->area_end = heap_area_end;
+ heap->first_block = first_block;
+ heap->last_block = second_block;
+ _Heap_Free_list_head( heap )->next = first_block;
+ _Heap_Free_list_tail( heap )->prev = first_block;
/* Statistics */
- stats->size = area_size;
- stats->free_size = alloc_area_size;
- stats->min_free_size = alloc_area_size;
+ stats->size = heap_area_size;
+ stats->free_size = first_block_size;
+ stats->min_free_size = first_block_size;
stats->free_blocks = 1;
stats->max_free_blocks = 1;
stats->used_blocks = 0;
@@ -183,9 +201,9 @@ uintptr_t _Heap_Initialize(
stats->resizes = 0;
stats->instance = instance++;
- _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ));
- _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ));
- _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ));
+ _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ) );
+ _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
+ _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
_HAssert(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
);
@@ -193,72 +211,142 @@ uintptr_t _Heap_Initialize(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
);
+ if ( !_Heap_Walk( heap, 0, false ) ) {
+ _Heap_Walk( heap, 0, true );
+ }
+
return alloc_area_size;
}
-uintptr_t _Heap_Calc_block_size(
- uintptr_t alloc_size,
- uintptr_t page_size,
- uintptr_t min_block_size)
+static Heap_Block *_Heap_Block_split(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_size
+)
{
- uintptr_t block_size =
- _Heap_Align_up( alloc_size + HEAP_BLOCK_USED_OVERHEAD, page_size );
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;
- if (block_size < min_block_size) {
- block_size = min_block_size;
- }
+ uintptr_t const block_size = _Heap_Block_size( block );
+
+ uintptr_t const used_size =
+ _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
+ uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );
- if (block_size > alloc_size) {
- return block_size;
+ uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
+ uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;
+
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+
+ _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
+ _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );
+
+ if ( free_size >= free_size_limit ) {
+ uintptr_t const free_block_size = block_size - used_block_size;
+ Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
+
+ _HAssert( used_block_size + free_block_size == block_size );
+
+ block->size_and_flag = used_block_size
+ | (block->size_and_flag & HEAP_PREV_BLOCK_USED);
+ free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+ next_block->prev_size = free_block_size;
+
+ return free_block;
} else {
- /* Integer overflow occured */
- return 0;
+ next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
+
+ return NULL;
}
}
-uintptr_t _Heap_Block_allocate(
+static Heap_Block *_Heap_Block_allocate_from_begin(
Heap_Control *heap,
Heap_Block *block,
uintptr_t alloc_size
)
{
- Heap_Statistics * const stats = &heap->stats;
- uintptr_t const block_size = _Heap_Block_size( block );
- uintptr_t const unused_size = block_size - alloc_size;
- Heap_Block *next_block = _Heap_Block_at( block, block_size );
-
- _HAssert( _Heap_Is_aligned( block_size, heap->page_size ));
- _HAssert( _Heap_Is_aligned( alloc_size, heap->page_size ));
- _HAssert( alloc_size <= block_size );
- _HAssert( _Heap_Is_prev_used( block ));
-
- if (unused_size >= heap->min_block_size) {
- /*
- * Split the block so that the upper part is still free, and the lower part
- * becomes used. This is slightly less optimal than leaving the lower part
- * free as it requires replacing block in the free blocks list, but it
- * makes it possible to reuse this code in the _Heap_Resize_block().
- */
- Heap_Block *new_block = _Heap_Block_at( block, alloc_size );
- block->size_and_flag = alloc_size | HEAP_PREV_BLOCK_USED;
- new_block->size_and_flag = unused_size | HEAP_PREV_BLOCK_USED;
- next_block->prev_size = unused_size;
- _Heap_Block_replace_in_free_list( block, new_block );
+ Heap_Block *const free_block = _Heap_Block_split( heap, block, alloc_size );
+
+ if ( free_block != NULL ) {
+ _Heap_Free_list_replace( block, free_block );
} else {
- next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
- alloc_size = block_size;
- _Heap_Block_remove_from_free_list( block );
+ Heap_Statistics *const stats = &heap->stats;
+
+ _Heap_Free_list_remove( block );
/* Statistics */
--stats->free_blocks;
}
+ return block;
+}
+
+static Heap_Block *_Heap_Block_allocate_from_end(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size
+)
+{
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t block_size = _Heap_Block_size( block );
+ uintptr_t block_end = block_begin + block_size;
+
+ Heap_Block *const new_block =
+ _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
+ uintptr_t const new_block_begin = (uintptr_t) new_block;
+ uintptr_t const new_block_size = block_end - new_block_begin;
+
+ Heap_Block *free_block = NULL;
+
+ block_end = new_block_begin;
+ block_size = block_end - block_begin;
+
+ _HAssert( block_size >= heap->min_block_size );
+ _HAssert( new_block_size >= heap->min_block_size );
+
+ block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
+ new_block->prev_size = block_size;
+ new_block->size_and_flag = new_block_size;
+
+ free_block = _Heap_Block_split( heap, new_block, alloc_size );
+ if ( free_block != NULL ) {
+ _Heap_Free_list_insert_after( block, free_block );
+ }
+
+ return new_block;
+}
+
+Heap_Block *_Heap_Block_allocate(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size
+)
+{
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
+
+ _HAssert( _Heap_Is_prev_used( block ) );
+ _HAssert( alloc_area_begin <= alloc_begin );
+
+ if ( alloc_area_offset < heap->page_size ) {
+ alloc_size += alloc_area_offset;
+
+ block = _Heap_Block_allocate_from_begin( heap, block, alloc_size );
+ } else {
+ block = _Heap_Block_allocate_from_end( heap, block, alloc_begin, alloc_size );
+ }
+
/* Statistics */
++stats->used_blocks;
- stats->free_size -= alloc_size;
- if(stats->min_free_size > stats->free_size) {
+ stats->free_size -= _Heap_Block_size( block );
+ if ( stats->min_free_size > stats->free_size ) {
stats->min_free_size = stats->free_size;
}
- return alloc_size;
+ return block;
}
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 7c9e78f31e..7b0f51e232 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -1,9 +1,17 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2009 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
@@ -19,48 +27,204 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
+#ifdef RTEMS_HEAP_DEBUG
+ static void _Heap_Check_allocation(
+ const Heap_Control *heap,
+ const Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+ )
+ {
+ uintptr_t const min_block_size = heap->min_block_size;
+ uintptr_t const page_size = heap->page_size;
+
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ uintptr_t const block_end = block_begin + block_size;
+
+ uintptr_t const alloc_end = alloc_begin + alloc_size;
+
+ uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
+ uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
+
+ _HAssert( block_size >= min_block_size );
+ _HAssert( block_begin < block_end );
+ _HAssert(
+ _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
+ );
+ _HAssert(
+ _Heap_Is_aligned( block_size, page_size )
+ );
+
+ _HAssert( alloc_end <= block_end + HEAP_BLOCK_SIZE_OFFSET );
+ _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE);
+ _HAssert( alloc_area_offset < page_size );
+
+ _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
+ if ( alignment == 0 ) {
+ _HAssert( alloc_begin == alloc_area_begin );
+ } else {
+ _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
+ }
+
+ if ( boundary != 0 ) {
+ uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
+
+ _HAssert( alloc_size <= boundary );
+ _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
+ }
+ }
+#else
+ #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
+#endif
+
+static uintptr_t _Heap_Check_block(
+ const Heap_Control *heap,
+ const Heap_Block *block,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+)
+{
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ uintptr_t const block_end = block_begin + block_size;
+
+ uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ + HEAP_BLOCK_HEADER_SIZE + page_size - 1;
+
+ uintptr_t alloc_end = block_end + HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t alloc_begin = alloc_end - alloc_size;
+
+ alloc_begin = _Heap_Align_down( alloc_begin, alignment );
+
+  /* Ensure that we have a valid new block at the end */
+ if ( alloc_begin > alloc_begin_ceiling ) {
+ alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
+ }
+
+ alloc_end = alloc_begin + alloc_size;
+
+  /* Ensure boundary constraint */
+ if ( boundary != 0 ) {
+ uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
+ uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
+
+ while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
+ if ( boundary_line < boundary_floor ) {
+ return 0;
+ }
+ alloc_begin = boundary_line - alloc_size;
+ alloc_begin = _Heap_Align_down( alloc_begin, alignment );
+ alloc_end = alloc_begin + alloc_size;
+ boundary_line = _Heap_Align_down( alloc_end, boundary );
+ }
+ }
+
+  /* Ensure that we have a valid new block at the beginning */
+ if ( alloc_begin >= alloc_begin_floor ) {
+ uintptr_t const alloc_block_begin =
+ (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
+ uintptr_t const free_size = alloc_block_begin - block_begin;
+
+ if ( free_size >= min_block_size || free_size == 0 ) {
+ return alloc_begin;
+ }
+ }
+
+ return 0;
+}
+
+void *_Heap_Allocate_aligned_with_boundary(
+ Heap_Control *heap,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+)
{
Heap_Statistics *const stats = &heap->stats;
- Heap_Block * const tail = _Heap_Free_list_tail( heap );
- Heap_Block *block = _Heap_First_free_block( heap );
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *block = _Heap_Free_list_first( heap );
+ uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
+ - HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t alloc_begin = 0;
uint32_t search_count = 0;
- void *alloc_area_begin_ptr = NULL;
- size = _Heap_Calc_block_size( size, heap->page_size, heap->min_block_size );
- if( size == 0 ) {
+ if ( block_size_floor < alloc_size ) {
+    /* Integer overflow occurred */
return NULL;
}
- /*
- * Find large enough free block.
- *
- * Do not bother to mask out the HEAP_PREV_BLOCK_USED bit as it will not
- * change the result of the size comparison.
- */
- while (block != tail && block->size_and_flag < size) {
- _HAssert( _Heap_Is_prev_used( block ));
+ if ( boundary != 0 ) {
+ if ( boundary < alloc_size ) {
+ return NULL;
+ }
- block = block->next;
- ++search_count;
+ if ( alignment == 0 ) {
+ alignment = page_size;
+ }
}
- if (block != tail) {
- _Heap_Block_allocate( heap, block, size );
+ while ( block != free_list_tail ) {
+ _HAssert( _Heap_Is_prev_used( block ) );
+
+ /* Statistics */
+ ++search_count;
+
+ /*
+ * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
+ * field. Thus the value is about one unit larger than the real block
+ * size. The greater than operator takes this into account.
+ */
+ if ( block->size_and_flag > block_size_floor ) {
+ if ( alignment == 0 ) {
+ alloc_begin = _Heap_Alloc_area_of_block( block );
+ } else {
+ alloc_begin = _Heap_Check_block(
+ heap,
+ block,
+ alloc_size,
+ alignment,
+ boundary
+ );
+ }
+ }
+
+ if ( alloc_begin != 0 ) {
+ break;
+ }
+
+ block = block->next;
+ }
- alloc_area_begin_ptr = (void *) _Heap_Alloc_area_of_block( block );
+ if ( alloc_begin != 0 ) {
+ block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
- _HAssert( _Heap_Is_aligned( (uintptr_t) alloc_area_begin_ptr, heap->page_size ));
+ _Heap_Check_allocation(
+ heap,
+ block,
+ alloc_begin,
+ alloc_size,
+ alignment,
+ boundary
+ );
/* Statistics */
- ++stats->allocs;
stats->searches += search_count;
}
/* Statistics */
- if (stats->max_search < search_count) {
+ if ( stats->max_search < search_count ) {
stats->max_search = search_count;
}
- return alloc_area_begin_ptr;
+ return (void *) alloc_begin;
}
diff --git a/cpukit/score/src/heapallocatealigned.c b/cpukit/score/src/heapallocatealigned.c
index 935c3509aa..7b61d723fc 100644
--- a/cpukit/score/src/heapallocatealigned.c
+++ b/cpukit/score/src/heapallocatealigned.c
@@ -1,3 +1,4 @@
+#if 0
/*
* Heap Handler
*
@@ -31,10 +32,10 @@ check_result(
)
{
uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
- uintptr_t const block_end = the_block
+ uintptr_t const block_end = (uintptr_t) the_block
+ _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const user_end = aligned_user_addr + size;
- uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD;
+ uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_BLOCK_HEADER_SIZE;
uintptr_t const heap_end = (uintptr_t) the_heap->final
+ HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const page_size = the_heap->page_size;
@@ -97,7 +98,7 @@ Heap_Block *block_allocate(
/* Don't split the block as remainder is either zero or too small to be
used as a separate free block. Change 'alloc_size' to the size of the
block and remove the block from the list of free blocks. */
- _Heap_Block_remove_from_free_list(the_block);
+ _Heap_Free_list_remove(the_block);
alloc_size = block_size;
stats->free_blocks -= 1;
}
@@ -157,7 +158,7 @@ void *_Heap_Allocate_aligned(
/* Find large enough free block that satisfies the alignment requirements. */
- for (the_block = _Heap_First_free_block(the_heap), search_count = 0;
+ for (the_block = _Heap_Free_list_first(the_heap), search_count = 0;
the_block != tail;
the_block = the_block->next, ++search_count)
{
@@ -220,7 +221,7 @@ void *_Heap_Allocate_aligned(
/* The block is indeed acceptable: calculate the size of the block
to be allocated and perform allocation. */
uintptr_t const alloc_size =
- block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ block_end - user_addr + HEAP_BLOCK_HEADER_SIZE;
_HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));
@@ -244,3 +245,4 @@ void *_Heap_Allocate_aligned(
return user_ptr;
}
+#endif
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index bb3f301235..3541bddcc9 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -28,11 +34,11 @@ Heap_Extend_status _Heap_Extend(
{
Heap_Statistics *const stats = &heap->stats;
uintptr_t const area_begin = (uintptr_t) area_begin_ptr;
- uintptr_t const heap_area_begin = heap->begin;
- uintptr_t const heap_area_end = heap->end;
+ uintptr_t const heap_area_begin = heap->area_begin;
+ uintptr_t const heap_area_end = heap->area_end;
uintptr_t const new_heap_area_end = heap_area_end + area_size;
uintptr_t extend_size = 0;
- Heap_Block *const old_final = heap->final;
+ Heap_Block *const old_final = heap->last_block;
Heap_Block *new_final = NULL;
/*
@@ -60,10 +66,10 @@ Heap_Extend_status _Heap_Extend(
* block and free it.
*/
- heap->end = new_heap_area_end;
+ heap->area_end = new_heap_area_end;
extend_size = new_heap_area_end
- - (uintptr_t) old_final - HEAP_LAST_BLOCK_OVERHEAD;
+ - (uintptr_t) old_final - HEAP_BLOCK_HEADER_SIZE;
extend_size = _Heap_Align_down( extend_size, heap->page_size );
*amount_extended = extend_size;
@@ -74,7 +80,7 @@ Heap_Extend_status _Heap_Extend(
new_final = _Heap_Block_at( old_final, extend_size );
new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
- heap->final = new_final;
+ heap->last_block = new_final;
stats->size += area_size;
++stats->used_blocks;
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 9d5be9e290..48b54293ea 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,26 +25,18 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
+bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
Heap_Statistics *const stats = &heap->stats;
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
+ uintptr_t alloc_begin = (uintptr_t) alloc_begin_ptr;
Heap_Block *block =
- _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
Heap_Block *next_block = NULL;
uintptr_t block_size = 0;
uintptr_t next_block_size = 0;
bool next_is_free = false;
- if (
- !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final)
- ) {
- _HAssert( alloc_area_begin_ptr != NULL );
- return false;
- }
-
if ( !_Heap_Is_block_in_heap( heap, block ) ) {
- _HAssert( false );
return false;
}
@@ -56,7 +54,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
}
next_block_size = _Heap_Block_size( next_block );
- next_is_free = next_block != heap->final
+ next_is_free = next_block != heap->last_block
&& !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
if ( !_Heap_Is_prev_used( block ) ) {
@@ -77,7 +75,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
if ( next_is_free ) { /* coalesce both */
uintptr_t const size = block_size + prev_size + next_block_size;
- _Heap_Block_remove_from_free_list( next_block );
+ _Heap_Free_list_remove( next_block );
stats->free_blocks -= 1;
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( prev_block, size );
@@ -91,14 +89,14 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
}
} else if ( next_is_free ) { /* coalesce next */
uintptr_t const size = block_size + next_block_size;
- _Heap_Block_replace_in_free_list( next_block, block );
+ _Heap_Free_list_replace( next_block, block );
block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( block, size );
next_block->prev_size = size;
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
- _Heap_Block_insert_after( _Heap_Free_list_head( heap), block );
+ _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = block_size;
diff --git a/cpukit/score/src/heapgetfreeinfo.c b/cpukit/score/src/heapgetfreeinfo.c
index a288529cad..406ed81d39 100644
--- a/cpukit/score/src/heapgetfreeinfo.c
+++ b/cpukit/score/src/heapgetfreeinfo.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2004.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,21 +25,6 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Get_free_information
- *
- * This heap routine returns information about the free blocks
- * in the specified heap.
- *
- * Input parameters:
- * the_heap - pointer to heap header.
- * info - pointer to the free block information.
- *
- * Output parameters:
- * returns - free block information filled in.
- */
-
void _Heap_Get_free_information(
Heap_Control *the_heap,
Heap_Information *info
@@ -46,7 +37,7 @@ void _Heap_Get_free_information(
info->largest = 0;
info->total = 0;
- for(the_block = _Heap_First_free_block(the_heap);
+ for(the_block = _Heap_Free_list_first(the_heap);
the_block != tail;
the_block = the_block->next)
{
diff --git a/cpukit/score/src/heapgetinfo.c b/cpukit/score/src/heapgetinfo.c
index 7f907170b9..bc3d4cc893 100644
--- a/cpukit/score/src/heapgetinfo.c
+++ b/cpukit/score/src/heapgetinfo.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,27 +25,13 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*
- * _Heap_Get_information
- *
- * This kernel routine walks the heap and tots up the free and allocated
- * sizes. Derived from _Heap_Walk.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * the_info - pointer for information to be returned
- *
- * Output parameters:
- * *the_info - contains information about heap
- * return 0=success, otherwise heap is corrupt.
- */
-Heap_Get_information_status _Heap_Get_information(
+void _Heap_Get_information(
Heap_Control *the_heap,
Heap_Information_block *the_info
)
{
- Heap_Block *the_block = the_heap->start;
- Heap_Block *const end = the_heap->final;
+ Heap_Block *the_block = the_heap->first_block;
+ Heap_Block *const end = the_heap->last_block;
_HAssert(the_block->prev_size == the_heap->page_size);
_HAssert(_Heap_Is_prev_used(the_block));
@@ -52,7 +44,7 @@ Heap_Get_information_status _Heap_Get_information(
the_info->Used.largest = 0;
while ( the_block != end ) {
- uint32_t const the_size = _Heap_Block_size(the_block);
+ uintptr_t const the_size = _Heap_Block_size(the_block);
Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
Heap_Information *info;
@@ -74,7 +66,5 @@ Heap_Get_information_status _Heap_Get_information(
* "used" as client never allocated it. Make 'Used.total' contain this
* blocks' overhead though.
*/
- the_info->Used.total += HEAP_LAST_BLOCK_OVERHEAD;
-
- return HEAP_GET_INFORMATION_SUCCESSFUL;
+ the_info->Used.total += HEAP_BLOCK_HEADER_SIZE;
}
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
index 8916bbe12c..2f26589667 100644
--- a/cpukit/score/src/heapresizeblock.c
+++ b/cpukit/score/src/heapresizeblock.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,129 +27,163 @@
Heap_Resize_status _Heap_Resize_block(
Heap_Control *heap,
- void *alloc_area_begin_ptr,
- uintptr_t size,
- uintptr_t *old_mem_size,
- uintptr_t *avail_mem_size
+ void *alloc_begin_ptr,
+ uintptr_t new_alloc_size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
)
{
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
- Heap_Block *block;
- Heap_Block *next_block;
- uintptr_t next_block_size;
- bool next_is_used;
- Heap_Block *next_next_block;
- uintptr_t old_block_size;
- uintptr_t old_user_size;
- uintptr_t prev_used_flag;
Heap_Statistics *const stats = &heap->stats;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const page_size = heap->page_size;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
+ Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
+ Heap_Block *next_block = NULL;
+ Heap_Block *next_next_block = NULL;
+ uintptr_t block_size = 0;
+ uintptr_t block_end = 0;
+ uintptr_t next_block_size = 0;
+ bool next_block_is_used = false;
+ uintptr_t alloc_size = 0;
+ uintptr_t prev_block_used_flag = 0;
+
+ *old_size = 0;
+ *new_size = 0;
+
+ if ( !_Heap_Is_block_in_heap( heap, block ) ) {
+ return HEAP_RESIZE_FATAL_ERROR;
+ }
- *old_mem_size = 0;
- *avail_mem_size = 0;
+ block_size = _Heap_Block_size( block );
+ block_end = (uintptr_t) block + block_size;
+ prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+ next_block = _Heap_Block_at( block, block_size );
- block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size);
- _HAssert(_Heap_Is_block_in_heap(heap, block));
- if (!_Heap_Is_block_in_heap(heap, block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
+ _HAssert( _Heap_Is_prev_used( next_block ) );
- prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
- old_block_size = _Heap_Block_size(block);
- next_block = _Heap_Block_at(block, old_block_size);
+ next_block_size = _Heap_Block_size( next_block );
+ next_next_block = _Heap_Block_at( next_block, next_block_size );
- _HAssert(_Heap_Is_block_in_heap(heap, next_block));
- _HAssert(_Heap_Is_prev_used(next_block));
- if ( !_Heap_Is_block_in_heap(heap, next_block) ||
- !_Heap_Is_prev_used(next_block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert(
+ next_block == heap->last_block
+ || _Heap_Is_block_in_heap( heap, next_next_block )
+ );
- next_block_size = _Heap_Block_size(next_block);
- next_next_block = _Heap_Block_at(next_block, next_block_size);
- next_is_used = (next_block == heap->final) ||
- _Heap_Is_prev_used(next_next_block);
+ next_block_is_used = next_block == heap->last_block
+ || _Heap_Is_prev_used( next_next_block );
- /* See _Heap_Size_of_alloc_area() source for explanations */
- old_user_size = (uintptr_t) next_block - alloc_area_begin
- + HEAP_BLOCK_SIZE_OFFSET;
+ alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
- *old_mem_size = old_user_size;
+ *old_size = alloc_size;
- if (size > old_user_size) {
- /* Need to extend the block: allocate part of the next block and then
- merge 'block' and allocated block together. */
- if (next_is_used) /* Next block is in use, -- no way to extend */
+ if ( new_alloc_size > alloc_size ) {
+ /*
+ * Need to extend the block: allocate part of the next block and then
+ * merge the blocks.
+ */
+ if ( next_block_is_used ) {
return HEAP_RESIZE_UNSATISFIED;
- else {
+ } else {
uintptr_t add_block_size =
- _Heap_Align_up(size - old_user_size, page_size);
- if (add_block_size < min_block_size)
+ _Heap_Align_up( new_alloc_size - alloc_size, page_size );
+
+ if ( add_block_size < min_block_size ) {
add_block_size = min_block_size;
- if (add_block_size > next_block_size)
- return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
- add_block_size =
- _Heap_Block_allocate(heap, next_block, add_block_size);
- /* Merge two subsequent blocks */
- block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
+ }
+
+ if ( add_block_size > next_block_size ) {
+ return HEAP_RESIZE_UNSATISFIED;
+ }
+
+ next_block = _Heap_Block_allocate(
+ heap,
+ next_block,
+ _Heap_Alloc_area_of_block( next_block ),
+ add_block_size - HEAP_BLOCK_HEADER_SIZE
+ );
+
+ /* Merge the blocks */
+ block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
+ | prev_block_used_flag;
+
+ /* Statistics */
--stats->used_blocks;
}
} else {
-
/* Calculate how much memory we could free */
uintptr_t free_block_size =
- _Heap_Align_down(old_user_size - size, page_size);
+ _Heap_Align_down( alloc_size - new_alloc_size, page_size );
- if (free_block_size > 0) {
+ if ( free_block_size > 0 ) {
+ /*
+ * To free some memory the block should be shortened so that it can
+ * hold 'new_alloc_size' user bytes and still remain not shorter than
+ * 'min_block_size'.
+ */
+ uintptr_t new_block_size = block_size - free_block_size;
- /* To free some memory the block should be shortened so that it can
- can hold 'size' user bytes and still remain not shorter than
- 'min_block_size'. */
+ if ( new_block_size < min_block_size ) {
+ uintptr_t const delta = min_block_size - new_block_size;
- uintptr_t new_block_size = old_block_size - free_block_size;
+ _HAssert( free_block_size >= delta );
- if (new_block_size < min_block_size) {
- uintptr_t delta = min_block_size - new_block_size;
- _HAssert(free_block_size >= delta);
free_block_size -= delta;
- if (free_block_size == 0) {
+
+ if ( free_block_size == 0 ) {
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}
+
new_block_size += delta;
}
- _HAssert(new_block_size >= min_block_size);
- _HAssert(new_block_size + free_block_size == old_block_size);
- _HAssert(_Heap_Is_aligned(new_block_size, page_size));
- _HAssert(_Heap_Is_aligned(free_block_size, page_size));
+ _HAssert( new_block_size >= min_block_size );
+ _HAssert( new_block_size + free_block_size == block_size );
+ _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
+ _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
- if (!next_is_used) {
- /* Extend the next block to the low addresses by 'free_block_size' */
+ if ( !next_block_is_used ) {
+ /* Extend the next block */
Heap_Block *const new_next_block =
- _Heap_Block_at(block, new_block_size);
+ _Heap_Block_at( block, new_block_size );
uintptr_t const new_next_block_size =
next_block_size + free_block_size;
- _HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
- block->size_and_flag = new_block_size | prev_used_flag;
- new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
+
+ _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
+
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ new_next_block->size_and_flag =
+ new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
- _Heap_Block_replace_in_free_list(next_block, new_next_block);
- heap->stats.free_size += free_block_size;
- *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
-
- } else if (free_block_size >= min_block_size) {
- /* Split the block into 2 used parts, then free the second one. */
- block->size_and_flag = new_block_size | prev_used_flag;
- next_block = _Heap_Block_at(block, new_block_size);
+
+ _Heap_Free_list_replace( next_block, new_next_block );
+
+ *new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
+ stats->free_size += free_block_size;
+ } else if ( free_block_size >= min_block_size ) {
+ /* Split the block into two used parts, then free the second one */
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ next_block = _Heap_Block_at( block, new_block_size );
next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+
+ _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
+
+ *new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
++stats->used_blocks; /* We have created used block */
- --stats->frees; /* Don't count next call in stats */
- _Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
- *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
+ --stats->frees; /* Do not count next call in stats */
}
}
}
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index be51255eee..7c297a77e6 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,24 +27,16 @@
bool _Heap_Size_of_alloc_area(
Heap_Control *heap,
- void *alloc_area_begin_ptr,
- uintptr_t *size
+ void *alloc_begin_ptr,
+ uintptr_t *alloc_size
)
{
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
- Heap_Block *block =
- _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
+ Heap_Block *block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
Heap_Block *next_block = NULL;
uintptr_t block_size = 0;
- if (
- !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final )
- ) {
- return false;
- }
-
-
- _HAssert(_Heap_Is_block_in_heap( heap, block ));
if ( !_Heap_Is_block_in_heap( heap, block ) ) {
return false;
}
@@ -46,26 +44,14 @@ bool _Heap_Size_of_alloc_area(
block_size = _Heap_Block_size( block );
next_block = _Heap_Block_at( block, block_size );
- _HAssert( _Heap_Is_block_in_heap( heap, next_block ));
- _HAssert( _Heap_Is_prev_used( next_block ));
if (
- !_Heap_Is_block_in_heap( heap, next_block ) ||
- !_Heap_Is_prev_used( next_block )
+ !_Heap_Is_block_in_heap( heap, next_block )
+ || !_Heap_Is_prev_used( next_block )
) {
return false;
}
- /*
- * 'alloc_area_begin' could be greater than 'block' address plus
- * HEAP_BLOCK_ALLOC_AREA_OFFSET as _Heap_Allocate_aligned() may produce such
- * user pointers. To get rid of this offset we calculate user size as
- * difference between the end of 'block' (='next_block') and
- * 'alloc_area_begin' and then add correction equal to the offset of the
- * 'size' field of the 'Heap_Block' structure. The correction is due to the
- * fact that 'prev_size' field of the next block is actually used as user
- * accessible area of 'block'.
- */
- *size = (uintptr_t) next_block - alloc_area_begin + HEAP_BLOCK_SIZE_OFFSET;
+ *alloc_size = (uintptr_t) next_block + HEAP_BLOCK_SIZE_OFFSET - alloc_begin;
return true;
}
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index a6628df0b3..dd255e1a17 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -1,8 +1,14 @@
-/*
- * Heap Handler
+/**
+ * @file
*
- * COPYRIGHT (c) 1989-2007.
- * On-Line Applications Research Corporation (OAR).
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler implementation.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -22,197 +28,446 @@
#include <rtems/score/interr.h>
#include <rtems/bspIo.h>
-#if defined(__GNUC__)
- #define DO_NOT_INLINE __attribute__((__noinline__))
-#else
- #define DO_NOT_INLINE
-#endif
-/*
- * Helper to avoid introducing even more branches and paths in this
- * code to do coverage analysis on.
- *
- * We do not want this inlined.
- */
-static void hw_nl(
- int error,
- bool do_dump
-) DO_NOT_INLINE;
+static void _Heap_Walk_printk( int source, bool dump, bool error, const char *fmt, ... )
+{
+ if ( dump ) {
+ va_list ap;
-/*PAGE
- *
- * _Heap_Walk
- *
- * This kernel routine walks the heap and verifies its correctness.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * source - a numeric indicator of the invoker of this routine
- * do_dump - when true print the information
- *
- * Output parameters: NONE
- */
+ if ( error ) {
+ printk( "FAIL[%d]: ", source );
+ } else {
+ printk( "PASS[%d]: ", source );
+ }
-bool _Heap_Walk(
- Heap_Control *the_heap,
- int source,
- bool do_dump
+ va_start( ap, fmt );
+ vprintk( fmt, ap );
+ va_end( ap );
+ }
+}
+
+static bool _Heap_Walk_check_free_list(
+ int source,
+ bool dump,
+ Heap_Control *heap
)
{
- Heap_Block *the_block = the_heap->start;
- Heap_Block *const end = the_heap->final;
- Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
- int error = 0;
- int passes = 0;
-
- /* FIXME: Why is this disabled? */
- do_dump = false;
-
- /* FIXME: Why is this disabled? */
- /*
- * We don't want to allow walking the heap until we have
- * transferred control to the user task so we watch the
- * system state.
- */
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ const Heap_Block *free_block = first_free_block;
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const loop_limit =
+ ((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
+ / heap->min_block_size;
+ uintptr_t loop_counter = 0;
-/*
- if ( !_System_state_Is_up( _System_state_Get() ) )
- return true;
-*/
+ while ( free_block != free_list_tail && loop_counter < loop_limit ) {
+ if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: not in heap\n",
+ free_block
+ );
- /* FIXME: Reason for this? */
- if (source < 0)
- source = (int) the_heap->stats.instance;
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( free_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: alloc area not page aligned\n",
+ free_block
+ );
- if (do_dump)
- printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
- source, the_block, end,
- _Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
- the_heap->begin, the_heap->end);
+ return false;
+ }
- /*
- * Handle the 1st block
- */
+ ++loop_counter;
- if (!_Heap_Is_prev_used(the_block)) {
- printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
- error = 1;
+ free_block = free_block->next;
}
- if (the_block->prev_size != the_heap->page_size) {
- printk("PASS: %d !prev_size of 1st block isn't page_size\n", source);
- error = 1;
+ if ( loop_counter >= loop_limit ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free list contains a loop\n"
+ );
+
+ return false;
}
- while ( the_block != end ) {
- uint32_t const the_size = _Heap_Block_size(the_block);
- Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
- bool prev_used = _Heap_Is_prev_used(the_block);
+ return true;
+}
- if (do_dump) {
- printk("PASS: %d block %p size %d(%c)",
- source, the_block, the_size, (prev_used ? 'U' : 'F'));
- if (prev_used)
- printk(" prev_size %d", the_block->prev_size);
- else
- printk(" (prev_size) %d", the_block->prev_size);
+static bool _Heap_Walk_is_in_free_list(
+ Heap_Control *heap,
+ Heap_Block *block
+)
+{
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *free_block = _Heap_Free_list_first( heap );
+
+ while ( free_block != free_list_tail ) {
+ if ( free_block == block ) {
+ return true;
}
+ free_block = free_block->next;
+ }
+ return false;
+}
- if (!_Addresses_Is_aligned(next_block) ) {
- printk("PASS: %d next_block %p is not aligned\n", source, next_block);
- error = 1;
- break;
- }
-
- if (!_Heap_Is_prev_used(next_block)) {
- if (do_dump)
- printk( " prev %p next %p", the_block->prev, the_block->next);
- if (_Heap_Block_size(the_block) != next_block->prev_size) {
- if (do_dump) printk("\n");
- printk("PASS: %d !front and back sizes don't match", source);
- error = 1;
- }
- if (!prev_used) {
-
- hw_nl(do_dump, error);
- printk("PASS: %d !two consecutive blocks are free", source);
- error = 1;
- }
+static bool _Heap_Walk_check_control(
+ int source,
+ bool dump,
+ Heap_Control *heap
+)
+{
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const first_block = heap->first_block;
+ Heap_Block *const last_block = heap->last_block;
- { /* Check if 'the_block' is in the free block list */
- Heap_Block* block = _Heap_First_free_block(the_heap);
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d first free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- while(block != the_block && block != tail) {
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d a free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- if (!_Heap_Is_block_in_heap(the_heap, block)) {
- printk("PASS: %d a free block %p is not in heap\n", source, block);
- error = 1;
- break;
- }
- block = block->next;
- }
- if (block != the_block) {
- hw_nl(do_dump, error);
- printk("PASS: %d !the_block not in the free list", source);
- error = 1;
- }
- }
+ _Heap_Walk_printk(
+ source,
+ dump,
+ false,
+ "page size %u, min block size %u\n"
+ "\tarea begin 0x%08x, area end 0x%08x\n"
+ "\tfirst block 0x%08x, last block 0x%08x\n"
+ "\tfirst free 0x%08x, last free 0x%08x\n",
+ page_size, min_block_size,
+ heap->area_begin, heap->area_end,
+ first_block, last_block,
+ first_free_block, last_free_block
+ );
- }
- hw_nl(do_dump, error);
+ if ( page_size == 0 ) {
+ _Heap_Walk_printk( source, dump, true, "page size is zero\n" );
- if (the_size < the_heap->min_block_size) {
- printk("PASS: %d !block size is too small\n", source);
- error = 1;
- break;
- }
- if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
- printk("PASS: %d !block size is misaligned\n", source);
- error = 1;
- }
+ return false;
+ }
- if (++passes > (do_dump ? 10 : 0) && error)
- break;
+ if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "page size %u not CPU aligned\n",
+ page_size
+ );
- the_block = next_block;
+ return false;
}
- if (the_block != end) {
- printk("PASS: %d !last block address isn't equal to 'final' %p %p\n",
- source, the_block, end);
- error = 1;
+ if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "min block size %u not page aligned\n",
+ min_block_size
+ );
+
+ return false;
}
- if (_Heap_Block_size(the_block) != the_heap->page_size) {
- printk("PASS: %d !last block's size isn't page_size (%d != %d)\n", source,
- _Heap_Block_size(the_block), the_heap->page_size);
- error = 1;
+ if (
+ first_free_block != free_list_head
+ && !_Addresses_Is_aligned( first_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first free block: 0x%08x not CPU aligned\n",
+ first_free_block
+ );
+
+ return false;
}
- if (do_dump && error)
- _Internal_error_Occurred( INTERNAL_ERROR_CORE, true, 0xffff0000 );
+ if (
+ last_free_block != free_list_tail
+ && !_Addresses_Is_aligned( last_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last free block: 0x%08x not CPU aligned\n",
+ last_free_block
+ );
+
+ return false;
+ }
- return error;
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: 0x%08x not page aligned\n",
+ first_block
+ );
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last block: 0x%08x not page aligned\n",
+ last_block
+ );
+
+ return false;
+ }
+
+ if ( !_Heap_Is_prev_used( first_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: HEAP_PREV_BLOCK_USED is cleared\n"
+ );
+ }
+
+ if ( first_block->prev_size != page_size ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: prev size %u != page size %u\n",
+ first_block->prev_size,
+ page_size
+ );
+ }
+
+ return _Heap_Walk_check_free_list( source, dump, heap );
}
-/*
- * This method exists to simplify branch paths in the generated code above.
- */
-static void hw_nl(
- int error,
- bool do_dump
+bool _Heap_Walk(
+ Heap_Control *heap,
+ int source,
+ bool dump
)
{
- if (do_dump || error) printk("\n");
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const last_block = heap->last_block;
+ Heap_Block *block = heap->first_block;
+ bool error = false;
+
+ if ( !_System_state_Is_up( _System_state_Get() ) ) {
+ return true;
+ }
+
+ if ( !_Heap_Walk_check_control( source, dump, heap ) ) {
+ return false;
+ }
+
+ while ( block != last_block && _Addresses_Is_aligned( block ) ) {
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ bool const prev_used = _Heap_Is_prev_used( block );
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+ uintptr_t const next_block_begin = (uintptr_t) next_block;
+
+ if ( prev_used ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u\n",
+ block,
+ block_size
+ );
+ } else {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u, prev_size %u\n",
+ block,
+ block_size,
+ block->prev_size
+ );
+ }
+
+ if (
+ !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
+ ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not page (%u) aligned\n",
+ block,
+ page_size
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_aligned( block_size, page_size ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: block size %u not page (%u) aligned\n",
+ block,
+ block_size,
+ page_size
+ );
+ break;
+ }
+
+ if ( block_size < min_block_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u < min block size %u\n",
+ block,
+ block_size,
+ min_block_size
+ );
+ break;
+ }
+
+ if ( next_block_begin <= block_begin ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: next block 0x%08x is not a successor\n",
+ block,
+ next_block
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_prev_used( next_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
+ block,
+ block->prev,
+ block->prev == first_free_block ?
+ " (= first)"
+ : (block->prev == free_list_head ? " (= head)" : ""),
+ block->next,
+ block->next == last_free_block ?
+ " (= last)"
+ : (block->next == free_list_tail ? " (= tail)" : "")
+ );
+
+ if ( block_size != next_block->prev_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
+ block,
+ block_size,
+ next_block->prev_size,
+ next_block
+ );
+ }
+
+ if ( !prev_used ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: two consecutive blocks are free\n",
+ block
+ );
+ }
+
+ if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: free block not in free list\n",
+ block
+ );
+ }
+ }
+
+ block = next_block;
+ }
+
+ if ( !_Addresses_Is_aligned( block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not CPU aligned\n",
+ block
+ );
+
+ return false;
+ }
+
+ if ( block == last_block ) {
+ uintptr_t const block_size = _Heap_Block_size( block );
+
+ if ( block_size != page_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x: size %u != page size %u\n",
+ block,
+ block_size,
+ page_size
+ );
+ }
+ } else {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x != last block 0x%08x\n",
+ block,
+ last_block
+ );
+ }
+
+ return !error;
}
diff --git a/cpukit/score/src/pheapallocate.c b/cpukit/score/src/pheapallocate.c
index f864a4a435..50d560f3a7 100644
--- a/cpukit/score/src/pheapallocate.c
+++ b/cpukit/score/src/pheapallocate.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -18,7 +26,7 @@
void *_Protected_heap_Allocate(
Heap_Control *the_heap,
- intptr_t size
+ uintptr_t size
)
{
void *p;
diff --git a/cpukit/score/src/pheapallocatealigned.c b/cpukit/score/src/pheapallocatealigned.c
index 97873f9b29..756d8a8aa2 100644
--- a/cpukit/score/src/pheapallocatealigned.c
+++ b/cpukit/score/src/pheapallocatealigned.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -18,8 +26,8 @@
void *_Protected_heap_Allocate_aligned(
Heap_Control *the_heap,
- intptr_t size,
- uint32_t alignment
+ uintptr_t size,
+ uintptr_t alignment
)
{
void *p;
diff --git a/cpukit/score/src/pheapextend.c b/cpukit/score/src/pheapextend.c
index d391018d0d..c2aa9735b9 100644
--- a/cpukit/score/src/pheapextend.c
+++ b/cpukit/score/src/pheapextend.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,11 +27,11 @@
bool _Protected_heap_Extend(
Heap_Control *the_heap,
void *starting_address,
- intptr_t size
+ uintptr_t size
)
{
Heap_Extend_status status;
- intptr_t amount_extended;
+ uintptr_t amount_extended;
_RTEMS_Lock_allocator();
status = _Heap_Extend(the_heap, starting_address, size, &amount_extended);
diff --git a/cpukit/score/src/pheapfree.c b/cpukit/score/src/pheapfree.c
index 1548547855..eb57ded669 100644
--- a/cpukit/score/src/pheapfree.c
+++ b/cpukit/score/src/pheapfree.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapgetblocksize.c b/cpukit/score/src/pheapgetblocksize.c
index 21727d77ac..0591abdd6e 100644
--- a/cpukit/score/src/pheapgetblocksize.c
+++ b/cpukit/score/src/pheapgetblocksize.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,10 +27,10 @@
bool _Protected_heap_Get_block_size(
Heap_Control *the_heap,
void *starting_address,
- intptr_t *size
+ uintptr_t *size
)
{
- bool status;
+ bool status;
_RTEMS_Lock_allocator();
status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );
diff --git a/cpukit/score/src/pheapgetfreeinfo.c b/cpukit/score/src/pheapgetfreeinfo.c
index 4b211d93dd..e95bffe0ba 100644
--- a/cpukit/score/src/pheapgetfreeinfo.c
+++ b/cpukit/score/src/pheapgetfreeinfo.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapgetinfo.c b/cpukit/score/src/pheapgetinfo.c
index 1d62fc0673..2c4a287edd 100644
--- a/cpukit/score/src/pheapgetinfo.c
+++ b/cpukit/score/src/pheapgetinfo.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,8 +29,6 @@ bool _Protected_heap_Get_information(
Heap_Information_block *the_info
)
{
- Heap_Get_information_status status;
-
if ( !the_heap )
return false;
@@ -30,11 +36,8 @@ bool _Protected_heap_Get_information(
return false;
_RTEMS_Lock_allocator();
- status = _Heap_Get_information( the_heap, the_info );
+ _Heap_Get_information( the_heap, the_info );
_RTEMS_Unlock_allocator();
- if ( status == HEAP_GET_INFORMATION_SUCCESSFUL )
- return true;
-
- return false;
+ return true;
}
diff --git a/cpukit/score/src/pheapgetsize.c b/cpukit/score/src/pheapgetsize.c
index c283d34c13..fbf90ea3a4 100644
--- a/cpukit/score/src/pheapgetsize.c
+++ b/cpukit/score/src/pheapgetsize.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
@@ -16,7 +24,7 @@
#include <rtems/system.h>
#include <rtems/score/protectedheap.h>
-uint32_t _Protected_heap_Get_size(
+uintptr_t _Protected_heap_Get_size(
Heap_Control *the_heap
)
{
diff --git a/cpukit/score/src/pheapinit.c b/cpukit/score/src/pheapinit.c
index 1d99fcd5f2..f2a11a7be9 100644
--- a/cpukit/score/src/pheapinit.c
+++ b/cpukit/score/src/pheapinit.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapresizeblock.c b/cpukit/score/src/pheapresizeblock.c
index 45e7861509..c229f31731 100644
--- a/cpukit/score/src/pheapresizeblock.c
+++ b/cpukit/score/src/pheapresizeblock.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,12 +27,12 @@
bool _Protected_heap_Resize_block(
Heap_Control *the_heap,
void *starting_address,
- intptr_t size
+ uintptr_t size
)
{
Heap_Resize_status status;
- intptr_t old_mem_size;
- intptr_t avail_mem_size;
+ uintptr_t old_mem_size;
+ uintptr_t avail_mem_size;
_RTEMS_Lock_allocator();
status = _Heap_Resize_block(
diff --git a/cpukit/score/src/pheapwalk.c b/cpukit/score/src/pheapwalk.c
index 4ddd5d9f90..e86874192f 100644
--- a/cpukit/score/src/pheapwalk.c
+++ b/cpukit/score/src/pheapwalk.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*