path: root/cpukit/score/src/heapwalk.c
author     Joel Sherrill <joel.sherrill@OARcorp.com>   2009-09-06 15:24:08 +0000
committer  Joel Sherrill <joel.sherrill@OARcorp.com>   2009-09-06 15:24:08 +0000
commit     dea3eccb38b556b04552219e00b7abd656587278 (patch)
tree       6affcb3026172273e366ee15ed3e8ec70f023a20 /cpukit/score/src/heapwalk.c
parent     Regenerate. (diff)
download   rtems-dea3eccb38b556b04552219e00b7abd656587278.tar.bz2
2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
	* libcsupport/src/free.c, libmisc/stackchk/check.c, rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c, rtems/src/regionextend.c, rtems/src/regiongetinfo.c, rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c, rtems/src/regionresizesegment.c, score/src/pheapallocate.c, score/src/pheapallocatealigned.c, score/src/pheapextend.c, score/src/pheapfree.c, score/src/pheapgetblocksize.c, score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c, score/src/pheapgetsize.c, score/src/pheapinit.c, score/src/pheapresizeblock.c, score/src/pheapwalk.c: Update for heap API changes.
	* score/include/rtems/score/apimutex.h, score/include/rtems/score/object.h: Documentation.
	* score/include/rtems/score/heap.h, score/include/rtems/score/protectedheap.h, score/inline/rtems/score/heap.inl, score/src/heap.c, score/src/heapallocate.c, score/src/heapallocatealigned.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c, score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c, score/src/heapwalk.c: Overall cleanup. Added boundary constraint to allocation function. More changes follow.
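
The last ChangeLog entry mentions a boundary constraint added to the allocation function. As a rough sketch of what a boundary-constrained request could look like from caller code (the allocator name _Heap_Allocate_aligned_with_boundary and its parameter order are assumptions based on the ChangeLog wording, not something shown in this diff):

    #include <rtems/score/heap.h>

    /*
     * Hypothetical helper: allocate a buffer that is 32-byte aligned and
     * does not cross a 4 KiB boundary (e.g. for a DMA engine).  The
     * allocator name and argument order below are assumptions; consult
     * score/include/rtems/score/heap.h from this commit for the real API.
     */
    static void *alloc_dma_buffer( Heap_Control *heap, uintptr_t size )
    {
      return _Heap_Allocate_aligned_with_boundary(
        heap,
        size,
        32,     /* alignment in bytes */
        4096    /* allocation must not cross a 4 KiB boundary */
      );
    }
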
Diffstat (limited to 'cpukit/score/src/heapwalk.c')
-rw-r--r--  cpukit/score/src/heapwalk.c  | 581
1 file changed, 418 insertions, 163 deletions
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index a6628df0b3..dd255e1a17 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -1,8 +1,14 @@
-/*
- * Heap Handler
+/**
+ * @file
*
- * COPYRIGHT (c) 1989-2007.
- * On-Line Applications Research Corporation (OAR).
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler implementation.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -22,197 +28,446 @@
#include <rtems/score/interr.h>
#include <rtems/bspIo.h>
-#if defined(__GNUC__)
- #define DO_NOT_INLINE __attribute__((__noinline__))
-#else
- #define DO_NOT_INLINE
-#endif
-/*
- * Helper to avoid introducing even more branches and paths in this
- * code to do coverage analysis on.
- *
- * We do not want this inlined.
- */
-static void hw_nl(
- int error,
- bool do_dump
-) DO_NOT_INLINE;
+static void _Heap_Walk_printk( int source, bool dump, bool error, const char *fmt, ... )
+{
+ if ( dump ) {
+ va_list ap;
-/*PAGE
- *
- * _Heap_Walk
- *
- * This kernel routine walks the heap and verifies its correctness.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * source - a numeric indicator of the invoker of this routine
- * do_dump - when true print the information
- *
- * Output parameters: NONE
- */
+ if ( error ) {
+ printk( "FAIL[%d]: ", source );
+ } else {
+ printk( "PASS[%d]: ", source );
+ }
-bool _Heap_Walk(
- Heap_Control *the_heap,
- int source,
- bool do_dump
+ va_start( ap, fmt );
+ vprintk( fmt, ap );
+ va_end( ap );
+ }
+}
+
+static bool _Heap_Walk_check_free_list(
+ int source,
+ bool dump,
+ Heap_Control *heap
)
{
- Heap_Block *the_block = the_heap->start;
- Heap_Block *const end = the_heap->final;
- Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
- int error = 0;
- int passes = 0;
-
- /* FIXME: Why is this disabled? */
- do_dump = false;
-
- /* FIXME: Why is this disabled? */
- /*
- * We don't want to allow walking the heap until we have
- * transferred control to the user task so we watch the
- * system state.
- */
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ const Heap_Block *free_block = first_free_block;
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const loop_limit =
+ ((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
+ / heap->min_block_size;
+ uintptr_t loop_counter = 0;
-/*
- if ( !_System_state_Is_up( _System_state_Get() ) )
- return true;
-*/
+ while ( free_block != free_list_tail && loop_counter < loop_limit ) {
+ if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: not in heap\n",
+ free_block
+ );
- /* FIXME: Reason for this? */
- if (source < 0)
- source = (int) the_heap->stats.instance;
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( free_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: alloc area not page aligned\n",
+ free_block
+ );
- if (do_dump)
- printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
- source, the_block, end,
- _Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
- the_heap->begin, the_heap->end);
+ return false;
+ }
- /*
- * Handle the 1st block
- */
+ ++loop_counter;
- if (!_Heap_Is_prev_used(the_block)) {
- printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
- error = 1;
+ free_block = free_block->next;
}
- if (the_block->prev_size != the_heap->page_size) {
- printk("PASS: %d !prev_size of 1st block isn't page_size\n", source);
- error = 1;
+ if ( loop_counter >= loop_limit ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free list contains a loop\n"
+ );
+
+ return false;
}
- while ( the_block != end ) {
- uint32_t const the_size = _Heap_Block_size(the_block);
- Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
- bool prev_used = _Heap_Is_prev_used(the_block);
+ return true;
+}
- if (do_dump) {
- printk("PASS: %d block %p size %d(%c)",
- source, the_block, the_size, (prev_used ? 'U' : 'F'));
- if (prev_used)
- printk(" prev_size %d", the_block->prev_size);
- else
- printk(" (prev_size) %d", the_block->prev_size);
+static bool _Heap_Walk_is_in_free_list(
+ Heap_Control *heap,
+ Heap_Block *block
+)
+{
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *free_block = _Heap_Free_list_first( heap );
+
+ while ( free_block != free_list_tail ) {
+ if ( free_block == block ) {
+ return true;
}
+ free_block = free_block->next;
+ }
+ return false;
+}
- if (!_Addresses_Is_aligned(next_block) ) {
- printk("PASS: %d next_block %p is not aligned\n", source, next_block);
- error = 1;
- break;
- }
-
- if (!_Heap_Is_prev_used(next_block)) {
- if (do_dump)
- printk( " prev %p next %p", the_block->prev, the_block->next);
- if (_Heap_Block_size(the_block) != next_block->prev_size) {
- if (do_dump) printk("\n");
- printk("PASS: %d !front and back sizes don't match", source);
- error = 1;
- }
- if (!prev_used) {
-
- hw_nl(do_dump, error);
- printk("PASS: %d !two consecutive blocks are free", source);
- error = 1;
- }
+static bool _Heap_Walk_check_control(
+ int source,
+ bool dump,
+ Heap_Control *heap
+)
+{
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const first_block = heap->first_block;
+ Heap_Block *const last_block = heap->last_block;
- { /* Check if 'the_block' is in the free block list */
- Heap_Block* block = _Heap_First_free_block(the_heap);
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d first free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- while(block != the_block && block != tail) {
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d a free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- if (!_Heap_Is_block_in_heap(the_heap, block)) {
- printk("PASS: %d a free block %p is not in heap\n", source, block);
- error = 1;
- break;
- }
- block = block->next;
- }
- if (block != the_block) {
- hw_nl(do_dump, error);
- printk("PASS: %d !the_block not in the free list", source);
- error = 1;
- }
- }
+ _Heap_Walk_printk(
+ source,
+ dump,
+ false,
+ "page size %u, min block size %u\n"
+ "\tarea begin 0x%08x, area end 0x%08x\n"
+ "\tfirst block 0x%08x, last block 0x%08x\n"
+ "\tfirst free 0x%08x, last free 0x%08x\n",
+ page_size, min_block_size,
+ heap->area_begin, heap->area_end,
+ first_block, last_block,
+ first_free_block, last_free_block
+ );
- }
- hw_nl(do_dump, error);
+ if ( page_size == 0 ) {
+ _Heap_Walk_printk( source, dump, true, "page size is zero\n" );
- if (the_size < the_heap->min_block_size) {
- printk("PASS: %d !block size is too small\n", source);
- error = 1;
- break;
- }
- if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
- printk("PASS: %d !block size is misaligned\n", source);
- error = 1;
- }
+ return false;
+ }
- if (++passes > (do_dump ? 10 : 0) && error)
- break;
+ if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "page size %u not CPU aligned\n",
+ page_size
+ );
- the_block = next_block;
+ return false;
}
- if (the_block != end) {
- printk("PASS: %d !last block address isn't equal to 'final' %p %p\n",
- source, the_block, end);
- error = 1;
+ if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "min block size %u not page aligned\n",
+ min_block_size
+ );
+
+ return false;
}
- if (_Heap_Block_size(the_block) != the_heap->page_size) {
- printk("PASS: %d !last block's size isn't page_size (%d != %d)\n", source,
- _Heap_Block_size(the_block), the_heap->page_size);
- error = 1;
+ if (
+ first_free_block != free_list_head
+ && !_Addresses_Is_aligned( first_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first free block: 0x%08x not CPU aligned\n",
+ first_free_block
+ );
+
+ return false;
}
- if (do_dump && error)
- _Internal_error_Occurred( INTERNAL_ERROR_CORE, true, 0xffff0000 );
+ if (
+ last_free_block != free_list_tail
+ && !_Addresses_Is_aligned( last_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last free block: 0x%08x not CPU aligned\n",
+ last_free_block
+ );
+
+ return false;
+ }
- return error;
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: 0x%08x not page aligned\n",
+ first_block
+ );
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last block: 0x%08x not page aligned\n",
+ last_block
+ );
+
+ return false;
+ }
+
+ if ( !_Heap_Is_prev_used( first_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: HEAP_PREV_BLOCK_USED is cleared\n"
+ );
+ }
+
+ if ( first_block->prev_size != page_size ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: prev size %u != page size %u\n",
+ first_block->prev_size,
+ page_size
+ );
+ }
+
+ return _Heap_Walk_check_free_list( source, dump, heap );
}
-/*
- * This method exists to simplify branch paths in the generated code above.
- */
-static void hw_nl(
- int error,
- bool do_dump
+bool _Heap_Walk(
+ Heap_Control *heap,
+ int source,
+ bool dump
)
{
- if (do_dump || error) printk("\n");
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const last_block = heap->last_block;
+ Heap_Block *block = heap->first_block;
+ bool error = false;
+
+ if ( !_System_state_Is_up( _System_state_Get() ) ) {
+ return true;
+ }
+
+ if ( !_Heap_Walk_check_control( source, dump, heap ) ) {
+ return false;
+ }
+
+ while ( block != last_block && _Addresses_Is_aligned( block ) ) {
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ bool const prev_used = _Heap_Is_prev_used( block );
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+ uintptr_t const next_block_begin = (uintptr_t) next_block;
+
+ if ( prev_used ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u\n",
+ block,
+ block_size
+ );
+ } else {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u, prev_size %u\n",
+ block,
+ block_size,
+ block->prev_size
+ );
+ }
+
+ if (
+ !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
+ ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not page (%u) aligned\n",
+ block,
+ page_size
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_aligned( block_size, page_size ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: block size %u not page (%u) aligned\n",
+ block,
+ block_size,
+ page_size
+ );
+ break;
+ }
+
+ if ( block_size < min_block_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u < min block size %u\n",
+ block,
+ block_size,
+ min_block_size
+ );
+ break;
+ }
+
+ if ( next_block_begin <= block_begin ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: next block 0x%08x is not a successor\n",
+ block,
+ next_block
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_prev_used( next_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
+ block,
+ block->prev,
+ block->prev == first_free_block ?
+ " (= first)"
+ : (block->prev == free_list_head ? " (= head)" : ""),
+ block->next,
+ block->next == last_free_block ?
+ " (= last)"
+ : (block->next == free_list_tail ? " (= tail)" : "")
+ );
+
+ if ( block_size != next_block->prev_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
+ block,
+ block_size,
+ next_block->prev_size,
+ next_block
+ );
+ }
+
+ if ( !prev_used ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: two consecutive blocks are free\n",
+ block
+ );
+ }
+
+ if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: free block not in free list\n",
+ block
+ );
+ }
+ }
+
+ block = next_block;
+ }
+
+ if ( !_Addresses_Is_aligned( block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not CPU aligned\n",
+ block
+ );
+
+ return false;
+ }
+
+ if ( block == last_block ) {
+ uintptr_t const block_size = _Heap_Block_size( block );
+
+ if ( block_size != page_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x: size %u != page size %u\n",
+ block,
+ block_size,
+ page_size
+ );
+ }
+ } else {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x != last block 0x%08x\n",
+ block,
+ last_block
+ );
+ }
+
+ return !error;
}
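
For context, a minimal usage sketch of the reworked checker: the _Heap_Walk( heap, source, dump ) signature comes from the diff above, while the heap setup via _Heap_Initialize() and its arguments are assumptions made for illustration.

    #include <stdint.h>
    #include <rtems/score/heap.h>
    #include <rtems/bspIo.h>

    static uint8_t heap_area[ 8 * 1024 ];
    static Heap_Control heap;

    static void check_heap( void )
    {
      /*
       * Assumed initialization call; only _Heap_Walk() is taken from the
       * diff above.  A page size of 0 is assumed to select the default.
       */
      _Heap_Initialize( &heap, heap_area, sizeof( heap_area ), 0 );

      /*
       * source is an arbitrary tag echoed in the PASS[%d]/FAIL[%d] output,
       * dump == true enables the per-block printk() trace.
       */
      if ( !_Heap_Walk( &heap, 42, true ) ) {
        printk( "heap is corrupt\n" );
      }
    }
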