summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cpukit/score/include/rtems/score/heap.h386
-rw-r--r--cpukit/score/inline/rtems/score/heap.inl236
-rw-r--r--cpukit/score/macros/rtems/score/heap.inl182
-rw-r--r--cpukit/score/src/heap.c245
-rw-r--r--cpukit/score/src/heapallocate.c93
-rw-r--r--cpukit/score/src/heapextend.c50
-rw-r--r--cpukit/score/src/heapfree.c111
-rw-r--r--cpukit/score/src/heapgetfreeinfo.c23
-rw-r--r--cpukit/score/src/heapgetinfo.c84
-rw-r--r--cpukit/score/src/heapsizeofuserarea.c36
-rw-r--r--cpukit/score/src/heapwalk.c168
11 files changed, 1033 insertions, 581 deletions
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index c68bfd0fd1..7559cd89fb 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -1,4 +1,4 @@
-/**
+/**
* @file rtems/score/heap.h
*
* This include file contains the information pertaining to the Heap
@@ -6,11 +6,18 @@
* blocks which are allocated using the first fit method. Garbage
* collection is performed each time a block is returned to the heap by
* coalescing neighbor blocks. Control information for both allocated
- * and unallocated blocks is contained in the heap space. A heap header
- * contains control information for the heap.
- */
-
-/*
+ * and unallocated blocks is contained in the heap space. A heap control
+ * structure contains control information for the heap.
+ *
+ * FIXME: the alignment routines could be made faster should we require only
+ * powers of two to be supported both for 'page_size' and for
+ * 'alignment' arguments. However, both workspace and malloc heaps are
+ * initialized with CPU_HEAP_ALIGNMENT as 'page_size', and while all
+ * the BSPs seem to use CPU_ALIGNMENT (that is power of two) as
+ * CPU_HEAP_ALIGNMENT, for whatever reason CPU_HEAP_ALIGNMENT is only
+ * required to be multiple of CPU_ALIGNMENT and explicitly not
+ * required to be a power of two.
+ *
* COPYRIGHT (c) 1989-2004.
* On-Line Applications Research Corporation (OAR).
*
@@ -37,152 +44,200 @@ extern "C" {
#endif
/**
- * Status codes for heap_extend
+ * This type defines an unsigned integer type to store 'void*'. Analog of C99
+ * 'uintptr_t'. This should work on 16/32/64 bit architectures.
+ *
+ * FIXME: Something like this should better be defined by
+ * 'rtems/score/types.h' and used here.
*/
-typedef enum {
- HEAP_EXTEND_SUCCESSFUL,
- HEAP_EXTEND_ERROR,
- HEAP_EXTEND_NOT_IMPLEMENTED
-} Heap_Extend_status;
+
+typedef unsigned long int _H_uptr_t;
/**
- * Status codes for _Heap_Get_information
+ * Forward reference
+ *
+ * @ref Heap_Block
*/
-typedef enum {
- HEAP_GET_INFORMATION_SUCCESSFUL = 0,
- HEAP_GET_INFORMATION_SYSTEM_STATE_ERROR,
- HEAP_GET_INFORMATION_BLOCK_ERROR
-} Heap_Get_information_status;
+typedef struct Heap_Block_struct Heap_Block;
/**
- * Information block returned by the Heap routines used to
- * obtain statistics. This information is returned about
- * either free or used blocks.
+ * The following defines the data structure used to manage individual blocks
+ * in a heap. When the block is allocated, the 'next' and 'prev' fields, as
+ * well as 'prev_size' field of the next block, are not used by the heap
+ * manager and thus the address returned for the block starts at the address
+ * of the 'next' field and the size of the user accessible area includes the
+ * size of the 'prev_size' field.
+ *
+ * @note The 'next' and 'prev' pointers are only valid when the block is free.
+ * Caution must be taken to ensure that every block is large enough to
+ * hold them and that they are not accessed while the block is actually
+ * allocated (i.e., not free).
+ *
+ * @note The 'prev_size' field is only valid when HEAP_PREV_USED bit is clear
+ * in the 'size' field indicating that previous block is not allocated.
+ * If the bit is set, the 'prev_size' field is part of user-accessible
+ * space of the previous allocated block and thus shouldn't be accessed
+ *        by the heap manager code. This trick allows us to further decrease
+ * overhead in the used blocks to the size of 'size' field (4 bytes).
+ *
*/
-typedef struct {
- /** Number of blocks of this type. */
- uint32_t number;
- /** Largest blocks of this type. */
- uint32_t largest;
- /** Total size of the blocks of this type. */
- uint32_t total;
+struct Heap_Block_struct {
+ /** size of prev block (if prev block is free) */
+ uint32_t prev_size;
+ /** size of block in bytes and status of prev block */
+ uint32_t size;
+ /** pointer to the next free block */
+ Heap_Block *next;
+ /** pointer to the previous free block */
+ Heap_Block *prev;
+};
-} Heap_Information;
+/**
+ * This flag is used in the 'size' field of each heap block to indicate
+ * if previous block is free or in use. As sizes are always multiples of
+ * 4, the 2 least significant bits would always be 0, and we use one of them
+ * to store the flag.
+ */
+#define HEAP_PREV_USED 1 /* indicates previous block is in use */
/**
- * Information block returned by _Heap_Get_information
+ * The following constants reflect various requirements of the
+ * heap data structures which impact the management of a heap.
*/
-typedef struct {
- /** This field is information on the used blocks in the heap. */
- Heap_Information Free;
- /** This field is information on the used blocks in the heap. */
- Heap_Information Used;
-} Heap_Information_block;
+
+#define HEAP_MIN_BLOCK_SIZE (sizeof(Heap_Block))
/**
- * This constant is used in the size/used field of each heap block to
- * indicate when a block is in use.
+ * Offset of the block header from the block pointer. Equal to the
+ * offsetof(Heap_Block.size).
*/
-#define HEAP_BLOCK_USED 1
+#define HEAP_BLOCK_HEADER_OFFSET (sizeof(uint32_t))
/**
- * This constant is used in the size/used field of each heap block to
- * indicate when a block is free.
+ * Offset of user data pointer from the block pointer. Equal to the
+ * offsetof(Heap_Block.next).
*/
-#define HEAP_BLOCK_FREE 0
+#define HEAP_BLOCK_USER_OFFSET (sizeof(uint32_t) * 2)
/**
- * The size/used field value for the dummy front and back flags.
+ * Num bytes of overhead in used block. Equal to the sizeof(Heap_Block.size).
*/
-#define HEAP_DUMMY_FLAG (0 + HEAP_BLOCK_USED)
+#define HEAP_BLOCK_USED_OVERHEAD \
+ (HEAP_BLOCK_USER_OFFSET - HEAP_BLOCK_HEADER_OFFSET)
-/*
- * The following constants reflect various requirements of the
- * heap data structures which impact the management of a heap.
+/** Size of the permanent dummy last block. */
+#define HEAP_OVERHEAD HEAP_BLOCK_USER_OFFSET
+
+/**
+ * Run-time heap statistics.
+ *
+ * @note (double)searches/allocs gives mean number of searches per alloc while
+ * max_search gives maximum number of searches ever performed on a
+ * single call to alloc.
*
- * NOTE: Because free block overhead is greater than used block
- * overhead AND a portion of the allocated space is from
- * the extra free block overhead, the absolute lower bound
- * of the minimum fragment size is equal to the size of
- * the free block overhead.
+ * @note the statistics are always gathered. I believe the imposed overhead is
+ * rather small. Feel free to make it compile-time option if you think
+ * the overhead is too high for your application.
*/
-/** size dummy first and last blocks */
-#define HEAP_OVERHEAD (sizeof( uint32_t ) * 2)
-
-/** number of bytes of overhead in used block */
-#define HEAP_BLOCK_USED_OVERHEAD (sizeof( void * ) * 2)
+typedef struct Heap_Statistics_tag {
+ /** instance number of this heap */
+ uint32_t instance;
+ /** the size of the memory for heap */
+ uint32_t size;
+ /** current free size */
+ uint32_t free_size;
+ /** minimum free size ever */
+ uint32_t min_free_size;
+ /** current number of free blocks */
+ uint32_t free_blocks;
+ /** maximum number of free blocks ever */
+ uint32_t max_free_blocks;
+ /** current number of used blocks */
+ uint32_t used_blocks;
+ /** maximum number of blocks searched ever */
+ uint32_t max_search;
+ /** total number of successful calls to alloc */
+ uint32_t allocs;
+ /** total number of searches ever */
+ uint32_t searches;
+  /** total number of successful calls to free */
+ uint32_t frees;
+} Heap_Statistics;
-/** Minimum number of bytes the user may specify for the heap size */
-#define HEAP_MINIMUM_SIZE (HEAP_OVERHEAD + sizeof (Heap_Block))
+/**
+ * Control block used to manage each heap.
+ */
+typedef struct {
+ /** head and tail of circular list of free blocks */
+ Heap_Block free_list;
+ /** allocation unit and alignment */
+ uint32_t page_size;
+ /** minimum block size aligned on page_size */
+ uint32_t min_block_size;
+ /** first address of memory for the heap */
+ void *begin;
+ /** first address past end of memory for the heap */
+ void *end;
+ /** first valid block address in the heap */
+ Heap_Block *start;
+ /** last valid block address in the heap */
+ Heap_Block *final;
+ /** run-time statistics */
+ Heap_Statistics stats;
+} Heap_Control;
/**
- * Forward reference
- *
- * @ref Heap_Block
+ * Status codes for heap_extend
*/
-typedef struct Heap_Block_struct Heap_Block;
+
+typedef enum {
+ HEAP_EXTEND_SUCCESSFUL,
+ HEAP_EXTEND_ERROR,
+ HEAP_EXTEND_NOT_IMPLEMENTED
+} Heap_Extend_status;
/**
- * The following defines the data structure used to manage
- * individual blocks in a heap. When the block is allocated, the
- * next and previous fields are not used by the Heap Handler
- * and thus the address returned for the block starts at
- * the address of the next field.
- *
- * @note The next and previous pointers are only valid when the
- * block is free. Caution must be exercised to insure that
- * allocated blocks are large enough to contain them and
- * that they are not accidentally overwritten when the
- * block is actually allocated.
+ * Status codes for _Heap_Get_information
*/
-struct Heap_Block_struct {
- /** size and status of prev block */
- uint32_t back_flag;
- /** size and status of block */
- uint32_t front_flag;
- /** pointer to next block */
- Heap_Block *next;
- /** pointer to previous block */
- Heap_Block *previous;
-};
+
+typedef enum {
+ HEAP_GET_INFORMATION_SUCCESSFUL = 0,
+ HEAP_GET_INFORMATION_BLOCK_ERROR
+} Heap_Get_information_status;
/**
- * The following defines the control block used to manage each heap.
- *
- * @note This structure is layed out such that it can be used a a dummy
- * first and last block on the free block chain. The extra padding
- * insures the dummy last block is the correct size.
- *
- * The first Heap_Block starts at first while the second starts at
- * final. This is effectively the same trick as is used in the Chain
- * Handler.
+ * Information block returned by the Heap routines used to
+ * obtain heap information. This information is returned about
+ * either free or used blocks.
*/
typedef struct {
- /** first valid block address in heap */
- Heap_Block *start;
- /** last valid block address in heap */
- Heap_Block *final;
+ /** Number of blocks of this type. */
+ uint32_t number;
+ /** Largest blocks of this type. */
+ uint32_t largest;
+ /** Total size of the blocks of this type. */
+ uint32_t total;
+} Heap_Information;
- /** pointer to first block in heap */
- Heap_Block *first;
- /** always NULL pointer */
- Heap_Block *permanent_null;
- /** pointer to last block in heap */
- Heap_Block *last;
- /** allocation unit */
- uint32_t page_size;
- /** reserved field */
- uint32_t reserved;
-} Heap_Control;
+/**
+ * Information block returned by _Heap_Get_information
+ */
+typedef struct {
+ /** This field is information on the used blocks in the heap. */
+ Heap_Information Free;
+ /** This field is information on the used blocks in the heap. */
+ Heap_Information Used;
+} Heap_Information_block;
/**
* This routine initializes @a the_heap record to manage the
* contiguous heap of @a size bytes which starts at @a starting_address.
* Blocks of memory are allocated from the heap in multiples of
- * @a page_size byte units.
+ * @a page_size byte units. If @a page_size is 0 or is not multiple of
+ * CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
*
* @param the_heap (in) is the heap to operate upon
* @param starting_address (in) is the starting address of the memory for
@@ -206,7 +261,7 @@ uint32_t _Heap_Initialize(
* @param starting_address (in) is the starting address of the memory
* to add to the heap
* @param size (in) is the size in bytes of the memory area to add
- * @param amount_extended (in) points to a user area to return the
+ * @param amount_extended (in) points to a user area to return the
* @return a status indicating success or the reason for failure
* @return *size filled in with the amount of memory added to the heap
*/
@@ -218,9 +273,9 @@ Heap_Extend_status _Heap_Extend(
);
/**
- * This function attempts to allocate a block of size bytes from
+ * This function attempts to allocate a block of @a size bytes from
* @a the_heap. If insufficient memory is free in @a the_heap to allocate
- * a block of the requested size, then NULL is returned.
+ * a block of the requested size, then NULL is returned.
*
* @param the_heap (in) is the heap to operate upon
* @param size (in) is the amount of memory to allocate in bytes
@@ -228,16 +283,38 @@ Heap_Extend_status _Heap_Extend(
*/
void *_Heap_Allocate(
Heap_Control *the_heap,
- uint32_t size
+ uint32_t size
+);
+
+/**
+ * This function attempts to allocate a memory block of @a size bytes from
+ * @a the_heap so that the start of the user memory is aligned on the
+ * @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
+ * Any other value of @a alignment is taken "as is", i.e., even odd
+ * alignments are possible.
+ * Returns pointer to the start of the memory block if success, NULL if
+ * failure.
+ *
+ * @param the_heap (in) is the heap to operate upon
+ * @param size (in) is the amount of memory to allocate in bytes
+ * @param alignment (in) the required alignment
+ * @return NULL if unsuccessful and a pointer to the block if successful
+ */
+void *_Heap_Allocate_aligned(
+ Heap_Control *the_heap,
+ uint32_t size,
+ uint32_t alignment
);
/**
- * This kernel routine sets size to the size of the given heap block.
- * It returns TRUE if the @a starting_address is in @a the_heap, and FALSE
+ * This function sets @a *size to the size of the block of user memory
+ * which begins at @a starting_address. The size returned in @a *size could
+ * be greater than the size requested for allocation.
+ * Returns TRUE if the @a starting_address is in the heap, and FALSE
* otherwise.
*
* @param the_heap (in) is the heap to operate upon
- * @param starting_address (in) is the starting address of the user block
+ * @param starting_address (in) is the starting address of the user block
* to obtain the size of
* @param size (in) points to a user area to return the size in
* @return TRUE if successfully able to determine the size, FALSE otherwise
@@ -246,7 +323,7 @@ void *_Heap_Allocate(
boolean _Heap_Size_of_user_area(
Heap_Control *the_heap,
void *starting_address,
- uint32_t *size
+ size_t *size
);
/**
@@ -255,7 +332,7 @@ boolean _Heap_Size_of_user_area(
* possible with the freeing of this routine is performed.
*
* @param the_heap (in) is the heap to operate upon
- * @param starting_address (in) is the starting address of the user block
+ * @param starting_address (in) is the starting address of the user block
* to free
* @return TRUE if successfully freed, FALSE otherwise
*/
@@ -266,20 +343,22 @@ boolean _Heap_Free(
/**
* This routine walks the heap to verify its integrity.
- *
+ *
* @param the_heap (in) is the heap to operate upon
* @param source (in) XXX
* @param do_dump (in) is set to TRUE if errors should be printed
+ * @return TRUE if the test passed fine, FALSE otherwise.
*/
-void _Heap_Walk(
+
+boolean _Heap_Walk(
Heap_Control *the_heap,
int source,
boolean do_dump
);
/**
- * This kernel routine walks the heap and tots up the free and allocated
- * sizes. Derived from _Heap_Walk.
+ * This routine walks the heap and tots up the free and allocated
+ * sizes.
*
* @param the_heap (in) pointer to heap header
* @param the_info (in) pointer to a status information area
@@ -294,22 +373,75 @@ Heap_Get_information_status _Heap_Get_information(
/**
* This heap routine returns information about the free blocks
* in the specified heap.
- *
+ *
* @param the_heap (in) pointer to heap header.
* @param info (in) pointer to the free block information.
- *
+ *
* @return free block information filled in.
- */
-
+ */
+
void _Heap_Get_free_information(
Heap_Control *the_heap,
Heap_Information *info
);
-#ifndef __RTEMS_APPLICATION__
+#if !defined(__RTEMS_APPLICATION__)
+
+/*
+ * A pointer to unsigned integer conversion.
+ */
+#define _H_p2u(_p) ((_H_uptr_t)(_p))
+
#include <rtems/score/heap.inl>
+
+/*
+ * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
+ * Refer to 'heap.c' for details.
+ */
+
+extern uint32_t _Heap_Calc_block_size(
+ uint32_t size,
+ uint32_t page_size,
+ uint32_t min_size);
+
+extern Heap_Block* _Heap_Block_allocate(
+ Heap_Control* the_heap,
+ Heap_Block* the_block,
+ uint32_t alloc_size);
+
+/*
+ * Debug support
+ */
+
+#if defined(RTEMS_DEBUG)
+#define RTEMS_HEAP_DEBUG
#endif
+#if defined(RTEMS_HEAP_DEBUG)
+
+#include <assert.h>
+
+/*
+ * We do asserts only for heaps with instance number greater than 0 assuming
+ * that the heap used for malloc is initialized first and thus has instance
+ * number 0. Asserting malloc heap may lead to troubles as printf may invoke
+ * malloc thus probably leading to infinite recursion.
+ */
+
+#define _HAssert(cond_) \
+ do { \
+ if(the_heap->stats.instance && !(cond_)) \
+ __assert(__FILE__, __LINE__, #cond_); \
+ } while(0)
+
+#else /* !defined(RTEMS_HEAP_DEBUG) */
+
+#define _HAssert(cond_) ((void)0)
+
+#endif /* !defined(RTEMS_HEAP_DEBUG) */
+
+#endif /* !defined(__RTEMS_APPLICATION__) */
+
#ifdef __cplusplus
}
#endif
diff --git a/cpukit/score/inline/rtems/score/heap.inl b/cpukit/score/inline/rtems/score/heap.inl
index db38500a96..80df3590b7 100644
--- a/cpukit/score/inline/rtems/score/heap.inl
+++ b/cpukit/score/inline/rtems/score/heap.inl
@@ -34,7 +34,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Head (
Heap_Control *the_heap
)
{
- return (Heap_Block *)&the_heap->start;
+ return &the_heap->free_list;
}
/**
@@ -45,166 +45,254 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Tail (
Heap_Control *the_heap
)
{
- return (Heap_Block *)&the_heap->final;
+ return &the_heap->free_list;
}
/**
- * This function returns the address of the block which physically
- * precedes the_block in memory.
+ * Return the first free block of the specified heap.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Previous_block (
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First (
+ Heap_Control *the_heap
+)
+{
+ return _Heap_Head(the_heap)->next;
+}
+
+/*PAGE
+ *
+ * _Heap_Last
+ *
+ * DESCRIPTION:
+ *
+ * Return the last free block of the specified heap.
+ */
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last (
+ Heap_Control *the_heap
+)
+{
+ return _Heap_Tail(the_heap)->prev;
+}
+
+/*PAGE
+ *
+ * _Heap_Block_remove
+ *
+ * DESCRIPTION:
+ *
+ * This function removes 'the_block' from doubly-linked list.
+ */
+
+RTEMS_INLINE_ROUTINE void _Heap_Block_remove (
Heap_Block *the_block
)
{
- return (Heap_Block *) _Addresses_Subtract_offset(
- (void *)the_block,
- the_block->back_flag & ~ HEAP_BLOCK_USED
- );
+ Heap_Block *block = the_block;
+
+ Heap_Block *next = block->next;
+ Heap_Block *prev = block->prev;
+ prev->next = next;
+ next->prev = prev;
}
/**
- * This function returns the address of the block which physically
- * follows the_block in memory.
- *
- * @note Next_block assumes that the block is free.
+ * This function replaces @a old_block by @a new_block in doubly-linked list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Next_block (
+RTEMS_INLINE_ROUTINE void _Heap_Block_replace (
+ Heap_Block *old_block,
+ Heap_Block *new_block
+)
+{
+ Heap_Block *block = old_block;
+ Heap_Block *next = block->next;
+ Heap_Block *prev = block->prev;
+
+ block = new_block;
+ block->next = next;
+ block->prev = prev;
+ next->prev = prev->next = block;
+}
+
+/**
+ * This function inserts @a the_block after @a prev_block
+ * in doubly-linked list.
+ */
+RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after (
+ Heap_Block *prev_block,
Heap_Block *the_block
)
{
- return (Heap_Block *) _Addresses_Add_offset(
- (void *)the_block,
- the_block->front_flag & ~ HEAP_BLOCK_USED
- );
+ Heap_Block *prev = prev_block;
+ Heap_Block *block = the_block;
+
+ Heap_Block *next = prev->next;
+ block->next = next;
+ block->prev = prev;
+ next->prev = prev->next = block;
}
/**
- * This function calculates and returns a block's location (address)
- * in the heap based upon a base address and an offset.
+ * Return TRUE if @a value is a multiple of @a alignment, FALSE otherwise
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
- void *base,
- uint32_t offset
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned (
+ uint32_t value,
+ uint32_t alignment
)
{
- return (Heap_Block *) _Addresses_Add_offset( (void *)base, offset );
+ return (value % alignment) == 0;
}
/**
- * XXX
+ * Align @a *value up to the nearest multiple of @a alignment.
*/
-
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_User_block_at(
- void *base
+
+RTEMS_INLINE_ROUTINE void _Heap_Align_up (
+ uint32_t *value,
+ uint32_t alignment
)
{
- uint32_t offset;
-
- offset = *(((uint32_t *) base) - 1);
- return _Heap_Block_at( base, -offset + -HEAP_BLOCK_USED_OVERHEAD);
+ uint32_t v = *value;
+ uint32_t a = alignment;
+ uint32_t r = v % a;
+ *value = r ? v - r + a : v;
}
/**
- * This function returns TRUE if the previous block of the_block
- * is free, and FALSE otherwise.
+ * Align @a *value down to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_previous_block_free (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_down (
+ uint32_t *value,
+ uint32_t alignment
)
{
- return !(the_block->back_flag & HEAP_BLOCK_USED);
+ uint32_t v = *value;
+ *value = v - (v % alignment);
}
/**
- * This function returns TRUE if the block is free, and FALSE otherwise.
+ * Return TRUE if @a ptr is aligned at @a alignment boundary,
+ * FALSE otherwise
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_free (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned_ptr (
+ void *ptr,
+ uint32_t alignment
)
{
- return !(the_block->front_flag & HEAP_BLOCK_USED);
+ return (_H_p2u(ptr) % alignment) == 0;
}
/**
- * This function returns TRUE if the block is currently allocated,
- * and FALSE otherwise.
+ * Align @a *value up to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_used (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_up_uptr (
+ _H_uptr_t *value,
+ uint32_t alignment
)
{
- return (the_block->front_flag & HEAP_BLOCK_USED);
+ _H_uptr_t v = *value;
+ uint32_t a = alignment;
+ _H_uptr_t r = v % a;
+ *value = r ? v - r + a : v;
}
/**
- * This function returns the size of the_block in bytes.
+ * Align @a *value down to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_down_uptr (
+ _H_uptr_t *value,
+ uint32_t alignment
+)
+{
+ _H_uptr_t v = *value;
+ *value = v - (v % alignment);
+}
+
+/**
+ * This function calculates and returns a block's location (address)
+ * in the heap based upon a base address @a base and an @a offset.
+ */
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
+ void *base,
+ uint32_t offset
)
{
- return (the_block->front_flag & ~HEAP_BLOCK_USED);
+ return (Heap_Block *) _Addresses_Add_offset( base, offset );
}
/**
- * This function returns the starting address of the portion of the block
+ * This function returns the starting address of the portion of @a the_block
* which the user may access.
*/
-RTEMS_INLINE_ROUTINE void *_Heap_Start_of_user_area (
+RTEMS_INLINE_ROUTINE void *_Heap_User_area (
Heap_Block *the_block
)
{
- return (void *) &the_block->next;
+ return (void *) _Addresses_Add_offset ( the_block, HEAP_BLOCK_USER_OFFSET );
}
/**
- * This function returns TRUE if the_block is within the memory area
- * managed by the_heap, and FALSE otherwise.
+ * Fill @a *the_block with the address of the beginning of the block given
+ * pointer to the user accessible area @a base.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_in (
- Heap_Control *the_heap,
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Start_of_block (
+ Heap_Control *the_heap,
+ void *base,
+ Heap_Block **the_block
)
{
- return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
+ _H_uptr_t addr = _H_p2u(base);
+ /* The address passed could be greater than the block address plus
+ * HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
+ * pointers. To get rid of this offset we need to align the address down
+ * to the nearest 'page_size' boundary. */
+ _Heap_Align_down_uptr ( &addr, the_heap->page_size );
+ *the_block = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET);
+}
+
+/**
+ * This function returns TRUE if the previous block of @a the_block
+ * is in use, and FALSE otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_prev_used (
+ Heap_Block *the_block
+)
+{
+ return (the_block->size & HEAP_PREV_USED);
}
/**
- * This function validates a specified heap page size. If the page size
- * is 0 or if lies outside a page size alignment boundary it is invalid
- * and FALSE is returned. Otherwise, the page size is valid and TRUE is
- * returned.
+ * This function returns the size of @a the_block in bytes.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_page_size_valid(
- uint32_t page_size
+RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
+ Heap_Block *the_block
)
{
- return ((page_size != 0) &&
- ((page_size % CPU_HEAP_ALIGNMENT) == 0));
+ return (the_block->size & ~HEAP_PREV_USED);
}
/**
- * This function returns the block flag composed of size and in_use_flag.
- * The flag returned is suitable for use as a back or front flag in a
- * heap block.
+ * This function returns TRUE if @a the_block is within the memory area
+ * managed by @a the_heap, and FALSE otherwise.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Heap_Build_flag (
- uint32_t size,
- uint32_t in_use_flag
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_in (
+ Heap_Control *the_heap,
+ Heap_Block *the_block
)
{
- return size | in_use_flag;
+ return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
}
/**@}*/
diff --git a/cpukit/score/macros/rtems/score/heap.inl b/cpukit/score/macros/rtems/score/heap.inl
index 9c0b3cc77c..fc75ee0657 100644
--- a/cpukit/score/macros/rtems/score/heap.inl
+++ b/cpukit/score/macros/rtems/score/heap.inl
@@ -16,6 +16,12 @@
#ifndef __HEAP_inl
#define __HEAP_inl
+/*
+ * WARNING: this file is only visually checked against
+ * '../../../inline/rtems/score/heap.inl'. Use that file for reference
+ * if you encounter problems.
+ */
+
#include <rtems/score/address.h>
/*PAGE
@@ -23,127 +29,193 @@
* _Heap_Head
*/
-#define _Heap_Head( _the_heap ) \
- ((Heap_Block *)&(_the_heap)->start)
+#define _Heap_Head( _the_heap ) (&(_the_heap)->free_list)
/*PAGE
*
* _Heap_Tail
*/
-#define _Heap_Tail( _the_heap ) \
- ((Heap_Block *)&(_the_heap)->final)
+#define _Heap_Tail( _the_heap ) (&(_the_heap)->free_list)
/*PAGE
*
- * _Heap_Previous_block
+ * _Heap_First
*/
-#define _Heap_Previous_block( _the_block ) \
- ( (Heap_Block *) _Addresses_Subtract_offset( \
- (void *)(_the_block), \
- (_the_block)->back_flag & ~ HEAP_BLOCK_USED \
- ) \
- )
+#define _Heap_First( _the_heap ) (_Heap_Head(_the_heap)->next)
/*PAGE
*
- * _Heap_Next_block
+ * _Heap_Last
*/
-#define _Heap_Next_block( _the_block ) \
- ( (Heap_Block *) _Addresses_Add_offset( \
- (void *)(_the_block), \
- (_the_block)->front_flag & ~ HEAP_BLOCK_USED \
- ) \
- )
+#define _Heap_Last( _the_heap ) (_Heap_Tail(_the_heap)->prev)
/*PAGE
*
- * _Heap_Block_at
+ * _Heap_Block_remove
+ *
*/
-#define _Heap_Block_at( _base, _offset ) \
- ( (Heap_Block *) \
- _Addresses_Add_offset( (void *)(_base), (_offset) ) )
+#define _Heap_Block_remove( _the_block ) \
+ do { \
+ Heap_Block *block = (_the_block); \
+ Heap_Block *next = block->next; \
+ Heap_Block *prev = block->prev; \
+ prev->next = next; \
+ next->prev = prev; \
+ } while(0)
/*PAGE
*
- * _Heap_User_block_at
+ * _Heap_Block_replace
*
*/
-
-#define _Heap_User_block_at( _base ) \
- _Heap_Block_at( \
- (_base), \
- -*(((uint32_t *) (_base)) - 1) + -HEAP_BLOCK_USED_OVERHEAD \
- )
+
+#define _Heap_Block_replace( _old_block, _new_block ) \
+ do { \
+ Heap_Block *block = (_old_block); \
+ Heap_Block *next = block->next; \
+ Heap_Block *prev = block->prev; \
+ block = (_new_block); \
+ block->next = next; \
+ block->prev = prev; \
+ next->prev = prev->next = block; \
+ } while(0)
/*PAGE
*
- * _Heap_Is_previous_block_free
+ * _Heap_Block_insert_after
+ *
*/
-#define _Heap_Is_previous_block_free( _the_block ) \
- ( !((_the_block)->back_flag & HEAP_BLOCK_USED) )
+#define _Heap_Block_insert_after( _prev_block, _the_block ) \
+ do { \
+ Heap_Block *prev = (_prev_block); \
+ Heap_Block *block = (_the_block); \
+ Heap_Block *next = prev->next; \
+ block->next = next; \
+ block->prev = prev; \
+ next->prev = prev->next = block; \
+ } while(0)
/*PAGE
*
- * _Heap_Is_block_free
+ * _Heap_Is_aligned
*/
-#define _Heap_Is_block_free( _the_block ) \
- ( !((_the_block)->front_flag & HEAP_BLOCK_USED) )
+#define _Heap_Is_aligned( _value, _alignment ) \
+ (((_value) % (_alignment)) == 0)
/*PAGE
*
- * _Heap_Is_block_used
+ * _Heap_Align_up
*/
-#define _Heap_Is_block_used( _the_block ) \
- ((_the_block)->front_flag & HEAP_BLOCK_USED)
+#define _Heap_Align_up( _value, _alignment ) \
+ do { \
+ unsigned32 v = *(_value); \
+ unsigned32 a = (_alignment); \
+ unsigned32 r = v % a; \
+ *(_value) = r ? v - r + a : v; \
+ } while(0)
/*PAGE
*
- * _Heap_Block_size
+ * _Heap_Align_down
*/
-#define _Heap_Block_size( _the_block ) \
- ((_the_block)->front_flag & ~HEAP_BLOCK_USED)
+#define _Heap_Align_down( _value, _alignment ) \
+ do { \
+ unsigned32 v = *(_value); \
+ *(_value) = v - (v % (_alignment)); \
+ } while(0)
/*PAGE
*
- * _Heap_Start_of_user_area
+ * _Heap_Is_aligned_ptr
*/
-#define _Heap_Start_of_user_area( _the_block ) \
- ((void *) &(_the_block)->next)
+#define _Heap_Is_aligned_ptr( _ptr, _alignment ) \
+ ((_H_p2u(_ptr) % (_alignment)) == 0)
/*PAGE
*
- * _Heap_Is_block_in
+ * _Heap_Align_up_uptr
*/
-#define _Heap_Is_block_in( _the_heap, _the_block ) \
- ( ((_the_block) >= (_the_heap)->start) && \
- ((_the_block) <= (_the_heap)->final) )
+#define _Heap_Align_up_uptr( _value, _alignment ) \
+ do { \
+ _H_uptr_t v = *(_value); \
+ unsigned32 a = (_alignment); \
+ _H_uptr_t r = v % a; \
+ *(_value) = r ? v - r + a : v; \
+ } while(0)
/*PAGE
*
- * _Heap_Is_page_size_valid
+ * _Heap_Align_down_uptr
*/
-#define _Heap_Is_page_size_valid( _page_size ) \
- ( ((_page_size) != 0) && \
- (((_page_size) % CPU_HEAP_ALIGNMENT) == 0) )
+#define _Heap_Align_down_uptr( _value, _alignment ) \
+ do { \
+ _H_uptr_t v = *(_value); \
+ *(_value) = v - (v % (_alignment)); \
+ } while(0)
/*PAGE
*
- * _Heap_Build_flag
+ * _Heap_Block_at
*/
-#define _Heap_Build_flag( _size, _in_use_flag ) \
- ( (_size) | (_in_use_flag))
+#define _Heap_Block_at( _base, _offset ) \
+ ( (Heap_Block *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*PAGE
+ *
+ * _Heap_User_area
+ */
+
+#define _Heap_User_area( _the_block ) \
+ ((void *) _Addresses_Add_offset( (_the_block), HEAP_BLOCK_USER_OFFSET ))
+
+/*PAGE
+ *
+ * _Heap_Start_of_block
+ */
+
+#define _Heap_Start_of_block( _the_heap, _base, _the_block_ptr ) \
+ do { \
+ _H_uptr_t addr = _H_p2u(_base); \
+ _Heap_Align_down( &addr, (_the_heap)->page_size ); \
+ *(_the_block_ptr) = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET); \
+ } while(0)
+
+/*PAGE
+ *
+ * _Heap_Is_prev_used
+ */
+
+#define _Heap_Is_prev_used( _the_block ) \
+ ((_the_block)->size & HEAP_PREV_USED)
+
+/*PAGE
+ *
+ * _Heap_Block_size
+ */
+
+#define _Heap_Block_size( _the_block ) \
+ ((_the_block)->size & ~HEAP_PREV_USED)
+
+/*PAGE
+ *
+ * _Heap_Is_block_in
+ */
+
+#define _Heap_Is_block_in( _the_heap, _the_block ) \
+ ( _Addresses_Is_in_range( (_the_block), \
+ (_the_heap)->start, (_the_heap)->final ) )
#endif
/* end of include file */
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 6d6873ebed..910ac94c33 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -16,6 +16,8 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
+static uint32_t instance = 0;
+
/*PAGE
*
* _Heap_Initialize
@@ -32,27 +34,77 @@
* returns - maximum memory available if RTEMS_SUCCESSFUL
* 0 - otherwise
*
- * This is what a heap looks like in memory immediately
- * after initialization:
- *
- * +--------------------------------+
- * 0 | size = 0 | status = used | a.k.a. dummy back flag
- * +--------------------------------+
- * 4 | size = size-8 | status = free | a.k.a. front flag
- * +--------------------------------+
- * 8 | next = PERM HEAP_TAIL |
- * +--------------------------------+
- * 12 | previous = PERM HEAP_HEAD |
- * +--------------------------------+
+ * This is what a heap looks like in memory immediately after initialization:
+ *
+ *
+ * +--------------------------------+ <- begin = starting_address
+ * | unused space due to alignment |
+ * | size < page_size |
+ * 0 +--------------------------------+ <- first block
+ * | prev_size = 1 (arbitrary) |
+ * 4 +--------------------------------+
+ * | size = size0 | 1 |
+ * 8 +---------------------+----------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * 12 +---------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +---------------------+ |
+ * | available |
* | |
- * | memory available |
- * | for allocation |
+ * | for allocation |
* | |
- * +--------------------------------+
- * size - 8 | size = size-8 | status = free | a.k.a. back flag
- * +--------------------------------+
- * size - 4 | size = 0 | status = used | a.k.a. dummy front flag
- * +--------------------------------+
+ * size0 +--------------------------------+ <- last dummy block
+ * | prev_size = size0 |
+ * +4 +--------------------------------+
+ * | size = 0 (arbitrary) | 0 | <- prev block is free
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | unused space due to alignment |
+ * | size < page_size |
+ * +--------------------------------+ <- end = begin + size
+ *
+ * This is what a heap looks like after first allocation of SIZE bytes.
+ * BSIZE stands for SIZE + 4 aligned up on 'page_size' boundary if allocation
+ * has been performed by _Heap_Allocate(). If allocation has been performed
+ * by _Heap_Allocate_aligned(), the block size BSIZE is defined differently
+ * (see 'heapallocatealigned.c' for details).
+ *
+ * +--------------------------------+ <- begin = starting_address
+ * | unused space due to alignment |
+ * | size < page_size |
+ * 0 +--------------------------------+ <- first block
+ * | prev_size = 1 (arbitrary) |
+ * 4 +--------------------------------+
+ * | size = S = size0 - BSIZE | 1 |
+ * 8 +---------------------+----------+ <- aligned on page_size
+ * | next = HEAP_TAIL | |
+ * 12 +---------------------+ |
+ * | prev = HEAP_HEAD | memory |
+ * +---------------------+ |
+ * | available |
+ * | |
+ * | for allocation |
+ * | |
+ * S +--------------------------------+ <- used block
+ * | prev_size = size0 - BSIZE |
+ * +4 +--------------------------------+
+ * | size = BSIZE | 0 | <- prev block is free
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | . | Pointer returned to the user
+ * | . | is (S+8) for _Heap_Allocate()
+ * | . | and is in range
+ * S + 8 + | user-accessible | [S+8,S+8+page_size) for
+ * page_size+- - - - - -+ _Heap_Allocate_aligned()
+ * | area |
+ * | . |
+ * S + BSIZE +- - - - - . - - - - -+ <- last dummy block
+ * | . |
+ * +4 +--------------------------------+
+ * | size = 0 (arbitrary) | 1 | <- prev block is used
+ * +8 +--------------------------------+ <- aligned on page_size
+ * | unused space due to alignment |
+ * | size < page_size |
+ * +--------------------------------+ <- end = begin + size
+ *
*/
uint32_t _Heap_Initialize(
@@ -62,31 +114,148 @@ uint32_t _Heap_Initialize(
uint32_t page_size
)
{
- Heap_Block *the_block;
- uint32_t the_size;
+ Heap_Block *the_block;
+ uint32_t the_size;
+ _H_uptr_t start;
+ _H_uptr_t aligned_start;
+ uint32_t overhead;
+ Heap_Statistics *const stats = &the_heap->stats;
+
+ if(page_size == 0)
+ page_size = CPU_ALIGNMENT;
+ else
+ _Heap_Align_up( &page_size, CPU_ALIGNMENT );
- if ( !_Heap_Is_page_size_valid( page_size ) ||
- (size < HEAP_MINIMUM_SIZE) )
- return 0;
+ /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
+ (value of user pointer) is aligned on 'page_size' boundary. Make sure
+ resulting 'aligned_start' is not below 'starting_address'. */
+ start = _H_p2u(starting_address);
+ aligned_start = start + HEAP_BLOCK_USER_OFFSET;
+ _Heap_Align_up_uptr ( &aligned_start, page_size );
+ aligned_start -= HEAP_BLOCK_USER_OFFSET;
+
+ /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
+ nearest multiple of 'page_size'. */
+ the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
+ _Heap_Align_up ( &the_heap->min_block_size, page_size );
+
+ /* Calculate 'the_size' -- size of the first block so that there is enough
+ space at the end for the permanent last block. It is equal to 'size'
+ minus total overhead aligned down to the nearest multiple of
+ 'page_size'. */
+ overhead = HEAP_OVERHEAD + (aligned_start - start);
+ if ( size < overhead )
+ return 0; /* Too small area for the heap */
+ the_size = size - overhead;
+ _Heap_Align_down ( &the_size, page_size );
+ if ( the_size == 0 )
+ return 0; /* Too small area for the heap */
the_heap->page_size = page_size;
- the_size = size - HEAP_OVERHEAD;
+ the_heap->begin = starting_address;
+ the_heap->end = starting_address + size;
+
+ the_block = (Heap_Block *) aligned_start;
+
+ the_block->prev_size = HEAP_PREV_USED;
+ the_block->size = the_size | HEAP_PREV_USED;
+ the_block->next = _Heap_Tail( the_heap );
+ the_block->prev = _Heap_Head( the_heap );
+ _Heap_Head(the_heap)->next = the_block;
+ _Heap_Tail(the_heap)->prev = the_block;
+ the_heap->start = the_block;
- the_block = (Heap_Block *) starting_address;
- the_block->back_flag = HEAP_DUMMY_FLAG;
- the_block->front_flag = the_size;
- the_block->next = _Heap_Tail( the_heap );
- the_block->previous = _Heap_Head( the_heap );
+ _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
+ _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
+ _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
- the_heap->start = the_block;
- the_heap->first = the_block;
- the_heap->permanent_null = NULL;
- the_heap->last = the_block;
- the_block = _Heap_Next_block( the_block );
- the_block->back_flag = the_size;
- the_block->front_flag = HEAP_DUMMY_FLAG;
- the_heap->final = the_block;
+ the_block = _Heap_Block_at( the_block, the_size );
+ the_heap->final = the_block; /* Permanent final block of the heap */
+ the_block->prev_size = the_size; /* Previous block is free */
+ the_block->size = 0; /* This is the only block with size=0 */
+
+ stats->size = size;
+ stats->free_size = the_size;
+ stats->min_free_size = the_size;
+ stats->free_blocks = 1;
+ stats->max_free_blocks = 1;
+ stats->used_blocks = 0;
+ stats->max_search = 0;
+ stats->allocs = 0;
+ stats->searches = 0;
+ stats->frees = 0;
+ stats->instance = instance++;
return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
}
+
+/*PAGE
+ *
+ * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
+ *
+ * Note: there is no reason to put them into a separate file(s) as they are
+ * always required for heap to be useful.
+ *
+ */
+
+
+/*
+ * Convert user requested 'size' of memory block to the block size.
+ * Return block size on success, 0 if overflow occurred
+ */
+uint32_t _Heap_Calc_block_size(
+ uint32_t size,
+ uint32_t page_size,
+ uint32_t min_size)
+{
+ uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
+ _Heap_Align_up(&block_size, page_size);
+ if(block_size < min_size) block_size = min_size;
+ return (block_size > size) ? block_size : 0;
+}
+
+
+/*
+ * Allocate block of size 'alloc_size' from 'the_block' belonging to
+ * 'the_heap'. Either split 'the_block' or allocate it entirely.
+ * Return the block allocated.
+ */
+Heap_Block* _Heap_Block_allocate(
+ Heap_Control* the_heap,
+ Heap_Block* the_block,
+ uint32_t alloc_size)
+{
+ Heap_Statistics *const stats = &the_heap->stats;
+ uint32_t const block_size = _Heap_Block_size(the_block);
+ uint32_t const the_rest = block_size - alloc_size;
+
+ _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
+ _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
+ _HAssert(alloc_size <= block_size);
+
+ if(the_rest >= the_heap->min_block_size) {
+ /* Split the block so that lower part is still free, and upper part
+ becomes used. */
+ the_block->size = the_rest | HEAP_PREV_USED;
+ the_block = _Heap_Block_at(the_block, the_rest);
+ the_block->prev_size = the_rest;
+ the_block->size = alloc_size;
+ }
+ else {
+ /* Don't split the block as remainder is either zero or too small to be
+ used as a separate free block. Change 'alloc_size' to the size of the
+ block and remove the block from the list of free blocks. */
+ _Heap_Block_remove(the_block);
+ alloc_size = block_size;
+ stats->free_blocks -= 1;
+ }
+ /* Mark the block as used (in the next block). */
+ _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
+ /* Update statistics */
+ stats->free_size -= alloc_size;
+ if(stats->min_free_size > stats->free_size)
+ stats->min_free_size = stats->free_size;
+ stats->used_blocks += 1;
+ return the_block;
+}
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 54bdd58e45..d49eb62b46 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -36,78 +36,43 @@ void *_Heap_Allocate(
uint32_t size
)
{
- uint32_t excess;
- uint32_t the_size;
+ uint32_t the_size;
+ uint32_t search_count;
Heap_Block *the_block;
- Heap_Block *next_block;
- Heap_Block *temporary_block;
- void *ptr;
- uint32_t offset;
-
- /*
- * Catch the case of a user allocating close to the limit of the
- * uint32_t .
- */
-
- if ( size >= (-1 - HEAP_BLOCK_USED_OVERHEAD) )
- return( NULL );
+ void *ptr = NULL;
+ Heap_Statistics *const stats = &the_heap->stats;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
+
+ the_size =
+ _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
+ if(the_size == 0)
+ return NULL;
+
+ /* Find large enough free block. */
+ for(the_block = _Heap_First(the_heap), search_count = 0;
+ the_block != tail;
+ the_block = the_block->next, ++search_count)
+ {
+ /* As we always coalesce free blocks, prev block must have been used. */
+ _HAssert(_Heap_Is_prev_used(the_block));
- excess = size % the_heap->page_size;
- the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
+ /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
+ result of the comparison. */
+ if(the_block->size >= the_size) {
+ the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
- if ( excess )
- the_size += the_heap->page_size - excess;
+ ptr = _Heap_User_area(the_block);
- if ( the_size < sizeof( Heap_Block ) )
- the_size = sizeof( Heap_Block );
+ stats->allocs += 1;
+ stats->searches += search_count + 1;
- for ( the_block = the_heap->first;
- ;
- the_block = the_block->next ) {
- if ( the_block == _Heap_Tail( the_heap ) )
- return( NULL );
- if ( the_block->front_flag >= the_size )
+ _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
break;
+ }
}
- if ( (the_block->front_flag - the_size) >
- (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
- the_block->front_flag -= the_size;
- next_block = _Heap_Next_block( the_block );
- next_block->back_flag = the_block->front_flag;
-
- temporary_block = _Heap_Block_at( next_block, the_size );
- temporary_block->back_flag =
- next_block->front_flag = _Heap_Build_flag( the_size,
- HEAP_BLOCK_USED );
- ptr = _Heap_Start_of_user_area( next_block );
- } else {
- next_block = _Heap_Next_block( the_block );
- next_block->back_flag = _Heap_Build_flag( the_block->front_flag,
- HEAP_BLOCK_USED );
- the_block->front_flag = next_block->back_flag;
- the_block->next->previous = the_block->previous;
- the_block->previous->next = the_block->next;
- ptr = _Heap_Start_of_user_area( the_block );
- }
-
- /*
- * round ptr up to a multiple of page size
- * Have to save the bump amount in the buffer so that free can figure it out
- */
-
- offset = the_heap->page_size - (((uint32_t ) ptr) & (the_heap->page_size - 1));
- ptr = _Addresses_Add_offset( ptr, offset );
- *(((uint32_t *) ptr) - 1) = offset;
-
-#ifdef RTEMS_DEBUG
- {
- uint32_t ptr_u32;
- ptr_u32 = (uint32_t ) ptr;
- if (ptr_u32 & (the_heap->page_size - 1))
- abort();
- }
-#endif
+ if(stats->max_search < search_count)
+ stats->max_search = search_count;
return ptr;
}
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index 3cb318d70b..8166d99e6f 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -39,8 +39,8 @@ Heap_Extend_status _Heap_Extend(
uint32_t *amount_extended
)
{
- Heap_Block *the_block;
- uint32_t *p;
+ uint32_t the_size;
+ Heap_Statistics *const stats = &the_heap->stats;
/*
* The overhead was taken from the original heap memory.
@@ -62,22 +62,13 @@ Heap_Extend_status _Heap_Extend(
* As noted, this code only supports (4).
*/
- if ( starting_address >= (void *) the_heap->start && /* case 3 */
- starting_address <= (void *) the_heap->final
+ if ( starting_address >= the_heap->begin && /* case 3 */
+ starting_address < the_heap->end
)
return HEAP_EXTEND_ERROR;
- if ( starting_address < (void *) the_heap->start ) { /* cases 1 and 2 */
-
- return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1 and 2 */
-
- } else { /* cases 4 and 5 */
-
- the_block = (Heap_Block *)
- _Addresses_Subtract_offset( starting_address, HEAP_OVERHEAD );
- if ( the_block != the_heap->final )
- return HEAP_EXTEND_NOT_IMPLEMENTED; /* case 5 */
- }
+ if ( starting_address != the_heap->end )
+ return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1, 2, and 5 */
/*
* Currently only case 4 should make it to this point.
@@ -85,27 +76,26 @@ Heap_Extend_status _Heap_Extend(
* block and free it.
*/
+ old_final = the_heap->final;
+ the_heap->end = _Addresses_Add_offset( the_heap->end, size );
+ the_size = _Addresses_Subtract( the_heap->end, old_final ) - HEAP_OVERHEAD;
+ _Heap_Align_down( &the_size, the_heap->page_size );
+
*amount_extended = size;
- old_final = the_heap->final;
- new_final = _Addresses_Add_offset( old_final, size );
- /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
+ if( the_size < the_heap->min_block_size )
+ return HEAP_EXTEND_SUCCESSFUL;
+ old_final->size = the_size | (old_final->size & HEAP_PREV_USED);
+ new_final = _Heap_Block_at( old_final, the_size );
+ new_final->size = HEAP_PREV_USED;
the_heap->final = new_final;
- old_final->front_flag =
- new_final->back_flag = _Heap_Build_flag( size, HEAP_BLOCK_USED );
- new_final->front_flag = HEAP_DUMMY_FLAG;
-
- /*
- * Must pass in address of "user" area
- * So add in the offset field.
- */
+ stats->size += size;
+ stats->used_blocks += 1;
+ stats->frees -= 1; /* Don't count subsequent call as actual free() */
- p = (uint32_t *) &old_final->next;
- *p = sizeof(uint32_t );
- p++;
- _Heap_Free( the_heap, p );
+ _Heap_Free( the_heap, _Heap_User_area( old_final ) );
return HEAP_EXTEND_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 40e56a7b88..0bd413d1dc 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -39,65 +39,90 @@ boolean _Heap_Free(
{
Heap_Block *the_block;
Heap_Block *next_block;
- Heap_Block *new_next_block;
- Heap_Block *previous_block;
- Heap_Block *temporary_block;
- uint32_t the_size;
+ uint32_t the_size;
+ uint32_t next_size;
+ Heap_Statistics *const stats = &the_heap->stats;
+ boolean next_is_free;
- the_block = _Heap_User_block_at( starting_address );
+ _Heap_Start_of_block( the_heap, starting_address, &the_block );
- if ( !_Heap_Is_block_in( the_heap, the_block ) ||
- _Heap_Is_block_free( the_block ) ) {
- return( FALSE );
+ if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
+ _HAssert(starting_address == NULL);
+ return( FALSE );
}
- the_size = _Heap_Block_size( the_block );
+ the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_block_in( the_heap, next_block ) ||
- (the_block->front_flag != next_block->back_flag) ) {
- return( FALSE );
+ if ( !_Heap_Is_prev_used( next_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
+ }
+
+ if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
}
- if ( _Heap_Is_previous_block_free( the_block ) ) {
- previous_block = _Heap_Previous_block( the_block );
+ next_size = _Heap_Block_size( next_block );
+ next_is_free = next_block < the_heap->final &&
+ !_Heap_Is_prev_used(_Heap_Block_at(next_block, next_size));
+
+ if ( !_Heap_Is_prev_used( the_block ) ) {
+ uint32_t const prev_size = the_block->prev_size;
+ Heap_Block *const prev_block = _Heap_Block_at( the_block, -prev_size );
- if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
- return( FALSE );
+ if ( !_Heap_Is_block_in( the_heap, prev_block ) ) {
+ _HAssert(FALSE);
+ return( FALSE );
}
- if ( _Heap_Is_block_free( next_block ) ) { /* coalesce both */
- previous_block->front_flag += next_block->front_flag + the_size;
- temporary_block = _Heap_Next_block( previous_block );
- temporary_block->back_flag = previous_block->front_flag;
- next_block->next->previous = next_block->previous;
- next_block->previous->next = next_block->next;
+ /* As we always coalesce free blocks, the block that precedes prev_block
+ must have been used. */
+ if ( !_Heap_Is_prev_used ( prev_block) ) {
+ _HAssert(FALSE);
+ return( FALSE );
+ }
+
+ if ( next_is_free ) { /* coalesce both */
+ uint32_t const size = the_size + prev_size + next_size;
+ _Heap_Block_remove( next_block );
+ stats->free_blocks -= 1;
+ prev_block->size = size | HEAP_PREV_USED;
+ next_block = _Heap_Block_at( prev_block, size );
+ _HAssert(!_Heap_Is_prev_used( next_block));
+ next_block->prev_size = size;
}
- else { /* coalesce prev */
- previous_block->front_flag =
- next_block->back_flag = previous_block->front_flag + the_size;
+ else { /* coalesce prev */
+ uint32_t const size = the_size + prev_size;
+ prev_block->size = size | HEAP_PREV_USED;
+ next_block->size &= ~HEAP_PREV_USED;
+ next_block->prev_size = size;
}
}
- else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
- the_block->front_flag = the_size + next_block->front_flag;
- new_next_block = _Heap_Next_block( the_block );
- new_next_block->back_flag = the_block->front_flag;
- the_block->next = next_block->next;
- the_block->previous = next_block->previous;
- next_block->previous->next = the_block;
- next_block->next->previous = the_block;
-
- if (the_heap->first == next_block)
- the_heap->first = the_block;
+ else if ( next_is_free ) { /* coalesce next */
+ uint32_t const size = the_size + next_size;
+ _Heap_Block_replace( next_block, the_block );
+ the_block->size = size | HEAP_PREV_USED;
+ next_block = _Heap_Block_at( the_block, size );
+ next_block->prev_size = size;
}
- else { /* no coalesce */
- next_block->back_flag =
- the_block->front_flag = the_size;
- the_block->previous = _Heap_Head( the_heap );
- the_block->next = the_heap->first;
- the_heap->first = the_block;
- the_block->next->previous = the_block;
+ else { /* no coalesce */
+ /* Add 'the_block' to the head of the free blocks list as it tends to
+ produce less fragmentation than adding to the tail. */
+ _Heap_Block_insert_after( _Heap_Head( the_heap), the_block );
+ the_block->size = the_size | HEAP_PREV_USED;
+ next_block->size &= ~HEAP_PREV_USED;
+ next_block->prev_size = the_size;
+
+ stats->free_blocks += 1;
+ if ( stats->max_free_blocks < stats->free_blocks )
+ stats->max_free_blocks = stats->free_blocks;
}
+ stats->used_blocks -= 1;
+ stats->free_size += the_size;
+ stats->frees += 1;
+
return( TRUE );
}
diff --git a/cpukit/score/src/heapgetfreeinfo.c b/cpukit/score/src/heapgetfreeinfo.c
index 910eda838f..1b8d8598a0 100644
--- a/cpukit/score/src/heapgetfreeinfo.c
+++ b/cpukit/score/src/heapgetfreeinfo.c
@@ -37,21 +37,24 @@ void _Heap_Get_free_information(
)
{
Heap_Block *the_block;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
info->number = 0;
info->largest = 0;
info->total = 0;
- for ( the_block = the_heap->first;
- ;
- the_block = the_block->next ) {
- if ( the_block == _Heap_Tail( the_heap ) )
- return;
-
- info->number++;
- info->total += the_block->front_flag;
+ for(the_block = _Heap_First(the_heap);
+ the_block != tail;
+ the_block = the_block->next)
+ {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+
+ /* As we always coalesce free blocks, prev block must have been used. */
+ _HAssert(_Heap_Is_prev_used(the_block));
- if ( the_block->front_flag >= info->largest )
- info->largest = the_block->front_flag;
+ info->number++;
+ info->total += the_size;
+ if ( info->largest < the_size )
+ info->largest = the_size;
}
}
diff --git a/cpukit/score/src/heapgetinfo.c b/cpukit/score/src/heapgetinfo.c
index 9e71ea7e43..6ba368c7a0 100644
--- a/cpukit/score/src/heapgetinfo.c
+++ b/cpukit/score/src/heapgetinfo.c
@@ -8,6 +8,7 @@
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*
+ * $Id$
*/
@@ -37,11 +38,11 @@ Heap_Get_information_status _Heap_Get_information(
Heap_Information_block *the_info
)
{
- Heap_Block *the_block = 0; /* avoid warnings */
- Heap_Block *next_block = 0; /* avoid warnings */
- int notdone = 1;
- uint32_t size;
+ Heap_Block *the_block = the_heap->start;
+ Heap_Block *const end = the_heap->final;
+ _HAssert(the_block->prev_size == HEAP_PREV_USED);
+ _HAssert(_Heap_Is_prev_used(the_block));
the_info->Free.number = 0;
the_info->Free.total = 0;
@@ -50,60 +51,31 @@ Heap_Get_information_status _Heap_Get_information(
the_info->Used.total = 0;
the_info->Used.largest = 0;
- /*
- * We don't want to allow walking the heap until we have
- * transferred control to the user task so we watch the
- * system state.
- */
-
- if ( !_System_state_Is_up( _System_state_Get() ) )
- return HEAP_GET_INFORMATION_SYSTEM_STATE_ERROR;
-
- the_block = the_heap->start;
-
- /*
- * Handle the 1st block
- */
-
- if ( the_block->back_flag != HEAP_DUMMY_FLAG ) {
- return HEAP_GET_INFORMATION_BLOCK_ERROR;
+ while ( the_block != end ) {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+ Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
+
+ if ( _Heap_Is_prev_used(next_block) ) {
+ the_info->Used.number++;
+ the_info->Used.total += the_size;
+ if ( the_info->Used.largest < the_size )
+ the_info->Used.largest = the_size;
+ } else {
+ the_info->Free.number++;
+ the_info->Free.total += the_size;
+ if ( the_info->Free.largest < the_size )
+ the_info->Free.largest = the_size;
+ if ( the_size != next_block->prev_size )
+ return HEAP_GET_INFORMATION_BLOCK_ERROR;
+ }
+
+ the_block = next_block;
}
- while (notdone) {
-
- /*
- * Accumulate size
- */
-
- size = _Heap_Block_size(the_block);
- if ( _Heap_Is_block_free(the_block) ) {
- the_info->Free.number++;
- the_info->Free.total += size;
- if ( size > the_info->Free.largest )
- the_info->Free.largest = size;
- } else {
- the_info->Used.number++;
- the_info->Used.total += size;
- if ( size > the_info->Used.largest )
- the_info->Used.largest = size;
- }
-
- /*
- * Handle the last block
- */
-
- if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
- next_block = _Heap_Next_block(the_block);
- if ( the_block->front_flag != next_block->back_flag ) {
- return HEAP_GET_INFORMATION_BLOCK_ERROR;
- }
- }
-
- if ( the_block->front_flag == HEAP_DUMMY_FLAG )
- notdone = 0;
- else
- the_block = next_block;
- } /* while(notdone) */
+ /* Handle the last dummy block. Don't consider this block to be
+ "used" as client never allocated it. Make 'Used.total' contain this
+ block's overhead though. */
+ the_info->Used.total += HEAP_OVERHEAD;
return HEAP_GET_INFORMATION_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index 3321816c4a..002e815bd5 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -20,12 +20,14 @@
*
* _Heap_Size_of_user_area
*
- * This kernel routine returns the size of the memory area
- * given heap block.
+ * This kernel routine sets '*size' to the size of the block of memory
+ * which begins at 'starting_address'.
+ * It returns TRUE if the 'starting_address' is in the heap, and FALSE
+ * otherwise.
*
* Input parameters:
* the_heap - pointer to heap header
- * starting_address - starting address of the memory block to free.
+ * starting_address - starting address of the memory block
* size - pointer to size of area
*
* Output parameters:
@@ -37,7 +39,7 @@
boolean _Heap_Size_of_user_area(
Heap_Control *the_heap,
void *starting_address,
- uint32_t *size
+ size_t *size
)
{
Heap_Block *the_block;
@@ -48,22 +50,32 @@ boolean _Heap_Size_of_user_area(
starting_address, (void *)the_heap->start, (void *)the_heap->final ) )
return( FALSE );
- the_block = _Heap_User_block_at( starting_address );
-
- if ( !_Heap_Is_block_in( the_heap, the_block ) )
- return( FALSE );
+ _Heap_Start_of_block( the_heap, starting_address, &the_block );
- if ( _Heap_Is_block_free( the_block ) )
+ if ( !_Heap_Is_block_in( the_heap, the_block ) )
return( FALSE );
the_size = _Heap_Block_size( the_block );
next_block = _Heap_Block_at( the_block, the_size );
- if ( !_Heap_Is_block_in( the_heap, next_block ) ||
- (the_block->front_flag != next_block->back_flag) )
+ if (
+ !_Heap_Is_block_in( the_heap, next_block ) ||
+ !_Heap_Is_prev_used( next_block )
+ )
return( FALSE );
- *size = the_size;
+ /* 'starting_address' could be greater than 'the_block' address plus
+ HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
+ pointers. To get rid of this offset we calculate user size as difference
+ between the end of 'the_block' (='next_block') and 'starting_address'
+ and then add correction equal to the offset of the 'size' field of the
+ 'Heap_Block' structure. The correction is due to the fact that
+ 'prev_size' field of the next block is actually used as user accessible
+ area of 'the_block'. */
+
+ *size = _Addresses_Subtract ( next_block, starting_address )
+ + HEAP_BLOCK_HEADER_OFFSET;
+
return( TRUE );
}
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index 4f93ae7be0..a589b3cdf4 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -30,32 +30,32 @@
* Output parameters: NONE
*/
-#ifndef RTEMS_DEBUG
+#if !defined(RTEMS_HEAP_DEBUG)
-void _Heap_Walk(
+boolean _Heap_Walk(
Heap_Control *the_heap,
int source,
boolean do_dump
)
{
+ return TRUE;
}
-#else
+#else /* defined(RTEMS_HEAP_DEBUG) */
#include <stdio.h>
-#include <unistd.h>
-void _Heap_Walk(
+boolean _Heap_Walk(
Heap_Control *the_heap,
int source,
boolean do_dump
)
{
- Heap_Block *the_block = 0; /* avoid warnings */
- Heap_Block *next_block = 0; /* avoid warnings */
- int notdone = 1;
- int error = 0;
- int passes = 0;
+ Heap_Block *the_block = the_heap->start;
+ Heap_Block *const end = the_heap->final;
+ Heap_Block *const tail = _Heap_Tail(the_heap);
+ int error = 0;
+ int passes = 0;
/*
* We don't want to allow walking the heap until we have
@@ -64,86 +64,110 @@ void _Heap_Walk(
*/
if ( !_System_state_Is_up( _System_state_Get() ) )
- return;
+ return FALSE;
- the_block = the_heap->start;
+ if (source < 0)
+ source = the_heap->stats.instance;
- if (do_dump == TRUE) {
- printf("\nPASS: %d start @ 0x%p final 0x%p, first 0x%p last 0x%p\n",
- source, the_heap->start, the_heap->final,
- the_heap->first, the_heap->last
- );
- }
+ if (do_dump == TRUE)
+ printf("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
+ source, the_block, end,
+ _Heap_First(the_heap), _Heap_Last(the_heap),
+ the_heap->begin, the_heap->end);
/*
* Handle the 1st block
*/
- if (the_block->back_flag != HEAP_DUMMY_FLAG) {
- printf("PASS: %d Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
+ if (!_Heap_Is_prev_used(the_block)) {
+ printf("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
error = 1;
}
- while (notdone) {
- passes++;
- if (error && (passes > 10))
- abort();
-
- if (do_dump == TRUE) {
- printf("PASS: %d Block @ 0x%p Back %d, Front %d",
- source, the_block,
- the_block->back_flag, the_block->front_flag);
- if ( _Heap_Is_block_free(the_block) ) {
- printf( " Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
+ if (the_block->prev_size != HEAP_PREV_USED) {
+ printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
+ error = 1;
+ }
+
+ while ( the_block < end ) {
+ uint32_t const the_size = _Heap_Block_size(the_block);
+ Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
+ boolean prev_used = _Heap_Is_prev_used(the_block);
+
+ if (do_dump) {
+ printf("PASS: %d block %p size %d(%c)",
+ source, the_block, the_size, (prev_used ? 'U' : 'F'));
+ if (prev_used)
+ printf(" prev_size %d", the_block->prev_size);
+ else
+ printf(" (prev_size) %d", the_block->prev_size);
}
- /*
- * Handle the last block
- */
+ if (!_Heap_Is_block_in(the_heap, next_block)) {
+ if (do_dump) printf("\n");
+ printf("PASS: %d !block %p is out of heap\n", source, next_block);
+ error = 1;
+ break;
+ }
- if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
- next_block = _Heap_Next_block(the_block);
- if ( the_block->front_flag != next_block->back_flag ) {
+ if (!_Heap_Is_prev_used(next_block)) {
+ if (do_dump)
+ printf( " prev %p next %p", the_block->prev, the_block->next);
+ if (_Heap_Block_size(the_block) != next_block->prev_size) {
+ if (do_dump) printf("\n");
+ printf("PASS: %d !front and back sizes don't match", source);
error = 1;
- printf("PASS: %d Front and back flags don't match\n", source);
- printf(" Current Block (%p): Back - %d, Front - %d",
- the_block, the_block->back_flag, the_block->front_flag);
- if (do_dump == TRUE) {
- if (_Heap_Is_block_free(the_block)) {
- printf(" Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
- } else {
- printf("\n");
- }
- printf(" Next Block (%p): Back - %d, Front - %d",
- next_block, next_block->back_flag, next_block->front_flag);
- if (do_dump == TRUE) {
- if (_Heap_Is_block_free(next_block)) {
- printf(" Prev 0x%p, Next 0x%p\n",
- the_block->previous, the_block->next);
- } else {
- printf("\n");
- }
- } else {
- printf("\n");
+ }
+ if (!prev_used) {
+ if (do_dump || error) printf("\n");
+ printf("PASS: %d !two consecutive blocks are free", source);
+ error = 1;
+ }
+
+ { /* Check if 'the_block' is in the free block list */
+ Heap_Block* block = _Heap_First(the_heap);
+ while(block != the_block && block != tail)
+ block = block->next;
+ if(block != the_block) {
+ if (do_dump || error) printf("\n");
+ printf("PASS: %d !the_block not in the free list", source);
+ error = 1;
}
}
+
}
+ if (do_dump || error) printf("\n");
- if (the_block->front_flag == HEAP_DUMMY_FLAG)
- notdone = 0;
- else
- the_block = next_block;
+ if (the_size < the_heap->min_block_size) {
+ printf("PASS: %d !block size is too small\n", source);
+ error = 1;
+ break;
+ }
+ if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
+ printf("PASS: %d !block size is misaligned\n", source);
+ error = 1;
+ }
+
+ if (++passes > (do_dump ? 10 : 0) && error)
+ break;
+
+ the_block = next_block;
+ }
+
+ if (the_block != end) {
+ printf("PASS: %d !last block address isn't equal to 'final'\n", source);
+ error = 1;
+ }
+
+ if (_Heap_Block_size(the_block) != 0) {
+ printf("PASS: %d !last block's size isn't 0\n", source);
+ error = 1;
}
- if (error)
- abort();
+ if(do_dump && error)
+ abort();
+
+ return error == 0;
+
}
-#endif
+#endif /* defined(RTEMS_HEAP_DEBUG) */