summaryrefslogtreecommitdiffstats
path: root/cpukit
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2009-08-26 12:00:24 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2009-08-26 12:00:24 +0000
commit371cea31d3cffb1e1d2ae2e865d1e9a462a3fbd6 (patch)
treec407c96dda4ebe8580df10d35a12bc20dd8bc221 /cpukit
parentAdd mpfr_provided for openSUSE 11.0 + 11.1 (diff)
downloadrtems-371cea31d3cffb1e1d2ae2e865d1e9a462a3fbd6.tar.bz2
2009-08-24 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* libmisc/stackchk/check.c, rtems/src/regionreturnsegment.c, rtems/src/regiongetsegmentsize.c, src/heapalignupuptr.c, src/heapallocatealigned.c, src/heapallocate.c, src/heap.c, src/heapextend.c, src/heapfree.c, src/heapgetfreeinfo.c, src/heapgetinfo.c, src/heapresizeblock.c, src/heapsizeofuserarea.c, src/heapwalk.c, src/pheapgetblocksize.c, inline/rtems/score/heap.inl, include/rtems/score/heap.h: Overall cleanup. Changed all types for addresses, sizes, offsets and alignments to uintptr_t. Reformatted. Added variables for clarity. Renamed various objects. Enabled _HAssert() for all instances. More changes follow.
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/ChangeLog13
-rw-r--r--cpukit/libmisc/stackchk/check.c2
-rw-r--r--cpukit/rtems/src/regiongetsegmentsize.c2
-rw-r--r--cpukit/rtems/src/regionreturnsegment.c2
-rw-r--r--cpukit/score/include/rtems/score/heap.h481
-rw-r--r--cpukit/score/inline/rtems/score/heap.inl334
-rw-r--r--cpukit/score/src/heap.c242
-rw-r--r--cpukit/score/src/heapalignupuptr.c22
-rw-r--r--cpukit/score/src/heapallocate.c79
-rw-r--r--cpukit/score/src/heapallocatealigned.c78
-rw-r--r--cpukit/score/src/heapextend.c85
-rw-r--r--cpukit/score/src/heapfree.c130
-rw-r--r--cpukit/score/src/heapgetfreeinfo.c4
-rw-r--r--cpukit/score/src/heapgetinfo.c4
-rw-r--r--cpukit/score/src/heapresizeblock.c116
-rw-r--r--cpukit/score/src/heapsizeofuserarea.c91
-rw-r--r--cpukit/score/src/heapwalk.c18
-rw-r--r--cpukit/score/src/pheapgetblocksize.c2
18 files changed, 717 insertions, 988 deletions
diff --git a/cpukit/ChangeLog b/cpukit/ChangeLog
index 6525c23c25..1b31862e55 100644
--- a/cpukit/ChangeLog
+++ b/cpukit/ChangeLog
@@ -1,3 +1,16 @@
+2009-08-24 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
+
+ * libmisc/stackchk/check.c, rtems/src/regionreturnsegment.c,
+ rtems/src/regiongetsegmentsize.c, src/heapalignupuptr.c,
+ src/heapallocatealigned.c, src/heapallocate.c, src/heap.c,
+ src/heapextend.c, src/heapfree.c, src/heapgetfreeinfo.c,
+ src/heapgetinfo.c, src/heapresizeblock.c, src/heapsizeofuserarea.c,
+ src/heapwalk.c, src/pheapgetblocksize.c, inline/rtems/score/heap.inl,
+ include/rtems/score/heap.h: Overall cleanup. Changed all types for
+ addresses, sizes, offsets and alignments to uintptr_t. Reformatted.
+ Added variables for clarity. Renamed various objects. Enabled
+ _HAssert() for all instances. More changes follow.
+
2009-08-25 Joel Sherrill <joel.sherrill@OARcorp.com>
* libfs/src/devfs/devfs_eval.c: Fix bug where use of strncmp() resulted
diff --git a/cpukit/libmisc/stackchk/check.c b/cpukit/libmisc/stackchk/check.c
index c43788db36..316824516d 100644
--- a/cpukit/libmisc/stackchk/check.c
+++ b/cpukit/libmisc/stackchk/check.c
@@ -92,7 +92,7 @@ static inline bool Stack_check_Frame_pointer_in_range(
#else
#define Stack_check_Get_pattern_area( _the_stack ) \
- ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_OVERHEAD))
+ ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_LAST_BLOCK_OVERHEAD))
#define Stack_check_Calculate_used( _low, _size, _high_water) \
( ((char *)(_low) + (_size)) - (char *)(_high_water) )
diff --git a/cpukit/rtems/src/regiongetsegmentsize.c b/cpukit/rtems/src/regiongetsegmentsize.c
index fd5f3c50e6..203ce4c09a 100644
--- a/cpukit/rtems/src/regiongetsegmentsize.c
+++ b/cpukit/rtems/src/regiongetsegmentsize.c
@@ -64,7 +64,7 @@ rtems_status_code rtems_region_get_segment_size(
switch ( location ) {
case OBJECTS_LOCAL:
- if ( !_Heap_Size_of_user_area( &the_region->Memory, segment, size ) )
+ if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, size ) )
return_status = RTEMS_INVALID_ADDRESS;
else
return_status = RTEMS_SUCCESSFUL;
diff --git a/cpukit/rtems/src/regionreturnsegment.c b/cpukit/rtems/src/regionreturnsegment.c
index 0da5f1ca79..d6f0669fb8 100644
--- a/cpukit/rtems/src/regionreturnsegment.c
+++ b/cpukit/rtems/src/regionreturnsegment.c
@@ -72,7 +72,7 @@ rtems_status_code rtems_region_return_segment(
_Region_Debug_Walk( the_region, 3 );
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
- if ( !_Heap_Size_of_user_area( &the_region->Memory, segment, &size ) )
+ if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, &size ) )
return_status = RTEMS_INVALID_ADDRESS;
else {
memset( segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size );
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index 52b55fac70..fedddd20f3 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -1,23 +1,10 @@
/**
- * @file rtems/score/heap.h
- *
- * This include file contains the information pertaining to the Heap
- * Handler. A heap is a doubly linked list of variable size
- * blocks which are allocated using the first fit method. Garbage
- * collection is performed each time a block is returned to the heap by
- * coalescing neighbor blocks. Control information for both allocated
- * and unallocated blocks is contained in the heap space. A heap control
- * structure contains control information for the heap.
- *
- * FIXME: the alignment routines could be made faster should we require only
- * powers of two to be supported both for 'page_size' and for
- * 'alignment' arguments. However, both workspace and malloc heaps are
- * initialized with CPU_HEAP_ALIGNMENT as 'page_size', and while all
- * the BSPs seem to use CPU_ALIGNMENT (that is power of two) as
- * CPU_HEAP_ALIGNMENT, for whatever reason CPU_HEAP_ALIGNMENT is only
- * required to be multiple of CPU_ALIGNMENT and explicitly not
- * required to be a power of two.
+ * @file
*
+ * Heap Handler API.
+ */
+
+/*
* COPYRIGHT (c) 1989-2006.
* On-Line Applications Research Corporation (OAR).
*
@@ -31,104 +18,162 @@
#ifndef _RTEMS_SCORE_HEAP_H
#define _RTEMS_SCORE_HEAP_H
-/**
- * @defgroup ScoreHeap Heap Handler
- *
- * This handler encapsulates functionality which provides the foundation
- * Heap services used in all of the APIs supported by RTEMS.
- */
-/**@{*/
-
#ifdef __cplusplus
extern "C" {
#endif
/**
- * This type defines unsigned integer type to store 'void*'. Analog of C99
- * 'uintptr_t'. This should work on 16/32/64 bit architectures.
- *
- * FIXME: Something like this should better be defined by
- * 'rtems/score/types.h' and used here.
- */
-
-typedef uintptr_t _H_uptr_t;
-
-/**
- * Forward reference
- *
- * @ref Heap_Block
- */
-typedef struct Heap_Block_struct Heap_Block;
-
-/**
- * The following defines the data structure used to manage individual blocks
- * in a heap. When the block is allocated, the 'next' and 'prev' fields, as
- * well as 'prev_size' field of the next block, are not used by the heap
- * manager and thus the address returned for the block starts at the address
- * of the 'next' field and the size of the user accessible area includes the
- * size of the 'prev_size' field.
- *
- * @note The 'next' and 'prev' pointers are only valid when the block is free.
- * Caution must be taken to ensure that every block is large enough to
- * hold them and that they are not accessed while the block is actually
- * allocated (i.e., not free).
- *
- * @note The 'prev_size' field is only valid when HEAP_PREV_USED bit is clear
- * in the 'size' field indicating that previous block is not allocated.
- * If the bit is set, the 'prev_size' field is part of user-accessible
- * space of the previous allocated block and thus shouldn't be accessed
- * by the heap manager code. This trick allows to further decrease
- * overhead in the used blocks to the size of 'size' field (4 bytes).
- *
- */
-
-struct Heap_Block_struct {
- /** size of prev block (if prev block is free) */
- uint32_t prev_size;
- /** size of block in bytes and status of prev block */
- uint32_t size;
- /** pointer to the next free block */
- Heap_Block *next;
- /** pointer to the previous free block */
- Heap_Block *prev;
-};
-
-/**
- * This flag used in the 'size' field of each heap block to indicate
- * if previous block is free or in use. As sizes are always multiples of
- * 4, the 2 least significant bits would always be 0, and we use one of them
- * to store the flag.
- */
-
-#define HEAP_PREV_USED 1u /* indicates previous block is in use */
-
-/**
- * The following constants reflect various requirements of the
- * heap data structures which impact the management of a heap.
- */
-
-#define HEAP_MIN_BLOCK_SIZE (sizeof(Heap_Block))
-
-/**
- * Offset of the block header from the block pointer. Equal to the
- * offsetof(Heap_Block.size).
- */
-#define HEAP_BLOCK_HEADER_OFFSET (sizeof(uint32_t))
-
-/**
- * Offset of user data pointer from the block pointer. Equal to the
- * offset of(Heap_Block.next).
- */
-#define HEAP_BLOCK_USER_OFFSET (sizeof(uint32_t) * 2)
-
-/**
- * This is the number of bytes of overhead in a used block.
- * Equal to the sizeof(Heap_Block.previous and next).
- */
-#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uint32_t) * 2)
-
-/** Size of the permanent dummy last block. */
-#define HEAP_OVERHEAD HEAP_BLOCK_USER_OFFSET
+ * @defgroup ScoreHeap Heap Handler
+ *
+ * The Heap Handler provides a heap.
+ *
+ * A heap is a doubly linked list of variable size blocks which are allocated
+ * using the first fit method. Garbage collection is performed each time a
+ * block is returned to the heap by coalescing neighbor blocks. Control
+ * information for both allocated and free blocks is contained in the heap
+ * area. A heap control structure contains control information for the heap.
+ *
+ * FIXME: The alignment routines could be made faster should we require only
+ * powers of two to be supported both for 'page_size' and for 'alignment'
+ * arguments. However, both workspace and malloc heaps are initialized with
+ * CPU_HEAP_ALIGNMENT as 'page_size', and while all the BSPs seem to use
+ * CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever
+ * reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT
+ * and explicitly not required to be a power of two.
+ *
+ * There are two kinds of blocks. One sort describes a free block from which
+ * we can allocate memory. The other blocks are used and contain allocated
+ * memory. The free blocks are accessible via a list of free blocks.
+ *
+ * Free blocks look like:
+ * <table>
+ * <tr>
+ * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
+ * previous block is free, <br> otherwise it may contain data used by
+ * the previous block</td>
+ * </tr>
+ * <tr>
+ * <td>block size and a flag which indicates if the previous block is free
+ * or used, <br> this field contains always valid data regardless of the
+ * block usage</td>
+ * </tr>
+ * <tr><td>pointer to next block (this field is page size aligned)</td></tr>
+ * <tr><td>pointer to previous block</td></tr>
+ * <tr><td colspan=2>free space</td></tr>
+ * </table>
+ *
+ * Used blocks look like:
+ * <table>
+ * <tr>
+ * <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
+ * previous block is free,<br>otherwise it may contain data used by
+ * the previous block</td>
+ * </tr>
+ * <tr>
+ * <td>block size and a flag which indicates if the previous block is free
+ * or used, <br> this field contains always valid data regardless of the
+ * block usage</td>
+ * </tr>
+ * <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
+ * <tr><td>allocated space</td></tr>
+ * <tr><td colspan=2>allocated space</td></tr>
+ * </table>
+ *
+ * The heap area after initialization contains two blocks and looks like:
+ * <table>
+ * <tr><th>Label</th><th colspan=2>Content</th></tr>
+ * <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
+ * <tr><td>first_block->prev_size</td><td colspan=2>arbitrary value</td></tr>
+ * <tr>
+ * <td>first_block->size</td>
+ * <td colspan=2>size available for allocation
+ * | @c HEAP_PREV_BLOCK_USED</td>
+ * </tr>
+ * <tr>
+ * <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
+ * <td rowspan=3>memory area available for allocation</td>
+ * </tr>
+ * <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
+ * <tr><td>...</td></tr>
+ * <tr>
+ * <td>second_block->prev_size</td><td colspan=2>size of first block</td>
+ * </tr>
+ * <tr>
+ * <td>second_block->size</td>
+ * <td colspan=2>arbitrary size | @c HEAP_PREV_BLOCK_FREE</td>
+ * </tr>
+ * <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
+ * </table>
+ *
+ * @{
+ */
+
+/**
+ * @brief Description for free or used blocks.
+ */
+typedef struct Heap_Block {
+ /**
+ * @brief Size of the previous block or part of the allocated area of the
+ * previous block.
+ *
+ * This field is only valid if the previous block is free. This case is
+ * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
+ * @a size_and_flag field of the current block.
+ */
+ uintptr_t prev_size;
+
+ /**
+ * @brief Contains the size of the current block and a flag which indicates
+ * if the previous block is free or used.
+ *
+ * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
+ * used, otherwise the previous block is free. A used previous block may
+ * claim the @a prev_size field for allocation. This trick allows to
+ * decrease the overhead in the used blocks by the size of the
+ * @a prev_size field. As sizes are always multiples of four, the two least
+ * significant bits are always zero. We use one of them to store the flag.
+ *
+ * This field is always valid.
+ */
+ uintptr_t size_and_flag;
+
+ /**
+ * @brief Pointer to the next free block or part of the allocated area.
+ *
+ * This field is page size aligned and begins of the allocated area in case
+ * the block is used.
+ *
+ * This field is only valid if the block is free and thus part of the free
+ * block list.
+ */
+ struct Heap_Block *next;
+
+ /**
+ * @brief Pointer to the previous free block or part of the allocated area.
+ *
+ * This field is only valid if the block is free and thus part of the free
+ * block list.
+ */
+ struct Heap_Block *prev;
+} Heap_Block;
+
+#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
+
+#define HEAP_PREV_BLOCK_FREE ((uintptr_t) 0)
+
+/**
+ * @brief Offset from the block begin up to the block size field.
+ */
+#define HEAP_BLOCK_SIZE_OFFSET (sizeof(uintptr_t))
+
+/**
+ * @brief Offset from the block begin up to the allocated area begin.
+ */
+#define HEAP_BLOCK_ALLOC_AREA_OFFSET (sizeof(uintptr_t) * 2)
+
+#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uintptr_t) * 2)
+
+#define HEAP_LAST_BLOCK_OVERHEAD HEAP_BLOCK_ALLOC_AREA_OFFSET
/**
* Run-time heap statistics.
@@ -145,11 +190,11 @@ typedef struct {
/** instance number of this heap */
uint32_t instance;
/** the size of the memory for heap */
- intptr_t size;
+ uintptr_t size;
/** current free size */
- intptr_t free_size;
+ uintptr_t free_size;
/** minimum free size ever */
- intptr_t min_free_size;
+ uintptr_t min_free_size;
/** current number of free blocks */
uint32_t free_blocks;
/** maximum number of free blocks ever */
@@ -173,15 +218,15 @@ typedef struct {
*/
typedef struct {
/** head and tail of circular list of free blocks */
- Heap_Block free_list;
+ Heap_Block free_list;
/** allocation unit and alignment */
- uint32_t page_size;
+ uintptr_t page_size;
/** minimum block size aligned on page_size */
- uint32_t min_block_size;
+ uintptr_t min_block_size;
/** first address of memory for the heap */
- void *begin;
+ uintptr_t begin;
/** first address past end of memory for the heap */
- void *end;
+ uintptr_t end;
/** first valid block address in the heap */
Heap_Block *start;
/** last valid block address in the heap */
@@ -193,7 +238,6 @@ typedef struct {
/**
* Status codes for _Heap_Extend
*/
-
typedef enum {
HEAP_EXTEND_SUCCESSFUL,
HEAP_EXTEND_ERROR,
@@ -203,7 +247,6 @@ typedef enum {
/**
* Status codes for _Heap_Resize_block
*/
-
typedef enum {
HEAP_RESIZE_SUCCESSFUL,
HEAP_RESIZE_UNSATISFIED,
@@ -213,7 +256,6 @@ typedef enum {
/**
* Status codes for _Heap_Get_information
*/
-
typedef enum {
HEAP_GET_INFORMATION_SUCCESSFUL = 0,
HEAP_GET_INFORMATION_BLOCK_ERROR
@@ -244,33 +286,27 @@ typedef struct {
} Heap_Information_block;
/**
- * This routine initializes @a the_heap record to manage the
- * contiguous heap of @a size bytes which starts at @a starting_address.
- * Blocks of memory are allocated from the heap in multiples of
- * @a page_size byte units. If @a page_size is 0 or is not multiple of
- * CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
+ * Initializes the @a heap control block to manage the area starting at
+ * @a area_begin of @a area_size bytes.
*
- * @param[in] the_heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the memory for
- * the heap
- * @param[in] size is the size in bytes of the memory area for the heap
- * @param[in] page_size is the size in bytes of the allocation unit
+ * Blocks of memory are allocated from the heap in multiples of @a page_size
+ * byte units. If the @a page_size is equal to zero or is not multiple of
+ * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary.
*
- * @return This method returns the maximum memory available. If
- * unsuccessful, 0 will be returned.
+ * Returns the maximum memory available, or zero in case of failure.
*/
-uint32_t _Heap_Initialize(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- uint32_t page_size
+uintptr_t _Heap_Initialize(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size
);
/**
- * This routine grows @a the_heap memory area using the size bytes which
+ * This routine grows @a heap memory area using the size bytes which
* begin at @a starting_address.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] starting_address is the starting address of the memory
* to add to the heap
* @param[in] size is the size in bytes of the memory area to add
@@ -279,71 +315,68 @@ uint32_t _Heap_Initialize(
* @return *size filled in with the amount of memory added to the heap
*/
Heap_Extend_status _Heap_Extend(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- intptr_t *amount_extended
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t *amount_extended
);
/**
* This function attempts to allocate a block of @a size bytes from
- * @a the_heap. If insufficient memory is free in @a the_heap to allocate
+ * @a heap. If insufficient memory is free in @a heap to allocate
* a block of the requested size, then NULL is returned.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] size is the amount of memory to allocate in bytes
* @return NULL if unsuccessful and a pointer to the block if successful
*/
-void *_Heap_Allocate(
- Heap_Control *the_heap,
- intptr_t size
-);
+void *_Heap_Allocate( Heap_Control *heap, uintptr_t size );
/**
* This function attempts to allocate a memory block of @a size bytes from
- * @a the_heap so that the start of the user memory is aligned on the
+ * @a heap so that the start of the user memory is aligned on the
* @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
* Any other value of @a alignment is taken "as is", i.e., even odd
* alignments are possible.
* Returns pointer to the start of the memory block if success, NULL if
* failure.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] size is the amount of memory to allocate in bytes
* @param[in] alignment the required alignment
* @return NULL if unsuccessful and a pointer to the block if successful
*/
void *_Heap_Allocate_aligned(
- Heap_Control *the_heap,
- intptr_t size,
- uint32_t alignment
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment
);
/**
- * This function sets @a *size to the size of the block of user memory
+ * This function sets @a size to the size of the block of allocatable area
* which begins at @a starting_address. The size returned in @a *size could
* be greater than the size requested for allocation.
* Returns true if the @a starting_address is in the heap, and false
* otherwise.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] starting_address is the starting address of the user block
* to obtain the size of
* @param[in] size points to a user area to return the size in
* @return true if successfully able to determine the size, false otherwise
* @return *size filled in with the size of the user area for this block
*/
-bool _Heap_Size_of_user_area(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t *size
+bool _Heap_Size_of_alloc_area(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t *size
);
/**
* This function tries to resize in place the block that is pointed to by the
* @a starting_address to the new @a size.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] starting_address is the starting address of the user block
* to be resized
* @param[in] size is the new size
@@ -362,39 +395,36 @@ bool _Heap_Size_of_user_area(
* resizing, 0 if none.
*/
Heap_Resize_status _Heap_Resize_block(
- Heap_Control *the_heap,
+ Heap_Control *heap,
void *starting_address,
- intptr_t size,
- intptr_t *old_mem_size,
- intptr_t *avail_mem_size
+ uintptr_t size,
+ uintptr_t *old_mem_size,
+ uintptr_t *avail_mem_size
);
/**
* This routine returns the block of memory which begins
- * at @a starting_address to @a the_heap. Any coalescing which is
+ * at @a alloc_area_begin to @a heap. Any coalescing which is
* possible with the freeing of this routine is performed.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] start_address is the starting address of the user block
* to free
* @return true if successfully freed, false otherwise
*/
-bool _Heap_Free(
- Heap_Control *the_heap,
- void *start_address
-);
+bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin );
/**
* This routine walks the heap to verify its integrity.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] source is a user specified integer which may be used to
* indicate where in the application this was invoked from
* @param[in] do_dump is set to true if errors should be printed
* @return true if the test passed fine, false otherwise.
*/
bool _Heap_Walk(
- Heap_Control *the_heap,
+ Heap_Control *heap,
int source,
bool do_dump
);
@@ -403,13 +433,13 @@ bool _Heap_Walk(
* This routine walks the heap and tots up the free and allocated
* sizes.
*
- * @param[in] the_heap pointer to heap header
+ * @param[in] heap pointer to heap header
* @param[in] the_info pointer to a status information area
* @return *the_info is filled with status information
* @return 0=success, otherwise heap is corrupt.
*/
Heap_Get_information_status _Heap_Get_information(
- Heap_Control *the_heap,
+ Heap_Control *heap,
Heap_Information_block *the_info
);
@@ -417,107 +447,72 @@ Heap_Get_information_status _Heap_Get_information(
* This heap routine returns information about the free blocks
* in the specified heap.
*
- * @param[in] the_heap pointer to heap header.
+ * @param[in] heap pointer to heap header.
* @param[in] info pointer to the free block information.
*
* @return free block information filled in.
*/
void _Heap_Get_free_information(
- Heap_Control *the_heap,
+ Heap_Control *heap,
Heap_Information *info
);
#if !defined(__RTEMS_APPLICATION__)
-/**
- * A pointer to unsigned integer conversion.
- */
-#define _H_p2u(_p) ((_H_uptr_t)(_p))
-
#include <rtems/score/heap.inl>
/**
- * Convert user requested 'size' of memory block to the block size.
- *
- * @note This is an internal routine used by _Heap_Allocate() and
- * _Heap_Allocate_aligned(). Refer to 'heap.c' for details.
+ * @brief Returns the minimal block size for a block which may contain an area
+ * of size @a alloc_size for allocation, or zero in case of an overflow.
*
- * @param[in] size is the size of the block requested
- * @param[in] page_size is the page size of this heap instance
- * @param[in] min_size is minimum size block that should be allocated
- * from this heap instance
- *
- * @return This method returns block size on success, 0 if overflow occured.
+ * Uses the heap values @a page_size and @a min_block_size.
*/
-size_t _Heap_Calc_block_size(
- size_t size,
- uint32_t page_size,
- uint32_t min_size
+uintptr_t _Heap_Calc_block_size(
+ uintptr_t alloc_size,
+ uintptr_t page_size,
+ uintptr_t min_block_size
);
/**
* This method allocates a block of size @a alloc_size from @a the_block
- * belonging to @a the_heap. Split @a the_block if possible, otherwise
+ * belonging to @a heap. Split @a the_block if possible, otherwise
* allocate it entirely. When split, make the lower part used, and leave
* the upper part free.
*
* This is an internal routines used by _Heap_Allocate() and
* _Heap_Allocate_aligned(). Refer to 'heap.c' for details.
*
- * @param[in] the_heap is the heap to operate upon
+ * @param[in] heap is the heap to operate upon
* @param[in] the_block is the block to allocates the requested size from
* @param[in] alloc_size is the requested number of bytes to take out of
* the block
*
* @return This methods returns the size of the allocated block.
*/
-uint32_t _Heap_Block_allocate(
- Heap_Control* the_heap,
- Heap_Block* the_block,
- uint32_t alloc_size
+uintptr_t _Heap_Block_allocate(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_size
);
-/**
- * Align @a *value up to the nearest multiple of @a alignment.
- *
- * @param[in] value is a pointer to be aligned.
- * @param[in] alignment is the alignment value.
- *
- * @return Upon return, @a value will contain the aligned result.
- */
-void _Heap_Align_up_uptr (
- _H_uptr_t *value,
- uint32_t alignment
-);
-
-/*
- * Debug support
- */
+/** @} */
-#if defined(RTEMS_DEBUG)
-#define RTEMS_HEAP_DEBUG
+#ifdef RTEMS_DEBUG
+ #define RTEMS_HEAP_DEBUG
+ #define RTEMS_MALLOC_BOUNDARY_HELPERS
#endif
-/**
- * We do asserts only for heaps with instance number greater than 0 assuming
- * that the heap used for malloc is initialized first and thus has instance
- * number 0. Asserting malloc heap may lead to troubles as printf may invoke
- * malloc thus probably leading to infinite recursion.
- */
-#if defined(RTEMS_HEAP_DEBUG)
-#include <assert.h>
-
-#define _HAssert(cond_) \
- do { \
- if (the_heap->stats.instance && !(cond_)) \
- __assert(__FILE__, __LINE__, #cond_); \
- } while(0)
-
-#else /* !defined(RTEMS_HEAP_DEBUG) */
-
-#define _HAssert(cond_) ((void)0)
-
-#endif /* !defined(RTEMS_HEAP_DEBUG) */
+#ifdef RTEMS_HEAP_DEBUG
+ #include <assert.h>
+ #define _HAssert( cond ) \
+ do { \
+ if ( !(cond) ) { \
+ __assert( __FILE__, __LINE__, #cond ); \
+ } \
+ } while (0)
+#else
+ #define _HAssert( cond ) ((void) 0)
+#endif
#endif /* !defined(__RTEMS_APPLICATION__) */
@@ -525,7 +520,5 @@ void _Heap_Align_up_uptr (
}
#endif
-/**@}*/
-
#endif
/* end of include file */
diff --git a/cpukit/score/inline/rtems/score/heap.inl b/cpukit/score/inline/rtems/score/heap.inl
index bdbe40029a..7ac5649f28 100644
--- a/cpukit/score/inline/rtems/score/heap.inl
+++ b/cpukit/score/inline/rtems/score/heap.inl
@@ -1,8 +1,8 @@
/**
- * @file rtems/score/heap.inl
+ * @file
*
- * This file contains the static inline implementation of the inlined
- * routines from the heap handler.
+ * @brief Static inline implementations of the inlined routines from the heap
+ * handler.
*/
/*
@@ -23,348 +23,162 @@
#ifndef _RTEMS_SCORE_HEAP_INL
#define _RTEMS_SCORE_HEAP_INL
-/**
- * @addtogroup ScoreHeap
- * @{
- */
-
#include <rtems/score/address.h>
/**
- * This function returns the head of the specified heap.
+ * @addtogroup ScoreHeap
*
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns a pointer to the dummy head of the free
- * block list.
+ * @{
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Head (
- Heap_Control *the_heap
-)
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
{
- return &the_heap->free_list;
+ return &heap->free_list;
}
-/**
- * This function returns the tail of the specified heap.
- *
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns a pointer to the dummy tail of the heap
- * free list.
- */
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Tail (
- Heap_Control *the_heap
-)
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
{
- return &the_heap->free_list;
+ return &heap->free_list;
}
-/**
- * Return the first free block of the specified heap.
- *
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns a pointer to the first free block.
- */
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First (
- Heap_Control *the_heap
-)
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First_free_block( Heap_Control *heap )
{
- return _Heap_Head(the_heap)->next;
+ return _Heap_Free_list_head(heap)->next;
}
-/**
- * Return the last free block of the specified heap.
- *
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns a pointer to the last block on the free list.
- */
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last (
- Heap_Control *the_heap
-)
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last_free_block( Heap_Control *heap )
{
- return _Heap_Tail(the_heap)->prev;
+ return _Heap_Free_list_tail(heap)->prev;
}
-/**
- * This function removes 'the_block' from doubly-linked list.
- *
- * @param[in] the_block is the block to remove from the heap used block
- * list.
- */
-RTEMS_INLINE_ROUTINE void _Heap_Block_remove (
- Heap_Block *the_block
-)
+RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block )
{
- Heap_Block *block = the_block;
-
Heap_Block *next = block->next;
Heap_Block *prev = block->prev;
+
prev->next = next;
next->prev = prev;
}
-/**
- * This function replaces @a old_block by @a new_block in doubly-linked list.
- * When a block is allocated, the memory is allocated from the low memory
- * address range of the block. This means that the upper address range of
- * the memory block must be added to the free block list in place of the
- * lower address portion being allocated. This method is also used as part
- * of resizing a block.
- *
- * @param[in] old_block is the block which is currently on the list.
- * @param[in] new_block is the new block which will replace it on the list.
- */
-
-RTEMS_INLINE_ROUTINE void _Heap_Block_replace (
+RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list(
Heap_Block *old_block,
Heap_Block *new_block
)
{
- Heap_Block *block = old_block;
- Heap_Block *next = block->next;
- Heap_Block *prev = block->prev;
+ Heap_Block *next = old_block->next;
+ Heap_Block *prev = old_block->prev;
- block = new_block;
- block->next = next;
- block->prev = prev;
- next->prev = prev->next = block;
+ new_block->next = next;
+ new_block->prev = prev;
+
+ next->prev = new_block;
+ prev->next = new_block;
}
-/**
- * This function inserts @a the_block after @a prev_block
- * in the doubly-linked free block list.
- *
- * @param[in] prev_block is the previous block in the free list.
- * @param[in] the_block is the block being freed.
- */
-RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after (
+RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after(
Heap_Block *prev_block,
- Heap_Block *the_block
+ Heap_Block *new_block
)
{
- Heap_Block *prev = prev_block;
- Heap_Block *block = the_block;
+ Heap_Block *next = prev_block->next;
- Heap_Block *next = prev->next;
- block->next = next;
- block->prev = prev;
- next->prev = prev->next = block;
+ new_block->next = next;
+ new_block->prev = prev_block;
+ prev_block->next = new_block;
+ next->prev = new_block;
}
-/**
- * Return true if @a value is a multiple of @a alignment, false otherwise
- *
- * @param[in] value is the address to verify alignment of.
- * @param[in] alignment is the alignment factor to verify.
- *
- * @return This method returns true if the address is aligned and false
- * otherwise.
- */
-RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned (
- uint32_t value,
- uint32_t alignment
+RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
+ uintptr_t value,
+ uintptr_t alignment
)
{
return (value % alignment) == 0;
}
-/**
- * Align @a *value up to the nearest multiple of @a alignment.
- *
- * @param[in] value is a pointer to be aligned.
- * @param[in] alignment is the alignment value.
- *
- * @return Upon return, @a value will contain the aligned result.
- */
-RTEMS_INLINE_ROUTINE void _Heap_Align_up (
- uint32_t *value,
- uint32_t alignment
-)
-{
- uint32_t v = *value;
- uint32_t a = alignment;
- uint32_t r = v % a;
- *value = r ? v - r + a : v;
-}
-
-/**
- * Align @a *value down to the nearest multiple of @a alignment.
- *
- * @param[in] value is a pointer to be aligned.
- * @param[in] alignment is the alignment value.
- *
- * @return Upon return, @a value will contain the aligned result.
- */
-RTEMS_INLINE_ROUTINE void _Heap_Align_down (
- uint32_t *value,
- uint32_t alignment
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
+ uintptr_t value,
+ uintptr_t alignment
)
{
- uint32_t v = *value;
- *value = v - (v % alignment);
-}
+ uintptr_t remainder = value % alignment;
-/**
- * Return true if @a ptr is aligned at @a alignment boundary,
- * false otherwise.
- *
- * @param[in] ptr is the pointer to verify alignment of.
- * @param[in] alignment is the alignment factor.
- *
- * @return This method returns true if @a ptr is aligned at @a alignment
- * boundary, and false otherwise.
- */
-RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned_ptr (
- void *ptr,
- uint32_t alignment
-)
-{
- return (_H_p2u(ptr) % alignment) == 0;
+ if ( remainder != 0 ) {
+ return value - remainder + alignment;
+ } else {
+ return value;
+ }
}
-/**
- * Align @a *value down to the nearest multiple of @a alignment.
- *
- * @param[in] value is a pointer to be aligned.
- * @param[in] alignment is the alignment value.
- *
- * @return Upon return, @a value will contain the aligned result.
- */
-RTEMS_INLINE_ROUTINE void _Heap_Align_down_uptr (
- _H_uptr_t *value,
- uint32_t alignment
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
+ uintptr_t value,
+ uintptr_t alignment
)
{
- _H_uptr_t v = *value;
- *value = v - (v % alignment);
+ return value - (value % alignment);
}
/**
- * This function calculates and returns a block's location (address)
- * in the heap based upon a base address @a base and an @a offset.
- *
- * @param[in] base is the base address of the memory area.
- * @param[in] offset is the byte offset into @a base.
- *
- * @return This method returns a pointer to the block's address.
+ * @brief Returns the block which is @a offset away from @a block.
*/
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
- void *base,
- int32_t offset
+ const Heap_Block *block,
+ uintptr_t offset
)
{
- return (Heap_Block *) _Addresses_Add_offset( base, offset );
+ return (Heap_Block *) ((uintptr_t) block + offset);
}
/**
- * This function returns the starting address of the portion of @a the_block
- * which the user may access.
- *
- * @param[in] the_block is the heap block to find the user area of.
- *
- * @return This method returns a pointer to the start of the user area in
- * the block.
+ * @brief Returns the begin of the allocatable area of @a block.
*/
-RTEMS_INLINE_ROUTINE void *_Heap_User_area (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
+ Heap_Block *block
)
{
- return (void *) _Addresses_Add_offset ( the_block, HEAP_BLOCK_USER_OFFSET );
+ return (uintptr_t) block + HEAP_BLOCK_ALLOC_AREA_OFFSET;
}
/**
- * Fill @a *the_block with the address of the beginning of the block given
- * pointer to the user accessible area @a base.
- *
- * @param[in] the_heap points to the heap being operated upon
- * @param[in] base points to the user area in the block.
- * @param[in] the_block will be the address of the heap block.
- *
- * @return This method returns a pointer to the heap block based upon the
- * given heap and user pointer.
+ * @brief Returns the block associated with the allocatable area starting at
+ * @a alloc_area_begin inside a heap with a page size of @a page_size.
*/
-RTEMS_INLINE_ROUTINE void _Heap_Start_of_block (
- Heap_Control *the_heap,
- void *base,
- Heap_Block **the_block
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
+ uintptr_t alloc_area_begin,
+ uintptr_t page_size
)
{
- _H_uptr_t addr = _H_p2u(base);
- /* The address passed could be greater than the block address plus
- * HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
- * pointers. To get rid of this offset we need to align the address down
- * to the nearest 'page_size' boundary. */
- _Heap_Align_down_uptr ( &addr, the_heap->page_size );
- *the_block = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET);
+ return (Heap_Block *) (_Heap_Align_down( alloc_area_begin, page_size )
+ - HEAP_BLOCK_ALLOC_AREA_OFFSET);
}
-/**
- * This function returns true if the previous block of @a the_block
- * is in use, and false otherwise.
- *
- * @param[in] the_block is the block to operate upon.
- *
- * @return This method returns true if the previous block is used and false
- * if the previous block is free.
- */
-RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used (
- Heap_Block *the_block
-)
+RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( Heap_Block *block )
{
- return (the_block->size & HEAP_PREV_USED);
+ return block->size_and_flag & HEAP_PREV_BLOCK_USED;
}
-/**
- * This function returns the size of @a the_block in bytes.
- *
- * @param[in] the_block is the block to operate upon.
- *
- * @return This method returns the size of the specified heap block in bytes.
- */
-RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
- Heap_Block *the_block
-)
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( Heap_Block *block )
{
- return (the_block->size & ~HEAP_PREV_USED);
+ return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
}
-/**
- * This function returns true if @a the_block is within the memory area
- * managed by @a the_heap, and false otherwise.
- *
- * @param[in] the_heap points to the heap being operated upon
- * @param[in] the_block is the block address to check.
- *
- * @return This method returns true if @a the_block appears to have been
- * allocated from @a the_heap, and false otherwise.
- */
-RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in (
- Heap_Control *the_heap,
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
+ Heap_Control *heap,
+ Heap_Block *block
)
{
- return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
+ return _Addresses_Is_in_range( block, heap->start, heap->final );
}
/**
- * This function returns the maximum size of the heap.
- *
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns the total amount of memory
- * allocated to the heap.
+ * @brief Returns the maximum size of the heap.
*/
-RTEMS_INLINE_ROUTINE int32_t _Heap_Get_size (
- Heap_Control *the_heap
-)
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( Heap_Control *heap )
{
- return _Addresses_Subtract( the_heap->end, the_heap->begin );
+ return (uintptr_t) heap->end - (uintptr_t) heap->begin;
}
-/**@}*/
+/** @} */
#endif
/* end of include file */
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index df194a6dca..208197d5a3 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -27,8 +27,8 @@ static uint32_t instance = 0;
* This kernel routine initializes a heap.
*
* Input parameters:
- * the_heap - pointer to heap header
- * starting_address - starting address of heap
+ * heap - pointer to heap header
+ * area_begin - starting address of heap
* size - size of heap
* page_size - allocatable unit of memory
*
@@ -39,7 +39,7 @@ static uint32_t instance = 0;
* This is what a heap looks like in memory immediately after initialization:
*
*
- * +--------------------------------+ <- begin = starting_address
+ * +--------------------------------+ <- begin = area_begin
* | unused space due to alignment |
* | size < page_size |
* 0 +--------------------------------+ <- first block
@@ -72,7 +72,7 @@ static uint32_t instance = 0;
* be split so that upper part of it will become used block (see
* 'heapallocatealigned.c' for details).]
*
- * +--------------------------------+ <- begin = starting_address
+ * +--------------------------------+ <- begin = area_begin
* | unused space due to alignment |
* | size < page_size |
* 0 +--------------------------------+ <- used block
@@ -111,76 +111,68 @@ static uint32_t instance = 0;
*
*/
-uint32_t _Heap_Initialize(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- uint32_t page_size
+uintptr_t _Heap_Initialize(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size
)
{
- Heap_Block *the_block;
- uint32_t the_size;
- _H_uptr_t start;
- _H_uptr_t aligned_start;
- uint32_t overhead;
- Heap_Statistics *const stats = &the_heap->stats;
+ Heap_Statistics * const stats = &heap->stats;
+ uintptr_t heap_area_begin = (uintptr_t) area_begin;
+ uintptr_t heap_area_end = heap_area_begin + area_size;
+ uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ uintptr_t alloc_area_size = 0;
+ uintptr_t overhead = 0;
+ Heap_Block *first_block = NULL;
+ Heap_Block *second_block = NULL;
- if (page_size == 0)
+ if ( page_size == 0 ) {
page_size = CPU_ALIGNMENT;
- else
- _Heap_Align_up( &page_size, CPU_ALIGNMENT );
-
- /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
- (value of user pointer) is aligned on 'page_size' boundary. Make sure
- resulting 'aligned_start' is not below 'starting_address'. */
- start = _H_p2u(starting_address);
- aligned_start = start + HEAP_BLOCK_USER_OFFSET;
- _Heap_Align_up_uptr ( &aligned_start, page_size );
- aligned_start -= HEAP_BLOCK_USER_OFFSET;
-
- /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
- nearest multiple of 'page_size'. */
- the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
- _Heap_Align_up ( &the_heap->min_block_size, page_size );
+ } else {
+ page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
+ }
- /* Calculate 'the_size' -- size of the first block so that there is enough
- space at the end for the permanent last block. It is equal to 'size'
- minus total overhead aligned down to the nearest multiple of
- 'page_size'. */
- overhead = HEAP_OVERHEAD + (aligned_start - start);
- if ( size < overhead )
- return 0; /* Too small area for the heap */
- the_size = size - overhead;
- _Heap_Align_down ( &the_size, page_size );
- if ( the_size == 0 )
- return 0; /* Too small area for the heap */
+ heap->min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
- the_heap->page_size = page_size;
- the_heap->begin = starting_address;
- the_heap->end = starting_address + size;
+ alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
+ overhead = HEAP_LAST_BLOCK_OVERHEAD
+ + (alloc_area_begin - HEAP_BLOCK_ALLOC_AREA_OFFSET - heap_area_begin);
+ alloc_area_size = _Heap_Align_down ( area_size - overhead, page_size );
- the_block = (Heap_Block *) aligned_start;
+ if (
+ heap_area_end < heap_area_begin
+ || area_size < overhead
+ || alloc_area_size == 0
+ ) {
+ /* Invalid area or area too small */
+ return 0;
+ }
- the_block->prev_size = page_size;
- the_block->size = the_size | HEAP_PREV_USED;
- the_block->next = _Heap_Tail( the_heap );
- the_block->prev = _Heap_Head( the_heap );
- _Heap_Head(the_heap)->next = the_block;
- _Heap_Tail(the_heap)->prev = the_block;
- the_heap->start = the_block;
+ heap->page_size = page_size;
+ heap->begin = heap_area_begin;
+ heap->end = heap_area_end;
- _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
- _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
- _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
+ /* First block */
+ first_block = _Heap_Block_of_alloc_area( alloc_area_begin, page_size );
+ first_block->prev_size = page_size;
+ first_block->size_and_flag = alloc_area_size | HEAP_PREV_BLOCK_USED;
+ first_block->next = _Heap_Free_list_tail( heap );
+ first_block->prev = _Heap_Free_list_head( heap );
+ _Heap_Free_list_head( heap )->next = first_block;
+ _Heap_Free_list_tail( heap )->prev = first_block;
+ heap->start = first_block;
- the_block = _Heap_Block_at( the_block, the_size );
- the_heap->final = the_block; /* Permanent final block of the heap */
- the_block->prev_size = the_size; /* Previous block is free */
- the_block->size = page_size;
+ /* Second and last block */
+ second_block = _Heap_Block_at( first_block, alloc_area_size );
+ second_block->prev_size = alloc_area_size;
+ second_block->size_and_flag = page_size | HEAP_PREV_BLOCK_FREE;
+ heap->final = second_block;
- stats->size = size;
- stats->free_size = the_size;
- stats->min_free_size = the_size;
+ /* Statistics */
+ stats->size = area_size;
+ stats->free_size = alloc_area_size;
+ stats->min_free_size = alloc_area_size;
stats->free_blocks = 1;
stats->max_free_blocks = 1;
stats->used_blocks = 0;
@@ -191,78 +183,82 @@ uint32_t _Heap_Initialize(
stats->resizes = 0;
stats->instance = instance++;
- return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
-}
+ _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ));
+ _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ));
+ _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ));
+ _HAssert(
+ _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
+ );
+ _HAssert(
+ _Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
+ );
-/*PAGE
- *
- * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
- *
- * Note: there is no reason to put them into a separate file(s) as they are
- * always required for heap to be useful.
- */
+ return alloc_area_size;
+}
-/*
- * Convert user requested 'size' of memory block to the block size.
- * Return block size on success, 0 if overflow occured
- */
-size_t _Heap_Calc_block_size(
- size_t size,
- uint32_t page_size,
- uint32_t min_size)
+uintptr_t _Heap_Calc_block_size(
+ uintptr_t alloc_size,
+ uintptr_t page_size,
+ uintptr_t min_block_size)
{
- uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
- _Heap_Align_up(&block_size, page_size);
- if (block_size < min_size) block_size = min_size;
- /* 'block_size' becomes <= 'size' if and only if overflow occured. */
- return (block_size > size) ? block_size : 0;
+ uintptr_t block_size =
+ _Heap_Align_up( alloc_size + HEAP_BLOCK_USED_OVERHEAD, page_size );
+
+ if (block_size < min_block_size) {
+ block_size = min_block_size;
+ }
+
+ if (block_size > alloc_size) {
+ return block_size;
+ } else {
+    /* Integer overflow occurred */
+ return 0;
+ }
}
-/*
- * Allocate block of size 'alloc_size' from 'the_block' belonging to
- * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
- * When split, make the lower part used, and leave the upper part free.
- * Return the size of allocated block.
- */
-uint32_t _Heap_Block_allocate(
- Heap_Control* the_heap,
- Heap_Block* the_block,
- uint32_t alloc_size
+uintptr_t _Heap_Block_allocate(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_size
)
{
- Heap_Statistics *const stats = &the_heap->stats;
- uint32_t const block_size = _Heap_Block_size(the_block);
- uint32_t const the_rest = block_size - alloc_size;
+ Heap_Statistics * const stats = &heap->stats;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ uintptr_t const unused_size = block_size - alloc_size;
+ Heap_Block *next_block = _Heap_Block_at( block, block_size );
- _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
- _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
- _HAssert(alloc_size <= block_size);
- _HAssert(_Heap_Is_prev_used(the_block));
+ _HAssert( _Heap_Is_aligned( block_size, heap->page_size ));
+ _HAssert( _Heap_Is_aligned( alloc_size, heap->page_size ));
+ _HAssert( alloc_size <= block_size );
+ _HAssert( _Heap_Is_prev_used( block ));
- if(the_rest >= the_heap->min_block_size) {
- /* Split the block so that upper part is still free, and lower part
- becomes used. This is slightly less optimal than leaving lower part
- free as it requires replacing block in the free blocks list, but it
- makes it possible to reuse this code in the _Heap_Resize_block(). */
- Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
- _Heap_Block_replace(the_block, next_block);
- the_block->size = alloc_size | HEAP_PREV_USED;
- next_block->size = the_rest | HEAP_PREV_USED;
- _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
- }
- else {
- /* Don't split the block as remainder is either zero or too small to be
- used as a separate free block. Change 'alloc_size' to the size of the
- block and remove the block from the list of free blocks. */
- _Heap_Block_remove(the_block);
+ if (unused_size >= heap->min_block_size) {
+ /*
+ * Split the block so that the upper part is still free, and the lower part
+ * becomes used. This is slightly less optimal than leaving the lower part
+     * free as it requires replacing the block in the free blocks list, but it
+ * makes it possible to reuse this code in the _Heap_Resize_block().
+ */
+ Heap_Block *new_block = _Heap_Block_at( block, alloc_size );
+ block->size_and_flag = alloc_size | HEAP_PREV_BLOCK_USED;
+ new_block->size_and_flag = unused_size | HEAP_PREV_BLOCK_USED;
+ next_block->prev_size = unused_size;
+ _Heap_Block_replace_in_free_list( block, new_block );
+ } else {
+ next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
alloc_size = block_size;
- _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
- stats->free_blocks -= 1;
+ _Heap_Block_remove_from_free_list( block );
+
+ /* Statistics */
+ --stats->free_blocks;
}
- /* Update statistics */
+
+ /* Statistics */
+ ++stats->used_blocks;
stats->free_size -= alloc_size;
- if(stats->min_free_size > stats->free_size)
+ if(stats->min_free_size > stats->free_size) {
stats->min_free_size = stats->free_size;
- stats->used_blocks += 1;
+ }
+
return alloc_size;
}
diff --git a/cpukit/score/src/heapalignupuptr.c b/cpukit/score/src/heapalignupuptr.c
index dbfe0f83cb..966d5d8649 100644
--- a/cpukit/score/src/heapalignupuptr.c
+++ b/cpukit/score/src/heapalignupuptr.c
@@ -1,3 +1,4 @@
+#if 0
/*
* Heap Handler
*
@@ -23,18 +24,17 @@
* path explosion where it is used. This makes full test coverage more
* difficult.
*/
-void _Heap_Align_up_uptr (
- _H_uptr_t *value,
- uint32_t alignment
+uintptr_t _Heap_Align_up(
+ uintptr_t value,
+ uintptr_t alignment
)
{
- _H_uptr_t remainder;
- _H_uptr_t v = *value;
+ uintptr_t remainder = value % alignment;
- remainder = v % alignment;
-
- if ( remainder )
- *value = v - remainder + alignment;
+ if ( remainder != 0 ) {
+ return value - remainder + alignment;
+ } else {
+ return value;
+ }
}
-
-
+#endif
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 86164d209c..7c9e78f31e 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -19,63 +19,48 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Allocate
- *
- * This kernel routine allocates the requested size of memory
- * from the specified heap.
- *
- * Input parameters:
- * the_heap - pointer to heap header.
- * size - size in bytes of the memory block to allocate.
- *
- * Output parameters:
- * returns - starting address of memory block allocated
- */
-
-void *_Heap_Allocate(
- Heap_Control *the_heap,
- intptr_t size
-)
+void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
{
- uint32_t the_size;
- uint32_t search_count;
- Heap_Block *the_block;
- void *ptr = NULL;
- Heap_Statistics *const stats = &the_heap->stats;
- Heap_Block *const tail = _Heap_Tail(the_heap);
+ Heap_Statistics *const stats = &heap->stats;
+ Heap_Block * const tail = _Heap_Free_list_tail( heap );
+ Heap_Block *block = _Heap_First_free_block( heap );
+ uint32_t search_count = 0;
+ void *alloc_area_begin_ptr = NULL;
- the_size =
- _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
- if(the_size == 0)
+ size = _Heap_Calc_block_size( size, heap->page_size, heap->min_block_size );
+ if( size == 0 ) {
return NULL;
+ }
- /* Find large enough free block. */
- for(the_block = _Heap_First(the_heap), search_count = 0;
- the_block != tail;
- the_block = the_block->next, ++search_count)
- {
- /* As we always coalesce free blocks, prev block must have been used. */
- _HAssert(_Heap_Is_prev_used(the_block));
+ /*
+ * Find large enough free block.
+ *
+ * Do not bother to mask out the HEAP_PREV_BLOCK_USED bit as it will not
+ * change the result of the size comparison.
+ */
+ while (block != tail && block->size_and_flag < size) {
+ _HAssert( _Heap_Is_prev_used( block ));
- /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
- result of the comparison. */
- if(the_block->size >= the_size) {
- (void)_Heap_Block_allocate(the_heap, the_block, the_size );
+ block = block->next;
+ ++search_count;
+ }
- ptr = _Heap_User_area(the_block);
+ if (block != tail) {
+ _Heap_Block_allocate( heap, block, size );
- stats->allocs += 1;
- stats->searches += search_count + 1;
+ alloc_area_begin_ptr = (void *) _Heap_Alloc_area_of_block( block );
- _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
- break;
- }
+ _HAssert( _Heap_Is_aligned( (uintptr_t) alloc_area_begin_ptr, heap->page_size ));
+
+ /* Statistics */
+ ++stats->allocs;
+ stats->searches += search_count;
}
- if(stats->max_search < search_count)
+ /* Statistics */
+ if (stats->max_search < search_count) {
stats->max_search = search_count;
+ }
- return ptr;
+ return alloc_area_begin_ptr;
}
diff --git a/cpukit/score/src/heapallocatealigned.c b/cpukit/score/src/heapallocatealigned.c
index b0d63f9880..935c3509aa 100644
--- a/cpukit/score/src/heapallocatealigned.c
+++ b/cpukit/score/src/heapallocatealigned.c
@@ -25,19 +25,19 @@ static void
check_result(
Heap_Control *the_heap,
Heap_Block *the_block,
- _H_uptr_t user_addr,
- _H_uptr_t aligned_user_addr,
- intptr_t size
+ uintptr_t user_addr,
+ uintptr_t aligned_user_addr,
+ uintptr_t size
)
{
- _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
- _H_uptr_t const block_end = _H_p2u(the_block)
- + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
- _H_uptr_t const user_end = aligned_user_addr + size;
- _H_uptr_t const heap_start = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
- _H_uptr_t const heap_end = _H_p2u(the_heap->final)
- + HEAP_BLOCK_HEADER_OFFSET;
- uint32_t const page_size = the_heap->page_size;
+ uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
+ uintptr_t const block_end = the_block
+ + _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t const user_end = aligned_user_addr + size;
+ uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD;
+ uintptr_t const heap_end = (uintptr_t) the_heap->final
+ + HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t const page_size = the_heap->page_size;
_HAssert(user_addr == user_area);
_HAssert(aligned_user_addr - user_area < page_size);
@@ -74,12 +74,12 @@ static
Heap_Block *block_allocate(
Heap_Control *the_heap,
Heap_Block *the_block,
- intptr_t alloc_size
+ uintptr_t alloc_size
)
{
Heap_Statistics *const stats = &the_heap->stats;
- uint32_t const block_size = _Heap_Block_size(the_block);
- uint32_t const the_rest = block_size - alloc_size;
+ uintptr_t const block_size = _Heap_Block_size(the_block);
+ uintptr_t const the_rest = block_size - alloc_size;
_HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
_HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
@@ -89,20 +89,20 @@ Heap_Block *block_allocate(
if (the_rest >= the_heap->min_block_size) {
/* Split the block so that lower part is still free, and upper part
becomes used. */
- the_block->size = the_rest | HEAP_PREV_USED;
+ the_block->size_and_flag = the_rest | HEAP_PREV_BLOCK_USED;
the_block = _Heap_Block_at(the_block, the_rest);
the_block->prev_size = the_rest;
- the_block->size = alloc_size;
+ the_block->size_and_flag = alloc_size;
} else {
/* Don't split the block as remainder is either zero or too small to be
used as a separate free block. Change 'alloc_size' to the size of the
block and remove the block from the list of free blocks. */
- _Heap_Block_remove(the_block);
+ _Heap_Block_remove_from_free_list(the_block);
alloc_size = block_size;
stats->free_blocks -= 1;
}
/* Mark the block as used (in the next block). */
- _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
+ _Heap_Block_at(the_block, alloc_size)->size_and_flag |= HEAP_PREV_BLOCK_USED;
/* Update statistics */
stats->free_size -= alloc_size;
if (stats->min_free_size > stats->free_size)
@@ -132,21 +132,21 @@ Heap_Block *block_allocate(
void *_Heap_Allocate_aligned(
Heap_Control *the_heap,
- intptr_t size,
- uint32_t alignment
+ uintptr_t size,
+ uintptr_t alignment
)
{
- uint32_t search_count;
+ uintptr_t search_count;
Heap_Block *the_block;
void *user_ptr = NULL;
- uint32_t const page_size = the_heap->page_size;
+ uintptr_t const page_size = the_heap->page_size;
Heap_Statistics *const stats = &the_heap->stats;
- Heap_Block *const tail = _Heap_Tail(the_heap);
+ Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
- uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;
+ uintptr_t const end_to_user_offs = size - HEAP_BLOCK_SIZE_OFFSET;
- uint32_t const the_size =
+ uintptr_t const the_size =
_Heap_Calc_block_size(size, page_size, the_heap->min_block_size);
if (the_size == 0)
@@ -157,35 +157,34 @@ void *_Heap_Allocate_aligned(
/* Find large enough free block that satisfies the alignment requirements. */
- for (the_block = _Heap_First(the_heap), search_count = 0;
+ for (the_block = _Heap_First_free_block(the_heap), search_count = 0;
the_block != tail;
the_block = the_block->next, ++search_count)
{
- uint32_t const block_size = _Heap_Block_size(the_block);
+ uintptr_t const block_size = _Heap_Block_size(the_block);
/* As we always coalesce free blocks, prev block must have been used. */
_HAssert(_Heap_Is_prev_used(the_block));
if (block_size >= the_size) { /* the_block is large enough. */
- _H_uptr_t user_addr;
- _H_uptr_t aligned_user_addr;
- _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
+ uintptr_t user_addr;
+ uintptr_t aligned_user_addr;
+ uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
/* Calculate 'aligned_user_addr' that will become the user pointer we
return. It should be at least 'end_to_user_offs' bytes less than the
the 'block_end' and should be aligned on 'alignment' boundary.
Calculations are from the 'block_end' as we are going to split free
block so that the upper part of the block becomes used block. */
- _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
- aligned_user_addr = block_end - end_to_user_offs;
- _Heap_Align_down_uptr(&aligned_user_addr, alignment);
+ uintptr_t const block_end = (uintptr_t) the_block + block_size;
+ aligned_user_addr =
+ _Heap_Align_down(block_end - end_to_user_offs, alignment);
/* 'user_addr' is the 'aligned_user_addr' further aligned down to the
'page_size' boundary. We need it as blocks' user areas should begin
only at 'page_size' aligned addresses */
- user_addr = aligned_user_addr;
- _Heap_Align_down_uptr(&user_addr, page_size);
+ user_addr = _Heap_Align_down(aligned_user_addr, page_size);
/* Make sure 'user_addr' calculated didn't run out of 'the_block'. */
if (user_addr >= user_area) {
@@ -210,8 +209,7 @@ void *_Heap_Allocate_aligned(
/* The user pointer will be too far from 'user_addr'. See if we
can make 'aligned_user_addr' to be close enough to the
'user_addr'. */
- aligned_user_addr = user_addr;
- _Heap_Align_up_uptr(&aligned_user_addr, alignment);
+ aligned_user_addr = _Heap_Align_up(user_addr, alignment);
if (aligned_user_addr - user_addr >= page_size) {
/* No, we can't use the block */
continue;
@@ -221,10 +219,10 @@ void *_Heap_Allocate_aligned(
/* The block is indeed acceptable: calculate the size of the block
to be allocated and perform allocation. */
- uint32_t const alloc_size =
- block_end - user_addr + HEAP_BLOCK_USER_OFFSET;
+ uintptr_t const alloc_size =
+ block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET;
- _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));
+ _HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));
the_block = block_allocate(the_heap, the_block, alloc_size);
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index 340fc84db2..bb3f301235 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -19,38 +19,21 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Extend
- *
- * This routine grows the_heap memory area using the size bytes which
- * begin at starting_address.
- *
- * Input parameters:
- * the_heap - pointer to heap header.
- * starting_address - pointer to the memory area.
- * size - size in bytes of the memory block to allocate.
- *
- * Output parameters:
- * *amount_extended - amount of memory added to the_heap
- */
-
Heap_Extend_status _Heap_Extend(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- intptr_t *amount_extended
+ Heap_Control *heap,
+ void *area_begin_ptr,
+ uintptr_t area_size,
+ uintptr_t *amount_extended
)
{
- uint32_t the_size;
- Heap_Statistics *const stats = &the_heap->stats;
-
- /*
- * The overhead was taken from the original heap memory.
- */
-
- Heap_Block *old_final;
- Heap_Block *new_final;
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const area_begin = (uintptr_t) area_begin_ptr;
+ uintptr_t const heap_area_begin = heap->begin;
+ uintptr_t const heap_area_end = heap->end;
+ uintptr_t const new_heap_area_end = heap_area_end + area_size;
+ uintptr_t extend_size = 0;
+ Heap_Block *const old_final = heap->final;
+ Heap_Block *new_final = NULL;
/*
* There are five possibilities for the location of starting
@@ -65,13 +48,11 @@ Heap_Extend_status _Heap_Extend(
* As noted, this code only supports (4).
*/
- if ( starting_address >= the_heap->begin && /* case 3 */
- starting_address < the_heap->end
- )
- return HEAP_EXTEND_ERROR;
-
- if ( starting_address != the_heap->end )
- return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1, 2, and 5 */
+ if ( area_begin >= heap_area_begin && area_begin < heap_area_end ) {
+ return HEAP_EXTEND_ERROR; /* case 3 */
+ } else if ( area_begin != heap_area_end ) {
+ return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1, 2, and 5 */
+ }
/*
* Currently only case 4 should make it to this point.
@@ -79,26 +60,28 @@ Heap_Extend_status _Heap_Extend(
* block and free it.
*/
- old_final = the_heap->final;
- the_heap->end = _Addresses_Add_offset( the_heap->end, size );
- the_size = _Addresses_Subtract( the_heap->end, old_final ) - HEAP_OVERHEAD;
- _Heap_Align_down( &the_size, the_heap->page_size );
+ heap->end = new_heap_area_end;
+
+ extend_size = new_heap_area_end
+ - (uintptr_t) old_final - HEAP_LAST_BLOCK_OVERHEAD;
+ extend_size = _Heap_Align_down( extend_size, heap->page_size );
- *amount_extended = size;
+ *amount_extended = extend_size;
- if( the_size < the_heap->min_block_size )
- return HEAP_EXTEND_SUCCESSFUL;
+ if( extend_size >= heap->min_block_size ) {
+ old_final->size_and_flag = extend_size
+ | (old_final->size_and_flag & HEAP_PREV_BLOCK_USED);
+ new_final = _Heap_Block_at( old_final, extend_size );
+ new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
- old_final->size = the_size | (old_final->size & HEAP_PREV_USED);
- new_final = _Heap_Block_at( old_final, the_size );
- new_final->size = HEAP_PREV_USED;
- the_heap->final = new_final;
+ heap->final = new_final;
- stats->size += size;
- stats->used_blocks += 1;
- stats->frees -= 1; /* Don't count subsequent call as actual free() */
+ stats->size += area_size;
+ ++stats->used_blocks;
+ --stats->frees; /* Do not count subsequent call as actual free() */
- _Heap_Free( the_heap, _Heap_User_area( old_final ) );
+ _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( old_final ));
+ }
return HEAP_EXTEND_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 09bdd9d1eb..9d5be9e290 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -19,69 +19,51 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Free
- *
- * This kernel routine returns the memory designated by the
- * given heap and given starting address to the memory pool.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * starting_address - starting address of the memory block to free.
- *
- * Output parameters:
- * true - if starting_address is valid heap address
- * false - if starting_address is invalid heap address
- */
-
-bool _Heap_Free(
- Heap_Control *the_heap,
- void *starting_address
-)
+bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
{
- Heap_Block *the_block;
- Heap_Block *next_block;
- uint32_t the_size;
- uint32_t next_size;
- Heap_Statistics *const stats = &the_heap->stats;
- bool next_is_free;
-
- if ( !_Addresses_Is_in_range(
- starting_address, (void *)the_heap->start, (void *)the_heap->final ) ) {
- _HAssert(starting_address != NULL);
- return( false );
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
+ Heap_Block *block =
+ _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ Heap_Block *next_block = NULL;
+ uintptr_t block_size = 0;
+ uintptr_t next_block_size = 0;
+ bool next_is_free = false;
+
+ if (
+ !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final)
+ ) {
+ _HAssert( alloc_area_begin_ptr != NULL );
+ return false;
}
- _Heap_Start_of_block( the_heap, starting_address, &the_block );
-
- if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
+ if ( !_Heap_Is_block_in_heap( heap, block ) ) {
_HAssert( false );
- return( false );
+ return false;
}
- the_size = _Heap_Block_size( the_block );
- next_block = _Heap_Block_at( the_block, the_size );
+ block_size = _Heap_Block_size( block );
+ next_block = _Heap_Block_at( block, block_size );
- if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+ if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
_HAssert( false );
- return( false );
+ return false;
}
if ( !_Heap_Is_prev_used( next_block ) ) {
_HAssert( false );
- return( false );
+ return false;
}
- next_size = _Heap_Block_size( next_block );
- next_is_free = next_block < the_heap->final &&
- !_Heap_Is_prev_used(_Heap_Block_at(next_block, next_size));
+ next_block_size = _Heap_Block_size( next_block );
+ next_is_free = next_block != heap->final
+ && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
- if ( !_Heap_Is_prev_used( the_block ) ) {
- uint32_t const prev_size = the_block->prev_size;
- Heap_Block *const prev_block = _Heap_Block_at( the_block, -prev_size );
+ if ( !_Heap_Is_prev_used( block ) ) {
+ uintptr_t const prev_size = block->prev_size;
+ Heap_Block * const prev_block = _Heap_Block_at( block, -prev_size );
- if ( !_Heap_Is_block_in( the_heap, prev_block ) ) {
+ if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
_HAssert( false );
return( false );
}
@@ -94,44 +76,44 @@ bool _Heap_Free(
}
if ( next_is_free ) { /* coalesce both */
- uint32_t const size = the_size + prev_size + next_size;
- _Heap_Block_remove( next_block );
+ uintptr_t const size = block_size + prev_size + next_block_size;
+ _Heap_Block_remove_from_free_list( next_block );
stats->free_blocks -= 1;
- prev_block->size = size | HEAP_PREV_USED;
+ prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( prev_block, size );
_HAssert(!_Heap_Is_prev_used( next_block));
next_block->prev_size = size;
- }
- else { /* coalesce prev */
- uint32_t const size = the_size + prev_size;
- prev_block->size = size | HEAP_PREV_USED;
- next_block->size &= ~HEAP_PREV_USED;
+ } else { /* coalesce prev */
+ uintptr_t const size = block_size + prev_size;
+ prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
+ next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = size;
}
- }
- else if ( next_is_free ) { /* coalesce next */
- uint32_t const size = the_size + next_size;
- _Heap_Block_replace( next_block, the_block );
- the_block->size = size | HEAP_PREV_USED;
- next_block = _Heap_Block_at( the_block, size );
+ } else if ( next_is_free ) { /* coalesce next */
+ uintptr_t const size = block_size + next_block_size;
+ _Heap_Block_replace_in_free_list( next_block, block );
+ block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
+ next_block = _Heap_Block_at( block, size );
next_block->prev_size = size;
- }
- else { /* no coalesce */
- /* Add 'the_block' to the head of the free blocks list as it tends to
+ } else { /* no coalesce */
+ /* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
- _Heap_Block_insert_after( _Heap_Head( the_heap), the_block );
- the_block->size = the_size | HEAP_PREV_USED;
- next_block->size &= ~HEAP_PREV_USED;
- next_block->prev_size = the_size;
-
- stats->free_blocks += 1;
- if ( stats->max_free_blocks < stats->free_blocks )
+ _Heap_Block_insert_after( _Heap_Free_list_head( heap), block );
+ block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
+ next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
+ next_block->prev_size = block_size;
+
+ /* Statistics */
+ ++stats->free_blocks;
+ if ( stats->max_free_blocks < stats->free_blocks ) {
stats->max_free_blocks = stats->free_blocks;
+ }
}
- stats->used_blocks -= 1;
- stats->free_size += the_size;
- stats->frees += 1;
+ /* Statistics */
+ --stats->used_blocks;
+ ++stats->frees;
+ stats->free_size += block_size;
return( true );
}
diff --git a/cpukit/score/src/heapgetfreeinfo.c b/cpukit/score/src/heapgetfreeinfo.c
index b53931414b..a288529cad 100644
--- a/cpukit/score/src/heapgetfreeinfo.c
+++ b/cpukit/score/src/heapgetfreeinfo.c
@@ -40,13 +40,13 @@ void _Heap_Get_free_information(
)
{
Heap_Block *the_block;
- Heap_Block *const tail = _Heap_Tail(the_heap);
+ Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
info->number = 0;
info->largest = 0;
info->total = 0;
- for(the_block = _Heap_First(the_heap);
+ for(the_block = _Heap_First_free_block(the_heap);
the_block != tail;
the_block = the_block->next)
{
diff --git a/cpukit/score/src/heapgetinfo.c b/cpukit/score/src/heapgetinfo.c
index eb06e0af69..7f907170b9 100644
--- a/cpukit/score/src/heapgetinfo.c
+++ b/cpukit/score/src/heapgetinfo.c
@@ -41,7 +41,7 @@ Heap_Get_information_status _Heap_Get_information(
Heap_Block *the_block = the_heap->start;
Heap_Block *const end = the_heap->final;
- _HAssert(the_block->prev_size == HEAP_PREV_USED);
+ _HAssert(the_block->prev_size == the_heap->page_size);
_HAssert(_Heap_Is_prev_used(the_block));
the_info->Free.number = 0;
@@ -74,7 +74,7 @@ Heap_Get_information_status _Heap_Get_information(
* "used" as client never allocated it. Make 'Used.total' contain this
* blocks' overhead though.
*/
- the_info->Used.total += HEAP_OVERHEAD;
+ the_info->Used.total += HEAP_LAST_BLOCK_OVERHEAD;
return HEAP_GET_INFORMATION_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
index c744d4ff88..8916bbe12c 100644
--- a/cpukit/score/src/heapresizeblock.c
+++ b/cpukit/score/src/heapresizeblock.c
@@ -19,103 +19,79 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*
- * _Heap_Resize_block
- *
- * DESCRIPTION:
- *
- * This routine tries to resize in place the block that is pointed to by the
- * 'starting_address' to the new 'size'.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * starting_address - starting address of the memory block
- * size - new size
- *
- * Output parameters:
- * 'old_mem_size' - the size of the user memory area of the block before
- * resizing.
- * 'avail_mem_size' - the size of the user memory area of the free block
- * that has been enlarged or created due to resizing,
- * 0 if none.
- * Returns
- * HEAP_RESIZE_SUCCESSFUL - if success
- * HEAP_RESIZE_UNSATISFIED - if the block can't be resized in place
- * HEAP_RESIZE_FATAL_ERROR - if failure
- */
-
Heap_Resize_status _Heap_Resize_block(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- intptr_t *old_mem_size,
- intptr_t *avail_mem_size
+ Heap_Control *heap,
+ void *alloc_area_begin_ptr,
+ uintptr_t size,
+ uintptr_t *old_mem_size,
+ uintptr_t *avail_mem_size
)
{
- Heap_Block *the_block;
+ uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
+ Heap_Block *block;
Heap_Block *next_block;
- uint32_t next_block_size;
+ uintptr_t next_block_size;
bool next_is_used;
Heap_Block *next_next_block;
- uint32_t old_block_size;
- uint32_t old_user_size;
- uint32_t prev_used_flag;
- Heap_Statistics *const stats = &the_heap->stats;
- uint32_t const min_block_size = the_heap->min_block_size;
- uint32_t const page_size = the_heap->page_size;
+ uintptr_t old_block_size;
+ uintptr_t old_user_size;
+ uintptr_t prev_used_flag;
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const min_block_size = heap->min_block_size;
+ uintptr_t const page_size = heap->page_size;
*old_mem_size = 0;
*avail_mem_size = 0;
- _Heap_Start_of_block(the_heap, starting_address, &the_block);
- _HAssert(_Heap_Is_block_in(the_heap, the_block));
- if (!_Heap_Is_block_in(the_heap, the_block))
+ block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size);
+ _HAssert(_Heap_Is_block_in_heap(heap, block));
+ if (!_Heap_Is_block_in_heap(heap, block))
return HEAP_RESIZE_FATAL_ERROR;
- prev_used_flag = the_block->size & HEAP_PREV_USED;
- old_block_size = _Heap_Block_size(the_block);
- next_block = _Heap_Block_at(the_block, old_block_size);
+ prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+ old_block_size = _Heap_Block_size(block);
+ next_block = _Heap_Block_at(block, old_block_size);
- _HAssert(_Heap_Is_block_in(the_heap, next_block));
+ _HAssert(_Heap_Is_block_in_heap(heap, next_block));
_HAssert(_Heap_Is_prev_used(next_block));
- if ( !_Heap_Is_block_in(the_heap, next_block) ||
+ if ( !_Heap_Is_block_in_heap(heap, next_block) ||
!_Heap_Is_prev_used(next_block))
return HEAP_RESIZE_FATAL_ERROR;
next_block_size = _Heap_Block_size(next_block);
next_next_block = _Heap_Block_at(next_block, next_block_size);
- next_is_used = (next_block == the_heap->final) ||
+ next_is_used = (next_block == heap->final) ||
_Heap_Is_prev_used(next_next_block);
- /* See _Heap_Size_of_user_area() source for explanations */
- old_user_size = _Addresses_Subtract(next_block, starting_address)
- + HEAP_BLOCK_HEADER_OFFSET;
+ /* See _Heap_Size_of_alloc_area() source for explanations */
+ old_user_size = (uintptr_t) next_block - alloc_area_begin
+ + HEAP_BLOCK_SIZE_OFFSET;
*old_mem_size = old_user_size;
if (size > old_user_size) {
/* Need to extend the block: allocate part of the next block and then
- merge 'the_block' and allocated block together. */
+ merge 'block' and allocated block together. */
if (next_is_used) /* Next block is in use, -- no way to extend */
return HEAP_RESIZE_UNSATISFIED;
else {
- uint32_t add_block_size = size - old_user_size;
- _Heap_Align_up(&add_block_size, page_size);
+ uintptr_t add_block_size =
+ _Heap_Align_up(size - old_user_size, page_size);
if (add_block_size < min_block_size)
add_block_size = min_block_size;
if (add_block_size > next_block_size)
return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
add_block_size =
- _Heap_Block_allocate(the_heap, next_block, add_block_size);
+ _Heap_Block_allocate(heap, next_block, add_block_size);
/* Merge two subsequent blocks */
- the_block->size = (old_block_size + add_block_size) | prev_used_flag;
+ block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
--stats->used_blocks;
}
} else {
/* Calculate how much memory we could free */
- uint32_t free_block_size = old_user_size - size;
- _Heap_Align_down(&free_block_size, page_size);
+ uintptr_t free_block_size =
+ _Heap_Align_down(old_user_size - size, page_size);
if (free_block_size > 0) {
@@ -123,10 +99,10 @@ Heap_Resize_status _Heap_Resize_block(
can hold 'size' user bytes and still remain not shorter than
'min_block_size'. */
- uint32_t new_block_size = old_block_size - free_block_size;
+ uintptr_t new_block_size = old_block_size - free_block_size;
if (new_block_size < min_block_size) {
- uint32_t delta = min_block_size - new_block_size;
+ uintptr_t delta = min_block_size - new_block_size;
_HAssert(free_block_size >= delta);
free_block_size -= delta;
if (free_block_size == 0) {
@@ -144,25 +120,25 @@ Heap_Resize_status _Heap_Resize_block(
if (!next_is_used) {
/* Extend the next block to the low addresses by 'free_block_size' */
Heap_Block *const new_next_block =
- _Heap_Block_at(the_block, new_block_size);
- uint32_t const new_next_block_size =
+ _Heap_Block_at(block, new_block_size);
+ uintptr_t const new_next_block_size =
next_block_size + free_block_size;
- _HAssert(_Heap_Is_block_in(the_heap, next_next_block));
- the_block->size = new_block_size | prev_used_flag;
- new_next_block->size = new_next_block_size | HEAP_PREV_USED;
+ _HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
+ block->size_and_flag = new_block_size | prev_used_flag;
+ new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
- _Heap_Block_replace(next_block, new_next_block);
- the_heap->stats.free_size += free_block_size;
+ _Heap_Block_replace_in_free_list(next_block, new_next_block);
+ heap->stats.free_size += free_block_size;
*avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
} else if (free_block_size >= min_block_size) {
/* Split the block into 2 used parts, then free the second one. */
- the_block->size = new_block_size | prev_used_flag;
- next_block = _Heap_Block_at(the_block, new_block_size);
- next_block->size = free_block_size | HEAP_PREV_USED;
+ block->size_and_flag = new_block_size | prev_used_flag;
+ next_block = _Heap_Block_at(block, new_block_size);
+ next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
++stats->used_blocks; /* We have created used block */
--stats->frees; /* Don't count next call in stats */
- _Heap_Free(the_heap, _Heap_User_area(next_block));
+ _Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
*avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
}
}
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index a9a276fabc..be51255eee 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -19,69 +19,54 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Size_of_user_area
- *
- * This kernel routine sets '*size' to the size of the block of memory
- * which begins at 'starting_address'.
- * It returns true if the 'starting_address' is in the heap, and false
- * otherwise.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * starting_address - starting address of the memory block
- * size - pointer to size of area
- *
- * Output parameters:
- * size - size of area filled in
- * true - if starting_address is valid heap address
- * false - if starting_address is invalid heap address
- */
-
-bool _Heap_Size_of_user_area(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t *size
+bool _Heap_Size_of_alloc_area(
+ Heap_Control *heap,
+ void *alloc_area_begin_ptr,
+ uintptr_t *size
)
{
- Heap_Block *the_block;
- Heap_Block *next_block;
- uint32_t the_size;
+ uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
+ Heap_Block *block =
+ _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ Heap_Block *next_block = NULL;
+ uintptr_t block_size = 0;
- if ( !_Addresses_Is_in_range(
- starting_address, (void *)the_heap->start, (void *)the_heap->final ) )
- return( false );
+ if (
+ !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final )
+ ) {
+ return false;
+ }
- _Heap_Start_of_block( the_heap, starting_address, &the_block );
- _HAssert(_Heap_Is_block_in( the_heap, the_block ));
- if ( !_Heap_Is_block_in( the_heap, the_block ) )
- return( false );
+ _HAssert(_Heap_Is_block_in_heap( heap, block ));
+ if ( !_Heap_Is_block_in_heap( heap, block ) ) {
+ return false;
+ }
- the_size = _Heap_Block_size( the_block );
- next_block = _Heap_Block_at( the_block, the_size );
+ block_size = _Heap_Block_size( block );
+ next_block = _Heap_Block_at( block, block_size );
- _HAssert(_Heap_Is_block_in( the_heap, next_block ));
- _HAssert(_Heap_Is_prev_used( next_block ));
+ _HAssert( _Heap_Is_block_in_heap( heap, next_block ));
+ _HAssert( _Heap_Is_prev_used( next_block ));
if (
- !_Heap_Is_block_in( the_heap, next_block ) ||
+ !_Heap_Is_block_in_heap( heap, next_block ) ||
!_Heap_Is_prev_used( next_block )
- )
- return( false );
-
- /* 'starting_address' could be greater than 'the_block' address plus
- HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
- pointers. To get rid of this offset we calculate user size as difference
- between the end of 'the_block' (='next_block') and 'starting_address'
- and then add correction equal to the offset of the 'size' field of the
- 'Heap_Block' structure. The correction is due to the fact that
- 'prev_size' field of the next block is actually used as user accessible
- area of 'the_block'. */
+ ) {
+ return false;
+ }
- *size = _Addresses_Subtract ( next_block, starting_address )
- + HEAP_BLOCK_HEADER_OFFSET;
+ /*
+ * 'alloc_area_begin' could be greater than 'block' address plus
+ * HEAP_BLOCK_ALLOC_AREA_OFFSET as _Heap_Allocate_aligned() may produce such
+ * user pointers. To get rid of this offset we calculate user size as
+ * difference between the end of 'block' (='next_block') and
+ * 'alloc_area_begin' and then add correction equal to the offset of the
+ * 'size' field of the 'Heap_Block' structure. The correction is due to the
+ * fact that 'prev_size' field of the next block is actually used as user
+ * accessible area of 'block'.
+ */
+ *size = (uintptr_t) next_block - alloc_area_begin + HEAP_BLOCK_SIZE_OFFSET;
- return( true );
+ return true;
}
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index 69782bf4e8..a6628df0b3 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -60,11 +60,14 @@ bool _Heap_Walk(
{
Heap_Block *the_block = the_heap->start;
Heap_Block *const end = the_heap->final;
- Heap_Block *const tail = _Heap_Tail(the_heap);
+ Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
int error = 0;
int passes = 0;
+ /* FIXME: Why is this disabled? */
do_dump = false;
+
+ /* FIXME: Why is this disabled? */
/*
* We don't want to allow walking the heap until we have
* transferred control to the user task so we watch the
@@ -76,13 +79,14 @@ bool _Heap_Walk(
return true;
*/
+ /* FIXME: Reason for this? */
if (source < 0)
- source = the_heap->stats.instance;
+ source = (int) the_heap->stats.instance;
- if (do_dump == true)
+ if (do_dump)
printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
source, the_block, end,
- _Heap_First(the_heap), _Heap_Last(the_heap),
+ _Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
the_heap->begin, the_heap->end);
/*
@@ -90,7 +94,7 @@ bool _Heap_Walk(
*/
if (!_Heap_Is_prev_used(the_block)) {
- printk("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
+ printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
error = 1;
}
@@ -136,7 +140,7 @@ bool _Heap_Walk(
}
{ /* Check if 'the_block' is in the free block list */
- Heap_Block* block = _Heap_First(the_heap);
+ Heap_Block* block = _Heap_First_free_block(the_heap);
if (!_Addresses_Is_aligned(block) ) {
printk(
"PASS: %d first free block %p is not aligned\n", source, block);
@@ -150,7 +154,7 @@ bool _Heap_Walk(
error = 1;
break;
}
- if (!_Heap_Is_block_in(the_heap, block)) {
+ if (!_Heap_Is_block_in_heap(the_heap, block)) {
printk("PASS: %d a free block %p is not in heap\n", source, block);
error = 1;
break;
diff --git a/cpukit/score/src/pheapgetblocksize.c b/cpukit/score/src/pheapgetblocksize.c
index 0e6cd80ea5..21727d77ac 100644
--- a/cpukit/score/src/pheapgetblocksize.c
+++ b/cpukit/score/src/pheapgetblocksize.c
@@ -25,7 +25,7 @@ bool _Protected_heap_Get_block_size(
bool status;
_RTEMS_Lock_allocator();
- status = _Heap_Size_of_user_area( the_heap, starting_address, size );
+ status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );
_RTEMS_Unlock_allocator();
return status;
}