summaryrefslogtreecommitdiffstats
path: root/cpukit
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2009-09-06 15:24:08 +0000
commitdea3eccb38b556b04552219e00b7abd656587278 (patch)
tree6affcb3026172273e366ee15ed3e8ec70f023a20 /cpukit
parentRegenerate. (diff)
downloadrtems-dea3eccb38b556b04552219e00b7abd656587278.tar.bz2
2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* libcsupport/src/free.c, libmisc/stackchk/check.c, rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c, rtems/src/regionextend.c, rtems/src/regiongetinfo.c, rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c, rtems/src/regionresizesegment.c, score/src/pheapallocate.c, score/src/pheapallocatealigned.c, score/src/pheapextend.c, score/src/pheapfree.c, score/src/pheapgetblocksize.c, score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c, score/src/pheapgetsize.c, score/src/pheapinit.c, score/src/pheapresizeblock.c, score/src/pheapwalk.c: Update for heap API changes. * score/include/rtems/score/apimutex.h, score/include/rtems/score/object.h: Documentation. * score/include/rtems/score/heap.h, score/include/rtems/score/protectedheap.h, score/inline/rtems/score/heap.inl, score/src/heap.c, score/src/heapallocate.c, score/src/heapallocatealigned.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c, score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c, score/src/heapwalk.c: Overall cleanup. Added boundary constraint to allocation function. More changes follow.
Diffstat (limited to 'cpukit')
-rw-r--r--cpukit/ChangeLog25
-rw-r--r--cpukit/libcsupport/src/free.c4
-rw-r--r--cpukit/libmisc/stackchk/check.c2
-rw-r--r--cpukit/rtems/include/rtems/rtems/region.h24
-rw-r--r--cpukit/rtems/src/regioncreate.c4
-rw-r--r--cpukit/rtems/src/regionextend.c4
-rw-r--r--cpukit/rtems/src/regiongetinfo.c7
-rw-r--r--cpukit/rtems/src/regiongetsegment.c2
-rw-r--r--cpukit/rtems/src/regiongetsegmentsize.c2
-rw-r--r--cpukit/rtems/src/regionresizesegment.c8
-rw-r--r--cpukit/score/include/rtems/score/apimutex.h124
-rw-r--r--cpukit/score/include/rtems/score/heap.h458
-rw-r--r--cpukit/score/include/rtems/score/object.h6
-rw-r--r--cpukit/score/include/rtems/score/protectedheap.h204
-rw-r--r--cpukit/score/inline/rtems/score/heap.inl73
-rw-r--r--cpukit/score/src/heap.c240
-rw-r--r--cpukit/score/src/heapallocate.c214
-rw-r--r--cpukit/score/src/heapallocatealigned.c12
-rw-r--r--cpukit/score/src/heapextend.c22
-rw-r--r--cpukit/score/src/heapfree.c32
-rw-r--r--cpukit/score/src/heapgetfreeinfo.c27
-rw-r--r--cpukit/score/src/heapgetinfo.c36
-rw-r--r--cpukit/score/src/heapresizeblock.c204
-rw-r--r--cpukit/score/src/heapsizeofuserarea.c46
-rw-r--r--cpukit/score/src/heapwalk.c581
-rw-r--r--cpukit/score/src/pheapallocate.c10
-rw-r--r--cpukit/score/src/pheapallocatealigned.c12
-rw-r--r--cpukit/score/src/pheapextend.c12
-rw-r--r--cpukit/score/src/pheapfree.c8
-rw-r--r--cpukit/score/src/pheapgetblocksize.c12
-rw-r--r--cpukit/score/src/pheapgetfreeinfo.c8
-rw-r--r--cpukit/score/src/pheapgetinfo.c17
-rw-r--r--cpukit/score/src/pheapgetsize.c10
-rw-r--r--cpukit/score/src/pheapinit.c8
-rw-r--r--cpukit/score/src/pheapresizeblock.c14
-rw-r--r--cpukit/score/src/pheapwalk.c8
36 files changed, 1504 insertions, 976 deletions
diff --git a/cpukit/ChangeLog b/cpukit/ChangeLog
index 3d5c86ed0e..01e47b9377 100644
--- a/cpukit/ChangeLog
+++ b/cpukit/ChangeLog
@@ -1,3 +1,28 @@
+2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
+
+ * libcsupport/src/free.c, libmisc/stackchk/check.c,
+ rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c,
+ rtems/src/regionextend.c, rtems/src/regiongetinfo.c,
+ rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c,
+ rtems/src/regionresizesegment.c, score/src/pheapallocate.c,
+ score/src/pheapallocatealigned.c, score/src/pheapextend.c,
+ score/src/pheapfree.c, score/src/pheapgetblocksize.c,
+ score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c,
+ score/src/pheapgetsize.c, score/src/pheapinit.c,
+ score/src/pheapresizeblock.c, score/src/pheapwalk.c:
+ Update for heap API changes.
+ * score/include/rtems/score/apimutex.h,
+ score/include/rtems/score/object.h: Documentation.
+ * score/include/rtems/score/heap.h,
+ score/include/rtems/score/protectedheap.h,
+ score/inline/rtems/score/heap.inl, score/src/heap.c,
+ score/src/heapallocate.c, score/src/heapallocatealigned.c,
+ score/src/heapextend.c, score/src/heapfree.c,
+ score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c,
+ score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c,
+ score/src/heapwalk.c: Overall cleanup. Added boundary constraint to
+ allocation function. More changes follow.
+
2009-09-04 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* rtems/src/taskmode.c, sapi/src/exshutdown.c,
diff --git a/cpukit/libcsupport/src/free.c b/cpukit/libcsupport/src/free.c
index 690d9d8903..aa7638e759 100644
--- a/cpukit/libcsupport/src/free.c
+++ b/cpukit/libcsupport/src/free.c
@@ -59,8 +59,8 @@ void free(
if ( !_Protected_heap_Free( RTEMS_Malloc_Heap, ptr ) ) {
printk( "Program heap: free of bad pointer %p -- range %p - %p \n",
ptr,
- RTEMS_Malloc_Heap->begin,
- RTEMS_Malloc_Heap->end
+ RTEMS_Malloc_Heap->area_begin,
+ RTEMS_Malloc_Heap->area_end
);
}
diff --git a/cpukit/libmisc/stackchk/check.c b/cpukit/libmisc/stackchk/check.c
index 4cc1320610..4b6d0a6a68 100644
--- a/cpukit/libmisc/stackchk/check.c
+++ b/cpukit/libmisc/stackchk/check.c
@@ -92,7 +92,7 @@ static inline bool Stack_check_Frame_pointer_in_range(
#else
#define Stack_check_Get_pattern_area( _the_stack ) \
- ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_LAST_BLOCK_OVERHEAD))
+ ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_BLOCK_HEADER_SIZE))
#define Stack_check_Calculate_used( _low, _size, _high_water) \
( ((char *)(_low) + (_size)) - (char *)(_high_water) )
diff --git a/cpukit/rtems/include/rtems/rtems/region.h b/cpukit/rtems/include/rtems/rtems/region.h
index 938be4e77d..905a580ec1 100644
--- a/cpukit/rtems/include/rtems/rtems/region.h
+++ b/cpukit/rtems/include/rtems/rtems/region.h
@@ -69,9 +69,9 @@ typedef struct {
Objects_Control Object;
Thread_queue_Control Wait_queue; /* waiting threads */
void *starting_address; /* physical start addr */
- intptr_t length; /* physical length(bytes) */
- uint32_t page_size; /* in bytes */
- intptr_t maximum_segment_size; /* in bytes */
+ uintptr_t length; /* physical length(bytes) */
+ uintptr_t page_size; /* in bytes */
+ uintptr_t maximum_segment_size; /* in bytes */
rtems_attribute attribute_set;
uint32_t number_of_used_blocks; /* blocks allocated */
Heap_Control Memory;
@@ -104,9 +104,9 @@ void _Region_Manager_initialization(void);
rtems_status_code rtems_region_create(
rtems_name name,
void *starting_address,
- intptr_t length,
- uint32_t page_size,
- rtems_attribute attribute_set,
+ uintptr_t length,
+ uintptr_t page_size,
+ rtems_attribute attribute_set,
Objects_Id *id
);
@@ -121,7 +121,7 @@ rtems_status_code rtems_region_create(
rtems_status_code rtems_region_extend(
Objects_Id id,
void *starting_address,
- intptr_t length
+ uintptr_t length
);
/**
@@ -184,10 +184,10 @@ rtems_status_code rtems_region_delete(
*/
rtems_status_code rtems_region_get_segment(
Objects_Id id,
- intptr_t size,
+ uintptr_t size,
rtems_option option_set,
rtems_interval timeout,
- void **segment
+ void **segment
);
/**
@@ -199,7 +199,7 @@ rtems_status_code rtems_region_get_segment(
rtems_status_code rtems_region_get_segment_size(
Objects_Id id,
void *segment,
- intptr_t *size
+ uintptr_t *size
);
/**
@@ -241,8 +241,8 @@ rtems_status_code rtems_region_return_segment(
rtems_status_code rtems_region_resize_segment(
Objects_Id id,
void *segment,
- intptr_t size,
- intptr_t *old_size
+ uintptr_t size,
+ uintptr_t *old_size
);
#ifndef __RTEMS_APPLICATION__
diff --git a/cpukit/rtems/src/regioncreate.c b/cpukit/rtems/src/regioncreate.c
index 98c29bb94a..66beec13fd 100644
--- a/cpukit/rtems/src/regioncreate.c
+++ b/cpukit/rtems/src/regioncreate.c
@@ -50,8 +50,8 @@
rtems_status_code rtems_region_create(
rtems_name name,
void *starting_address,
- intptr_t length,
- uint32_t page_size,
+ uintptr_t length,
+ uintptr_t page_size,
rtems_attribute attribute_set,
Objects_Id *id
)
diff --git a/cpukit/rtems/src/regionextend.c b/cpukit/rtems/src/regionextend.c
index cf02444b56..a198aabebc 100644
--- a/cpukit/rtems/src/regionextend.c
+++ b/cpukit/rtems/src/regionextend.c
@@ -45,10 +45,10 @@
rtems_status_code rtems_region_extend(
Objects_Id id,
void *starting_address,
- intptr_t length
+ uintptr_t length
)
{
- intptr_t amount_extended;
+ uintptr_t amount_extended;
Heap_Extend_status heap_status;
Objects_Locations location;
rtems_status_code return_status;
diff --git a/cpukit/rtems/src/regiongetinfo.c b/cpukit/rtems/src/regiongetinfo.c
index 2fad55dcff..31d5166e0d 100644
--- a/cpukit/rtems/src/regiongetinfo.c
+++ b/cpukit/rtems/src/regiongetinfo.c
@@ -60,11 +60,8 @@ rtems_status_code rtems_region_get_information(
switch ( location ) {
case OBJECTS_LOCAL:
- if ( _Heap_Get_information( &the_region->Memory, the_info ) !=
- HEAP_GET_INFORMATION_SUCCESSFUL )
- return_status = RTEMS_INVALID_ADDRESS;
- else
- return_status = RTEMS_SUCCESSFUL;
+ _Heap_Get_information( &the_region->Memory, the_info );
+ return_status = RTEMS_SUCCESSFUL;
break;
#if defined(RTEMS_MULTIPROCESSING)
diff --git a/cpukit/rtems/src/regiongetsegment.c b/cpukit/rtems/src/regiongetsegment.c
index e060a8a4e9..af70ead300 100644
--- a/cpukit/rtems/src/regiongetsegment.c
+++ b/cpukit/rtems/src/regiongetsegment.c
@@ -47,7 +47,7 @@
rtems_status_code rtems_region_get_segment(
Objects_Id id,
- intptr_t size,
+ uintptr_t size,
rtems_option option_set,
rtems_interval timeout,
void **segment
diff --git a/cpukit/rtems/src/regiongetsegmentsize.c b/cpukit/rtems/src/regiongetsegmentsize.c
index 203ce4c09a..fda11d0bc8 100644
--- a/cpukit/rtems/src/regiongetsegmentsize.c
+++ b/cpukit/rtems/src/regiongetsegmentsize.c
@@ -45,7 +45,7 @@
rtems_status_code rtems_region_get_segment_size(
Objects_Id id,
void *segment,
- intptr_t *size
+ uintptr_t *size
)
{
Objects_Locations location;
diff --git a/cpukit/rtems/src/regionresizesegment.c b/cpukit/rtems/src/regionresizesegment.c
index 2bbca2bae5..922641039f 100644
--- a/cpukit/rtems/src/regionresizesegment.c
+++ b/cpukit/rtems/src/regionresizesegment.c
@@ -46,13 +46,13 @@
rtems_status_code rtems_region_resize_segment(
Objects_Id id,
void *segment,
- intptr_t size,
- intptr_t *old_size
+ uintptr_t size,
+ uintptr_t *old_size
)
{
- intptr_t avail_size;
+ uintptr_t avail_size;
Objects_Locations location;
- intptr_t osize;
+ uintptr_t osize;
rtems_status_code return_status = RTEMS_INTERNAL_ERROR;
Heap_Resize_status status;
register Region_Control *the_region;
diff --git a/cpukit/score/include/rtems/score/apimutex.h b/cpukit/score/include/rtems/score/apimutex.h
index a0ff68b8d6..0288fd9367 100644
--- a/cpukit/score/include/rtems/score/apimutex.h
+++ b/cpukit/score/include/rtems/score/apimutex.h
@@ -1,9 +1,9 @@
/**
- * @file rtems/score/apimutex.h
+ * @file
*
- * This include file contains all the constants and structures associated
- * with the API Mutex Handler. This handler is used by API level
- * routines to manage mutual exclusion.
+ * @ingroup ScoreAPIMutex
+ *
+ * @brief API Mutex Handler API.
*/
/*
@@ -20,115 +20,97 @@
#ifndef _RTEMS_SCORE_APIMUTEX_H
#define _RTEMS_SCORE_APIMUTEX_H
-/**
- * @defgroup ScoreAPIMutex API Mutex Handler
- *
- * This handler encapsulates functionality which provides mutexes to be used
- * in the implementation of API functionality.
- */
-/**@{*/
-
#ifdef __cplusplus
extern "C" {
#endif
+/**
+ * @defgroup ScoreAPIMutex API Mutex Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Provides routines to ensure mutual exclusion on API level.
+ *
+ * @{
+ */
+
#include <rtems/score/coremutex.h>
#include <rtems/score/isr.h>
#include <rtems/score/object.h>
/**
- * The following defines the control block used to manage each API mutex.
- * An API Mutex is an aggregration of an Object and a SuperCore Mutex.
+ * @brief Control block used to manage each API mutex.
*/
typedef struct {
- /** This field allows each API Mutex to be a full-fledged RTEMS object. */
- Objects_Control Object;
- /** This field contains the SuperCore mutex information. */
- CORE_mutex_Control Mutex;
-} API_Mutex_Control;
+ /**
+ * @brief Allows each API Mutex to be a full-fledged RTEMS object.
+ */
+ Objects_Control Object;
-/**
- * The following variable is the information control block used to manage
- * this class of objects.
- */
-SCORE_EXTERN Objects_Information _API_Mutex_Information;
+ /**
+ * Contains the SuperCore mutex information.
+ */
+ CORE_mutex_Control Mutex;
+} API_Mutex_Control;
/**
- * This routine performs the initialization necessary for this handler.
- *
- * @param[in] maximum_mutexes is the maximum number of API mutexes
- * that may exist at any time
+ * @brief Information control block used to manage this class of objects.
*/
-void _API_Mutex_Initialization(
- uint32_t maximum_mutexes
-);
+SCORE_EXTERN Objects_Information _API_Mutex_Information;
/**
- * This routine allocates an API mutex from the inactive set.
+ * @brief Performs the initialization necessary for this handler.
*
- * @param[out] the_mutex will contain the allocated mutex.
+ * The value @a maximum_mutexes is the maximum number of API mutexes that may
+ * exist at any time.
*/
-void _API_Mutex_Allocate(
- API_Mutex_Control **the_mutex
-);
+void _API_Mutex_Initialization( uint32_t maximum_mutexes );
/**
- * This routine acquires the specified API mutex.
- *
- * @param[in] the_mutex is the mutex to acquire.
+ * @brief Allocates an API mutex from the inactive set and returns it in
+ * @a mutex.
*/
-void _API_Mutex_Lock(
- API_Mutex_Control *the_mutex
-);
+void _API_Mutex_Allocate( API_Mutex_Control **mutex );
/**
- * This routine releases the specified API mutex.
- *
- * @param[in] the_mutex is the mutex to release.
+ * @brief Acquires the specified API mutex @a mutex.
*/
-void _API_Mutex_Unlock(
- API_Mutex_Control *the_mutex
-);
+void _API_Mutex_Lock( API_Mutex_Control *mutex );
/**
- * This variable points to the API Mutex instance that is used
- * to protect all memory allocation and deallocation in RTEMS.
- *
- * @note When the APIs all use this for allocation and deallocation
- * protection, then this possibly should be renamed and moved to a
- * higher level in the hierarchy.
+ * @brief Releases the specified API mutex @a mutex.
*/
-SCORE_EXTERN API_Mutex_Control *_RTEMS_Allocator_Mutex;
+void _API_Mutex_Unlock( API_Mutex_Control *mutex );
+
+/** @} */
/**
- * This macro locks the RTEMS Allocation Mutex.
+ * @defgroup ScoreAllocatorMutex RTEMS Allocator Mutex
+ *
+ * @ingroup ScoreAPIMutex
+ *
+ * @brief Protection for all memory allocations and deallocations in RTEMS.
*
- * @see _RTEMS_Allocator_Mutex
+ * When the APIs all use this for allocation and deallocation protection, then
+ * this possibly should be renamed and moved to a higher level in the
+ * hierarchy.
+ *
+ * @{
*/
+
+SCORE_EXTERN API_Mutex_Control *_RTEMS_Allocator_Mutex;
+
#define _RTEMS_Lock_allocator() \
_API_Mutex_Lock( _RTEMS_Allocator_Mutex )
-/**
- * This macro unlocks the RTEMS Allocation Mutex.
- *
- * @see _RTEMS_Allocator_Mutex
- */
#define _RTEMS_Unlock_allocator() \
_API_Mutex_Unlock( _RTEMS_Allocator_Mutex )
-/*
- * There are no inlines for this handler.
- */
-
-#ifndef __RTEMS_APPLICATION__
-/* #include <rtems/score/apimutex.inl> */
-#endif
+/** @} */
#ifdef __cplusplus
}
#endif
-/*!@}*/
-
#endif
/* end of include file */
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index fedddd20f3..d07eac3baf 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -1,7 +1,9 @@
/**
* @file
*
- * Heap Handler API.
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler API.
*/
/*
@@ -25,7 +27,9 @@ extern "C" {
/**
* @defgroup ScoreHeap Heap Handler
*
- * The Heap Handler provides a heap.
+ * @ingroup Score
+ *
+ * @brief The Heap Handler provides a heap.
*
* A heap is a doubly linked list of variable size blocks which are allocated
* using the first fit method. Garbage collection is performed each time a
@@ -33,17 +37,22 @@ extern "C" {
* information for both allocated and free blocks is contained in the heap
* area. A heap control structure contains control information for the heap.
*
- * FIXME: The alignment routines could be made faster should we require only
- * powers of two to be supported both for 'page_size' and for 'alignment'
- * arguments. However, both workspace and malloc heaps are initialized with
- * CPU_HEAP_ALIGNMENT as 'page_size', and while all the BSPs seem to use
+ * The alignment routines could be made faster should we require only powers of
+ * two to be supported both for page size, alignment and boundary arguments.
+ * However, both workspace and malloc heaps are initialized with
+ * CPU_HEAP_ALIGNMENT as page size, and while all the BSPs seem to use
* CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever
* reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT
* and explicitly not required to be a power of two.
*
* There are two kinds of blocks. One sort describes a free block from which
- * we can allocate memory. The other blocks are used and contain allocated
- * memory. The free blocks are accessible via a list of free blocks.
+ * we can allocate memory. The other blocks are used and provide an allocated
+ * memory area. The free blocks are accessible via a list of free blocks.
+ *
+ * Blocks or areas cover a continuous set of memory addresses. They have a
+ * begin and end address. The end address is not part of the set. The size of
+ * a block or area equals the distance between the begin and end address in
+ * units of bytes.
*
* Free blocks look like:
* <table>
@@ -83,7 +92,10 @@ extern "C" {
* <table>
* <tr><th>Label</th><th colspan=2>Content</th></tr>
* <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
- * <tr><td>first_block->prev_size</td><td colspan=2>arbitrary value</td></tr>
+ * <tr>
+ * <td>first_block->prev_size</td>
+ * <td colspan=2>page size (the value is arbitrary)</td>
+ * </tr>
* <tr>
* <td>first_block->size</td>
* <td colspan=2>size available for allocation
@@ -100,7 +112,7 @@ extern "C" {
* </tr>
* <tr>
* <td>second_block->size</td>
- * <td colspan=2>arbitrary size | @c HEAP_PREV_BLOCK_FREE</td>
+ * <td colspan=2>page size (the value is arbitrary)</td>
* </tr>
* <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
* </table>
@@ -109,6 +121,23 @@ extern "C" {
*/
/**
+ * @brief See also @ref Heap_Block.size_and_flag.
+ */
+#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
+
+/**
+ * @brief Offset from the block begin up to the block size field
+ * (@ref Heap_Block.size_and_flag).
+ */
+#define HEAP_BLOCK_SIZE_OFFSET sizeof(uintptr_t)
+
+/**
+ * @brief The block header consists of the two size fields
+ * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
+ */
+#define HEAP_BLOCK_HEADER_SIZE (sizeof(uintptr_t) * 2)
+
+/**
* @brief Description for free or used blocks.
*/
typedef struct Heap_Block {
@@ -119,6 +148,11 @@ typedef struct Heap_Block {
* This field is only valid if the previous block is free. This case is
* indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
* @a size_and_flag field of the current block.
+ *
+ * In a used block only the @a size_and_flag field needs to be valid. The
+ * @a prev_size field of the current block is maintained by the previous
+ * block. The current block can use the @a prev_size field in the next block
+ * for allocation.
*/
uintptr_t prev_size;
@@ -157,86 +191,119 @@ typedef struct Heap_Block {
struct Heap_Block *prev;
} Heap_Block;
-#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
-
-#define HEAP_PREV_BLOCK_FREE ((uintptr_t) 0)
-
/**
- * @brief Offset from the block begin up to the block size field.
- */
-#define HEAP_BLOCK_SIZE_OFFSET (sizeof(uintptr_t))
-
-/**
- * @brief Offset from the block begin up to the allocated area begin.
- */
-#define HEAP_BLOCK_ALLOC_AREA_OFFSET (sizeof(uintptr_t) * 2)
-
-#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uintptr_t) * 2)
-
-#define HEAP_LAST_BLOCK_OVERHEAD HEAP_BLOCK_ALLOC_AREA_OFFSET
-
-/**
- * Run-time heap statistics.
- *
- * @note (double)searches/allocs gives mean number of searches per alloc while
- * max_search gives maximum number of searches ever performed on a
- * single call to alloc.
+ * @brief Run-time heap statistics.
*
- * @note the statistics is always gathered. I believe the imposed overhead is
- * rather small. Feel free to make it compile-time option if you think
- * the overhead is too high for your application.
+ * The value @a searches / @a allocs gives the mean number of searches per
+ * allocation, while @a max_search gives maximum number of searches ever
+ * performed on a single allocation call.
*/
typedef struct {
- /** instance number of this heap */
+ /**
+ * @brief Instance number of this heap.
+ */
uint32_t instance;
- /** the size of the memory for heap */
+
+ /**
+ * @brief The size of the memory for heap.
+ */
uintptr_t size;
- /** current free size */
+
+ /**
+ * @brief Current free size.
+ */
uintptr_t free_size;
- /** minimum free size ever */
+
+ /**
+ * @brief Minimum free size ever.
+ */
uintptr_t min_free_size;
- /** current number of free blocks */
+
+ /**
+ * @brief Current number of free blocks.
+ */
uint32_t free_blocks;
- /** maximum number of free blocks ever */
+
+ /**
+ * @brief Maximum number of free blocks ever.
+ */
uint32_t max_free_blocks;
- /** current number of used blocks */
+
+ /**
+ * @brief Current number of used blocks.
+ */
uint32_t used_blocks;
- /** maximum number of blocks searched ever */
+
+ /**
+ * @brief Maximum number of blocks searched ever.
+ */
uint32_t max_search;
- /** total number of successful calls to alloc */
+
+ /**
+ * @brief Total number of successful calls to alloc.
+ */
uint32_t allocs;
- /** total number of searches ever */
+
+ /**
+ * @brief Total number of searches ever.
+ */
uint32_t searches;
- /** total number of suceessful calls to free */
+
+ /**
+ * @brief Total number of successful calls to free.
+ */
uint32_t frees;
- /** total number of successful resizes */
+
+ /**
+ * @brief Total number of successful resizes.
+ */
uint32_t resizes;
} Heap_Statistics;
/**
- * Control block used to manage each heap.
+ * @brief Control block used to manage a heap.
*/
typedef struct {
- /** head and tail of circular list of free blocks */
Heap_Block free_list;
- /** allocation unit and alignment */
uintptr_t page_size;
- /** minimum block size aligned on page_size */
uintptr_t min_block_size;
- /** first address of memory for the heap */
- uintptr_t begin;
- /** first address past end of memory for the heap */
- uintptr_t end;
- /** first valid block address in the heap */
- Heap_Block *start;
- /** last valid block address in the heap */
- Heap_Block *final;
- /** run-time statistics */
+ uintptr_t area_begin;
+ uintptr_t area_end;
+ Heap_Block *first_block;
+ Heap_Block *last_block;
Heap_Statistics stats;
} Heap_Control;
/**
- * Status codes for _Heap_Extend
+ * @brief Information about blocks.
+ */
+typedef struct {
+ /**
+ * @brief Number of blocks of this type.
+ */
+ uint32_t number;
+
+ /**
+ * @brief Largest block of this type.
+ */
+ uint32_t largest;
+
+ /**
+ * @brief Total size of the blocks of this type.
+ */
+ uint32_t total;
+} Heap_Information;
+
+/**
+ * @brief Information block returned by _Heap_Get_information().
+ */
+typedef struct {
+ Heap_Information Free;
+ Heap_Information Used;
+} Heap_Information_block;
+
+/**
+ * @brief See _Heap_Extend().
*/
typedef enum {
HEAP_EXTEND_SUCCESSFUL,
@@ -245,7 +312,7 @@ typedef enum {
} Heap_Extend_status;
/**
- * Status codes for _Heap_Resize_block
+ * @brief See _Heap_Resize_block().
*/
typedef enum {
HEAP_RESIZE_SUCCESSFUL,
@@ -254,40 +321,8 @@ typedef enum {
} Heap_Resize_status;
/**
- * Status codes for _Heap_Get_information
- */
-typedef enum {
- HEAP_GET_INFORMATION_SUCCESSFUL = 0,
- HEAP_GET_INFORMATION_BLOCK_ERROR
-} Heap_Get_information_status;
-
-/**
- * Information block returned by the Heap routines used to
- * obtain heap information. This information is returned about
- * either free or used blocks.
- */
-typedef struct {
- /** Number of blocks of this type. */
- uint32_t number;
- /** Largest blocks of this type. */
- uint32_t largest;
- /** Total size of the blocks of this type. */
- uint32_t total;
-} Heap_Information;
-
-/**
- * Information block returned by _Heap_Get_information
- */
-typedef struct {
- /** This field is information on the used blocks in the heap. */
- Heap_Information Free;
- /** This field is information on the used blocks in the heap. */
- Heap_Information Used;
-} Heap_Information_block;
-
-/**
- * Initializes the @a heap control block to manage the area starting at
- * @a area_begin of @a area_size bytes.
+ * @brief Initializes the heap control block @a heap to manage the area
+ * starting at @a area_begin of size @a area_size bytes.
*
* Blocks of memory are allocated from the heap in multiples of @a page_size
* byte units. If the @a page_size is equal to zero or is not multiple of
@@ -303,16 +338,13 @@ uintptr_t _Heap_Initialize(
);
/**
- * This routine grows @a heap memory area using the size bytes which
- * begin at @a starting_address.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the memory
- * to add to the heap
- * @param[in] size is the size in bytes of the memory area to add
- * @param[in] amount_extended points to a user area to return the
- * @return a status indicating success or the reason for failure
- * @return *size filled in with the amount of memory added to the heap
+ * @brief Extends the memory area of the heap @a heap using the memory area
+ * starting at @a area_begin of size @a area_size bytes.
+ *
+ * The extended space available for allocation will be returned in
+ * @a amount_extended.
+ *
+ * The memory area must start at the end of the currently used memory area.
*/
Heap_Extend_status _Heap_Extend(
Heap_Control *heap,
@@ -322,139 +354,115 @@ Heap_Extend_status _Heap_Extend(
);
/**
- * This function attempts to allocate a block of @a size bytes from
- * @a heap. If insufficient memory is free in @a heap to allocate
- * a block of the requested size, then NULL is returned.
+ * @brief Allocates a memory area of size @a size bytes.
*
- * @param[in] heap is the heap to operate upon
- * @param[in] size is the amount of memory to allocate in bytes
- * @return NULL if unsuccessful and a pointer to the block if successful
- */
-void *_Heap_Allocate( Heap_Control *heap, uintptr_t size );
-
-/**
- * This function attempts to allocate a memory block of @a size bytes from
- * @a heap so that the start of the user memory is aligned on the
- * @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
- * Any other value of @a alignment is taken "as is", i.e., even odd
- * alignments are possible.
- * Returns pointer to the start of the memory block if success, NULL if
- * failure.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] size is the amount of memory to allocate in bytes
- * @param[in] alignment the required alignment
- * @return NULL if unsuccessful and a pointer to the block if successful
+ * If the alignment parameter @a alignment is not equal to zero, the allocated
+ * memory area will begin at an address aligned by this value.
+ *
+ * If the boundary parameter @a boundary is not equal to zero, the allocated
+ * memory area will fulfill a boundary constraint. The boundary value
+ * specifies the set of addresses which are aligned by the boundary value. The
+ * interior of the allocated memory area will not contain an element of this
+ * set. The begin or end address of the area may be a member of the set.
+ *
+ * A size value of zero will return a unique address which may be freed with
+ * _Heap_Free().
+ *
+ * Returns a pointer to the begin of the allocated memory area, or @c NULL if
+ * no memory is available or the parameters are inconsistent.
*/
-void *_Heap_Allocate_aligned(
+void *_Heap_Allocate_aligned_with_boundary(
Heap_Control *heap,
uintptr_t size,
- uintptr_t alignment
+ uintptr_t alignment,
+ uintptr_t boundary
);
+#define _Heap_Allocate_aligned( heap, size, alignment ) \
+ _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 )
+
+#define _Heap_Allocate( heap, size ) \
+ _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 )
+
/**
- * This function sets @a size to the size of the block of allocatable area
- * which begins at @a starting_address. The size returned in @a *size could
- * be greater than the size requested for allocation.
- * Returns true if the @a starting_address is in the heap, and false
- * otherwise.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the user block
- * to obtain the size of
- * @param[in] size points to a user area to return the size in
- * @return true if successfully able to determine the size, false otherwise
- * @return *size filled in with the size of the user area for this block
+ * @brief Frees the allocated memory area starting at @a addr in the heap
+ * @a heap.
+ *
+ * Inappropriate values for @a addr may corrupt the heap.
+ *
+ * Returns @c true in case of success, and @c false otherwise.
*/
-bool _Heap_Size_of_alloc_area(
- Heap_Control *heap,
- void *area_begin,
- uintptr_t *size
-);
+bool _Heap_Free( Heap_Control *heap, void *addr );
/**
- * This function tries to resize in place the block that is pointed to by the
- * @a starting_address to the new @a size.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the user block
- * to be resized
- * @param[in] size is the new size
- * @param[in] old_mem_size points to a user area to return the size of the
- * user memory area of the block before resizing.
- * @param[in] avail_mem_size points to a user area to return the size of
- * the user memory area of the free block that has been enlarged or
- * created due to resizing, 0 if none.
- * @return HEAP_RESIZE_SUCCESSFUL if successfully able to resize the block,
- * HEAP_RESIZE_UNSATISFIED if the block can't be resized in place,
- * HEAP_RESIZE_FATAL_ERROR if failure
- * @return *old_mem_size filled in with the size of the user memory area of
- * the block before resizing.
- * @return *avail_mem_size filled in with the size of the user memory area
- * of the free block that has been enlarged or created due to
- * resizing, 0 if none.
+ * @brief Walks the heap @a heap to verify its integrity.
+ *
+ * If @a dump is @c true, then diagnostic messages will be printed to standard
+ * output. In this case @a source is used to mark the output lines.
+ *
+ * Returns @c true if no errors occurred, and @c false if the heap is corrupt.
*/
-Heap_Resize_status _Heap_Resize_block(
+bool _Heap_Walk(
Heap_Control *heap,
- void *starting_address,
- uintptr_t size,
- uintptr_t *old_mem_size,
- uintptr_t *avail_mem_size
+ int source,
+ bool dump
);
/**
- * This routine returns the block of memory which begins
- * at @a alloc_area_begin to @a heap. Any coalescing which is
- * possible with the freeing of this routine is performed.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] start_address is the starting address of the user block
- * to free
- * @return true if successfully freed, false otherwise
+ * @brief Returns information about used and free blocks for the heap @a heap
+ * in @a info.
*/
-bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin );
+void _Heap_Get_information(
+ Heap_Control *heap,
+ Heap_Information_block *info
+);
/**
- * This routine walks the heap to verify its integrity.
- *
- * @param[in] heap is the heap to operate upon
- * @param[in] source is a user specified integer which may be used to
- * indicate where in the application this was invoked from
- * @param[in] do_dump is set to true if errors should be printed
- * @return true if the test passed fine, false otherwise.
+ * @brief Returns information about free blocks for the heap @a heap in
+ * @a info.
*/
-bool _Heap_Walk(
+void _Heap_Get_free_information(
Heap_Control *heap,
- int source,
- bool do_dump
+ Heap_Information *info
);
/**
- * This routine walks the heap and tots up the free and allocated
- * sizes.
+ * @brief Returns the size of the allocatable memory area starting at @a addr
+ * in @a size.
+ *
+ * The size value may be greater than the initially requested size in
+ * _Heap_Allocate_aligned_with_boundary().
+ *
+ * Inappropriate values for @a addr will not corrupt the heap, but may yield
+ * invalid size values.
*
- * @param[in] heap pointer to heap header
- * @param[in] the_info pointer to a status information area
- * @return *the_info is filled with status information
- * @return 0=success, otherwise heap is corrupt.
+ * Returns @a true if successful, and @c false otherwise.
*/
-Heap_Get_information_status _Heap_Get_information(
- Heap_Control *heap,
- Heap_Information_block *the_info
+bool _Heap_Size_of_alloc_area(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t *size
);
/**
- * This heap routine returns information about the free blocks
- * in the specified heap.
+ * @brief Resizes the block of the allocated memory area starting at @a addr.
+ *
+ * The new memory area will have a size of at least @a size bytes. A resize
+ * may be impossible and depends on the current heap usage.
*
- * @param[in] heap pointer to heap header.
- * @param[in] info pointer to the free block information.
+ * The size available for allocation in the current block before the resize
+ * will be returned in @a old_size. The size available for allocation in
+ * the resized block will be returned in @a new_size. If the resize was not
+ * successful, then a value of zero will be returned in @a new_size.
*
- * @return free block information filled in.
+ * Inappropriate values for @a addr may corrupt the heap.
*/
-void _Heap_Get_free_information(
- Heap_Control *heap,
- Heap_Information *info
+Heap_Resize_status _Heap_Resize_block(
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
);
#if !defined(__RTEMS_APPLICATION__)
@@ -462,36 +470,20 @@ void _Heap_Get_free_information(
#include <rtems/score/heap.inl>
/**
- * @brief Returns the minimal block size for a block which may contain an area
- * of size @a alloc_size for allocation, or zero in case of an overflow.
- *
- * Uses the heap values @a page_size and @a min_block_size.
- */
-uintptr_t _Heap_Calc_block_size(
- uintptr_t alloc_size,
- uintptr_t page_size,
- uintptr_t min_block_size
-);
-
-/**
- * This method allocates a block of size @a alloc_size from @a the_block
- * belonging to @a heap. Split @a the_block if possible, otherwise
- * allocate it entirely. When split, make the lower part used, and leave
- * the upper part free.
+ * @brief Allocates the memory area starting at @a alloc_begin of size
+ * @a alloc_size bytes in the block @a block.
*
- * This is an internal routines used by _Heap_Allocate() and
- * _Heap_Allocate_aligned(). Refer to 'heap.c' for details.
+ * The block may be split up into multiple blocks.
*
- * @param[in] heap is the heap to operate upon
- * @param[in] the_block is the block to allocates the requested size from
- * @param[in] alloc_size is the requested number of bytes to take out of
- * the block
+ * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
+ * heap.
*
- * @return This methods returns the size of the allocated block.
+ * Returns the block containing the allocated memory area.
*/
-uintptr_t _Heap_Block_allocate(
+Heap_Block *_Heap_Block_allocate(
Heap_Control *heap,
Heap_Block *block,
+ uintptr_t alloc_begin,
uintptr_t alloc_size
);
diff --git a/cpukit/score/include/rtems/score/object.h b/cpukit/score/include/rtems/score/object.h
index b78678a530..5cf770ef91 100644
--- a/cpukit/score/include/rtems/score/object.h
+++ b/cpukit/score/include/rtems/score/object.h
@@ -30,6 +30,12 @@ extern "C" {
#endif
/**
+ * @defgroup Score SuperCore
+ *
+ * @brief Provides services for all APIs.
+ */
+
+/**
* The following type defines the control block used to manage
* object names.
*/
diff --git a/cpukit/score/include/rtems/score/protectedheap.h b/cpukit/score/include/rtems/score/protectedheap.h
index ffe6b8406a..c9d6a62d8d 100644
--- a/cpukit/score/include/rtems/score/protectedheap.h
+++ b/cpukit/score/include/rtems/score/protectedheap.h
@@ -1,9 +1,12 @@
/**
- * @file rtems/score/protectedheap.h
+ * @file
*
- * This include file contains the information pertaining to the
- * Protected Heap Handler.
+ * @ingroup ScoreProtHeap
*
+ * @brief Protected Heap Handler API.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -20,210 +23,125 @@
#include <rtems/score/heap.h>
#include <rtems/score/apimutex.h>
-/**
- * @defgroup ScoreProtHeap Protected Heap Handler
- *
- * This handler encapsulates functionality which provides the foundation
- * Protected Heap services.
- *
- * It is a simple wrapper for the help with the addition of the
- * allocation mutex being used for protection.
- */
-/**@{*/
-
#ifdef __cplusplus
extern "C" {
#endif
/**
- * This routine initializes @a the_heap record to manage the
- * contiguous heap of @a size bytes which starts at @a starting_address.
- * Blocks of memory are allocated from the heap in multiples of
- * @a page_size byte units. If @a page_size is 0 or is not multiple of
- * CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
+ * @defgroup ScoreProtHeap Protected Heap Handler
*
- * @param[in] the_heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the memory for
- * the heap
- * @param[in] size is the size in bytes of the memory area for the heap
- * @param[in] page_size is the size in bytes of the allocation unit
+ * @ingroup ScoreHeap
*
- * @return This method returns the maximum memory available. If
- * unsuccessful, 0 will be returned.
+ * @brief Provides protected heap services.
+ *
+ * The @ref ScoreAllocatorMutex is used to protect the heap accesses.
+ *
+ * @{
+ */
+
+/**
+ * @brief See _Heap_Initialize().
*/
-static inline uint32_t _Protected_heap_Initialize(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size,
- uint32_t page_size
+RTEMS_INLINE_ROUTINE uintptr_t _Protected_heap_Initialize(
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size,
+ uintptr_t page_size
)
{
- return _Heap_Initialize( the_heap, starting_address, size, page_size );
+ return _Heap_Initialize( heap, area_begin, area_size, page_size );
}
/**
- * This routine grows @a the_heap memory area using the size bytes which
- * begin at @a starting_address.
+ * @brief See _Heap_Extend().
*
- * @param[in] the_heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the memory
- * to add to the heap
- * @param[in] size is the size in bytes of the memory area to add
- * @return a status indicating success or the reason for failure
+ * Returns @a true in case of success, and @a false otherwise.
*/
bool _Protected_heap_Extend(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size
+ Heap_Control *heap,
+ void *area_begin,
+ uintptr_t area_size
);
/**
- * This function attempts to allocate a block of @a size bytes from
- * @a the_heap. If insufficient memory is free in @a the_heap to allocate
- * a block of the requested size, then NULL is returned.
- *
- * @param[in] the_heap is the heap to operate upon
- * @param[in] size is the amount of memory to allocate in bytes
- * @return NULL if unsuccessful and a pointer to the block if successful
+ * @brief See _Heap_Allocate_aligned_with_boundary().
*/
void *_Protected_heap_Allocate(
- Heap_Control *the_heap,
- intptr_t size
+ Heap_Control *heap,
+ uintptr_t size
);
/**
- * This function attempts to allocate a memory block of @a size bytes from
- * @a the_heap so that the start of the user memory is aligned on the
- * @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
- * Any other value of @a alignment is taken "as is", i.e., even odd
- * alignments are possible.
- * Returns pointer to the start of the memory block if success, NULL if
- * failure.
- *
- * @param[in] the_heap is the heap to operate upon
- * @param[in] size is the amount of memory to allocate in bytes
- * @param[in] alignment the required alignment
- * @return NULL if unsuccessful and a pointer to the block if successful
+ * @brief See _Heap_Allocate_aligned_with_boundary().
*/
void *_Protected_heap_Allocate_aligned(
- Heap_Control *the_heap,
- intptr_t size,
- uint32_t alignment
+ Heap_Control *heap,
+ uintptr_t size,
+ uintptr_t alignment
);
/**
- * This function sets @a *size to the size of the block of user memory
- * which begins at @a starting_address. The size returned in @a *size could
- * be greater than the size requested for allocation.
- * Returns true if the @a starting_address is in the heap, and false
- * otherwise.
- *
- * @param[in] the_heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the user block
- * to obtain the size of
- * @param[in] size points to a user area to return the size in
- * @return true if successfully able to determine the size, false otherwise
- * @return *size filled in with the size of the user area for this block
+ * @brief See _Heap_Size_of_alloc_area().
*/
bool _Protected_heap_Get_block_size(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t *size
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t *size
);
/**
- * This function tries to resize in place the block that is pointed to by the
- * @a starting_address to the new @a size.
+ * @brief See _Heap_Resize_block().
*
- * @param[in] the_heap is the heap to operate upon
- * @param[in] starting_address is the starting address of the user block
- * to be resized
- * @param[in] size is the new size
- *
- * @return true if successfully able to resize the block.
- * false if the block can't be resized in place.
+ * Returns @a true in case of success, and @a false otherwise.
*/
bool _Protected_heap_Resize_block(
- Heap_Control *the_heap,
- void *starting_address,
- intptr_t size
+ Heap_Control *heap,
+ void *addr,
+ uintptr_t size
);
/**
- * This routine returns the block of memory which begins
- * at @a starting_address to @a the_heap. Any coalescing which is
- * possible with the freeing of this routine is performed.
+ * @brief See _Heap_Free().
*
- * @param[in] the_heap is the heap to operate upon
- * @param[in] start_address is the starting address of the user block
- * to free
- * @return true if successfully freed, false otherwise
+ * Returns @a true in case of success, and @a false otherwise.
*/
-bool _Protected_heap_Free(
- Heap_Control *the_heap,
- void *start_address
-);
+bool _Protected_heap_Free( Heap_Control *heap, void *addr );
/**
- * This routine walks the heap to verify its integrity.
- *
- * @param[in] the_heap is the heap to operate upon
- * @param[in] source is a user specified integer which may be used to
- * indicate where in the application this was invoked from
- * @param[in] do_dump is set to true if errors should be printed
- * @return true if the test passed fine, false otherwise.
+ * @brief See _Heap_Walk().
*/
-bool _Protected_heap_Walk(
- Heap_Control *the_heap,
- int source,
- bool do_dump
-);
+bool _Protected_heap_Walk( Heap_Control *heap, int source, bool dump );
/**
- * This routine walks the heap and tots up the free and allocated
- * sizes.
- *
- * @param[in] the_heap pointer to heap header
- * @param[in] the_info pointer to a status information area
+ * @brief See _Heap_Get_information().
*
- * @return true if successfully able to return information
+ * Returns @a true in case of success, and @a false otherwise.
*/
bool _Protected_heap_Get_information(
- Heap_Control *the_heap,
- Heap_Information_block *the_info
+ Heap_Control *heap,
+ Heap_Information_block *info
);
/**
- * This heap routine returns information about the free blocks
- * in the specified heap.
+ * @brief See _Heap_Get_free_information().
*
- * @param[in] the_heap pointer to heap header.
- * @param[in] info pointer to the free block information.
- *
- * @return free block information filled in.
+ * Returns @a true in case of success, and @a false otherwise.
*/
bool _Protected_heap_Get_free_information(
- Heap_Control *the_heap,
- Heap_Information *info
+ Heap_Control *heap,
+ Heap_Information *info
);
/**
- * This function returns the maximum size of the protected heap.
- *
- * @param[in] the_heap points to the heap being operated upon
- *
- * @return This method returns the total amount of memory
- * allocated to the heap.
+ * @brief See _Heap_Get_size().
*/
-uint32_t _Protected_heap_Get_size(
- Heap_Control *the_heap
-);
+uintptr_t _Protected_heap_Get_size( Heap_Control *heap );
+
+/** @} */
#ifdef __cplusplus
}
#endif
-/**@}*/
-
#endif
/* end of include file */
diff --git a/cpukit/score/inline/rtems/score/heap.inl b/cpukit/score/inline/rtems/score/heap.inl
index 7ac5649f28..2bcadac385 100644
--- a/cpukit/score/inline/rtems/score/heap.inl
+++ b/cpukit/score/inline/rtems/score/heap.inl
@@ -1,8 +1,9 @@
-/**
+/**
* @file
*
- * @brief Static inline implementations of the inlined routines from the heap
- * handler.
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler API.
*/
/*
@@ -26,7 +27,7 @@
#include <rtems/score/address.h>
/**
- * @addtogroup ScoreHeap
+ * @addtogroup ScoreHeap
*
* @{
*/
@@ -41,17 +42,17 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
return &heap->free_list;
}
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First_free_block( Heap_Control *heap )
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{
return _Heap_Free_list_head(heap)->next;
}
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last_free_block( Heap_Control *heap )
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
{
return _Heap_Free_list_tail(heap)->prev;
}
-RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block )
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
{
Heap_Block *next = block->next;
Heap_Block *prev = block->prev;
@@ -60,7 +61,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block )
next->prev = prev;
}
-RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list(
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
Heap_Block *old_block,
Heap_Block *new_block
)
@@ -75,16 +76,16 @@ RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list(
prev->next = new_block;
}
-RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after(
- Heap_Block *prev_block,
+RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
+ Heap_Block *block_before,
Heap_Block *new_block
)
{
- Heap_Block *next = prev_block->next;
+ Heap_Block *next = block_before->next;
new_block->next = next;
- new_block->prev = prev_block;
- prev_block->next = new_block;
+ new_block->prev = block_before;
+ block_before->next = new_block;
next->prev = new_block;
}
@@ -122,60 +123,64 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
* @brief Returns the block which is @a offset away from @a block.
*/
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
- const Heap_Block *block,
+ Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
}
-/**
- * @brief Returns the begin of the allocatable area of @a block.
- */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
- Heap_Block *block
+ const Heap_Block *block
)
{
- return (uintptr_t) block + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
}
-/**
- * @brief Returns the block associated with the allocatable area starting at
- * @a alloc_area_begin inside a heap with a page size of @a page_size.
- */
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
- uintptr_t alloc_area_begin,
+ uintptr_t alloc_begin,
uintptr_t page_size
)
{
- return (Heap_Block *) (_Heap_Align_down( alloc_area_begin, page_size )
- - HEAP_BLOCK_ALLOC_AREA_OFFSET);
+ return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
+ - HEAP_BLOCK_HEADER_SIZE);
}
-RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( Heap_Block *block )
+RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
}
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( Heap_Block *block )
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
- Heap_Control *heap,
- Heap_Block *block
+ const Heap_Control *heap,
+ const Heap_Block *block
)
{
- return _Addresses_Is_in_range( block, heap->start, heap->final );
+ return (uintptr_t) block >= (uintptr_t) heap->first_block
+ && (uintptr_t) block <= (uintptr_t) heap->last_block;
}
/**
- * @brief Returns the maximum size of the heap.
+ * @brief Returns the heap area size.
*/
-RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( Heap_Control *heap )
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
+{
+ return heap->area_end - heap->area_begin;
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
+{
+ return a > b ? a : b;
+}
+
+RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
{
- return (uintptr_t) heap->end - (uintptr_t) heap->begin;
+ return a < b ? a : b;
}
/** @} */
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index 208197d5a3..45faac7630 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -1,9 +1,17 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2009 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
@@ -18,6 +26,10 @@
#include <rtems/system.h>
#include <rtems/score/heap.h>
+#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 4 != 0
+ #error "invalid CPU_ALIGNMENT value"
+#endif
+
static uint32_t instance = 0;
/*PAGE
@@ -113,16 +125,19 @@ static uint32_t instance = 0;
uintptr_t _Heap_Initialize(
Heap_Control *heap,
- void *area_begin,
- uintptr_t area_size,
+ void *heap_area_begin_ptr,
+ uintptr_t heap_area_size,
uintptr_t page_size
)
{
- Heap_Statistics * const stats = &heap->stats;
- uintptr_t heap_area_begin = (uintptr_t) area_begin;
- uintptr_t heap_area_end = heap_area_begin + area_size;
- uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
+ uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
+ uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
uintptr_t alloc_area_size = 0;
+ uintptr_t first_block_begin = 0;
+ uintptr_t first_block_size = 0;
+ uintptr_t min_block_size = 0;
uintptr_t overhead = 0;
Heap_Block *first_block = NULL;
Heap_Block *second_block = NULL;
@@ -132,47 +147,50 @@ uintptr_t _Heap_Initialize(
} else {
page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
}
-
- heap->min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
+ min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
- overhead = HEAP_LAST_BLOCK_OVERHEAD
- + (alloc_area_begin - HEAP_BLOCK_ALLOC_AREA_OFFSET - heap_area_begin);
- alloc_area_size = _Heap_Align_down ( area_size - overhead, page_size );
+ first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
+ overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
+ first_block_size = heap_area_size - overhead;
+ first_block_size = _Heap_Align_down ( first_block_size, page_size );
+ alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;
if (
heap_area_end < heap_area_begin
- || area_size < overhead
- || alloc_area_size == 0
+ || heap_area_size <= overhead
+ || first_block_size < min_block_size
) {
/* Invalid area or area too small */
return 0;
}
- heap->page_size = page_size;
- heap->begin = heap_area_begin;
- heap->end = heap_area_end;
-
/* First block */
- first_block = _Heap_Block_of_alloc_area( alloc_area_begin, page_size );
+ first_block = (Heap_Block *) first_block_begin;
first_block->prev_size = page_size;
- first_block->size_and_flag = alloc_area_size | HEAP_PREV_BLOCK_USED;
+ first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
first_block->next = _Heap_Free_list_tail( heap );
first_block->prev = _Heap_Free_list_head( heap );
- _Heap_Free_list_head( heap )->next = first_block;
- _Heap_Free_list_tail( heap )->prev = first_block;
- heap->start = first_block;
/* Second and last block */
- second_block = _Heap_Block_at( first_block, alloc_area_size );
- second_block->prev_size = alloc_area_size;
- second_block->size_and_flag = page_size | HEAP_PREV_BLOCK_FREE;
- heap->final = second_block;
+ second_block = _Heap_Block_at( first_block, first_block_size );
+ second_block->prev_size = first_block_size;
+ second_block->size_and_flag = page_size;
+
+ /* Heap control */
+ heap->page_size = page_size;
+ heap->min_block_size = min_block_size;
+ heap->area_begin = heap_area_begin;
+ heap->area_end = heap_area_end;
+ heap->first_block = first_block;
+ heap->last_block = second_block;
+ _Heap_Free_list_head( heap )->next = first_block;
+ _Heap_Free_list_tail( heap )->prev = first_block;
/* Statistics */
- stats->size = area_size;
- stats->free_size = alloc_area_size;
- stats->min_free_size = alloc_area_size;
+ stats->size = heap_area_size;
+ stats->free_size = first_block_size;
+ stats->min_free_size = first_block_size;
stats->free_blocks = 1;
stats->max_free_blocks = 1;
stats->used_blocks = 0;
@@ -183,9 +201,9 @@ uintptr_t _Heap_Initialize(
stats->resizes = 0;
stats->instance = instance++;
- _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ));
- _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ));
- _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ));
+ _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ) );
+ _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
+ _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
_HAssert(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
);
@@ -193,72 +211,142 @@ uintptr_t _Heap_Initialize(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
);
+ if ( !_Heap_Walk( heap, 0, false ) ) {
+ _Heap_Walk( heap, 0, true );
+ }
+
return alloc_area_size;
}
-uintptr_t _Heap_Calc_block_size(
- uintptr_t alloc_size,
- uintptr_t page_size,
- uintptr_t min_block_size)
+static Heap_Block *_Heap_Block_split(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_size
+)
{
- uintptr_t block_size =
- _Heap_Align_up( alloc_size + HEAP_BLOCK_USED_OVERHEAD, page_size );
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;
- if (block_size < min_block_size) {
- block_size = min_block_size;
- }
+ uintptr_t const block_size = _Heap_Block_size( block );
+
+ uintptr_t const used_size =
+ _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
+ uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );
- if (block_size > alloc_size) {
- return block_size;
+ uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
+ uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;
+
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+
+ _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
+ _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );
+
+ if ( free_size >= free_size_limit ) {
+ uintptr_t const free_block_size = block_size - used_block_size;
+ Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
+
+ _HAssert( used_block_size + free_block_size == block_size );
+
+ block->size_and_flag = used_block_size
+ | (block->size_and_flag & HEAP_PREV_BLOCK_USED);
+ free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+ next_block->prev_size = free_block_size;
+
+ return free_block;
} else {
- /* Integer overflow occured */
- return 0;
+ next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
+
+ return NULL;
}
}
-uintptr_t _Heap_Block_allocate(
+static Heap_Block *_Heap_Block_allocate_from_begin(
Heap_Control *heap,
Heap_Block *block,
uintptr_t alloc_size
)
{
- Heap_Statistics * const stats = &heap->stats;
- uintptr_t const block_size = _Heap_Block_size( block );
- uintptr_t const unused_size = block_size - alloc_size;
- Heap_Block *next_block = _Heap_Block_at( block, block_size );
-
- _HAssert( _Heap_Is_aligned( block_size, heap->page_size ));
- _HAssert( _Heap_Is_aligned( alloc_size, heap->page_size ));
- _HAssert( alloc_size <= block_size );
- _HAssert( _Heap_Is_prev_used( block ));
-
- if (unused_size >= heap->min_block_size) {
- /*
- * Split the block so that the upper part is still free, and the lower part
- * becomes used. This is slightly less optimal than leaving the lower part
- * free as it requires replacing block in the free blocks list, but it
- * makes it possible to reuse this code in the _Heap_Resize_block().
- */
- Heap_Block *new_block = _Heap_Block_at( block, alloc_size );
- block->size_and_flag = alloc_size | HEAP_PREV_BLOCK_USED;
- new_block->size_and_flag = unused_size | HEAP_PREV_BLOCK_USED;
- next_block->prev_size = unused_size;
- _Heap_Block_replace_in_free_list( block, new_block );
+ Heap_Block *const free_block = _Heap_Block_split( heap, block, alloc_size );
+
+ if ( free_block != NULL ) {
+ _Heap_Free_list_replace( block, free_block );
} else {
- next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
- alloc_size = block_size;
- _Heap_Block_remove_from_free_list( block );
+ Heap_Statistics *const stats = &heap->stats;
+
+ _Heap_Free_list_remove( block );
/* Statistics */
--stats->free_blocks;
}
+ return block;
+}
+
+static Heap_Block *_Heap_Block_allocate_from_end(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size
+)
+{
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t block_size = _Heap_Block_size( block );
+ uintptr_t block_end = block_begin + block_size;
+
+ Heap_Block *const new_block =
+ _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
+ uintptr_t const new_block_begin = (uintptr_t) new_block;
+ uintptr_t const new_block_size = block_end - new_block_begin;
+
+ Heap_Block *free_block = NULL;
+
+ block_end = new_block_begin;
+ block_size = block_end - block_begin;
+
+ _HAssert( block_size >= heap->min_block_size );
+ _HAssert( new_block_size >= heap->min_block_size );
+
+ block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
+ new_block->prev_size = block_size;
+ new_block->size_and_flag = new_block_size;
+
+ free_block = _Heap_Block_split( heap, new_block, alloc_size );
+ if ( free_block != NULL ) {
+ _Heap_Free_list_insert_after( block, free_block );
+ }
+
+ return new_block;
+}
+
+Heap_Block *_Heap_Block_allocate(
+ Heap_Control *heap,
+ Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size
+)
+{
+ Heap_Statistics *const stats = &heap->stats;
+ uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
+
+ _HAssert( _Heap_Is_prev_used( block ) );
+ _HAssert( alloc_area_begin <= alloc_begin );
+
+ if ( alloc_area_offset < heap->page_size ) {
+ alloc_size += alloc_area_offset;
+
+ block = _Heap_Block_allocate_from_begin( heap, block, alloc_size );
+ } else {
+ block = _Heap_Block_allocate_from_end( heap, block, alloc_begin, alloc_size );
+ }
+
/* Statistics */
++stats->used_blocks;
- stats->free_size -= alloc_size;
- if(stats->min_free_size > stats->free_size) {
+ stats->free_size -= _Heap_Block_size( block );
+ if ( stats->min_free_size > stats->free_size ) {
stats->min_free_size = stats->free_size;
}
- return alloc_size;
+ return block;
}
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index 7c9e78f31e..7b0f51e232 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -1,9 +1,17 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
+ * Copyright (c) 2009 embedded brains GmbH.
+ *
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
@@ -19,48 +27,204 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
+#ifdef RTEMS_HEAP_DEBUG
+ static void _Heap_Check_allocation(
+ const Heap_Control *heap,
+ const Heap_Block *block,
+ uintptr_t alloc_begin,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+ )
+ {
+ uintptr_t const min_block_size = heap->min_block_size;
+ uintptr_t const page_size = heap->page_size;
+
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ uintptr_t const block_end = block_begin + block_size;
+
+ uintptr_t const alloc_end = alloc_begin + alloc_size;
+
+ uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
+ uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
+
+ _HAssert( block_size >= min_block_size );
+ _HAssert( block_begin < block_end );
+ _HAssert(
+ _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
+ );
+ _HAssert(
+ _Heap_Is_aligned( block_size, page_size )
+ );
+
+ _HAssert( alloc_end <= block_end + HEAP_BLOCK_SIZE_OFFSET );
+ _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE);
+ _HAssert( alloc_area_offset < page_size );
+
+ _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
+ if ( alignment == 0 ) {
+ _HAssert( alloc_begin == alloc_area_begin );
+ } else {
+ _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
+ }
+
+ if ( boundary != 0 ) {
+ uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
+
+ _HAssert( alloc_size <= boundary );
+ _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
+ }
+ }
+#else
+ #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
+#endif
+
+static uintptr_t _Heap_Check_block(
+ const Heap_Control *heap,
+ const Heap_Block *block,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+)
+{
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ uintptr_t const block_end = block_begin + block_size;
+
+ uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
+ uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ + HEAP_BLOCK_HEADER_SIZE + page_size - 1;
+
+ uintptr_t alloc_end = block_end + HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t alloc_begin = alloc_end - alloc_size;
+
+ alloc_begin = _Heap_Align_down( alloc_begin, alignment );
+
+ /* Ensure that we have a valid new block at the end */
+ if ( alloc_begin > alloc_begin_ceiling ) {
+ alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
+ }
+
+ alloc_end = alloc_begin + alloc_size;
+
+ /* Ensure boundary constraint */
+ if ( boundary != 0 ) {
+ uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
+ uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
+
+ while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
+ if ( boundary_line < boundary_floor ) {
+ return 0;
+ }
+ alloc_begin = boundary_line - alloc_size;
+ alloc_begin = _Heap_Align_down( alloc_begin, alignment );
+ alloc_end = alloc_begin + alloc_size;
+ boundary_line = _Heap_Align_down( alloc_end, boundary );
+ }
+ }
+
+ /* Ensure that we have a valid new block at the beginning */
+ if ( alloc_begin >= alloc_begin_floor ) {
+ uintptr_t const alloc_block_begin =
+ (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
+ uintptr_t const free_size = alloc_block_begin - block_begin;
+
+ if ( free_size >= min_block_size || free_size == 0 ) {
+ return alloc_begin;
+ }
+ }
+
+ return 0;
+}
+
+void *_Heap_Allocate_aligned_with_boundary(
+ Heap_Control *heap,
+ uintptr_t alloc_size,
+ uintptr_t alignment,
+ uintptr_t boundary
+)
{
Heap_Statistics *const stats = &heap->stats;
- Heap_Block * const tail = _Heap_Free_list_tail( heap );
- Heap_Block *block = _Heap_First_free_block( heap );
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *block = _Heap_Free_list_first( heap );
+ uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
+ - HEAP_BLOCK_SIZE_OFFSET;
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t alloc_begin = 0;
uint32_t search_count = 0;
- void *alloc_area_begin_ptr = NULL;
- size = _Heap_Calc_block_size( size, heap->page_size, heap->min_block_size );
- if( size == 0 ) {
+ if ( block_size_floor < alloc_size ) {
+ /* Integer overflow occurred */
return NULL;
}
- /*
- * Find large enough free block.
- *
- * Do not bother to mask out the HEAP_PREV_BLOCK_USED bit as it will not
- * change the result of the size comparison.
- */
- while (block != tail && block->size_and_flag < size) {
- _HAssert( _Heap_Is_prev_used( block ));
+ if ( boundary != 0 ) {
+ if ( boundary < alloc_size ) {
+ return NULL;
+ }
- block = block->next;
- ++search_count;
+ if ( alignment == 0 ) {
+ alignment = page_size;
+ }
}
- if (block != tail) {
- _Heap_Block_allocate( heap, block, size );
+ while ( block != free_list_tail ) {
+ _HAssert( _Heap_Is_prev_used( block ) );
+
+ /* Statistics */
+ ++search_count;
+
+ /*
+ * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
+ * field. Thus the value is about one unit larger than the real block
+ * size. The greater than operator takes this into account.
+ */
+ if ( block->size_and_flag > block_size_floor ) {
+ if ( alignment == 0 ) {
+ alloc_begin = _Heap_Alloc_area_of_block( block );
+ } else {
+ alloc_begin = _Heap_Check_block(
+ heap,
+ block,
+ alloc_size,
+ alignment,
+ boundary
+ );
+ }
+ }
+
+ if ( alloc_begin != 0 ) {
+ break;
+ }
+
+ block = block->next;
+ }
- alloc_area_begin_ptr = (void *) _Heap_Alloc_area_of_block( block );
+ if ( alloc_begin != 0 ) {
+ block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
- _HAssert( _Heap_Is_aligned( (uintptr_t) alloc_area_begin_ptr, heap->page_size ));
+ _Heap_Check_allocation(
+ heap,
+ block,
+ alloc_begin,
+ alloc_size,
+ alignment,
+ boundary
+ );
/* Statistics */
- ++stats->allocs;
stats->searches += search_count;
}
/* Statistics */
- if (stats->max_search < search_count) {
+ if ( stats->max_search < search_count ) {
stats->max_search = search_count;
}
- return alloc_area_begin_ptr;
+ return (void *) alloc_begin;
}
diff --git a/cpukit/score/src/heapallocatealigned.c b/cpukit/score/src/heapallocatealigned.c
index 935c3509aa..7b61d723fc 100644
--- a/cpukit/score/src/heapallocatealigned.c
+++ b/cpukit/score/src/heapallocatealigned.c
@@ -1,3 +1,4 @@
+#if 0
/*
* Heap Handler
*
@@ -31,10 +32,10 @@ check_result(
)
{
uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
- uintptr_t const block_end = the_block
+ uintptr_t const block_end = (uintptr_t) the_block
+ _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const user_end = aligned_user_addr + size;
- uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD;
+ uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_BLOCK_HEADER_SIZE;
uintptr_t const heap_end = (uintptr_t) the_heap->final
+ HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const page_size = the_heap->page_size;
@@ -97,7 +98,7 @@ Heap_Block *block_allocate(
/* Don't split the block as remainder is either zero or too small to be
used as a separate free block. Change 'alloc_size' to the size of the
block and remove the block from the list of free blocks. */
- _Heap_Block_remove_from_free_list(the_block);
+ _Heap_Free_list_remove(the_block);
alloc_size = block_size;
stats->free_blocks -= 1;
}
@@ -157,7 +158,7 @@ void *_Heap_Allocate_aligned(
/* Find large enough free block that satisfies the alignment requirements. */
- for (the_block = _Heap_First_free_block(the_heap), search_count = 0;
+ for (the_block = _Heap_Free_list_first(the_heap), search_count = 0;
the_block != tail;
the_block = the_block->next, ++search_count)
{
@@ -220,7 +221,7 @@ void *_Heap_Allocate_aligned(
/* The block is indeed acceptable: calculate the size of the block
to be allocated and perform allocation. */
uintptr_t const alloc_size =
- block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET;
+ block_end - user_addr + HEAP_BLOCK_HEADER_SIZE;
_HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));
@@ -244,3 +245,4 @@ void *_Heap_Allocate_aligned(
return user_ptr;
}
+#endif
diff --git a/cpukit/score/src/heapextend.c b/cpukit/score/src/heapextend.c
index bb3f301235..3541bddcc9 100644
--- a/cpukit/score/src/heapextend.c
+++ b/cpukit/score/src/heapextend.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -28,11 +34,11 @@ Heap_Extend_status _Heap_Extend(
{
Heap_Statistics *const stats = &heap->stats;
uintptr_t const area_begin = (uintptr_t) area_begin_ptr;
- uintptr_t const heap_area_begin = heap->begin;
- uintptr_t const heap_area_end = heap->end;
+ uintptr_t const heap_area_begin = heap->area_begin;
+ uintptr_t const heap_area_end = heap->area_end;
uintptr_t const new_heap_area_end = heap_area_end + area_size;
uintptr_t extend_size = 0;
- Heap_Block *const old_final = heap->final;
+ Heap_Block *const old_final = heap->last_block;
Heap_Block *new_final = NULL;
/*
@@ -60,10 +66,10 @@ Heap_Extend_status _Heap_Extend(
* block and free it.
*/
- heap->end = new_heap_area_end;
+ heap->area_end = new_heap_area_end;
extend_size = new_heap_area_end
- - (uintptr_t) old_final - HEAP_LAST_BLOCK_OVERHEAD;
+ - (uintptr_t) old_final - HEAP_BLOCK_HEADER_SIZE;
extend_size = _Heap_Align_down( extend_size, heap->page_size );
*amount_extended = extend_size;
@@ -74,7 +80,7 @@ Heap_Extend_status _Heap_Extend(
new_final = _Heap_Block_at( old_final, extend_size );
new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
- heap->final = new_final;
+ heap->last_block = new_final;
stats->size += area_size;
++stats->used_blocks;
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index 9d5be9e290..48b54293ea 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,26 +25,18 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
+bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
Heap_Statistics *const stats = &heap->stats;
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
+ uintptr_t alloc_begin = (uintptr_t) alloc_begin_ptr;
Heap_Block *block =
- _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
Heap_Block *next_block = NULL;
uintptr_t block_size = 0;
uintptr_t next_block_size = 0;
bool next_is_free = false;
- if (
- !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final)
- ) {
- _HAssert( alloc_area_begin_ptr != NULL );
- return false;
- }
-
if ( !_Heap_Is_block_in_heap( heap, block ) ) {
- _HAssert( false );
return false;
}
@@ -56,7 +54,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
}
next_block_size = _Heap_Block_size( next_block );
- next_is_free = next_block != heap->final
+ next_is_free = next_block != heap->last_block
&& !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
if ( !_Heap_Is_prev_used( block ) ) {
@@ -77,7 +75,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
if ( next_is_free ) { /* coalesce both */
uintptr_t const size = block_size + prev_size + next_block_size;
- _Heap_Block_remove_from_free_list( next_block );
+ _Heap_Free_list_remove( next_block );
stats->free_blocks -= 1;
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( prev_block, size );
@@ -91,14 +89,14 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
}
} else if ( next_is_free ) { /* coalesce next */
uintptr_t const size = block_size + next_block_size;
- _Heap_Block_replace_in_free_list( next_block, block );
+ _Heap_Free_list_replace( next_block, block );
block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( block, size );
next_block->prev_size = size;
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
- _Heap_Block_insert_after( _Heap_Free_list_head( heap), block );
+ _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = block_size;
diff --git a/cpukit/score/src/heapgetfreeinfo.c b/cpukit/score/src/heapgetfreeinfo.c
index a288529cad..406ed81d39 100644
--- a/cpukit/score/src/heapgetfreeinfo.c
+++ b/cpukit/score/src/heapgetfreeinfo.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2004.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,21 +25,6 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*PAGE
- *
- * _Heap_Get_free_information
- *
- * This heap routine returns information about the free blocks
- * in the specified heap.
- *
- * Input parameters:
- * the_heap - pointer to heap header.
- * info - pointer to the free block information.
- *
- * Output parameters:
- * returns - free block information filled in.
- */
-
void _Heap_Get_free_information(
Heap_Control *the_heap,
Heap_Information *info
@@ -46,7 +37,7 @@ void _Heap_Get_free_information(
info->largest = 0;
info->total = 0;
- for(the_block = _Heap_First_free_block(the_heap);
+ for(the_block = _Heap_Free_list_first(the_heap);
the_block != tail;
the_block = the_block->next)
{
diff --git a/cpukit/score/src/heapgetinfo.c b/cpukit/score/src/heapgetinfo.c
index 7f907170b9..bc3d4cc893 100644
--- a/cpukit/score/src/heapgetinfo.c
+++ b/cpukit/score/src/heapgetinfo.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,27 +25,13 @@
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>
-/*
- * _Heap_Get_information
- *
- * This kernel routine walks the heap and tots up the free and allocated
- * sizes. Derived from _Heap_Walk.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * the_info - pointer for information to be returned
- *
- * Output parameters:
- * *the_info - contains information about heap
- * return 0=success, otherwise heap is corrupt.
- */
-Heap_Get_information_status _Heap_Get_information(
+void _Heap_Get_information(
Heap_Control *the_heap,
Heap_Information_block *the_info
)
{
- Heap_Block *the_block = the_heap->start;
- Heap_Block *const end = the_heap->final;
+ Heap_Block *the_block = the_heap->first_block;
+ Heap_Block *const end = the_heap->last_block;
_HAssert(the_block->prev_size == the_heap->page_size);
_HAssert(_Heap_Is_prev_used(the_block));
@@ -52,7 +44,7 @@ Heap_Get_information_status _Heap_Get_information(
the_info->Used.largest = 0;
while ( the_block != end ) {
- uint32_t const the_size = _Heap_Block_size(the_block);
+ uintptr_t const the_size = _Heap_Block_size(the_block);
Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
Heap_Information *info;
@@ -74,7 +66,5 @@ Heap_Get_information_status _Heap_Get_information(
* "used" as client never allocated it. Make 'Used.total' contain this
* blocks' overhead though.
*/
- the_info->Used.total += HEAP_LAST_BLOCK_OVERHEAD;
-
- return HEAP_GET_INFORMATION_SUCCESSFUL;
+ the_info->Used.total += HEAP_BLOCK_HEADER_SIZE;
}
diff --git a/cpukit/score/src/heapresizeblock.c b/cpukit/score/src/heapresizeblock.c
index 8916bbe12c..2f26589667 100644
--- a/cpukit/score/src/heapresizeblock.c
+++ b/cpukit/score/src/heapresizeblock.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,129 +27,163 @@
Heap_Resize_status _Heap_Resize_block(
Heap_Control *heap,
- void *alloc_area_begin_ptr,
- uintptr_t size,
- uintptr_t *old_mem_size,
- uintptr_t *avail_mem_size
+ void *alloc_begin_ptr,
+ uintptr_t new_alloc_size,
+ uintptr_t *old_size,
+ uintptr_t *new_size
)
{
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
- Heap_Block *block;
- Heap_Block *next_block;
- uintptr_t next_block_size;
- bool next_is_used;
- Heap_Block *next_next_block;
- uintptr_t old_block_size;
- uintptr_t old_user_size;
- uintptr_t prev_used_flag;
Heap_Statistics *const stats = &heap->stats;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const page_size = heap->page_size;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
+ Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
+ Heap_Block *next_block = NULL;
+ Heap_Block *next_next_block = NULL;
+ uintptr_t block_size = 0;
+ uintptr_t block_end = 0;
+ uintptr_t next_block_size = 0;
+  bool next_block_is_used = false;
+ uintptr_t alloc_size = 0;
+ uintptr_t prev_block_used_flag = 0;
+
+ *old_size = 0;
+ *new_size = 0;
+
+ if ( !_Heap_Is_block_in_heap( heap, block ) ) {
+ return HEAP_RESIZE_FATAL_ERROR;
+ }
- *old_mem_size = 0;
- *avail_mem_size = 0;
+ block_size = _Heap_Block_size( block );
+ block_end = (uintptr_t) block + block_size;
+ prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
+ next_block = _Heap_Block_at( block, block_size );
- block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size);
- _HAssert(_Heap_Is_block_in_heap(heap, block));
- if (!_Heap_Is_block_in_heap(heap, block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
+ _HAssert( _Heap_Is_prev_used( next_block ) );
- prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
- old_block_size = _Heap_Block_size(block);
- next_block = _Heap_Block_at(block, old_block_size);
+ next_block_size = _Heap_Block_size( next_block );
+ next_next_block = _Heap_Block_at( next_block, next_block_size );
- _HAssert(_Heap_Is_block_in_heap(heap, next_block));
- _HAssert(_Heap_Is_prev_used(next_block));
- if ( !_Heap_Is_block_in_heap(heap, next_block) ||
- !_Heap_Is_prev_used(next_block))
- return HEAP_RESIZE_FATAL_ERROR;
+ _HAssert(
+ next_block == heap->last_block
+ || _Heap_Is_block_in_heap( heap, next_next_block )
+ );
- next_block_size = _Heap_Block_size(next_block);
- next_next_block = _Heap_Block_at(next_block, next_block_size);
- next_is_used = (next_block == heap->final) ||
- _Heap_Is_prev_used(next_next_block);
+ next_block_is_used = next_block == heap->last_block
+ || _Heap_Is_prev_used( next_next_block );
- /* See _Heap_Size_of_alloc_area() source for explanations */
- old_user_size = (uintptr_t) next_block - alloc_area_begin
- + HEAP_BLOCK_SIZE_OFFSET;
+ alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
- *old_mem_size = old_user_size;
+ *old_size = alloc_size;
- if (size > old_user_size) {
- /* Need to extend the block: allocate part of the next block and then
- merge 'block' and allocated block together. */
- if (next_is_used) /* Next block is in use, -- no way to extend */
+ if ( new_alloc_size > alloc_size ) {
+ /*
+ * Need to extend the block: allocate part of the next block and then
+ * merge the blocks.
+ */
+ if ( next_block_is_used ) {
return HEAP_RESIZE_UNSATISFIED;
- else {
+ } else {
uintptr_t add_block_size =
- _Heap_Align_up(size - old_user_size, page_size);
- if (add_block_size < min_block_size)
+ _Heap_Align_up( new_alloc_size - alloc_size, page_size );
+
+ if ( add_block_size < min_block_size ) {
add_block_size = min_block_size;
- if (add_block_size > next_block_size)
- return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
- add_block_size =
- _Heap_Block_allocate(heap, next_block, add_block_size);
- /* Merge two subsequent blocks */
- block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
+ }
+
+ if ( add_block_size > next_block_size ) {
+ return HEAP_RESIZE_UNSATISFIED;
+ }
+
+ next_block = _Heap_Block_allocate(
+ heap,
+ next_block,
+ _Heap_Alloc_area_of_block( next_block ),
+ add_block_size - HEAP_BLOCK_HEADER_SIZE
+ );
+
+ /* Merge the blocks */
+ block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
+ | prev_block_used_flag;
+
+ /* Statistics */
--stats->used_blocks;
}
} else {
-
/* Calculate how much memory we could free */
uintptr_t free_block_size =
- _Heap_Align_down(old_user_size - size, page_size);
+ _Heap_Align_down( alloc_size - new_alloc_size, page_size );
- if (free_block_size > 0) {
+ if ( free_block_size > 0 ) {
+ /*
+ * To free some memory the block should be shortened so that it can can
+ * hold 'new_alloc_size' user bytes and still remain not shorter than
+ * 'min_block_size'.
+ */
+ uintptr_t new_block_size = block_size - free_block_size;
- /* To free some memory the block should be shortened so that it can
- can hold 'size' user bytes and still remain not shorter than
- 'min_block_size'. */
+ if ( new_block_size < min_block_size ) {
+ uintptr_t const delta = min_block_size - new_block_size;
- uintptr_t new_block_size = old_block_size - free_block_size;
+ _HAssert( free_block_size >= delta );
- if (new_block_size < min_block_size) {
- uintptr_t delta = min_block_size - new_block_size;
- _HAssert(free_block_size >= delta);
free_block_size -= delta;
- if (free_block_size == 0) {
+
+ if ( free_block_size == 0 ) {
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}
+
new_block_size += delta;
}
- _HAssert(new_block_size >= min_block_size);
- _HAssert(new_block_size + free_block_size == old_block_size);
- _HAssert(_Heap_Is_aligned(new_block_size, page_size));
- _HAssert(_Heap_Is_aligned(free_block_size, page_size));
+ _HAssert( new_block_size >= min_block_size );
+ _HAssert( new_block_size + free_block_size == block_size );
+ _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
+ _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
- if (!next_is_used) {
- /* Extend the next block to the low addresses by 'free_block_size' */
+ if ( !next_block_is_used ) {
+ /* Extend the next block */
Heap_Block *const new_next_block =
- _Heap_Block_at(block, new_block_size);
+ _Heap_Block_at( block, new_block_size );
uintptr_t const new_next_block_size =
next_block_size + free_block_size;
- _HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
- block->size_and_flag = new_block_size | prev_used_flag;
- new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
+
+ _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
+
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ new_next_block->size_and_flag =
+ new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
- _Heap_Block_replace_in_free_list(next_block, new_next_block);
- heap->stats.free_size += free_block_size;
- *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
-
- } else if (free_block_size >= min_block_size) {
- /* Split the block into 2 used parts, then free the second one. */
- block->size_and_flag = new_block_size | prev_used_flag;
- next_block = _Heap_Block_at(block, new_block_size);
+
+ _Heap_Free_list_replace( next_block, new_next_block );
+
+ *new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
+ stats->free_size += free_block_size;
+ } else if ( free_block_size >= min_block_size ) {
+ /* Split the block into two used parts, then free the second one */
+ block->size_and_flag = new_block_size | prev_block_used_flag;
+ next_block = _Heap_Block_at( block, new_block_size );
next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
+
+ _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
+
+ *new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
+
+ /* Statistics */
++stats->used_blocks; /* We have created used block */
- --stats->frees; /* Don't count next call in stats */
- _Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
- *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
+ --stats->frees; /* Do not count next call in stats */
}
}
}
+ /* Statistics */
++stats->resizes;
+
return HEAP_RESIZE_SUCCESSFUL;
}
diff --git a/cpukit/score/src/heapsizeofuserarea.c b/cpukit/score/src/heapsizeofuserarea.c
index be51255eee..7c297a77e6 100644
--- a/cpukit/score/src/heapsizeofuserarea.c
+++ b/cpukit/score/src/heapsizeofuserarea.c
@@ -1,6 +1,12 @@
-/*
- * Heap Handler
+/**
+ * @file
+ *
+ * @ingroup ScoreHeap
*
+ * @brief Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,24 +27,16 @@
bool _Heap_Size_of_alloc_area(
Heap_Control *heap,
- void *alloc_area_begin_ptr,
- uintptr_t *size
+ void *alloc_begin_ptr,
+ uintptr_t *alloc_size
)
{
- uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
- Heap_Block *block =
- _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
+ Heap_Block *block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
Heap_Block *next_block = NULL;
uintptr_t block_size = 0;
- if (
- !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final )
- ) {
- return false;
- }
-
-
- _HAssert(_Heap_Is_block_in_heap( heap, block ));
if ( !_Heap_Is_block_in_heap( heap, block ) ) {
return false;
}
@@ -46,26 +44,14 @@ bool _Heap_Size_of_alloc_area(
block_size = _Heap_Block_size( block );
next_block = _Heap_Block_at( block, block_size );
- _HAssert( _Heap_Is_block_in_heap( heap, next_block ));
- _HAssert( _Heap_Is_prev_used( next_block ));
if (
- !_Heap_Is_block_in_heap( heap, next_block ) ||
- !_Heap_Is_prev_used( next_block )
+ !_Heap_Is_block_in_heap( heap, next_block )
+ || !_Heap_Is_prev_used( next_block )
) {
return false;
}
- /*
- * 'alloc_area_begin' could be greater than 'block' address plus
- * HEAP_BLOCK_ALLOC_AREA_OFFSET as _Heap_Allocate_aligned() may produce such
- * user pointers. To get rid of this offset we calculate user size as
- * difference between the end of 'block' (='next_block') and
- * 'alloc_area_begin' and then add correction equal to the offset of the
- * 'size' field of the 'Heap_Block' structure. The correction is due to the
- * fact that 'prev_size' field of the next block is actually used as user
- * accessible area of 'block'.
- */
- *size = (uintptr_t) next_block - alloc_area_begin + HEAP_BLOCK_SIZE_OFFSET;
+ *alloc_size = (uintptr_t) next_block + HEAP_BLOCK_SIZE_OFFSET - alloc_begin;
return true;
}
diff --git a/cpukit/score/src/heapwalk.c b/cpukit/score/src/heapwalk.c
index a6628df0b3..dd255e1a17 100644
--- a/cpukit/score/src/heapwalk.c
+++ b/cpukit/score/src/heapwalk.c
@@ -1,8 +1,14 @@
-/*
- * Heap Handler
+/**
+ * @file
*
- * COPYRIGHT (c) 1989-2007.
- * On-Line Applications Research Corporation (OAR).
+ * @ingroup ScoreHeap
+ *
+ * @brief Heap Handler implementation.
+ */
+
+/*
+ * COPYRIGHT (c) 1989-2007.
+ * On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -22,197 +28,446 @@
#include <rtems/score/interr.h>
#include <rtems/bspIo.h>
-#if defined(__GNUC__)
- #define DO_NOT_INLINE __attribute__((__noinline__))
-#else
- #define DO_NOT_INLINE
-#endif
-/*
- * Helper to avoid introducing even more branches and paths in this
- * code to do coverage analysis on.
- *
- * We do not want this inlined.
- */
-static void hw_nl(
- int error,
- bool do_dump
-) DO_NOT_INLINE;
+static void _Heap_Walk_printk( int source, bool dump, bool error, const char *fmt, ... )
+{
+ if ( dump ) {
+ va_list ap;
-/*PAGE
- *
- * _Heap_Walk
- *
- * This kernel routine walks the heap and verifies its correctness.
- *
- * Input parameters:
- * the_heap - pointer to heap header
- * source - a numeric indicator of the invoker of this routine
- * do_dump - when true print the information
- *
- * Output parameters: NONE
- */
+ if ( error ) {
+ printk( "FAIL[%d]: ", source );
+ } else {
+ printk( "PASS[%d]: ", source );
+ }
-bool _Heap_Walk(
- Heap_Control *the_heap,
- int source,
- bool do_dump
+ va_start( ap, fmt );
+ vprintk( fmt, ap );
+ va_end( ap );
+ }
+}
+
+static bool _Heap_Walk_check_free_list(
+ int source,
+ bool dump,
+ Heap_Control *heap
)
{
- Heap_Block *the_block = the_heap->start;
- Heap_Block *const end = the_heap->final;
- Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
- int error = 0;
- int passes = 0;
-
- /* FIXME: Why is this disabled? */
- do_dump = false;
-
- /* FIXME: Why is this disabled? */
- /*
- * We don't want to allow walking the heap until we have
- * transferred control to the user task so we watch the
- * system state.
- */
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ const Heap_Block *free_block = first_free_block;
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const loop_limit =
+ ((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
+ / heap->min_block_size;
+ uintptr_t loop_counter = 0;
-/*
- if ( !_System_state_Is_up( _System_state_Get() ) )
- return true;
-*/
+ while ( free_block != free_list_tail && loop_counter < loop_limit ) {
+ if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: not in heap\n",
+ free_block
+ );
- /* FIXME: Reason for this? */
- if (source < 0)
- source = (int) the_heap->stats.instance;
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( free_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free block 0x%08x: alloc area not page aligned\n",
+ free_block
+ );
- if (do_dump)
- printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
- source, the_block, end,
- _Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
- the_heap->begin, the_heap->end);
+ return false;
+ }
- /*
- * Handle the 1st block
- */
+ ++loop_counter;
- if (!_Heap_Is_prev_used(the_block)) {
- printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
- error = 1;
+ free_block = free_block->next;
}
- if (the_block->prev_size != the_heap->page_size) {
- printk("PASS: %d !prev_size of 1st block isn't page_size\n", source);
- error = 1;
+ if ( loop_counter >= loop_limit ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "free list contains a loop\n"
+ );
+
+ return false;
}
- while ( the_block != end ) {
- uint32_t const the_size = _Heap_Block_size(the_block);
- Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
- bool prev_used = _Heap_Is_prev_used(the_block);
+ return true;
+}
- if (do_dump) {
- printk("PASS: %d block %p size %d(%c)",
- source, the_block, the_size, (prev_used ? 'U' : 'F'));
- if (prev_used)
- printk(" prev_size %d", the_block->prev_size);
- else
- printk(" (prev_size) %d", the_block->prev_size);
+static bool _Heap_Walk_is_in_free_list(
+ Heap_Control *heap,
+ Heap_Block *block
+)
+{
+ const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ const Heap_Block *free_block = _Heap_Free_list_first( heap );
+
+ while ( free_block != free_list_tail ) {
+ if ( free_block == block ) {
+ return true;
}
+ free_block = free_block->next;
+ }
+ return false;
+}
- if (!_Addresses_Is_aligned(next_block) ) {
- printk("PASS: %d next_block %p is not aligned\n", source, next_block);
- error = 1;
- break;
- }
-
- if (!_Heap_Is_prev_used(next_block)) {
- if (do_dump)
- printk( " prev %p next %p", the_block->prev, the_block->next);
- if (_Heap_Block_size(the_block) != next_block->prev_size) {
- if (do_dump) printk("\n");
- printk("PASS: %d !front and back sizes don't match", source);
- error = 1;
- }
- if (!prev_used) {
-
- hw_nl(do_dump, error);
- printk("PASS: %d !two consecutive blocks are free", source);
- error = 1;
- }
+static bool _Heap_Walk_check_control(
+ int source,
+ bool dump,
+ Heap_Control *heap
+)
+{
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const first_block = heap->first_block;
+ Heap_Block *const last_block = heap->last_block;
- { /* Check if 'the_block' is in the free block list */
- Heap_Block* block = _Heap_First_free_block(the_heap);
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d first free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- while(block != the_block && block != tail) {
- if (!_Addresses_Is_aligned(block) ) {
- printk(
- "PASS: %d a free block %p is not aligned\n", source, block);
- error = 1;
- break;
- }
- if (!_Heap_Is_block_in_heap(the_heap, block)) {
- printk("PASS: %d a free block %p is not in heap\n", source, block);
- error = 1;
- break;
- }
- block = block->next;
- }
- if (block != the_block) {
- hw_nl(do_dump, error);
- printk("PASS: %d !the_block not in the free list", source);
- error = 1;
- }
- }
+ _Heap_Walk_printk(
+ source,
+ dump,
+ false,
+ "page size %u, min block size %u\n"
+ "\tarea begin 0x%08x, area end 0x%08x\n"
+ "\tfirst block 0x%08x, last block 0x%08x\n"
+ "\tfirst free 0x%08x, last free 0x%08x\n",
+ page_size, min_block_size,
+ heap->area_begin, heap->area_end,
+ first_block, last_block,
+ first_free_block, last_free_block
+ );
- }
- hw_nl(do_dump, error);
+ if ( page_size == 0 ) {
+ _Heap_Walk_printk( source, dump, true, "page size is zero\n" );
- if (the_size < the_heap->min_block_size) {
- printk("PASS: %d !block size is too small\n", source);
- error = 1;
- break;
- }
- if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
- printk("PASS: %d !block size is misaligned\n", source);
- error = 1;
- }
+ return false;
+ }
- if (++passes > (do_dump ? 10 : 0) && error)
- break;
+ if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "page size %u not CPU aligned\n",
+ page_size
+ );
- the_block = next_block;
+ return false;
}
- if (the_block != end) {
- printk("PASS: %d !last block address isn't equal to 'final' %p %p\n",
- source, the_block, end);
- error = 1;
+ if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "min block size %u not page aligned\n",
+ min_block_size
+ );
+
+ return false;
}
- if (_Heap_Block_size(the_block) != the_heap->page_size) {
- printk("PASS: %d !last block's size isn't page_size (%d != %d)\n", source,
- _Heap_Block_size(the_block), the_heap->page_size);
- error = 1;
+ if (
+ first_free_block != free_list_head
+ && !_Addresses_Is_aligned( first_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first free block: 0x%08x not CPU aligned\n",
+ first_free_block
+ );
+
+ return false;
}
- if (do_dump && error)
- _Internal_error_Occurred( INTERNAL_ERROR_CORE, true, 0xffff0000 );
+ if (
+ last_free_block != free_list_tail
+ && !_Addresses_Is_aligned( last_free_block )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last free block: 0x%08x not CPU aligned\n",
+ last_free_block
+ );
+
+ return false;
+ }
- return error;
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: 0x%08x not page aligned\n",
+ first_block
+ );
+ return false;
+ }
+
+ if (
+ !_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
+ ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "last block: 0x%08x not page aligned\n",
+ last_block
+ );
+
+ return false;
+ }
+
+ if ( !_Heap_Is_prev_used( first_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: HEAP_PREV_BLOCK_USED is cleared\n"
+ );
+ }
+
+ if ( first_block->prev_size != page_size ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ true,
+ "first block: prev size %u != page size %u\n",
+ first_block->prev_size,
+ page_size
+ );
+ }
+
+ return _Heap_Walk_check_free_list( source, dump, heap );
}
-/*
- * This method exists to simplify branch paths in the generated code above.
- */
-static void hw_nl(
- int error,
- bool do_dump
+bool _Heap_Walk(
+ Heap_Control *heap,
+ int source,
+ bool dump
)
{
- if (do_dump || error) printk("\n");
+ uintptr_t const page_size = heap->page_size;
+ uintptr_t const min_block_size = heap->min_block_size;
+ Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
+ Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
+ Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
+ Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
+ Heap_Block *const last_block = heap->last_block;
+ Heap_Block *block = heap->first_block;
+ bool error = false;
+
+ if ( !_System_state_Is_up( _System_state_Get() ) ) {
+ return true;
+ }
+
+ if ( !_Heap_Walk_check_control( source, dump, heap ) ) {
+ return false;
+ }
+
+ while ( block != last_block && _Addresses_Is_aligned( block ) ) {
+ uintptr_t const block_begin = (uintptr_t) block;
+ uintptr_t const block_size = _Heap_Block_size( block );
+ bool const prev_used = _Heap_Is_prev_used( block );
+ Heap_Block *const next_block = _Heap_Block_at( block, block_size );
+ uintptr_t const next_block_begin = (uintptr_t) next_block;
+
+ if ( prev_used ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u\n",
+ block,
+ block_size
+ );
+ } else {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u, prev_size %u\n",
+ block,
+ block_size,
+ block->prev_size
+ );
+ }
+
+ if (
+ !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
+ ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not page (%u) aligned\n",
+ block,
+ page_size
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_aligned( block_size, page_size ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: block size %u not page (%u) aligned\n",
+ block,
+ block_size,
+ page_size
+ );
+ break;
+ }
+
+ if ( block_size < min_block_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u < min block size %u\n",
+ block,
+ block_size,
+ min_block_size
+ );
+ break;
+ }
+
+ if ( next_block_begin <= block_begin ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: next block 0x%08x is not a successor\n",
+ block,
+ next_block
+ );
+ break;
+ }
+
+ if ( !_Heap_Is_prev_used( next_block ) ) {
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
+ block,
+ block->prev,
+ block->prev == first_free_block ?
+ " (= first)"
+ : (block->prev == free_list_head ? " (= head)" : ""),
+ block->next,
+ block->next == last_free_block ?
+ " (= last)"
+ : (block->next == free_list_tail ? " (= tail)" : "")
+ );
+
+ if ( block_size != next_block->prev_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
+ block,
+ block_size,
+ next_block->prev_size,
+ next_block
+ );
+ }
+
+ if ( !prev_used ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: two consecutive blocks are free\n",
+ block
+ );
+ }
+
+ if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: free block not in free list\n",
+ block
+ );
+ }
+ }
+
+ block = next_block;
+ }
+
+ if ( !_Addresses_Is_aligned( block ) ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "block 0x%08x: not CPU aligned\n",
+ block
+ );
+
+ return false;
+ }
+
+ if ( block == last_block ) {
+ uintptr_t const block_size = _Heap_Block_size( block );
+
+ if ( block_size != page_size ) {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x: size %u != page size %u\n",
+ block,
+ block_size,
+ page_size
+ );
+ }
+ } else {
+ error = true;
+ _Heap_Walk_printk(
+ source,
+ dump,
+ error,
+ "last block 0x%08x != last block 0x%08x\n",
+ block,
+ last_block
+ );
+ }
+
+ return !error;
}
diff --git a/cpukit/score/src/pheapallocate.c b/cpukit/score/src/pheapallocate.c
index f864a4a435..50d560f3a7 100644
--- a/cpukit/score/src/pheapallocate.c
+++ b/cpukit/score/src/pheapallocate.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -18,7 +26,7 @@
void *_Protected_heap_Allocate(
Heap_Control *the_heap,
- intptr_t size
+ uintptr_t size
)
{
void *p;
diff --git a/cpukit/score/src/pheapallocatealigned.c b/cpukit/score/src/pheapallocatealigned.c
index 97873f9b29..756d8a8aa2 100644
--- a/cpukit/score/src/pheapallocatealigned.c
+++ b/cpukit/score/src/pheapallocatealigned.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -18,8 +26,8 @@
void *_Protected_heap_Allocate_aligned(
Heap_Control *the_heap,
- intptr_t size,
- uint32_t alignment
+ uintptr_t size,
+ uintptr_t alignment
)
{
void *p;
diff --git a/cpukit/score/src/pheapextend.c b/cpukit/score/src/pheapextend.c
index d391018d0d..c2aa9735b9 100644
--- a/cpukit/score/src/pheapextend.c
+++ b/cpukit/score/src/pheapextend.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,11 +27,11 @@
bool _Protected_heap_Extend(
Heap_Control *the_heap,
void *starting_address,
- intptr_t size
+ uintptr_t size
)
{
Heap_Extend_status status;
- intptr_t amount_extended;
+ uintptr_t amount_extended;
_RTEMS_Lock_allocator();
status = _Heap_Extend(the_heap, starting_address, size, &amount_extended);
diff --git a/cpukit/score/src/pheapfree.c b/cpukit/score/src/pheapfree.c
index 1548547855..eb57ded669 100644
--- a/cpukit/score/src/pheapfree.c
+++ b/cpukit/score/src/pheapfree.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapgetblocksize.c b/cpukit/score/src/pheapgetblocksize.c
index 21727d77ac..0591abdd6e 100644
--- a/cpukit/score/src/pheapgetblocksize.c
+++ b/cpukit/score/src/pheapgetblocksize.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,10 +27,10 @@
bool _Protected_heap_Get_block_size(
Heap_Control *the_heap,
void *starting_address,
- intptr_t *size
+ uintptr_t *size
)
{
- bool status;
+ bool status;
_RTEMS_Lock_allocator();
status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );
diff --git a/cpukit/score/src/pheapgetfreeinfo.c b/cpukit/score/src/pheapgetfreeinfo.c
index 4b211d93dd..e95bffe0ba 100644
--- a/cpukit/score/src/pheapgetfreeinfo.c
+++ b/cpukit/score/src/pheapgetfreeinfo.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapgetinfo.c b/cpukit/score/src/pheapgetinfo.c
index 1d62fc0673..2c4a287edd 100644
--- a/cpukit/score/src/pheapgetinfo.c
+++ b/cpukit/score/src/pheapgetinfo.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -21,8 +29,6 @@ bool _Protected_heap_Get_information(
Heap_Information_block *the_info
)
{
- Heap_Get_information_status status;
-
if ( !the_heap )
return false;
@@ -30,11 +36,8 @@ bool _Protected_heap_Get_information(
return false;
_RTEMS_Lock_allocator();
- status = _Heap_Get_information( the_heap, the_info );
+ _Heap_Get_information( the_heap, the_info );
_RTEMS_Unlock_allocator();
- if ( status == HEAP_GET_INFORMATION_SUCCESSFUL )
- return true;
-
- return false;
+ return true;
}
diff --git a/cpukit/score/src/pheapgetsize.c b/cpukit/score/src/pheapgetsize.c
index c283d34c13..fbf90ea3a4 100644
--- a/cpukit/score/src/pheapgetsize.c
+++ b/cpukit/score/src/pheapgetsize.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
@@ -16,7 +24,7 @@
#include <rtems/system.h>
#include <rtems/score/protectedheap.h>
-uint32_t _Protected_heap_Get_size(
+uintptr_t _Protected_heap_Get_size(
Heap_Control *the_heap
)
{
diff --git a/cpukit/score/src/pheapinit.c b/cpukit/score/src/pheapinit.c
index 1d99fcd5f2..f2a11a7be9 100644
--- a/cpukit/score/src/pheapinit.c
+++ b/cpukit/score/src/pheapinit.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
diff --git a/cpukit/score/src/pheapresizeblock.c b/cpukit/score/src/pheapresizeblock.c
index 45e7861509..c229f31731 100644
--- a/cpukit/score/src/pheapresizeblock.c
+++ b/cpukit/score/src/pheapresizeblock.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*
@@ -19,12 +27,12 @@
bool _Protected_heap_Resize_block(
Heap_Control *the_heap,
void *starting_address,
- intptr_t size
+ uintptr_t size
)
{
Heap_Resize_status status;
- intptr_t old_mem_size;
- intptr_t avail_mem_size;
+ uintptr_t old_mem_size;
+ uintptr_t avail_mem_size;
_RTEMS_Lock_allocator();
status = _Heap_Resize_block(
diff --git a/cpukit/score/src/pheapwalk.c b/cpukit/score/src/pheapwalk.c
index 4ddd5d9f90..e86874192f 100644
--- a/cpukit/score/src/pheapwalk.c
+++ b/cpukit/score/src/pheapwalk.c
@@ -1,4 +1,12 @@
/**
+ * @file
+ *
+ * @ingroup ScoreProtHeap
+ *
+ * @brief Protected Heap Handler implementation.
+ */
+
+/*
* COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR).
*