summaryrefslogtreecommitdiffstats
path: root/cpukit/score/inline
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2005-01-20 18:22:29 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2005-01-20 18:22:29 +0000
commit962e894f4ed0836a5aad472805682bb2f2c90201 (patch)
tree2bb745c9bed1353cb59f88e00da9962e649009bf /cpukit/score/inline
parent2005-01-20 Joel Sherrill <joel@OARcorp.com> (diff)
downloadrtems-962e894f4ed0836a5aad472805682bb2f2c90201.tar.bz2
2005-01-20 Sergei Organov <osv@topconrd.ru>
PR 536/rtems Heap manager re-implementation to consume less memory and still satisfy alignment requirements. * score/src/heap.c, score/src/heapallocate.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetinfo.c, score/src/heapgetfreeinfo.c, score/src/heapsizeofuserarea.c, score/src/heapwalk.c, score/macros/rtems/score/heap.inl, score/inline/rtems/score/heap.inl, score/include/rtems/score/heap.h: Reimplemented. * score/src/heapallocatealigned.c: new file * score/Makefile.am: HEAP_C_FILES: add score/src/heapallocatealigned.c
Diffstat (limited to 'cpukit/score/inline')
-rw-r--r-- cpukit/score/inline/rtems/score/heap.inl | 236
1 files changed, 162 insertions, 74 deletions
diff --git a/cpukit/score/inline/rtems/score/heap.inl b/cpukit/score/inline/rtems/score/heap.inl
index db38500a96..80df3590b7 100644
--- a/cpukit/score/inline/rtems/score/heap.inl
+++ b/cpukit/score/inline/rtems/score/heap.inl
@@ -34,7 +34,7 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Head (
Heap_Control *the_heap
)
{
- return (Heap_Block *)&the_heap->start;
+ return &the_heap->free_list;
}
/**
@@ -45,166 +45,254 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Tail (
Heap_Control *the_heap
)
{
- return (Heap_Block *)&the_heap->final;
+ return &the_heap->free_list;
}
/**
- * This function returns the address of the block which physically
- * precedes the_block in memory.
+ * Return the first free block of the specified heap.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Previous_block (
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First (
+ Heap_Control *the_heap
+)
+{
+ return _Heap_Head(the_heap)->next;
+}
+
+/*PAGE
+ *
+ * _Heap_Last
+ *
+ * DESCRIPTION:
+ *
+ * Return the last free block of the specified heap.
+ */
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last (
+ Heap_Control *the_heap
+)
+{
+ return _Heap_Tail(the_heap)->prev;
+}
+
+/*PAGE
+ *
+ * _Heap_Block_remove
+ *
+ * DESCRIPTION:
+ *
+ * This function removes 'the_block' from doubly-linked list.
+ */
+
+RTEMS_INLINE_ROUTINE void _Heap_Block_remove (
Heap_Block *the_block
)
{
- return (Heap_Block *) _Addresses_Subtract_offset(
- (void *)the_block,
- the_block->back_flag & ~ HEAP_BLOCK_USED
- );
+ Heap_Block *block = the_block;
+
+ Heap_Block *next = block->next;
+ Heap_Block *prev = block->prev;
+ prev->next = next;
+ next->prev = prev;
}
/**
- * This function returns the address of the block which physically
- * follows the_block in memory.
- *
- * @note Next_block assumes that the block is free.
+ * This function replaces @a old_block by @a new_block in doubly-linked list.
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Next_block (
+RTEMS_INLINE_ROUTINE void _Heap_Block_replace (
+ Heap_Block *old_block,
+ Heap_Block *new_block
+)
+{
+ Heap_Block *block = old_block;
+ Heap_Block *next = block->next;
+ Heap_Block *prev = block->prev;
+
+ block = new_block;
+ block->next = next;
+ block->prev = prev;
+ next->prev = prev->next = block;
+}
+
+/**
+ * This function inserts @a the_block after @a prev_block
+ * in doubly-linked list.
+ */
+RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after (
+ Heap_Block *prev_block,
Heap_Block *the_block
)
{
- return (Heap_Block *) _Addresses_Add_offset(
- (void *)the_block,
- the_block->front_flag & ~ HEAP_BLOCK_USED
- );
+ Heap_Block *prev = prev_block;
+ Heap_Block *block = the_block;
+
+ Heap_Block *next = prev->next;
+ block->next = next;
+ block->prev = prev;
+ next->prev = prev->next = block;
}
/**
- * This function calculates and returns a block's location (address)
- * in the heap based upon a base address and an offset.
+ * Return TRUE if @a value is a multiple of @a alignment, FALSE otherwise
*/
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
- void *base,
- uint32_t offset
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned (
+ uint32_t value,
+ uint32_t alignment
)
{
- return (Heap_Block *) _Addresses_Add_offset( (void *)base, offset );
+ return (value % alignment) == 0;
}
/**
- * XXX
+ * Align @a *value up to the nearest multiple of @a alignment.
*/
-
-RTEMS_INLINE_ROUTINE Heap_Block *_Heap_User_block_at(
- void *base
+
+RTEMS_INLINE_ROUTINE void _Heap_Align_up (
+ uint32_t *value,
+ uint32_t alignment
)
{
- uint32_t offset;
-
- offset = *(((uint32_t *) base) - 1);
- return _Heap_Block_at( base, -offset + -HEAP_BLOCK_USED_OVERHEAD);
+ uint32_t v = *value;
+ uint32_t a = alignment;
+ uint32_t r = v % a;
+ *value = r ? v - r + a : v;
}
/**
- * This function returns TRUE if the previous block of the_block
- * is free, and FALSE otherwise.
+ * Align @a *value down to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_previous_block_free (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_down (
+ uint32_t *value,
+ uint32_t alignment
)
{
- return !(the_block->back_flag & HEAP_BLOCK_USED);
+ uint32_t v = *value;
+ *value = v - (v % alignment);
}
/**
- * This function returns TRUE if the block is free, and FALSE otherwise.
+ * Return TRUE if @a ptr is aligned at @a alignment boundary,
+ * FALSE otherwise
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_free (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned_ptr (
+ void *ptr,
+ uint32_t alignment
)
{
- return !(the_block->front_flag & HEAP_BLOCK_USED);
+ return (_H_p2u(ptr) % alignment) == 0;
}
/**
- * This function returns TRUE if the block is currently allocated,
- * and FALSE otherwise.
+ * Align @a *value up to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_used (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_up_uptr (
+ _H_uptr_t *value,
+ uint32_t alignment
)
{
- return (the_block->front_flag & HEAP_BLOCK_USED);
+ _H_uptr_t v = *value;
+ uint32_t a = alignment;
+ _H_uptr_t r = v % a;
+ *value = r ? v - r + a : v;
}
/**
- * This function returns the size of the_block in bytes.
+ * Align @a *value down to the nearest multiple of @a alignment.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Align_down_uptr (
+ _H_uptr_t *value,
+ uint32_t alignment
+)
+{
+ _H_uptr_t v = *value;
+ *value = v - (v % alignment);
+}
+
+/**
+ * This function calculates and returns a block's location (address)
+ * in the heap based upon a base address @a base and an @a offset.
+ */
+
+RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
+ void *base,
+ uint32_t offset
)
{
- return (the_block->front_flag & ~HEAP_BLOCK_USED);
+ return (Heap_Block *) _Addresses_Add_offset( base, offset );
}
/**
- * This function returns the starting address of the portion of the block
+ * This function returns the starting address of the portion of @a the_block
* which the user may access.
*/
-RTEMS_INLINE_ROUTINE void *_Heap_Start_of_user_area (
+RTEMS_INLINE_ROUTINE void *_Heap_User_area (
Heap_Block *the_block
)
{
- return (void *) &the_block->next;
+ return (void *) _Addresses_Add_offset ( the_block, HEAP_BLOCK_USER_OFFSET );
}
/**
- * This function returns TRUE if the_block is within the memory area
- * managed by the_heap, and FALSE otherwise.
+ * Fill @a *the_block with the address of the beginning of the block given
+ * pointer to the user accessible area @a base.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_in (
- Heap_Control *the_heap,
- Heap_Block *the_block
+RTEMS_INLINE_ROUTINE void _Heap_Start_of_block (
+ Heap_Control *the_heap,
+ void *base,
+ Heap_Block **the_block
)
{
- return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
+ _H_uptr_t addr = _H_p2u(base);
+ /* The address passed could be greater than the block address plus
+ * HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
+ * pointers. To get rid of this offset we need to align the address down
+ * to the nearest 'page_size' boundary. */
+ _Heap_Align_down_uptr ( &addr, the_heap->page_size );
+ *the_block = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET);
+}
+
+/**
+ * This function returns TRUE if the previous block of @a the_block
+ * is in use, and FALSE otherwise.
+ */
+
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_prev_used (
+ Heap_Block *the_block
+)
+{
+ return (the_block->size & HEAP_PREV_USED);
}
/**
- * This function validates a specified heap page size. If the page size
- * is 0 or if lies outside a page size alignment boundary it is invalid
- * and FALSE is returned. Otherwise, the page size is valid and TRUE is
- * returned.
+ * This function returns the size of @a the_block in bytes.
*/
-RTEMS_INLINE_ROUTINE boolean _Heap_Is_page_size_valid(
- uint32_t page_size
+RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
+ Heap_Block *the_block
)
{
- return ((page_size != 0) &&
- ((page_size % CPU_HEAP_ALIGNMENT) == 0));
+ return (the_block->size & ~HEAP_PREV_USED);
}
/**
- * This function returns the block flag composed of size and in_use_flag.
- * The flag returned is suitable for use as a back or front flag in a
- * heap block.
+ * This function returns TRUE if @a the_block is within the memory area
+ * managed by @a the_heap, and FALSE otherwise.
*/
-RTEMS_INLINE_ROUTINE uint32_t _Heap_Build_flag (
- uint32_t size,
- uint32_t in_use_flag
+RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_in (
+ Heap_Control *the_heap,
+ Heap_Block *the_block
)
{
- return size | in_use_flag;
+ return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
}
/**@}*/