summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--cpukit/ChangeLog5
-rw-r--r--cpukit/score/include/rtems/score/heap.h16
-rw-r--r--cpukit/score/src/heap.c3
3 files changed, 13 insertions, 11 deletions
diff --git a/cpukit/ChangeLog b/cpukit/ChangeLog
index 1a0a9be0ac..5d9bc7eff2 100644
--- a/cpukit/ChangeLog
+++ b/cpukit/ChangeLog
@@ -1,3 +1,8 @@
+2009-09-25 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
+
+ * score/src/heap.c, score/include/rtems/score/heap.h: Reduced alignment
+ requirement for CPU_ALIGNMENT from four to two.
+
2009-09-25 Joel Sherrill <joel.sherrill@OARcorp.com>
	* posix/include/rtems/posix/threadsup.h: Add no return attribute to
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index 10a267526e..c2286bf2af 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -38,12 +38,10 @@ extern "C" {
* area. A heap control structure contains control information for the heap.
*
* The alignment routines could be made faster should we require only powers of
- * two to be supported both for page size, alignment and boundary arguments.
- * However, both workspace and malloc heaps are initialized with
- * CPU_HEAP_ALIGNMENT as page size, and while all the BSPs seem to use
- * CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever
- * reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT
- * and explicitly not required to be a power of two.
+ * two to be supported for page size, alignment and boundary arguments. The
+ * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
+ * value is only required to be a multiple of two and explicitly not required to
+ * be a power of two.
*
* There are two kinds of blocks. One sort describes a free block from which
* we can allocate memory. The other blocks are used and provide an allocated
@@ -167,9 +165,9 @@ typedef struct Heap_Block {
* If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
* used, otherwise the previous block is free. A used previous block may
* claim the @a prev_size field for allocation. This trick allows to
- * decrease the overhead in the used blocks by the size of the
- * @a prev_size field. As sizes are always multiples of four, the two least
- * significant bits are always zero. We use one of them to store the flag.
+ * decrease the overhead in the used blocks by the size of the @a prev_size
+ * field. As sizes are required to be multiples of two, the least
+ * significant bit is always zero. We use this bit to store the flag.
*
* This field is always valid.
*/
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index b12bdd8fab..c851334679 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -26,7 +26,7 @@
#include <rtems/system.h>
#include <rtems/score/heap.h>
-#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 4 != 0
+#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
#error "invalid CPU_ALIGNMENT value"
#endif
@@ -213,7 +213,6 @@ uintptr_t _Heap_Initialize(
stats->resizes = 0;
stats->instance = instance++;
- _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ) );
_HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
_HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
_HAssert(