author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2014-03-24 15:57:29 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2014-03-24 16:00:26 +0100
commit     2a713e3b944625c45154f0ea7f5703e918de758b (patch)
tree       c572bfa9e000f7e1fd815d2ab698ac27abf70e7d
parent     libcsupport: Use POSIX keys for GXX key functions (diff)
download   rtems-2a713e3b944625c45154f0ea7f5703e918de758b.tar.bz2
score: _Heap_Protection_set_delayed_free_fraction
Add and use _Heap_Protection_set_delayed_free_fraction(). This makes it possible to avoid a dependency on _Thread_Dispatch_is_enabled().
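
For illustration only, here is how a subsystem that owns a heap could use the new routine to request that every delayed block is freed on memory shortage, the same override the workspace installs further down in this patch. This is a minimal sketch: example_heap, example_heap_setup, area, size and page_size are hypothetical names, only the two score calls come from this commit.

  #include <rtems/score/heapimpl.h>

  static Heap_Control example_heap;

  /* Hypothetical helper: set up a heap that frees all delayed blocks at once. */
  static void example_heap_setup( void *area, uintptr_t size, uintptr_t page_size )
  {
    /* _Heap_Initialize() installs the default fraction value of two. */
    _Heap_Initialize( &example_heap, area, size, page_size );

    /* A fraction value of one frees all delayed blocks on memory shortage. */
    _Heap_Protection_set_delayed_free_fraction( &example_heap, 1 );
  }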
-rw-r--r--  cpukit/score/include/rtems/score/heap.h     |  1
-rw-r--r--  cpukit/score/include/rtems/score/heapimpl.h | 23
-rw-r--r--  cpukit/score/src/heap.c                     |  2
-rw-r--r--  cpukit/score/src/heapallocate.c             |  4
-rw-r--r--  cpukit/score/src/heapfree.c                 | 28
-rw-r--r--  cpukit/score/src/wkspace.c                  |  2
6 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/cpukit/score/include/rtems/score/heap.h b/cpukit/score/include/rtems/score/heap.h
index 62a64e5715..1ca840d4a0 100644
--- a/cpukit/score/include/rtems/score/heap.h
+++ b/cpukit/score/include/rtems/score/heap.h
@@ -161,6 +161,7 @@ typedef struct Heap_Block Heap_Block;
Heap_Block *first_delayed_free_block;
Heap_Block *last_delayed_free_block;
uintptr_t delayed_free_block_count;
+ uintptr_t delayed_free_fraction;
} Heap_Protection;
typedef struct {
diff --git a/cpukit/score/include/rtems/score/heapimpl.h b/cpukit/score/include/rtems/score/heapimpl.h
index abc573de19..a8948edd6f 100644
--- a/cpukit/score/include/rtems/score/heapimpl.h
+++ b/cpukit/score/include/rtems/score/heapimpl.h
@@ -347,6 +347,29 @@ Heap_Block *_Heap_Block_allocate(
}
#endif
+/**
+ * @brief Sets the fraction of delayed free blocks that is actually freed
+ * during memory shortage.
+ *
+ * The default is to free half the delayed free blocks. This is equal to a
+ * fraction value of two.
+ *
+ * @param[in] heap The heap control.
+ * @param[in] fraction The fraction is one divided by this fraction value.
+ */
+RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction(
+ Heap_Control *heap,
+ uintptr_t fraction
+)
+{
+#ifdef HEAP_PROTECTION
+ heap->Protection.delayed_free_fraction = fraction;
+#else
+ (void) heap;
+ (void) fraction;
+#endif
+}
+
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
{
return &heap->free_list;
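
The new comment above describes the parameter indirectly ("one divided by this fraction value"): a fraction value of N frees roughly 1/N of the currently delayed blocks, rounded up, whenever an allocation runs short. A hedged sketch of the resulting counts; the helper name is hypothetical, the expression is the one introduced in heapallocate.c below.

  /* ceil( count / fraction ): delayed blocks freed when an allocation fails. */
  static uintptr_t example_blocks_to_free( uintptr_t count, uintptr_t fraction )
  {
    return ( count + fraction - 1 ) / fraction;
  }

  /* example_blocks_to_free( 10, 2 ) == 5    default: free half          */
  /* example_blocks_to_free( 10, 1 ) == 10   workspace: free everything  */
  /* example_blocks_to_free(  3, 2 ) == 2    the division rounds up      */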
diff --git a/cpukit/score/src/heap.c b/cpukit/score/src/heap.c
index fa2430a2d8..949e963070 100644
--- a/cpukit/score/src/heap.c
+++ b/cpukit/score/src/heap.c
@@ -285,6 +285,8 @@ uintptr_t _Heap_Initialize(
stats->max_free_blocks = 1;
stats->instance = instance++;
+ _Heap_Protection_set_delayed_free_fraction( heap, 2 );
+
_HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
_HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
_HAssert(
diff --git a/cpukit/score/src/heapallocate.c b/cpukit/score/src/heapallocate.c
index f8a4820905..597077e280 100644
--- a/cpukit/score/src/heapallocate.c
+++ b/cpukit/score/src/heapallocate.c
@@ -34,7 +34,9 @@
{
bool search_again = false;
uintptr_t const blocks_to_free_count =
- (heap->Protection.delayed_free_block_count + 1) / 2;
+ (heap->Protection.delayed_free_block_count
+ + heap->Protection.delayed_free_fraction - 1)
+ / heap->Protection.delayed_free_fraction;
if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
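
A brief observation on the arithmetic in the hunk above: with the default fraction value of two installed by _Heap_Initialize(), the new expression is identical to the old hard-coded one, so the default allocation path behaves as before; the value of one installed for the workspace makes the shortage path free every delayed block, replacing the dispatch-disabled shortcut removed from heapfree.c below.

  /* fraction == 2, default:    (count + 2 - 1) / 2 == (count + 1) / 2  */
  /* fraction == 1, workspace:  (count + 1 - 1) / 1 == count            */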
diff --git a/cpukit/score/src/heapfree.c b/cpukit/score/src/heapfree.c
index df51438f0c..e577e1313a 100644
--- a/cpukit/score/src/heapfree.c
+++ b/cpukit/score/src/heapfree.c
@@ -20,7 +20,6 @@
#endif
#include <rtems/score/heapimpl.h>
-#include <rtems/score/threadimpl.h>
#ifndef HEAP_PROTECTION
#define _Heap_Protection_determine_block_free( heap, block ) true
@@ -83,26 +82,13 @@
bool do_free = true;
Heap_Block *const next = block->Protection_begin.next_delayed_free_block;
- /*
- * Sometimes after a free the allocated area is still in use. An example
- * is the task stack of a thread that deletes itself. The thread dispatch
- * disable level is a way to detect this use case.
- */
- if ( _Thread_Dispatch_is_enabled() ) {
- if ( next == NULL ) {
- _Heap_Protection_delay_block_free( heap, block );
- do_free = false;
- } else if ( next == HEAP_PROTECTION_OBOLUS ) {
- _Heap_Protection_check_free_block( heap, block );
- } else {
- _Heap_Protection_block_error( heap, block );
- }
- } else if ( next == NULL ) {
- /*
- * This is a hack to prevent heavy workspace fragmentation which would
- * lead to test suite failures.
- */
- _Heap_Protection_free_all_delayed_blocks( heap );
+ if ( next == NULL ) {
+ _Heap_Protection_delay_block_free( heap, block );
+ do_free = false;
+ } else if ( next == HEAP_PROTECTION_OBOLUS ) {
+ _Heap_Protection_check_free_block( heap, block );
+ } else {
+ _Heap_Protection_block_error( heap, block );
}
return do_free;
diff --git a/cpukit/score/src/wkspace.c b/cpukit/score/src/wkspace.c
index a1460ff0fb..071c178758 100644
--- a/cpukit/score/src/wkspace.c
+++ b/cpukit/score/src/wkspace.c
@@ -123,6 +123,8 @@ void _Workspace_Handler_initialization(
INTERNAL_ERROR_TOO_LITTLE_WORKSPACE
);
}
+
+ _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
}
void *_Workspace_Allocate(