author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2016-11-10 14:58:22 +0100
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2017-10-20 14:59:54 +0200
commit	7533adcf09f501df069028dec02612466beb046c
tree	106827ddade0e8e62a8b1be8fc72ad7838257c4c
parent	ZONE(9): Use recursive lock for the UMA drain
rtems-kernel-page: Add used pages counter
 rtemsbsd/rtems/rtems-kernel-page.c | 44 +++++++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 19 deletions(-)
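
The change folds the former page_heap and page_heap_mtx into a single file-scope page_alloc structure and adds a used counter: it is incremented only when rtems_bsd_page_alloc() hands out a page and decremented in rtems_bsd_page_free(), always under page_alloc.mtx. A minimal sketch of a reader for the new counter follows; the accessor is hypothetical (this commit adds only the counter itself) and would have to live in rtems-kernel-page.c, where the page_alloc struct and the mtx API are already in scope:

/* Hypothetical accessor, not part of this commit.  The counter is
 * only modified while page_alloc.mtx is held, so a consistent read
 * must take the same mutex. */
static size_t
rtems_bsd_page_used_count(void)
{
	size_t used;

	mtx_lock(&page_alloc.mtx);
	used = page_alloc.used;
	mtx_unlock(&page_alloc.mtx);
	return (used);
}
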
diff --git a/rtemsbsd/rtems/rtems-kernel-page.c b/rtemsbsd/rtems/rtems-kernel-page.c
index c2ed23fe..eb96fcde 100644
--- a/rtemsbsd/rtems/rtems-kernel-page.c
+++ b/rtemsbsd/rtems/rtems-kernel-page.c
@@ -58,31 +58,34 @@ void **rtems_bsd_page_object_table;
 uintptr_t rtems_bsd_page_area_begin;
 
-static rtems_rbheap_control page_heap;
-
-struct mtx page_heap_mtx;
+static struct {
+	struct mtx mtx;
+	rtems_rbheap_control heap;
+	size_t used;
+} page_alloc;
 
 void *
 rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
 {
 	void *addr;
 
-	mtx_lock(&page_heap_mtx);
+	mtx_lock(&page_alloc.mtx);
 
-	addr = rtems_rbheap_allocate(&page_heap, size_in_bytes);
+	addr = rtems_rbheap_allocate(&page_alloc.heap, size_in_bytes);
 	if (addr == NULL && wait) {
 		int i;
 
 		for (i = 0; i < 8; i++) {
-			mtx_unlock(&page_heap_mtx);
+			mtx_unlock(&page_alloc.mtx);
 			uma_reclaim();
-			mtx_lock(&page_heap_mtx);
+			mtx_lock(&page_alloc.mtx);
 
-			addr = rtems_rbheap_allocate(&page_heap, size_in_bytes);
+			addr = rtems_rbheap_allocate(&page_alloc.heap,
+			    size_in_bytes);
 			if (addr != NULL)
 				break;
 
-			msleep(&page_heap, &page_heap_mtx, 0,
+			msleep(&page_alloc.heap, &page_alloc.mtx, 0,
 			    "page alloc", (hz / 4) * (i + 1));
 		}
@@ -91,7 +94,8 @@ rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
 		}
 	}
 
-	mtx_unlock(&page_heap_mtx);
+	page_alloc.used += (addr != NULL) ? 1 : 0;
+	mtx_unlock(&page_alloc.mtx);
 
 #ifdef INVARIANTS
 	wait |= M_ZERO;
@@ -107,10 +111,12 @@ rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
 void
 rtems_bsd_page_free(void *addr)
 {
-	mtx_lock(&page_heap_mtx);
-	rtems_rbheap_free(&page_heap, addr);
-	wakeup(&page_heap);
-	mtx_unlock(&page_heap_mtx);
+
+	mtx_lock(&page_alloc.mtx);
+	--page_alloc.used;
+	rtems_rbheap_free(&page_alloc.heap, addr);
+	wakeup(&page_alloc.heap);
+	mtx_unlock(&page_alloc.mtx);
 }
 
 static void
@@ -124,7 +130,7 @@ rtems_bsd_page_init(void *arg)
 	size_t n;
 	uintptr_t heap_size;
 
-	mtx_init(&page_heap_mtx, "page heap", NULL, MTX_DEF);
+	mtx_init(&page_alloc.mtx, "page heap", NULL, MTX_DEF);
 
 	heap_size = rtems_bsd_get_allocator_domain_size(
 	    RTEMS_BSD_ALLOCATOR_DOMAIN_PAGE);
@@ -133,11 +139,11 @@ rtems_bsd_page_init(void *arg)
 	    0);
 	BSD_ASSERT(area != NULL);
 
-	sc = rtems_rbheap_initialize(&page_heap, area, heap_size, PAGE_SIZE,
-	    rtems_rbheap_extend_descriptors_with_malloc, NULL);
+	sc = rtems_rbheap_initialize(&page_alloc.heap, area, heap_size,
+	    PAGE_SIZE, rtems_rbheap_extend_descriptors_with_malloc, NULL);
 	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
 
-	rtems_rbheap_set_extend_descriptors(&page_heap,
+	rtems_rbheap_set_extend_descriptors(&page_alloc.heap,
 	    rtems_rbheap_extend_descriptors_never);
 
 	n = heap_size / PAGE_SIZE;
@@ -146,7 +152,7 @@ rtems_bsd_page_init(void *arg)
 	BSD_ASSERT(chunks != NULL);
 
 	for (i = 0; i < n; ++i) {
-		rtems_rbheap_add_to_spare_descriptor_chain(&page_heap,
+		rtems_rbheap_add_to_spare_descriptor_chain(&page_alloc.heap,
 		    &chunks[i]);
 	}
 
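
Taken together, the increment on a successful allocation and the decrement in rtems_bsd_page_free() keep page_alloc.used equal to the number of pages currently handed out, and the wakeup() on the heap address pairs with the msleep() in the allocator's retry loop. A short caller-side sketch of the balanced pattern the counter tracks; it assumes FreeBSD's M_WAITOK is an acceptable nonzero wait argument, and the caller-side API is untouched by this patch:

void *page;

/* A successful allocation increments page_alloc.used by one. */
page = rtems_bsd_page_alloc(PAGE_SIZE, M_WAITOK);
if (page != NULL) {
	/* ... use the page ... */

	/* The matching free decrements the counter and wakes any
	 * thread sleeping in the allocation retry loop. */
	rtems_bsd_page_free(page);
}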