Diffstat (limited to 'freebsd/sys/vm/uma_core.c')
-rw-r--r--	freebsd/sys/vm/uma_core.c	45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/freebsd/sys/vm/uma_core.c b/freebsd/sys/vm/uma_core.c
index db245a6e..7738c5d2 100644
--- a/freebsd/sys/vm/uma_core.c
+++ b/freebsd/sys/vm/uma_core.c
@@ -176,9 +176,16 @@ static int boot_pages;
static struct sx uma_drain_lock;
-/* kmem soft limit. */
+/*
+ * kmem soft limit, initialized by uma_set_limit(). Ensure that early
+ * allocations don't trigger a wakeup of the reclaim thread.
+ */
static unsigned long uma_kmem_limit = LONG_MAX;
-static volatile unsigned long uma_kmem_total;
+SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
+ "UMA kernel memory soft limit");
+static unsigned long uma_kmem_total;
+SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
+ "UMA kernel memory usage");
#ifndef __rtems__
/* Is the VM done starting up? */
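Note: the hunk above expands the comment on the kmem soft limit and exports both counters as read-only sysctls, vm.uma_kmem_limit and vm.uma_kmem_total. A minimal userland sketch (FreeBSD assumed) that reads the two new OIDs with sysctlbyname(3); running `sysctl vm.uma_kmem_total vm.uma_kmem_limit` from the shell shows the same values:

/* Illustrative only; the kernel hunk above defines the OIDs. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	unsigned long total, limit;
	size_t len;

	len = sizeof(total);
	if (sysctlbyname("vm.uma_kmem_total", &total, &len, NULL, 0) != 0)
		err(1, "vm.uma_kmem_total");
	len = sizeof(limit);
	if (sysctlbyname("vm.uma_kmem_limit", &limit, &len, NULL, 0) != 0)
		err(1, "vm.uma_kmem_limit");
	printf("UMA usage: %lu of %lu bytes (soft limit)\n", total, limit);
	return (0);
}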
@@ -299,7 +306,7 @@ static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
-static int hash_alloc(struct uma_hash *);
+static int hash_alloc(struct uma_hash *, u_int);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
@@ -628,6 +635,7 @@ zone_domain_update_wss(uma_zone_domain_t zdom)
static void
keg_timeout(uma_keg_t keg)
{
+ u_int slabs;
KEG_LOCK(keg);
/*
@@ -638,7 +646,8 @@ keg_timeout(uma_keg_t keg)
* may be a little aggressive. Should I allow for two collisions max?
*/
if (keg->uk_flags & UMA_ZONE_HASH &&
- keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
+ (slabs = keg->uk_pages / keg->uk_ppera) >
+ keg->uk_hash.uh_hashsize) {
struct uma_hash newhash;
struct uma_hash oldhash;
int ret;
@@ -649,9 +658,8 @@ keg_timeout(uma_keg_t keg)
* I have to do everything in stages and check for
* races.
*/
- newhash = keg->uk_hash;
KEG_UNLOCK(keg);
- ret = hash_alloc(&newhash);
+ ret = hash_alloc(&newhash, 1 << fls(slabs));
KEG_LOCK(keg);
if (ret) {
if (hash_expand(&keg->uk_hash, &newhash)) {
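Note: together, the hunks above change keg_timeout() to capture the slab count once ((slabs = uk_pages / uk_ppera)) and to pass an explicit target of 1 << fls(slabs) to hash_alloc(), i.e. the smallest power of two strictly greater than the current slab count, instead of copying the old hash and letting hash_alloc() double it. A rough standalone sketch of that sizing rule; fls_local() merely stands in for the kernel's fls(9) so the example builds anywhere, it is not the kernel implementation:

#include <stdio.h>

static int
fls_local(unsigned int x)
{
	int bit;

	/* 0 for x == 0, otherwise the 1-based index of the MSB. */
	for (bit = 0; x != 0; bit++)
		x >>= 1;
	return (bit);
}

int
main(void)
{
	unsigned int slabs[] = { 1, 7, 8, 9, 4096 };
	size_t i;

	for (i = 0; i < sizeof(slabs) / sizeof(slabs[0]); i++)
		printf("slabs=%u -> hash size %u\n",
		    slabs[i], 1U << fls_local(slabs[i]));
	/* Prints 2, 8, 16, 16, 8192: always a power of two > slabs. */
	return (0);
}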
@@ -692,16 +700,13 @@ zone_timeout(uma_zone_t zone)
* 1 on success and 0 on failure.
*/
static int
-hash_alloc(struct uma_hash *hash)
+hash_alloc(struct uma_hash *hash, u_int size)
{
- u_int oldsize;
size_t alloc;
- oldsize = hash->uh_hashsize;
-
- /* We're just going to go to a power of two greater */
- if (oldsize) {
- hash->uh_hashsize = oldsize * 2;
+ KASSERT(powerof2(size), ("hash size must be power of 2"));
+ if (size > UMA_HASH_SIZE_INIT) {
+ hash->uh_hashsize = size;
alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
M_UMAHASH, M_NOWAIT);
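Note: with the hunk above, hash_alloc() no longer derives the new size from the old table: the caller supplies the bucket count, it must be a power of two, and anything not larger than UMA_HASH_SIZE_INIT falls back to the embedded initial table in the (unchanged) else branch; keg_ctor() further down passes 0 to request exactly that default. A simplified, hedged model of the contract, keeping the "1 on success and 0 on failure" convention from the surrounding comment; all toy_* names and the struct layout are invented for the sketch and are not the real struct uma_hash:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_HASH_SIZE_INIT	32	/* stand-in for UMA_HASH_SIZE_INIT */

struct toy_hash {
	void		**uh_slab_hash;			/* active bucket array */
	void		*toy_initbuckets[TOY_HASH_SIZE_INIT]; /* embedded table */
	unsigned int	uh_hashsize;
};

static int
toy_hash_alloc(struct toy_hash *hash, unsigned int size)
{
	/* Power-of-two check; 0 passes, as with the kernel's powerof2(). */
	assert((size & (size - 1)) == 0);
	if (size > TOY_HASH_SIZE_INIT) {
		hash->uh_slab_hash = calloc(size, sizeof(void *));
		hash->uh_hashsize = size;
	} else {
		/* Initial/small request: use the embedded table. */
		hash->uh_slab_hash = hash->toy_initbuckets;
		hash->uh_hashsize = TOY_HASH_SIZE_INIT;
	}
	return (hash->uh_slab_hash != NULL);	/* 1 on success, 0 on failure */
}

int
main(void)
{
	struct toy_hash h;

	toy_hash_alloc(&h, 0);		/* keg_ctor()-style default request */
	printf("default: %u buckets\n", h.uh_hashsize);
	toy_hash_alloc(&h, 256);	/* keg_timeout()-style resize */
	printf("grown:   %u buckets\n", h.uh_hashsize);
	return (0);
}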
@@ -1353,9 +1358,9 @@ pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
zkva += PAGE_SIZE;
}
return ((void*)addr);
- fail:
+fail:
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
- vm_page_unwire(p, PQ_NONE);
+ vm_page_unwire_noq(p);
vm_page_free(p);
}
return (NULL);
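Note: this hunk and the two that follow replace vm_page_unwire(p, PQ_NONE) with vm_page_unwire_noq(p) in the failure paths; the pages were wired at allocation and never placed on a page queue, so the _noq variant, which skips page-queue handling, appears to be the intended fit. The unwind itself relies on TAILQ_FOREACH_SAFE so entries can be released while walking the list. A standalone sketch of that pattern, assuming a BSD-style <sys/queue.h> that provides TAILQ_FOREACH_SAFE; toy_page is invented:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_page {
	int			id;
	TAILQ_ENTRY(toy_page)	listq;
};
TAILQ_HEAD(toy_pglist, toy_page);

int
main(void)
{
	struct toy_pglist alloctail = TAILQ_HEAD_INITIALIZER(alloctail);
	struct toy_page *p, *p_next;
	int i;

	/* "Allocate" a few pages, keeping them on the tail queue. */
	for (i = 0; i < 4; i++) {
		p = malloc(sizeof(*p));
		if (p == NULL)
			break;
		p->id = i;
		TAILQ_INSERT_TAIL(&alloctail, p, listq);
	}

	/* Failure path: release everything gathered so far. */
	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
		TAILQ_REMOVE(&alloctail, p, listq);
		printf("freeing page %d\n", p->id);
		free(p);
	}
	return (0);
}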
@@ -1405,7 +1410,7 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
* exit.
*/
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
- vm_page_unwire(p, PQ_NONE);
+ vm_page_unwire_noq(p);
vm_page_free(p);
}
return (NULL);
@@ -1475,7 +1480,7 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
paddr = pmap_kextract(curva);
m = PHYS_TO_VM_PAGE(paddr);
- vm_page_unwire(m, PQ_NONE);
+ vm_page_unwire_noq(m);
vm_page_free(m);
}
pmap_qremove(sva, size >> PAGE_SHIFT);
@@ -1820,7 +1825,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
}
if (keg->uk_flags & UMA_ZONE_HASH)
- hash_alloc(&keg->uk_hash);
+ hash_alloc(&keg->uk_hash, 0);
CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
keg, zone->uz_name, zone,
@@ -4065,14 +4070,14 @@ unsigned long
uma_size(void)
{
- return (uma_kmem_total);
+ return (atomic_load_long(&uma_kmem_total));
}
long
uma_avail(void)
{
- return (uma_kmem_limit - uma_kmem_total);
+ return (uma_kmem_limit - uma_size());
}
void
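Note: the last hunk makes uma_size() read uma_kmem_total with an explicit atomic_load_long(9) and has uma_avail() reuse uma_size(), so both report the same snapshot of the counter. A rough userland analogue using C11 <stdatomic.h> in place of the kernel atomic(9) primitives; all toy_* names are invented:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong toy_kmem_total;		/* bumped by "allocators" */
static unsigned long toy_kmem_limit = 1UL << 30;

static unsigned long
toy_size(void)
{
	/* Unlocked reader: a single atomic load, as in uma_size(). */
	return (atomic_load_explicit(&toy_kmem_total, memory_order_relaxed));
}

static long
toy_avail(void)
{
	return ((long)(toy_kmem_limit - toy_size()));
}

int
main(void)
{
	atomic_fetch_add(&toy_kmem_total, 4096);	/* simulate an allocation */
	printf("used %lu, avail %ld\n", toy_size(), toy_avail());
	return (0);
}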