Diffstat (limited to 'freebsd/sys/vm')
-rw-r--r--   freebsd/sys/vm/uma.h        |  23
-rw-r--r--   freebsd/sys/vm/uma_core.c   | 326
-rw-r--r--   freebsd/sys/vm/uma_int.h    |  53
-rw-r--r--   freebsd/sys/vm/vm_extern.h  |   9
4 files changed, 326 insertions, 85 deletions
diff --git a/freebsd/sys/vm/uma.h b/freebsd/sys/vm/uma.h
index d71f0ee3..3ab65563 100644
--- a/freebsd/sys/vm/uma.h
+++ b/freebsd/sys/vm/uma.h
@@ -279,8 +279,7 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 					 * mini-dumps.
 					 */
 #define	UMA_ZONE_PCPU		0x8000	/*
-					 * Allocates mp_maxid + 1 slabs sized to
-					 * sizeof(struct pcpu).
+					 * Allocates mp_maxid + 1 slabs of PAGE_SIZE
 					 */
 #define	UMA_ZONE_NUMA		0x10000	/*
 					 * NUMA aware Zone.  Implements a best
@@ -333,6 +332,7 @@ void uma_zdestroy(uma_zone_t zone);
  */
 
 void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags);
+void *uma_zalloc_pcpu_arg(uma_zone_t zone, void *arg, int flags);
 
 /*
  * Allocate an item from a specific NUMA domain.  This uses a slow path in
@@ -354,6 +354,7 @@ void *uma_zalloc_domain(uma_zone_t zone, void *arg, int domain, int flags);
  *
  */
 static __inline void *uma_zalloc(uma_zone_t zone, int flags);
+static __inline void *uma_zalloc_pcpu(uma_zone_t zone, int flags);
 
 static __inline void *
 uma_zalloc(uma_zone_t zone, int flags)
@@ -361,6 +362,12 @@ uma_zalloc(uma_zone_t zone, int flags)
 	return uma_zalloc_arg(zone, NULL, flags);
 }
 
+static __inline void *
+uma_zalloc_pcpu(uma_zone_t zone, int flags)
+{
+	return uma_zalloc_pcpu_arg(zone, NULL, flags);
+}
+
 /*
  * Frees an item back into the specified zone.
  *
@@ -374,6 +381,7 @@ uma_zalloc(uma_zone_t zone, int flags)
  */
 
 void uma_zfree_arg(uma_zone_t zone, void *item, void *arg);
+void uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *arg);
 
 /*
  * Frees an item back to the specified zone's domain specific pool.
@@ -392,6 +400,7 @@ void uma_zfree_domain(uma_zone_t zone, void *item, void *arg);
  *
  */
 static __inline void uma_zfree(uma_zone_t zone, void *item);
+static __inline void uma_zfree_pcpu(uma_zone_t zone, void *item);
 
 static __inline void
 uma_zfree(uma_zone_t zone, void *item)
@@ -399,6 +408,12 @@ uma_zfree(uma_zone_t zone, void *item)
 	uma_zfree_arg(zone, item, NULL);
 }
 
+static __inline void
+uma_zfree_pcpu(uma_zone_t zone, void *item)
+{
+	uma_zfree_pcpu_arg(zone, item, NULL);
+}
+
 /*
  * Wait until the specified zone can allocate an item.
  */
@@ -603,12 +618,12 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
 #ifndef __rtems__
 #define	UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
 #endif /* __rtems__ */
-#define	UMA_SLAB_KERNEL	0x04		/* Slab alloced from kernel_map */
+#define	UMA_SLAB_KERNEL	0x04		/* Slab alloced from kmem */
 #ifndef __rtems__
 #define	UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define	UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
 #define	UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */
-/* 0x02, 0x40 and 0x80 are available */
+/* 0x02, 0x40, and 0x80 are available */
 #endif /* __rtems__ */
 
 /*
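Note (illustrative, not part of the diff): the new wrappers are meant for zones created with UMA_ZONE_PCPU, where one allocated item means one copy per CPU. A minimal usage sketch in the style of counter(9); the zone name and helper functions here are hypothetical:

	static uma_zone_t example_pcpu_zone;	/* hypothetical zone for this sketch */

	static void
	example_pcpu_setup(void)
	{
		/* One uint64_t per CPU for every item allocated. */
		example_pcpu_zone = uma_zcreate("example pcpu 64", sizeof(uint64_t),
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	}

	static uint64_t *
	example_pcpu_alloc(void)
	{
		/*
		 * M_ZERO must go through the pcpu wrapper: uma_zalloc_pcpu_arg()
		 * strips it and bzeroes every CPU's copy itself, while plain
		 * uma_zalloc_arg() now asserts against M_ZERO on pcpu zones.
		 */
		return (uma_zalloc_pcpu(example_pcpu_zone, M_WAITOK | M_ZERO));
	}

	static void
	example_pcpu_free(uint64_t *p)
	{
		uma_zfree_pcpu(example_pcpu_zone, p);
	}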
diff --git a/freebsd/sys/vm/uma_core.c b/freebsd/sys/vm/uma_core.c
index b8145c72..0f4bbb35 100644
--- a/freebsd/sys/vm/uma_core.c
+++ b/freebsd/sys/vm/uma_core.c
@@ -276,9 +276,13 @@ static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 #endif /* __rtems__ */
 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 #ifndef __rtems__
+static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 #endif /* __rtems__ */
 static void page_free(void *, vm_size_t, uint8_t);
+#ifndef __rtems__
+static void pcpu_page_free(void *, vm_size_t, uint8_t);
+#endif /* __rtems__ */
 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
 static void cache_drain(uma_zone_t);
 static void bucket_drain(uma_zone_t, uma_bucket_t);
@@ -323,8 +327,25 @@ static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
 
 #ifdef INVARIANTS
+static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
+static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
+
+static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
+    "Memory allocation debugging");
+
+static u_int dbg_divisor = 1;
+SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
+    "Debug & thrash every this item in memory allocator");
+
+static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
+static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
+SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
+    &uma_dbg_cnt, "memory items debugged");
+SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
+    &uma_skip_cnt, "memory items skipped, not debugged");
 #endif
 
 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
@@ -910,6 +931,18 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
 	i = start;
 	if (keg->uk_fini != NULL) {
 		for (i--; i > -1; i--)
+#ifdef INVARIANTS
+		/*
+		 * trash_fini implies that dtor was trash_dtor. trash_fini
+		 * would check that memory hasn't been modified since free,
+		 * which executed trash_dtor.
+		 * That's why we need to run uma_dbg_kskip() check here,
+		 * albeit we don't make skip check for other init/fini
+		 * invocations.
+		 */
+		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
+		    keg->uk_fini != trash_fini)
+#endif
 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
 			    keg->uk_size);
 	}
@@ -1209,6 +1242,57 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 }
 
 #ifndef __rtems__
+static void *
+pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
+    int wait)
+{
+	struct pglist alloctail;
+	vm_offset_t addr, zkva;
+	int cpu, flags;
+	vm_page_t p, p_next;
+#ifdef NUMA
+	struct pcpu *pc;
+#endif
+
+	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
+
+	TAILQ_INIT(&alloctail);
+	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
+	    malloc2vm_flags(wait);
+	*pflag = UMA_SLAB_KERNEL;
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		if (CPU_ABSENT(cpu)) {
+			p = vm_page_alloc(NULL, 0, flags);
+		} else {
+#ifndef NUMA
+			p = vm_page_alloc(NULL, 0, flags);
+#else
+			pc = pcpu_find(cpu);
+			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
+			if (__predict_false(p == NULL))
+				p = vm_page_alloc(NULL, 0, flags);
+#endif
+		}
+		if (__predict_false(p == NULL))
+			goto fail;
+		TAILQ_INSERT_TAIL(&alloctail, p, listq);
+	}
+	if ((addr = kva_alloc(bytes)) == 0)
+		goto fail;
+	zkva = addr;
+	TAILQ_FOREACH(p, &alloctail, listq) {
+		pmap_qenter(zkva, &p, 1);
+		zkva += PAGE_SIZE;
+	}
+	return ((void*)addr);
+ fail:
+	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
+		vm_page_unwire(p, PQ_NONE);
+		vm_page_free(p);
+	}
+	return (NULL);
+}
+
 /*
  * Allocates a number of pages from within an object
  *
@@ -1286,14 +1370,11 @@ static void
 page_free(void *mem, vm_size_t size, uint8_t flags)
 {
 #ifndef __rtems__
-	struct vmem *vmem;
 
-	if (flags & UMA_SLAB_KERNEL)
-		vmem = kernel_arena;
-	else
+	if ((flags & UMA_SLAB_KERNEL) == 0)
 		panic("UMA: page_free used with invalid flags %x", flags);
 
-	kmem_free(vmem, (vm_offset_t)mem, size);
+	kmem_free((vm_offset_t)mem, size);
 #else /* __rtems__ */
 	if (flags & UMA_SLAB_KERNEL)
 		free(mem, M_TEMP);
@@ -1302,6 +1383,39 @@ page_free(void *mem, vm_size_t size, uint8_t flags)
 #endif /* __rtems__ */
 }
 
+#ifndef __rtems__
+/*
+ * Frees pcpu zone allocations
+ *
+ * Arguments:
+ *	mem   A pointer to the memory to be freed
+ *	size  The size of the memory being freed
+ *	flags The original p->us_flags field
+ *
+ * Returns:
+ *	Nothing
+ */
+static void
+pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
+{
+	vm_offset_t sva, curva;
+	vm_paddr_t paddr;
+	vm_page_t m;
+
+	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
+
+	sva = (vm_offset_t)mem;
+	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
+		paddr = pmap_kextract(curva);
+		m = PHYS_TO_VM_PAGE(paddr);
+		vm_page_unwire(m, PQ_NONE);
+		vm_page_free(m);
+	}
+	pmap_qremove(sva, size >> PAGE_SHIFT);
+	kva_free(sva, size);
+}
+#endif /* __rtems__ */
+
+
 /*
  * Zero fill initializer
  *
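An aside on the layout (a sketch drawn from the allocator above, not code in this diff): pcpu_page_alloc() backs CPU N with the N-th wired page of one virtually contiguous KVA run, so a consumer reaches its per-CPU view by a fixed page stride from the base item. This is the arithmetic the zpcpu_get_cpu() accessor used later in uma_zalloc_pcpu_arg() relies on; UMA_PCPU_ALLOC_SIZE is assumed to equal PAGE_SIZE here:

	/*
	 * Illustrative only: per-CPU copies sit one page apart because
	 * pcpu_page_alloc() maps cpu N's page at base + N * PAGE_SIZE.
	 */
	static inline void *
	example_zpcpu_get_cpu(void *base, int cpu)
	{
		return ((char *)base + (uintptr_t)PAGE_SIZE * cpu);
	}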
@@ -1335,9 +1449,8 @@ keg_small_init(uma_keg_t keg)
 	if (keg->uk_flags & UMA_ZONE_PCPU) {
 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
 
-		slabsize = sizeof(struct pcpu);
-		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
-		    PAGE_SIZE);
+		slabsize = UMA_PCPU_ALLOC_SIZE;
+		keg->uk_ppera = ncpus;
 	} else {
 		slabsize = UMA_SLAB_SIZE;
 		keg->uk_ppera = 1;
@@ -1356,7 +1469,7 @@ keg_small_init(uma_keg_t keg)
 	keg->uk_rsize = rsize;
 
 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
-	    keg->uk_rsize < sizeof(struct pcpu),
+	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
 	    ("%s: size %u too large", __func__, keg->uk_rsize));
 
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
@@ -1575,6 +1688,8 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	else if (keg->uk_ppera == 1)
 		keg->uk_allocf = uma_small_alloc;
 #endif
+	else if (keg->uk_flags & UMA_ZONE_PCPU)
+		keg->uk_allocf = pcpu_page_alloc;
 	else
 #endif /* __rtems__ */
 		keg->uk_allocf = page_alloc;
@@ -1584,6 +1699,9 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		keg->uk_freef = uma_small_free;
 	else
 #endif
+	if (keg->uk_flags & UMA_ZONE_PCPU)
+		keg->uk_freef = pcpu_page_free;
+	else
 #endif /* __rtems__ */
 		keg->uk_freef = page_free;
@@ -2066,11 +2184,16 @@ static void
 uma_startup3(void)
 {
 
+#ifdef INVARIANTS
+	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
+	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
+	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
+#endif
+	callout_init(&uma_callout, 1);
+	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 #ifndef __rtems__
 	booted = BOOT_RUNNING;
 #endif /* __rtems__ */
-	callout_init(&uma_callout, 1);
-	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
 }
 
 static uma_keg_t
@@ -2324,6 +2447,40 @@ uma_zwait(uma_zone_t zone)
 	uma_zfree(zone, item);
 }
 
+void *
+uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
+{
+	void *item;
+#ifdef SMP
+	int i;
+
+	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
+#endif
+
+	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
+	if (item != NULL && (flags & M_ZERO)) {
+#ifdef SMP
+		for (i = 0; i <= mp_maxid; i++)
+			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
+#else
+		bzero(item, zone->uz_size);
+#endif
+	}
+	return (item);
+}
+
+/*
+ * A stub while both regular and pcpu cases are identical.
+ */
+void
+uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
+{
+
+#ifdef SMP
+	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
+#endif
+	uma_zfree_arg(zone, item, udata);
+}
+
 /* See uma.h */
 void *
 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
@@ -2333,9 +2490,12 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 	uma_cache_t cache;
 	void *item;
 	int cpu, domain, lockfail;
+#ifdef INVARIANTS
+	bool skipdbg;
+#endif
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
-	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
+	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 
 	/* This is the fast path allocation */
 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
@@ -2346,8 +2506,12 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 	}
 #ifndef __rtems__
+	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
+	if (zone->uz_flags & UMA_ZONE_PCPU)
+		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
+		    "with M_ZERO passed"));
 #endif /* __rtems__ */
 
 #ifdef DEBUG_MEMGUARD
@@ -2394,14 +2558,22 @@ zalloc_start:
 			KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
 			cache->uc_allocs++;
 			critical_exit();
+#ifdef INVARIANTS
+			skipdbg = uma_dbg_zskip(zone, item);
+#endif
 			if (zone->uz_ctor != NULL &&
+#ifdef INVARIANTS
+			    (!skipdbg || zone->uz_ctor != trash_ctor ||
+			    zone->uz_dtor != trash_dtor) &&
+#endif
 			    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
 				atomic_add_long(&zone->uz_fails, 1);
 				zone_free_item(zone, item, udata, SKIP_DTOR);
 				return (NULL);
 			}
 #ifdef INVARIANTS
-			uma_dbg_alloc(zone, NULL, item);
+			if (!skipdbg)
+				uma_dbg_alloc(zone, NULL, item);
 #endif
 			if (flags & M_ZERO)
 				uma_zero_item(item, zone);
@@ -2534,7 +2706,7 @@ uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 {
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
-	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
+	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 
 	/* This is the fast path allocation */
 	CTR5(KTR_UMA,
@@ -2820,9 +2992,9 @@ zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
 {
 	uma_slab_t slab;
 	uma_keg_t keg;
-#ifndef __rtems__
+#ifdef NUMA
 	int stripe;
-#endif /* __rtems__ */
+#endif
 	int i;
 
 	slab = NULL;
@@ -2832,9 +3004,9 @@ zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
 		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
 			break;
 		keg = slab->us_keg;
-#ifndef __rtems__
+#ifdef NUMA
 		stripe = howmany(max, vm_ndomains);
-#endif /* __rtems__ */
+#endif
 		while (slab->us_freecount && i < max) {
 			bucket[i++] = slab_alloc_item(keg, slab);
 			if (keg->uk_free <= keg->uk_reserve)
@@ -2930,6 +3102,9 @@ static void *
 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
 {
 	void *item;
+#ifdef INVARIANTS
+	bool skipdbg;
+#endif
 
 	item = NULL;
 
@@ -2937,6 +3112,9 @@ zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
 		goto fail;
 	atomic_add_long(&zone->uz_allocs, 1);
 
+#ifdef INVARIANTS
+	skipdbg = uma_dbg_zskip(zone, item);
+#endif
 	/*
 	 * We have to call both the zone's init (not the keg's init)
 	 * and the zone's ctor.  This is because the item is going from
@@ -2949,14 +3127,18 @@ zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
 			goto fail;
 		}
 	}
-	if (zone->uz_ctor != NULL) {
-		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
-			zone_free_item(zone, item, udata, SKIP_DTOR);
-			goto fail;
-		}
+	if (zone->uz_ctor != NULL &&
+#ifdef INVARIANTS
+	    (!skipdbg || zone->uz_ctor != trash_ctor ||
+	    zone->uz_dtor != trash_dtor) &&
+#endif
+	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
+		zone_free_item(zone, item, udata, SKIP_DTOR);
+		goto fail;
 	}
 #ifdef INVARIANTS
-	uma_dbg_alloc(zone, NULL, item);
+	if (!skipdbg)
+		uma_dbg_alloc(zone, NULL, item);
 #endif
 	if (flags & M_ZERO)
 		uma_zero_item(item, zone);
@@ -2981,9 +3163,12 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 	uma_bucket_t bucket;
 	uma_zone_domain_t zdom;
 	int cpu, domain, lockfail;
+#ifdef INVARIANTS
+	bool skipdbg;
+#endif
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
-	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
+	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 
 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
 	    zone->uz_name);
@@ -3007,12 +3192,18 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 	}
 #endif
 #ifdef INVARIANTS
-	if (zone->uz_flags & UMA_ZONE_MALLOC)
-		uma_dbg_free(zone, udata, item);
-	else
-		uma_dbg_free(zone, NULL, item);
-#endif
+	skipdbg = uma_dbg_zskip(zone, item);
+	if (skipdbg == false) {
+		if (zone->uz_flags & UMA_ZONE_MALLOC)
+			uma_dbg_free(zone, udata, item);
+		else
+			uma_dbg_free(zone, NULL, item);
+	}
+	if (zone->uz_dtor != NULL && (!skipdbg ||
+	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
+#else
 	if (zone->uz_dtor != NULL)
+#endif
 		zone->uz_dtor(item, zone->uz_size, udata);
 
 	/*
@@ -3079,14 +3270,6 @@ zfree_start:
 	cpu = curcpu;
 	cache = &zone->uz_cpu[cpu];
 
-	/*
-	 * Since we have locked the zone we may as well send back our stats.
-	 */
-	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
-	atomic_add_long(&zone->uz_frees, cache->uc_frees);
-	cache->uc_allocs = 0;
-	cache->uc_frees = 0;
-
 	bucket = cache->uc_freebucket;
 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
 		ZONE_UNLOCK(zone);
@@ -3163,7 +3346,7 @@ uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
 {
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
-	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
+	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
 
 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
 	    zone->uz_name);
@@ -3276,16 +3459,23 @@ zone_release(uma_zone_t zone, void **bucket, int cnt)
 static void
 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
 {
-
 #ifdef INVARIANTS
-	if (skip == SKIP_NONE) {
+	bool skipdbg;
+
+	skipdbg = uma_dbg_zskip(zone, item);
+	if (skip == SKIP_NONE && !skipdbg) {
 		if (zone->uz_flags & UMA_ZONE_MALLOC)
 			uma_dbg_free(zone, udata, item);
 		else
 			uma_dbg_free(zone, NULL, item);
 	}
+
+	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
+	    (!skipdbg || zone->uz_dtor != trash_dtor ||
+	    zone->uz_ctor != trash_ctor))
+#else
+	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
 #endif
-	if (skip < SKIP_DTOR && zone->uz_dtor)
 		zone->uz_dtor(item, zone->uz_size, udata);
 
 	if (skip < SKIP_FINI && zone->uz_fini)
@@ -3648,7 +3838,7 @@ uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 	if (slab == NULL)
 		return (NULL);
 	if (domain == UMA_ANYDOMAIN)
-		addr = kmem_malloc(kernel_arena, size, wait);
+		addr = kmem_malloc(size, wait);
 	else
 		addr = kmem_malloc_domain(domain, size, wait);
 	if (addr != 0) {
@@ -3679,7 +3869,7 @@ uma_large_free(uma_slab_t slab)
 
 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
 	    ("uma_large_free: Memory not allocated with uma_large_malloc."));
-	kmem_free(kernel_arena, (vm_offset_t)slab->us_data, slab->us_size);
+	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
 	uma_total_dec(slab->us_size);
 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 }
@@ -3688,13 +3878,8 @@ static void
 uma_zero_item(void *item, uma_zone_t zone)
 {
-	int i;
 
-	if (zone->uz_flags & UMA_ZONE_PCPU) {
-		CPU_FOREACH(i)
-			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
-	} else
-		bzero(item, zone->uz_size);
+	bzero(item, zone->uz_size);
 }
 
 unsigned long
@@ -4022,6 +4207,43 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
 	return (slab);
 }
 
+static bool
+uma_dbg_zskip(uma_zone_t zone, void *mem)
+{
+	uma_keg_t keg;
+
+	if ((keg = zone_first_keg(zone)) == NULL)
+		return (true);
+
+	return (uma_dbg_kskip(keg, mem));
+}
+
+static bool
+uma_dbg_kskip(uma_keg_t keg, void *mem)
+{
+	uintptr_t idx;
+
+	if (dbg_divisor == 0)
+		return (true);
+
+	if (dbg_divisor == 1)
+		return (false);
+
+	idx = (uintptr_t)mem >> PAGE_SHIFT;
+	if (keg->uk_ipers > 1) {
+		idx *= keg->uk_ipers;
+		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
+	}
+
+	if ((idx / dbg_divisor) * dbg_divisor != idx) {
+		counter_u64_add(uma_skip_cnt, 1);
+		return (true);
+	}
+	counter_u64_add(uma_dbg_cnt, 1);
+
+	return (false);
+}
+
 /*
  * Set up the slab's freei data such that uma_dbg_free can function.
  *
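To make the sampling rule above concrete (illustrative code, not part of the diff): uma_dbg_kskip() flattens an item's address into a global index, page number times items-per-page plus the slot within the page, and only every dbg_divisor-th item is trashed and verified; vm.debug.divisor is a boot-time tunable (CTLFLAG_RDTUN, fetched explicitly in uma_startup3(), so it can be set in loader.conf). The same arithmetic as a standalone sketch, with made-up values in the comment:

	/*
	 * With divisor = 3, ipers = 4, rsize = 1024: an item at page
	 * index 5, slot 2 yields idx = 5 * 4 + 2 = 22; 22 % 3 != 0,
	 * so this item would be skipped (neither trashed nor checked).
	 */
	static bool
	example_dbg_skip(uintptr_t mem, u_int ipers, u_int rsize, u_int divisor)
	{
		uintptr_t idx;

		idx = mem >> PAGE_SHIFT;
		if (ipers > 1)
			idx = idx * ipers + (mem & PAGE_MASK) / rsize;
		/* Equivalent to the (idx / divisor) * divisor != idx test. */
		return (idx % divisor != 0);
	}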
@@ -4032,8 +4254,6 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
 	uma_keg_t keg;
 	int freei;
 
-	if (zone_first_keg(zone) == NULL)
-		return;
 	if (slab == NULL) {
 		slab = uma_dbg_getslab(zone, item);
 		if (slab == NULL)
@@ -4062,8 +4282,6 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
 	uma_keg_t keg;
 	int freei;
 
-	if (zone_first_keg(zone) == NULL)
-		return;
 	if (slab == NULL) {
 		slab = uma_dbg_getslab(zone, item);
 		if (slab == NULL)
diff --git a/freebsd/sys/vm/uma_int.h b/freebsd/sys/vm/uma_int.h
index 8d58fa33..5f787dfa 100644
--- a/freebsd/sys/vm/uma_int.h
+++ b/freebsd/sys/vm/uma_int.h
@@ -176,7 +176,7 @@ struct uma_hash {
 /*
  * align field or structure to cache line
  */
-#if defined(__amd64__)
+#if defined(__amd64__) || defined(__powerpc64__)
 #define UMA_ALIGN	__aligned(128)
 #else
 #define UMA_ALIGN
@@ -188,7 +188,7 @@ struct uma_hash {
 
 struct uma_bucket {
 	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
-	int16_t	ub_cnt;				/* Count of free items. */
+	int16_t	ub_cnt;				/* Count of items in bucket. */
 	int16_t	ub_entries;			/* Max items. */
 	void	*ub_bucket[];			/* actual allocation storage */
 };
@@ -222,9 +222,8 @@ typedef struct uma_domain * uma_domain_t;
  *
  */
 struct uma_keg {
-	struct mtx_padalign	uk_lock;	/* Lock for the keg */
+	struct mtx	uk_lock;	/* Lock for the keg */
 	struct uma_hash	uk_hash;
-
 	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
 
 	uint32_t	uk_cursor;	/* Domain alloc cursor. */
@@ -319,41 +318,49 @@ typedef struct uma_zone_domain * uma_zone_domain_t;
  *
  */
 struct uma_zone {
-	struct mtx_padalign	uz_lock;	/* Lock for the zone */
-	struct mtx_padalign	*uz_lockptr;
-	const char	*uz_name;	/* Text name of the zone */
-
-	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
+	/* Offset 0, used in alloc/free fast/medium fast path and const. */
+	struct mtx	*uz_lockptr;
+	const char	*uz_name;	/* Text name of the zone */
 	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
-
-	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
-	struct uma_klink	uz_klink;	/* klink for first keg. */
-
-	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
+	uint32_t	uz_flags;	/* Flags inherited from kegs */
+	uint32_t	uz_size;	/* Size inherited from kegs */
 	uma_ctor	uz_ctor;	/* Constructor for each allocation */
 	uma_dtor	uz_dtor;	/* Destructor */
 	uma_init	uz_init;	/* Initializer for each item */
 	uma_fini	uz_fini;	/* Finalizer for each item. */
+
+	/* Offset 64, used in bucket replenish. */
 	uma_import	uz_import;	/* Import new memory to cache. */
 	uma_release	uz_release;	/* Release memory from cache. */
 	void		*uz_arg;	/* Import/release argument. */
-
-	uint32_t	uz_flags;	/* Flags inherited from kegs */
-	uint32_t	uz_size;	/* Size inherited from kegs */
-
-	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
-	volatile u_long	uz_fails;	/* Total number of alloc failures */
-	volatile u_long	uz_frees;	/* Total number of frees */
-	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
+	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
 	uint16_t	uz_count;	/* Amount of items in full bucket */
 	uint16_t	uz_count_min;	/* Minimal amount of items there */
+	/* 32bit pad on 64bit. */
+	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
+	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
 
+	/* Offset 128 Rare. */
+	/*
+	 * The lock is placed here to avoid adjacent line prefetcher
+	 * in fast paths and to take up space near infrequently accessed
+	 * members to reduce alignment overhead.
+	 */
+	struct mtx	uz_lock;	/* Lock for the zone */
+	struct uma_klink	uz_klink;	/* klink for first keg. */
 	/* The next two fields are used to print a rate-limited warnings. */
 	const char	*uz_warning;	/* Warning to print on failure */
 	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
-
 	struct task	uz_maxaction;	/* Task to run when at limit */
+	/* 16 bytes of pad. */
+
+	/* Offset 256, atomic stats. */
+	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
+	volatile u_long	uz_fails;	/* Total number of alloc failures */
+	volatile u_long	uz_frees;	/* Total number of frees */
+	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
 
 	/*
 	 * This HAS to be the last item because we adjust the zone size
 	 * based on NCPU and then allocate the space for the zones.
diff --git a/freebsd/sys/vm/vm_extern.h b/freebsd/sys/vm/vm_extern.h
index 47e35b2d..b2f1d726 100644
--- a/freebsd/sys/vm/vm_extern.h
+++ b/freebsd/sys/vm/vm_extern.h
@@ -54,19 +54,19 @@ vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
 void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
 
 /* These operate on virtual addresses backed by memory. */
-vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
-vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+vm_offset_t kmem_malloc(vm_size_t size, int flags);
 vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
-void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
+void kmem_free(vm_offset_t addr, vm_size_t size);
 
 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
@@ -74,6 +74,7 @@ int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
 void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
+void kmem_bootstrap_free(vm_offset_t, vm_size_t);
 vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
     boolean_t);
 void kmem_init(vm_offset_t, vm_offset_t);
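Note on the vm_extern.h changes (a sketch, not part of the diff): the kmem_* prototypes drop their struct vmem * parameter, with the arena now chosen internally, so existing callers shrink by one argument. A before/after sketch with a hypothetical buffer:

	/* Before: the arena was passed explicitly. */
	buf = (void *)kmem_malloc(kernel_arena, len, M_WAITOK | M_ZERO);
	...
	kmem_free(kernel_arena, (vm_offset_t)buf, len);

	/* After: matches the new prototypes and the uma_core.c callers above. */
	buf = (void *)kmem_malloc(len, M_WAITOK | M_ZERO);
	...
	kmem_free((vm_offset_t)buf, len);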