Diffstat (limited to 'freebsd/sys/vm/uma.h')
-rw-r--r--  freebsd/sys/vm/uma.h  23
1 file changed, 19 insertions, 4 deletions
diff --git a/freebsd/sys/vm/uma.h b/freebsd/sys/vm/uma.h
index d71f0ee3..3ab65563 100644
--- a/freebsd/sys/vm/uma.h
+++ b/freebsd/sys/vm/uma.h
@@ -279,8 +279,7 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
* mini-dumps.
*/
#define UMA_ZONE_PCPU 0x8000 /*
- * Allocates mp_maxid + 1 slabs sized to
- * sizeof(struct pcpu).
+ * Allocates mp_maxid + 1 slabs of PAGE_SIZE
*/
#define UMA_ZONE_NUMA 0x10000 /*
* NUMA aware Zone. Implements a best
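As context for the UMA_ZONE_PCPU change in the hunk above, here is a minimal, hypothetical sketch (not part of this diff) of creating a per-CPU zone; the zone name "example_stats", struct stats, and stats_zone_setup() are made up for illustration:

	/* Hypothetical per-CPU statistics zone built on UMA_ZONE_PCPU. */
	#include <sys/param.h>
	#include <vm/uma.h>

	struct stats {
		uint64_t	packets;
		uint64_t	bytes;
	};

	static uma_zone_t stats_zone;

	static void
	stats_zone_setup(void)
	{

		/* Every item handed out by this zone carries one copy per CPU. */
		stats_zone = uma_zcreate("example_stats", sizeof(struct stats),
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	}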
@@ -333,6 +332,7 @@ void uma_zdestroy(uma_zone_t zone);
*/
void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags);
+void *uma_zalloc_pcpu_arg(uma_zone_t zone, void *arg, int flags);
/*
* Allocate an item from a specific NUMA domain. This uses a slow path in
@@ -354,6 +354,7 @@ void *uma_zalloc_domain(uma_zone_t zone, void *arg, int domain, int flags);
*
*/
static __inline void *uma_zalloc(uma_zone_t zone, int flags);
+static __inline void *uma_zalloc_pcpu(uma_zone_t zone, int flags);
static __inline void *
uma_zalloc(uma_zone_t zone, int flags)
@@ -361,6 +362,12 @@ uma_zalloc(uma_zone_t zone, int flags)
return uma_zalloc_arg(zone, NULL, flags);
}
+static __inline void *
+uma_zalloc_pcpu(uma_zone_t zone, int flags)
+{
+ return uma_zalloc_pcpu_arg(zone, NULL, flags);
+}
+
/*
* Frees an item back into the specified zone.
*
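A hedged usage sketch for the new uma_zalloc_pcpu() entry point, continuing the hypothetical stats_zone example above. stats_alloc() and stats_record() are invented names, and the sketch assumes zpcpu_get() from <sys/pcpu.h> and critical_enter()/critical_exit() are available in this tree:

	static struct stats *
	stats_alloc(void)
	{

		/* The pcpu-aware path is expected to clear every per-CPU copy for M_ZERO. */
		return (uma_zalloc_pcpu(stats_zone, M_WAITOK | M_ZERO));
	}

	static void
	stats_record(struct stats *base, uint64_t len)
	{
		struct stats *st;

		/* Stay on one CPU while updating that CPU's private copy. */
		critical_enter();
		st = zpcpu_get(base);
		st->packets++;
		st->bytes += len;
		critical_exit();
	}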
@@ -374,6 +381,7 @@ uma_zalloc(uma_zone_t zone, int flags)
*/
void uma_zfree_arg(uma_zone_t zone, void *item, void *arg);
+void uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *arg);
/*
* Frees an item back to the specified zone's domain specific pool.
@@ -392,6 +400,7 @@ void uma_zfree_domain(uma_zone_t zone, void *item, void *arg);
*
*/
static __inline void uma_zfree(uma_zone_t zone, void *item);
+static __inline void uma_zfree_pcpu(uma_zone_t zone, void *item);
static __inline void
uma_zfree(uma_zone_t zone, void *item)
@@ -399,6 +408,12 @@ uma_zfree(uma_zone_t zone, void *item)
uma_zfree_arg(zone, item, NULL);
}
+static __inline void
+uma_zfree_pcpu(uma_zone_t zone, void *item)
+{
+ uma_zfree_pcpu_arg(zone, item, NULL);
+}
+
/*
* Wait until the specified zone can allocate an item.
*/
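A matching sketch for the release side, again hypothetical and assuming zpcpu_get_cpu() from <sys/pcpu.h> and CPU_FOREACH() from <sys/smp.h>; a single uma_zfree_pcpu() call returns the whole per-CPU set:

	static void
	stats_report_and_free(struct stats *base)
	{
		struct stats *st;
		uint64_t packets = 0, bytes = 0;
		int cpu;

		/* Sum the per-CPU copies before the item goes back to the zone. */
		CPU_FOREACH(cpu) {
			st = zpcpu_get_cpu(base, cpu);
			packets += st->packets;
			bytes += st->bytes;
		}
		printf("stats: %ju packets, %ju bytes\n",
		    (uintmax_t)packets, (uintmax_t)bytes);

		/* One call releases all mp_maxid + 1 per-CPU copies. */
		uma_zfree_pcpu(stats_zone, base);
	}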
@@ -603,12 +618,12 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
#ifndef __rtems__
#define UMA_SLAB_BOOT 0x01 /* Slab alloced from boot pages */
#endif /* __rtems__ */
-#define UMA_SLAB_KERNEL 0x04 /* Slab alloced from kernel_map */
+#define UMA_SLAB_KERNEL 0x04 /* Slab alloced from kmem */
#ifndef __rtems__
#define UMA_SLAB_PRIV 0x08 /* Slab alloced from priv allocator */
#define UMA_SLAB_OFFP 0x10 /* Slab is managed separately */
#define UMA_SLAB_MALLOC 0x20 /* Slab is a large malloc slab */
-/* 0x02, 0x40 and 0x80 are available */
+/* 0x02, 0x40, and 0x80 are available */
#endif /* __rtems__ */
/*