author     Joel Sherrill <joel.sherrill@oarcorp.com>  2012-03-07 09:52:04 -0600
committer  Joel Sherrill <joel.sherrill@oarcorp.com>  2012-03-07 09:52:04 -0600
commit     a9153ec3040f54fa52b68e14dafed2aba7b780ae (patch)
tree       fda80e3380dfebf7d97868507aa185757852e882 /rtems/freebsd/rtems
download   rtems-libbsd-a9153ec3040f54fa52b68e14dafed2aba7b780ae.tar.bz2
Initial import
Code is based on FreeBSD 8.2 with USB support from Sebastian Huber and Thomas Doerfler. Initial TCP/IP stack work is from Kevin Kirspel.
Diffstat (limited to 'rtems/freebsd/rtems')
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-assert.c          |   39
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-autoconf.c        |   51
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-bus-dma.c         |  455
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-callout.c         |  122
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-cam.c             |  495
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-condvar.c         |  167
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-delay.c           |   45
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-generic.c         |  209
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-init-with-irq.c   |   46
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-init.c            |   65
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-jail.c            |   92
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-lock.c            |   45
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-malloc.c          |   77
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-mutex.c           |  314
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-nexus.c           |   71
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-panic.c           |   70
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-prot.c            |  142
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-resource.c        |  173
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-rwlock.c          |  340
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-shell.c           |  181
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-signal.c          |   33
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-support.c         |   75
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-sx.c              |  335
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-synch.c           |  274
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-syscalls.c        | 1487
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-sysctl.c          |   64
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c    |   43
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c |   67
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-thread.c          |  208
-rw-r--r--  rtems/freebsd/rtems/rtems-bsd-uma.c             | 2796
30 files changed, 8581 insertions, 0 deletions
diff --git a/rtems/freebsd/rtems/rtems-bsd-assert.c b/rtems/freebsd/rtems/rtems-bsd-assert.c
new file mode 100644
index 00000000..6e04db8c
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-assert.c
@@ -0,0 +1,39 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+
+void
+rtems_bsd_assert_func(const char *file, int line, const char *func, const char *expr)
+{
+ panic(
+ "assertion \"%s\" failed: file \"%s\", line %d%s%s\n",
+ expr,
+ file,
+ line,
+ (func != NULL) ? ", function: " : "",
+ (func != NULL) ? func : ""
+ );
+}
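
The assertion handler above is only reached through an assertion macro defined in <machine/rtems-bsd-config.h>, which is not part of this directory. A minimal sketch of how such a macro can forward to rtems_bsd_assert_func(); the macro name and exact expansion shown here are assumptions for illustration:

    /* Hypothetical expansion; the real macro lives in rtems-bsd-config.h. */
    void rtems_bsd_assert_func(const char *file, int line, const char *func,
        const char *expr);

    #define BSD_ASSERT(expr) \
      ((expr) ? (void) 0 : \
          rtems_bsd_assert_func(__FILE__, __LINE__, __func__, #expr))

    /* Example: panics with file, line, and function when sc indicates an error. */
    /* BSD_ASSERT(sc == RTEMS_SUCCESSFUL); */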
diff --git a/rtems/freebsd/rtems/rtems-bsd-autoconf.c b/rtems/freebsd/rtems/rtems-bsd-autoconf.c
new file mode 100644
index 00000000..cdf9fc61
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-autoconf.c
@@ -0,0 +1,51 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+static void
+configure_first(void *dummy)
+{
+ device_add_child(root_bus, "nexus", 0);
+}
+
+static void
+configure(void *dummy)
+{
+ root_bus_configure();
+}
+
+static void
+configure_final(void *dummy)
+{
+ /* Do nothing */
+}
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
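
The three SYSINIT() entries run within the SI_SUB_CONFIGURE phase in the order FIRST, THIRD, ANY, so the "nexus" child exists before root_bus_configure() walks the device tree. A hedged sketch of how another compilation unit can hook the same mechanism after configuration has finished; the function name and message are illustrative only:

    #include <rtems/freebsd/machine/rtems-bsd-config.h>

    #include <rtems/freebsd/sys/param.h>
    #include <rtems/freebsd/sys/types.h>
    #include <rtems/freebsd/sys/systm.h>
    #include <rtems/freebsd/sys/kernel.h>

    static void
    example_after_configure(void *dummy)
    {
        /* Runs near the end of mi_startup(), after the configure phase above. */
        printf("bus configuration finished\n");
    }

    SYSINIT(example, SI_SUB_LAST, SI_ORDER_ANY, example_after_configure, NULL);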
diff --git a/rtems/freebsd/rtems/rtems-bsd-bus-dma.c b/rtems/freebsd/rtems/rtems-bsd-bus-dma.c
new file mode 100644
index 00000000..1ed8564e
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-bus-dma.c
@@ -0,0 +1,455 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * This file is derived from FreeBSD 'sys/powerpc/powerpc/busdma_machdep.c'.
+ */
+
+/*-
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2002 Peter Grehan
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/freebsd/machine/rtems-bsd-cache.h>
+#include <rtems/malloc.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/machine/atomic.h>
+#include <rtems/freebsd/machine/bus.h>
+
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ #define CLSZ ((uintptr_t) CPU_DATA_CACHE_ALIGNMENT)
+ #define CLMASK (CLSZ - (uintptr_t) 1)
+#endif
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+};
+
+struct bus_dmamap {
+ void *buffer_begin;
+ bus_size_t buffer_size;
+};
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are meant to never be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+ panic("driver error: busdma dflt_lock called");
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ newtag = malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (newtag == NULL)
+ return (ENOMEM);
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = lowaddr;
+ newtag->highaddr = highaddr;
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+ if (lockfunc != NULL) {
+ newtag->lockfunc = lockfunc;
+ newtag->lockfuncarg = lockfuncarg;
+ } else {
+ newtag->lockfunc = dflt_lock;
+ newtag->lockfuncarg = NULL;
+ }
+
+ /*
+ * Take into account any restrictions imposed by our parent tag
+ */
+ if (parent != NULL) {
+ newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = max(parent->highaddr, newtag->highaddr);
+ if (newtag->boundary == 0)
+ newtag->boundary = parent->boundary;
+ else if (parent->boundary != 0)
+ newtag->boundary = MIN(parent->boundary,
+ newtag->boundary);
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL)
+ atomic_add_int(&parent->ref_count, 1);
+ }
+
+ *dmat = newtag;
+ return (error);
+}
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0)
+ return (EBUSY);
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+ return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ return ENOMEM;
+ }
+
+ dmat->map_count++;
+
+ return (0);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ free(map, M_DEVBUF);
+
+ dmat->map_count--;
+
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ return ENOMEM;
+ }
+
+ *vaddr = rtems_heap_allocate_aligned_with_boundary(dmat->maxsize, dmat->alignment, dmat->boundary);
+ if (*vaddr == NULL) {
+ free(*mapp, M_DEVBUF);
+
+ return ENOMEM;
+ }
+
+ (*mapp)->buffer_begin = *vaddr;
+ (*mapp)->buffer_size = dmat->maxsize;
+
+ if ((flags & BUS_DMA_ZERO) != 0) {
+ memset(*vaddr, 0, dmat->maxsize);
+ }
+
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its allocated dmamap, which were allocated
+ * via bus_dmamem_alloc. Make the same choice for free/contigfree.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ free(vaddr, M_RTEMS_HEAP);
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
+ void *buf, bus_size_t buflen, struct thread *td, int flags,
+ vm_offset_t *lastaddrp, int *segp, int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vm_offset_t vaddr = (vm_offset_t)buf;
+ int seg;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ curaddr = vaddr;
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (sgsize > dmat->maxsegsz)
+ sgsize = dmat->maxsegsz;
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ if (first) {
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+ vm_offset_t lastaddr;
+ int error, nsegs;
+
+ map->buffer_begin = buf;
+ map->buffer_size = buflen;
+
+ lastaddr = (vm_offset_t)0;
+ nsegs = 0;
+ error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
+ NULL, flags, &lastaddr, &nsegs, 1);
+
+ if (error == 0)
+ (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
+ else
+ (*callback)(callback_arg, NULL, 0, error);
+
+ return (0);
+}
+
+/*
+ * Release the mapping held by map. A no-op on PowerPC.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+ return;
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ uintptr_t size = map->buffer_size;
+ uintptr_t begin = (uintptr_t) map->buffer_begin;
+ uintptr_t end = begin + size;
+
+ if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
+ rtems_cache_flush_multiple_data_lines((void *) begin, size);
+ }
+ if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ if ((op & BUS_DMASYNC_PREWRITE) != 0 || ((begin | size) & CLMASK) != 0) {
+ rtems_cache_flush_multiple_data_lines((void *) begin, size);
+ }
+ rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
+ }
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+ char first_buf [CLSZ];
+ char last_buf [CLSZ];
+ bool first_is_aligned = (begin & CLMASK) == 0;
+ bool last_is_aligned = (end & CLMASK) == 0;
+ void *first_begin = (void *) (begin & ~CLMASK);
+ size_t first_size = begin & CLMASK;
+ void *last_begin = (void *) end;
+ size_t last_size = CLSZ - (end & CLMASK);
+
+ if (!first_is_aligned) {
+ memcpy(first_buf, first_begin, first_size);
+ }
+ if (!last_is_aligned) {
+ memcpy(last_buf, last_begin, last_size);
+ }
+
+ rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
+
+ if (!first_is_aligned) {
+ memcpy(first_begin, first_buf, first_size);
+ }
+ if (!last_is_aligned) {
+ memcpy(last_begin, last_buf, last_size);
+ }
+ }
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+}
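
The functions above provide the subset of the FreeBSD busdma API that the imported drivers need; virtual and bus addresses are identical here, so bus_dmamap_load_buffer() only splits the buffer into segments. A minimal sketch of the usual tag/map life cycle in a driver, assuming the headers included by this file plus the standard bus_dmamap_unload() wrapper from <sys/bus_dma.h>; the foo_* names and the sizes are illustrative:

    static void
    foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
        bus_addr_t *addr = arg;

        if (error == 0)
            *addr = segs[0].ds_addr;    /* remember the bus address */
    }

    static int
    foo_dma_example(void)
    {
        bus_dma_tag_t tag;
        bus_dmamap_t map;
        void *buf;
        bus_addr_t busaddr = 0;
        int error;

        /* One 4 KiB segment, 16 byte aligned, no boundary and no filter */
        error = bus_dma_tag_create(NULL, 16, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, 4096, 1, 4096, 0,
            busdma_lock_mutex, &Giant, &tag);
        if (error != 0)
            return (error);

        error = bus_dmamem_alloc(tag, &buf, BUS_DMA_ZERO, &map);
        if (error != 0) {
            bus_dma_tag_destroy(tag);
            return (error);
        }

        error = bus_dmamap_load(tag, map, buf, 4096, foo_dma_callback,
            &busaddr, 0);

        /* ... program the device with busaddr, wait for completion ... */

        bus_dmamap_unload(tag, map);
        bus_dmamem_free(tag, buf, map);
        bus_dma_tag_destroy(tag);

        return (error);
    }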
diff --git a/rtems/freebsd/rtems/rtems-bsd-callout.c b/rtems/freebsd/rtems/rtems-bsd-callout.c
new file mode 100644
index 00000000..d427b636
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-callout.c
@@ -0,0 +1,122 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_callout_chain);
+
+static void
+rtems_bsd_callout_dispatch(rtems_id id, void *arg)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct callout *c = arg;
+
+ if (c->c_lock != NULL) {
+ sc = rtems_semaphore_obtain(c->c_lock->lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+ }
+
+ if (c->c_func != NULL) {
+ (*c->c_func)(c->c_arg);
+ }
+
+ if (c->c_lock != NULL && (c->c_flags & CALLOUT_RETURNUNLOCKED) == 0) {
+ sc = rtems_semaphore_release(c->c_lock->lo_id);
+ BSD_ASSERT_SC(sc);
+ }
+}
+
+void
+callout_init(struct callout *c, int mpsafe)
+{
+ _callout_init_lock(c, mpsafe ? NULL : &Giant.lock_object, mpsafe ? CALLOUT_RETURNUNLOCKED : 0);
+}
+
+void
+_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+
+ sc = rtems_timer_create(rtems_build_name('_', 'T', 'M', 'R'), &id);
+ BSD_ASSERT_SC(sc);
+
+ c->c_id = id;
+ c->c_lock = lock;
+ c->c_flags = flags;
+ c->c_func = NULL;
+ c->c_arg = NULL;
+
+ rtems_chain_append(&rtems_bsd_callout_chain, &c->c_node);
+}
+
+int
+callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
+{
+ /* FIXME: Integer conversions */
+
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ if (to_ticks <= 0) {
+ to_ticks = 1;
+ }
+
+ c->c_func = ftn;
+ c->c_arg = arg;
+
+ sc = rtems_timer_server_fire_after(c->c_id, (rtems_interval) to_ticks, rtems_bsd_callout_dispatch, c);
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+}
+
+int
+callout_schedule(struct callout *c, int to_ticks)
+{
+ return callout_reset(c, to_ticks, c->c_func, c->c_arg);
+}
+
+int
+_callout_stop_safe(struct callout *c, int safe)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ if (!safe) {
+ sc = rtems_timer_cancel(c->c_id);
+ BSD_ASSERT_SC(sc);
+ } else {
+ sc = rtems_timer_delete(c->c_id);
+ BSD_ASSERT_SC(sc);
+
+ c->c_id = RTEMS_ID_NONE;
+ rtems_chain_extract(&c->c_node);
+ }
+
+ return 0;
+}
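
Callouts are mapped onto the RTEMS timer server: callout_reset() arms rtems_timer_server_fire_after(), and the dispatch routine obtains the associated lock unless CALLOUT_RETURNUNLOCKED is set. A short usage sketch, assuming the standard callout_stop() wrapper macro from <sys/callout.h>; the periodic handler and interval are illustrative:

    static struct callout example_co;
    static int example_count;

    static void
    example_timeout(void *arg)
    {
        int *counter = arg;

        (*counter)++;

        /* Re-arm for a roughly one second period. */
        callout_reset(&example_co, hz, example_timeout, counter);
    }

    static void
    example_start(void)
    {
        callout_init(&example_co, 1);   /* MP-safe, no lock is taken */
        callout_reset(&example_co, hz, example_timeout, &example_count);
    }

    static void
    example_stop(void)
    {
        callout_stop(&example_co);      /* cancels the timer server timer */
    }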
diff --git a/rtems/freebsd/rtems/rtems-bsd-cam.c b/rtems/freebsd/rtems/rtems-bsd-cam.c
new file mode 100644
index 00000000..36d4d67c
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-cam.c
@@ -0,0 +1,495 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+#include <rtems/freebsd/cam/cam.h>
+#include <rtems/freebsd/cam/cam_ccb.h>
+#include <rtems/freebsd/cam/cam_sim.h>
+#include <rtems/freebsd/cam/cam_xpt.h>
+#include <rtems/freebsd/cam/cam_xpt_sim.h>
+#include <rtems/freebsd/cam/cam_debug.h>
+
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+
+#include <rtems/media.h>
+#include <rtems/libio.h>
+#include <rtems/diskdevs.h>
+
+#define BSD_CAM_DEVQ_DUMMY ((struct cam_devq *) 0xdeadbeef)
+
+#define BSD_SCSI_TAG 0
+
+#define BSD_SCSI_RETRIES 4
+
+#define BSD_SCSI_TIMEOUT (60 * 1000)
+
+#define BSD_SCSI_MIN_COMMAND_SIZE 10
+
+MALLOC_DEFINE(M_CAMSIM, "CAM SIM", "CAM SIM buffers");
+
+static void
+rtems_bsd_sim_set_state(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ sim->state = state;
+}
+
+static void
+rtems_bsd_sim_set_state_and_notify(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ sim->state = state;
+ cv_broadcast(&sim->state_changed);
+}
+
+static void
+rtems_bsd_sim_wait_for_state(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ while (sim->state != state) {
+ cv_wait(&sim->state_changed, sim->mtx);
+ }
+}
+
+static void
+rtems_bsd_sim_wait_for_state_and_cancel_ccb(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ while (sim->state != state) {
+ if (sim->state != BSD_SIM_BUSY) {
+ cv_wait(&sim->state_changed, sim->mtx);
+ } else {
+ sim->ccb.ccb_h.status = CAM_SEL_TIMEOUT;
+ (*sim->ccb.ccb_h.cbfcnp)(NULL, &sim->ccb);
+ }
+ }
+}
+
+static void
+rtems_bsd_ccb_callback(struct cam_periph *periph, union ccb *ccb)
+{
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ BSD_ASSERT(periph == NULL && sim->state == BSD_SIM_INIT_BUSY);
+
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_INIT_READY);
+}
+
+static rtems_status_code
+rtems_bsd_ccb_action(union ccb *ccb)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ mtx_lock(sim->mtx);
+
+ BSD_ASSERT(sim->state == BSD_SIM_INIT);
+ rtems_bsd_sim_set_state(sim, BSD_SIM_INIT_BUSY);
+ (*sim->sim_action)(sim, ccb);
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_INIT_READY);
+ if (ccb->ccb_h.status != CAM_REQ_CMP) {
+ sc = RTEMS_IO_ERROR;
+ }
+ rtems_bsd_sim_set_state(sim, BSD_SIM_INIT);
+
+ mtx_unlock(sim->mtx);
+
+ return sc;
+}
+
+static rtems_status_code
+rtems_bsd_scsi_inquiry(union ccb *ccb, struct scsi_inquiry_data *inq_data)
+{
+ memset(inq_data, 0, sizeof(*inq_data));
+
+ scsi_inquiry(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ (u_int8_t *) inq_data,
+ sizeof(*inq_data) - 1,
+ FALSE,
+ 0,
+ SSD_MIN_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ return rtems_bsd_ccb_action(ccb);
+}
+
+static rtems_status_code
+rtems_bsd_scsi_test_unit_ready(union ccb *ccb)
+{
+ scsi_test_unit_ready(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ return rtems_bsd_ccb_action(ccb);
+}
+
+static rtems_status_code
+rtems_bsd_scsi_read_capacity(union ccb *ccb, uint32_t *block_count, uint32_t *block_size)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct scsi_read_capacity_data rdcap;
+
+ memset(&rdcap, 0, sizeof(rdcap));
+
+ scsi_read_capacity(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ &rdcap,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ sc = rtems_bsd_ccb_action(ccb);
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_IO_ERROR;
+ }
+
+ *block_size = scsi_4btoul(rdcap.length);
+ *block_count = scsi_4btoul(rdcap.addr) + 1;
+
+ return RTEMS_SUCCESSFUL;
+}
+
+static void
+rtems_bsd_csio_callback(struct cam_periph *periph, union ccb *ccb)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ bool done = false;
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ BSD_ASSERT(periph == NULL && sim->state == BSD_SIM_BUSY);
+
+ if (ccb->ccb_h.status == CAM_REQ_CMP) {
+ rtems_blkdev_sg_buffer *sg = ccb->csio.sg_current;
+
+ if (sg != ccb->csio.sg_end) {
+ scsi_read_write(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_csio_callback,
+ BSD_SCSI_TAG,
+ ccb->csio.readop,
+ 0,
+ BSD_SCSI_MIN_COMMAND_SIZE,
+ sg->block,
+ sg->length / 512, /* FIXME */
+ sg->buffer,
+ sg->length,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+ ccb->csio.sg_current = sg + 1;
+ (*sim->sim_action)(sim, ccb);
+ } else {
+ done = true;
+ }
+ } else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT) {
+ sc = RTEMS_UNSATISFIED;
+ done = true;
+ } else {
+ sc = RTEMS_IO_ERROR;
+ done = true;
+ }
+
+ if (done) {
+ ccb->csio.req->req_done(ccb->csio.req->done_arg, sc);
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_IDLE);
+ }
+}
+
+static int rtems_bsd_sim_disk_read_write(struct cam_sim *sim, rtems_blkdev_request *req)
+{
+ mtx_lock(sim->mtx);
+
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_IDLE);
+ rtems_bsd_sim_set_state(sim, BSD_SIM_BUSY);
+
+ switch (req->req) {
+ case RTEMS_BLKDEV_REQ_READ:
+ sim->ccb.csio.readop = TRUE;
+ break;
+ case RTEMS_BLKDEV_REQ_WRITE:
+ sim->ccb.csio.readop = FALSE;
+ break;
+ default:
+ mtx_unlock(sim->mtx);
+ return -1;
+ }
+
+ sim->ccb.csio.sg_current = req->bufs;
+ sim->ccb.csio.sg_end = req->bufs + req->bufnum;
+ sim->ccb.csio.req = req;
+
+ sim->ccb.ccb_h.status = CAM_REQ_CMP;
+
+ rtems_bsd_csio_callback(NULL, &sim->ccb);
+
+ mtx_unlock(sim->mtx);
+
+ return 0;
+}
+
+static int rtems_bsd_sim_disk_ioctl(rtems_disk_device *dd, uint32_t req, void *arg)
+{
+ struct cam_sim *sim = rtems_disk_get_driver_data(dd);
+
+ if (req == RTEMS_BLKIO_REQUEST) {
+ rtems_blkdev_request *r = arg;
+
+ return rtems_bsd_sim_disk_read_write(sim, r);
+ } else if (req == RTEMS_BLKIO_DELETED) {
+ mtx_lock(sim->mtx);
+
+ free(sim->disk, M_RTEMS_HEAP);
+ sim->disk = NULL;
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_DELETED);
+
+ mtx_unlock(sim->mtx);
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static void
+rtems_bsd_sim_disk_initialized(struct cam_sim *sim, char *disk)
+{
+ mtx_lock(sim->mtx);
+
+ sim->disk = disk;
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_IDLE);
+
+ mtx_unlock(sim->mtx);
+}
+
+static rtems_status_code
+rtems_bsd_sim_attach_worker(rtems_media_state state, const char *src, char **dest, void *arg)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_device_major_number major = UINT32_MAX;
+ struct cam_sim *sim = arg;
+ char *disk = NULL;
+
+ if (state == RTEMS_MEDIA_STATE_READY) {
+ dev_t dev = 0;
+ unsigned retries = 0;
+
+ struct scsi_inquiry_data inq_data;
+ uint32_t block_count = 0;
+ uint32_t block_size = 0;
+
+ sc = rtems_io_register_driver(0, &rtems_blkdev_generic_ops, &major);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: register driver failed\n");
+ goto error;
+ }
+
+ disk = rtems_media_create_path("/dev", src, major);
+ if (disk == NULL) {
+ BSD_PRINTF("OOPS: create path failed\n");
+ goto unregister_and_error;
+ }
+
+ sc = rtems_bsd_scsi_inquiry(&sim->ccb, &inq_data);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: inquiry failed\n");
+ goto unregister_and_error;
+ }
+ scsi_print_inquiry(&inq_data);
+
+ for (retries = 0; retries <= 3; ++retries) {
+ sc = rtems_bsd_scsi_test_unit_ready(&sim->ccb);
+ if (sc == RTEMS_SUCCESSFUL) {
+ break;
+ }
+ }
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: test unit ready failed\n");
+ goto unregister_and_error;
+ }
+
+ sc = rtems_bsd_scsi_read_capacity(&sim->ccb, &block_count, &block_size);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: read capacity failed\n");
+ goto unregister_and_error;
+ }
+
+ BSD_PRINTF("read capacity: block count %u, block size %u\n", block_count, block_size);
+
+ dev = rtems_filesystem_make_dev_t(major, 0);
+
+ sc = rtems_disk_create_phys(dev, block_size, block_count, rtems_bsd_sim_disk_ioctl, sim, disk);
+ if (sc != RTEMS_SUCCESSFUL) {
+ goto unregister_and_error;
+ }
+
+ /* FIXME */
+#if 0
+ rtems_disk_device *dd = rtems_disk_obtain(dev);
+ dd->block_size *= 64;
+ rtems_disk_release(dd);
+#endif
+
+ rtems_bsd_sim_disk_initialized(sim, disk);
+
+ *dest = strdup(disk, M_RTEMS_HEAP);
+ }
+
+ return RTEMS_SUCCESSFUL;
+
+unregister_and_error:
+
+ rtems_io_unregister_driver(major);
+
+error:
+
+ free(disk, M_RTEMS_HEAP);
+
+ rtems_bsd_sim_disk_initialized(sim, NULL);
+
+ return RTEMS_IO_ERROR;
+}
+
+struct cam_sim *
+cam_sim_alloc(
+ sim_action_func sim_action,
+ sim_poll_func sim_poll,
+ const char *sim_name,
+ void *softc,
+ u_int32_t unit,
+ struct mtx *mtx,
+ int max_dev_transactions,
+ int max_tagged_dev_transactions,
+ struct cam_devq *queue
+)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct cam_sim *sim = NULL;
+
+ if (mtx == NULL) {
+ return NULL;
+ }
+
+ sim = malloc(sizeof(*sim), M_CAMSIM, M_NOWAIT | M_ZERO);
+ if (sim == NULL) {
+ return NULL;
+ }
+
+ sim->sim_action = sim_action;
+ sim->sim_poll = sim_poll;
+ sim->sim_name = sim_name;
+ sim->softc = softc;
+ sim->mtx = mtx;
+ sim->unit_number = unit;
+ sim->ccb.ccb_h.sim = sim;
+
+ cv_init(&sim->state_changed, "SIM state changed");
+
+ sc = rtems_media_server_disk_attach(sim_name, rtems_bsd_sim_attach_worker, sim);
+ BSD_ASSERT_SC(sc);
+
+ return sim;
+}
+
+void
+cam_sim_free(struct cam_sim *sim, int free_devq)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ /*
+ * The umass_detach() cancels all transfers via
+ * usbd_transfer_unsetup(). This also prevents the start of new
+ * transfers since the transfer descriptors will be removed. Started
+ * transfers that are not in the transferring state will be canceled
+ * and their callbacks will not be called. Thus it is necessary to do
+ * this here if we are in the BUSY state.
+ */
+ rtems_bsd_sim_wait_for_state_and_cancel_ccb(sim, BSD_SIM_IDLE);
+
+ if (sim->disk != NULL) {
+ sc = rtems_media_server_disk_detach(sim->disk);
+ BSD_ASSERT_SC(sc);
+
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_DELETED);
+ }
+
+ cv_destroy(&sim->state_changed);
+ free(sim, M_CAMSIM);
+}
+
+struct cam_devq *
+cam_simq_alloc(u_int32_t max_sim_transactions)
+{
+ return BSD_CAM_DEVQ_DUMMY;
+}
+
+void
+cam_simq_free(struct cam_devq *devq)
+{
+ BSD_ASSERT(devq == BSD_CAM_DEVQ_DUMMY);
+}
+
+void
+xpt_done(union ccb *done_ccb)
+{
+ (*done_ccb->ccb_h.cbfcnp)(NULL, done_ccb);
+}
+
+int32_t
+xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
+{
+ /*
+ * We ignore this bus stuff completely. This is easier than removing
+ * the calls from "umass.c".
+ */
+
+ return CAM_SUCCESS;
+}
+
+int32_t
+xpt_bus_deregister(path_id_t pathid)
+{
+ /*
+ * We ignore this bus stuff completely. This is easier than removing
+ * the calls from "umass.c".
+ */
+
+ return CAM_REQ_CMP;
+}
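
In this port cam_sim_alloc() does more than allocate the SIM: it registers the unit with the RTEMS media server, whose attach worker probes the logical unit (INQUIRY, TEST UNIT READY, READ CAPACITY) and creates the block device. A condensed sketch of the call sequence as a host controller driver such as umass would use it; the mutex name and transaction counts are illustrative:

    static struct cam_sim *example_sim;
    static struct cam_devq *example_devq;
    static struct mtx example_cam_mtx;

    static void
    example_attach_sim(sim_action_func action, sim_poll_func poll, void *softc)
    {
        mtx_init(&example_cam_mtx, "example-cam", NULL, MTX_DEF);

        /* The device queue is only a dummy object in this port. */
        example_devq = cam_simq_alloc(1);

        /* Registers the unit with the RTEMS media server. */
        example_sim = cam_sim_alloc(action, poll, "example-sim", softc, 0,
            &example_cam_mtx, 1, 1, example_devq);
    }

    static void
    example_detach_sim(void)
    {
        /* Waits for pending CCBs and detaches the media server disk. */
        cam_sim_free(example_sim, 0);
        cam_simq_free(example_devq);
    }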
diff --git a/rtems/freebsd/rtems/rtems-bsd-condvar.c b/rtems/freebsd/rtems/rtems-bsd-condvar.c
new file mode 100644
index 00000000..80b9db73
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-condvar.c
@@ -0,0 +1,167 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/posix/cond.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_condvar_chain);
+
+void
+cv_init(struct cv *cv, const char *desc)
+{
+ int rv = pthread_cond_init(&cv->cv_id, NULL);
+
+ BSD_ASSERT_RV(rv);
+
+ cv->cv_description = desc;
+
+ rtems_chain_append(&rtems_bsd_condvar_chain, &cv->cv_node);
+}
+
+void
+cv_destroy(struct cv *cv)
+{
+ int rv = pthread_cond_destroy(&cv->cv_id);
+
+ BSD_ASSERT_RV(rv);
+
+ rtems_chain_extract(&cv->cv_node);
+}
+
+static int _cv_wait_support(struct cv *cv, struct lock_object *lock, int timo, bool relock)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ int eno = 0;
+ Objects_Locations location = OBJECTS_ERROR;
+ POSIX_Condition_variables_Control *pcv = _POSIX_Condition_variables_Get(&cv->cv_id, &location);
+
+ if (location == OBJECTS_LOCAL) {
+ if (pcv->Mutex != POSIX_CONDITION_VARIABLES_NO_MUTEX && pcv->Mutex != lock->lo_id) {
+ _Thread_Enable_dispatch();
+
+ BSD_ASSERT(false);
+
+ return EINVAL;
+ }
+
+ sc = rtems_semaphore_release(lock->lo_id);
+ if (sc != RTEMS_SUCCESSFUL) {
+ _Thread_Enable_dispatch();
+
+ BSD_ASSERT(false);
+
+ return EINVAL;
+ }
+
+ pcv->Mutex = lock->lo_id;
+
+ _Thread_queue_Enter_critical_section(&pcv->Wait_queue);
+ _Thread_Executing->Wait.return_code = 0;
+ _Thread_Executing->Wait.queue = &pcv->Wait_queue;
+ _Thread_Executing->Wait.id = cv->cv_id;
+
+ /* FIXME: Integer conversion */
+ _Thread_queue_Enqueue(&pcv->Wait_queue, (Watchdog_Interval) timo);
+
+ DROP_GIANT();
+
+ _Thread_Enable_dispatch();
+
+ PICKUP_GIANT();
+
+ eno = (int) _Thread_Executing->Wait.return_code;
+ if (eno != 0) {
+ if (eno == ETIMEDOUT) {
+ eno = EWOULDBLOCK;
+ } else {
+ BSD_ASSERT(false);
+
+ eno = EINVAL;
+ }
+ }
+
+ if (relock) {
+ sc = rtems_semaphore_obtain(lock->lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_ASSERT(false);
+
+ eno = EINVAL;
+ }
+ }
+
+ return eno;
+ }
+
+ BSD_PANIC("unexpected object location");
+}
+
+void
+_cv_wait(struct cv *cv, struct lock_object *lock)
+{
+ _cv_wait_support(cv, lock, 0, true);
+}
+
+void
+_cv_wait_unlock(struct cv *cv, struct lock_object *lock)
+{
+ _cv_wait_support(cv, lock, 0, false);
+}
+
+int
+_cv_timedwait(struct cv *cv, struct lock_object *lock, int timo)
+{
+ if (timo <= 0) {
+ timo = 1;
+ }
+
+ return _cv_wait_support(cv, lock, timo, true);
+}
+
+void
+cv_signal(struct cv *cv)
+{
+ int rv = pthread_cond_signal(&cv->cv_id);
+
+ BSD_ASSERT_RV(rv);
+}
+
+void
+cv_broadcastpri(struct cv *cv, int pri)
+{
+ int rv = 0;
+
+ BSD_ASSERT(pri == 0);
+
+ rv = pthread_cond_broadcast(&cv->cv_id);
+ BSD_ASSERT_RV(rv);
+}
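
The condition variables are backed by POSIX condition variables, and _cv_wait_support() releases the caller's lock (an RTEMS semaphore) before blocking and re-acquires it on wakeup unless asked not to. The usual monitor pattern therefore works unchanged; a brief sketch, assuming the cv_wait() wrapper macro from <sys/condvar.h>; the flag and object names are illustrative:

    static struct mtx example_mtx;
    static struct cv example_cv;
    static int example_ready;

    static void
    example_init(void)
    {
        mtx_init(&example_mtx, "example", NULL, MTX_DEF);
        cv_init(&example_cv, "example ready");
    }

    static void
    example_consumer(void)
    {
        mtx_lock(&example_mtx);
        while (!example_ready) {
            /* Unlocks example_mtx while blocked, relocks on wakeup. */
            cv_wait(&example_cv, &example_mtx);
        }
        example_ready = 0;
        mtx_unlock(&example_mtx);
    }

    static void
    example_producer(void)
    {
        mtx_lock(&example_mtx);
        example_ready = 1;
        cv_signal(&example_cv);
        mtx_unlock(&example_mtx);
    }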
diff --git a/rtems/freebsd/rtems/rtems-bsd-delay.c b/rtems/freebsd/rtems/rtems-bsd-delay.c
new file mode 100644
index 00000000..b047ab6e
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-delay.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+void
+DELAY(int usec)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ /* FIXME: Integer conversion */
+ rtems_interval ticks =
+ ((rtems_interval) usec * (rtems_interval) hz) / 1000000;
+
+ if (ticks == 0) {
+ ticks = 1;
+ }
+
+ sc = rtems_task_wake_after(ticks);
+ BSD_ASSERT_SC(sc);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-generic.c b/rtems/freebsd/rtems/rtems-bsd-generic.c
new file mode 100644
index 00000000..3a46da43
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-generic.c
@@ -0,0 +1,209 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <sys/types.h> //needed for fd_mask and such
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/select.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+MALLOC_DEFINE(M_IOV, "iov", "large iov's");
+
+void selrecord(struct thread *selector, struct selinfo *sip)
+{
+ BSD_PANIC("not implemented");
+}
+
+void selwakeup(struct selinfo *sip)
+{
+ BSD_PANIC("not implemented");
+}
+
+void selwakeuppri(struct selinfo *sip, int pri)
+{
+ BSD_PANIC("not implemented");
+}
+
+void seltdfini(struct thread *td)
+{
+ BSD_PANIC("not implemented");
+}
+
+/*
+ *********************************************************************
+ * RTEMS implementation of select() system call *
+ *********************************************************************
+ */
+
+/*
+ * This implementation is quite restricted:
+ * Works on sockets only -- no support for other devices!
+ * A given socket can be in a read-select or a read/recv* by only
+ * one task at a time.
+ * A given socket can be in a write-select or a write/send* by only
+ * one task at a time.
+ *
+ * NOTE - select() is a very expensive system call. It should be avoided
+ * if at all possible. In many cases, rewriting the application
+ * to use multiple tasks (one per socket) is a better solution.
+ */
+
+struct socket *rtems_bsdnet_fdToSocket(int fd);
+
+static int
+socket_select (struct socket *so, int which, rtems_id tid)
+{
+ switch (which) {
+
+ case FREAD:
+ if (soreadable(so))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_rcv.sb_flags |= SB_WAIT;
+ so->so_rcv.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+
+ case FWRITE:
+ if (sowriteable(so))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_snd.sb_flags |= SB_WAIT;
+ so->so_snd.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+
+ case 0:
+ if (so->so_oobmark || (so->so_state & SBS_RCVATMARK))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_rcv.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+ }
+ return (0);
+}
+
+static int
+selscan (rtems_id tid, fd_mask **ibits, fd_mask **obits, int nfd, int *retval)
+{
+ struct socket *so;
+ int msk, i, fd;
+ fd_mask bits, bit;
+ int n = 0;
+ static int flag[3] = { FREAD, FWRITE, 0 };
+
+ for (msk = 0; msk < 3; msk++) {
+ if (ibits[msk] == NULL)
+ continue;
+ for (i = 0; i < nfd; i += NFDBITS) {
+ bits = ibits[msk][i/NFDBITS];
+ for (fd = i, bit = 1 ; bits && (fd < nfd) ; fd++, bit <<= 1) {
+ if ((bits & bit) == 0)
+ continue;
+ bits &= ~bit;
+ so = rtems_bsdnet_fdToSocket (fd);
+ if (so == NULL)
+ return (EBADF);
+ if (socket_select (so, flag[msk], tid)) {
+ obits[msk][fd/NFDBITS] |=
+ (1 << (fd % NFDBITS));
+ n++;
+ }
+ }
+ }
+ }
+ *retval = n;
+ return (0);
+}
+
+int
+select (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *tv)
+{
+ fd_mask *ibits[3], *obits[3];
+ fd_set ob[3];
+ int error, timo;
+ int retval = 0;
+ rtems_id tid;
+ rtems_interval then = 0, now;
+ rtems_event_set events;
+
+ if (nfds < 0)
+ return (EINVAL);
+ if (tv) {
+ timo = tv->tv_sec * hz + tv->tv_usec / tick;
+ if (timo == 0)
+ timo = 1;
+ then = rtems_clock_get_ticks_since_boot();
+ }
+ else {
+ timo = 0;
+ }
+
+#define getbits(name,i) if (name) { \
+ ibits[i] = &name->fds_bits[0]; \
+ obits[i] = &ob[i].fds_bits[0]; \
+ FD_ZERO(&ob[i]); \
+ } \
+ else ibits[i] = NULL
+ getbits (readfds, 0);
+ getbits (writefds, 1);
+ getbits (exceptfds, 2);
+#undef getbits
+
+ //rtems_task_ident (RTEMS_SELF, 0, &tid);
+ //rtems_event_receive (SBWAIT_EVENT, RTEMS_EVENT_ANY | RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &events);
+ for (;;) {
+ error = selscan(tid, ibits, obits, nfds, &retval);
+ if (error || retval)
+ break;
+ if (timo) {
+ now = rtems_clock_get_ticks_since_boot();
+ timo -= now - then;
+ if (timo <= 0)
+ break;
+ then = now;
+ }
+ //rtems_event_receive (SBWAIT_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT, timo, &events);
+ }
+
+#define putbits(name,i) if (name) *name = ob[i]
+ putbits (readfds, 0);
+ putbits (writefds, 1);
+ putbits (exceptfds, 2);
+#undef putbits
+ if (error) {
+ errno = error;
+ retval = -1;
+ }
+ return (retval);
+}
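
This select() handles sockets only and polls them in a loop (the event-based wakeup is still commented out), so it is considerably more expensive than a blocking recv(). A brief sketch of the supported usage pattern, waiting for a single socket to become readable with a timeout; the descriptor is assumed to come from socket() elsewhere:

    static int
    example_wait_readable(int sd)
    {
        fd_set readfds;
        struct timeval tv;
        int n;

        FD_ZERO(&readfds);
        FD_SET(sd, &readfds);

        tv.tv_sec = 5;      /* give up after five seconds */
        tv.tv_usec = 0;

        /* Only socket descriptors are supported by this implementation. */
        n = select(sd + 1, &readfds, NULL, NULL, &tv);
        if (n > 0 && FD_ISSET(sd, &readfds))
            return (0);     /* data can be received without blocking */

        return (-1);        /* timeout or error */
    }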
diff --git a/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c b/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c
new file mode 100644
index 00000000..c8b3ddc7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c
@@ -0,0 +1,46 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/irq-extension.h>
+
+#include <rtems/freebsd/bsd.h>
+
+rtems_status_code
+rtems_bsd_initialize_with_interrupt_server(void)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_interrupt_server_initialize(
+ BSD_TASK_PRIORITY_INTERRUPT,
+ BSD_MINIMUM_TASK_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ NULL
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_UNSATISFIED;
+ }
+
+ return rtems_bsd_initialize();
+}
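
rtems_bsd_initialize_with_interrupt_server() sets up the RTEMS interrupt server task used by the converted FreeBSD interrupt handlers and then runs the normal stack initialization. A minimal sketch of calling it from an application initialization task; the error handling is illustrative:

    #include <rtems.h>

    #include <rtems/freebsd/bsd.h>

    rtems_task
    Init(rtems_task_argument arg)
    {
        rtems_status_code sc = rtems_bsd_initialize_with_interrupt_server();

        if (sc != RTEMS_SUCCESSFUL) {
            rtems_fatal_error_occurred(0xdeadbeef);
        }

        /* The FreeBSD SYSINIT sets have run at this point. */

        (void) arg;
        rtems_task_delete(RTEMS_SELF);
    }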
diff --git a/rtems/freebsd/rtems/rtems-bsd-init.c b/rtems/freebsd/rtems/rtems-bsd-init.c
new file mode 100644
index 00000000..347d6710
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-init.c
@@ -0,0 +1,65 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/bsd.h>
+
+/* In FreeBSD this is a local function */
+void mi_startup(void);
+
+int hz;
+int tick;
+int maxusers; /* base tunable */
+
+rtems_status_code
+rtems_bsd_initialize(void)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ hz = (int) rtems_clock_get_ticks_per_second();
+ tick = 1000000 / hz;
+ maxusers = 1;
+
+ sc = rtems_timer_initiate_server(
+ BSD_TASK_PRIORITY_TIMER,
+ BSD_MINIMUM_TASK_STACK_SIZE,
+ RTEMS_DEFAULT_ATTRIBUTES
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_UNSATISFIED;
+ }
+
+ mutex_init();
+
+ mi_startup();
+
+ return RTEMS_SUCCESSFUL;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-jail.c b/rtems/freebsd/rtems/rtems-bsd-jail.c
new file mode 100644
index 00000000..d04efe5b
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-jail.c
@@ -0,0 +1,92 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>*/
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/osd.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysent.h>
+#include <rtems/freebsd/sys/namei.h>
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syscallsubr.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#define DEFAULT_HOSTUUID "00000000-0000-0000-0000-000000000000"
+
+/* Keep struct prison prison0 and some code in kern_jail_set() readable. */
+#ifdef INET
+#ifdef INET6
+#define _PR_IP_SADDRSEL PR_IP4_SADDRSEL|PR_IP6_SADDRSEL
+#else
+#define _PR_IP_SADDRSEL PR_IP4_SADDRSEL
+#endif
+#else /* !INET */
+#ifdef INET6
+#define _PR_IP_SADDRSEL PR_IP6_SADDRSEL
+#else
+#define _PR_IP_SADDRSEL 0
+#endif
+#endif
+
+/* prison0 describes what is "real" about the system. */
+struct prison prison0 = {
+ .pr_id = 0,
+ .pr_name = "0",
+ .pr_ref = 1,
+ .pr_uref = 1,
+ .pr_path = "/",
+ .pr_securelevel = -1,
+ .pr_childmax = JAIL_MAX,
+ .pr_hostuuid = DEFAULT_HOSTUUID,
+ .pr_children = LIST_HEAD_INITIALIZER(prison0.pr_children),
+#ifdef VIMAGE
+ .pr_flags = PR_HOST|PR_VNET|_PR_IP_SADDRSEL,
+#else
+ .pr_flags = PR_HOST|_PR_IP_SADDRSEL,
+#endif
+ .pr_allow = PR_ALLOW_ALL,
+};
+MTX_SYSINIT(prison0, &prison0.pr_mtx, "jail mutex", MTX_DEF);
+
diff --git a/rtems/freebsd/rtems/rtems-bsd-lock.c b/rtems/freebsd/rtems/rtems-bsd-lock.c
new file mode 100644
index 00000000..e351debc
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-lock.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/proc.h>
+
+struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
+ &lock_class_mtx_spin,
+ &lock_class_mtx_sleep,
+ &lock_class_sx,
+ &lock_class_rm,
+ &lock_class_rw,
+};
+
diff --git a/rtems/freebsd/rtems/rtems-bsd-malloc.c b/rtems/freebsd/rtems/rtems-bsd-malloc.c
new file mode 100644
index 00000000..b534b729
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-malloc.c
@@ -0,0 +1,77 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
+
+MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
+
+void
+malloc_init(void *data)
+{
+ struct malloc_type *mtp = data;
+}
+
+void
+malloc_uninit(void *data)
+{
+ struct malloc_type *mtp = data;
+
+ BSD_PRINTF( "desc = %s\n", mtp->ks_shortdesc);
+}
+
+#undef malloc
+
+void *
+_bsd_malloc(unsigned long size, struct malloc_type *mtp, int flags)
+{
+ void *p = malloc(size);
+
+ if ((flags & M_ZERO) != 0 && p != NULL) {
+ memset(p, 0, size);
+ }
+
+ return p;
+}
+
+#undef free
+
+void
+_bsd_free(void *addr, struct malloc_type *mtp)
+{
+ free(addr);
+}
+
+#undef strdup
+
+char *
+_bsd_strdup(const char *__restrict s, struct malloc_type *type)
+{
+ return strdup(s);
+}
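
The FreeBSD kernel allocator is mapped directly onto the C library heap: the malloc_type argument is ignored and only the M_ZERO flag changes behavior. A tiny usage sketch in the usual FreeBSD style; the structure is illustrative:

    struct example_softc {
        int unit;
        void *buffer;
    };

    static struct example_softc *
    example_alloc(void)
    {
        /* Zeroed allocation from the RTEMS heap; M_NOWAIT never sleeps here. */
        return malloc(sizeof(struct example_softc), M_TEMP, M_NOWAIT | M_ZERO);
    }

    static void
    example_release(struct example_softc *sc)
    {
        free(sc, M_TEMP);
    }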
diff --git a/rtems/freebsd/rtems/rtems-bsd-mutex.c b/rtems/freebsd/rtems/rtems-bsd-mutex.c
new file mode 100644
index 00000000..837232d2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-mutex.c
@@ -0,0 +1,314 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+static void assert_mtx(struct lock_object *lock, int what);
+static void lock_mtx(struct lock_object *lock, int how);
+static void lock_spin(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_mtx(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_mtx(struct lock_object *lock);
+static int unlock_spin(struct lock_object *lock);
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_mtx_chain);
+
+/*
+ * Lock classes for sleep and spin mutexes.
+ */
+struct lock_class lock_class_mtx_sleep = {
+ .lc_name = "sleep mutex",
+ .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
+ .lc_assert = assert_mtx,
+#ifdef DDB
+ .lc_ddb_show = db_show_mtx,
+#endif
+ .lc_lock = lock_mtx,
+ .lc_unlock = unlock_mtx,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_mtx,
+#endif
+};
+
+struct lock_class lock_class_mtx_spin = {
+ .lc_name = "spin mutex",
+ .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
+ .lc_assert = assert_mtx,
+#ifdef DDB
+ .lc_ddb_show = db_show_mtx,
+#endif
+ .lc_lock = lock_spin,
+ .lc_unlock = unlock_spin,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_mtx,
+#endif
+};
+
+struct mtx Giant;
+
+void
+assert_mtx(struct lock_object *lock, int what)
+{
+ mtx_assert((struct mtx *)lock, what);
+}
+
+void
+lock_mtx(struct lock_object *lock, int how)
+{
+
+ mtx_lock((struct mtx *)lock);
+}
+
+void
+lock_spin(struct lock_object *lock, int how)
+{
+
+ panic("spin locks can only use msleep_spin");
+}
+
+int
+unlock_mtx(struct lock_object *lock)
+{
+ struct mtx *m;
+
+ m = (struct mtx *)lock;
+ mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
+ mtx_unlock(m);
+ return (0);
+}
+
+int
+unlock_spin(struct lock_object *lock)
+{
+
+ panic("spin locks can only use msleep_spin");
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_mtx(struct lock_object *lock, struct thread **owner)
+{
+ struct mtx *m = (struct mtx *)lock;
+
+ *owner = mtx_owner(m);
+ return (mtx_unowned(m) == 0);
+}
+#endif
+
+void
+mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+{
+ struct lock_class *class;
+ int i;
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ /* rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING; */
+ rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE;
+
+ if ((opts & MTX_RECURSE) != 0 )
+ {
+ /*FIXME*/
+ }
+
+ /* Determine lock class and lock flags. */
+ if (opts & MTX_SPIN)
+ class = &lock_class_mtx_spin;
+ else
+ class = &lock_class_mtx_sleep;
+
+ /* Check for double-init and zero object. */
+ KASSERT(!lock_initalized(&m->lock_object), ("lock \"%s\" %p already initialized", name, m->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ m->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('_', 'M', 'T', 'X'),
+ 1,
+ attr,
+ BSD_TASK_PRIORITY_RESOURCE_OWNER,
+ &id
+ );
+ BSD_ASSERT_SC(sc);
+
+ m->lock_object.lo_name = name;
+ m->lock_object.lo_flags |= LO_INITIALIZED;
+ m->lock_object.lo_id = id;
+
+ rtems_chain_append(&rtems_bsd_mtx_chain, &m->lock_object.lo_node);
+}
+
+void
+_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain(m->lock_object.lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+}
+
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain(m->lock_object.lo_id, RTEMS_NO_WAIT, 0);
+ if (sc == RTEMS_SUCCESSFUL) {
+ return 1;
+ } else if (sc == RTEMS_UNSATISFIED) {
+ return 0;
+ } else {
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+ }
+}
+
+void
+_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_release(m->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+}
+
+/*
+ * The backing function for the INVARIANTS-enabled mtx_assert()
+ */
+#ifdef INVARIANT_SUPPORT
+void
+_mtx_assert(struct mtx *m, int what, const char *file, int line)
+{
+
+ if (panicstr != NULL || dumping)
+ return;
+ switch (what) {
+ case MA_OWNED:
+ case MA_OWNED | MA_RECURSED:
+ case MA_OWNED | MA_NOTRECURSED:
+ if (!mtx_owned(m))
+ panic("mutex %s not owned at %s:%d",
+ m->lock_object.lo_name, file, line);
+ if (mtx_recursed(m)) {
+ if ((what & MA_NOTRECURSED) != 0)
+ panic("mutex %s recursed at %s:%d",
+ m->lock_object.lo_name, file, line);
+ } else if ((what & MA_RECURSED) != 0) {
+ panic("mutex %s unrecursed at %s:%d",
+ m->lock_object.lo_name, file, line);
+ }
+ break;
+ case MA_NOTOWNED:
+ if (mtx_owned(m))
+ panic("mutex %s owned at %s:%d",
+ m->lock_object.lo_name, file, line);
+ break;
+ default:
+ panic("unknown mtx_assert at %s:%d", file, line);
+ }
+}
+#endif
+
+int mtx_owned(struct mtx *m)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(m->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int owned = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return owned;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+int mtx_recursed(struct mtx *m)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(m->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int recursed = sema->Core_control.mutex.nest_count != 0;
+
+ _Thread_Enable_dispatch();
+
+ return recursed;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+void
+mtx_sysinit(void *arg)
+{
+ struct mtx_args *margs = arg;
+
+ mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_delete(m->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+
+ rtems_chain_extract(&m->lock_object.lo_node);
+
+ m->lock_object.lo_id = 0;
+ m->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+void
+mutex_init(void)
+{
+ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
+ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
+}
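
FreeBSD mutexes are emulated with binary RTEMS semaphores; recursion requested via MTX_RECURSE is not handled yet (see the FIXME above). A short usage sketch, assuming the standard mtx_lock()/mtx_trylock()/mtx_unlock() wrapper macros from <sys/mutex.h>; the names are illustrative:

    static struct mtx example_mtx;

    static void
    example_mutex_usage(void)
    {
        mtx_init(&example_mtx, "example", NULL, MTX_DEF);

        mtx_lock(&example_mtx);
        /* ... critical section ... */
        mtx_unlock(&example_mtx);

        if (mtx_trylock(&example_mtx) != 0) {
            /* The semaphore was obtained without blocking. */
            mtx_unlock(&example_mtx);
        }

        mtx_destroy(&example_mtx);
    }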
diff --git a/rtems/freebsd/rtems/rtems-bsd-nexus.c b/rtems/freebsd/rtems/rtems-bsd-nexus.c
new file mode 100644
index 00000000..480307fc
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-nexus.c
@@ -0,0 +1,71 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/freebsd/machine/rtems-bsd-sysinit.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+
+static int
+nexus_probe(device_t dev)
+{
+ size_t unit = 0;
+
+ /* FIXME */
+ for (unit = 0; _bsd_nexus_devices [unit] != NULL; ++unit) {
+ device_add_child(dev, _bsd_nexus_devices [unit], unit);
+ }
+
+ device_set_desc(dev, "RTEMS Nexus device");
+
+ return (0);
+}
+
+static device_method_t nexus_methods [] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_probe),
+ DEVMETHOD(device_attach, bus_generic_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+
+ { 0, 0 }
+};
+
+static driver_t nexus_driver = {
+ .name = "nexus",
+ .methods = nexus_methods,
+ .size = 0
+};
+
+static devclass_t nexus_devclass;
+
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
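+
+/*
+ * Reference sketch: nexus_probe() above walks the application-provided,
+ * NULL-terminated _bsd_nexus_devices table and adds one child device per
+ * entry.  A minimal table might look as follows (the driver name is
+ * hypothetical; the exact declaration comes from
+ * <rtems/freebsd/machine/rtems-bsd-sysinit.h>):
+ *
+ *	const char *_bsd_nexus_devices [] = {
+ *		"example_eth",
+ *		NULL
+ *	};
+ */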
diff --git a/rtems/freebsd/rtems/rtems-bsd-panic.c b/rtems/freebsd/rtems/rtems-bsd-panic.c
new file mode 100644
index 00000000..2425abed
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-panic.c
@@ -0,0 +1,70 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+static void
+suspend_all_threads(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+ rtems_id self = rtems_task_self();
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *td = (struct thread *) node;
+
+ if (td->td_id != self && td->td_id != RTEMS_SELF) {
+ rtems_task_suspend(td->td_id);
+ }
+
+ node = rtems_chain_next(node);
+ }
+
+ rtems_task_suspend(RTEMS_SELF);
+}
+
+void
+panic(const char *fmt, ...)
+{
+ va_list ap;
+
+ printf("*** BSD PANIC *** ");
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ printf("\n");
+
+ suspend_all_threads();
+
+ /* FIXME */
+ rtems_fatal_error_occurred(0xdeadbeef);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-prot.c b/rtems/freebsd/rtems/rtems-bsd-prot.c
new file mode 100644
index 00000000..3199c4ba
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-prot.c
@@ -0,0 +1,142 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/ucred.h>
+
+static MALLOC_DEFINE(M_CRED, "cred", "credentials");
+
+/*
+ * Allocate a zeroed cred structure.
+ */
+struct ucred *
+crget(void)
+{
+ register struct ucred *cr;
+
+ cr = malloc(sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
+ refcount_init(&cr->cr_ref, 1);
+#ifdef AUDIT
+ audit_cred_init(cr);
+#endif
+#ifdef MAC
+ mac_cred_init(cr);
+#endif
+ crextend(cr, XU_NGROUPS);
+ return (cr);
+}
+
+/*
+ * Claim another reference to a ucred structure.
+ */
+struct ucred *
+crhold(struct ucred *cr)
+{
+
+ refcount_acquire(&cr->cr_ref);
+ return (cr);
+}
+
+/*
+ * Free a cred structure. Throws away space when ref count gets to 0.
+ */
+void
+crfree(struct ucred *cr)
+{
+
+ KASSERT(cr->cr_ref > 0, ("bad ucred refcount: %d", cr->cr_ref));
+ KASSERT(cr->cr_ref != 0xdeadc0de, ("dangling reference to ucred"));
+ if (refcount_release(&cr->cr_ref)) {
+ /*
+ * Some callers of crget(), such as nfs_statfs(),
+ * allocate a temporary credential, but don't
+ * allocate a uidinfo structure.
+ */
+ if (cr->cr_uidinfo != NULL)
+ uifree(cr->cr_uidinfo);
+ if (cr->cr_ruidinfo != NULL)
+ uifree(cr->cr_ruidinfo);
+ /*
+ * Free a prison, if any.
+ */
+ if (cr->cr_prison != NULL)
+ prison_free(cr->cr_prison);
+#ifdef AUDIT
+ audit_cred_destroy(cr);
+#endif
+#ifdef MAC
+ mac_cred_destroy(cr);
+#endif
+ free(cr->cr_groups, M_CRED);
+ free(cr, M_CRED);
+ }
+}
+
+/*
+ * Check to see if this ucred is shared.
+ */
+int
+crshared(struct ucred *cr)
+{
+
+ return (cr->cr_ref > 1);
+}
+
+/*
+ * Copy a ucred's contents from a template. Does not block.
+ */
+void
+crcopy(struct ucred *dest, struct ucred *src)
+{
+
+ KASSERT(crshared(dest) == 0, ("crcopy of shared ucred"));
+ bcopy(&src->cr_startcopy, &dest->cr_startcopy,
+ (unsigned)((caddr_t)&src->cr_endcopy -
+ (caddr_t)&src->cr_startcopy));
+ crsetgroups(dest, src->cr_ngroups, src->cr_groups);
+ uihold(dest->cr_uidinfo);
+ uihold(dest->cr_ruidinfo);
+ prison_hold(dest->cr_prison);
+#ifdef AUDIT
+ audit_cred_copy(src, dest);
+#endif
+#ifdef MAC
+ mac_cred_copy(src, dest);
+#endif
+}
+
+/*
+ * Dup cred struct to a new held one.
+ */
+struct ucred *
+crdup(struct ucred *cr)
+{
+ struct ucred *newcr;
+
+ newcr = crget();
+ crcopy(newcr, cr);
+ return (newcr);
+}
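+
+/*
+ * Usage sketch for reference (names are hypothetical): credentials are
+ * reference counted, so code that stores a ucred pointer takes its own
+ * reference with crhold() and drops it with crfree() when done.
+ *
+ *	static struct ucred *example_cred;
+ *
+ *	void
+ *	example_cache_cred(struct ucred *cr)
+ *	{
+ *		example_cred = crhold(cr);
+ *	}
+ *
+ *	void
+ *	example_release_cred(void)
+ *	{
+ *		crfree(example_cred);
+ *		example_cred = NULL;
+ *	}
+ */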
diff --git a/rtems/freebsd/rtems/rtems-bsd-resource.c b/rtems/freebsd/rtems/rtems-bsd-resource.c
new file mode 100644
index 00000000..3b85d4b7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-resource.c
@@ -0,0 +1,173 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/resourcevar.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
+
+#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
+static struct rwlock uihashtbl_lock;
+static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
+static u_long uihash; /* size of hash table - 1 */
+
+/*
+ * Find the uidinfo structure for a uid. This structure is used to
+ * track the total resource consumption (process count, socket buffer
+ * size, etc.) for the uid and impose limits.
+ */
+void
+uihashinit()
+{
+
+ uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
+ rw_init(&uihashtbl_lock, "uidinfo hash");
+}
+
+/*
+ * Look up a uidinfo struct for the parameter uid.
+ * uihashtbl_lock must be locked.
+ */
+static struct uidinfo *
+uilookup(uid)
+ uid_t uid;
+{
+ struct uihashhead *uipp;
+ struct uidinfo *uip;
+
+ rw_assert(&uihashtbl_lock, RA_LOCKED);
+ uipp = UIHASH(uid);
+ LIST_FOREACH(uip, uipp, ui_hash)
+ if (uip->ui_uid == uid)
+ break;
+
+ return (uip);
+}
+
+/*
+ * Find or allocate a struct uidinfo for a particular uid.
+ * Increase refcount on uidinfo struct returned.
+ * uifree() should be called on a struct uidinfo when released.
+ */
+struct uidinfo *
+uifind(uid)
+ uid_t uid;
+{
+ struct uidinfo *old_uip, *uip;
+
+ rw_rlock(&uihashtbl_lock);
+ uip = uilookup(uid);
+ if (uip == NULL) {
+ rw_runlock(&uihashtbl_lock);
+ uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
+ rw_wlock(&uihashtbl_lock);
+ /*
+ * There's a chance someone created our uidinfo while we
+ * were in malloc and not holding the lock, so we have to
+ * make sure we don't insert a duplicate uidinfo.
+ */
+ if ((old_uip = uilookup(uid)) != NULL) {
+ /* Someone else beat us to it. */
+ free(uip, M_UIDINFO);
+ uip = old_uip;
+ } else {
+ refcount_init(&uip->ui_ref, 0);
+ uip->ui_uid = uid;
+ mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
+ MTX_DEF);
+ LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
+ }
+ }
+ uihold(uip);
+ rw_unlock(&uihashtbl_lock);
+ return (uip);
+}
+
+/*
+ * Place another refcount on a uidinfo struct.
+ */
+void
+uihold(uip)
+ struct uidinfo *uip;
+{
+
+ refcount_acquire(&uip->ui_ref);
+}
+
+/*-
+ * Since uidinfo structs have a long lifetime, we use an
+ * opportunistic refcounting scheme to avoid locking the lookup hash
+ * for each release.
+ *
+ * If the refcount hits 0, we need to free the structure,
+ * which means we need to lock the hash.
+ * Optimal case:
+ * After locking the struct and lowering the refcount, if we find
+ * that we don't need to free, simply unlock and return.
+ * Suboptimal case:
+ * If refcount lowering results in need to free, bump the count
+ * back up, lose the lock and acquire the locks in the proper
+ * order to try again.
+ */
+void
+uifree(uip)
+ struct uidinfo *uip;
+{
+ int old;
+
+ /* Prepare for optimal case. */
+ old = uip->ui_ref;
+ if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
+ return;
+
+ /* Prepare for suboptimal case. */
+ rw_wlock(&uihashtbl_lock);
+ if (refcount_release(&uip->ui_ref)) {
+ LIST_REMOVE(uip, ui_hash);
+ rw_wunlock(&uihashtbl_lock);
+ if (uip->ui_sbsize != 0)
+ printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
+ uip->ui_uid, uip->ui_sbsize);
+ if (uip->ui_proccnt != 0)
+ printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
+ uip->ui_uid, uip->ui_proccnt);
+ if (uip->ui_vmsize != 0)
+ printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
+ uip->ui_uid, (unsigned long long)uip->ui_vmsize);
+ mtx_destroy(&uip->ui_vmsize_mtx);
+ free(uip, M_UIDINFO);
+ return;
+ }
+ /*
+ * Someone added a reference between atomic_cmpset_int() and
+ * rw_wlock(&uihashtbl_lock).
+ */
+ rw_wunlock(&uihashtbl_lock);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-rwlock.c b/rtems/freebsd/rtems/rtems-bsd-rwlock.c
new file mode 100644
index 00000000..de9decd2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-rwlock.c
@@ -0,0 +1,340 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2011 OPTI Medical. All rights reserved.
+ *
+ * OPTI Medical
+ * 235 Hembree Park Drive
+ * Roswell, GA 30076
+ * USA
+ * <kevin.kirspel@optimedical.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <pthread.h>
+
+#ifndef INVARIANTS
+#define _rw_assert(rw, what, file, line)
+#endif
+
+static void assert_rw(struct lock_object *lock, int what);
+static void lock_rw(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_rw(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_rw(struct lock_object *lock);
+
+typedef uint32_t pthread_rwlock_t;
+
+struct lock_class lock_class_rw = {
+ .lc_name = "rw",
+ .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
+ .lc_assert = assert_rw,
+#ifdef DDB
+ .lc_ddb_show = db_show_rwlock,
+#endif
+ .lc_lock = lock_rw,
+ .lc_unlock = unlock_rw,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_rw,
+#endif
+};
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_rwlock_chain);
+
+void
+assert_rw(struct lock_object *lock, int what)
+{
+ rw_assert((struct rwlock *)lock, what);
+}
+
+void
+lock_rw(struct lock_object *lock, int how)
+{
+ struct rwlock *rw;
+
+ rw = (struct rwlock *)lock;
+ if (how)
+ rw_wlock(rw);
+ else
+ rw_rlock(rw);
+}
+
+int
+unlock_rw(struct lock_object *lock)
+{
+ struct rwlock *rw;
+
+ rw = (struct rwlock *)lock;
+ rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
+ if (rw->rw_lock & RW_LOCK_READ) {
+ rw_runlock(rw);
+ return (0);
+ } else {
+ rw_wunlock(rw);
+ return (1);
+ }
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_rw(struct lock_object *lock, struct thread **owner)
+{
+ struct rwlock *rw = (struct rwlock *)lock;
+ uintptr_t x = rw->rw_lock;
+
+ *owner = rw_wowner(rw);
+ return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
+ (*owner != NULL));
+}
+#endif
+
+void
+rw_init_flags(struct rwlock *rw, const char *name, int opts)
+{
+ struct lock_class *class;
+ int i;
+ pthread_rwlock_t lock;
+ int iret;
+
+ if ((opts & RW_RECURSE) != 0) {
+ /* FIXME */
+ }
+
+ class = &lock_class_rw;
+
+ /* Check for double-init and zero object. */
+ KASSERT(!lock_initalized(&rw->lock_object), ("lock \"%s\" %p already initialized", name, &rw->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ rw->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ iret = pthread_rwlock_init( &lock, NULL );
+ BSD_ASSERT( iret == 0 );
+
+ rw->lock_object.lo_name = name;
+ rw->lock_object.lo_flags |= LO_INITIALIZED;
+ rw->lock_object.lo_id = lock;
+
+ rtems_chain_append(&rtems_bsd_rwlock_chain, &rw->lock_object.lo_node);
+}
+
+void
+rw_destroy(struct rwlock *rw)
+{
+ int iret;
+ iret = pthread_rwlock_destroy( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+ rtems_chain_extract( &rw->lock_object.lo_node );
+ rw->lock_object.lo_id = 0;
+ rw->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+void
+rw_sysinit(void *arg)
+{
+ struct rw_args *args = arg;
+
+ rw_init(args->ra_rw, args->ra_desc);
+}
+
+void
+rw_sysinit_flags(void *arg)
+{
+ struct rw_args_flags *args = arg;
+
+ rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
+}
+
+int
+rw_wowned(struct rwlock *rw)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(rw->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int owned = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return owned;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+void
+_rw_wlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_wrlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+int
+_rw_try_wlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_trywrlock( &rw->lock_object.lo_id );
+ if (iret == 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+void
+_rw_wunlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_unlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+void
+_rw_rlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_rdlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+int
+_rw_try_rlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_tryrdlock( &rw->lock_object.lo_id );
+ if (iret == 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+void
+_rw_runlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_unlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef _rw_assert
+#endif
+
+/*
+ * In the non-WITNESS case, rw_assert() can only detect that at least
+ * *some* thread owns an rlock, but it cannot guarantee that *this*
+ * thread owns an rlock.
+ */
+void
+_rw_assert(struct rwlock *rw, int what, const char *file, int line)
+{
+
+ if (panicstr != NULL)
+ return;
+ switch (what) {
+ case RA_LOCKED:
+ case RA_LOCKED | RA_RECURSED:
+ case RA_LOCKED | RA_NOTRECURSED:
+ case RA_RLOCKED:
+#ifdef WITNESS
+ witness_assert(&rw->lock_object, what, file, line);
+#else
+ /*
+ * If some other thread has a write lock or we have one
+ * and are asserting a read lock, fail. Also, if no one
+ * has a lock at all, fail.
+ */
+ if (rw->rw_lock == RW_UNLOCKED ||
+ (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
+ rw_wowner(rw) != curthread)))
+ panic("Lock %s not %slocked @ %s:%d\n",
+ rw->lock_object.lo_name, (what == RA_RLOCKED) ?
+ "read " : "", file, line);
+
+ if (!(rw->rw_lock & RW_LOCK_READ)) {
+ if (rw_recursed(rw)) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file,
+ line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ }
+#endif
+ break;
+ case RA_WLOCKED:
+ case RA_WLOCKED | RA_RECURSED:
+ case RA_WLOCKED | RA_NOTRECURSED:
+ if (rw_wowner(rw) != curthread)
+ panic("Lock %s not exclusively locked @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ if (rw_recursed(rw)) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ break;
+ case RA_UNLOCKED:
+#ifdef WITNESS
+ witness_assert(&rw->lock_object, what, file, line);
+#else
+ /*
+ * If we hold a write lock, fail. We can't reliably check
+ * to see if we hold a read lock or not.
+ */
+ if (rw_wowner(rw) == curthread)
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+#endif
+ break;
+ default:
+ panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
+ line);
+ }
+}
+#endif /* INVARIANT_SUPPORT */
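+
+/*
+ * Usage sketch for reference (names are hypothetical): the rw_*()
+ * functions above are backed by a POSIX read-write lock whose identifier
+ * is stored in lock_object.lo_id, so multiple readers may hold the lock
+ * concurrently while writers get exclusive access.
+ *
+ *	static struct rwlock example_rw;
+ *	static int example_value;
+ *
+ *	// once: rw_init(&example_rw, "example");
+ *
+ *	int
+ *	example_read(void)
+ *	{
+ *		int v;
+ *
+ *		rw_rlock(&example_rw);
+ *		v = example_value;
+ *		rw_runlock(&example_rw);
+ *		return (v);
+ *	}
+ */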
diff --git a/rtems/freebsd/rtems/rtems-bsd-shell.c b/rtems/freebsd/rtems/rtems-bsd-shell.c
new file mode 100644
index 00000000..ec704c81
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-shell.c
@@ -0,0 +1,181 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/bsd.h>
+#include <rtems/shell.h>
+
+static void
+rtems_bsd_dump_callout(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_callout_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("callout dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct callout *c = (struct callout *) node;
+
+ printf("\t0x%08x\n", c->c_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_mtx(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_mtx_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("mtx dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct lock_object *lo = (struct lock_object *) node;
+
+ printf("\t%s: 0x%08x\n", lo->lo_name, lo->lo_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_sx(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_sx_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("sx dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct lock_object *lo = (struct lock_object *) node;
+
+ printf("\t%s: 0x%08x\n", lo->lo_name, lo->lo_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_condvar(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_condvar_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("condvar dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct cv *cv = (struct cv *) node;
+
+ printf("\t%s: 0x%08x\n", cv->cv_description, cv->cv_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_thread(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("thread dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *td = (struct thread *) node;
+
+ printf("\t%s: 0x%08x\n", td->td_name, td->td_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static const char rtems_bsd_usage [] =
+ "bsd {all|mtx|sx|condvar|thread|callout}";
+
+#define CMP(s) all || strcasecmp(argv [1], s) == 0
+
+static int
+rtems_bsd_info(int argc, char **argv)
+{
+ bool usage = true;
+
+ if (argc == 2) {
+ bool all = false;
+
+ if (CMP("all")) {
+ all = true;
+ }
+
+ if (CMP("mtx")) {
+ rtems_bsd_dump_mtx();
+ usage = false;
+ }
+ if (CMP("sx")) {
+ rtems_bsd_dump_sx();
+ usage = false;
+ }
+ if (CMP("condvar")) {
+ rtems_bsd_dump_condvar();
+ usage = false;
+ }
+ if (CMP("thread")) {
+ rtems_bsd_dump_thread();
+ usage = false;
+ }
+ if (CMP("callout")) {
+ rtems_bsd_dump_callout();
+ usage = false;
+ }
+ }
+
+ if (usage) {
+ puts(rtems_bsd_usage);
+ }
+
+ return 0;
+}
+
+static rtems_shell_cmd_t rtems_bsd_info_command = {
+ .name = "bsd",
+ .usage = rtems_bsd_usage,
+ .topic = "bsp",
+ .command = rtems_bsd_info,
+ .alias = NULL,
+ .next = NULL
+};
+
+void
+rtems_bsd_shell_initialize(void)
+{
+ rtems_shell_add_cmd_struct(&rtems_bsd_info_command);
+}
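+
+/*
+ * Usage note: once rtems_bsd_shell_initialize() has been called, the
+ * command is available from the RTEMS shell, for example (shell prompt
+ * shown for illustration):
+ *
+ *	SHLL [/] # bsd all
+ *	SHLL [/] # bsd mtx
+ *
+ * "bsd all" dumps every registered object chain; the other arguments
+ * select a single chain as listed in the usage string above.
+ */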
diff --git a/rtems/freebsd/rtems/rtems-bsd-signal.c b/rtems/freebsd/rtems/rtems-bsd-signal.c
new file mode 100644
index 00000000..02294f96
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-signal.c
@@ -0,0 +1,33 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/signalvar.h>
+
+void
+psignal(struct proc *p, int sig)
+{
+ BSD_PANIC("not implemented");
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-support.c b/rtems/freebsd/rtems/rtems-bsd-support.c
new file mode 100644
index 00000000..461078e9
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-support.c
@@ -0,0 +1,75 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/uio.h>
+
+int
+copyout(const void *kaddr, void *udaddr, size_t len)
+{
+ bcopy(kaddr, udaddr, len);
+ return (0);
+}
+
+int
+copyin(const void *udaddr, void *kaddr, size_t len)
+{
+ bcopy(udaddr, kaddr, len);
+ return (0);
+}
+
+int
+copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
+{
+ u_int iovlen;
+
+ *iov = NULL;
+ if (iovcnt > UIO_MAXIOV)
+ return (error);
+ iovlen = iovcnt * sizeof (struct iovec);
+ *iov = malloc(iovlen, M_IOV, M_WAITOK);
+ error = copyin(iovp, *iov, iovlen);
+ if (error) {
+ free(*iov, M_IOV);
+ *iov = NULL;
+ }
+ return (error);
+}
+
+void
+critical_enter(void)
+{
+ _Thread_Disable_dispatch();
+}
+
+void
+critical_exit(void)
+{
+ _Thread_Enable_dispatch();
+}
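+
+/*
+ * Note: on RTEMS, critical_enter() and critical_exit() only disable and
+ * re-enable thread dispatching.  They protect against preemption by other
+ * threads, not against interrupts.
+ */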
diff --git a/rtems/freebsd/rtems/rtems-bsd-sx.c b/rtems/freebsd/rtems/rtems-bsd-sx.c
new file mode 100644
index 00000000..93232be4
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sx.c
@@ -0,0 +1,335 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/sx.h>
+
+#ifndef INVARIANTS
+#define _sx_assert(sx, what, file, line)
+#endif
+
+static void assert_sx(struct lock_object *lock, int what);
+static void lock_sx(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_sx(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_sx(struct lock_object *lock);
+
+struct lock_class lock_class_sx = {
+ .lc_name = "sx",
+ .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
+ .lc_assert = assert_sx,
+#ifdef DDB
+ .lc_ddb_show = db_show_sx,
+#endif
+ .lc_lock = lock_sx,
+ .lc_unlock = unlock_sx,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_sx,
+#endif
+};
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_sx_chain);
+
+void
+assert_sx(struct lock_object *lock, int what)
+{
+ sx_assert((struct sx *)lock, what);
+}
+
+void
+lock_sx(struct lock_object *lock, int how)
+{
+ struct sx *sx;
+
+ sx = (struct sx *)lock;
+ if (how)
+ sx_xlock(sx);
+ else
+ sx_slock(sx);
+}
+
+int
+unlock_sx(struct lock_object *lock)
+{
+ struct sx *sx;
+
+ sx = (struct sx *)lock;
+ sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
+ if (sx_xlocked(sx)) {
+ sx_xunlock(sx);
+ return (1);
+ } else {
+ sx_sunlock(sx);
+ return (0);
+ }
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_sx(struct lock_object *lock, struct thread **owner)
+{
+ struct sx *sx = (struct sx *)lock;
+ uintptr_t x = sx->sx_lock;
+
+ *owner = (struct thread *)SX_OWNER(x);
+ return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
+ (*owner != NULL));
+}
+#endif
+
+void
+sx_sysinit(void *arg)
+{
+ struct sx_args *sargs = arg;
+
+ sx_init(sargs->sa_sx, sargs->sa_desc);
+}
+
+void
+sx_init_flags(struct sx *sx, const char *description, int opts)
+{
+ struct lock_class *class;
+ int i;
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE;
+
+ if ((opts & SX_RECURSE) != 0) {
+ /* FIXME */
+ }
+
+ class = &lock_class_sx;
+
+ /* Check for double-init and zero object. */
+ KASSERT(!lock_initalized(&sx->lock_object), ("lock \"%s\" %p already initialized", description, &sx->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ sx->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( '_', 'S', 'X', ' '),
+ 1,
+ attr,
+ 0,
+ &id
+ );
+ BSD_ASSERT_SC(sc);
+
+ sx->lock_object.lo_name = description;
+ sx->lock_object.lo_flags |= LO_INITIALIZED;
+ sx->lock_object.lo_id = id;
+
+ rtems_chain_append(&rtems_bsd_sx_chain, &sx->lock_object.lo_node);
+}
+
+void
+sx_destroy(struct sx *sx)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_delete( sx->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+
+ rtems_chain_extract(&sx->lock_object.lo_node);
+
+ sx->lock_object.lo_id = 0;
+ sx->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+int
+_sx_xlock(struct sx *sx, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ BSD_ASSERT((opts & SX_INTERRUPTIBLE) == 0);
+
+ sc = rtems_semaphore_obtain( sx->lock_object.lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+}
+
+int
+_sx_try_xlock(struct sx *sx, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain( sx->lock_object.lo_id, RTEMS_NO_WAIT, 0);
+ if (sc == RTEMS_SUCCESSFUL) {
+ return 1;
+ } else if (sc == RTEMS_UNSATISFIED) {
+ return 0;
+ } else {
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+ }
+}
+
+void
+_sx_xunlock(struct sx *sx, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_release( sx->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+}
+
+int
+_sx_try_upgrade(struct sx *sx, const char *file, int line)
+{
+ return 1;
+}
+
+void
+_sx_downgrade(struct sx *sx, const char *file, int line)
+{
+ /* Do nothing */
+}
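+
+/*
+ * Note: this sx implementation appears to use a single binary RTEMS
+ * semaphore (see sx_init_flags() above), so shared and exclusive
+ * acquisitions are not distinguished.  An upgrade therefore always
+ * succeeds and a downgrade has nothing to do.
+ */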
+
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef _sx_assert
+#endif
+
+/*
+ * In the non-WITNESS case, sx_assert() can only detect that at least
+ * *some* thread owns an slock, but it cannot guarantee that *this*
+ * thread owns an slock.
+ */
+void
+_sx_assert(struct sx *sx, int what, const char *file, int line)
+{
+#ifndef WITNESS
+ int slocked = 0;
+#endif
+
+ if (panicstr != NULL)
+ return;
+ switch (what) {
+ case SA_SLOCKED:
+ case SA_SLOCKED | SA_NOTRECURSED:
+ case SA_SLOCKED | SA_RECURSED:
+#ifndef WITNESS
+ slocked = 1;
+ /* FALLTHROUGH */
+#endif
+ case SA_LOCKED:
+ case SA_LOCKED | SA_NOTRECURSED:
+ case SA_LOCKED | SA_RECURSED:
+#ifdef WITNESS
+ witness_assert(&sx->lock_object, what, file, line);
+#else
+ /*
+ * If some other thread has an exclusive lock or we
+ * have one and are asserting a shared lock, fail.
+ * Also, if no one has a lock at all, fail.
+ */
+ if (sx->sx_lock == SX_LOCK_UNLOCKED ||
+ (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
+ sx_xholder(sx) != curthread)))
+ panic("Lock %s not %slocked @ %s:%d\n",
+ sx->lock_object.lo_name, slocked ? "share " : "",
+ file, line);
+
+ if (!(sx->sx_lock & SX_LOCK_SHARED)) {
+ if (sx_recursed(sx)) {
+ if (what & SA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file,
+ line);
+ } else if (what & SA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ }
+#endif
+ break;
+ case SA_XLOCKED:
+ case SA_XLOCKED | SA_NOTRECURSED:
+ case SA_XLOCKED | SA_RECURSED:
+ if (sx_xholder(sx) != curthread)
+ panic("Lock %s not exclusively locked @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ if (sx_recursed(sx)) {
+ if (what & SA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ } else if (what & SA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ break;
+ case SA_UNLOCKED:
+#ifdef WITNESS
+ witness_assert(&sx->lock_object, what, file, line);
+#else
+ /*
+ * If we hold an exclusive lock, fail. We can't
+ * reliably check to see if we hold a shared lock or
+ * not.
+ */
+ if (sx_xholder(sx) == curthread)
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+#endif
+ break;
+ default:
+ panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
+ line);
+ }
+}
+#endif /* INVARIANT_SUPPORT */
+
+int
+sx_xlocked(struct sx *sx)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(sx->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int xlocked = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return xlocked;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-synch.c b/rtems/freebsd/rtems/rtems-bsd-synch.c
new file mode 100644
index 00000000..2102c1a7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-synch.c
@@ -0,0 +1,274 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/machine/pcpu.h>
+
+#define STATES_WAITING_FOR_SLEEP 0x40000
+
+static int pause_wchan;
+
+typedef struct
+{
+ Chain_Node node;
+ void *ident;
+ Thread_queue_Control queue;
+} sleep_queue_control_t;
+
+/* This memory allocation could use _Workspace_Allocate() once inside the RTEMS tree */
+sleep_queue_control_t sleep_queue[BSD_MAXIMUM_SLEEP_QUEUES];
+Chain_Control sleep_queue_inactive_nodes; /* chain of inactive nodes */
+Chain_Control sleep_queue_active_nodes; /* chain of active nodes */
+
+void
+sleepinit(void)
+{
+ int ii;
+
+ /* initialize the sleep queue */
+ for( ii = 0; ii < BSD_MAXIMUM_SLEEP_QUEUES; ii++ )
+ {
+ sleep_queue[ii].ident = NULL;
+ /*
+     * Initialize the thread queue used to block threads sleeping on this entry
+ */
+ _Thread_queue_Initialize(
+ &sleep_queue[ii].queue,
+ THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_SLEEP | STATES_INTERRUPTIBLE_BY_SIGNAL,
+ EAGAIN
+ );
+ }
+  /* initialize the active chain */
+  _Chain_Initialize_empty( &sleep_queue_active_nodes );
+  /* initialize the inactive chain with all sleep queue entries */
+  _Chain_Initialize( &sleep_queue_inactive_nodes, sleep_queue, BSD_MAXIMUM_SLEEP_QUEUES, sizeof( sleep_queue_control_t ));
+}
+
+sleep_queue_control_t*
+sleep_queue_lookup(void *ident)
+{
+ int ii;
+
+  /* search the sleep queue table for this wait channel */
+ for( ii = 0; ii < BSD_MAXIMUM_SLEEP_QUEUES; ii++ )
+ {
+ if( sleep_queue[ii].ident == ident )
+ {
+ return &sleep_queue[ii];
+ }
+ }
+ return NULL;
+}
+
+sleep_queue_control_t*
+sleep_queue_get(void *ident)
+{
+ sleep_queue_control_t *sq;
+
+ sq = sleep_queue_lookup( ident );
+ if (sq == NULL)
+ {
+    KASSERT(!_Chain_Is_empty( &sleep_queue_inactive_nodes ), ("sleep_queue_get"));
+ //get a control from the inactive chain
+ sq = ( sleep_queue_control_t * )_Chain_Get( &sleep_queue_inactive_nodes );
+ sq->ident = ident;
+ _Chain_Append( &sleep_queue_active_nodes, &sq->node );
+ }
+ return sq;
+}
+
+/*
+ * Block the current thread until it is awakened from its sleep queue
+ * or it times out while waiting.
+ */
+int
+sleep_queue_timedwait(void *wchan, int pri, int timeout, int catch)
+{
+ sleep_queue_control_t *sq;
+ Thread_Control *executing;
+ ISR_Level level;
+
+ _Thread_Disable_dispatch();
+
+ sq = sleep_queue_get( wchan );
+
+ executing = _Thread_Executing;
+ if( timeout )
+ {
+ executing->Wait.return_code = EWOULDBLOCK;
+ }
+ else
+ {
+ executing->Wait.return_code = 0;
+ }
+ _ISR_Disable( level );
+ _Thread_queue_Enter_critical_section( &sq->queue );
+ if( catch )
+ {
+ sq->queue.state |= STATES_INTERRUPTIBLE_BY_SIGNAL;
+ }
+ else
+ {
+ sq->queue.state &= ~STATES_INTERRUPTIBLE_BY_SIGNAL;
+ }
+ executing->Wait.queue = &sq->queue;
+ _ISR_Enable( level );
+
+ _Thread_queue_Enqueue( &sq->queue, timeout );
+ _Thread_Enable_dispatch();
+ return _Thread_Executing->Wait.return_code;
+}
+
+/*
+ * General sleep call. Suspends the current thread until a wakeup is
+ * performed on the specified identifier. The thread will then be made
+ * runnable with the specified priority. Sleeps at most timo/hz seconds
+ * (0 means no timeout). If pri includes PCATCH flag, signals are checked
+ * before and after sleeping, else signals are not checked. Returns 0 if
+ * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
+ * signal needs to be delivered, ERESTART is returned if the current system
+ * call should be restarted if possible, and EINTR is returned if the system
+ * call should be interrupted by the signal (return EINTR).
+ *
+ * The lock argument is unlocked before the caller is suspended, and
+ * re-locked before _sleep() returns. If priority includes the PDROP
+ * flag the lock is not re-locked before returning.
+ */
+int
+_sleep(void *ident, struct lock_object *lock, int priority, const char *wmesg, int timo)
+{
+ struct thread *td;
+ struct proc *p;
+ struct lock_class *class;
+ int catch, flags, lock_state, pri, rval;
+
+ td = curthread;
+ p = td->td_proc;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(1, 0);
+#endif
+ KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
+ ("sleeping without a lock"));
+ KASSERT(p != NULL, ("msleep1"));
+ KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+ if (priority & PDROP)
+ KASSERT(lock != NULL && lock != &Giant.lock_object,
+ ("PDROP requires a non-Giant lock"));
+ if (lock != NULL)
+ class = LOCK_CLASS(lock);
+ else
+ class = NULL;
+
+ if (cold) {
+ /*
+ * During autoconfiguration, just return;
+ * don't run any other threads or panic below,
+ * in case this is the idle thread and already asleep.
+ * XXX: this used to do "s = splhigh(); splx(safepri);
+ * splx(s);" to give interrupts a chance, but there is
+ * no way to give interrupts a chance now.
+ */
+ if (lock != NULL && priority & PDROP)
+ class->lc_unlock(lock);
+ return (0);
+ }
+ catch = priority & PCATCH;
+ pri = priority & PRIMASK;
+
+ CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
+ td->td_tid, p->p_pid, td->td_name, wmesg, ident);
+
+ if (lock == &Giant.lock_object)
+ mtx_assert(&Giant, MA_OWNED);
+ DROP_GIANT();
+ if (lock != NULL && lock != &Giant.lock_object &&
+ !(class->lc_flags & LC_SLEEPABLE)) {
+ lock_state = class->lc_unlock(lock);
+ } else
+ /* GCC needs to follow the Yellow Brick Road */
+ lock_state = -1;
+
+ if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
+ lock_state = class->lc_unlock(lock);
+ }
+
+ rval = sleep_queue_timedwait(ident, pri, timo, catch);
+
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(0, 0);
+#endif
+ PICKUP_GIANT();
+ if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
+ class->lc_lock(lock, lock_state);
+ }
+ return (rval);
+}
+
+/*
+ * pause() is like tsleep() except that the intention is to not be
+ * explicitly woken up by another thread. Instead, the current thread
+ * simply wishes to sleep until the timeout expires. It is
+ * implemented using a dummy wait channel.
+ */
+int
+pause(const char *wmesg, int timo)
+{
+
+ KASSERT(timo != 0, ("pause: timeout required"));
+ return (tsleep(&pause_wchan, 0, wmesg, timo));
+}
+
+/*
+ * Make all threads sleeping on the specified identifier runnable.
+ */
+void
+wakeup(void *ident)
+{
+ sleep_queue_control_t *sq;
+ Thread_Control *the_thread;
+
+ sq = sleep_queue_lookup( ident );
+ if (sq == NULL)
+ {
+    return;
+ }
+
+ while ( (the_thread = _Thread_queue_Dequeue(&sq->queue)) )
+ {
+ }
+}
+
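+/*
+ * Usage sketch for reference (names are hypothetical): the classic
+ * FreeBSD pattern of sleeping on a wait channel and waking all sleepers,
+ * as supported by _sleep() and wakeup() above.  The address of
+ * example_event is only used as the wait channel.
+ *
+ *	static int example_event;
+ *
+ *	void
+ *	example_wait(void)
+ *	{
+ *		// sleep for at most one second (hz ticks)
+ *		(void) tsleep(&example_event, 0, "exwait", hz);
+ *	}
+ *
+ *	void
+ *	example_post(void)
+ *	{
+ *		wakeup(&example_event);
+ *	}
+ */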
diff --git a/rtems/freebsd/rtems/rtems-bsd-syscalls.c b/rtems/freebsd/rtems/rtems-bsd-syscalls.c
new file mode 100644
index 00000000..0a15bc4b
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-syscalls.c
@@ -0,0 +1,1487 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/machine/pcpu.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/libio_.h>
+#include <rtems/seterr.h>
+
+static const rtems_filesystem_file_handlers_r socket_handlers;
+extern int killinfo( pid_t pid, int sig, const union sigval *value );
+
+/*
+ * Convert an RTEMS file descriptor to a BSD socket pointer.
+ */
+
+struct socket *rtems_bsdnet_fdToSocket(
+ int fd
+)
+{
+ rtems_libio_t *iop;
+
+ /* same as rtems_libio_check_fd(_fd) but different return */
+ if ((uint32_t)fd >= rtems_libio_number_iops) {
+ errno = EBADF;
+ return NULL;
+ }
+ iop = &rtems_libio_iops[fd];
+
+ /* same as rtems_libio_check_is_open(iop) but different return */
+ if ((iop->flags & LIBIO_FLAGS_OPEN) == 0) {
+ errno = EBADF;
+ return NULL;
+ }
+
+ if (iop->data1 == NULL)
+ errno = EBADF;
+ return iop->data1;
+}
+
+/*
+ * Create an RTEMS file descriptor for a socket
+ */
+
+int rtems_bsdnet_makeFdForSocket(
+ void *so,
+ const rtems_filesystem_file_handlers_r *h
+)
+{
+ rtems_libio_t *iop;
+ int fd;
+
+ iop = rtems_libio_allocate();
+ if (iop == 0)
+ rtems_set_errno_and_return_minus_one( ENFILE );
+
+ fd = iop - rtems_libio_iops;
+ iop->flags |= LIBIO_FLAGS_WRITE | LIBIO_FLAGS_READ;
+ iop->data0 = fd;
+ iop->data1 = so;
+ iop->pathinfo.handlers = h;
+ iop->pathinfo.ops = &rtems_filesystem_operations_default;
+ return fd;
+}
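+
+/*
+ * Note: the two helpers above tie a BSD struct socket to an RTEMS libio
+ * descriptor by storing the socket pointer in iop->data1.  Every
+ * BSD-style entry point below therefore starts by recovering the socket,
+ * for example:
+ *
+ *	struct socket *so;
+ *
+ *	if ((so = rtems_bsdnet_fdToSocket(fd)) == NULL)
+ *		return (EBADF);	// errno has already been set
+ */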
+
+/*
+ * The following code is based on FreeBSD uipc_syscalls.c
+ */
+
+int
+sockargs(mp, buf, buflen, type)
+ struct mbuf **mp;
+ caddr_t buf;
+ int buflen, type;
+{
+ struct sockaddr *sa;
+ struct mbuf *m;
+ int error;
+
+ if ((u_int)buflen > MLEN) {
+#ifdef COMPAT_OLDSOCK
+ if (type == MT_SONAME && (u_int)buflen <= 112)
+ buflen = MLEN; /* unix domain compat. hack */
+ else
+#endif
+ if ((u_int)buflen > MCLBYTES)
+ return (EINVAL);
+ }
+ m = m_get(M_WAIT, type);
+ if ((u_int)buflen > MLEN)
+ MCLGET(m, M_WAIT);
+ m->m_len = buflen;
+ error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
+ if (error)
+ (void) m_free(m);
+ else {
+ *mp = m;
+ if (type == MT_SONAME) {
+ sa = mtod(m, struct sockaddr *);
+
+#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
+ if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
+ sa->sa_family = sa->sa_len;
+#endif
+ sa->sa_len = buflen;
+ }
+ }
+ return (error);
+}
+
+int
+getsockaddr(namp, uaddr, len)
+ struct sockaddr **namp;
+ caddr_t uaddr;
+ size_t len;
+{
+ struct sockaddr *sa;
+ int error;
+
+ if (len > SOCK_MAXADDRLEN)
+ return (ENAMETOOLONG);
+ if (len < offsetof(struct sockaddr, sa_data[0]))
+ return (EINVAL);
+ sa = malloc(len, M_SONAME, M_WAITOK);
+ error = copyin(uaddr, sa, len);
+ if (error) {
+ free(sa, M_SONAME);
+ } else {
+#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
+ if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
+ sa->sa_family = sa->sa_len;
+#endif
+ sa->sa_len = len;
+ *namp = sa;
+ }
+ return (error);
+}
+
+/*
+ *********************************************************************
+ * BSD-style entry points *
+ *********************************************************************
+ */
+int
+socket (int domain, int type, int protocol)
+{
+ struct thread *td;
+ struct socket *so;
+ int fd, error;
+
+ td = curthread;
+#ifdef MAC
+ error = mac_socket_check_create(td->td_ucred, domain, type, protocol);
+ if (error == 0 )
+ {
+#endif
+ /* Create the BSD socket and wrap it in an RTEMS file descriptor. */
+ error = socreate(domain, &so, type, protocol, td->td_ucred, td);
+ if (error == 0) {
+ fd = rtems_bsdnet_makeFdForSocket (so, &socket_handlers);
+ if (fd < 0)
+ {
+ soclose (so);
+ error = EBADF;
+ }
+ }
+#ifdef MAC
+ }
+#endif
+ if( error == 0 )
+ {
+ return fd;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_bind(td, fd, sa)
+ struct thread *td;
+ int fd;
+ struct sockaddr *sa;
+{
+ struct socket *so;
+ int error;
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+#ifdef MAC
+ error = mac_socket_check_bind(td->td_ucred, so, sa);
+ if (error == 0)
+#endif
+ error = sobind(so, sa, td);
+ return (error);
+}
+
+int
+bind (int s, struct sockaddr *name, int namelen)
+{
+ struct thread *td;
+ struct sockaddr *sa;
+ int error;
+
+ error = getsockaddr(&sa, name, namelen);
+ if( error == 0 )
+ {
+ td = curthread;
+ error = kern_bind(td, s, sa);
+ free(sa, M_SONAME);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_connect(td, fd, sa)
+ struct thread *td;
+ int fd;
+ struct sockaddr *sa;
+{
+ struct socket *so;
+ int error;
+ int interrupted = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+ if (so->so_state & SS_ISCONNECTING) {
+ error = EALREADY;
+ goto done1;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+#ifdef MAC
+ error = mac_socket_check_connect(td->td_ucred, so, sa);
+ if (error)
+ goto bad;
+#endif
+ error = soconnect(so, sa, td);
+ if (error)
+ goto bad;
+ if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
+ error = EINPROGRESS;
+ goto done1;
+ }
+ SOCK_LOCK(so);
+ while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
+ error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
+ "connec", 0);
+ if (error) {
+ if (error == EINTR || error == ERESTART)
+ interrupted = 1;
+ break;
+ }
+ }
+ if (error == 0) {
+ error = so->so_error;
+ so->so_error = 0;
+ }
+ SOCK_UNLOCK(so);
+bad:
+ if (!interrupted)
+ so->so_state &= ~SS_ISCONNECTING;
+ if (error == ERESTART)
+ error = EINTR;
+done1:
+ return (error);
+}
+
+int
+connect (int s, struct sockaddr *name, int namelen)
+{
+ int error;
+ struct sockaddr *sa;
+ struct thread *td;
+
+ error = getsockaddr(&sa, name, namelen);
+ if (error == 0)
+ {
+ td = curthread;
+ error = kern_connect(td, s, sa);
+ free(sa, M_SONAME);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+listen (int s, int backlog)
+{
+ struct thread *td;
+ struct socket *so;
+ int error = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ }
+ if( error == 0 )
+ {
+ td = curthread;
+#ifdef MAC
+ error = mac_socket_check_listen(td->td_ucred, so);
+ if (error == 0) {
+#endif
+ CURVNET_SET(so->so_vnet);
+ error = solisten(so, backlog, td);
+ CURVNET_RESTORE();
+#ifdef MAC
+ }
+#endif
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_accept(struct thread *td, int s, struct sockaddr **name, socklen_t *namelen)
+{
+ struct sockaddr *sa = NULL;
+ int error;
+ struct socket *head, *so;
+ int fd;
+ u_int fflag;
+ pid_t pgid;
+ int tmp;
+
+ if (name) {
+ *name = NULL;
+ if (*namelen < 0)
+ return (EINVAL);
+ }
+
+ if ((head = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+ if ((head->so_options & SO_ACCEPTCONN) == 0) {
+ error = EINVAL;
+ goto done;
+ }
+#ifdef MAC
+ error = mac_socket_check_accept(td->td_ucred, head);
+ if (error != 0)
+ goto done;
+#endif
+ ACCEPT_LOCK();
+ if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
+ ACCEPT_UNLOCK();
+ error = EWOULDBLOCK;
+ goto noconnection;
+ }
+ while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
+ if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ head->so_error = ECONNABORTED;
+ break;
+ }
+ error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
+ "accept", 0);
+ if (error) {
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ }
+ if (head->so_error) {
+ error = head->so_error;
+ head->so_error = 0;
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ so = TAILQ_FIRST(&head->so_comp);
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
+ KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+ soref(so); /* file descriptor reference */
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+
+ fd = rtems_bsdnet_makeFdForSocket (so, &socket_handlers);
+ if (fd < 0) {
+ TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
+ head->so_qlen++;
+ wakeup(head);
+ error = EBADF;
+ return (error);
+ }
+
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ td->td_retval[0] = fd;
+
+ sa = 0;
+ CURVNET_SET(so->so_vnet);
+ error = soaccept(so, &sa);
+ CURVNET_RESTORE();
+ if (error) {
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (name)
+ *namelen = 0;
+ goto noconnection;
+ }
+ if (sa == NULL) {
+ if (name)
+ *namelen = 0;
+ goto done;
+ }
+ if (name) {
+ /* check sa_len before it is destroyed */
+ if (*namelen > sa->sa_len)
+ *namelen = sa->sa_len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+ *name = sa;
+ sa = NULL;
+ }
+noconnection:
+ if (sa)
+ free(sa, M_SONAME);
+
+done:
+ return (error);
+}
+
+static int
+accept1(td, s, _name, _namelen, compat)
+ struct thread *td;
+ int s;
+ struct sockaddr *_name;
+ int *_namelen;
+ int compat;
+{
+ struct sockaddr *name;
+ socklen_t namelen;
+ int error;
+
+ if (_name == NULL)
+ return (kern_accept(td, s, NULL, NULL));
+
+ error = copyin(_namelen, &namelen, sizeof (namelen));
+ if (error)
+ return (error);
+
+ error = kern_accept(td, s, &name, &namelen);
+
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (error) {
+ (void) copyout(&namelen,
+ _namelen, sizeof(*_namelen));
+ return (error);
+ }
+
+ if (error == 0 && name != NULL) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)name)->sa_family =
+ name->sa_family;
+#endif
+ error = copyout(name, _name, namelen);
+ }
+ if (error == 0)
+ error = copyout(&namelen, _namelen,
+ sizeof(namelen));
+ free(name, M_SONAME);
+ return (error);
+}
+
+int
+accept (int s, struct sockaddr *name, int *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = accept1(td, s, name, namelen, 0);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ * Shutdown routine
+ */
+
+int
+shutdown (int s, int how)
+{
+ struct socket *so;
+ int error = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ }
+ if( error == 0 )
+ {
+ error = soshutdown(so, how);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_sendit(td, s, mp, flags, control, segflg)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ int flags;
+ struct mbuf *control;
+ enum uio_seg segflg;
+{
+ struct uio auio;
+ struct iovec *iov;
+ struct socket *so;
+ int i;
+ int len, error;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+#ifdef MAC
+ if (mp->msg_name != NULL) {
+ error = mac_socket_check_connect(td->td_ucred, so,
+ mp->msg_name);
+ if (error)
+ goto bad;
+ }
+ error = mac_socket_check_send(td->td_ucred, so);
+ if (error)
+ goto bad;
+#endif
+
+ auio.uio_iov = mp->msg_iov;
+ auio.uio_iovcnt = mp->msg_iovlen;
+ auio.uio_segflg = segflg;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ iov = mp->msg_iov;
+ for (i = 0; i < mp->msg_iovlen; i++, iov++) {
+ if ((auio.uio_resid += iov->iov_len) < 0) {
+ error = EINVAL;
+ goto bad;
+ }
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif
+ len = auio.uio_resid;
+ error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
+ if (error) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ killinfo(td->td_proc->p_pid, SIGPIPE, NULL);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(s, UIO_WRITE, ktruio, error);
+ }
+#endif
+bad:
+ return (error);
+}
+
+static int
+sendit(td, s, mp, flags)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ int flags;
+{
+ struct mbuf *control;
+ struct sockaddr *to;
+ int error;
+
+ if (mp->msg_name != NULL) {
+ error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
+ if (error) {
+ to = NULL;
+ goto bad;
+ }
+ mp->msg_name = to;
+ } else {
+ to = NULL;
+ }
+
+ if (mp->msg_control) {
+ if (mp->msg_controllen < sizeof(struct cmsghdr)
+#ifdef COMPAT_OLDSOCK
+ && mp->msg_flags != MSG_COMPAT
+#endif
+ ) {
+ error = EINVAL;
+ goto bad;
+ }
+ error = sockargs(&control, mp->msg_control,
+ mp->msg_controllen, MT_CONTROL);
+ if (error)
+ goto bad;
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags == MSG_COMPAT) {
+ struct cmsghdr *cm;
+
+ M_PREPEND(control, sizeof(*cm), M_WAIT);
+ cm = mtod(control, struct cmsghdr *);
+ cm->cmsg_len = control->m_len;
+ cm->cmsg_level = SOL_SOCKET;
+ cm->cmsg_type = SCM_RIGHTS;
+ }
+#endif
+ } else {
+ control = NULL;
+ }
+
+ error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
+
+bad:
+ if (to)
+ free(to, M_SONAME);
+ return (error);
+}
+
+/*
+ * All `transmit' operations end up calling this routine.
+ */
+ssize_t
+sendmsg (int s, const struct msghdr *mp, int flags)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec *iov;
+ int error;
+
+ td = curthread;
+ error = copyin(mp, &msg, sizeof (msg));
+ if (error)
+ return (error);
+ error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
+ if (error)
+ return (error);
+ msg.msg_iov = iov;
+#ifdef COMPAT_OLDSOCK
+ msg.msg_flags = 0;
+#endif
+ error = sendit(td, s, &msg, flags);
+ free(iov, M_IOV);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ * Send a message to a host
+ */
+ssize_t
+sendto (int s, const void *buf, size_t len, int flags, const struct sockaddr *to, int tolen)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec aiov;
+ int error;
+
+ td = curthread;
+ msg.msg_name = to;
+ msg.msg_namelen = tolen;
+ msg.msg_iov = &aiov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = 0;
+#ifdef COMPAT_OLDSOCK
+ msg.msg_flags = 0;
+#endif
+ aiov.iov_base = buf;
+ aiov.iov_len = len;
+ error = sendit(td, s, &msg, flags);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+ssize_t
+send( int s, const void *msg, size_t len, int flags )
+{
+ return (sendto(s, msg, len, flags, NULL, 0));
+}
+
+int
+kern_recvit(td, s, mp, fromseg, controlp)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ enum uio_seg fromseg;
+ struct mbuf **controlp;
+{
+ struct uio auio;
+ struct iovec *iov;
+ int i;
+ socklen_t len;
+ int error;
+ struct mbuf *m, *control = 0;
+ caddr_t ctlbuf;
+ struct socket *so;
+ struct sockaddr *fromsa = 0;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+
+ if(controlp != NULL)
+ *controlp = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+#ifdef MAC
+ error = mac_socket_check_receive(td->td_ucred, so);
+ if (error) {
+ return (error);
+ }
+#endif
+
+ auio.uio_iov = mp->msg_iov;
+ auio.uio_iovcnt = mp->msg_iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ iov = mp->msg_iov;
+ for (i = 0; i < mp->msg_iovlen; i++, iov++) {
+ if ((auio.uio_resid += iov->iov_len) < 0) {
+ return (EINVAL);
+ }
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif
+ len = auio.uio_resid;
+ CURVNET_SET(so->so_vnet);
+ error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
+ (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
+ &mp->msg_flags);
+ CURVNET_RESTORE();
+ if (error) {
+ if (auio.uio_resid != (int)len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ }
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = (int)len - auio.uio_resid;
+ ktrgenio(s, UIO_READ, ktruio, error);
+ }
+#endif
+ if (error)
+ goto out;
+ td->td_retval[0] = (int)len - auio.uio_resid;
+ if (mp->msg_name) {
+ len = mp->msg_namelen;
+ if (len <= 0 || fromsa == 0)
+ len = 0;
+ else {
+ /* save sa_len before it is destroyed by MSG_COMPAT */
+ len = MIN(len, fromsa->sa_len);
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags & MSG_COMPAT)
+ ((struct osockaddr *)fromsa)->sa_family =
+ fromsa->sa_family;
+#endif
+ if (fromseg == UIO_USERSPACE) {
+ error = copyout(fromsa, mp->msg_name,
+ (unsigned)len);
+ if (error)
+ goto out;
+ } else
+ bcopy(fromsa, mp->msg_name, len);
+ }
+ mp->msg_namelen = len;
+ }
+ if (mp->msg_control && controlp == NULL) {
+#ifdef COMPAT_OLDSOCK
+ /*
+ * We assume that old recvmsg calls won't receive access
+ * rights and other control info, esp. as control info
+ * is always optional and those options didn't exist in 4.3.
+ * If we receive rights, trim the cmsghdr; anything else
+ * is tossed.
+ */
+ if (control && mp->msg_flags & MSG_COMPAT) {
+ if (mtod(control, struct cmsghdr *)->cmsg_level !=
+ SOL_SOCKET ||
+ mtod(control, struct cmsghdr *)->cmsg_type !=
+ SCM_RIGHTS) {
+ mp->msg_controllen = 0;
+ goto out;
+ }
+ control->m_len -= sizeof (struct cmsghdr);
+ control->m_data += sizeof (struct cmsghdr);
+ }
+#endif
+ len = mp->msg_controllen;
+ m = control;
+ mp->msg_controllen = 0;
+ ctlbuf = mp->msg_control;
+
+ while (m && len > 0) {
+ unsigned int tocopy;
+
+ if (len >= m->m_len)
+ tocopy = m->m_len;
+ else {
+ mp->msg_flags |= MSG_CTRUNC;
+ tocopy = len;
+ }
+
+ if ((error = copyout(mtod(m, caddr_t),
+ ctlbuf, tocopy)) != 0)
+ goto out;
+
+ ctlbuf += tocopy;
+ len -= tocopy;
+ m = m->m_next;
+ }
+ mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
+ }
+out:
+#ifdef KTRACE
+ if (fromsa && KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(fromsa);
+#endif
+ if (fromsa)
+ free(fromsa, M_SONAME);
+
+ if (error == 0 && controlp != NULL)
+ *controlp = control;
+ else if (control)
+ m_freem(control);
+
+ return (error);
+}
+
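+/*
+ * Wrapper around kern_recvit() that also copies the updated name length
+ * back to the caller when requested.
+ */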
+static int
+recvit(td, s, mp, namelenp)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ void *namelenp;
+{
+ int error;
+
+ error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
+ if (error)
+ return (error);
+ if (namelenp) {
+ error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags & MSG_COMPAT)
+ error = 0; /* old recvfrom didn't check */
+#endif
+ }
+ return (error);
+}
+
+/*
+ * All `receive' operations end up calling this routine.
+ */
+ssize_t
+recvmsg (int s, struct msghdr *mp, int flags)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec *uiov, *iov;
+ int error;
+
+ td = curthread;
+ error = copyin(mp, &msg, sizeof (msg));
+ if (error == 0 )
+ {
+ error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
+ if (error == 0)
+ {
+ msg.msg_flags = flags;
+ #ifdef COMPAT_OLDSOCK
+ msg.msg_flags &= ~MSG_COMPAT;
+ #endif
+ uiov = msg.msg_iov;
+ msg.msg_iov = iov;
+ error = recvit(td, s, &msg, NULL);
+ if (error == 0) {
+ msg.msg_iov = uiov;
+ error = copyout(&msg, mp, sizeof(msg));
+ }
+ free(iov, M_IOV);
+ }
+ }
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ * Receive a message from a host
+ */
+ssize_t
+recvfrom (int s, void *buf, size_t len, int flags, const struct sockaddr *from, socklen_t *fromlenaddr)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec aiov;
+ int error;
+
+ td = curthread;
+ if (fromlenaddr) {
+ error = copyin(fromlenaddr,
+ &msg.msg_namelen, sizeof (msg.msg_namelen));
+ if (error)
+ goto done2;
+ } else {
+ msg.msg_namelen = 0;
+ }
+ msg.msg_name = from;
+ msg.msg_iov = &aiov;
+ msg.msg_iovlen = 1;
+ aiov.iov_base = buf;
+ aiov.iov_len = len;
+ msg.msg_control = 0;
+ msg.msg_flags = flags;
+ error = recvit(td, s, &msg, fromlenaddr);
+done2:
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+ssize_t
+recv( int s, void *buf, size_t len, int flags )
+{
+ return (recvfrom(s, buf, len, flags, NULL, NULL));
+}
+
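+/*
+ * Fill in a struct sockopt and apply it to the socket behind descriptor s
+ * via sosetopt().
+ */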
+int
+kern_setsockopt(td, s, level, name, val, valseg, valsize)
+ struct thread *td;
+ int s;
+ int level;
+ int name;
+ void *val;
+ enum uio_seg valseg;
+ socklen_t valsize;
+{
+ int error;
+ struct socket *so;
+ struct sockopt sopt;
+
+ if (val == NULL && valsize != 0)
+ return (EFAULT);
+ if ((int)valsize < 0)
+ return (EINVAL);
+
+ sopt.sopt_dir = SOPT_SET;
+ sopt.sopt_level = level;
+ sopt.sopt_name = name;
+ sopt.sopt_val = val;
+ sopt.sopt_valsize = valsize;
+ switch (valseg) {
+ case UIO_USERSPACE:
+ sopt.sopt_td = td;
+ break;
+ case UIO_SYSSPACE:
+ sopt.sopt_td = NULL;
+ break;
+ default:
+ panic("kern_setsockopt called with bad valseg");
+ }
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ CURVNET_SET(so->so_vnet);
+ error = sosetopt(so, &sopt);
+ CURVNET_RESTORE();
+ return(error);
+}
+
+int
+setsockopt (int s, int level, int name, const void *val, socklen_t valsize)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = kern_setsockopt(td, s, level, name, val, UIO_USERSPACE, valsize);
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
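+/*
+ * Query a socket option via sogetopt() and report the actual option length
+ * back through valsize.
+ */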
+int
+kern_getsockopt(td, s, level, name, val, valseg, valsize)
+ struct thread *td;
+ int s;
+ int level;
+ int name;
+ void *val;
+ enum uio_seg valseg;
+ socklen_t *valsize;
+{
+ int error;
+ struct socket *so;
+ struct sockopt sopt;
+
+ if (val == NULL)
+ *valsize = 0;
+ if ((int)*valsize < 0)
+ return (EINVAL);
+
+ sopt.sopt_dir = SOPT_GET;
+ sopt.sopt_level = level;
+ sopt.sopt_name = name;
+ sopt.sopt_val = val;
+ sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
+ switch (valseg) {
+ case UIO_USERSPACE:
+ sopt.sopt_td = td;
+ break;
+ case UIO_SYSSPACE:
+ sopt.sopt_td = NULL;
+ break;
+ default:
+ panic("kern_getsockopt called with bad valseg");
+ }
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ CURVNET_SET(so->so_vnet);
+ error = sogetopt(so, &sopt);
+ CURVNET_RESTORE();
+ *valsize = sopt.sopt_valsize;
+ return (error);
+}
+
+int
+getsockopt (int s, int level, int name, void *val, socklen_t *avalsize)
+{
+ struct thread *td;
+ socklen_t valsize;
+ int error = 0;
+
+ td = curthread;
+ if (val) {
+ error = copyin(avalsize, &valsize, sizeof (valsize));
+ }
+
+ if( error == 0 )
+ {
+ error = kern_getsockopt(td, s, level, name, val, UIO_USERSPACE, &valsize);
+
+ if (error == 0)
+ error = copyout(&valsize, avalsize, sizeof (valsize));
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
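+/*
+ * Fetch the peer address of a connected socket; the caller owns the
+ * returned sockaddr and must free it with M_SONAME.
+ */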
+int
+kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
+ socklen_t *alen)
+{
+ struct socket *so;
+ socklen_t len;
+ int error;
+
+ if (*alen < 0)
+ return (EINVAL);
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
+ error = ENOTCONN;
+ goto done;
+ }
+ *sa = NULL;
+ CURVNET_SET(so->so_vnet);
+ error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
+ CURVNET_RESTORE();
+ if (error)
+ goto bad;
+ if (*sa == NULL)
+ len = 0;
+ else
+ len = MIN(*alen, (*sa)->sa_len);
+ *alen = len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(*sa);
+#endif
+bad:
+ if (error && *sa) {
+ free(*sa, M_SONAME);
+ *sa = NULL;
+ }
+done:
+ return (error);
+}
+
+static int
+getpeername1(td, fdes, asa, alen, compat)
+ struct thread *td;
+ int fdes;
+ struct sockaddr * asa;
+ socklen_t * alen;
+ int compat;
+{
+ struct sockaddr *sa;
+ socklen_t len;
+ int error;
+
+ error = copyin(alen, &len, sizeof (len));
+ if (error)
+ return (error);
+
+ error = kern_getpeername(td, fdes, &sa, &len);
+ if (error)
+ return (error);
+
+ if (len != 0) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)sa)->sa_family = sa->sa_family;
+#endif
+ error = copyout(sa, asa, (u_int)len);
+ }
+ free(sa, M_SONAME);
+ if (error == 0)
+ error = copyout(&len, alen, sizeof(len));
+ return (error);
+}
+
+int
+getpeername (int s, struct sockaddr *name, socklen_t *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = getpeername1(td, s, name, namelen, 0);
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
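+/*
+ * Fetch the local address bound to a socket; as with kern_getpeername(),
+ * the returned sockaddr must be freed with M_SONAME by the caller.
+ */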
+int
+kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
+ socklen_t *alen)
+{
+ struct socket *so;
+ socklen_t len;
+ int error;
+
+ if (*alen < 0)
+ return (EINVAL);
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ *sa = NULL;
+ CURVNET_SET(so->so_vnet);
+ error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
+ CURVNET_RESTORE();
+ if (error)
+ goto bad;
+ if (*sa == NULL)
+ len = 0;
+ else
+ len = MIN(*alen, (*sa)->sa_len);
+ *alen = len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(*sa);
+#endif
+bad:
+ if (error && *sa) {
+ free(*sa, M_SONAME);
+ *sa = NULL;
+ }
+ return (error);
+}
+
+static int
+getsockname1(td, fdes, asa, alen, compat)
+ struct thread *td;
+ int fdes;
+ struct sockaddr * asa;
+ socklen_t * alen;
+ int compat;
+{
+ struct sockaddr *sa;
+ socklen_t len;
+ int error;
+
+ error = copyin(alen, &len, sizeof(len));
+ if (error)
+ return (error);
+
+ error = kern_getsockname(td, fdes, &sa, &len);
+ if (error)
+ return (error);
+
+ if (len != 0) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)sa)->sa_family = sa->sa_family;
+#endif
+ error = copyout(sa, asa, (u_int)len);
+ }
+ free(sa, M_SONAME);
+ if (error == 0)
+ error = copyout(&len, alen, sizeof(len));
+ return (error);
+}
+
+int
+getsockname (int s, struct sockaddr *name, socklen_t *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = getsockname1(td, s, name, namelen, 0);
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ ************************************************************************
+ * RTEMS I/O HANDLER ROUTINES *
+ ************************************************************************
+ */
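+
+/*
+ * These handlers connect the RTEMS libio file descriptor layer to the
+ * socket layer: iop->data1 carries the struct socket pointer and
+ * iop->data0 the socket descriptor.
+ */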
+static int
+rtems_bsdnet_close (rtems_libio_t *iop)
+{
+ struct socket *so;
+ int error;
+
+ if ((so = iop->data1) == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+ error = soclose (so);
+ if (error) {
+ errno = error;
+ return -1;
+ }
+ return 0;
+}
+
+static ssize_t
+rtems_bsdnet_read (rtems_libio_t *iop, void *buffer, size_t count)
+{
+ return recv (iop->data0, buffer, count, 0);
+}
+
+static ssize_t
+rtems_bsdnet_write (rtems_libio_t *iop, const void *buffer, size_t count)
+{
+ return send (iop->data0, buffer, count, 0);
+}
+
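+/*
+ * Handle the generic socket ioctls (FIONBIO, FIONREAD) locally, forward
+ * interface ('i') and routing ('r') requests to ifioctl()/rtioctl(), and
+ * pass everything else to the protocol's pru_control method.
+ */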
+static int
+so_ioctl (rtems_libio_t *iop, struct socket *so, uint32_t command, void *buffer)
+{
+ switch (command) {
+ case FIONBIO:
+ SOCK_LOCK(so);
+ if (*(int *)buffer) {
+ iop->flags |= O_NONBLOCK;
+ so->so_state |= SS_NBIO;
+ }
+ else {
+ iop->flags &= ~O_NONBLOCK;
+ so->so_state &= ~SS_NBIO;
+ }
+ SOCK_UNLOCK(so);
+ return 0;
+
+ case FIONREAD:
+ *(int *)buffer = so->so_rcv.sb_cc;
+ return 0;
+ }
+
+ if (IOCGROUP(command) == 'i')
+ return ifioctl (so, command, buffer, NULL);
+ if (IOCGROUP(command) == 'r')
+ return rtioctl (command, buffer, NULL);
+ return (*so->so_proto->pr_usrreqs->pru_control)(so, command, buffer, 0, curthread);
+}
+
+static int
+rtems_bsdnet_ioctl (rtems_libio_t *iop, uint32_t command, void *buffer)
+{
+ struct socket *so;
+ int error;
+
+ if ((so = iop->data1) == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+ error = so_ioctl (iop, so, command, buffer);
+ if (error) {
+ errno = error;
+ return -1;
+ }
+ return 0;
+}
+
+static int
+rtems_bsdnet_fcntl (int cmd, rtems_libio_t *iop)
+{
+ struct socket *so;
+
+ if (cmd == F_SETFL) {
+ if ((so = iop->data1) == NULL) {
+ return EBADF;
+ }
+ SOCK_LOCK(so);
+ if (iop->flags & LIBIO_FLAGS_NO_DELAY)
+ so->so_state |= SS_NBIO;
+ else
+ so->so_state &= ~SS_NBIO;
+ SOCK_UNLOCK(so);
+ }
+ return 0;
+}
+
+static int
+rtems_bsdnet_fstat (rtems_filesystem_location_info_t *loc, struct stat *sp)
+{
+ sp->st_mode = S_IFSOCK;
+ return 0;
+}
+
+static const rtems_filesystem_file_handlers_r socket_handlers = {
+ rtems_filesystem_default_open, /* open */
+ rtems_bsdnet_close, /* close */
+ rtems_bsdnet_read, /* read */
+ rtems_bsdnet_write, /* write */
+ rtems_bsdnet_ioctl, /* ioctl */
+ rtems_filesystem_default_lseek, /* lseek */
+ rtems_bsdnet_fstat, /* fstat */
+ rtems_filesystem_default_fchmod, /* fchmod */
+ rtems_filesystem_default_ftruncate, /* ftruncate */
+ rtems_filesystem_default_fpathconf, /* fpathconf */
+ rtems_filesystem_default_fsync, /* fsync */
+ rtems_filesystem_default_fdatasync, /* fdatasync */
+ rtems_bsdnet_fcntl, /* fcntl */
+ rtems_filesystem_default_rmnod /* rmnod */
+};
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctl.c b/rtems/freebsd/rtems/rtems-bsd-sysctl.c
new file mode 100644
index 00000000..dcf963f9
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctl.c
@@ -0,0 +1,64 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
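+/*
+ * The MIB name vector is duplicated onto the stack before it is handed to
+ * kernel_sysctl().  An illustrative call (using standard FreeBSD MIB
+ * constants) might look like:
+ *
+ *   int mib[2] = { CTL_KERN, KERN_OSTYPE };
+ *   char buf[32];
+ *   size_t len = sizeof(buf);
+ *   (void) sysctl(mib, 2, buf, &len, NULL, 0);
+ */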
+int sysctl(
+ int *name,
+ u_int namelen,
+ void *oldp,
+ size_t *oldlenp,
+ void *newp,
+ size_t newlen
+)
+{
+ int eno = EINVAL;
+
+ if (namelen <= CTL_MAXNAME) {
+ int namedup [CTL_MAXNAME];
+
+ memcpy(namedup, name, namelen * sizeof(*name));
+
+ eno = kernel_sysctl(
+ NULL,
+ namedup,
+ namelen,
+ oldp,
+ oldlenp,
+ newp,
+ newlen,
+ oldlenp,
+ 0
+ );
+ }
+
+ if (eno == 0) {
+ return 0;
+ } else {
+ errno = eno;
+
+ return -1;
+ }
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c b/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c
new file mode 100644
index 00000000..b2953cc2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c
@@ -0,0 +1,43 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'lib/libc/gen/sysctlbyname.c'.
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
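+/*
+ * Translate the textual name into a MIB vector with sysctlnametomib() and
+ * then issue the regular sysctl() call.
+ */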
+int
+sysctlbyname(const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int real_oid[CTL_MAXNAME+2];
+ int error;
+ size_t oidlen;
+
+ oidlen = sizeof(real_oid) / sizeof(int);
+ error = sysctlnametomib(name, real_oid, &oidlen);
+ if (error < 0)
+ return (error);
+ error = sysctl(real_oid, oidlen, oldp, oldlenp, newp, newlen);
+ return (error);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c b/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c
new file mode 100644
index 00000000..0ce5f088
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c
@@ -0,0 +1,67 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'lib/libc/gen/sysctlnametomib.c'.
+ */
+
+/*
+ * Copyright 2001 The FreeBSD Project. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <string.h>
+
+/*
+ * This function uses a presently undocumented interface to the kernel
+ * to walk the tree and get the type so it can print the value.
+ * This interface is under work and consideration, and should probably
+ * be killed with a big axe by the first person who can find the time.
+ * (Be aware, though, that the proper interface isn't as obvious as it
+ * may seem; there are various conflicting requirements.)
+ */
+int
+sysctlnametomib(const char *name, int *mibp, size_t *sizep)
+{
+ int oid[2];
+ int error;
+
+ oid[0] = 0;
+ oid[1] = 3;
+
+ *sizep *= sizeof(int);
+ error = sysctl(oid, 2, mibp, sizep, name, strlen(name));
+ *sizep /= sizeof(int);
+ return (error);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-thread.c b/rtems/freebsd/rtems/rtems-bsd-thread.c
new file mode 100644
index 00000000..92bf79c0
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-thread.c
@@ -0,0 +1,208 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_thread_chain);
+
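+/*
+ * Create an RTEMS task for a FreeBSD kernel process/thread, record the
+ * struct thread pointer in notepad 0, and keep the thread on a chain so
+ * that rtems_bsd_thread_delete() can find and release it later.
+ */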
+static int
+rtems_bsd_thread_start(struct thread **td_ptr, void (*func)(void *), void *arg, int flags, int pages, const char *fmt, va_list ap)
+{
+ struct proc *p = &proc0;
+ struct thread *td = malloc(sizeof(struct thread), M_TEMP, M_WAITOK | M_ZERO);
+
+ if (td != NULL) {
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ unsigned index = 0;
+ char name [5] = "_???";
+
+ BSD_ASSERT(pages >= 0);
+
+ sc = rtems_task_create(
+ rtems_build_name('_', 'T', 'S', 'K'),
+ BSD_TASK_PRIORITY_NORMAL,
+ BSD_MINIMUM_TASK_STACK_SIZE + (size_t) pages * PAGE_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &id
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ sc = rtems_task_set_note(id, RTEMS_NOTEPAD_0, (uint32_t) td);
+ if (sc != RTEMS_SUCCESSFUL) {
+ rtems_task_delete(id);
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ index = rtems_object_id_get_index(id);
+ snprintf(name + 1, sizeof(name) - 1, "%03u", index);
+ sc = rtems_object_set_name(id, name);
+ if (sc != RTEMS_SUCCESSFUL) {
+ rtems_task_delete(id);
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ sc = rtems_task_start(id, (rtems_task_entry) func, (rtems_task_argument) arg);
+ if (sc != RTEMS_SUCCESSFUL) {
+ rtems_task_delete(id);
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ td->td_id = id;
+ vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
+ bzero(&td->td_ru, sizeof(td->td_ru));
+ td->td_ucred = crhold(p->p_ucred);
+ td->td_proc = p;
+
+ rtems_chain_append(&rtems_bsd_thread_chain, &td->td_node);
+
+ if (td_ptr != NULL) {
+ *td_ptr = td;
+ }
+
+ return 0;
+ }
+
+ return ENOMEM;
+}
+
+static void rtems_bsd_thread_delete(void) __dead2;
+
+static void
+rtems_bsd_thread_delete(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+ rtems_id id = rtems_task_self();
+ struct thread *td = NULL;
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *cur = (struct thread *) node;
+
+ if (cur->td_id == id) {
+ td = cur;
+ break;
+ }
+
+ node = rtems_chain_next(node);
+ }
+
+ if (td != NULL) {
+ rtems_chain_extract(&td->td_node);
+
+ free(td, M_TEMP);
+ } else {
+ BSD_PANIC("cannot find task entry");
+ }
+
+ rtems_task_delete(RTEMS_SELF);
+
+ while (true) {
+ /* Do nothing */
+ }
+}
+
+void
+kproc_start(const void *udata)
+{
+ const struct kproc_desc *pd = udata;
+ int eno = kproc_create((void (*)(void *))pd->func, NULL, pd->global_procpp, 0, 0, "%s", pd->arg0);
+
+ BSD_ASSERT(eno == 0);
+}
+
+int
+kproc_create(void (*func)(void *), void *arg, struct proc **newpp, int flags, int pages, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(newpp, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
+
+void
+kproc_exit(int ecode)
+{
+ rtems_bsd_thread_delete();
+}
+
+void
+kthread_start(const void *udata)
+{
+ const struct kthread_desc *td = udata;
+ int eno = kthread_add((void (*)(void *)) td->func, NULL, NULL, td->global_threadpp, 0, 0, "%s", td->arg0);
+
+ BSD_ASSERT(eno == 0);
+}
+
+int
+kthread_add(void (*func)(void *), void *arg, struct proc *p, struct thread **newtdp, int flags, int pages, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(newtdp, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
+
+void
+kthread_exit(void)
+{
+ rtems_bsd_thread_delete();
+}
+
+int
+kproc_kthread_add(void (*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char * procname, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(tdptr, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-uma.c b/rtems/freebsd/rtems/rtems-bsd-uma.c
new file mode 100644
index 00000000..c289bf00
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-uma.c
@@ -0,0 +1,2796 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/vm/uma.h>
+#include <rtems/freebsd/vm/uma_int.h>
+#include <rtems/freebsd/vm/uma_dbg.h>
+
+/*
+ * This is the zone and keg from which all zones are spawned. The idea is that
+ * even the zone & keg heads are allocated from the allocator, so we use the
+ * bss section to bootstrap us.
+ */
+static struct uma_keg masterkeg;
+static struct uma_zone masterzone_k;
+static struct uma_zone masterzone_z;
+static uma_zone_t kegs = &masterzone_k;
+static uma_zone_t zones = &masterzone_z;
+
+/* This is the zone from which all of uma_slab_t's are allocated. */
+static uma_zone_t slabzone;
+static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
+
+static u_int mp_maxid = 0; /* Simulate one CPU. This should really come from RTEMS SMP. At this time, RTEMS SMP is not functional. */
+#define CPU_ABSENT(x_cpu) 0 /* force all cpus to be present. This should really come from RTEMS SMP. */
+#define CPU_FOREACH(i) \
+ for ((i) = 0; (i) <= mp_maxid; (i)++) \
+ if (!CPU_ABSENT((i)))
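+/* With mp_maxid fixed at 0, CPU_FOREACH() visits exactly one CPU (index 0). */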
+
+/*
+ * The initial hash tables come out of this zone so they can be allocated
+ * prior to malloc coming up.
+ */
+static uma_zone_t hashzone;
+
+/* The boot-time adjusted value for cache line alignment. */
+static int uma_align_cache = 64 - 1;
+
+static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
+
+/*
+ * Are we allowed to allocate buckets?
+ */
+static int bucketdisable = 1;
+
+/* Linked list of all kegs in the system */
+static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
+
+/* This mutex protects the keg list */
+static struct mtx uma_mtx;
+
+/* Linked list of boot time pages */
+static LIST_HEAD(,uma_slab) uma_boot_pages =
+ LIST_HEAD_INITIALIZER(uma_boot_pages);
+
+/* This mutex protects the boot time pages list */
+static struct mtx uma_boot_pages_mtx;
+
+/* Is the VM done starting up? */
+static int booted = 0;
+
+/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
+static u_int uma_max_ipers;
+static u_int uma_max_ipers_ref;
+
+/*
+ * This is the handle used to schedule events that need to happen
+ * outside of the allocation fast path.
+ */
+static struct callout uma_callout;
+#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
+
+/*
+ * This structure is passed as the zone ctor arg so that I don't have to create
+ * a special allocation function just for zones.
+ */
+struct uma_zctor_args {
+ char *name;
+ size_t size;
+ uma_ctor ctor;
+ uma_dtor dtor;
+ uma_init uminit;
+ uma_fini fini;
+ uma_keg_t keg;
+ int align;
+ u_int32_t flags;
+};
+
+struct uma_kctor_args {
+ uma_zone_t zone;
+ size_t size;
+ uma_init uminit;
+ uma_fini fini;
+ int align;
+ u_int32_t flags;
+};
+
+struct uma_bucket_zone {
+ uma_zone_t ubz_zone;
+ char *ubz_name;
+ int ubz_entries;
+};
+
+#define BUCKET_MAX 128
+
+struct uma_bucket_zone bucket_zones[] = {
+ { NULL, "16 Bucket", 16 },
+ { NULL, "32 Bucket", 32 },
+ { NULL, "64 Bucket", 64 },
+ { NULL, "128 Bucket", 128 },
+ { NULL, NULL, 0}
+};
+
+#define BUCKET_SHIFT 4
+#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
+
+/*
+ * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
+ * of approximately the right size.
+ */
+static uint8_t bucket_size[BUCKET_ZONES];
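+
+/*
+ * Example: with BUCKET_SHIFT == 4, a request for 20 entries is mapped by
+ * bucket_zone_lookup() to index howmany(20, 16) == 2, which bucket_init()
+ * has pointed at the "32 Bucket" zone.
+ */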
+
+/*
+ * Flags and enumerations to be passed to internal functions.
+ */
+enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
+
+#define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
+#define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
+
+/* Prototypes.. */
+
+static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
+static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
+static void page_free(void *, int, u_int8_t);
+static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
+static void cache_drain(uma_zone_t);
+static void bucket_drain(uma_zone_t, uma_bucket_t);
+static void bucket_cache_drain(uma_zone_t zone);
+static int keg_ctor(void *, int, void *, int);
+static void keg_dtor(void *, int, void *);
+static int zone_ctor(void *, int, void *, int);
+static void zone_dtor(void *, int, void *);
+static int zero_init(void *, int, int);
+static void keg_small_init(uma_keg_t keg);
+static void keg_large_init(uma_keg_t keg);
+static void zone_foreach(void (*zfunc)(uma_zone_t));
+static void zone_timeout(uma_zone_t zone);
+static int hash_alloc(struct uma_hash *);
+static int hash_expand(struct uma_hash *, struct uma_hash *);
+static void hash_free(struct uma_hash *hash);
+static void *zone_alloc_item(uma_zone_t, void *, int);
+static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
+ int);
+static void bucket_init(void);
+static uma_bucket_t bucket_alloc(int, int);
+static void bucket_free(uma_bucket_t);
+static void bucket_zone_drain(void);
+static int zone_alloc_bucket(uma_zone_t zone, int flags);
+static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
+static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
+static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
+static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
+ uma_fini fini, int align, u_int32_t flags);
+static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
+static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
+
+void uma_print_zone(uma_zone_t);
+void uma_print_stats(void);
+
+/*
+ * Initialize bucket_zones, the array of zones of buckets of various sizes.
+ *
+ * For each zone, calculate the memory required for each bucket, consisting
+ * of the header and an array of pointers. Initialize bucket_size[] to point
+ * the range of appropriate bucket sizes at the zone.
+ */
+static void
+bucket_init(void)
+{
+ struct uma_bucket_zone *ubz;
+ int i;
+ int j;
+
+ for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
+ int size;
+
+ ubz = &bucket_zones[j];
+ size = roundup(sizeof(struct uma_bucket), sizeof(void *));
+ size += sizeof(void *) * ubz->ubz_entries;
+ ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
+ UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
+ for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
+ bucket_size[i >> BUCKET_SHIFT] = j;
+ }
+}
+
+/*
+ * Given a desired number of entries for a bucket, return the zone from which
+ * to allocate the bucket.
+ */
+static struct uma_bucket_zone *
+bucket_zone_lookup(int entries)
+{
+ int idx;
+
+ idx = howmany(entries, 1 << BUCKET_SHIFT);
+ return (&bucket_zones[bucket_size[idx]]);
+}
+
+static uma_bucket_t
+bucket_alloc(int entries, int bflags)
+{
+ struct uma_bucket_zone *ubz;
+ uma_bucket_t bucket;
+
+ /*
+ * This is to stop us from allocating per cpu buckets while we're
+ * running out of vm.boot_pages. Otherwise, we would exhaust the
+ * boot pages. This also prevents us from allocating buckets in
+ * low memory situations.
+ */
+ if (bucketdisable)
+ return (NULL);
+
+ ubz = bucket_zone_lookup(entries);
+ bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
+ if (bucket) {
+#ifdef INVARIANTS
+ bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
+#endif
+ bucket->ub_cnt = 0;
+ bucket->ub_entries = ubz->ubz_entries;
+ }
+
+ return (bucket);
+}
+
+static void
+bucket_free(uma_bucket_t bucket)
+{
+ struct uma_bucket_zone *ubz;
+
+ ubz = bucket_zone_lookup(bucket->ub_entries);
+ zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
+ ZFREE_STATFREE);
+}
+
+static void
+bucket_zone_drain(void)
+{
+ struct uma_bucket_zone *ubz;
+
+ for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
+ zone_drain(ubz->ubz_zone);
+}
+
+static inline uma_keg_t
+zone_first_keg(uma_zone_t zone)
+{
+
+ return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
+}
+
+static void
+zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
+{
+ uma_klink_t klink;
+
+ LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
+ kegfn(klink->kl_keg);
+}
+
+/*
+ * Routine to perform timeout driven calculations. This expands the
+ * hashes and does per cpu statistics aggregation.
+ *
+ * Returns nothing.
+ */
+static void
+keg_timeout(uma_keg_t keg)
+{
+
+ KEG_LOCK(keg);
+ /*
+ * Expand the keg hash table.
+ *
+ * This is done if the number of slabs is larger than the hash size.
+ * What I'm trying to do here is completely reduce collisions. This
+ * may be a little aggressive. Should I allow for two collisions max?
+ */
+ if (keg->uk_flags & UMA_ZONE_HASH &&
+ keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
+ struct uma_hash newhash;
+ struct uma_hash oldhash;
+ int ret;
+
+ /*
+ * This is so involved because allocating and freeing
+ * while the keg lock is held will lead to deadlock.
+ * I have to do everything in stages and check for
+ * races.
+ */
+ newhash = keg->uk_hash;
+ KEG_UNLOCK(keg);
+ ret = hash_alloc(&newhash);
+ KEG_LOCK(keg);
+ if (ret) {
+ if (hash_expand(&keg->uk_hash, &newhash)) {
+ oldhash = keg->uk_hash;
+ keg->uk_hash = newhash;
+ } else
+ oldhash = newhash;
+
+ KEG_UNLOCK(keg);
+ hash_free(&oldhash);
+ KEG_LOCK(keg);
+ }
+ }
+ KEG_UNLOCK(keg);
+}
+
+static void
+zone_timeout(uma_zone_t zone)
+{
+
+ zone_foreach_keg(zone, &keg_timeout);
+}
+
+/*
+ * Allocate and zero fill the next sized hash table from the appropriate
+ * backing store.
+ *
+ * Arguments:
+ * hash A new hash structure with the old hash size in uh_hashsize
+ *
+ * Returns:
+ * 1 on success and 0 on failure.
+ */
+static int
+hash_alloc(struct uma_hash *hash)
+{
+ int oldsize;
+ int alloc;
+
+ oldsize = hash->uh_hashsize;
+
+ /* We're just going to go to a power of two greater */
+ if (oldsize) {
+ hash->uh_hashsize = oldsize * 2;
+ alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
+ hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
+ M_UMAHASH, M_NOWAIT);
+ } else {
+ alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
+ hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
+ M_WAITOK);
+ hash->uh_hashsize = UMA_HASH_SIZE_INIT;
+ }
+ if (hash->uh_slab_hash) {
+ bzero(hash->uh_slab_hash, alloc);
+ hash->uh_hashmask = hash->uh_hashsize - 1;
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * Expands the hash table for HASH zones. This is done from zone_timeout
+ * to reduce collisions. This must not be done in the regular allocation
+ * path, otherwise, we can recurse on the vm while allocating pages.
+ *
+ * Arguments:
+ * oldhash The hash you want to expand
+ * newhash The hash structure for the new table
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ */
+static int
+hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
+{
+ uma_slab_t slab;
+ int hval;
+ int i;
+
+ if (!newhash->uh_slab_hash)
+ return (0);
+
+ if (oldhash->uh_hashsize >= newhash->uh_hashsize)
+ return (0);
+
+ /*
+ * I need to investigate hash algorithms for resizing without a
+ * full rehash.
+ */
+
+ for (i = 0; i < oldhash->uh_hashsize; i++)
+ while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
+ slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
+ SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
+ hval = UMA_HASH(newhash, slab->us_data);
+ SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
+ slab, us_hlink);
+ }
+
+ return (1);
+}
+
+/*
+ * Free the hash bucket to the appropriate backing store.
+ *
+ * Arguments:
+ * slab_hash The hash bucket we're freeing
+ * hashsize The number of entries in that hash bucket
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+hash_free(struct uma_hash *hash)
+{
+ if (hash->uh_slab_hash == NULL)
+ return;
+ if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
+ zone_free_item(hashzone,
+ hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
+ else
+ free(hash->uh_slab_hash, M_UMAHASH);
+}
+
+/*
+ * Frees all outstanding items in a bucket
+ *
+ * Arguments:
+ * zone The zone to free to, must be unlocked.
+ * bucket The free/alloc bucket with items, cpu queue must be locked.
+ *
+ * Returns:
+ * Nothing
+ */
+
+static void
+bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
+{
+ void *item;
+
+ if (bucket == NULL)
+ return;
+
+ while (bucket->ub_cnt > 0) {
+ bucket->ub_cnt--;
+ item = bucket->ub_bucket[bucket->ub_cnt];
+#ifdef INVARIANTS
+ bucket->ub_bucket[bucket->ub_cnt] = NULL;
+ KASSERT(item != NULL,
+ ("bucket_drain: botched ptr, item is NULL"));
+#endif
+ zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
+ }
+}
+
+/*
+ * Drains the per cpu caches for a zone.
+ *
+ * NOTE: This may only be called while the zone is being torn down, and not
+ * during normal operation. This is necessary in order that we do not have
+ * to migrate CPUs to drain the per-CPU caches.
+ *
+ * Arguments:
+ * zone The zone to drain, must be unlocked.
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+cache_drain(uma_zone_t zone)
+{
+ uma_cache_t cache;
+ int cpu;
+
+ /*
+ * XXX: It is safe to not lock the per-CPU caches, because we're
+ * tearing down the zone anyway. I.e., there will be no further use
+ * of the caches at this point.
+ *
+ * XXX: It would be good to be able to assert that the zone is being
+ * torn down to prevent improper use of cache_drain().
+ *
+ * XXX: We lock the zone before passing into bucket_cache_drain() as
+ * it is used elsewhere. Should the tear-down path be made special
+ * there in some form?
+ */
+ for (cpu = 0; cpu <= mp_maxid; cpu++) {
+ if (CPU_ABSENT(cpu))
+ continue;
+ cache = &zone->uz_cpu[cpu];
+ bucket_drain(zone, cache->uc_allocbucket);
+ bucket_drain(zone, cache->uc_freebucket);
+ if (cache->uc_allocbucket != NULL)
+ bucket_free(cache->uc_allocbucket);
+ if (cache->uc_freebucket != NULL)
+ bucket_free(cache->uc_freebucket);
+ cache->uc_allocbucket = cache->uc_freebucket = NULL;
+ }
+ ZONE_LOCK(zone);
+ bucket_cache_drain(zone);
+ ZONE_UNLOCK(zone);
+}
+
+/*
+ * Drain the cached buckets from a zone. Expects a locked zone on entry.
+ */
+static void
+bucket_cache_drain(uma_zone_t zone)
+{
+ uma_bucket_t bucket;
+
+ /*
+ * Drain the bucket queues and free the buckets, we just keep two per
+ * cpu (alloc/free).
+ */
+ while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ bucket_drain(zone, bucket);
+ bucket_free(bucket);
+ ZONE_LOCK(zone);
+ }
+
+ /* Now we do the free queue.. */
+ while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ bucket_free(bucket);
+ }
+}
+
+/*
+ * Frees pages from a keg back to the system. This is done on demand from
+ * the pageout daemon.
+ *
+ * Returns nothing.
+ */
+static void
+keg_drain(uma_keg_t keg)
+{
+ struct slabhead freeslabs = { 0 };
+ uma_slab_t slab;
+ uma_slab_t n;
+ u_int8_t flags;
+ u_int8_t *mem;
+ int i;
+
+ /*
+ * We don't want to take pages from statically allocated kegs at this
+ * time
+ */
+ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
+ return;
+
+#ifdef UMA_DEBUG
+ printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
+#endif
+ KEG_LOCK(keg);
+ if (keg->uk_free == 0)
+ goto finished;
+
+ slab = LIST_FIRST(&keg->uk_free_slab);
+ while (slab) {
+ n = LIST_NEXT(slab, us_link);
+
+ /* We have nowhere to free these to. */
+ if (slab->us_flags & UMA_SLAB_BOOT) {
+ slab = n;
+ continue;
+ }
+
+ LIST_REMOVE(slab, us_link);
+ keg->uk_pages -= keg->uk_ppera;
+ keg->uk_free -= keg->uk_ipers;
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
+
+ SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
+
+ slab = n;
+ }
+finished:
+ KEG_UNLOCK(keg);
+
+ while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
+ SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
+ if (keg->uk_fini)
+ for (i = 0; i < keg->uk_ipers; i++)
+ keg->uk_fini(
+ slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size);
+ flags = slab->us_flags;
+ mem = slab->us_data;
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab, NULL,
+ SKIP_NONE, ZFREE_STATFREE);
+#ifdef UMA_DEBUG
+ printf("%s: Returning %d bytes.\n",
+ keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
+#endif
+ keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
+ }
+}
+
+static void
+zone_drain_wait(uma_zone_t zone, int waitok)
+{
+
+ /*
+ * Set draining to interlock with zone_dtor() so we can release our
+ * locks as we go. Only dtor() should do a WAITOK call since it
+ * is the only call that knows the structure will still be available
+ * when it wakes up.
+ */
+ ZONE_LOCK(zone);
+ while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
+ if (waitok == M_NOWAIT)
+ goto out;
+ mtx_unlock(&uma_mtx);
+ msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
+ mtx_lock(&uma_mtx);
+ }
+ zone->uz_flags |= UMA_ZFLAG_DRAINING;
+ bucket_cache_drain(zone);
+ ZONE_UNLOCK(zone);
+ /*
+ * The DRAINING flag protects us from being freed while
+ * we're running. Normally the uma_mtx would protect us but we
+ * must be able to release and acquire the right lock for each keg.
+ */
+ zone_foreach_keg(zone, &keg_drain);
+ ZONE_LOCK(zone);
+ zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
+ wakeup(zone);
+out:
+ ZONE_UNLOCK(zone);
+}
+
+void
+zone_drain(uma_zone_t zone)
+{
+
+ zone_drain_wait(zone, M_NOWAIT);
+}
+
+/*
+ * Allocate a new slab for a keg. This does not insert the slab onto a list.
+ *
+ * Arguments:
+ * wait Shall we wait?
+ *
+ * Returns:
+ * The slab that was allocated or NULL if there is no memory and the
+ * caller specified M_NOWAIT.
+ */
+static uma_slab_t
+keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
+{
+ uma_slabrefcnt_t slabref;
+ uma_alloc allocf;
+ uma_slab_t slab;
+ u_int8_t *mem;
+ u_int8_t flags;
+ int i;
+
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+ slab = NULL;
+
+#ifdef UMA_DEBUG
+ printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
+#endif
+ allocf = keg->uk_allocf;
+ KEG_UNLOCK(keg);
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
+ if (slab == NULL) {
+ KEG_LOCK(keg);
+ return NULL;
+ }
+ }
+
+ /*
+ * This reproduces the old vm_zone behavior of zero filling pages the
+ * first time they are added to a zone.
+ *
+ * Malloced items are zeroed in uma_zalloc.
+ */
+
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+ wait |= M_ZERO;
+ else
+ wait &= ~M_ZERO;
+
+ /* zone is passed for legacy reasons. */
+ mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
+ if (mem == NULL) {
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab, NULL,
+ SKIP_NONE, ZFREE_STATFREE);
+ KEG_LOCK(keg);
+ return (NULL);
+ }
+
+ /* Point the slab into the allocated memory */
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
+ slab = (uma_slab_t)(mem + keg->uk_pgoff);
+
+ slab->us_keg = keg;
+ slab->us_data = mem;
+ slab->us_freecount = keg->uk_ipers;
+ slab->us_firstfree = 0;
+ slab->us_flags = flags;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ for (i = 0; i < keg->uk_ipers; i++) {
+ slabref->us_freelist[i].us_refcnt = 0;
+ slabref->us_freelist[i].us_item = i+1;
+ }
+ } else {
+ for (i = 0; i < keg->uk_ipers; i++)
+ slab->us_freelist[i].us_item = i+1;
+ }
+
+ if (keg->uk_init != NULL) {
+ for (i = 0; i < keg->uk_ipers; i++)
+ if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size, wait) != 0)
+ break;
+ if (i != keg->uk_ipers) {
+ if (keg->uk_fini != NULL) {
+ for (i--; i > -1; i--)
+ keg->uk_fini(slab->us_data +
+ (keg->uk_rsize * i),
+ keg->uk_size);
+ }
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab,
+ NULL, SKIP_NONE, ZFREE_STATFREE);
+ keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
+ flags);
+ KEG_LOCK(keg);
+ return (NULL);
+ }
+ }
+ KEG_LOCK(keg);
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
+
+ keg->uk_pages += keg->uk_ppera;
+ keg->uk_free += keg->uk_ipers;
+
+ return (slab);
+}
+
+/*
+ * This function is intended to be used early on in place of page_alloc() so
+ * that we may use the boot time page cache to satisfy allocations before
+ * the VM is ready.
+ */
+static void *
+startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
+{
+ uma_keg_t keg;
+ uma_slab_t tmps;
+ int pages, check_pages;
+
+ keg = zone_first_keg(zone);
+ pages = howmany(bytes, PAGE_SIZE);
+ check_pages = pages - 1;
+ KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
+
+ /*
+ * Check our small startup cache to see if it has pages remaining.
+ */
+ mtx_lock(&uma_boot_pages_mtx);
+
+ /* First check if we have enough room. */
+ tmps = LIST_FIRST(&uma_boot_pages);
+ while (tmps != NULL && check_pages-- > 0)
+ tmps = LIST_NEXT(tmps, us_link);
+ if (tmps != NULL) {
+ /*
+ * It's ok to lose tmps references. The last one will
+ * have tmps->us_data pointing to the start address of
+ * "pages" contiguous pages of memory.
+ */
+ while (pages-- > 0) {
+ tmps = LIST_FIRST(&uma_boot_pages);
+ LIST_REMOVE(tmps, us_link);
+ }
+ mtx_unlock(&uma_boot_pages_mtx);
+ *pflag = tmps->us_flags;
+ return (tmps->us_data);
+ }
+ mtx_unlock(&uma_boot_pages_mtx);
+ if (booted == 0)
+ panic("UMA: Increase vm.boot_pages");
+ /*
+ * Now that we've booted, reset these users to their real allocator.
+ */
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
+#else
+ keg->uk_allocf = page_alloc;
+#endif
+ return keg->uk_allocf(zone, bytes, pflag, wait);
+}
+
+/*
+ * Allocates a number of pages from the system
+ *
+ * Arguments:
+ * bytes The number of bytes requested
+ * wait Shall we wait?
+ *
+ * Returns:
+ * A pointer to the alloced memory or possibly
+ * NULL if M_NOWAIT is set.
+ */
+static void *
+page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
+{
+ void *p; /* Returned page */
+
+ *pflag = UMA_SLAB_KMEM;
+ p = (void *) malloc(bytes, M_TEMP, wait);
+
+ return (p);
+}
+
+/*
+ * Frees a number of pages to the system
+ *
+ * Arguments:
+ * mem A pointer to the memory to be freed
+ * size The size of the memory being freed
+ * flags The original p->us_flags field
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+page_free(void *mem, int size, u_int8_t flags)
+{
+ free(mem, M_TEMP);
+}
+
+/*
+ * Zero fill initializer
+ *
+ * Arguments/Returns follow uma_init specifications
+ */
+static int
+zero_init(void *mem, int size, int flags)
+{
+ bzero(mem, size);
+ return (0);
+}
+
+/*
+ * Finish creating a small uma keg. This calculates ipers, and the keg size.
+ *
+ * Arguments
+ * keg The zone we should initialize
+ *
+ * Returns
+ * Nothing
+ */
+static void
+keg_small_init(uma_keg_t keg)
+{
+ u_int rsize;
+ u_int memused;
+ u_int wastedspace;
+ u_int shsize;
+
+ KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
+ rsize = keg->uk_size;
+
+ if (rsize < UMA_SMALLEST_UNIT)
+ rsize = UMA_SMALLEST_UNIT;
+ if (rsize & keg->uk_align)
+ rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
+
+ keg->uk_rsize = rsize;
+ keg->uk_ppera = 1;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
+ shsize = sizeof(struct uma_slab_refcnt);
+ } else {
+ rsize += UMA_FRITM_SZ; /* Account for linkage */
+ shsize = sizeof(struct uma_slab);
+ }
+
+ keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
+ KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
+ memused = keg->uk_ipers * rsize + shsize;
+ wastedspace = UMA_SLAB_SIZE - memused;
+
+ /*
+ * We can't do OFFPAGE if we're internal or if we've been
+ * asked to not go to the VM for buckets. If we do this we
+ * may end up going to the VM (kmem_map) for slabs which we
+ * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
+ * result of UMA_ZONE_VM, which clearly forbids it.
+ */
+ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
+ (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
+ return;
+
+ if ((wastedspace >= UMA_MAX_WASTE) &&
+ (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
+ keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
+ KASSERT(keg->uk_ipers <= 255,
+ ("keg_small_init: keg->uk_ipers too high!"));
+#ifdef UMA_DEBUG
+ printf("UMA decided we need offpage slab headers for "
+ "keg: %s, calculated wastedspace = %d, "
+ "maximum wasted space allowed = %d, "
+ "calculated ipers = %d, "
+ "new wasted space = %d\n", keg->uk_name, wastedspace,
+ UMA_MAX_WASTE, keg->uk_ipers,
+ UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
+#endif
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
+ }
+}
+
+/*
+ * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
+ * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
+ * more complicated.
+ *
+ * Arguments
+ * keg The keg we should initialize
+ *
+ * Returns
+ * Nothing
+ */
+static void
+keg_large_init(uma_keg_t keg)
+{
+ int pages;
+
+ KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
+ KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
+ ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
+
+ pages = keg->uk_size / UMA_SLAB_SIZE;
+
+ /* Account for remainder */
+ if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
+ pages++;
+
+ keg->uk_ppera = pages;
+ keg->uk_ipers = 1;
+ keg->uk_rsize = keg->uk_size;
+
+ /* We can't do OFFPAGE if we're internal, bail out here. */
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
+ return;
+
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
+}
+
+static void
+keg_cachespread_init(uma_keg_t keg)
+{
+ int alignsize;
+ int trailer;
+ int pages;
+ int rsize;
+
+ alignsize = keg->uk_align + 1;
+ rsize = keg->uk_size;
+ /*
+ * We want one item to start on every align boundary in a page. To
+ * do this we will span pages. We will also extend the item by the
+ * size of align if it is an even multiple of align. Otherwise, it
+ * would fall on the same boundary every time.
+ */
+ if (rsize & keg->uk_align)
+ rsize = (rsize & ~keg->uk_align) + alignsize;
+ if ((rsize & alignsize) == 0)
+ rsize += alignsize;
+ trailer = rsize - keg->uk_size;
+ pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
+ pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
+ keg->uk_rsize = rsize;
+ keg->uk_ppera = pages;
+ keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
+ //keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
+ KASSERT(keg->uk_ipers <= uma_max_ipers,
+ ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
+ keg->uk_ipers));
+}
+
+/*
+ * Keg header ctor. This initializes all fields, locks, etc. And inserts
+ * the keg onto the global keg list.
+ *
+ * Arguments/Returns follow uma_ctor specifications
+ * udata Actually uma_kctor_args
+ */
+static int
+keg_ctor(void *mem, int size, void *udata, int flags)
+{
+ struct uma_kctor_args *arg = udata;
+ uma_keg_t keg = mem;
+ uma_zone_t zone;
+
+ bzero(keg, size);
+ keg->uk_size = arg->size;
+ keg->uk_init = arg->uminit;
+ keg->uk_fini = arg->fini;
+ keg->uk_align = arg->align;
+ keg->uk_free = 0;
+ keg->uk_pages = 0;
+ keg->uk_flags = arg->flags;
+ keg->uk_allocf = page_alloc;
+ keg->uk_freef = page_free;
+ keg->uk_recurse = 0;
+ keg->uk_slabzone = NULL;
+
+ /*
+ * The master zone is passed to us at keg-creation time.
+ */
+ zone = arg->zone;
+ keg->uk_name = zone->uz_name;
+
+ if (arg->flags & UMA_ZONE_VM)
+ keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
+
+ if (arg->flags & UMA_ZONE_ZINIT)
+ keg->uk_init = zero_init;
+
+ /*if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
+ keg->uk_flags |= UMA_ZONE_VTOSLAB;*/
+
+ /*
+ * The +UMA_FRITM_SZ added to uk_size is to account for the
+ * linkage that is added to the size in keg_small_init(). If
+ * we don't account for this here then we may end up in
+ * keg_small_init() with a calculated 'ipers' of 0.
+ */
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
+ keg_cachespread_init(keg);
+ else if ((keg->uk_size+UMA_FRITMREF_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
+ keg_large_init(keg);
+ else
+ keg_small_init(keg);
+ } else {
+ if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
+ keg_cachespread_init(keg);
+ else if ((keg->uk_size+UMA_FRITM_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
+ keg_large_init(keg);
+ else
+ keg_small_init(keg);
+ }
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ keg->uk_slabzone = slabrefzone;
+ else
+ keg->uk_slabzone = slabzone;
+ }
+
+ /*
+ * If we haven't booted yet we need allocations to go through the
+ * startup cache until the vm is ready.
+ */
+ if (keg->uk_ppera == 1) {
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = uma_small_alloc;
+ keg->uk_freef = uma_small_free;
+#endif
+ if (booted == 0)
+ keg->uk_allocf = startup_alloc;
+ } else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
+ keg->uk_allocf = startup_alloc;
+
+ /*
+ * Initialize keg's lock (shared among zones).
+ */
+ if (arg->flags & UMA_ZONE_MTXCLASS)
+ KEG_LOCK_INIT(keg, 1);
+ else
+ KEG_LOCK_INIT(keg, 0);
+
+ /*
+ * If we're putting the slab header in the actual page we need to
+ * figure out where in each page it goes. This calculates a right
+ * justified offset into the memory on an ALIGN_PTR boundary.
+ */
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
+ u_int totsize;
+
+ /* Size of the slab struct and free list */
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = sizeof(struct uma_slab_refcnt) +
+ keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = sizeof(struct uma_slab) +
+ keg->uk_ipers * UMA_FRITM_SZ;
+
+ if (totsize & UMA_ALIGN_PTR)
+ totsize = (totsize & ~UMA_ALIGN_PTR) +
+ (UMA_ALIGN_PTR + 1);
+ keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
+ + keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab)
+ + keg->uk_ipers * UMA_FRITM_SZ;
+
+ /*
+ * The only way the following is possible is if with our
+ * UMA_ALIGN_PTR adjustments we are now bigger than
+ * UMA_SLAB_SIZE. I haven't checked whether this is
+ * mathematically possible for all cases, so we make
+ * sure here anyway.
+ */
+ if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
+ printf("zone %s ipers %d rsize %d size %d\n",
+ zone->uz_name, keg->uk_ipers, keg->uk_rsize,
+ keg->uk_size);
+ panic("UMA slab won't fit.");
+ }
+ }
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ hash_alloc(&keg->uk_hash);
+
+#ifdef UMA_DEBUG
+ printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
+ zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
+ keg->uk_ipers, keg->uk_ppera,
+ (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
+#endif
+
+ LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
+
+ mtx_lock(&uma_mtx);
+ LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
+ mtx_unlock(&uma_mtx);
+ return (0);
+}
+
+/*
+ * Zone header ctor. This initializes all fields, locks, etc.
+ *
+ * Arguments/Returns follow uma_ctor specifications
+ * udata Actually uma_zctor_args
+ */
+static int
+zone_ctor(void *mem, int size, void *udata, int flags)
+{
+ struct uma_zctor_args *arg = udata;
+ uma_zone_t zone = mem;
+ uma_zone_t z;
+ uma_keg_t keg;
+
+ bzero(zone, size);
+ zone->uz_name = arg->name;
+ zone->uz_ctor = arg->ctor;
+ zone->uz_dtor = arg->dtor;
+ zone->uz_slab = zone_fetch_slab;
+ zone->uz_init = NULL;
+ zone->uz_fini = NULL;
+ zone->uz_allocs = 0;
+ zone->uz_frees = 0;
+ zone->uz_fails = 0;
+ zone->uz_fills = zone->uz_count = 0;
+ zone->uz_flags = 0;
+ keg = arg->keg;
+
+ if (arg->flags & UMA_ZONE_SECONDARY) {
+ KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
+ zone->uz_init = arg->uminit;
+ zone->uz_fini = arg->fini;
+ zone->uz_lock = &keg->uk_lock;
+ zone->uz_flags |= UMA_ZONE_SECONDARY;
+ mtx_lock(&uma_mtx);
+ ZONE_LOCK(zone);
+ LIST_FOREACH(z, &keg->uk_zones, uz_link) {
+ if (LIST_NEXT(z, uz_link) == NULL) {
+ LIST_INSERT_AFTER(z, zone, uz_link);
+ break;
+ }
+ }
+ ZONE_UNLOCK(zone);
+ mtx_unlock(&uma_mtx);
+ } else if (keg == NULL) {
+ if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
+ arg->align, arg->flags)) == NULL)
+ return (ENOMEM);
+ } else {
+ struct uma_kctor_args karg;
+ int error;
+
+ /* We should only be here from uma_startup() */
+ karg.size = arg->size;
+ karg.uminit = arg->uminit;
+ karg.fini = arg->fini;
+ karg.align = arg->align;
+ karg.flags = arg->flags;
+ karg.zone = zone;
+ error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
+ flags);
+ if (error)
+ return (error);
+ }
+ /*
+ * Link in the first keg.
+ */
+ zone->uz_klink.kl_keg = keg;
+ LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
+ zone->uz_lock = &keg->uk_lock;
+ zone->uz_size = keg->uk_size;
+ zone->uz_flags |= (keg->uk_flags &
+ (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
+
+ /*
+ * Some internal zones don't have room allocated for the per cpu
+ * caches. If we're internal, bail out here.
+ */
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
+ KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
+ ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
+ return (0);
+ }
+
+ if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
+ zone->uz_count = BUCKET_MAX;
+ else if (keg->uk_ipers <= BUCKET_MAX)
+ zone->uz_count = keg->uk_ipers;
+ else
+ zone->uz_count = BUCKET_MAX;
+ return (0);
+}
+
+/*
+ * Keg header dtor. This frees all data, destroys locks, frees the hash
+ * table and removes the keg from the global list.
+ *
+ * Arguments/Returns follow uma_dtor specifications
+ * udata unused
+ */
+static void
+keg_dtor(void *arg, int size, void *udata)
+{
+ uma_keg_t keg;
+
+ keg = (uma_keg_t)arg;
+ KEG_LOCK(keg);
+ if (keg->uk_free != 0) {
+ printf("Freed UMA keg was not empty (%d items). "
+		    "Lost %d pages of memory.\n",
+ keg->uk_free, keg->uk_pages);
+ }
+ KEG_UNLOCK(keg);
+
+ hash_free(&keg->uk_hash);
+
+ KEG_LOCK_FINI(keg);
+}
+
+/*
+ * Zone header dtor.
+ *
+ * Arguments/Returns follow uma_dtor specifications
+ * udata unused
+ */
+static void
+zone_dtor(void *arg, int size, void *udata)
+{
+ uma_klink_t klink;
+ uma_zone_t zone;
+ uma_keg_t keg;
+
+ zone = (uma_zone_t)arg;
+ keg = zone_first_keg(zone);
+
+ if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
+ cache_drain(zone);
+
+ mtx_lock(&uma_mtx);
+ LIST_REMOVE(zone, uz_link);
+ mtx_unlock(&uma_mtx);
+ /*
+	 * XXX There is a race here: the zone can be drained, the
+	 * zone lock released, and the zone refilled before we
+	 * remove it.  We don't care for now.
+ */
+ zone_drain_wait(zone, M_WAITOK);
+ /*
+ * Unlink all of our kegs.
+ */
+ while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
+ klink->kl_keg = NULL;
+ LIST_REMOVE(klink, kl_link);
+ if (klink == &zone->uz_klink)
+ continue;
+ free(klink, M_TEMP);
+ }
+ /*
+	 * We only destroy kegs from non-secondary zones.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
+ mtx_lock(&uma_mtx);
+ LIST_REMOVE(keg, uk_link);
+ mtx_unlock(&uma_mtx);
+ zone_free_item(kegs, keg, NULL, SKIP_NONE,
+ ZFREE_STATFREE);
+ }
+}
+
+/*
+ * Traverses every zone in the system and calls a callback
+ *
+ * Arguments:
+ * zfunc A pointer to a function which accepts a zone
+ * as an argument.
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+zone_foreach(void (*zfunc)(uma_zone_t))
+{
+ uma_keg_t keg;
+ uma_zone_t zone;
+
+ mtx_lock(&uma_mtx);
+ LIST_FOREACH(keg, &uma_kegs, uk_link) {
+ LIST_FOREACH(zone, &keg->uk_zones, uz_link)
+ zfunc(zone);
+ }
+ mtx_unlock(&uma_mtx);
+}
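+
+/*
+ * Editorial note: any function taking a uma_zone_t can be passed to
+ * zone_foreach(), e.g. zone_drain as done in uma_reclaim() below, or a
+ * hypothetical reporting hook such as:
+ *
+ *	static void
+ *	zone_report(uma_zone_t zone)
+ *	{
+ *		printf("zone %s: %llu allocs, %llu frees\n", zone->uz_name,
+ *		    (unsigned long long)zone->uz_allocs,
+ *		    (unsigned long long)zone->uz_frees);
+ *	}
+ *
+ *	zone_foreach(zone_report);
+ */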
+
+/* Public functions */
+/* See uma.h */
+void
+uma_startup(void *bootmem, int boot_pages)
+{
+ struct uma_zctor_args args;
+ uma_slab_t slab;
+ u_int slabsize;
+ u_int objsize, totsize, wsize;
+ int i;
+
+#ifdef UMA_DEBUG
+ printf("Creating uma keg headers zone and keg.\n");
+#endif
+ mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
+
+ /*
+ * Figure out the maximum number of items-per-slab we'll have if
+ * we're using the OFFPAGE slab header to track free items, given
+ * all possible object sizes and the maximum desired wastage
+ * (UMA_MAX_WASTE).
+ *
+ * We iterate until we find an object size for
+ * which the calculated wastage in keg_small_init() will be
+	 * enough to warrant OFFPAGE.  Since wasted space versus objsize
+ * is an overall increasing see-saw function, we find the smallest
+ * objsize such that the wastage is always acceptable for objects
+ * with that objsize or smaller. Since a smaller objsize always
+ * generates a larger possible uma_max_ipers, we use this computed
+ * objsize to calculate the largest ipers possible. Since the
+ * ipers calculated for OFFPAGE slab headers is always larger than
+ * the ipers initially calculated in keg_small_init(), we use
+ * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
+ * obtain the maximum ipers possible for offpage slab headers.
+ *
+	 * It should be noted that ipers versus objsize is an inversely
+ * proportional function which drops off rather quickly so as
+ * long as our UMA_MAX_WASTE is such that the objsize we calculate
+ * falls into the portion of the inverse relation AFTER the steep
+ * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
+ *
+ * Note that we have 8-bits (1 byte) to use as a freelist index
+ * inside the actual slab header itself and this is enough to
+	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT-sized
+ * object with offpage slab header would have ipers =
+ * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
+ * 1 greater than what our byte-integer freelist index can
+	 * accommodate, but we know that this situation never occurs as
+ * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
+ * that we need to go to offpage slab headers. Or, if we do,
+ * then we trap that condition below and panic in the INVARIANTS case.
+ */
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
+ (objsize + UMA_FRITM_SZ);
+ totsize *= (UMA_FRITM_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
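+	/*
+	 * Editorial worked example of the loop above (numbers assumed for
+	 * illustration only): with UMA_SLAB_SIZE = 4096,
+	 * sizeof(struct uma_slab) = 96, UMA_FRITM_SZ = 1 and
+	 * UMA_MAX_WASTE = 256, wsize = 3744 and 4000 bytes are usable.  At
+	 * objsize = 255 an item costs 256 bytes, 15 items fit and use 3840
+	 * bytes (160 wasted), so the loop continues; at objsize = 266 an item
+	 * costs 267 bytes, only 14 fit and use 3738 bytes (262 wasted, more
+	 * than UMA_MAX_WASTE), so the loop stops.
+	 */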
+
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
+ (objsize + UMA_FRITMREF_SZ);
+ totsize *= (UMA_FRITMREF_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
+
+ KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
+ ("uma_startup: calculated uma_max_ipers values too large!"));
+
+#ifdef UMA_DEBUG
+ printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
+	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
+ uma_max_ipers_ref);
+#endif
+
+ /* "manually" create the initial zone */
+ args.name = "UMA Kegs";
+ args.size = sizeof(struct uma_keg);
+ args.ctor = keg_ctor;
+ args.dtor = keg_dtor;
+ args.uminit = zero_init;
+ args.fini = NULL;
+ args.keg = &masterkeg;
+ args.align = 32 - 1;
+ args.flags = UMA_ZFLAG_INTERNAL;
+	/* The initial zone has no per-CPU queues so it's smaller */
+ zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
+
+#ifdef UMA_DEBUG
+ printf("Filling boot free list.\n");
+#endif
+ for (i = 0; i < boot_pages; i++) {
+ slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
+ slab->us_data = (u_int8_t *)slab;
+ slab->us_flags = UMA_SLAB_BOOT;
+ LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
+ }
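+	/*
+	 * Editorial note: for example, with boot_pages = 64 and an assumed
+	 * UMA_SLAB_SIZE of 4096 bytes, the loop above carves the 256 KiB
+	 * region at bootmem into 64 single-page boot slabs and queues them on
+	 * uma_boot_pages for use before the page allocator is available.
+	 */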
+ mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
+
+#ifdef UMA_DEBUG
+ printf("Creating uma zone headers zone and keg.\n");
+#endif
+ args.name = "UMA Zones";
+ args.size = sizeof(struct uma_zone) +
+ (sizeof(struct uma_cache) * (mp_maxid + 1));
+ args.ctor = zone_ctor;
+ args.dtor = zone_dtor;
+ args.uminit = zero_init;
+ args.fini = NULL;
+ args.keg = NULL;
+ args.align = 32 - 1;
+ args.flags = UMA_ZFLAG_INTERNAL;
+	/* The initial zone has no per-CPU queues so it's smaller */
+ zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
+
+#ifdef UMA_DEBUG
+ printf("Initializing pcpu cache locks.\n");
+#endif
+#ifdef UMA_DEBUG
+ printf("Creating slab and hash zones.\n");
+#endif
+
+ /*
+ * This is the max number of free list items we'll have with
+ * offpage slabs.
+ */
+ slabsize = uma_max_ipers * UMA_FRITM_SZ;
+ slabsize += sizeof(struct uma_slab);
+
+ /* Now make a zone for slab headers */
+ slabzone = uma_zcreate("UMA Slabs",
+ slabsize,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+
+ /*
+ * We also create a zone for the bigger slabs with reference
+	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
+ */
+ slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
+ slabsize += sizeof(struct uma_slab_refcnt);
+ slabrefzone = uma_zcreate("UMA RCntSlabs",
+ slabsize,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR,
+ UMA_ZFLAG_INTERNAL);
+
+ hashzone = uma_zcreate("UMA Hash",
+ sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+
+ bucket_init();
+
+#if defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_MD_SMALL_ALLOC_NEEDS_VM)
+ booted = 1;
+#endif
+
+#ifdef UMA_DEBUG
+ printf("UMA startup complete.\n");
+#endif
+}
+
+static uma_keg_t
+uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
+ int align, u_int32_t flags)
+{
+ struct uma_kctor_args args;
+
+ args.size = size;
+ args.uminit = uminit;
+ args.fini = fini;
+ args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
+ args.flags = flags;
+ args.zone = zone;
+ return (zone_alloc_item(kegs, &args, M_WAITOK));
+}
+
+/* See uma.h */
+void
+uma_set_align(int align)
+{
+
+ if (align != UMA_ALIGN_CACHE)
+ uma_align_cache = align;
+}
+
+/* See uma.h */
+uma_zone_t
+uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
+ uma_init uminit, uma_fini fini, int align, u_int32_t flags)
+{
+ struct uma_zctor_args args;
+
+ /* This stuff is essential for the zone ctor */
+ args.name = name;
+ args.size = size;
+ args.ctor = ctor;
+ args.dtor = dtor;
+ args.uminit = uminit;
+ args.fini = fini;
+ args.align = align;
+ args.flags = flags;
+ args.keg = NULL;
+
+ return (zone_alloc_item(zones, &args, M_WAITOK));
+}
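+
+/*
+ * Editorial note: a minimal usage sketch (hypothetical names, not part of
+ * this file).  A consumer typically creates a zone once and then allocates
+ * and frees items from it:
+ *
+ *	struct foo { int refs; char buf[120]; };
+ *	static uma_zone_t foo_zone;
+ *
+ *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
+ *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ *	struct foo *f = uma_zalloc_arg(foo_zone, NULL, M_WAITOK | M_ZERO);
+ *	...
+ *	uma_zfree_arg(foo_zone, f, NULL);
+ *	uma_zdestroy(foo_zone);
+ *
+ * Callers normally reach the *_arg variants through the uma_zalloc() and
+ * uma_zfree() wrappers declared in uma.h.
+ */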
+
+/* See uma.h */
+uma_zone_t
+uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
+ uma_init zinit, uma_fini zfini, uma_zone_t master)
+{
+ struct uma_zctor_args args;
+ uma_keg_t keg;
+
+ keg = zone_first_keg(master);
+ args.name = name;
+ args.size = keg->uk_size;
+ args.ctor = ctor;
+ args.dtor = dtor;
+ args.uminit = zinit;
+ args.fini = zfini;
+ args.align = keg->uk_align;
+ args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
+ args.keg = keg;
+
+ /* XXX Attaches only one keg of potentially many. */
+ return (zone_alloc_item(zones, &args, M_WAITOK));
+}
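+
+/*
+ * Editorial note: a secondary zone shares the master zone's keg, and thus its
+ * item size and backing slabs, while layering its own ctor/dtor and
+ * init/fini on top.  A hypothetical sketch:
+ *
+ *	obj_zone = uma_zcreate("objects", sizeof(struct obj),
+ *	    NULL, NULL, obj_init, obj_fini, UMA_ALIGN_PTR, 0);
+ *	cooked_zone = uma_zsecond_create("cooked objects",
+ *	    cooked_ctor, cooked_dtor, NULL, NULL, obj_zone);
+ *
+ * Both zones then draw their items from the keg created for obj_zone.
+ */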
+
+static void
+zone_lock_pair(uma_zone_t a, uma_zone_t b)
+{
+ if (a < b) {
+ ZONE_LOCK(a);
+ mtx_lock_flags(b->uz_lock, MTX_DUPOK);
+ } else {
+ ZONE_LOCK(b);
+ mtx_lock_flags(a->uz_lock, MTX_DUPOK);
+ }
+}
+
+static void
+zone_unlock_pair(uma_zone_t a, uma_zone_t b)
+{
+
+ ZONE_UNLOCK(a);
+ ZONE_UNLOCK(b);
+}
+
+
+/* See uma.h */
+void
+uma_zdestroy(uma_zone_t zone)
+{
+
+ zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
+}
+
+/* See uma.h */
+void *
+uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
+{
+ void *item;
+ uma_cache_t cache;
+ uma_bucket_t bucket;
+ int cpu;
+
+ /* This is the fast path allocation */
+#ifdef UMA_DEBUG_ALLOC_1
+ printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
+#endif
+ CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
+ zone->uz_name, flags);
+
+ if (flags & M_WAITOK) {
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
+ }
+
+ /*
+ * If possible, allocate from the per-CPU cache. There are two
+ * requirements for safe access to the per-CPU cache: (1) the thread
+ * accessing the cache must not be preempted or yield during access,
+ * and (2) the thread must not migrate CPUs without switching which
+ * cache it accesses. We rely on a critical section to prevent
+ * preemption and migration. We release the critical section in
+ * order to acquire the zone mutex if we are unable to allocate from
+ * the current cache; when we re-acquire the critical section, we
+ * must detect and handle migration if it has occurred.
+ */
+zalloc_restart:
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+
+zalloc_start:
+ bucket = cache->uc_allocbucket;
+
+ if (bucket) {
+ if (bucket->ub_cnt > 0) {
+ bucket->ub_cnt--;
+ item = bucket->ub_bucket[bucket->ub_cnt];
+#ifdef INVARIANTS
+ bucket->ub_bucket[bucket->ub_cnt] = NULL;
+#endif
+ KASSERT(item != NULL,
+ ("uma_zalloc: Bucket pointer mangled."));
+ cache->uc_allocs++;
+ critical_exit();
+#ifdef INVARIANTS
+ ZONE_LOCK(zone);
+ uma_dbg_alloc(zone, NULL, item);
+ ZONE_UNLOCK(zone);
+#endif
+ if (zone->uz_ctor != NULL) {
+ if (zone->uz_ctor(item, zone->uz_size,
+ udata, flags) != 0) {
+ zone_free_item(zone, item, udata,
+ SKIP_DTOR, ZFREE_STATFAIL |
+ ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (flags & M_ZERO)
+ bzero(item, zone->uz_size);
+ return (item);
+ } else if (cache->uc_freebucket) {
+ /*
+ * We have run out of items in our allocbucket.
+ * See if we can switch with our free bucket.
+ */
+ if (cache->uc_freebucket->ub_cnt > 0) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zalloc: Swapping empty with"
+ " alloc.\n");
+#endif
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = cache->uc_allocbucket;
+ cache->uc_allocbucket = bucket;
+
+ goto zalloc_start;
+ }
+ }
+ }
+ /*
+	 * The attempt to retrieve the item from the per-CPU cache has failed, so
+ * we must go back to the zone. This requires the zone lock, so we
+ * must drop the critical section, then re-acquire it when we go back
+ * to the cache. Since the critical section is released, we may be
+ * preempted or migrate. As such, make sure not to maintain any
+ * thread-local state specific to the cache from prior to releasing
+ * the critical section.
+ */
+ critical_exit();
+ ZONE_LOCK(zone);
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+ bucket = cache->uc_allocbucket;
+ if (bucket != NULL) {
+ if (bucket->ub_cnt > 0) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ bucket = cache->uc_freebucket;
+ if (bucket != NULL && bucket->ub_cnt > 0) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ }
+
+ /* Since we have locked the zone we may as well send back our stats */
+ zone->uz_allocs += cache->uc_allocs;
+ cache->uc_allocs = 0;
+ zone->uz_frees += cache->uc_frees;
+ cache->uc_frees = 0;
+
+ /* Our old one is now a free bucket */
+ if (cache->uc_allocbucket) {
+ KASSERT(cache->uc_allocbucket->ub_cnt == 0,
+ ("uma_zalloc_arg: Freeing a non free bucket."));
+ LIST_INSERT_HEAD(&zone->uz_free_bucket,
+ cache->uc_allocbucket, ub_link);
+ cache->uc_allocbucket = NULL;
+ }
+
+ /* Check the free list for a new alloc bucket */
+ if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
+ KASSERT(bucket->ub_cnt != 0,
+ ("uma_zalloc_arg: Returning an empty bucket."));
+
+ LIST_REMOVE(bucket, ub_link);
+ cache->uc_allocbucket = bucket;
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ /* We are no longer associated with this CPU. */
+ critical_exit();
+
+ /* Bump up our uz_count so we get here less */
+ if (zone->uz_count < BUCKET_MAX)
+ zone->uz_count++;
+
+ /*
+	 * Now let's just fill a bucket and put it on the free list.  If that
+	 * works we'll restart the allocation from the beginning.
+ */
+ if (zone_alloc_bucket(zone, flags)) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_restart;
+ }
+ ZONE_UNLOCK(zone);
+ /*
+ * We may not be able to get a bucket so return an actual item.
+ */
+#ifdef UMA_DEBUG
+ printf("uma_zalloc_arg: Bucketzone returned NULL\n");
+#endif
+
+ item = zone_alloc_item(zone, udata, flags);
+ return (item);
+}
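+
+/*
+ * Editorial note: in summary, uma_zalloc_arg() tries, in order, the per-CPU
+ * alloc bucket, the per-CPU free bucket (by swapping the two), a full bucket
+ * taken from the zone's uz_full_bucket list, a freshly filled bucket via
+ * zone_alloc_bucket(), and finally a single item straight from a slab via
+ * zone_alloc_item().
+ */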
+
+static uma_slab_t
+keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
+{
+ uma_slab_t slab;
+
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+ slab = NULL;
+
+ for (;;) {
+ /*
+ * Find a slab with some space. Prefer slabs that are partially
+ * used over those that are totally full. This helps to reduce
+ * fragmentation.
+ */
+ if (keg->uk_free != 0) {
+ if (!LIST_EMPTY(&keg->uk_part_slab)) {
+ slab = LIST_FIRST(&keg->uk_part_slab);
+ } else {
+ slab = LIST_FIRST(&keg->uk_free_slab);
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
+ us_link);
+ }
+ MPASS(slab->us_keg == keg);
+ return (slab);
+ }
+
+ /*
+ * M_NOVM means don't ask at all!
+ */
+ if (flags & M_NOVM)
+ break;
+
+ if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
+ keg->uk_flags |= UMA_ZFLAG_FULL;
+ /*
+ * If this is not a multi-zone, set the FULL bit.
+ * Otherwise slab_multi() takes care of it.
+ */
+ if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
+ zone->uz_flags |= UMA_ZFLAG_FULL;
+ if (flags & M_NOWAIT)
+ break;
+ msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
+ continue;
+ }
+ keg->uk_recurse++;
+ slab = keg_alloc_slab(keg, zone, flags);
+ keg->uk_recurse--;
+ /*
+ * If we got a slab here it's safe to mark it partially used
+ * and return. We assume that the caller is going to remove
+ * at least one item.
+ */
+ if (slab) {
+ MPASS(slab->us_keg == keg);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
+ return (slab);
+ }
+ /*
+ * We might not have been able to get a slab but another cpu
+ * could have while we were unlocked. Check again before we
+ * fail.
+ */
+ flags |= M_NOVM;
+ }
+ return (slab);
+}
+
+static inline void
+zone_relock(uma_zone_t zone, uma_keg_t keg)
+{
+ if (zone->uz_lock != &keg->uk_lock) {
+ KEG_UNLOCK(keg);
+ ZONE_LOCK(zone);
+ }
+}
+
+static inline void
+keg_relock(uma_keg_t keg, uma_zone_t zone)
+{
+ if (zone->uz_lock != &keg->uk_lock) {
+ ZONE_UNLOCK(zone);
+ KEG_LOCK(keg);
+ }
+}
+
+static uma_slab_t
+zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
+{
+ uma_slab_t slab;
+
+ if (keg == NULL)
+ keg = zone_first_keg(zone);
+ /*
+ * This is to prevent us from recursively trying to allocate
+ * buckets. The problem is that if an allocation forces us to
+ * grab a new bucket we will call page_alloc, which will go off
+ * and cause the vm to allocate vm_map_entries. If we need new
+ * buckets there too we will recurse in kmem_alloc and bad
+ * things happen. So instead we return a NULL bucket, and make
+ * the code that allocates buckets smart enough to deal with it
+ */
+ if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
+ return (NULL);
+
+ for (;;) {
+ slab = keg_fetch_slab(keg, zone, flags);
+ if (slab)
+ return (slab);
+ if (flags & (M_NOWAIT | M_NOVM))
+ break;
+ }
+ return (NULL);
+}
+
+/*
+ * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
+ * with the keg locked.  The caller must call zone_relock() afterwards if the
+ * zone lock is required.  If NULL is returned, the zone lock is held instead.
+ *
+ * The last pointer is used to seed the search. It is not required.
+ */
+static uma_slab_t
+zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
+{
+ uma_klink_t klink;
+ uma_slab_t slab;
+ uma_keg_t keg;
+ int flags;
+ int empty;
+ int full;
+
+ /*
+ * Don't wait on the first pass. This will skip limit tests
+ * as well. We don't want to block if we can find a provider
+ * without blocking.
+ */
+ flags = (rflags & ~M_WAITOK) | M_NOWAIT;
+ /*
+ * Use the last slab allocated as a hint for where to start
+ * the search.
+ */
+ if (last) {
+ slab = keg_fetch_slab(last, zone, flags);
+ if (slab)
+ return (slab);
+ zone_relock(zone, last);
+ last = NULL;
+ }
+ /*
+	 * Loop until we have a slab in case of transient failures
+ * while M_WAITOK is specified. I'm not sure this is 100%
+ * required but we've done it for so long now.
+ */
+ for (;;) {
+ empty = 0;
+ full = 0;
+ /*
+ * Search the available kegs for slabs. Be careful to hold the
+ * correct lock while calling into the keg layer.
+ */
+ LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
+ keg = klink->kl_keg;
+ keg_relock(keg, zone);
+ if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
+ slab = keg_fetch_slab(keg, zone, flags);
+ if (slab)
+ return (slab);
+ }
+ if (keg->uk_flags & UMA_ZFLAG_FULL)
+ full++;
+ else
+ empty++;
+ zone_relock(zone, keg);
+ }
+ if (rflags & (M_NOWAIT | M_NOVM))
+ break;
+ flags = rflags;
+ /*
+ * All kegs are full. XXX We can't atomically check all kegs
+ * and sleep so just sleep for a short period and retry.
+ */
+ if (full && !empty) {
+ zone->uz_flags |= UMA_ZFLAG_FULL;
+ msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
+ zone->uz_flags &= ~UMA_ZFLAG_FULL;
+ continue;
+ }
+ }
+ return (NULL);
+}
+
+static void *
+slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
+{
+ uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
+ void *item;
+ u_int8_t freei;
+
+ keg = slab->us_keg;
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+
+ freei = slab->us_firstfree;
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slab->us_firstfree = slabref->us_freelist[freei].us_item;
+ } else {
+ slab->us_firstfree = slab->us_freelist[freei].us_item;
+ }
+ item = slab->us_data + (keg->uk_rsize * freei);
+
+ slab->us_freecount--;
+ keg->uk_free--;
+#ifdef INVARIANTS
+ uma_dbg_alloc(zone, slab, item);
+#endif
+ /* Move this slab to the full list */
+ if (slab->us_freecount == 0) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
+ }
+
+ return (item);
+}
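+
+/*
+ * Editorial note: the address math above is the inverse of the freei
+ * calculation in zone_free_item().  With an assumed uk_rsize of 128 bytes,
+ * free index 3 yields item = us_data + 3 * 128 = us_data + 384, and freeing
+ * that item later recovers freei = (item - us_data) / 128 = 3.
+ */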
+
+static int
+zone_alloc_bucket(uma_zone_t zone, int flags)
+{
+ uma_bucket_t bucket;
+ uma_slab_t slab;
+ uma_keg_t keg;
+ int16_t saved;
+ int max, origflags = flags;
+
+ /*
+ * Try this zone's free list first so we don't allocate extra buckets.
+ */
+ if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ KASSERT(bucket->ub_cnt == 0,
+ ("zone_alloc_bucket: Bucket on free list is not empty."));
+ LIST_REMOVE(bucket, ub_link);
+ } else {
+ int bflags;
+
+ bflags = (flags & ~M_ZERO);
+ if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ bflags |= M_NOVM;
+
+ ZONE_UNLOCK(zone);
+ bucket = bucket_alloc(zone->uz_count, bflags);
+ ZONE_LOCK(zone);
+ }
+
+ if (bucket == NULL) {
+ return (0);
+ }
+
+#ifdef SMP
+ /*
+ * This code is here to limit the number of simultaneous bucket fills
+ * for any given zone to the number of per cpu caches in this zone. This
+ * is done so that we don't allocate more memory than we really need.
+ */
+ if (zone->uz_fills >= mp_ncpus)
+ goto done;
+
+#endif
+ zone->uz_fills++;
+
+ max = MIN(bucket->ub_entries, zone->uz_count);
+ /* Try to keep the buckets totally full */
+ saved = bucket->ub_cnt;
+ slab = NULL;
+ keg = NULL;
+ while (bucket->ub_cnt < max &&
+ (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
+ keg = slab->us_keg;
+ while (slab->us_freecount && bucket->ub_cnt < max) {
+ bucket->ub_bucket[bucket->ub_cnt++] =
+ slab_alloc_item(zone, slab);
+ }
+
+ /* Don't block on the next fill */
+ flags |= M_NOWAIT;
+ }
+ if (slab)
+ zone_relock(zone, keg);
+
+ /*
+ * We unlock here because we need to call the zone's init.
+ * It should be safe to unlock because the slab dealt with
+ * above is already on the appropriate list within the keg
+ * and the bucket we filled is not yet on any list, so we
+ * own it.
+ */
+ if (zone->uz_init != NULL) {
+ int i;
+
+ ZONE_UNLOCK(zone);
+ for (i = saved; i < bucket->ub_cnt; i++)
+ if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
+ origflags) != 0)
+ break;
+ /*
+ * If we couldn't initialize the whole bucket, put the
+ * rest back onto the freelist.
+ */
+ if (i != bucket->ub_cnt) {
+ int j;
+
+ for (j = i; j < bucket->ub_cnt; j++) {
+ zone_free_item(zone, bucket->ub_bucket[j],
+ NULL, SKIP_FINI, 0);
+#ifdef INVARIANTS
+ bucket->ub_bucket[j] = NULL;
+#endif
+ }
+ bucket->ub_cnt = i;
+ }
+ ZONE_LOCK(zone);
+ }
+
+ zone->uz_fills--;
+ if (bucket->ub_cnt != 0) {
+ LIST_INSERT_HEAD(&zone->uz_full_bucket,
+ bucket, ub_link);
+ return (1);
+ }
+#ifdef SMP
+done:
+#endif
+ bucket_free(bucket);
+
+ return (0);
+}
+/*
+ * Allocates an item for an internal zone
+ *
+ * Arguments
+ * zone The zone to alloc for.
+ * udata The data to be passed to the constructor.
+ * flags M_WAITOK, M_NOWAIT, M_ZERO.
+ *
+ * Returns
+ * NULL if there is no memory and M_NOWAIT is set
+ * An item if successful
+ */
+
+static void *
+zone_alloc_item(uma_zone_t zone, void *udata, int flags)
+{
+ uma_slab_t slab;
+ void *item;
+
+ item = NULL;
+
+#ifdef UMA_DEBUG_ALLOC
+ printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
+#endif
+ ZONE_LOCK(zone);
+
+ slab = zone->uz_slab(zone, NULL, flags);
+ if (slab == NULL) {
+ zone->uz_fails++;
+ ZONE_UNLOCK(zone);
+ return (NULL);
+ }
+
+ item = slab_alloc_item(zone, slab);
+
+ zone_relock(zone, slab->us_keg);
+ zone->uz_allocs++;
+ ZONE_UNLOCK(zone);
+
+ /*
+ * We have to call both the zone's init (not the keg's init)
+ * and the zone's ctor. This is because the item is going from
+ * a keg slab directly to the user, and the user is expecting it
+ * to be both zone-init'd as well as zone-ctor'd.
+ */
+ if (zone->uz_init != NULL) {
+ if (zone->uz_init(item, zone->uz_size, flags) != 0) {
+ zone_free_item(zone, item, udata, SKIP_FINI,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (zone->uz_ctor != NULL) {
+ if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
+ zone_free_item(zone, item, udata, SKIP_DTOR,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (flags & M_ZERO)
+ bzero(item, zone->uz_size);
+
+ return (item);
+}
+
+/* See uma.h */
+void
+uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
+{
+ uma_cache_t cache;
+ uma_bucket_t bucket;
+ int bflags;
+ int cpu;
+
+#ifdef UMA_DEBUG_ALLOC_1
+ printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
+#endif
+ CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
+ zone->uz_name);
+
+ /* uma_zfree(..., NULL) does nothing, to match free(9). */
+ if (item == NULL)
+ return;
+
+ if (zone->uz_dtor)
+ zone->uz_dtor(item, zone->uz_size, udata);
+
+#ifdef INVARIANTS
+ ZONE_LOCK(zone);
+ if (zone->uz_flags & UMA_ZONE_MALLOC)
+ uma_dbg_free(zone, udata, item);
+ else
+ uma_dbg_free(zone, NULL, item);
+ ZONE_UNLOCK(zone);
+#endif
+ /*
+ * The race here is acceptable. If we miss it we'll just have to wait
+ * a little longer for the limits to be reset.
+ */
+ if (zone->uz_flags & UMA_ZFLAG_FULL)
+ goto zfree_internal;
+
+ /*
+ * If possible, free to the per-CPU cache. There are two
+ * requirements for safe access to the per-CPU cache: (1) the thread
+ * accessing the cache must not be preempted or yield during access,
+ * and (2) the thread must not migrate CPUs without switching which
+ * cache it accesses. We rely on a critical section to prevent
+ * preemption and migration. We release the critical section in
+ * order to acquire the zone mutex if we are unable to free to the
+ * current cache; when we re-acquire the critical section, we must
+ * detect and handle migration if it has occurred.
+ */
+zfree_restart:
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+
+zfree_start:
+ bucket = cache->uc_freebucket;
+
+ if (bucket) {
+ /*
+ * Do we have room in our bucket? It is OK for this uz count
+ * check to be slightly out of sync.
+ */
+
+ if (bucket->ub_cnt < bucket->ub_entries) {
+ KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
+ ("uma_zfree: Freeing to non free bucket index."));
+ bucket->ub_bucket[bucket->ub_cnt] = item;
+ bucket->ub_cnt++;
+ cache->uc_frees++;
+ critical_exit();
+ return;
+ } else if (cache->uc_allocbucket) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Swapping buckets.\n");
+#endif
+ /*
+ * We have run out of space in our freebucket.
+ * See if we can switch with our alloc bucket.
+ */
+ if (cache->uc_allocbucket->ub_cnt <
+ cache->uc_freebucket->ub_cnt) {
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = cache->uc_allocbucket;
+ cache->uc_allocbucket = bucket;
+ goto zfree_start;
+ }
+ }
+ }
+ /*
+ * We can get here for two reasons:
+ *
+ * 1) The buckets are NULL
+ * 2) The alloc and free buckets are both somewhat full.
+ *
+	 * We must go back to the zone, which requires acquiring the zone lock,
+ * which in turn means we must release and re-acquire the critical
+ * section. Since the critical section is released, we may be
+ * preempted or migrate. As such, make sure not to maintain any
+ * thread-local state specific to the cache from prior to releasing
+ * the critical section.
+ */
+ critical_exit();
+ ZONE_LOCK(zone);
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+ if (cache->uc_freebucket != NULL) {
+ if (cache->uc_freebucket->ub_cnt <
+ cache->uc_freebucket->ub_entries) {
+ ZONE_UNLOCK(zone);
+ goto zfree_start;
+ }
+ if (cache->uc_allocbucket != NULL &&
+ (cache->uc_allocbucket->ub_cnt <
+ cache->uc_freebucket->ub_cnt)) {
+ ZONE_UNLOCK(zone);
+ goto zfree_start;
+ }
+ }
+
+ /* Since we have locked the zone we may as well send back our stats */
+ zone->uz_allocs += cache->uc_allocs;
+ cache->uc_allocs = 0;
+ zone->uz_frees += cache->uc_frees;
+ cache->uc_frees = 0;
+
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = NULL;
+
+ /* Can we throw this on the zone full list? */
+ if (bucket != NULL) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Putting old bucket on the free list.\n");
+#endif
+ /* ub_cnt is pointing to the last free item */
+ KASSERT(bucket->ub_cnt != 0,
+ ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
+ LIST_INSERT_HEAD(&zone->uz_full_bucket,
+ bucket, ub_link);
+ }
+ if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ cache->uc_freebucket = bucket;
+ goto zfree_start;
+ }
+ /* We are no longer associated with this CPU. */
+ critical_exit();
+
+ /* And the zone.. */
+ ZONE_UNLOCK(zone);
+
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Allocating new free bucket.\n");
+#endif
+ bflags = M_NOWAIT;
+
+ if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ bflags |= M_NOVM;
+ bucket = bucket_alloc(zone->uz_count, bflags);
+ if (bucket) {
+ ZONE_LOCK(zone);
+ LIST_INSERT_HEAD(&zone->uz_free_bucket,
+ bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ goto zfree_restart;
+ }
+
+ /*
+ * If nothing else caught this, we'll just do an internal free.
+ */
+zfree_internal:
+ zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
+
+ return;
+}
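+
+/*
+ * Editorial note: uma_zfree_arg() mirrors the allocation path: it first tries
+ * the per-CPU free bucket, then swaps with the alloc bucket if that one has
+ * more room, then hands the full free bucket to the zone and takes an empty
+ * one from uz_free_bucket, then tries to allocate a brand new bucket, and
+ * only as a last resort frees the single item with zone_free_item().
+ */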
+
+/*
+ * Frees an item to an INTERNAL zone or allocates a free bucket
+ *
+ * Arguments:
+ * zone The zone to free to
+ * item The item we're freeing
+ * udata User supplied data for the dtor
+ * skip Skip dtors and finis
+ */
+static void
+zone_free_item(uma_zone_t zone, void *item, void *udata,
+ enum zfreeskip skip, int flags)
+{
+ uma_slab_t slab;
+ uma_slabrefcnt_t slabref;
+ uma_keg_t keg;
+ u_int8_t *mem;
+ u_int8_t freei;
+ int clearfull;
+
+ if (skip < SKIP_DTOR && zone->uz_dtor)
+ zone->uz_dtor(item, zone->uz_size, udata);
+
+ if (skip < SKIP_FINI && zone->uz_fini)
+ zone->uz_fini(item, zone->uz_size);
+
+ ZONE_LOCK(zone);
+
+ if (flags & ZFREE_STATFAIL)
+ zone->uz_fails++;
+ if (flags & ZFREE_STATFREE)
+ zone->uz_frees++;
+
+ if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
+ mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
+ keg = zone_first_keg(zone); /* Must only be one. */
+ if (zone->uz_flags & UMA_ZONE_HASH) {
+ slab = hash_sfind(&keg->uk_hash, mem);
+ } else {
+ mem += keg->uk_pgoff;
+ slab = (uma_slab_t)mem;
+ }
+ } else {
+		panic("uma virtual memory not supported!");
+ }
+ MPASS(keg == slab->us_keg);
+
+ /* Do we need to remove from any lists? */
+ if (slab->us_freecount+1 == keg->uk_ipers) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
+ } else if (slab->us_freecount == 0) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
+ }
+
+ /* Slab management stuff */
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / keg->uk_rsize;
+
+#ifdef INVARIANTS
+ if (!skip)
+ uma_dbg_free(zone, slab, item);
+#endif
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slabref->us_freelist[freei].us_item = slab->us_firstfree;
+ } else {
+ slab->us_freelist[freei].us_item = slab->us_firstfree;
+ }
+ slab->us_firstfree = freei;
+ slab->us_freecount++;
+
+ /* Zone statistics */
+ keg->uk_free++;
+
+ clearfull = 0;
+ if (keg->uk_flags & UMA_ZFLAG_FULL) {
+ if (keg->uk_pages < keg->uk_maxpages) {
+ keg->uk_flags &= ~UMA_ZFLAG_FULL;
+ clearfull = 1;
+ }
+
+ /*
+ * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
+ * wake up all procs blocked on pages. This should be uncommon, so
+		 * we keep this simple for now (rather than adding a count of
+		 * blocked threads, etc.).
+ */
+ wakeup(keg);
+ }
+ if (clearfull) {
+ zone_relock(zone, keg);
+ zone->uz_flags &= ~UMA_ZFLAG_FULL;
+ wakeup(zone);
+ ZONE_UNLOCK(zone);
+ } else
+ KEG_UNLOCK(keg);
+}
+
+/* See uma.h */
+void
+uma_zone_set_max(uma_zone_t zone, int nitems)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
+ if (keg->uk_maxpages * keg->uk_ipers < nitems)
+ keg->uk_maxpages += keg->uk_ppera;
+
+ ZONE_UNLOCK(zone);
+}
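+
+/*
+ * Editorial worked example of the rounding above (numbers assumed): for
+ * nitems = 1000 on a keg with uk_ipers = 30 and uk_ppera = 1, uk_maxpages
+ * starts as (1000 / 30) * 1 = 33; since 33 * 30 = 990 < 1000 one more page
+ * is added, giving uk_maxpages = 34 and an effective limit of 34 * 30 = 1020
+ * items, which is also what uma_zone_get_max() reports.
+ */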
+
+/* See uma.h */
+int
+uma_zone_get_max(uma_zone_t zone)
+{
+ int nitems;
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ nitems = keg->uk_maxpages * keg->uk_ipers;
+ ZONE_UNLOCK(zone);
+
+ return (nitems);
+}
+
+/* See uma.h */
+int
+uma_zone_get_cur(uma_zone_t zone)
+{
+ int64_t nitems;
+ u_int i;
+
+ ZONE_LOCK(zone);
+ nitems = zone->uz_allocs - zone->uz_frees;
+ CPU_FOREACH(i) {
+ /*
+ * See the comment in sysctl_vm_zone_stats() regarding the
+ * safety of accessing the per-cpu caches. With the zone lock
+ * held, it is safe, but can potentially result in stale data.
+ */
+ nitems += zone->uz_cpu[i].uc_allocs -
+ zone->uz_cpu[i].uc_frees;
+ }
+ ZONE_UNLOCK(zone);
+
+ return (nitems < 0 ? 0 : nitems);
+}
+
+/* See uma.h */
+void
+uma_zone_set_init(uma_zone_t zone, uma_init uminit)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ KASSERT(keg->uk_pages == 0,
+ ("uma_zone_set_init on non-empty keg"));
+ keg->uk_init = uminit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ KASSERT(keg->uk_pages == 0,
+ ("uma_zone_set_fini on non-empty keg"));
+ keg->uk_fini = fini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone_first_keg(zone)->uk_pages == 0,
+ ("uma_zone_set_zinit on non-empty keg"));
+ zone->uz_init = zinit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone_first_keg(zone)->uk_pages == 0,
+ ("uma_zone_set_zfini on non-empty keg"));
+ zone->uz_fini = zfini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+/* XXX uk_freef is not actually used with the zone locked */
+void
+uma_zone_set_freef(uma_zone_t zone, uma_free freef)
+{
+
+ ZONE_LOCK(zone);
+ zone_first_keg(zone)->uk_freef = freef;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+/* XXX uk_allocf is not actually used with the zone locked */
+void
+uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
+ keg->uk_allocf = allocf;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_prealloc(uma_zone_t zone, int items)
+{
+ int slabs;
+ uma_slab_t slab;
+ uma_keg_t keg;
+
+ keg = zone_first_keg(zone);
+ ZONE_LOCK(zone);
+ slabs = items / keg->uk_ipers;
+ if (slabs * keg->uk_ipers < items)
+ slabs++;
+ while (slabs > 0) {
+ slab = keg_alloc_slab(keg, zone, M_WAITOK);
+ if (slab == NULL)
+ break;
+ MPASS(slab->us_keg == keg);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
+ slabs--;
+ }
+ ZONE_UNLOCK(zone);
+}
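+
+/*
+ * Editorial note: for example, uma_prealloc(zone, 100) on a keg with an
+ * assumed uk_ipers of 30 computes 100 / 30 = 3 slabs, rounds up to 4 because
+ * 3 * 30 < 100, and thus leaves 4 full free slabs (120 items) on the keg's
+ * free slab list.
+ */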
+
+/* See uma.h */
+void
+uma_reclaim(void)
+{
+#ifdef UMA_DEBUG
+ printf("UMA: vm asked us to release pages!\n");
+#endif
+ zone_foreach(zone_drain);
+ /*
+	 * Some slabs may have been freed, but the slab zones below are
+	 * visited early in the walk above; visit them again so that pages
+	 * left empty after the other zones drained can be freed.  We have
+	 * to do the same for buckets.
+ */
+ zone_drain(slabzone);
+ zone_drain(slabrefzone);
+ bucket_zone_drain();
+}
+
+/* See uma.h */
+int
+uma_zone_exhausted(uma_zone_t zone)
+{
+ int full;
+
+ ZONE_LOCK(zone);
+ full = (zone->uz_flags & UMA_ZFLAG_FULL);
+ ZONE_UNLOCK(zone);
+ return (full);
+}
+
+int
+uma_zone_exhausted_nolock(uma_zone_t zone)
+{
+ return (zone->uz_flags & UMA_ZFLAG_FULL);
+}
+
+void *
+uma_large_malloc(int size, int wait)
+{
+ void *mem;
+ uma_slab_t slab;
+ u_int8_t flags;
+
+ slab = zone_alloc_item(slabzone, NULL, wait);
+ if (slab == NULL)
+ return (NULL);
+ mem = page_alloc(NULL, size, &flags, wait);
+ if (mem) {
+ slab->us_data = mem;
+ slab->us_flags = flags | UMA_SLAB_MALLOC;
+ slab->us_size = size;
+ } else {
+ zone_free_item(slabzone, slab, NULL, SKIP_NONE,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ }
+
+ return (mem);
+}
+
+void
+uma_large_free(uma_slab_t slab)
+{
+ page_free(slab->us_data, slab->us_size, slab->us_flags);
+ zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
+}
+
+void
+uma_print_stats(void)
+{
+ zone_foreach(uma_print_zone);
+}
+
+static void
+slab_print(uma_slab_t slab)
+{
+ printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
+ slab->us_keg, slab->us_data, slab->us_freecount,
+ slab->us_firstfree);
+}
+
+static void
+cache_print(uma_cache_t cache)
+{
+ printf("alloc: %p(%d), free: %p(%d)\n",
+ cache->uc_allocbucket,
+ cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
+ cache->uc_freebucket,
+ cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
+}
+
+static void
+uma_print_keg(uma_keg_t keg)
+{
+ uma_slab_t slab;
+
+ printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
+ "out %d free %d limit %d\n",
+ keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
+ keg->uk_ipers, keg->uk_ppera,
+ (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
+ (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
+ printf("Part slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
+ slab_print(slab);
+ printf("Free slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
+ slab_print(slab);
+ printf("Full slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
+ slab_print(slab);
+}
+
+void
+uma_print_zone(uma_zone_t zone)
+{
+ uma_cache_t cache;
+ uma_klink_t kl;
+ int i;
+
+ printf("zone: %s(%p) size %d flags %d\n",
+ zone->uz_name, zone, zone->uz_size, zone->uz_flags);
+ LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
+ uma_print_keg(kl->kl_keg);
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ cache = &zone->uz_cpu[i];
+ printf("CPU %d Cache:\n", i);
+ cache_print(cache);
+ }
+}
+