author     Joel Sherrill <joel.sherrill@oarcorp.com>   2012-03-08 15:54:05 -0600
committer  Joel Sherrill <joel.sherrill@oarcorp.com>   2012-03-08 15:54:05 -0600
commit     b55081acae9f808116f6f05585d8fd0cde2191df
tree       e4a9dfc16c4aaf51cbc9c1b4f9d64ec936f92dc1
parent     freebsd-to-rtems.py - Only copy if new version of file is different
kern/kern_subr.c added for hashinit()

Pulling this file in supplies hashinit(), but more undefined symbols
resulted; the notes in Changes and the sys/mutex.h stubs below cover
them. A usage sketch for hashinit() follows the diffstat.
-rw-r--r--  Changes                   |  15
-rw-r--r--  Makefile                  |   1
-rwxr-xr-x  freebsd-to-rtems.py       |  16
-rw-r--r--  freebsd/kern/kern_subr.c  | 586
-rw-r--r--  freebsd/sys/mutex.h       |   6
5 files changed, 616 insertions, 8 deletions
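
For context on what the commit message calls "added for hashinit":
hashinit() allocates a power-of-two array of list heads and returns a
mask used for bucket indexing. A minimal sketch of a typical consumer
of the hashinit()/hashdestroy() pair, assuming generic FreeBSD kernel
headers; the pcb_* names and the use of M_TEMP are illustrative, not
part of this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>

/* Illustrative only: a small object kept in a hash table. */
struct pcb_entry {
	LIST_ENTRY(pcb_entry) pe_link;	/* hash-chain linkage */
	int pe_id;
};
LIST_HEAD(pcb_head, pcb_entry);

static struct pcb_head *pcb_hashtbl;
static u_long pcb_hashmask;		/* hashsize - 1, set by hashinit() */

static void
pcb_table_setup(void)
{
	/* Sized for ~128 elements; hashinit() may sleep (HASH_WAITOK). */
	pcb_hashtbl = hashinit(128, M_TEMP, &pcb_hashmask);
}

static struct pcb_head *
pcb_bucket(int id)
{
	/* Mask, not modulo: the table size is a power of two. */
	return (&pcb_hashtbl[id & pcb_hashmask]);
}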
diff --git a/Changes b/Changes
index 93ced5dd..4203902e 100644
--- a/Changes
+++ b/Changes
@@ -13,3 +13,18 @@
- Added argument for destination directory
- Support config.inc
- Fix for loop line too long on install stanza
+
++ kern_subr.c
+ - had to add #include <sys/uio.h>
+
++ RTEMS does not have
+ - any thread td_ structure elements
+ - sys/mutex.h - stubbed thread_lock and friends
+
++ Include Files to ifdef include out
+ #include <sys/vnode.h>
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_kern.h>
+ #include <vm/vm_page.h>
+
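
The guard pattern these notes describe keeps the upstream include list
intact while compiling out what RTEMS cannot provide; the same idiom
appears verbatim in kern_subr.c below. In sketch form, with vm/vm.h
chosen as an example from the list above:

/* Porting idiom: never delete an upstream include, wrap it. */
#ifndef __rtems__
#include <freebsd/vm/vm.h>	/* no VM subsystem under RTEMS */
#endif
#ifdef __rtems__
#include <freebsd/sys/uio.h>	/* struct uio is still needed directly */
#endif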
diff --git a/Makefile b/Makefile
index 46411544..3161b58e 100644
--- a/Makefile
+++ b/Makefile
@@ -271,6 +271,7 @@ C_FILES = \
freebsd/kern/init_main.c \
freebsd/kern/kern_mbuf.c \
freebsd/kern/kern_module.c \
+ freebsd/kern/kern_subr.c \
freebsd/kern/kern_sysctl.c \
freebsd/kern/subr_bus.c \
freebsd/kern/subr_kobj.c \
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index 421b2444..2398e100 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -152,18 +152,22 @@ def mapContribPath(path):
# generate an empty file as a place holder
def installEmptyFile(src):
+ global tempFile
dst = RTEMS_DIR + '/' + PREFIX + '/' + src.replace('rtems/', '')
- if isVerbose == True:
- print "Install empty - " + dst
if isDryRun == True:
+ if isVerbose == True:
+ print "Install empty - " + dst
return
try:
os.makedirs(os.path.dirname(dst))
except OSError:
pass
- out = open(dst, 'w')
+ out = open(tempFile, 'w')
out.write('/* EMPTY */\n')
out.close()
+ if copyIfDifferent(tempFile, dst) == True:
+ if isVerbose == True:
+ print "Install empty - " + dst
# fix include paths inside a C or .h file
def fixIncludes(data):
@@ -981,6 +985,7 @@ devUsbBase.addSourceFiles(
#'kern/kern_mib.c',
'kern/kern_mbuf.c',
'kern/kern_module.c',
+ 'kern/kern_subr.c',
'kern/kern_sysctl.c',
'kern/subr_bus.c',
'kern/subr_kobj.c',
@@ -1657,20 +1662,16 @@ mm.addEmptyFiles(
'machine/elf.h',
'machine/sf_buf.h',
#'machine/vmparam.h',
- 'net/vnet.h',
'security/audit/audit.h',
- 'security/mac/mac_framework.h',
'sys/bio.h',
'sys/copyright.h',
'sys/cpuset.h',
- 'sys/errno.h',
'sys/exec.h',
'sys/fail.h',
'sys/limits.h',
'sys/namei.h',
'sys/_pthreadtypes.h',
#'sys/resourcevar.h',
- 'sys/sbuf.h',
'sys/sched.h',
'sys/select.h',
'sys/syscallsubr.h',
@@ -1678,7 +1679,6 @@ mm.addEmptyFiles(
'sys/syslimits.h',
'sys/sysproto.h',
'sys/stat.h',
- 'sys/taskqueue.h',
#'sys/time.h',
'time.h',
'sys/timespec.h',
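
The installEmptyFile() change above routes placeholder generation
through the same write-to-a-temp-file-then-compare step the parent
commit introduced for ordinary files, so unchanged placeholders keep
their timestamps and do not trigger rebuilds. A rough C sketch of that
copy-if-different idea; files_differ() and install_if_different() are
hypothetical names, not the script's actual helpers:

#include <stdio.h>

static int
files_differ(const char *a, const char *b)
{
	FILE *fa = fopen(a, "rb");
	FILE *fb = fopen(b, "rb");
	int ca, cb, differ;

	if (fa == NULL || fb == NULL) {
		differ = 1;	/* a missing destination counts as different */
	} else {
		do {
			ca = fgetc(fa);
			cb = fgetc(fb);
		} while (ca == cb && ca != EOF);
		differ = (ca != cb);
	}
	if (fa != NULL)
		fclose(fa);
	if (fb != NULL)
		fclose(fb);
	return (differ);
}

static void
install_if_different(const char *tmp, const char *dst)
{
	if (files_differ(tmp, dst))
		rename(tmp, dst);	/* replace: contents changed */
	else
		remove(tmp);		/* unchanged: keep the old timestamp */
}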
diff --git a/freebsd/kern/kern_subr.c b/freebsd/kern/kern_subr.c
new file mode 100644
index 00000000..f32e8750
--- /dev/null
+++ b/freebsd/kern/kern_subr.c
@@ -0,0 +1,586 @@
+#include <freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <freebsd/local/opt_zero.h>
+
+#include <freebsd/sys/param.h>
+#include <freebsd/sys/systm.h>
+#include <freebsd/sys/kernel.h>
+#include <freebsd/sys/ktr.h>
+#include <freebsd/sys/limits.h>
+#include <freebsd/sys/lock.h>
+#include <freebsd/sys/mutex.h>
+#include <freebsd/sys/proc.h>
+#include <freebsd/sys/malloc.h>
+#include <freebsd/sys/resourcevar.h>
+#include <freebsd/sys/sched.h>
+#include <freebsd/sys/sysctl.h>
+#ifndef __rtems__
+#include <freebsd/sys/vnode.h>
+#endif
+
+#ifndef __rtems__
+#include <freebsd/vm/vm.h>
+#include <freebsd/vm/vm_page.h>
+#include <freebsd/vm/vm_map.h>
+#endif
+#ifdef __rtems__
+#include <freebsd/sys/uio.h>
+#endif
+#ifdef ZERO_COPY_SOCKETS
+#include <freebsd/vm/vm_param.h>
+#include <freebsd/vm/vm_object.h>
+#endif
+
+SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
+ "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
+
+#ifdef ZERO_COPY_SOCKETS
+/* Declared in uipc_socket.c */
+extern int so_zero_copy_receive;
+
+/*
+ * Identify the physical page mapped at the given kernel virtual
+ * address. Insert this physical page into the given address space at
+ * the given virtual address, replacing the physical page, if any,
+ * that already exists there.
+ */
+static int
+vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
+{
+ vm_map_t map = mapa;
+ vm_page_t kern_pg, user_pg;
+ vm_object_t uobject;
+ vm_map_entry_t entry;
+ vm_pindex_t upindex;
+ vm_prot_t prot;
+ boolean_t wired;
+
+ KASSERT((uaddr & PAGE_MASK) == 0,
+ ("vm_pgmoveco: uaddr is not page aligned"));
+
+ /*
+ * Herein the physical page is validated and dirtied. It is
+ * unwired in sf_buf_mext().
+ */
+ kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
+ kern_pg->valid = VM_PAGE_BITS_ALL;
+ KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
+ ("vm_pgmoveco: kern_pg is not correctly wired"));
+
+ if ((vm_map_lookup(&map, uaddr,
+ VM_PROT_WRITE, &entry, &uobject,
+ &upindex, &prot, &wired)) != KERN_SUCCESS) {
+ return(EFAULT);
+ }
+ VM_OBJECT_LOCK(uobject);
+retry:
+ if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
+ if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
+ goto retry;
+ vm_page_lock_queues();
+ pmap_remove_all(user_pg);
+ vm_page_free(user_pg);
+ } else {
+ /*
+ * Even if a physical page does not exist in the
+ * object chain's first object, a physical page from a
+ * backing object may be mapped read only.
+ */
+ if (uobject->backing_object != NULL)
+ pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
+ vm_page_lock_queues();
+ }
+ vm_page_insert(kern_pg, uobject, upindex);
+ vm_page_dirty(kern_pg);
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(uobject);
+ vm_map_lookup_done(map, entry);
+ return(KERN_SUCCESS);
+}
+#endif /* ZERO_COPY_SOCKETS */
+
+int
+uiomove(void *cp, int n, struct uio *uio)
+{
+ struct thread *td = curthread;
+ struct iovec *iov;
+ u_int cnt;
+ int error = 0;
+ int save = 0;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomove: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomove proc"));
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "Calling uiomove()");
+
+#ifndef __rtems__
+ save = td->td_pflags & TDP_DEADLKTREAT;
+ td->td_pflags |= TDP_DEADLKTREAT;
+#endif
+
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+
+ switch (uio->uio_segflg) {
+
+ case UIO_USERSPACE:
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
+ uio_yield();
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error)
+ goto out;
+ break;
+
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ cp = (char *)cp + cnt;
+ n -= cnt;
+ }
+out:
+#ifndef __rtems__
+ if (save == 0)
+ td->td_pflags &= ~TDP_DEADLKTREAT;
+#endif
+ return (error);
+}
+
+/*
+ * Wrapper for uiomove() that validates the arguments against a known-good
+ * kernel buffer. Currently, uiomove accepts a signed (n) argument, which
+ * is almost definitely a bad thing, so we catch that here as well. We
+ * return a runtime failure, but it might be desirable to generate a runtime
+ * assertion failure instead.
+ */
+int
+uiomove_frombuf(void *buf, int buflen, struct uio *uio)
+{
+ unsigned int offset, n;
+
+ if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
+ (offset = uio->uio_offset) != uio->uio_offset)
+ return (EINVAL);
+ if (buflen <= 0 || offset >= buflen)
+ return (0);
+ if ((n = buflen - offset) > INT_MAX)
+ return (EINVAL);
+ return (uiomove((char *)buf + offset, n, uio));
+}
+
+#ifdef ZERO_COPY_SOCKETS
+/*
+ * Experimental support for zero-copy I/O
+ */
+static int
+userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
+{
+ struct iovec *iov;
+ int error;
+
+ iov = uio->uio_iov;
+ if (uio->uio_rw == UIO_READ) {
+ if ((so_zero_copy_receive != 0)
+ && ((cnt & PAGE_MASK) == 0)
+ && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
+ && ((uio->uio_offset & PAGE_MASK) == 0)
+ && ((((intptr_t) cp) & PAGE_MASK) == 0)
+ && (disposable != 0)) {
+ /* SOCKET: use page-trading */
+ /*
+ * We only want to call vm_pgmoveco() on
+ * disposable pages, since it gives the
+ * kernel page to the userland process.
+ */
+ error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
+ (vm_offset_t)cp, (vm_offset_t)iov->iov_base);
+
+ /*
+ * If we get an error back, attempt
+ * to use copyout() instead. The
+ * disposable page should be freed
+ * automatically if we weren't able to move
+ * it into userland.
+ */
+ if (error != 0)
+ error = copyout(cp, iov->iov_base, cnt);
+ } else {
+ error = copyout(cp, iov->iov_base, cnt);
+ }
+ } else {
+ error = copyin(iov->iov_base, cp, cnt);
+ }
+ return (error);
+}
+
+int
+uiomoveco(void *cp, int n, struct uio *uio, int disposable)
+{
+ struct iovec *iov;
+ u_int cnt;
+ int error;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomoveco: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomoveco proc"));
+
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+
+ switch (uio->uio_segflg) {
+
+ case UIO_USERSPACE:
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
+ uio_yield();
+
+ error = userspaceco(cp, cnt, uio, disposable);
+
+ if (error)
+ return (error);
+ break;
+
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ cp = (char *)cp + cnt;
+ n -= cnt;
+ }
+ return (0);
+}
+#endif /* ZERO_COPY_SOCKETS */
+
+/*
+ * Give next character to user as result of read.
+ */
+int
+ureadc(int c, struct uio *uio)
+{
+ struct iovec *iov;
+ char *iov_base;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "Calling ureadc()");
+
+again:
+ if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
+ panic("ureadc");
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iovcnt--;
+ uio->uio_iov++;
+ goto again;
+ }
+ switch (uio->uio_segflg) {
+
+ case UIO_USERSPACE:
+ if (subyte(iov->iov_base, c) < 0)
+ return (EFAULT);
+ break;
+
+ case UIO_SYSSPACE:
+ iov_base = iov->iov_base;
+ *iov_base = c;
+ iov->iov_base = iov_base;
+ break;
+
+ case UIO_NOCOPY:
+ break;
+ }
+ iov->iov_base = (char *)iov->iov_base + 1;
+ iov->iov_len--;
+ uio->uio_resid--;
+ uio->uio_offset++;
+ return (0);
+}
+
+/*
+ * General routine to allocate a hash table with control of memory flags.
+ */
+void *
+hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
+ int flags)
+{
+ long hashsize;
+ LIST_HEAD(generic, generic) *hashtbl;
+ int i;
+
+ if (elements <= 0)
+ panic("hashinit: bad elements");
+
+ /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
+ KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
+ ("Bad flags (0x%x) passed to hashinit_flags", flags));
+
+ for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
+ continue;
+ hashsize >>= 1;
+
+ if (flags & HASH_NOWAIT)
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
+ type, M_NOWAIT);
+ else
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
+ type, M_WAITOK);
+
+ if (hashtbl != NULL) {
+ for (i = 0; i < hashsize; i++)
+ LIST_INIT(&hashtbl[i]);
+ *hashmask = hashsize - 1;
+ }
+ return (hashtbl);
+}
+
+/*
+ * Allocate and initialize a hash table with default flag: may sleep.
+ */
+void *
+hashinit(int elements, struct malloc_type *type, u_long *hashmask)
+{
+
+ return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
+}
+
+void
+hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
+{
+ LIST_HEAD(generic, generic) *hashtbl, *hp;
+
+ hashtbl = vhashtbl;
+ for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
+ if (!LIST_EMPTY(hp))
+ panic("hashdestroy: hash not empty");
+ free(hashtbl, type);
+}
+
+static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
+ 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
+ 7159, 7673, 8191, 12281, 16381, 24571, 32749 };
+#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
+
+/*
+ * General routine to allocate a prime number sized hash table.
+ */
+void *
+phashinit(int elements, struct malloc_type *type, u_long *nentries)
+{
+ long hashsize;
+ LIST_HEAD(generic, generic) *hashtbl;
+ int i;
+
+ if (elements <= 0)
+ panic("phashinit: bad elements");
+ for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
+ i++;
+ if (i == NPRIMES)
+ break;
+ hashsize = primes[i];
+ }
+ hashsize = primes[i - 1];
+ hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
+ for (i = 0; i < hashsize; i++)
+ LIST_INIT(&hashtbl[i]);
+ *nentries = hashsize;
+ return (hashtbl);
+}
+
+void
+uio_yield(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ DROP_GIANT();
+ thread_lock(td);
+#ifndef __rtems__
+ sched_prio(td, td->td_user_pri);
+#endif
+ mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
+ thread_unlock(td);
+ PICKUP_GIANT();
+}
+
+int
+copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
+ int seg)
+{
+ int error = 0;
+
+ switch (seg) {
+ case UIO_USERSPACE:
+ error = copyin(src, dst, len);
+ break;
+ case UIO_SYSSPACE:
+ bcopy(src, dst, len);
+ break;
+ default:
+ panic("copyinfrom: bad seg %d\n", seg);
+ }
+ return (error);
+}
+
+int
+copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
+ size_t * __restrict copied, int seg)
+{
+ int error = 0;
+
+ switch (seg) {
+ case UIO_USERSPACE:
+ error = copyinstr(src, dst, len, copied);
+ break;
+ case UIO_SYSSPACE:
+ error = copystr(src, dst, len, copied);
+ break;
+ default:
+ panic("copyinstrfrom: bad seg %d\n", seg);
+ }
+ return (error);
+}
+
+int
+copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
+{
+ u_int iovlen;
+
+ *iov = NULL;
+ if (iovcnt > UIO_MAXIOV)
+ return (error);
+ iovlen = iovcnt * sizeof (struct iovec);
+ *iov = malloc(iovlen, M_IOV, M_WAITOK);
+ error = copyin(iovp, *iov, iovlen);
+ if (error) {
+ free(*iov, M_IOV);
+ *iov = NULL;
+ }
+ return (error);
+}
+
+int
+copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
+{
+ struct iovec *iov;
+ struct uio *uio;
+ u_int iovlen;
+ int error, i;
+
+ *uiop = NULL;
+ if (iovcnt > UIO_MAXIOV)
+ return (EINVAL);
+ iovlen = iovcnt * sizeof (struct iovec);
+ uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
+ iov = (struct iovec *)(uio + 1);
+ error = copyin(iovp, iov, iovlen);
+ if (error) {
+ free(uio, M_IOV);
+ return (error);
+ }
+ uio->uio_iov = iov;
+ uio->uio_iovcnt = iovcnt;
+ uio->uio_segflg = UIO_USERSPACE;
+ uio->uio_offset = -1;
+ uio->uio_resid = 0;
+ for (i = 0; i < iovcnt; i++) {
+ if (iov->iov_len > INT_MAX - uio->uio_resid) {
+ free(uio, M_IOV);
+ return (EINVAL);
+ }
+ uio->uio_resid += iov->iov_len;
+ iov++;
+ }
+ *uiop = uio;
+ return (0);
+}
+
+struct uio *
+cloneuio(struct uio *uiop)
+{
+ struct uio *uio;
+ int iovlen;
+
+ iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
+ uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
+ *uio = *uiop;
+ uio->uio_iov = (struct iovec *)(uio + 1);
+ bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
+ return (uio);
+}
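
uiomove() is the other heavily used routine this file brings in (and
the reason the port needed <sys/uio.h>): it copies data between a
kernel buffer and the possibly scattered iovecs of a struct uio,
advancing uio_offset and uio_resid as it goes. A minimal sketch of a
character-device read handler built on the bounds-checked wrapper;
foo_read() and its message are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/uio.h>

static int
foo_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	char msg[] = "hello from foo\n";

	/*
	 * uiomove_frombuf() clamps the transfer against uio_offset and
	 * uio_resid, so reads past the end of the buffer return 0 bytes
	 * rather than overrunning it.
	 */
	return (uiomove_frombuf(msg, sizeof(msg) - 1, uio));
}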
diff --git a/freebsd/sys/mutex.h b/freebsd/sys/mutex.h
index 22ffa6fb..434a1eaf 100644
--- a/freebsd/sys/mutex.h
+++ b/freebsd/sys/mutex.h
@@ -131,12 +131,18 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
void _thread_lock_flags(struct thread *, int, const char *, int);
+#ifndef __rtems__
#define thread_lock(tdp) \
_thread_lock_flags((tdp), 0, __FILE__, __LINE__)
#define thread_lock_flags(tdp, opt) \
_thread_lock_flags((tdp), (opt), __FILE__, __LINE__)
#define thread_unlock(tdp) \
mtx_unlock_spin((tdp)->td_lock)
+#else
+#define thread_lock(tdp)
+#define thread_lock_flags(tdp, opt)
+#define thread_unlock(tdp)
+#endif
#define mtx_recurse lock_object.lo_data
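
With these stubs, upstream code that brackets a critical section with
thread_lock()/thread_unlock(), such as uio_yield() in kern_subr.c
above, still compiles on RTEMS; the macros simply expand to nothing.
A sketch, with critical_work() as a hypothetical stand-in:

static void
example(struct thread *td)
{
	thread_lock(td);	/* FreeBSD: spin-lock td_lock; RTEMS: no-op */
	critical_work();
	thread_unlock(td);
	/* Under __rtems__ only the critical_work() call survives cpp. */
}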