path: root/freebsd/sys/kern
author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-08-21 13:47:02 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-09-21 10:29:41 +0200
commit     bcdce02d9bc8150e1d191ed5ca9da45b7604964a (patch)
tree       3b2faf509db7672ee1fc98857736470be97e7ed8 /freebsd/sys/kern
parent     Update to FreeBSD head 2018-04-01 (diff)
Update to FreeBSD head 2018-06-01
Git mirror commit fb63610a69b0eb7f69a201ba05c4c1a7a2739cf9. Update #3472.
Diffstat (limited to 'freebsd/sys/kern')
-rw-r--r--  freebsd/sys/kern/init_main.c        |   27
-rw-r--r--  freebsd/sys/kern/kern_conf.c        |    8
-rw-r--r--  freebsd/sys/kern/kern_event.c       |    7
-rw-r--r--  freebsd/sys/kern/kern_intr.c        |  580
-rw-r--r--  freebsd/sys/kern/kern_linker.c      |    8
-rw-r--r--  freebsd/sys/kern/kern_mbuf.c        |  200
-rw-r--r--  freebsd/sys/kern/kern_mib.c         |    6
-rw-r--r--  freebsd/sys/kern/kern_module.c      |    4
-rw-r--r--  freebsd/sys/kern/kern_synch.c       |    8
-rw-r--r--  freebsd/sys/kern/kern_sysctl.c      |    3
-rw-r--r--  freebsd/sys/kern/subr_bus.c         |  112
-rw-r--r--  freebsd/sys/kern/subr_gtaskqueue.c  | 1059
-rw-r--r--  freebsd/sys/kern/subr_lock.c        |    6
-rw-r--r--  freebsd/sys/kern/subr_pcpu.c        |    2
-rw-r--r--  freebsd/sys/kern/subr_prf.c         |    1
-rw-r--r--  freebsd/sys/kern/subr_sleepqueue.c  |    7
-rw-r--r--  freebsd/sys/kern/subr_uio.c         |    6
-rw-r--r--  freebsd/sys/kern/sys_generic.c      |  123
-rwxr-xr-x  freebsd/sys/kern/sys_pipe.c         |    2
-rw-r--r--  freebsd/sys/kern/tty.c              |    1
-rw-r--r--  freebsd/sys/kern/tty_inq.c          |    2
-rw-r--r--  freebsd/sys/kern/tty_outq.c         |    2
-rw-r--r--  freebsd/sys/kern/uipc_mbuf.c        |    6
-rw-r--r--  freebsd/sys/kern/uipc_sockbuf.c     |    1
-rw-r--r--  freebsd/sys/kern/uipc_socket.c      |    1
-rw-r--r--  freebsd/sys/kern/uipc_syscalls.c    |   48
-rw-r--r--  freebsd/sys/kern/uipc_usrreq.c      |  615
27 files changed, 1862 insertions(+), 983 deletions(-)
diff --git a/freebsd/sys/kern/init_main.c b/freebsd/sys/kern/init_main.c
index 86bc11fc..42afff5e 100644
--- a/freebsd/sys/kern/init_main.c
+++ b/freebsd/sys/kern/init_main.c
@@ -722,7 +722,9 @@ start_init(void *dummy)
vm_offset_t addr;
struct execve_args args;
int options, error;
- char *var, *path, *next, *s;
+ size_t pathlen;
+ char *var, *path;
+ char *free_init_path, *tmp_init_path;
char *ucp, **uap, *arg0, *arg1;
struct thread *td;
struct proc *p;
@@ -751,17 +753,12 @@ start_init(void *dummy)
strlcpy(init_path, var, sizeof(init_path));
freeenv(var);
}
+ free_init_path = tmp_init_path = strdup(init_path, M_TEMP);
- for (path = init_path; *path != '\0'; path = next) {
- while (*path == ':')
- path++;
- if (*path == '\0')
- break;
- for (next = path; *next != '\0' && *next != ':'; next++)
- /* nothing */ ;
+ while ((path = strsep(&tmp_init_path, ":")) != NULL) {
+ pathlen = strlen(path) + 1;
if (bootverbose)
- printf("start_init: trying %.*s\n", (int)(next - path),
- path);
+ printf("start_init: trying %s\n", path);
/*
* Move out the boot flag argument.
@@ -793,9 +790,8 @@ start_init(void *dummy)
/*
* Move out the file name (also arg 0).
*/
- (void)subyte(--ucp, 0);
- for (s = next - 1; s >= path; s--)
- (void)subyte(--ucp, *s);
+ ucp -= pathlen;
+ copyout(path, ucp, pathlen);
arg0 = ucp;
/*
@@ -821,13 +817,14 @@ start_init(void *dummy)
* to user mode as init!
*/
if ((error = sys_execve(td, &args)) == EJUSTRETURN) {
+ free(free_init_path, M_TEMP);
TSEXIT();
return;
}
if (error != ENOENT)
- printf("exec %.*s: error %d\n", (int)(next - path),
- path, error);
+ printf("exec %s: error %d\n", path, error);
}
+ free(free_init_path, M_TEMP);
printf("init: not found in path %s\n", init_path);
panic("no init");
}
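
The rewrite above replaces the hand-rolled colon scanner with strsep(3) over a strdup'd copy of init_path; the copy is needed because strsep writes NUL bytes into its input, and the original pointer must be kept for free(). A minimal userspace sketch of the same iteration pattern, with illustrative values (note that, unlike the new kernel loop, this sketch also skips empty components, which the old scanner did):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *free_path, *tmp_path, *path;

	/* strsep() modifies the buffer, so iterate over a copy and
	 * keep the original pointer around for free(). */
	free_path = tmp_path = strdup("/sbin/init:/sbin/oinit:/sbin/init.bak");
	if (free_path == NULL)
		return (1);
	while ((path = strsep(&tmp_path, ":")) != NULL) {
		if (*path == '\0')
			continue;	/* empty component, e.g. "a::b" */
		printf("trying %s\n", path);
	}
	free(free_path);
	return (0);
}
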
diff --git a/freebsd/sys/kern/kern_conf.c b/freebsd/sys/kern/kern_conf.c
index f62e8e4d..8605cc43 100644
--- a/freebsd/sys/kern/kern_conf.c
+++ b/freebsd/sys/kern/kern_conf.c
@@ -902,11 +902,11 @@ make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
{
struct cdev *dev;
va_list ap;
- int res;
+ int res __unused;
va_start(ap, fmt);
res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt,
- ap);
+ ap);
va_end(ap);
KASSERT(res == 0 && dev != NULL,
("make_dev: failed make_dev_credv (error=%d)", res));
@@ -920,7 +920,7 @@ make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
{
struct cdev *dev;
va_list ap;
- int res;
+ int res __unused;
va_start(ap, fmt);
res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap);
@@ -1034,7 +1034,7 @@ make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
struct cdev *dev;
va_list ap;
- int res;
+ int res __unused;
va_start(ap, fmt);
res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap);
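
These hunks mark the result variables __unused because their only consumer is a KASSERT(), which compiles to nothing in kernels built without INVARIANTS; the annotation silences the resulting set-but-unused warning in that configuration. The userspace analogue, sketched with assert(3):

#include <assert.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

static int
do_work(void)
{
	return (0);
}

void
example(void)
{
	int res __unused;	/* only read by assert() below */

	res = do_work();
	/* With -DNDEBUG the assert vanishes and res is never read;
	 * __unused suppresses the compiler warning that would cause. */
	assert(res == 0);
}
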
diff --git a/freebsd/sys/kern/kern_event.c b/freebsd/sys/kern/kern_event.c
index 905ef23c..33fca549 100644
--- a/freebsd/sys/kern/kern_event.c
+++ b/freebsd/sys/kern/kern_event.c
@@ -33,7 +33,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_ktrace.h>
#include <rtems/bsd/local/opt_kqueue.h>
@@ -791,7 +790,7 @@ static void
filt_timerdetach(struct knote *kn)
{
struct kq_timer_cb_data *kc;
- unsigned int old;
+ unsigned int old __unused;
kc = kn->kn_ptr.p_v;
callout_drain(&kc->c);
@@ -1409,7 +1408,6 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int wa
struct file *fp;
struct knote *kn, *tkn;
struct knlist *knl;
- cap_rights_t rights;
int error, filt, event;
int haskqglobal, filedesc_unlock;
@@ -1445,8 +1443,7 @@ findkn:
if (kev->ident > INT_MAX)
error = EBADF;
else
- error = fget(td, kev->ident,
- cap_rights_init(&rights, CAP_EVENT), &fp);
+ error = fget(td, kev->ident, &cap_event_rights, &fp);
if (error)
goto done;
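
The kqueue hunk drops the per-call cap_rights_t on the stack in favor of a pointer to a preinitialized global, so fget() lookups no longer rebuild the same constant rights mask on every call. A hedged sketch of that hoisting pattern, with a hypothetical setup function (the real kernel initializes cap_event_rights elsewhere):

/* Hypothetical sketch: build the constant rights object once, not per call. */
static cap_rights_t cap_event_rights_example;

static void
rights_setup(void)
{
	cap_rights_init(&cap_event_rights_example, CAP_EVENT);
}

/* The lookup path then passes the shared, effectively read-only object:
 *
 *	error = fget(td, kev->ident, &cap_event_rights_example, &fp);
 */
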
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index aa896467..8f6c2a6d 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -63,9 +63,6 @@ __FBSDID("$FreeBSD$");
#ifndef __rtems__
#include <machine/md_var.h>
#else /* __rtems__ */
- #ifdef INTR_FILTER
- #error INTR_FILTER is currently not suppported with RTEMS
- #endif
#include <machine/rtems-bsd-thread.h>
#define RTEMSBSD_SWI_WAKEUP_EVENT RTEMS_EVENT_31
#undef ticks
@@ -115,26 +112,13 @@ static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
static void intr_event_update(struct intr_event *ie);
-#ifdef INTR_FILTER
-static int intr_event_schedule_thread(struct intr_event *ie,
- struct intr_thread *ithd);
-static int intr_filter_loop(struct intr_event *ie,
- struct trapframe *frame, struct intr_thread **ithd);
-static struct intr_thread *ithread_create(const char *name,
- struct intr_handler *ih);
-#else
static int intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
-#endif
#ifndef __rtems__
static void ithread_destroy(struct intr_thread *ithread);
#endif /* __rtems__ */
static void ithread_execute_handlers(struct proc *p,
struct intr_event *ie);
-#ifdef INTR_FILTER
-static void priv_ithread_execute_handler(struct proc *p,
- struct intr_handler *ih);
-#endif
static void ithread_loop(void *);
static void ithread_update(struct intr_thread *ithd);
#ifndef __rtems__
@@ -534,7 +518,6 @@ intr_event_destroy(struct intr_event *ie)
}
#endif /* __rtems__ */
-#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
@@ -565,36 +548,6 @@ ithread_create(const char *name)
CTR2(KTR_INTR, "%s: created %s", __func__, name);
return (ithd);
}
-#else
-#ifndef __rtems__
-static struct intr_thread *
-ithread_create(const char *name, struct intr_handler *ih)
-{
-#ifdef __rtems__
- struct proc *intrproc;
-#endif /* __rtems__ */
- struct intr_thread *ithd;
- struct thread *td;
- int error;
-
- ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
-
- error = kproc_kthread_add(ithread_loop, ih, &intrproc,
- &td, RFSTOPPED | RFHIGHPID,
- 0, "intr", "%s", name);
- if (error)
- panic("kproc_create() failed with %d", error);
- thread_lock(td);
- sched_class(td, PRI_ITHD);
- TD_SET_IWAIT(td);
- thread_unlock(td);
- td->td_pflags |= TDP_ITHREAD;
- ithd->it_thread = td;
- CTR2(KTR_INTR, "%s: created %s", __func__, name);
- return (ithd);
-}
-#endif /* __rtems__ */
-#endif
#ifndef __rtems__
static void
@@ -614,7 +567,6 @@ ithread_destroy(struct intr_thread *ithread)
}
#endif /* __rtems__ */
-#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
@@ -688,92 +640,6 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
*cookiep = ih;
return (0);
}
-#else
-#ifndef __rtems__
-int
-intr_event_add_handler(struct intr_event *ie, const char *name,
- driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
- enum intr_type flags, void **cookiep)
-{
- struct intr_handler *ih, *temp_ih;
- struct intr_thread *it;
-
- if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
- return (EINVAL);
-
- /* Allocate and populate an interrupt handler structure. */
- ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
- ih->ih_filter = filter;
- ih->ih_handler = handler;
- ih->ih_argument = arg;
- strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
- ih->ih_event = ie;
- ih->ih_pri = pri;
- if (flags & INTR_EXCL)
- ih->ih_flags = IH_EXCLUSIVE;
- if (flags & INTR_MPSAFE)
- ih->ih_flags |= IH_MPSAFE;
- if (flags & INTR_ENTROPY)
- ih->ih_flags |= IH_ENTROPY;
-
- /* We can only have one exclusive handler in a event. */
- mtx_lock(&ie->ie_lock);
- if (!TAILQ_EMPTY(&ie->ie_handlers)) {
- if ((flags & INTR_EXCL) ||
- (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
- mtx_unlock(&ie->ie_lock);
- free(ih, M_ITHREAD);
- return (EINVAL);
- }
- }
-
- /* For filtered handlers, create a private ithread to run on. */
- if (filter != NULL && handler != NULL) {
- mtx_unlock(&ie->ie_lock);
- it = ithread_create("intr: newborn", ih);
- mtx_lock(&ie->ie_lock);
- it->it_event = ie;
- ih->ih_thread = it;
- ithread_update(it); /* XXX - do we really need this?!?!? */
- } else { /* Create the global per-event thread if we need one. */
- while (ie->ie_thread == NULL && handler != NULL) {
- if (ie->ie_flags & IE_ADDING_THREAD)
- msleep(ie, &ie->ie_lock, 0, "ithread", 0);
- else {
- ie->ie_flags |= IE_ADDING_THREAD;
- mtx_unlock(&ie->ie_lock);
- it = ithread_create("intr: newborn", ih);
- mtx_lock(&ie->ie_lock);
- ie->ie_flags &= ~IE_ADDING_THREAD;
- ie->ie_thread = it;
- it->it_event = ie;
- ithread_update(it);
- wakeup(ie);
- }
- }
- }
-
- /* Add the new handler to the event in priority order. */
- TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
- if (temp_ih->ih_pri > ih->ih_pri)
- break;
- }
- if (temp_ih == NULL)
- TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
- else
- TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
- intr_event_update(ie);
-
- CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
- ie->ie_name);
- mtx_unlock(&ie->ie_lock);
-
- if (cookiep != NULL)
- *cookiep = ih;
- return (0);
-}
-#endif /* __rtems__ */
-#endif
#ifndef __rtems__
/*
@@ -892,7 +758,6 @@ _intr_drain(int irq)
#endif /* __rtems__ */
-#ifndef INTR_FILTER
#ifndef __rtems__
int
intr_event_remove_handler(void *cookie)
@@ -997,9 +862,6 @@ intr_event_schedule_thread(struct intr_event *ie)
struct intr_thread *it;
struct thread *td;
struct thread *ctd;
-#ifndef __rtems__
- struct proc *p;
-#endif /* __rtems__ */
/*
* If no ithread or no handlers, then we have a stray interrupt.
@@ -1011,9 +873,6 @@ intr_event_schedule_thread(struct intr_event *ie)
ctd = curthread;
it = ie->ie_thread;
td = it->it_thread;
-#ifndef __rtems__
- p = td->td_proc;
-#endif /* __rtems__ */
/*
* If any of the handlers for this ithread claim to be good
@@ -1026,7 +885,7 @@ intr_event_schedule_thread(struct intr_event *ie)
}
#ifndef __rtems__
- KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
+ KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
#endif /* __rtems__ */
/*
@@ -1042,13 +901,13 @@ intr_event_schedule_thread(struct intr_event *ie)
thread_lock(td);
#ifndef __rtems__
if (TD_AWAITING_INTR(td)) {
- CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
+ CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
td->td_name);
TD_CLR_IWAIT(td);
sched_add(td, SRQ_INTR);
} else {
CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
- __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
+ __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
}
#else /* __rtems__ */
/* Send event to wake the thread up.
@@ -1061,174 +920,6 @@ intr_event_schedule_thread(struct intr_event *ie)
return (0);
}
-#else
-#ifndef __rtems__
-int
-intr_event_remove_handler(void *cookie)
-{
- struct intr_handler *handler = (struct intr_handler *)cookie;
- struct intr_event *ie;
- struct intr_thread *it;
-#ifdef INVARIANTS
- struct intr_handler *ih;
-#endif
-#ifdef notyet
- int dead;
-#endif
-
- if (handler == NULL)
- return (EINVAL);
- ie = handler->ih_event;
- KASSERT(ie != NULL,
- ("interrupt handler \"%s\" has a NULL interrupt event",
- handler->ih_name));
- mtx_lock(&ie->ie_lock);
- CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
- ie->ie_name);
-#ifdef INVARIANTS
- TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
- if (ih == handler)
- goto ok;
- mtx_unlock(&ie->ie_lock);
- panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
- ih->ih_name, ie->ie_name);
-ok:
-#endif
- /*
- * If there are no ithreads (per event and per handler), then
- * just remove the handler and return.
- * XXX: Note that an INTR_FAST handler might be running on another CPU!
- */
- if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
- TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
- mtx_unlock(&ie->ie_lock);
- free(handler, M_ITHREAD);
- return (0);
- }
-
- /* Private or global ithread? */
- it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
- /*
- * If the interrupt thread is already running, then just mark this
- * handler as being dead and let the ithread do the actual removal.
- *
- * During a cold boot while cold is set, msleep() does not sleep,
- * so we have to remove the handler here rather than letting the
- * thread do it.
- */
- thread_lock(it->it_thread);
- if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
- handler->ih_flags |= IH_DEAD;
-
- /*
- * Ensure that the thread will process the handler list
- * again and remove this handler if it has already passed
- * it on the list.
- *
- * The release part of the following store ensures
- * that the update of ih_flags is ordered before the
- * it_need setting. See the comment before
- * atomic_cmpset_acq(&ithd->it_need, ...) operation in
- * the ithread_execute_handlers().
- */
- atomic_store_rel_int(&it->it_need, 1);
- } else
- TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
- thread_unlock(it->it_thread);
- while (handler->ih_flags & IH_DEAD)
- msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
- /*
- * At this point, the handler has been disconnected from the event,
- * so we can kill the private ithread if any.
- */
- if (handler->ih_thread) {
- ithread_destroy(handler->ih_thread);
- handler->ih_thread = NULL;
- }
- intr_event_update(ie);
-#ifdef notyet
- /*
- * XXX: This could be bad in the case of ppbus(8). Also, I think
- * this could lead to races of stale data when servicing an
- * interrupt.
- */
- dead = 1;
- TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
- if (handler != NULL) {
- dead = 0;
- break;
- }
- }
- if (dead) {
- ithread_destroy(ie->ie_thread);
- ie->ie_thread = NULL;
- }
-#endif
- mtx_unlock(&ie->ie_lock);
- free(handler, M_ITHREAD);
- return (0);
-}
-
-static int
-intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
-{
- struct intr_entropy entropy;
- struct thread *td;
- struct thread *ctd;
-#ifndef __rtems__
- struct proc *p;
-#endif /* __rtems__ */
-
- /*
- * If no ithread or no handlers, then we have a stray interrupt.
- */
- if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
- return (EINVAL);
-
- ctd = curthread;
- td = it->it_thread;
-#ifndef __rtems__
- p = td->td_proc;
-#endif /* __rtems__ */
-
- /*
- * If any of the handlers for this ithread claim to be good
- * sources of entropy, then gather some.
- */
- if (ie->ie_flags & IE_ENTROPY) {
- entropy.event = (uintptr_t)ie;
- entropy.td = ctd;
- random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
- }
-
- KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
-
- /*
- * Set it_need to tell the thread to keep running if it is already
- * running. Then, lock the thread and see if we actually need to
- * put it on the runqueue.
- *
- * Use store_rel to arrange that the store to ih_need in
- * swi_sched() is before the store to it_need and prepare for
- * transfer of this order to loads in the ithread.
- */
- atomic_store_rel_int(&it->it_need, 1);
- thread_lock(td);
- if (TD_AWAITING_INTR(td)) {
- CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
- td->td_name);
- TD_CLR_IWAIT(td);
- sched_add(td, SRQ_INTR);
- } else {
- CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
- __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
- }
- thread_unlock(td);
-
- return (0);
-}
-#endif /* __rtems__ */
-#endif
/*
* Allow interrupt event binding for software interrupt handlers -- a no-op,
@@ -1283,7 +974,7 @@ swi_sched(void *cookie, int flags)
struct intr_handler *ih = (struct intr_handler *)cookie;
struct intr_event *ie = ih->ih_event;
struct intr_entropy entropy;
- int error;
+ int error __unused;
CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
ih->ih_need);
@@ -1303,11 +994,7 @@ swi_sched(void *cookie, int flags)
#ifndef __rtems__
VM_CNT_INC(v_soft);
#endif /* __rtems__ */
-#ifdef INTR_FILTER
- error = intr_event_schedule_thread(ie, ie->ie_thread);
-#else
error = intr_event_schedule_thread(ie);
-#endif
KASSERT(error == 0, ("stray software interrupt"));
}
}
@@ -1326,38 +1013,6 @@ swi_remove(void *cookie)
return (intr_event_remove_handler(cookie));
}
-#ifdef INTR_FILTER
-static void
-priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
-{
- struct intr_event *ie;
-
- ie = ih->ih_event;
- /*
- * If this handler is marked for death, remove it from
- * the list of handlers and wake up the sleeper.
- */
- if (ih->ih_flags & IH_DEAD) {
- mtx_lock(&ie->ie_lock);
- TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
- ih->ih_flags &= ~IH_DEAD;
- wakeup(ih);
- mtx_unlock(&ie->ie_lock);
- return;
- }
-
- /* Execute this handler. */
- CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
- __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
- ih->ih_name, ih->ih_flags);
-
- if (!(ih->ih_flags & IH_MPSAFE))
- mtx_lock(&Giant);
- ih->ih_handler(ih->ih_argument);
- if (!(ih->ih_flags & IH_MPSAFE))
- mtx_unlock(&Giant);
-}
-#endif
#endif /* __rtems__ */
/*
@@ -1461,7 +1116,6 @@ ithread_execute_handlers(struct proc *p, struct intr_event *ie)
ie->ie_post_ithread(ie->ie_source);
}
-#ifndef INTR_FILTER
/*
* This is the main code for interrupt threads.
*/
@@ -1571,7 +1225,7 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
struct intr_handler *ih;
struct trapframe *oldframe;
struct thread *td;
- int error, ret, thread;
+ int ret, thread;
td = curthread;
@@ -1644,233 +1298,15 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
/* Schedule the ithread if needed. */
if (thread) {
- error = intr_event_schedule_thread(ie);
- KASSERT(error == 0, ("bad stray interrupt"));
- }
- critical_exit();
- td->td_intr_nesting_level--;
- return (0);
-}
-#endif /* __rtems__ */
-#else
-#ifndef __rtems__
-/*
- * This is the main code for interrupt threads.
- */
-static void
-ithread_loop(void *arg)
-{
- struct intr_thread *ithd;
- struct intr_handler *ih;
- struct intr_event *ie;
- struct thread *td;
- struct proc *p;
- int priv;
- int wake;
-
- td = curthread;
- p = td->td_proc;
- ih = (struct intr_handler *)arg;
- priv = (ih->ih_thread != NULL) ? 1 : 0;
- ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
- KASSERT(ithd->it_thread == td,
- ("%s: ithread and proc linkage out of sync", __func__));
- ie = ithd->it_event;
- ie->ie_count = 0;
- wake = 0;
-
- /*
- * As long as we have interrupts outstanding, go through the
- * list of handlers, giving each one a go at it.
- */
- for (;;) {
- /*
- * If we are an orphaned thread, then just die.
- */
- if (ithd->it_flags & IT_DEAD) {
- CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
- p->p_pid, td->td_name);
- free(ithd, M_ITHREAD);
- kthread_exit();
- }
+ int error __unused;
- /*
- * Service interrupts. If another interrupt arrives while
- * we are running, it will set it_need to note that we
- * should make another pass.
- *
- * The load_acq part of the following cmpset ensures
- * that the load of ih_need in ithread_execute_handlers()
- * is ordered after the load of it_need here.
- */
- while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
- if (priv)
- priv_ithread_execute_handler(p, ih);
- else
- ithread_execute_handlers(p, ie);
- }
- WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
- mtx_assert(&Giant, MA_NOTOWNED);
-
- /*
- * Processed all our interrupts. Now get the sched
- * lock. This may take a while and it_need may get
- * set again, so we have to check it again.
- */
- thread_lock(td);
- if (atomic_load_acq_int(&ithd->it_need) == 0 &&
- (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
- TD_SET_IWAIT(td);
- ie->ie_count = 0;
- mi_switch(SW_VOL | SWT_IWAIT, NULL);
- }
- if (ithd->it_flags & IT_WAIT) {
- wake = 1;
- ithd->it_flags &= ~IT_WAIT;
- }
- thread_unlock(td);
- if (wake) {
- wakeup(ithd);
- wake = 0;
- }
- }
-}
-
-/*
- * Main loop for interrupt filter.
- *
- * Some architectures (i386, amd64 and arm) require the optional frame
- * parameter, and use it as the main argument for fast handler execution
- * when ih_argument == NULL.
- *
- * Return value:
- * o FILTER_STRAY: No filter recognized the event, and no
- * filter-less handler is registered on this
- * line.
- * o FILTER_HANDLED: A filter claimed the event and served it.
- * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at
- * least one filter-less handler on this line.
- * o FILTER_HANDLED |
- * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for
- * scheduling the per-handler ithread.
- *
- * In case an ithread has to be scheduled, in *ithd there will be a
- * pointer to a struct intr_thread containing the thread to be
- * scheduled.
- */
-
-static int
-intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
- struct intr_thread **ithd)
-{
- struct intr_handler *ih;
- void *arg;
- int ret, thread_only;
-
- ret = 0;
- thread_only = 0;
- TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
- /*
- * Execute fast interrupt handlers directly.
- * To support clock handlers, if a handler registers
- * with a NULL argument, then we pass it a pointer to
- * a trapframe as its argument.
- */
- arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
-
- CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
- ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
-
- if (ih->ih_filter != NULL)
- ret = ih->ih_filter(arg);
- else {
- thread_only = 1;
- continue;
- }
- KASSERT(ret == FILTER_STRAY ||
- ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
- (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
- ("%s: incorrect return value %#x from %s", __func__, ret,
- ih->ih_name));
- if (ret & FILTER_STRAY)
- continue;
- else {
- *ithd = ih->ih_thread;
- return (ret);
- }
- }
-
- /*
- * No filters handled the interrupt and we have at least
- * one handler without a filter. In this case, we schedule
- * all of the filter-less handlers to run in the ithread.
- */
- if (thread_only) {
- *ithd = ie->ie_thread;
- return (FILTER_SCHEDULE_THREAD);
- }
- return (FILTER_STRAY);
-}
-
-/*
- * Main interrupt handling body.
- *
- * Input:
- * o ie: the event connected to this interrupt.
- * o frame: some archs (i.e. i386) pass a frame to some.
- * handlers as their main argument.
- * Return value:
- * o 0: everything ok.
- * o EINVAL: stray interrupt.
- */
-int
-intr_event_handle(struct intr_event *ie, struct trapframe *frame)
-{
- struct intr_thread *ithd;
- struct trapframe *oldframe;
- struct thread *td;
- int thread;
-
- ithd = NULL;
- td = curthread;
-
- if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
- return (EINVAL);
-
- td->td_intr_nesting_level++;
- thread = 0;
- critical_enter();
- oldframe = td->td_intr_frame;
- td->td_intr_frame = frame;
- thread = intr_filter_loop(ie, frame, &ithd);
- if (thread & FILTER_HANDLED) {
- if (ie->ie_post_filter != NULL)
- ie->ie_post_filter(ie->ie_source);
- } else {
- if (ie->ie_pre_ithread != NULL)
- ie->ie_pre_ithread(ie->ie_source);
+ error = intr_event_schedule_thread(ie);
+ KASSERT(error == 0, ("bad stray interrupt"));
}
- td->td_intr_frame = oldframe;
critical_exit();
-
- /* Interrupt storm logic */
- if (thread & FILTER_STRAY) {
- ie->ie_count++;
- if (ie->ie_count < intr_storm_threshold)
- printf("Interrupt stray detection not present\n");
- }
-
- /* Schedule an ithread if needed. */
- if (thread & FILTER_SCHEDULE_THREAD) {
- if (intr_event_schedule_thread(ie, ithd) != 0)
- panic("%s: impossible stray interrupt", __func__);
- }
td->td_intr_nesting_level--;
return (0);
}
-#endif /* __rtems__ */
-#endif
-#ifndef __rtems__
#ifdef DDB
/*
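
The comments retained in intr_event_schedule_thread() and intr_event_remove_handler() describe a release/acquire handoff: the scheduling side publishes handler state, then sets it_need with a releasing store; the ithread clears it_need with an acquiring compare-and-swap before walking the handler list. A standalone C11 sketch of the same pairing (names are illustrative):

#include <stdatomic.h>

static atomic_int ih_state;	/* data published before scheduling */
static atomic_int it_need;	/* "work pending" flag */

/* Producer: publish state first, then set the flag with release
 * semantics so the store to ih_state cannot be reordered after it. */
void
schedule(int state)
{
	atomic_store_explicit(&ih_state, state, memory_order_relaxed);
	atomic_store_explicit(&it_need, 1, memory_order_release);
}

/* Consumer: clear the flag with acquire semantics; on success, the
 * subsequent load is guaranteed to observe the published state. */
int
service(void)
{
	int expected = 1;

	if (atomic_compare_exchange_strong_explicit(&it_need, &expected, 0,
	    memory_order_acquire, memory_order_relaxed))
		return (atomic_load_explicit(&ih_state, memory_order_relaxed));
	return (-1);		/* nothing pending */
}
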
diff --git a/freebsd/sys/kern/kern_linker.c b/freebsd/sys/kern/kern_linker.c
index c19071a6..197ee5bb 100644
--- a/freebsd/sys/kern/kern_linker.c
+++ b/freebsd/sys/kern/kern_linker.c
@@ -168,7 +168,7 @@ linker_init(void *arg)
TAILQ_INIT(&linker_files);
}
-SYSINIT(linker, SI_SUB_KLD, SI_ORDER_FIRST, linker_init, 0);
+SYSINIT(linker, SI_SUB_KLD, SI_ORDER_FIRST, linker_init, NULL);
static void
linker_stop_class_add(void *arg)
@@ -426,7 +426,7 @@ linker_init_kernel_modules(void)
}
SYSINIT(linker_kernel, SI_SUB_KLD, SI_ORDER_ANY, linker_init_kernel_modules,
- 0);
+ NULL);
#ifndef __rtems__
static int
@@ -1700,7 +1700,7 @@ fail:
/* woohoo! we made it! */
}
-SYSINIT(preload, SI_SUB_KLD, SI_ORDER_MIDDLE, linker_preload, 0);
+SYSINIT(preload, SI_SUB_KLD, SI_ORDER_MIDDLE, linker_preload, NULL);
/*
* Handle preload files that failed to load any modules.
@@ -1735,7 +1735,7 @@ linker_preload_finish(void *arg)
* becomes runnable in SI_SUB_KTHREAD_INIT, so go slightly before that.
*/
SYSINIT(preload_finish, SI_SUB_KTHREAD_INIT - 100, SI_ORDER_MIDDLE,
- linker_preload_finish, 0);
+ linker_preload_finish, NULL);
/*
* Search for a not-loaded module by name.
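
The repeated 0 to NULL substitutions in this file (and in kern_module.c, kern_synch.c, and kern_sysctl.c below) reflect that SYSINIT's final argument is the void * handed to the init function, so NULL is the type-correct spelling of "no argument". In kernel style:

static void
example_init(void *arg)
{
	/* arg receives SYSINIT's fifth argument; NULL here. */
}
SYSINIT(example, SI_SUB_KLD, SI_ORDER_ANY, example_init, NULL);
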
diff --git a/freebsd/sys/kern/kern_mbuf.c b/freebsd/sys/kern/kern_mbuf.c
index 78e3528f..13467dfe 100644
--- a/freebsd/sys/kern/kern_mbuf.c
+++ b/freebsd/sys/kern/kern_mbuf.c
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
+#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
@@ -393,6 +394,199 @@ mbuf_init(void *dummy)
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
+#ifdef NETDUMP
+/*
+ * netdump makes use of a pre-allocated pool of mbufs and clusters. When
+ * netdump is configured, we initialize a set of UMA cache zones which return
+ * items from this pool. At panic-time, the regular UMA zone pointers are
+ * overwritten with those of the cache zones so that drivers may allocate and
+ * free mbufs and clusters without attempting to allocate physical memory.
+ *
+ * We keep mbufs and clusters in a pair of mbuf queues. In particular, for
+ * the purpose of caching clusters, we treat them as mbufs.
+ */
+static struct mbufq nd_mbufq =
+ { STAILQ_HEAD_INITIALIZER(nd_mbufq.mq_head), 0, INT_MAX };
+static struct mbufq nd_clustq =
+ { STAILQ_HEAD_INITIALIZER(nd_clustq.mq_head), 0, INT_MAX };
+
+static int nd_clsize;
+static uma_zone_t nd_zone_mbuf;
+static uma_zone_t nd_zone_clust;
+static uma_zone_t nd_zone_pack;
+
+static int
+nd_buf_import(void *arg, void **store, int count, int domain __unused,
+ int flags)
+{
+ struct mbufq *q;
+ struct mbuf *m;
+ int i;
+
+ q = arg;
+
+ for (i = 0; i < count; i++) {
+ m = mbufq_dequeue(q);
+ if (m == NULL)
+ break;
+ trash_init(m, q == &nd_mbufq ? MSIZE : nd_clsize, flags);
+ store[i] = m;
+ }
+ return (i);
+}
+
+static void
+nd_buf_release(void *arg, void **store, int count)
+{
+ struct mbufq *q;
+ struct mbuf *m;
+ int i;
+
+ q = arg;
+
+ for (i = 0; i < count; i++) {
+ m = store[i];
+ (void)mbufq_enqueue(q, m);
+ }
+}
+
+static int
+nd_pack_import(void *arg __unused, void **store, int count, int domain __unused,
+ int flags __unused)
+{
+ struct mbuf *m;
+ void *clust;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ m = m_get(MT_DATA, M_NOWAIT);
+ if (m == NULL)
+ break;
+ clust = uma_zalloc(nd_zone_clust, M_NOWAIT);
+ if (clust == NULL) {
+ m_free(m);
+ break;
+ }
+ mb_ctor_clust(clust, nd_clsize, m, 0);
+ store[i] = m;
+ }
+ return (i);
+}
+
+static void
+nd_pack_release(void *arg __unused, void **store, int count)
+{
+ struct mbuf *m;
+ void *clust;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ m = store[i];
+ clust = m->m_ext.ext_buf;
+ uma_zfree(nd_zone_clust, clust);
+ uma_zfree(nd_zone_mbuf, m);
+ }
+}
+
+/*
+ * Free the pre-allocated mbufs and clusters reserved for netdump, and destroy
+ * the corresponding UMA cache zones.
+ */
+void
+netdump_mbuf_drain(void)
+{
+ struct mbuf *m;
+ void *item;
+
+ if (nd_zone_mbuf != NULL) {
+ uma_zdestroy(nd_zone_mbuf);
+ nd_zone_mbuf = NULL;
+ }
+ if (nd_zone_clust != NULL) {
+ uma_zdestroy(nd_zone_clust);
+ nd_zone_clust = NULL;
+ }
+ if (nd_zone_pack != NULL) {
+ uma_zdestroy(nd_zone_pack);
+ nd_zone_pack = NULL;
+ }
+
+ while ((m = mbufq_dequeue(&nd_mbufq)) != NULL)
+ m_free(m);
+ while ((item = mbufq_dequeue(&nd_clustq)) != NULL)
+ uma_zfree(m_getzone(nd_clsize), item);
+}
+
+/*
+ * Callback invoked immediately prior to starting a netdump.
+ */
+void
+netdump_mbuf_dump(void)
+{
+
+ /*
+ * All cluster zones return buffers of the size requested by the
+ * drivers. It's up to the driver to reinitialize the zones if the
+ * MTU of a netdump-enabled interface changes.
+ */
+ printf("netdump: overwriting mbuf zone pointers\n");
+ zone_mbuf = nd_zone_mbuf;
+ zone_clust = nd_zone_clust;
+ zone_pack = nd_zone_pack;
+ zone_jumbop = nd_zone_clust;
+ zone_jumbo9 = nd_zone_clust;
+ zone_jumbo16 = nd_zone_clust;
+}
+
+/*
+ * Reinitialize the netdump mbuf+cluster pool and cache zones.
+ */
+void
+netdump_mbuf_reinit(int nmbuf, int nclust, int clsize)
+{
+ struct mbuf *m;
+ void *item;
+
+ netdump_mbuf_drain();
+
+ nd_clsize = clsize;
+
+ nd_zone_mbuf = uma_zcache_create("netdump_" MBUF_MEM_NAME,
+ MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ nd_buf_import, nd_buf_release,
+ &nd_mbufq, UMA_ZONE_NOBUCKET);
+
+ nd_zone_clust = uma_zcache_create("netdump_" MBUF_CLUSTER_MEM_NAME,
+ clsize, mb_ctor_clust,
+#ifdef INVARIANTS
+ trash_dtor, trash_init, trash_fini,
+#else
+ NULL, NULL, NULL,
+#endif
+ nd_buf_import, nd_buf_release,
+ &nd_clustq, UMA_ZONE_NOBUCKET);
+
+ nd_zone_pack = uma_zcache_create("netdump_" MBUF_PACKET_MEM_NAME,
+ MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
+ nd_pack_import, nd_pack_release,
+ NULL, UMA_ZONE_NOBUCKET);
+
+ while (nmbuf-- > 0) {
+ m = m_get(MT_DATA, M_WAITOK);
+ uma_zfree(nd_zone_mbuf, m);
+ }
+ while (nclust-- > 0) {
+ item = uma_zalloc(m_getzone(nd_clsize), M_WAITOK);
+ uma_zfree(nd_zone_clust, item);
+ }
+}
+#endif /* NETDUMP */
+
/*
* UMA backend page allocator for the jumbo frame zones.
*
@@ -702,18 +896,18 @@ mb_free_ext(struct mbuf *m)
case EXT_MOD_TYPE:
case EXT_DISPOSABLE:
KASSERT(mref->m_ext.ext_free != NULL,
- ("%s: ext_free not set", __func__));
+ ("%s: ext_free not set", __func__));
mref->m_ext.ext_free(mref);
uma_zfree(zone_mbuf, mref);
break;
case EXT_EXTREF:
KASSERT(m->m_ext.ext_free != NULL,
- ("%s: ext_free not set", __func__));
+ ("%s: ext_free not set", __func__));
m->m_ext.ext_free(m);
break;
default:
KASSERT(m->m_ext.ext_type == 0,
- ("%s: unknown ext_type", __func__));
+ ("%s: unknown ext_type", __func__));
}
}
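
The netdump zones above are UMA cache zones whose import/release callbacks draw from a fixed, preallocated pool rather than the page allocator, which is what makes them safe to use at panic time. A minimal userspace sketch of that import/release contract over a free list (names are illustrative, not the UMA API):

#include <stddef.h>

struct item { struct item *next; };
struct pool { struct item *head; };

/* Import: hand out up to count preallocated items; may return fewer.
 * Crucially, it never allocates new memory when the pool runs dry. */
static int
pool_import(struct pool *p, void **store, int count)
{
	int i;

	for (i = 0; i < count && p->head != NULL; i++) {
		store[i] = p->head;
		p->head = p->head->next;
	}
	return (i);
}

/* Release: push items back onto the pool's free list. */
static void
pool_release(struct pool *p, void **store, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct item *it = store[i];

		it->next = p->head;
		p->head = it;
	}
}
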
diff --git a/freebsd/sys/kern/kern_mib.c b/freebsd/sys/kern/kern_mib.c
index 3fd48334..cacf497d 100644
--- a/freebsd/sys/kern/kern_mib.c
+++ b/freebsd/sys/kern/kern_mib.c
@@ -42,7 +42,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_posix.h>
#include <rtems/bsd/local/opt_config.h>
@@ -100,11 +99,6 @@ SYSCTL_ROOT_NODE(OID_AUTO, regression, CTLFLAG_RW, 0,
"Regression test MIB");
#endif
-#ifdef EXT_RESOURCES
-SYSCTL_ROOT_NODE(OID_AUTO, clock, CTLFLAG_RW, 0,
- "Clocks");
-#endif
-
SYSCTL_STRING(_kern, OID_AUTO, ident, CTLFLAG_RD|CTLFLAG_MPSAFE,
kern_ident, 0, "Kernel identifier");
diff --git a/freebsd/sys/kern/kern_module.c b/freebsd/sys/kern/kern_module.c
index 58a2f83a..81686061 100644
--- a/freebsd/sys/kern/kern_module.c
+++ b/freebsd/sys/kern/kern_module.c
@@ -28,8 +28,6 @@
* SUCH DAMAGE.
*/
-#include <rtems/bsd/local/opt_compat.h>
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -99,7 +97,7 @@ module_init(void *arg)
#endif /* __rtems__ */
}
-SYSINIT(module, SI_SUB_KLD, SI_ORDER_FIRST, module_init, 0);
+SYSINIT(module, SI_SUB_KLD, SI_ORDER_FIRST, module_init, NULL);
#ifndef __rtems__
static void
diff --git a/freebsd/sys/kern/kern_synch.c b/freebsd/sys/kern/kern_synch.c
index 59f00ed4..9c0d1206 100644
--- a/freebsd/sys/kern/kern_synch.c
+++ b/freebsd/sys/kern/kern_synch.c
@@ -117,7 +117,7 @@ sleepinit(void *unused)
* vmem tries to lock the sleepq mutexes when free'ing kva, so make sure
* it is available.
*/
-SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, 0);
+SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL);
/*
* General sleep call. Suspends the current thread until a wakeup is
@@ -160,6 +160,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
KASSERT(ident != NULL, ("_sleep: NULL ident"));
#ifndef __rtems__
KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running"));
+ KASSERT(td->td_epochnest == 0, ("sleeping in an epoch section"));
if (priority & PDROP)
KASSERT(lock != NULL && lock != &Giant.lock_object,
("PDROP requires a non-Giant lock"));
@@ -465,8 +466,9 @@ mi_switch(int flags, struct thread *newtd)
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#ifdef KDTRACE_HOOKS
- if ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
- (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED))
+ if (__predict_false(sdt_probes_enabled) &&
+ ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
+ (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED)))
SDT_PROBE0(sched, , , preempt);
#endif
sched_switch(td, newtd, flags);
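
The mi_switch() hunk gates the DTrace SDT probe behind __predict_false(sdt_probes_enabled), so the common context-switch path pays only one predictable branch instead of evaluating the flag tests unconditionally. The same shape in portable C, using the __builtin_expect hint that __predict_false wraps:

#define predict_false(exp)	__builtin_expect((exp) != 0, 0)

extern int probes_enabled;	/* rarely set; toggled by the tracer */
void probe_fire(void);

void
on_switch(int flags)
{
	/* The cheap global flag is tested first; when it is clear,
	 * the more involved flag arithmetic is short-circuited. */
	if (predict_false(probes_enabled) && (flags & 0x3) == 0x1)
		probe_fire();
}
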
diff --git a/freebsd/sys/kern/kern_sysctl.c b/freebsd/sys/kern/kern_sysctl.c
index 8b27a5c6..b4e9711f 100644
--- a/freebsd/sys/kern/kern_sysctl.c
+++ b/freebsd/sys/kern/kern_sysctl.c
@@ -43,7 +43,6 @@
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_capsicum.h>
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_ktrace.h>
#include <sys/param.h>
@@ -928,7 +927,7 @@ sysctl_register_all(void *arg)
sysctl_register_oid(*oidp);
SYSCTL_WUNLOCK();
}
-SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);
+SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, NULL);
/*
* "Staff-functions"
diff --git a/freebsd/sys/kern/subr_bus.c b/freebsd/sys/kern/subr_bus.c
index 8076e7e3..0626ec0a 100644
--- a/freebsd/sys/kern/subr_bus.c
+++ b/freebsd/sys/kern/subr_bus.c
@@ -1863,6 +1863,8 @@ make_device(device_t parent, const char *name, int unit)
return (NULL);
}
}
+ if (parent != NULL && device_has_quiet_children(parent))
+ dev->flags |= DF_QUIET | DF_QUIET_CHILDREN;
dev->ivars = NULL;
dev->softc = NULL;
@@ -2688,6 +2690,15 @@ device_quiet(device_t dev)
}
/**
+ * @brief Set the DF_QUIET_CHILDREN flag for the device
+ */
+void
+device_quiet_children(device_t dev)
+{
+ dev->flags |= DF_QUIET_CHILDREN;
+}
+
+/**
* @brief Clear the DF_QUIET flag for the device
*/
void
@@ -2697,6 +2708,15 @@ device_verbose(device_t dev)
}
/**
+ * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device
+ */
+int
+device_has_quiet_children(device_t dev)
+{
+ return ((dev->flags & DF_QUIET_CHILDREN) != 0);
+}
+
+/**
* @brief Return non-zero if the DF_QUIET flag is set on the device
*/
int
@@ -3760,7 +3780,11 @@ bus_generic_detach(device_t dev)
if (dev->state != DS_ATTACHED)
return (EBUSY);
- TAILQ_FOREACH(child, &dev->children, link) {
+ /*
+ * Detach children in the reverse order.
+ * See bus_generic_suspend for details.
+ */
+ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
if ((error = device_detach(child)) != 0)
return (error);
}
@@ -3780,7 +3804,11 @@ bus_generic_shutdown(device_t dev)
{
device_t child;
- TAILQ_FOREACH(child, &dev->children, link) {
+ /*
+ * Shut down children in the reverse order.
+ * See bus_generic_suspend for details.
+ */
+ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
device_shutdown(child);
}
@@ -3833,15 +3861,23 @@ int
bus_generic_suspend(device_t dev)
{
int error;
- device_t child, child2;
+ device_t child;
- TAILQ_FOREACH(child, &dev->children, link) {
+ /*
+ * Suspend children in the reverse order.
+ * For most buses all children are equal, so the order does not matter.
+ * Other buses, such as acpi, carefully order their child devices to
+ * express implicit dependencies between them. For such buses it is
+ * safer to bring down devices in the reverse order.
+ */
+ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
error = BUS_SUSPEND_CHILD(dev, child);
- if (error) {
- for (child2 = TAILQ_FIRST(&dev->children);
- child2 && child2 != child;
- child2 = TAILQ_NEXT(child2, link))
- BUS_RESUME_CHILD(dev, child2);
+ if (error != 0) {
+ child = TAILQ_NEXT(child, link);
+ if (child != NULL) {
+ TAILQ_FOREACH_FROM(child, &dev->children, link)
+ BUS_RESUME_CHILD(dev, child);
+ }
return (error);
}
}
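
bus_generic_suspend() now walks children in reverse attach order and, on failure, resumes exactly the devices it already suspended by restarting forward from the failing child's successor. The control flow is easier to see over a plain array (hypothetical dev_suspend/dev_resume helpers):

struct dev;
int dev_suspend(struct dev *);
void dev_resume(struct dev *);

static int
suspend_all(struct dev *children[], int n)
{
	int error, i, j;

	/* Suspend in reverse registration order... */
	for (i = n - 1; i >= 0; i--) {
		error = dev_suspend(children[i]);
		if (error != 0) {
			/* ...and on failure, resume the already-suspended
			 * tail (indices i + 1 .. n - 1) in forward order. */
			for (j = i + 1; j < n; j++)
				dev_resume(children[j]);
			return (error);
		}
	}
	return (0);
}
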
@@ -5285,8 +5321,9 @@ sysctl_devices(SYSCTL_HANDLER_ARGS)
u_int namelen = arg2;
int index;
device_t dev;
- struct u_device udev; /* XXX this is a bit big */
+ struct u_device *udev;
int error;
+ char *walker, *ep;
if (namelen != 2)
return (EINVAL);
@@ -5307,24 +5344,45 @@ sysctl_devices(SYSCTL_HANDLER_ARGS)
return (ENOENT);
/*
- * Populate the return array.
+ * Populate the return item, careful not to overflow the buffer.
*/
- bzero(&udev, sizeof(udev));
- udev.dv_handle = (uintptr_t)dev;
- udev.dv_parent = (uintptr_t)dev->parent;
- if (dev->nameunit != NULL)
- strlcpy(udev.dv_name, dev->nameunit, sizeof(udev.dv_name));
- if (dev->desc != NULL)
- strlcpy(udev.dv_desc, dev->desc, sizeof(udev.dv_desc));
- if (dev->driver != NULL && dev->driver->name != NULL)
- strlcpy(udev.dv_drivername, dev->driver->name,
- sizeof(udev.dv_drivername));
- bus_child_pnpinfo_str(dev, udev.dv_pnpinfo, sizeof(udev.dv_pnpinfo));
- bus_child_location_str(dev, udev.dv_location, sizeof(udev.dv_location));
- udev.dv_devflags = dev->devflags;
- udev.dv_flags = dev->flags;
- udev.dv_state = dev->state;
- error = SYSCTL_OUT(req, &udev, sizeof(udev));
+ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO);
+ if (udev == NULL)
+ return (ENOMEM);
+ udev->dv_handle = (uintptr_t)dev;
+ udev->dv_parent = (uintptr_t)dev->parent;
+ udev->dv_devflags = dev->devflags;
+ udev->dv_flags = dev->flags;
+ udev->dv_state = dev->state;
+ walker = udev->dv_fields;
+ ep = walker + sizeof(udev->dv_fields);
+#define CP(src) \
+ if ((src) == NULL) \
+ *walker++ = '\0'; \
+ else { \
+ strlcpy(walker, (src), ep - walker); \
+ walker += strlen(walker) + 1; \
+ } \
+ if (walker >= ep) \
+ break;
+
+ do {
+ CP(dev->nameunit);
+ CP(dev->desc);
+ CP(dev->driver != NULL ? dev->driver->name : NULL);
+ bus_child_pnpinfo_str(dev, walker, ep - walker);
+ walker += strlen(walker) + 1;
+ if (walker >= ep)
+ break;
+ bus_child_location_str(dev, walker, ep - walker);
+ walker += strlen(walker) + 1;
+ if (walker >= ep)
+ break;
+ *walker++ = '\0';
+ } while (0);
+#undef CP
+ error = SYSCTL_OUT(req, udev, sizeof(*udev));
+ free(udev, M_BUS);
return (error);
}
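
The sysctl_devices() rewrite stops shipping fixed-width name/desc/driver fields and instead packs a sequence of NUL-terminated strings into one dv_fields buffer, with the CP() macro advancing a walker and bailing out before overflow. A self-contained sketch of that packing discipline (strlcpy is assumed available, as on the BSDs):

#include <string.h>

/* Pack up to n strings into buf as consecutive NUL-terminated fields.
 * A NULL source becomes an empty field so field positions stay aligned.
 * Returns the number of bytes used; stops early if buf fills up. */
static size_t
pack_fields(char *buf, size_t len, const char **src, int n)
{
	char *walker = buf, *ep = buf + len;
	int i;

	for (i = 0; i < n && walker < ep; i++) {
		if (src[i] == NULL)
			*walker++ = '\0';
		else {
			strlcpy(walker, src[i], (size_t)(ep - walker));
			walker += strlen(walker) + 1;	/* past the NUL */
		}
	}
	return ((size_t)(walker - buf));
}
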
diff --git a/freebsd/sys/kern/subr_gtaskqueue.c b/freebsd/sys/kern/subr_gtaskqueue.c
new file mode 100644
index 00000000..aa5c922d
--- /dev/null
+++ b/freebsd/sys/kern/subr_gtaskqueue.c
@@ -0,0 +1,1059 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * Copyright (c) 2014 Jeff Roberson
+ * Copyright (c) 2016 Matthew Macy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/libkern.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/gtaskqueue.h>
+#include <rtems/bsd/sys/unistd.h>
+#include <machine/stdarg.h>
+#ifdef __rtems__
+#include <machine/rtems-bsd-thread.h>
+#endif /* __rtems__ */
+
+static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
+static void gtaskqueue_thread_enqueue(void *);
+static void gtaskqueue_thread_loop(void *arg);
+
+TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);
+TASKQGROUP_DEFINE(config, 1, 1);
+
+struct gtaskqueue_busy {
+ struct gtask *tb_running;
+ TAILQ_ENTRY(gtaskqueue_busy) tb_link;
+};
+
+static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
+
+struct gtaskqueue {
+ STAILQ_HEAD(, gtask) tq_queue;
+ gtaskqueue_enqueue_fn tq_enqueue;
+ void *tq_context;
+ char *tq_name;
+ TAILQ_HEAD(, gtaskqueue_busy) tq_active;
+ struct mtx tq_mutex;
+ struct thread **tq_threads;
+ int tq_tcount;
+ int tq_spin;
+ int tq_flags;
+ int tq_callouts;
+ taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
+ void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
+};
+
+#define TQ_FLAGS_ACTIVE (1 << 0)
+#define TQ_FLAGS_BLOCKED (1 << 1)
+#define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)
+
+#define DT_CALLOUT_ARMED (1 << 0)
+
+#define TQ_LOCK(tq) \
+ do { \
+ if ((tq)->tq_spin) \
+ mtx_lock_spin(&(tq)->tq_mutex); \
+ else \
+ mtx_lock(&(tq)->tq_mutex); \
+ } while (0)
+#define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED)
+
+#define TQ_UNLOCK(tq) \
+ do { \
+ if ((tq)->tq_spin) \
+ mtx_unlock_spin(&(tq)->tq_mutex); \
+ else \
+ mtx_unlock(&(tq)->tq_mutex); \
+ } while (0)
+#define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
+
+#ifdef INVARIANTS
+static void
+gtask_dump(struct gtask *gtask)
+{
+ printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
+ gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context);
+}
+#endif
+
+static __inline int
+TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
+ int t)
+{
+ if (tq->tq_spin)
+ return (msleep_spin(p, m, wm, t));
+ return (msleep(p, m, pri, wm, t));
+}
+
+static struct gtaskqueue *
+_gtaskqueue_create(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue, void *context,
+ int mtxflags, const char *mtxname __unused)
+{
+ struct gtaskqueue *queue;
+ char *tq_name;
+
+ tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
+ if (!tq_name)
+ return (NULL);
+
+ snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");
+
+ queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
+ if (!queue) {
+ free(tq_name, M_GTASKQUEUE);
+ return (NULL);
+ }
+
+ STAILQ_INIT(&queue->tq_queue);
+ TAILQ_INIT(&queue->tq_active);
+ queue->tq_enqueue = enqueue;
+ queue->tq_context = context;
+ queue->tq_name = tq_name;
+ queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
+ queue->tq_flags |= TQ_FLAGS_ACTIVE;
+ if (enqueue == gtaskqueue_thread_enqueue)
+ queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
+ mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
+
+ return (queue);
+}
+
+
+/*
+ * Signal a taskqueue thread to terminate.
+ */
+static void
+gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
+{
+
+ while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
+ wakeup(tq);
+ TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
+ }
+}
+
+static void
+gtaskqueue_free(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
+ gtaskqueue_terminate(queue->tq_threads, queue);
+ KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
+ KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
+ mtx_destroy(&queue->tq_mutex);
+ free(queue->tq_threads, M_GTASKQUEUE);
+ free(queue->tq_name, M_GTASKQUEUE);
+ free(queue, M_GTASKQUEUE);
+}
+
+int
+grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
+{
+#ifdef INVARIANTS
+ if (queue == NULL) {
+ gtask_dump(gtask);
+ panic("queue == NULL");
+ }
+#endif
+ TQ_LOCK(queue);
+ if (gtask->ta_flags & TASK_ENQUEUED) {
+ TQ_UNLOCK(queue);
+ return (0);
+ }
+ STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
+ gtask->ta_flags |= TASK_ENQUEUED;
+ TQ_UNLOCK(queue);
+ if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
+ queue->tq_enqueue(queue->tq_context);
+ return (0);
+}
+
+static void
+gtaskqueue_task_nop_fn(void *context)
+{
+}
+
+/*
+ * Block until all currently queued tasks in this taskqueue
+ * have begun execution. Tasks queued during execution of
+ * this function are ignored.
+ */
+static void
+gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
+{
+ struct gtask t_barrier;
+
+ if (STAILQ_EMPTY(&queue->tq_queue))
+ return;
+
+ /*
+ * Enqueue our barrier after all current tasks, but with
+ * the highest priority so that newly queued tasks cannot
+ * pass it. Because of the high priority, we can not use
+ * taskqueue_enqueue_locked directly (which drops the lock
+ * anyway) so just insert it at tail while we have the
+ * queue lock.
+ */
+ GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
+ STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
+ t_barrier.ta_flags |= TASK_ENQUEUED;
+
+ /*
+ * Once the barrier has executed, all previously queued tasks
+ * have completed or are currently executing.
+ */
+ while (t_barrier.ta_flags & TASK_ENQUEUED)
+ TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
+}
+
+/*
+ * Block until all currently executing tasks for this taskqueue
+ * complete. Tasks that begin execution during the execution
+ * of this function are ignored.
+ */
+static void
+gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
+{
+ struct gtaskqueue_busy tb_marker, *tb_first;
+
+ if (TAILQ_EMPTY(&queue->tq_active))
+ return;
+
+ /* Block taskq_terminate().*/
+ queue->tq_callouts++;
+
+ /*
+ * Wait for all currently executing taskqueue threads
+ * to go idle.
+ */
+ tb_marker.tb_running = TB_DRAIN_WAITER;
+ TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
+ while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
+ TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
+ TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);
+
+ /*
+ * Wakeup any other drain waiter that happened to queue up
+ * without any intervening active thread.
+ */
+ tb_first = TAILQ_FIRST(&queue->tq_active);
+ if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
+ wakeup(tb_first);
+
+ /* Release taskqueue_terminate(). */
+ queue->tq_callouts--;
+ if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
+ wakeup_one(queue->tq_threads);
+}
+
+void
+gtaskqueue_block(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags |= TQ_FLAGS_BLOCKED;
+ TQ_UNLOCK(queue);
+}
+
+void
+gtaskqueue_unblock(struct gtaskqueue *queue)
+{
+
+ TQ_LOCK(queue);
+ queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
+ if (!STAILQ_EMPTY(&queue->tq_queue))
+ queue->tq_enqueue(queue->tq_context);
+ TQ_UNLOCK(queue);
+}
+
+static void
+gtaskqueue_run_locked(struct gtaskqueue *queue)
+{
+ struct gtaskqueue_busy tb;
+ struct gtaskqueue_busy *tb_first;
+ struct gtask *gtask;
+
+ KASSERT(queue != NULL, ("tq is NULL"));
+ TQ_ASSERT_LOCKED(queue);
+ tb.tb_running = NULL;
+
+ while (STAILQ_FIRST(&queue->tq_queue)) {
+ TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);
+
+ /*
+ * Carefully remove the first task from the queue and
+ * clear its TASK_ENQUEUED flag
+ */
+ gtask = STAILQ_FIRST(&queue->tq_queue);
+ KASSERT(gtask != NULL, ("task is NULL"));
+ STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
+ gtask->ta_flags &= ~TASK_ENQUEUED;
+ tb.tb_running = gtask;
+ TQ_UNLOCK(queue);
+
+ KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
+ gtask->ta_func(gtask->ta_context);
+
+ TQ_LOCK(queue);
+ tb.tb_running = NULL;
+ wakeup(gtask);
+
+ TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
+ tb_first = TAILQ_FIRST(&queue->tq_active);
+ if (tb_first != NULL &&
+ tb_first->tb_running == TB_DRAIN_WAITER)
+ wakeup(tb_first);
+ }
+}
+
+static int
+task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ struct gtaskqueue_busy *tb;
+
+ TQ_ASSERT_LOCKED(queue);
+ TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
+ if (tb->tb_running == gtask)
+ return (1);
+ }
+ return (0);
+}
+
+static int
+gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
+{
+
+ if (gtask->ta_flags & TASK_ENQUEUED)
+ STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
+ gtask->ta_flags &= ~TASK_ENQUEUED;
+ return (task_is_running(queue, gtask) ? EBUSY : 0);
+}
+
+int
+gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ int error;
+
+ TQ_LOCK(queue);
+ error = gtaskqueue_cancel_locked(queue, gtask);
+ TQ_UNLOCK(queue);
+
+ return (error);
+}
+
+void
+gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
+{
+
+ if (!queue->tq_spin)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+
+ TQ_LOCK(queue);
+ while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
+ TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
+ TQ_UNLOCK(queue);
+}
+
+void
+gtaskqueue_drain_all(struct gtaskqueue *queue)
+{
+
+ if (!queue->tq_spin)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+
+ TQ_LOCK(queue);
+ gtaskqueue_drain_tq_queue(queue);
+ gtaskqueue_drain_tq_active(queue);
+ TQ_UNLOCK(queue);
+}
+
+static int
+_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
+ cpuset_t *mask, const char *name, va_list ap)
+{
+ char ktname[MAXCOMLEN + 1];
+ struct thread *td;
+ struct gtaskqueue *tq;
+ int i, error;
+
+ if (count <= 0)
+ return (EINVAL);
+
+ vsnprintf(ktname, sizeof(ktname), name, ap);
+ tq = *tqp;
+
+ tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
+ M_NOWAIT | M_ZERO);
+ if (tq->tq_threads == NULL) {
+ printf("%s: no memory for %s threads\n", __func__, ktname);
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (count == 1)
+ error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
+ &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
+ else
+ error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
+ &tq->tq_threads[i], RFSTOPPED, 0,
+ "%s_%d", ktname, i);
+ if (error) {
+ /* should be ok to continue, taskqueue_free will dtrt */
+ printf("%s: kthread_add(%s): error %d", __func__,
+ ktname, error);
+ tq->tq_threads[i] = NULL; /* paranoid */
+ } else
+ tq->tq_tcount++;
+ }
+ for (i = 0; i < count; i++) {
+ if (tq->tq_threads[i] == NULL)
+ continue;
+ td = tq->tq_threads[i];
+ if (mask) {
+#ifndef __rtems__
+ error = cpuset_setthread(td->td_tid, mask);
+ /*
+ * Failing to pin is rarely an actual fatal error;
+ * it'll just affect performance.
+ */
+ if (error)
+ printf("%s: curthread=%llu: can't pin; "
+ "error=%d\n",
+ __func__,
+ (unsigned long long) td->td_tid,
+ error);
+#else /* __rtems__ */
+ rtems_status_code sc;
+
+ sc = rtems_task_set_affinity(rtems_bsd_get_task_id(td),
+ sizeof(*mask), mask);
+ if (sc != RTEMS_SUCCESSFUL)
+ printf("%s: cannot set affinity\n", __func__);
+#endif /* __rtems__ */
+ }
+#ifndef __rtems__
+ thread_lock(td);
+ sched_prio(td, pri);
+ sched_add(td, SRQ_BORING);
+ thread_unlock(td);
+#endif /* __rtems__ */
+ }
+
+ return (0);
+}
+
+static int
+gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
+ const char *name, ...)
+{
+ va_list ap;
+ int error;
+
+ va_start(ap, name);
+ error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
+ va_end(ap);
+ return (error);
+}
+
+static inline void
+gtaskqueue_run_callback(struct gtaskqueue *tq,
+ enum taskqueue_callback_type cb_type)
+{
+ taskqueue_callback_fn tq_callback;
+
+ TQ_ASSERT_UNLOCKED(tq);
+ tq_callback = tq->tq_callbacks[cb_type];
+ if (tq_callback != NULL)
+ tq_callback(tq->tq_cb_contexts[cb_type]);
+}
+
+static void
+gtaskqueue_thread_loop(void *arg)
+{
+ struct gtaskqueue **tqp, *tq;
+
+ tqp = arg;
+ tq = *tqp;
+ gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
+ TQ_LOCK(tq);
+ while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
+ /* XXX ? */
+ gtaskqueue_run_locked(tq);
+ /*
+ * Because taskqueue_run() can drop tq_mutex, we need to
+ * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
+ * meantime, which means we missed a wakeup.
+ */
+ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
+ break;
+ TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
+ }
+ gtaskqueue_run_locked(tq);
+ /*
+ * This thread is on its way out, so just drop the lock temporarily
+ * in order to call the shutdown callback. This allows the callback
+ * to look at the taskqueue, even just before it dies.
+ */
+ TQ_UNLOCK(tq);
+ gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
+ TQ_LOCK(tq);
+
+ /* rendezvous with thread that asked us to terminate */
+ tq->tq_tcount--;
+ wakeup_one(tq->tq_threads);
+ TQ_UNLOCK(tq);
+ kthread_exit();
+}
+
+static void
+gtaskqueue_thread_enqueue(void *context)
+{
+ struct gtaskqueue **tqp, *tq;
+
+ tqp = context;
+ tq = *tqp;
+ wakeup_one(tq);
+}
+
+
+static struct gtaskqueue *
+gtaskqueue_create_fast(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue, void *context)
+{
+ return _gtaskqueue_create(name, mflags, enqueue, context,
+ MTX_SPIN, "fast_taskqueue");
+}
+
+
+struct taskqgroup_cpu {
+ LIST_HEAD(, grouptask) tgc_tasks;
+ struct gtaskqueue *tgc_taskq;
+ int tgc_cnt;
+ int tgc_cpu;
+};
+
+struct taskqgroup {
+ struct taskqgroup_cpu tqg_queue[MAXCPU];
+ struct mtx tqg_lock;
+ const char * tqg_name;
+ int tqg_adjusting;
+ int tqg_stride;
+ int tqg_cnt;
+};
+
+struct taskq_bind_task {
+ struct gtask bt_task;
+ int bt_cpuid;
+};
+
+static void
+taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
+{
+ struct taskqgroup_cpu *qcpu;
+
+ qcpu = &qgroup->tqg_queue[idx];
+ LIST_INIT(&qcpu->tgc_tasks);
+ qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
+ taskqueue_thread_enqueue, &qcpu->tgc_taskq);
+ gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
+ "%s_%d", qgroup->tqg_name, idx);
+ qcpu->tgc_cpu = cpu;
+}
+
+static void
+taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
+{
+
+ gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
+}
+
+/*
+ * Find the taskq with least # of tasks that doesn't currently have any
+ * other queues from the uniq identifier.
+ */
+static int
+taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
+{
+ struct grouptask *n;
+ int i, idx, mincnt;
+ int strict;
+
+ mtx_assert(&qgroup->tqg_lock, MA_OWNED);
+ if (qgroup->tqg_cnt == 0)
+ return (0);
+ idx = -1;
+ mincnt = INT_MAX;
+ /*
+ * Two passes; First scan for a queue with the least tasks that
+ * does not already service this uniq id. If that fails simply find
+ * the queue with the least total tasks;
+ */
+ for (strict = 1; mincnt == INT_MAX; strict = 0) {
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
+ continue;
+ if (strict) {
+ LIST_FOREACH(n,
+ &qgroup->tqg_queue[i].tgc_tasks, gt_list)
+ if (n->gt_uniq == uniq)
+ break;
+ if (n != NULL)
+ continue;
+ }
+ mincnt = qgroup->tqg_queue[i].tgc_cnt;
+ idx = i;
+ }
+ }
+ if (idx == -1)
+ panic("taskqgroup_find: Failed to pick a qid.");
+
+ return (idx);
+}
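+
+/*
+ * Worked example for taskqgroup_find() (illustrative only): with queue
+ * task counts {2, 1, 1} and queue 1 already servicing this uniq id, the
+ * strict pass skips queue 1 and selects queue 2 with count 1.  Only when
+ * every queue already services the uniq id does the loop fall back to
+ * the non-strict pass, which simply picks the least-loaded queue.
+ */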
+
+#ifndef __rtems__
+/*
+ * smp_started is unusable since it is not set for UP kernels or even for
+ * SMP kernels when there is 1 CPU. This is usually handled by adding a
+ * (mp_ncpus == 1) test, but that would be broken here since we need
+ * to synchronize with the SI_SUB_SMP ordering.  Even in the pure SMP case
+ * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
+ *
+ * So maintain our own flag. It must be set after all CPUs are started
+ * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
+ * adjustment is properly delayed. SI_ORDER_FOURTH is clearly before
+ * SI_ORDER_ANY and unclearly after the CPUs are started. It would be
+ * simpler for adjustment to pass a flag indicating if it is delayed.
+ */
+
+static int tqg_smp_started;
+
+static void
+tqg_record_smp_started(void *arg)
+{
+ tqg_smp_started = 1;
+}
+
+SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
+ tqg_record_smp_started, NULL);
+#else /* __rtems__ */
+#define tqg_smp_started 1
+#endif /* __rtems__ */
+
+void
+taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
+ void *uniq, int irq, const char *name)
+{
+#ifndef __rtems__
+ cpuset_t mask;
+ int qid, error;
+#else /* __rtems__ */
+ int qid;
+#endif /* __rtems__ */
+
+ gtask->gt_uniq = uniq;
+ snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
+ gtask->gt_irq = irq;
+ gtask->gt_cpu = -1;
+ mtx_lock(&qgroup->tqg_lock);
+ qid = taskqgroup_find(qgroup, uniq);
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+#ifndef __rtems__
+ if (irq != -1 && tqg_smp_started) {
+ gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
+ CPU_ZERO(&mask);
+ CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
+ mtx_unlock(&qgroup->tqg_lock);
+ error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+ if (error)
+ printf("%s: setaffinity failed for %s: %d\n", __func__, gtask->gt_name, error);
+ } else
+#else /* __rtems__ */
+ BSD_ASSERT(irq == -1);
+#endif /* __rtems__ */
+ mtx_unlock(&qgroup->tqg_lock);
+}
+
+static void
+taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+#ifndef __rtems__
+ cpuset_t mask;
+ int qid, cpu, error;
+#else /* __rtems__ */
+ int qid;
+#endif /* __rtems__ */
+
+ mtx_lock(&qgroup->tqg_lock);
+ qid = taskqgroup_find(qgroup, gtask->gt_uniq);
+#ifndef __rtems__
+ cpu = qgroup->tqg_queue[qid].tgc_cpu;
+ if (gtask->gt_irq != -1) {
+ mtx_unlock(&qgroup->tqg_lock);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
+ mtx_lock(&qgroup->tqg_lock);
+ if (error)
+ printf("%s: %s setaffinity failed: %d\n", __func__, gtask->gt_name, error);
+
+ }
+#else /* __rtems__ */
+ BSD_ASSERT(gtask->gt_irq == -1);
+#endif /* __rtems__ */
+ qgroup->tqg_queue[qid].tgc_cnt++;
+
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
+ gt_list);
+ MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ mtx_unlock(&qgroup->tqg_lock);
+}
+
+int
+taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
+ void *uniq, int cpu, int irq, const char *name)
+{
+#ifndef __rtems__
+ cpuset_t mask;
+ int i, qid, error;
+#else /* __rtems__ */
+ int i, qid;
+#endif /* __rtems__ */
+
+ qid = -1;
+ gtask->gt_uniq = uniq;
+ snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
+ gtask->gt_irq = irq;
+ gtask->gt_cpu = cpu;
+ mtx_lock(&qgroup->tqg_lock);
+ if (tqg_smp_started) {
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+ qid = i;
+ break;
+ }
+ if (qid == -1) {
+ mtx_unlock(&qgroup->tqg_lock);
+ printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
+ return (EINVAL);
+ }
+ } else
+ qid = 0;
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+#ifndef __rtems__
+ cpu = qgroup->tqg_queue[qid].tgc_cpu;
+#endif /* __rtems__ */
+ mtx_unlock(&qgroup->tqg_lock);
+
+#ifndef __rtems__
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ if (irq != -1 && tqg_smp_started) {
+ error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+ if (error)
+ printf("%s: setaffinity failed: %d\n", __func__, error);
+ }
+#else /* __rtems__ */
+ BSD_ASSERT(irq == -1);
+#endif /* __rtems__ */
+ return (0);
+}
+
+static int
+taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+#ifndef __rtems__
+ cpuset_t mask;
+ int i, qid, irq, cpu, error;
+#else /* __rtems__ */
+ int i, qid, irq, cpu;
+#endif /* __rtems__ */
+
+ qid = -1;
+ irq = gtask->gt_irq;
+ cpu = gtask->gt_cpu;
+ MPASS(tqg_smp_started);
+ mtx_lock(&qgroup->tqg_lock);
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+ qid = i;
+ break;
+ }
+ if (qid == -1) {
+ mtx_unlock(&qgroup->tqg_lock);
+ printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
+ return (EINVAL);
+ }
+ qgroup->tqg_queue[qid].tgc_cnt++;
+ LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+ MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
+ gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+ mtx_unlock(&qgroup->tqg_lock);
+
+#ifndef __rtems__
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ if (irq != -1) {
+ error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+ if (error)
+ printf("%s: setaffinity failed: %d\n", __func__, error);
+ }
+#else /* __rtems__ */
+ BSD_ASSERT(irq == -1);
+#endif /* __rtems__ */
+ return (0);
+}
+
+void
+taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+ int i;
+
+ mtx_lock(&qgroup->tqg_lock);
+ for (i = 0; i < qgroup->tqg_cnt; i++)
+ if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
+ break;
+ if (i == qgroup->tqg_cnt)
+ panic("taskqgroup_detach: task %s not in group\n", gtask->gt_name);
+ qgroup->tqg_queue[i].tgc_cnt--;
+ LIST_REMOVE(gtask, gt_list);
+ mtx_unlock(&qgroup->tqg_lock);
+ gtask->gt_taskqueue = NULL;
+}
+
+static void
+taskqgroup_binder(void *ctx)
+{
+ struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
+ cpuset_t mask;
+#ifndef __rtems__
+ int error;
+#else /* __rtems__ */
+ rtems_status_code sc;
+#endif /* __rtems__ */
+
+ CPU_ZERO(&mask);
+ CPU_SET(gtask->bt_cpuid, &mask);
+#ifndef __rtems__
+ error = cpuset_setthread(curthread->td_tid, &mask);
+ thread_lock(curthread);
+ sched_bind(curthread, gtask->bt_cpuid);
+ thread_unlock(curthread);
+
+ if (error)
+ printf("%s: setaffinity failed: %d\n", __func__,
+ error);
+#else /* __rtems__ */
+ sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(mask), &mask);
+ if (sc != RTEMS_SUCCESSFUL)
+ printf("%s: cannot set affinity\n", __func__);
+#endif /* __rtems__ */
+ free(gtask, M_DEVBUF);
+}
+
+static void
+taskqgroup_bind(struct taskqgroup *qgroup)
+{
+ struct taskq_bind_task *gtask;
+ int i;
+
+ /*
+ * Bind taskqueue threads to specific CPUs, if they have been assigned
+ * one.
+ */
+ if (qgroup->tqg_cnt == 1)
+ return;
+
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK);
+ GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
+ gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
+ grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
+ &gtask->bt_task);
+ }
+}
+
+static int
+_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
+{
+ LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
+ struct grouptask *gtask;
+ int i, k, old_cnt, old_cpu, cpu;
+
+ mtx_assert(&qgroup->tqg_lock, MA_OWNED);
+
+ if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) {
+ printf("%s: failed cnt: %d stride: %d "
+ "mp_ncpus: %d tqg_smp_started: %d\n",
+ __func__, cnt, stride, mp_ncpus, tqg_smp_started);
+ return (EINVAL);
+ }
+ if (qgroup->tqg_adjusting) {
+ printf("%s failed: adjusting\n", __func__);
+ return (EBUSY);
+ }
+ qgroup->tqg_adjusting = 1;
+ old_cnt = qgroup->tqg_cnt;
+ old_cpu = 0;
+ if (old_cnt < cnt)
+ old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
+ mtx_unlock(&qgroup->tqg_lock);
+ /*
+ * Set up queue for tasks added before boot.
+ */
+ if (old_cnt == 0) {
+ LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
+ grouptask, gt_list);
+ qgroup->tqg_queue[0].tgc_cnt = 0;
+ }
+
+ /*
+ * If new taskq threads have been added.
+ */
+ cpu = old_cpu;
+ for (i = old_cnt; i < cnt; i++) {
+ taskqgroup_cpu_create(qgroup, i, cpu);
+
+ for (k = 0; k < stride; k++)
+ cpu = CPU_NEXT(cpu);
+ }
+ mtx_lock(&qgroup->tqg_lock);
+ qgroup->tqg_cnt = cnt;
+ qgroup->tqg_stride = stride;
+
+ /*
+ * Adjust drivers to use new taskqs.
+ */
+ for (i = 0; i < old_cnt; i++) {
+ while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
+ LIST_REMOVE(gtask, gt_list);
+ qgroup->tqg_queue[i].tgc_cnt--;
+ LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
+ }
+ }
+ mtx_unlock(&qgroup->tqg_lock);
+
+ while ((gtask = LIST_FIRST(&gtask_head))) {
+ LIST_REMOVE(gtask, gt_list);
+ if (gtask->gt_cpu == -1)
+ taskqgroup_attach_deferred(qgroup, gtask);
+ else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
+ taskqgroup_attach_deferred(qgroup, gtask);
+ }
+
+#ifdef INVARIANTS
+ mtx_lock(&qgroup->tqg_lock);
+ for (i = 0; i < qgroup->tqg_cnt; i++) {
+ MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL);
+ LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list)
+ MPASS(gtask->gt_taskqueue != NULL);
+ }
+ mtx_unlock(&qgroup->tqg_lock);
+#endif
+ /*
+ * If taskq thread count has been reduced.
+ */
+ for (i = cnt; i < old_cnt; i++)
+ taskqgroup_cpu_remove(qgroup, i);
+
+ taskqgroup_bind(qgroup);
+
+ mtx_lock(&qgroup->tqg_lock);
+ qgroup->tqg_adjusting = 0;
+
+ return (0);
+}
+
+int
+taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
+{
+ int error;
+
+ mtx_lock(&qgroup->tqg_lock);
+ error = _taskqgroup_adjust(qgroup, cnt, stride);
+ mtx_unlock(&qgroup->tqg_lock);
+
+ return (error);
+}
+
+struct taskqgroup *
+taskqgroup_create(const char *name)
+{
+ struct taskqgroup *qgroup;
+
+ qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
+ mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
+ qgroup->tqg_name = name;
+ LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);
+
+ return (qgroup);
+}
+
+void
+taskqgroup_destroy(struct taskqgroup *qgroup)
+{
+
+}
+
+void
+taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
+ const char *name)
+{
+
+ GROUPTASK_INIT(gtask, 0, fn, ctx);
+ taskqgroup_attach(qgroup_config, gtask, gtask, -1, name);
+}
+
+void
+taskqgroup_config_gtask_deinit(struct grouptask *gtask)
+{
+ taskqgroup_detach(qgroup_config, gtask);
+}
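
For orientation, a minimal sketch of how a consumer is expected to drive the
taskqgroup API above (hypothetical driver code, not part of the change; "sc"
stands for a driver softc, and qgroup_net for a group assumed to be created
elsewhere, e.g. via TASKQGROUP_DEFINE(net, mp_ncpus, 1), whose SYSINIT calls
taskqgroup_adjust() once all CPUs are up):

	static struct grouptask rx_gtask;

	static void
	rx_task_fn(void *ctx)
	{
		/* Runs on the gtaskqueue thread chosen by taskqgroup_find(). */
	}

	/* Setup: uniq = sc groups related tasks; irq = -1 means no IRQ affinity. */
	GROUPTASK_INIT(&rx_gtask, 0, rx_task_fn, sc);
	taskqgroup_attach(qgroup_net, &rx_gtask, sc, -1, "rx");

	/* Hot path: enqueue on the per-CPU queue the task was assigned to. */
	GROUPTASK_ENQUEUE(&rx_gtask);

	/* Teardown. */
	taskqgroup_detach(qgroup_net, &rx_gtask);
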
diff --git a/freebsd/sys/kern/subr_lock.c b/freebsd/sys/kern/subr_lock.c
index 4f577b65..c2587cd0 100644
--- a/freebsd/sys/kern/subr_lock.c
+++ b/freebsd/sys/kern/subr_lock.c
@@ -171,8 +171,10 @@ void
lock_delay_default_init(struct lock_delay_config *lc)
{
- lc->base = lock_roundup_2(mp_ncpus) / 4;
- lc->max = lc->base * 1024;
+ lc->base = 1;
+ lc->max = lock_roundup_2(mp_ncpus) * 256;
+ if (lc->max > 32678)
+ lc->max = 32678;
}
#endif /* __rtems__ */
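
Taking mp_ncpus = 64 as a worked example of the retuned delay parameters
(illustrative arithmetic only):

	old: base = lock_roundup_2(64) / 4 = 16;  max = 16 * 1024 = 16384
	new: base = 1;                            max = 64 * 256  = 16384

The ceiling is unchanged for mid-sized machines while the initial spin becomes
much shorter; the cap of 32678 (apparently intended as 32768) only engages
beyond 128 CPUs, where mp_ncpus * 256 exceeds it.
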
diff --git a/freebsd/sys/kern/subr_pcpu.c b/freebsd/sys/kern/subr_pcpu.c
index 67a1f528..1b866e3a 100644
--- a/freebsd/sys/kern/subr_pcpu.c
+++ b/freebsd/sys/kern/subr_pcpu.c
@@ -130,7 +130,7 @@ dpcpu_startup(void *dummy __unused)
TAILQ_INSERT_HEAD(&dpcpu_head, df, df_link);
sx_init(&dpcpu_lock, "dpcpu alloc lock");
}
-SYSINIT(dpcpu, SI_SUB_KLD, SI_ORDER_FIRST, dpcpu_startup, 0);
+SYSINIT(dpcpu, SI_SUB_KLD, SI_ORDER_FIRST, dpcpu_startup, NULL);
#endif /* __rtems__ */
/*
diff --git a/freebsd/sys/kern/subr_prf.c b/freebsd/sys/kern/subr_prf.c
index 12a0825d..4c45bcfe 100644
--- a/freebsd/sys/kern/subr_prf.c
+++ b/freebsd/sys/kern/subr_prf.c
@@ -687,6 +687,7 @@ kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_lis
int stop = 0, retval = 0;
num = 0;
+ q = NULL;
if (!func)
d = (char *) arg;
else
diff --git a/freebsd/sys/kern/subr_sleepqueue.c b/freebsd/sys/kern/subr_sleepqueue.c
index b9de1580..65bd8dcc 100644
--- a/freebsd/sys/kern/subr_sleepqueue.c
+++ b/freebsd/sys/kern/subr_sleepqueue.c
@@ -433,7 +433,7 @@ sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
int flags)
{
#ifndef __rtems__
- struct sleepqueue_chain *sc;
+ struct sleepqueue_chain *sc __unused;
struct thread *td;
sbintime_t pr1;
@@ -982,7 +982,7 @@ sleepq_type(void *wchan)
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
- struct sleepqueue_chain *sc;
+ struct sleepqueue_chain *sc __unused;
#ifdef __rtems__
Thread_Control *thread;
ISR_lock_Context lock_context;
@@ -1022,7 +1022,6 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
LIST_REMOVE(td->td_sleepqueue, sq_hash);
#ifdef __rtems__
- (void)sc;
thread = td->td_thread;
_ISR_lock_ISR_disable(&lock_context);
_Thread_Wait_acquire_default_critical(thread, &lock_context);
@@ -1228,7 +1227,7 @@ sleepq_remove_matching(struct sleepqueue *sq, int queue,
static void
sleepq_timeout(void *arg)
{
- struct sleepqueue_chain *sc;
+ struct sleepqueue_chain *sc __unused;
struct sleepqueue *sq;
struct thread *td;
void *wchan;
diff --git a/freebsd/sys/kern/subr_uio.c b/freebsd/sys/kern/subr_uio.c
index 58db0ffc..c14aea8d 100644
--- a/freebsd/sys/kern/subr_uio.c
+++ b/freebsd/sys/kern/subr_uio.c
@@ -222,9 +222,9 @@ uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
int error;
#endif /* __rtems__ */
- error = 0;
-
#ifndef __rtems__
+ save = error = 0;
+
KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
("uiomove: mode"));
KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
@@ -290,7 +290,7 @@ uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
}
out:
#ifndef __rtems__
- if (uio->uio_segflg == UIO_USERSPACE)
+ if (save)
curthread_pflags_restore(save);
#endif /* __rtems__ */
return (error);
diff --git a/freebsd/sys/kern/sys_generic.c b/freebsd/sys/kern/sys_generic.c
index 0d5193c7..9e41f0f6 100644
--- a/freebsd/sys/kern/sys_generic.c
+++ b/freebsd/sys/kern/sys_generic.c
@@ -42,7 +42,6 @@
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_capsicum.h>
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_ktrace.h>
#include <sys/param.h>
@@ -201,9 +200,7 @@ struct read_args {
};
#endif
int
-sys_read(td, uap)
- struct thread *td;
- struct read_args *uap;
+sys_read(struct thread *td, struct read_args *uap)
{
struct uio auio;
struct iovec aiov;
@@ -296,10 +293,9 @@ int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
struct file *fp;
- cap_rights_t rights;
int error;
- error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
+ error = fget_read(td, fd, &cap_read_rights, &fp);
if (error)
return (error);
error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
@@ -333,17 +329,12 @@ sys_preadv(struct thread *td, struct preadv_args *uap)
}
int
-kern_preadv(td, fd, auio, offset)
- struct thread *td;
- int fd;
- struct uio *auio;
- off_t offset;
+kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
struct file *fp;
- cap_rights_t rights;
int error;
- error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
+ error = fget_read(td, fd, &cap_pread_rights, &fp);
if (error)
return (error);
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
@@ -362,13 +353,8 @@ kern_preadv(td, fd, auio, offset)
* from a file using the passed in uio, offset, and flags.
*/
static int
-dofileread(td, fd, fp, auio, offset, flags)
- struct thread *td;
- int fd;
- struct file *fp;
- struct uio *auio;
- off_t offset;
- int flags;
+dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
+ off_t offset, int flags)
{
ssize_t cnt;
int error;
@@ -415,9 +401,7 @@ struct write_args {
};
#endif
int
-sys_write(td, uap)
- struct thread *td;
- struct write_args *uap;
+sys_write(struct thread *td, struct write_args *uap)
{
struct uio auio;
struct iovec aiov;
@@ -511,10 +495,9 @@ int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
struct file *fp;
- cap_rights_t rights;
int error;
- error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
+ error = fget_write(td, fd, &cap_write_rights, &fp);
if (error)
return (error);
error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
@@ -548,17 +531,12 @@ sys_pwritev(struct thread *td, struct pwritev_args *uap)
}
int
-kern_pwritev(td, fd, auio, offset)
- struct thread *td;
- struct uio *auio;
- int fd;
- off_t offset;
+kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
struct file *fp;
- cap_rights_t rights;
int error;
- error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
+ error = fget_write(td, fd, &cap_pwrite_rights, &fp);
if (error)
return (error);
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
@@ -577,13 +555,8 @@ kern_pwritev(td, fd, auio, offset)
* a file using the passed in uio, offset, and flags.
*/
static int
-dofilewrite(td, fd, fp, auio, offset, flags)
- struct thread *td;
- int fd;
- struct file *fp;
- struct uio *auio;
- off_t offset;
- int flags;
+dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
+ off_t offset, int flags)
{
ssize_t cnt;
int error;
@@ -632,19 +605,15 @@ dofilewrite(td, fd, fp, auio, offset, flags)
* descriptor isn't writable.
*/
int
-kern_ftruncate(td, fd, length)
- struct thread *td;
- int fd;
- off_t length;
+kern_ftruncate(struct thread *td, int fd, off_t length)
{
struct file *fp;
- cap_rights_t rights;
int error;
AUDIT_ARG_FD(fd);
if (length < 0)
return (EINVAL);
- error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
+ error = fget(td, fd, &cap_ftruncate_rights, &fp);
if (error)
return (error);
AUDIT_ARG_FILE(td->td_proc, fp);
@@ -665,9 +634,7 @@ struct ftruncate_args {
};
#endif
int
-sys_ftruncate(td, uap)
- struct thread *td;
- struct ftruncate_args *uap;
+sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{
return (kern_ftruncate(td, uap->fd, uap->length));
@@ -681,9 +648,7 @@ struct oftruncate_args {
};
#endif
int
-oftruncate(td, uap)
- struct thread *td;
- struct oftruncate_args *uap;
+oftruncate(struct thread *td, struct oftruncate_args *uap)
{
return (kern_ftruncate(td, uap->fd, uap->length));
@@ -772,9 +737,6 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
struct file *fp;
struct filedesc *fdp;
-#ifndef CAPABILITIES
- cap_rights_t rights;
-#endif
int error, tmp, locked;
AUDIT_ARG_FD(fd);
@@ -813,7 +775,7 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
locked = LA_UNLOCKED;
}
#else
- error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
+ error = fget(td, fd, &cap_ioctl_rights, &fp);
if (error != 0) {
fp = NULL;
goto out;
@@ -1284,11 +1246,8 @@ selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
- cap_rights_t rights;
- cap_rights_init(&rights, CAP_EVENT);
-
- return (fget_unlocked(fdp, fd, &rights, fpp, NULL));
+ return (fget_unlocked(fdp, fd, &cap_event_rights, fpp, NULL));
}
/*
@@ -1342,10 +1301,7 @@ selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
* each selinfo.
*/
static int
-selscan(td, ibits, obits, nfd)
- struct thread *td;
- fd_mask **ibits, **obits;
- int nfd;
+selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
struct filedesc *fdp;
struct file *fp;
@@ -1573,9 +1529,6 @@ pollrescan(struct thread *td)
struct filedesc *fdp;
struct file *fp;
struct pollfd *fd;
-#ifdef CAPABILITIES
- cap_rights_t rights;
-#endif
int n;
n = 0;
@@ -1600,8 +1553,7 @@ pollrescan(struct thread *td)
#endif /* __rtems__ */
#ifdef CAPABILITIES
if (fp == NULL ||
- cap_check(cap_rights(fdp, fd->fd),
- cap_rights_init(&rights, CAP_EVENT)) != 0)
+ cap_check(cap_rights(fdp, fd->fd), &cap_event_rights) != 0)
#else
if (fp == NULL)
#endif
@@ -1630,11 +1582,7 @@ pollrescan(struct thread *td)
static int
-pollout(td, fds, ufds, nfd)
- struct thread *td;
- struct pollfd *fds;
- struct pollfd *ufds;
- u_int nfd;
+pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
int error = 0;
u_int i = 0;
@@ -1655,10 +1603,7 @@ pollout(td, fds, ufds, nfd)
}
static int
-pollscan(td, fds, nfd)
- struct thread *td;
- struct pollfd *fds;
- u_int nfd;
+pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
#ifndef __rtems__
struct filedesc *fdp = td->td_proc->p_fd;
@@ -1666,9 +1611,6 @@ pollscan(td, fds, nfd)
struct filedesc *fdp = NULL;
#endif /* __rtems__ */
struct file *fp;
-#ifdef CAPABILITIES
- cap_rights_t rights;
-#endif
int i, n = 0;
FILEDESC_SLOCK(fdp);
@@ -1690,8 +1632,7 @@ pollscan(td, fds, nfd)
#endif /* __rtems__ */
#ifdef CAPABILITIES
if (fp == NULL ||
- cap_check(cap_rights(fdp, fds->fd),
- cap_rights_init(&rights, CAP_EVENT)) != 0)
+ cap_check(cap_rights(fdp, fds->fd), &cap_event_rights) != 0)
#else
if (fp == NULL)
#endif
@@ -1822,8 +1763,7 @@ selfdfree(struct seltd *stp, struct selfd *sfp)
/* Drain the waiters tied to all the selfd belonging the specified selinfo. */
void
-seldrain(sip)
- struct selinfo *sip;
+seldrain(struct selinfo *sip)
{
/*
@@ -1841,9 +1781,7 @@ seldrain(sip)
* Record a select request.
*/
void
-selrecord(selector, sip)
- struct thread *selector;
- struct selinfo *sip;
+selrecord(struct thread *selector, struct selinfo *sip)
{
struct selfd *sfp;
struct seltd *stp;
@@ -1892,17 +1830,14 @@ selrecord(selector, sip)
/* Wake up a selecting thread. */
void
-selwakeup(sip)
- struct selinfo *sip;
+selwakeup(struct selinfo *sip)
{
doselwakeup(sip, -1);
}
/* Wake up a selecting thread, and set its priority. */
void
-selwakeuppri(sip, pri)
- struct selinfo *sip;
- int pri;
+selwakeuppri(struct selinfo *sip, int pri)
{
doselwakeup(sip, pri);
}
@@ -1911,9 +1846,7 @@ selwakeuppri(sip, pri)
* Do a wakeup when a selectable event occurs.
*/
static void
-doselwakeup(sip, pri)
- struct selinfo *sip;
- int pri;
+doselwakeup(struct selinfo *sip, int pri)
{
struct selfd *sfp;
struct selfd *sfn;
diff --git a/freebsd/sys/kern/sys_pipe.c b/freebsd/sys/kern/sys_pipe.c
index cc5b123c..e527495a 100755
--- a/freebsd/sys/kern/sys_pipe.c
+++ b/freebsd/sys/kern/sys_pipe.c
@@ -93,8 +93,6 @@
* in the structure may have changed.
*/
-#include <rtems/bsd/local/opt_compat.h>
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
diff --git a/freebsd/sys/kern/tty.c b/freebsd/sys/kern/tty.c
index 1350c1d9..f4a2b01f 100644
--- a/freebsd/sys/kern/tty.c
+++ b/freebsd/sys/kern/tty.c
@@ -35,7 +35,6 @@
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_capsicum.h>
-#include <rtems/bsd/local/opt_compat.h>
#include <sys/param.h>
#include <sys/capsicum.h>
diff --git a/freebsd/sys/kern/tty_inq.c b/freebsd/sys/kern/tty_inq.c
index b470cb5a..8d557a55 100644
--- a/freebsd/sys/kern/tty_inq.c
+++ b/freebsd/sys/kern/tty_inq.c
@@ -330,7 +330,7 @@ ttyinq_write(struct ttyinq *ti, const void *buf, size_t nbytes, int quote)
int
ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes, int quote)
{
- size_t ret;
+ size_t ret __unused;
if (ttyinq_bytesleft(ti) < nbytes)
return (-1);
diff --git a/freebsd/sys/kern/tty_outq.c b/freebsd/sys/kern/tty_outq.c
index 121e9975..1643fe40 100644
--- a/freebsd/sys/kern/tty_outq.c
+++ b/freebsd/sys/kern/tty_outq.c
@@ -326,7 +326,7 @@ ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
int
ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
{
- size_t ret;
+ size_t ret __unused;
if (ttyoutq_bytesleft(to) < nbytes)
return (-1);
diff --git a/freebsd/sys/kern/uipc_mbuf.c b/freebsd/sys/kern/uipc_mbuf.c
index 7bf531e0..185d14a0 100644
--- a/freebsd/sys/kern/uipc_mbuf.c
+++ b/freebsd/sys/kern/uipc_mbuf.c
@@ -1635,9 +1635,6 @@ m_unshare(struct mbuf *m0, int how)
mprev->m_len += m->m_len;
mprev->m_next = m->m_next; /* unlink from chain */
m_free(m); /* reclaim mbuf */
-#if 0
- newipsecstat.ips_mbcoalesced++;
-#endif
} else {
mprev = m;
}
@@ -1667,9 +1664,6 @@ m_unshare(struct mbuf *m0, int how)
mprev->m_len += m->m_len;
mprev->m_next = m->m_next; /* unlink from chain */
m_free(m); /* reclaim mbuf */
-#if 0
- newipsecstat.ips_clcoalesced++;
-#endif
continue;
}
diff --git a/freebsd/sys/kern/uipc_sockbuf.c b/freebsd/sys/kern/uipc_sockbuf.c
index 0c4ace6b..ec493c04 100644
--- a/freebsd/sys/kern/uipc_sockbuf.c
+++ b/freebsd/sys/kern/uipc_sockbuf.c
@@ -466,6 +466,7 @@ sbsetopt(struct socket *so, int cmd, u_long cc)
u_int *hiwat, *lowat;
int error;
+ sb = NULL;
SOCK_LOCK(so);
if (SOLISTENING(so)) {
switch (cmd) {
diff --git a/freebsd/sys/kern/uipc_socket.c b/freebsd/sys/kern/uipc_socket.c
index 43763026..e82642e4 100644
--- a/freebsd/sys/kern/uipc_socket.c
+++ b/freebsd/sys/kern/uipc_socket.c
@@ -109,7 +109,6 @@ __FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_inet.h>
#include <rtems/bsd/local/opt_inet6.h>
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_sctp.h>
#include <sys/param.h>
diff --git a/freebsd/sys/kern/uipc_syscalls.c b/freebsd/sys/kern/uipc_syscalls.c
index 6f3c95c0..0872aa62 100644
--- a/freebsd/sys/kern/uipc_syscalls.c
+++ b/freebsd/sys/kern/uipc_syscalls.c
@@ -39,7 +39,6 @@ __FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_capsicum.h>
#include <rtems/bsd/local/opt_inet.h>
#include <rtems/bsd/local/opt_inet6.h>
-#include <rtems/bsd/local/opt_compat.h>
#include <rtems/bsd/local/opt_ktrace.h>
#include <sys/param.h>
@@ -300,12 +299,16 @@ kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
int error;
+#ifdef CAPABILITY_MODE
+ if (IN_CAPABILITY_MODE(td) && (dirfd == AT_FDCWD))
+ return (ECAPMODE);
+#endif
+
AUDIT_ARG_FD(fd);
AUDIT_ARG_SOCKADDR(td, dirfd, sa);
- error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_BIND),
+ error = getsock_cap(td, fd, &cap_bind_rights,
&fp, NULL, NULL);
if (error != 0)
return (error);
@@ -364,11 +367,10 @@ kern_listen(struct thread *td, int s, int backlog)
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
int error;
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_LISTEN),
+ error = getsock_cap(td, s, &cap_listen_rights,
&fp, NULL, NULL);
if (error == 0) {
so = fp->f_data;
@@ -493,7 +495,6 @@ kern_accept4(struct thread *td, int s, struct sockaddr **name,
struct sockaddr *sa = NULL;
struct socket *head, *so;
struct filecaps fcaps;
- cap_rights_t rights;
u_int fflag;
pid_t pgid;
int error, fd, tmp;
@@ -502,7 +503,7 @@ kern_accept4(struct thread *td, int s, struct sockaddr **name,
*name = NULL;
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_ACCEPT),
+ error = getsock_cap(td, s, &cap_accept_rights,
&headfp, &fflag, &fcaps);
if (error != 0)
return (error);
@@ -692,12 +693,16 @@ kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
int error, interrupted = 0;
+#ifdef CAPABILITY_MODE
+ if (IN_CAPABILITY_MODE(td) && (dirfd == AT_FDCWD))
+ return (ECAPMODE);
+#endif
+
AUDIT_ARG_FD(fd);
AUDIT_ARG_SOCKADDR(td, dirfd, sa);
- error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_CONNECT),
+ error = getsock_cap(td, fd, &cap_connect_rights,
&fp, NULL, NULL);
if (error != 0)
return (error);
@@ -986,7 +991,7 @@ kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags,
struct iovec *iov;
struct socket *so;
#ifndef __rtems__
- cap_rights_t rights;
+ cap_rights_t *rights;
#endif /* __rtems__ */
#ifdef KTRACE
struct uio *ktruio = NULL;
@@ -996,13 +1001,14 @@ kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags,
AUDIT_ARG_FD(s);
#ifndef __rtems__
+ rights = &cap_send_rights;
cap_rights_init(&rights, CAP_SEND);
if (mp->msg_name != NULL) {
AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
- cap_rights_set(&rights, CAP_CONNECT);
+ rights = &cap_send_connect_rights;
}
#endif /* __rtems__ */
- error = getsock_cap(td, s, &rights, &fp, NULL, NULL);
+ error = getsock_cap(td, s, rights, &fp, NULL, NULL);
if (error != 0) {
m_freem(control);
return (error);
@@ -1259,7 +1265,6 @@ kern_recvit(struct thread *td, int s, struct msghdr *mp, enum uio_seg fromseg,
struct file *fp;
struct socket *so;
struct sockaddr *fromsa = NULL;
- cap_rights_t rights;
#ifdef KTRACE
struct uio *ktruio = NULL;
#endif
@@ -1270,7 +1275,7 @@ kern_recvit(struct thread *td, int s, struct msghdr *mp, enum uio_seg fromseg,
*controlp = NULL;
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_RECV),
+ error = getsock_cap(td, s, &cap_recv_rights,
&fp, NULL, NULL);
if (error != 0)
return (error);
@@ -1613,11 +1618,10 @@ kern_shutdown(struct thread *td, int s, int how)
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
int error;
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SHUTDOWN),
+ error = getsock_cap(td, s, &cap_shutdown_rights,
&fp, NULL, NULL);
if (error == 0) {
so = fp->f_data;
@@ -1696,7 +1700,6 @@ kern_setsockopt(struct thread *td, int s, int level, int name, void *val,
struct socket *so;
struct file *fp;
struct sockopt sopt;
- cap_rights_t rights;
int error;
if (val == NULL && valsize != 0)
@@ -1721,7 +1724,7 @@ kern_setsockopt(struct thread *td, int s, int level, int name, void *val,
}
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SETSOCKOPT),
+ error = getsock_cap(td, s, &cap_setsockopt_rights,
&fp, NULL, NULL);
if (error == 0) {
so = fp->f_data;
@@ -1792,7 +1795,6 @@ kern_getsockopt(struct thread *td, int s, int level, int name, void *val,
struct socket *so;
struct file *fp;
struct sockopt sopt;
- cap_rights_t rights;
int error;
if (val == NULL)
@@ -1817,7 +1819,7 @@ kern_getsockopt(struct thread *td, int s, int level, int name, void *val,
}
AUDIT_ARG_FD(s);
- error = getsock_cap(td, s, cap_rights_init(&rights, CAP_GETSOCKOPT),
+ error = getsock_cap(td, s, &cap_getsockopt_rights,
&fp, NULL, NULL);
if (error == 0) {
so = fp->f_data;
@@ -1892,12 +1894,11 @@ kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
socklen_t len;
int error;
AUDIT_ARG_FD(fd);
- error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETSOCKNAME),
+ error = getsock_cap(td, fd, &cap_getsockname_rights,
&fp, NULL, NULL);
if (error != 0)
return (error);
@@ -2008,12 +2009,11 @@ kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
{
struct socket *so;
struct file *fp;
- cap_rights_t rights;
socklen_t len;
int error;
AUDIT_ARG_FD(fd);
- error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETPEERNAME),
+ error = getsock_cap(td, fd, &cap_getpeername_rights,
&fp, NULL, NULL);
if (error != 0)
return (error);
diff --git a/freebsd/sys/kern/uipc_usrreq.c b/freebsd/sys/kern/uipc_usrreq.c
index 7849be9d..688682d4 100644
--- a/freebsd/sys/kern/uipc_usrreq.c
+++ b/freebsd/sys/kern/uipc_usrreq.c
@@ -4,9 +4,9 @@
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
- * The Regents of the University of California.
- * Copyright (c) 2004-2009 Robert N. M. Watson
- * All rights reserved.
+ * The Regents of the University of California. All Rights Reserved.
+ * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
+ * Copyright (c) 2018 Matthew Macy
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -204,12 +204,40 @@ SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
/*
* Locking and synchronization:
*
- * Two types of locks exist in the local domain socket implementation: a
- * a global linkage rwlock and per-unpcb mutexes. The linkage lock protects
- * the socket count, global generation number, stream/datagram global lists and
- * interconnection of unpcbs, the v_socket and unp_vnode pointers, and can be
- * held exclusively over the acquisition of multiple unpcb locks to prevent
- * deadlock.
+ * Three types of locks exist in the local domain socket implementation:
+ * a global linkage rwlock, the mtxpool lock, and per-unpcb mutexes.
+ * The linkage lock protects the socket count, global generation number,
+ * and stream/datagram global lists.
+ *
+ * The mtxpool lock protects the vnode from being modified while referenced.
+ * Lock ordering requires that it be acquired before any unpcb locks.
+ *
+ * The unpcb lock (unp_mtx) protects all fields in the unpcb. Of particular
+ * note is that this includes the unp_conn field. So long as the unpcb lock
+ * is held, the reference to the unpcb pointed to by unp_conn is valid.
+ * If the unpcb pointed to by unp_conn must remain live while we drop the
+ * unp_mtx (for example, to acquire the lock of a second unpcb), the
+ * caller must first acquire an additional reference on the second unpcb
+ * and then revalidate any state (typically check that unp_conn is
+ * non-NULL) upon reacquiring the initial unpcb lock.  The lock ordering
+ * between unpcbs is the conventional ascending address order. Two helper
+ * routines exist for this:
+ *
+ * - unp_pcb_lock2(unp, unp2) - which just acquires the two locks in the
+ * safe ordering.
+ *
+ * - unp_pcb_owned_lock2(unp, unp2, freed) - the lock for unp is held
+ *      when called.  If the unp lock had to be dropped and unp2 was
+ *      freed in the meantime, freed will be set to 1.
+ *
+ * The helper routines for references are:
+ *
+ * - unp_pcb_hold(unp): Can be called any time we currently hold a valid
+ * reference to unp.
+ *
+ * - unp_pcb_rele(unp): The caller must hold the unp lock. If we are
+ *      releasing the last reference, detach must have been called and
+ *      thus unp->unp_socket must be NULL.
*
* UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
* allocated in pru_attach() and freed in pru_detach(). The validity of that
@@ -223,15 +251,8 @@ SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
* to the unpcb is held. Typically, this reference will be from the socket,
* or from another unpcb when the referring unpcb's lock is held (in order
* that the reference not be invalidated during use). For example, to follow
- * unp->unp_conn->unp_socket, you need unlock the lock on unp, not unp_conn,
- * as unp_socket remains valid as long as the reference to unp_conn is valid.
- *
- * Fields of unpcbss are locked using a per-unpcb lock, unp_mtx. Individual
- * atomic reads without the lock may be performed "lockless", but more
- * complex reads and read-modify-writes require the mutex to be held. No
- * lock order is defined between unpcb locks -- multiple unpcb locks may be
- * acquired at the same time only when holding the linkage rwlock
- * exclusively, which prevents deadlocks.
+ * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
+ * that detach is not run clearing unp_socket.
*
* Blocking with UNIX domain sockets is a tricky issue: unlike most network
* protocols, bind() is a non-atomic operation, and connect() requires
@@ -270,13 +291,19 @@ static struct mtx unp_defers_lock;
#define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock)
#define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock)
+#define	UNP_REF_LIST_LOCK()		UNP_DEFERRED_LOCK()
+#define	UNP_REF_LIST_UNLOCK()		UNP_DEFERRED_UNLOCK()
+
#define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \
- "unp_mtx", "unp_mtx", \
- MTX_DUPOK|MTX_DEF|MTX_RECURSE)
+ "unp", "unp", \
+ MTX_DUPOK|MTX_DEF)
#define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx)
#define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx)
+#define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx)
#define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx)
+#define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx)
#define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED)
+#define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)
static int uipc_connect2(struct socket *, struct socket *);
static int uipc_ctloutput(struct socket *, struct sockopt *);
@@ -308,6 +335,77 @@ static struct mbuf *unp_addsockcred(struct thread *, struct mbuf *);
static void unp_process_defers(void * __unused, int);
#endif /* __rtems__ */
+
+static void
+unp_pcb_hold(struct unpcb *unp)
+{
+ MPASS(unp->unp_refcount);
+ refcount_acquire(&unp->unp_refcount);
+}
+
+static int
+unp_pcb_rele(struct unpcb *unp)
+{
+ int freed;
+
+ UNP_PCB_LOCK_ASSERT(unp);
+ MPASS(unp->unp_refcount);
+ if ((freed = refcount_release(&unp->unp_refcount))) {
+		/* We should only get here after detach, so unp_socket must be NULL. */
+ MPASS(unp->unp_socket == NULL);
+ UNP_PCB_UNLOCK(unp);
+ UNP_PCB_LOCK_DESTROY(unp);
+ uma_zfree(unp_zone, unp);
+ }
+ return (freed);
+}
+
+static void
+unp_pcb_lock2(struct unpcb *unp, struct unpcb *unp2)
+{
+ MPASS(unp != unp2);
+ UNP_PCB_UNLOCK_ASSERT(unp);
+ UNP_PCB_UNLOCK_ASSERT(unp2);
+ if ((uintptr_t)unp2 > (uintptr_t)unp) {
+ UNP_PCB_LOCK(unp);
+ UNP_PCB_LOCK(unp2);
+ } else {
+ UNP_PCB_LOCK(unp2);
+ UNP_PCB_LOCK(unp);
+ }
+}
+
+static __noinline void
+unp_pcb_owned_lock2_slowpath(struct unpcb *unp, struct unpcb **unp2p, int *freed)
+{
+	struct unpcb *unp2;
+
+	unp2 = *unp2p;
+	/* unp2 sorts below unp: take a reference, then relock in order. */
+	unp_pcb_hold(unp2);
+	UNP_PCB_UNLOCK(unp);
+	UNP_PCB_LOCK(unp2);
+	UNP_PCB_LOCK(unp);
+	/* Drop our reference; if it was the last one, unp2 is gone. */
+	*freed = unp_pcb_rele(unp2);
+ if (*freed)
+ *unp2p = NULL;
+}
+
+#define unp_pcb_owned_lock2(unp, unp2, freed) do { \
+ freed = 0; \
+ UNP_PCB_LOCK_ASSERT((unp)); \
+ UNP_PCB_UNLOCK_ASSERT((unp2)); \
+ MPASS(unp != unp2); \
+ if (__predict_true(UNP_PCB_TRYLOCK((unp2)))) \
+ break; \
+ else if ((uintptr_t)(unp2) > (uintptr_t)(unp)) \
+ UNP_PCB_LOCK((unp2)); \
+ else { \
+ unp_pcb_owned_lock2_slowpath((unp), &(unp2), &freed); \
+ } \
+} while (0)
+
/*
* Definitions of protocols supported in the LOCAL domain.
*/
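
A condensed sketch of the reference and locking discipline these helpers
establish (illustrative only; the "..." marks elided caller-specific work, and
the real patterns appear in uipc_close(), uipc_detach() and unp_drop() below):

	UNP_PCB_LOCK(unp);
	unp2 = unp->unp_conn;
	if (unp2 != NULL && unp2 != unp) {
		unp_pcb_hold(unp2);	/* keep unp2 live if unp must be unlocked */
		unp_pcb_owned_lock2(unp, unp2, freed);
		if (freed) {
			/* unp2 was torn down while unp was unlocked. */
			UNP_PCB_UNLOCK(unp);
			return;
		}
		/*
		 * Both locks are held in ascending address order; revalidate
		 * unp->unp_conn before trusting it, then do the work.
		 */
		...
		if (unp_pcb_rele(unp2) == 0)
			UNP_PCB_UNLOCK(unp2);
	}
	UNP_PCB_UNLOCK(unp);
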
@@ -365,17 +463,16 @@ uipc_abort(struct socket *so)
unp = sotounpcb(so);
KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
+ UNP_PCB_UNLOCK_ASSERT(unp);
- UNP_LINK_WLOCK();
UNP_PCB_LOCK(unp);
unp2 = unp->unp_conn;
if (unp2 != NULL) {
- UNP_PCB_LOCK(unp2);
+ unp_pcb_hold(unp2);
+ UNP_PCB_UNLOCK(unp);
unp_drop(unp2);
- UNP_PCB_UNLOCK(unp2);
- }
- UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
+ } else
+ UNP_PCB_UNLOCK(unp);
}
static int
@@ -636,7 +733,6 @@ restart:
#endif /* __rtems__ */
soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
- UNP_LINK_WLOCK();
UNP_PCB_LOCK(unp);
#ifndef __rtems__
VOP_UNP_BIND(vp, unp);
@@ -645,7 +741,6 @@ restart:
unp->unp_addr = soun;
unp->unp_flags &= ~UNP_BINDING;
UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
#ifndef __rtems__
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
@@ -676,9 +771,7 @@ uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
int error;
KASSERT(td == curthread, ("uipc_connect: td != curthread"));
- UNP_LINK_WLOCK();
error = unp_connect(so, nam, td);
- UNP_LINK_WUNLOCK();
return (error);
}
@@ -689,9 +782,7 @@ uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
int error;
KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
- UNP_LINK_WLOCK();
error = unp_connectat(fd, so, nam, td);
- UNP_LINK_WUNLOCK();
return (error);
}
@@ -701,30 +792,53 @@ uipc_close(struct socket *so)
struct unpcb *unp, *unp2;
#ifndef __rtems__
struct vnode *vp = NULL;
+ struct mtx *vplock;
#else /* __rtems__ */
IMFS_generic_t *vp = NULL;
#endif /* __rtems__ */
-
+ int freed;
unp = sotounpcb(so);
KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
- UNP_LINK_WLOCK();
+
+#ifndef __rtems__
+ vplock = NULL;
+#endif /* __rtems__ */
+ if ((vp = unp->unp_vnode) != NULL) {
+#ifndef __rtems__
+ vplock = mtx_pool_find(mtxpool_sleep, vp);
+ mtx_lock(vplock);
+#endif /* __rtems__ */
+ }
UNP_PCB_LOCK(unp);
- unp2 = unp->unp_conn;
- if (unp2 != NULL) {
- UNP_PCB_LOCK(unp2);
- unp_disconnect(unp, unp2);
- UNP_PCB_UNLOCK(unp2);
+ if (vp && unp->unp_vnode == NULL) {
+#ifndef __rtems__
+ mtx_unlock(vplock);
+#endif /* __rtems__ */
+ vp = NULL;
}
- if (SOLISTENING(so) && ((vp = unp->unp_vnode) != NULL)) {
+ if (vp != NULL) {
VOP_UNP_DETACH(vp);
unp->unp_vnode = NULL;
}
- UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
+ unp2 = unp->unp_conn;
+ unp_pcb_hold(unp);
+ if (__predict_false(unp == unp2)) {
+ unp_disconnect(unp, unp2);
+ } else if (unp2 != NULL) {
+ unp_pcb_hold(unp2);
+ unp_pcb_owned_lock2(unp, unp2, freed);
+ unp_disconnect(unp, unp2);
+ if (unp_pcb_rele(unp2) == 0)
+ UNP_PCB_UNLOCK(unp2);
+ }
+ if (unp_pcb_rele(unp) == 0)
+ UNP_PCB_UNLOCK(unp);
#ifndef __rtems__
- if (vp)
+ if (vp) {
+ mtx_unlock(vplock);
vrele(vp);
+ }
#endif /* __rtems__ */
}
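
The vnode handling above follows a check-lock-recheck pattern that recurs in
uipc_detach() below: the pool mutex is looked up while unp is unlocked, so the
vnode binding must be revalidated once the pcb lock is held. Schematically
(illustrative only):

	if ((vp = unp->unp_vnode) != NULL) {
		/* Pool mutex keyed on the vnode address stabilizes the binding. */
		vplock = mtx_pool_find(mtxpool_sleep, vp);
		mtx_lock(vplock);
	}
	UNP_PCB_LOCK(unp);
	if (vp != NULL && unp->unp_vnode == NULL) {
		/* Raced with a detach; the vnode is no longer ours. */
		mtx_unlock(vplock);
		vp = NULL;
	}
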
@@ -734,17 +848,18 @@ uipc_connect2(struct socket *so1, struct socket *so2)
struct unpcb *unp, *unp2;
int error;
- UNP_LINK_WLOCK();
unp = so1->so_pcb;
KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
- UNP_PCB_LOCK(unp);
unp2 = so2->so_pcb;
KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
- UNP_PCB_LOCK(unp2);
+ if (unp != unp2)
+ unp_pcb_lock2(unp, unp2);
+ else
+ UNP_PCB_LOCK(unp);
error = unp_connect2(so1, so2, PRU_CONNECT2);
- UNP_PCB_UNLOCK(unp2);
+ if (unp != unp2)
+ UNP_PCB_UNLOCK(unp2);
UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
return (error);
}
@@ -752,6 +867,9 @@ static void
uipc_detach(struct socket *so)
{
struct unpcb *unp, *unp2;
+#ifndef __rtems__
+ struct mtx *vplock;
+#endif /* __rtems__ */
struct sockaddr_un *saved_unp_addr;
#ifndef __rtems__
struct vnode *vp;
@@ -766,6 +884,7 @@ uipc_detach(struct socket *so)
vp = NULL;
#ifndef __rtems__
+ vplock = NULL;
local_unp_rights = 0;
#endif /* __rtems__ */
@@ -773,52 +892,87 @@ uipc_detach(struct socket *so)
LIST_REMOVE(unp, unp_link);
unp->unp_gencnt = ++unp_gencnt;
--unp_count;
+ UNP_LINK_WUNLOCK();
+
+ UNP_PCB_UNLOCK_ASSERT(unp);
+ restart:
+ if ((vp = unp->unp_vnode) != NULL) {
+#ifndef __rtems__
+ vplock = mtx_pool_find(mtxpool_sleep, vp);
+ mtx_lock(vplock);
+#endif /* __rtems__ */
+ }
UNP_PCB_LOCK(unp);
- if ((unp->unp_flags & UNP_NASCENT) != 0)
+ if (unp->unp_vnode != vp &&
+ unp->unp_vnode != NULL) {
+#ifndef __rtems__
+ if (vplock)
+ mtx_unlock(vplock);
+#endif /* __rtems__ */
+ UNP_PCB_UNLOCK(unp);
+ goto restart;
+ }
+ if ((unp->unp_flags & UNP_NASCENT) != 0) {
goto teardown;
-
+ }
if ((vp = unp->unp_vnode) != NULL) {
VOP_UNP_DETACH(vp);
unp->unp_vnode = NULL;
}
- unp2 = unp->unp_conn;
+ if (__predict_false(unp == unp->unp_conn)) {
+ unp_disconnect(unp, unp);
+ unp2 = NULL;
+ goto connect_self;
+ }
+ if ((unp2 = unp->unp_conn) != NULL) {
+ unp_pcb_owned_lock2(unp, unp2, freeunp);
+ if (freeunp)
+ unp2 = NULL;
+ }
+ unp_pcb_hold(unp);
if (unp2 != NULL) {
- UNP_PCB_LOCK(unp2);
+ unp_pcb_hold(unp2);
unp_disconnect(unp, unp2);
- UNP_PCB_UNLOCK(unp2);
+ if (unp_pcb_rele(unp2) == 0)
+ UNP_PCB_UNLOCK(unp2);
}
-
- /*
- * We hold the linkage lock exclusively, so it's OK to acquire
- * multiple pcb locks at a time.
- */
+ connect_self:
+ UNP_PCB_UNLOCK(unp);
+ UNP_REF_LIST_LOCK();
while (!LIST_EMPTY(&unp->unp_refs)) {
struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
- UNP_PCB_LOCK(ref);
+ unp_pcb_hold(ref);
+ UNP_REF_LIST_UNLOCK();
+
+ MPASS(ref != unp);
+ UNP_PCB_UNLOCK_ASSERT(ref);
unp_drop(ref);
- UNP_PCB_UNLOCK(ref);
+ UNP_REF_LIST_LOCK();
}
+
+ UNP_REF_LIST_UNLOCK();
+ UNP_PCB_LOCK(unp);
+ freeunp = unp_pcb_rele(unp);
+ MPASS(freeunp == 0);
#ifndef __rtems__
local_unp_rights = unp_rights;
#endif /* __rtems__ */
teardown:
- UNP_LINK_WUNLOCK();
unp->unp_socket->so_pcb = NULL;
saved_unp_addr = unp->unp_addr;
unp->unp_addr = NULL;
- unp->unp_refcount--;
- freeunp = (unp->unp_refcount == 0);
+ unp->unp_socket = NULL;
+ freeunp = unp_pcb_rele(unp);
if (saved_unp_addr != NULL)
free(saved_unp_addr, M_SONAME);
- if (freeunp) {
- UNP_PCB_LOCK_DESTROY(unp);
- uma_zfree(unp_zone, unp);
- } else
+ if (!freeunp)
UNP_PCB_UNLOCK(unp);
#ifndef __rtems__
- if (vp)
+ if (vp) {
+ mtx_unlock(vplock);
vrele(vp);
+ }
if (local_unp_rights)
taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);
#endif /* __rtems__ */
@@ -828,20 +982,32 @@ static int
uipc_disconnect(struct socket *so)
{
struct unpcb *unp, *unp2;
+ int freed;
unp = sotounpcb(so);
KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
- UNP_LINK_WLOCK();
UNP_PCB_LOCK(unp);
- unp2 = unp->unp_conn;
- if (unp2 != NULL) {
- UNP_PCB_LOCK(unp2);
- unp_disconnect(unp, unp2);
- UNP_PCB_UNLOCK(unp2);
+ if ((unp2 = unp->unp_conn) == NULL) {
+ UNP_PCB_UNLOCK(unp);
+ return (0);
}
- UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
+	if (unp == unp2) {
+		unp_disconnect(unp, unp2);
+		UNP_PCB_UNLOCK(unp);
+		return (0);
+	}
+ unp_pcb_owned_lock2(unp, unp2, freed);
+ if (__predict_false(freed)) {
+ UNP_PCB_UNLOCK(unp);
+ return (0);
+ }
+ unp_pcb_hold(unp2);
+ unp_pcb_hold(unp);
+ unp_disconnect(unp, unp2);
+ if (unp_pcb_rele(unp) == 0)
+ UNP_PCB_UNLOCK(unp);
+ if (unp_pcb_rele(unp2) == 0)
+ UNP_PCB_UNLOCK(unp2);
return (0);
}
@@ -960,13 +1126,35 @@ uipc_rcvd(struct socket *so, int flags)
}
static int
+connect_internal(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error;
+ struct unpcb *unp;
+
+ unp = so->so_pcb;
+ if (unp->unp_conn != NULL)
+ return (EISCONN);
+ error = unp_connect(so, nam, td);
+ if (error)
+ return (error);
+ UNP_PCB_LOCK(unp);
+ if (unp->unp_conn == NULL) {
+ UNP_PCB_UNLOCK(unp);
+ if (error == 0)
+ error = ENOTCONN;
+ }
+ return (error);
+}
+
+static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
struct mbuf *control, struct thread *td)
{
struct unpcb *unp, *unp2;
struct socket *so2;
u_int mbcnt, sbcc;
- int error = 0;
+ int freed, error;
unp = sotounpcb(so);
KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
@@ -974,6 +1162,7 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
so->so_type == SOCK_SEQPACKET,
("%s: socktype %d", __func__, so->so_type));
+ freed = error = 0;
if (flags & PRUS_OOB) {
error = EOPNOTSUPP;
goto release;
@@ -981,53 +1170,72 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
#ifndef __rtems__
if (control != NULL && (error = unp_internalize(&control, td)))
goto release;
-#else /* __rtems__ */
- if (control != NULL) {
- error = ENOSYS;
- goto release;
- }
#endif /* __rtems__ */
- if ((nam != NULL) || (flags & PRUS_EOF))
- UNP_LINK_WLOCK();
- else
- UNP_LINK_RLOCK();
+
+ unp2 = NULL;
switch (so->so_type) {
case SOCK_DGRAM:
{
const struct sockaddr *from;
- unp2 = unp->unp_conn;
if (nam != NULL) {
- UNP_LINK_WLOCK_ASSERT();
- if (unp2 != NULL) {
- error = EISCONN;
+ /*
+			 * connect_internal() returns with the unp lock held
+			 * on success, so the unp_conn reference is known to
+			 * be live.
+ */
+ if ((error = connect_internal(so, nam, td)))
+ break;
+ MPASS(unp->unp_conn != NULL);
+ unp2 = unp->unp_conn;
+ } else {
+ UNP_PCB_LOCK(unp);
+
+ /*
+ * Because connect() and send() are non-atomic in a sendto()
+ * with a target address, it's possible that the socket will
+ * have disconnected before the send() can run. In that case
+ * return the slightly counter-intuitive but otherwise
+ * correct error that the socket is not connected.
+ */
+ if ((unp2 = unp->unp_conn) == NULL) {
+ UNP_PCB_UNLOCK(unp);
+ error = ENOTCONN;
break;
}
- error = unp_connect(so, nam, td);
- if (error)
+ }
+ if (__predict_false(unp == unp2)) {
+ if (unp->unp_socket == NULL) {
+ error = ENOTCONN;
break;
- unp2 = unp->unp_conn;
+ }
+ goto connect_self;
+ }
+ unp_pcb_owned_lock2(unp, unp2, freed);
+ if (__predict_false(freed)) {
+ UNP_PCB_UNLOCK(unp);
+ error = ENOTCONN;
+ break;
}
-
/*
- * Because connect() and send() are non-atomic in a sendto()
- * with a target address, it's possible that the socket will
- * have disconnected before the send() can run. In that case
- * return the slightly counter-intuitive but otherwise
- * correct error that the socket is not connected.
+ * The socket referencing unp2 may have been closed
+ * or unp may have been disconnected if the unp lock
+ * was dropped to acquire unp2.
*/
- if (unp2 == NULL) {
+ if (__predict_false(unp->unp_conn == NULL) ||
+ unp2->unp_socket == NULL) {
+ UNP_PCB_UNLOCK(unp);
+ if (unp_pcb_rele(unp2) == 0)
+ UNP_PCB_UNLOCK(unp2);
error = ENOTCONN;
break;
}
- /* Lockless read. */
+ connect_self:
if (unp2->unp_flags & UNP_WANTCRED)
#ifndef __rtems__
control = unp_addsockcred(td, control);
#else /* __rtems__ */
control = NULL;
#endif /* __rtems__ */
- UNP_PCB_LOCK(unp);
if (unp->unp_addr != NULL)
from = (struct sockaddr *)unp->unp_addr;
else
@@ -1043,12 +1251,10 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
SOCKBUF_UNLOCK(&so2->so_rcv);
error = ENOBUFS;
}
- if (nam != NULL) {
- UNP_LINK_WLOCK_ASSERT();
- UNP_PCB_LOCK(unp2);
+ if (nam != NULL)
unp_disconnect(unp, unp2);
+ if (__predict_true(unp != unp2))
UNP_PCB_UNLOCK(unp2);
- }
UNP_PCB_UNLOCK(unp);
break;
}
@@ -1057,42 +1263,37 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
case SOCK_STREAM:
if ((so->so_state & SS_ISCONNECTED) == 0) {
if (nam != NULL) {
- UNP_LINK_WLOCK_ASSERT();
- error = unp_connect(so, nam, td);
- if (error)
- break; /* XXX */
- } else {
+ if ((error = connect_internal(so, nam, td)))
+ break;
+ } else {
error = ENOTCONN;
break;
}
- }
-
- /* Lockless read. */
- if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
+ } else if ((unp2 = unp->unp_conn) == NULL) {
+ error = ENOTCONN;
+ break;
+ } else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
error = EPIPE;
break;
+ } else {
+ UNP_PCB_LOCK(unp);
+ if ((unp2 = unp->unp_conn) == NULL) {
+ UNP_PCB_UNLOCK(unp);
+ error = ENOTCONN;
+ break;
+ }
}
-
- /*
- * Because connect() and send() are non-atomic in a sendto()
- * with a target address, it's possible that the socket will
- * have disconnected before the send() can run. In that case
- * return the slightly counter-intuitive but otherwise
- * correct error that the socket is not connected.
- *
- * Locking here must be done carefully: the linkage lock
- * prevents interconnections between unpcbs from changing, so
- * we can traverse from unp to unp2 without acquiring unp's
- * lock. Socket buffer locks follow unpcb locks, so we can
- * acquire both remote and lock socket buffer locks.
- */
- unp2 = unp->unp_conn;
- if (unp2 == NULL) {
+ unp_pcb_owned_lock2(unp, unp2, freed);
+ UNP_PCB_UNLOCK(unp);
+ if (__predict_false(freed)) {
+ error = ENOTCONN;
+ break;
+ }
+ if ((so2 = unp2->unp_socket) == NULL) {
+ UNP_PCB_UNLOCK(unp2);
error = ENOTCONN;
break;
}
- so2 = unp2->unp_socket;
- UNP_PCB_LOCK(unp2);
SOCKBUF_LOCK(&so2->so_rcv);
if (unp2->unp_flags & UNP_WANTCRED) {
#ifndef __rtems__
@@ -1167,12 +1368,6 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
unp_shutdown(unp);
UNP_PCB_UNLOCK(unp);
}
-
- if ((nam != NULL) || (flags & PRUS_EOF))
- UNP_LINK_WUNLOCK();
- else
- UNP_LINK_RUNLOCK();
-
if (control != NULL && error != 0)
#ifndef __rtems__
unp_dispose_mbuf(control);
@@ -1249,12 +1444,10 @@ uipc_shutdown(struct socket *so)
unp = sotounpcb(so);
KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
- UNP_LINK_WLOCK();
UNP_PCB_LOCK(unp);
socantsendmore(so);
unp_shutdown(unp);
UNP_PCB_UNLOCK(unp);
- UNP_LINK_WUNLOCK();
return (0);
}
@@ -1470,16 +1663,13 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
#ifndef __rtems__
cap_rights_t rights;
#endif /* __rtems__ */
- int error, len;
+ int error, len, freed;
+#ifndef __rtems__
+ struct mtx *vplock;
+#endif /* __rtems__ */
if (nam->sa_family != AF_UNIX)
return (EAFNOSUPPORT);
-
- UNP_LINK_WLOCK_ASSERT();
-
- unp = sotounpcb(so);
- KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
-
if (nam->sa_len > sizeof(struct sockaddr_un))
return (EINVAL);
len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
@@ -1490,12 +1680,12 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
buf[len] = 0;
#endif /* __rtems__ */
+ unp = sotounpcb(so);
UNP_PCB_LOCK(unp);
if (unp->unp_flags & UNP_CONNECTING) {
UNP_PCB_UNLOCK(unp);
return (EALREADY);
}
- UNP_LINK_WUNLOCK();
unp->unp_flags |= UNP_CONNECTING;
UNP_PCB_UNLOCK(unp);
@@ -1548,12 +1738,9 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
unp = sotounpcb(so);
KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
- /*
- * Lock linkage lock for two reasons: make sure v_socket is stable,
- * and to protect simultaneous locking of multiple pcbs.
- */
- UNP_LINK_WLOCK();
#ifndef __rtems__
+ vplock = mtx_pool_find(mtxpool_sleep, vp);
+ mtx_lock(vplock);
VOP_UNP_CONNECT(vp, &unp2);
if (unp2 == NULL) {
error = ECONNREFUSED;
@@ -1572,8 +1759,6 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
error = EPROTOTYPE;
goto bad2;
}
- UNP_PCB_LOCK(unp);
- UNP_PCB_LOCK(unp2);
if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
if (so2->so_options & SO_ACCEPTCONN) {
CURVNET_SET(so2->so_vnet);
@@ -1583,10 +1768,10 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
so2 = NULL;
if (so2 == NULL) {
error = ECONNREFUSED;
- goto bad3;
+ goto bad2;
}
unp3 = sotounpcb(so2);
- UNP_PCB_LOCK(unp3);
+ unp_pcb_lock2(unp2, unp3);
if (unp2->unp_addr != NULL) {
bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
unp3->unp_addr = (struct sockaddr_un *) sa;
@@ -1613,28 +1798,40 @@ unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
unp3->unp_flags |= UNP_WANTCRED;
UNP_PCB_UNLOCK(unp2);
unp2 = unp3;
+ unp_pcb_owned_lock2(unp2, unp, freed);
+ if (__predict_false(freed)) {
+ UNP_PCB_UNLOCK(unp2);
+ error = ECONNREFUSED;
+ goto bad2;
+ }
#ifdef MAC
mac_socketpeer_set_from_socket(so, so2);
mac_socketpeer_set_from_socket(so2, so);
#endif
+ } else {
+ if (unp == unp2)
+ UNP_PCB_LOCK(unp);
+ else
+ unp_pcb_lock2(unp, unp2);
}
-
KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
sotounpcb(so2) == unp2,
("%s: unp2 %p so2 %p", __func__, unp2, so2));
error = unp_connect2(so, so2, PRU_CONNECT);
-bad3:
- UNP_PCB_UNLOCK(unp2);
+ if (unp != unp2)
+ UNP_PCB_UNLOCK(unp2);
UNP_PCB_UNLOCK(unp);
bad2:
- UNP_LINK_WUNLOCK();
+#ifndef __rtems__
+ mtx_unlock(vplock);
+#endif /* __rtems__ */
bad:
#ifndef __rtems__
- if (vp != NULL)
+ if (vp != NULL) {
vput(vp);
+ }
#endif /* __rtems__ */
free(sa, M_SONAME);
- UNP_LINK_WLOCK();
UNP_PCB_LOCK(unp);
unp->unp_flags &= ~UNP_CONNECTING;
UNP_PCB_UNLOCK(unp);
@@ -1652,7 +1849,6 @@ unp_connect2(struct socket *so, struct socket *so2, int req)
unp2 = sotounpcb(so2);
KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
- UNP_LINK_WLOCK_ASSERT();
UNP_PCB_LOCK_ASSERT(unp);
UNP_PCB_LOCK_ASSERT(unp2);
@@ -1660,10 +1856,13 @@ unp_connect2(struct socket *so, struct socket *so2, int req)
return (EPROTOTYPE);
unp2->unp_flags &= ~UNP_NASCENT;
unp->unp_conn = unp2;
-
+ unp_pcb_hold(unp2);
+ unp_pcb_hold(unp);
switch (so->so_type) {
case SOCK_DGRAM:
+ UNP_REF_LIST_LOCK();
LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
+ UNP_REF_LIST_UNLOCK();
soisconnected(so);
break;
@@ -1687,31 +1886,47 @@ unp_connect2(struct socket *so, struct socket *so2, int req)
static void
unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
{
- struct socket *so;
+ struct socket *so, *so2;
+ int freed __unused;
KASSERT(unp2 != NULL, ("unp_disconnect: unp2 == NULL"));
- UNP_LINK_WLOCK_ASSERT();
UNP_PCB_LOCK_ASSERT(unp);
UNP_PCB_LOCK_ASSERT(unp2);
+ if (unp->unp_conn == NULL && unp2->unp_conn == NULL)
+ return;
+
+ MPASS(unp->unp_conn == unp2);
unp->unp_conn = NULL;
+ so = unp->unp_socket;
+ so2 = unp2->unp_socket;
switch (unp->unp_socket->so_type) {
case SOCK_DGRAM:
+ UNP_REF_LIST_LOCK();
LIST_REMOVE(unp, unp_reflink);
- so = unp->unp_socket;
- SOCK_LOCK(so);
- so->so_state &= ~SS_ISCONNECTED;
- SOCK_UNLOCK(so);
+ UNP_REF_LIST_UNLOCK();
+ if (so) {
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_ISCONNECTED;
+ SOCK_UNLOCK(so);
+ }
break;
case SOCK_STREAM:
case SOCK_SEQPACKET:
- soisdisconnected(unp->unp_socket);
+ if (so)
+ soisdisconnected(so);
+ MPASS(unp2->unp_conn == unp);
unp2->unp_conn = NULL;
- soisdisconnected(unp2->unp_socket);
+ if (so2)
+ soisdisconnected(so2);
break;
}
+ freed = unp_pcb_rele(unp);
+ MPASS(freed == 0);
+ freed = unp_pcb_rele(unp2);
+ MPASS(freed == 0);
}
/*
@@ -1795,7 +2010,7 @@ unp_pcblist(SYSCTL_HANDLER_ARGS)
continue;
}
unp_list[i++] = unp;
- unp->unp_refcount++;
+ unp_pcb_hold(unp);
}
UNP_PCB_UNLOCK(unp);
}
@@ -1807,8 +2022,9 @@ unp_pcblist(SYSCTL_HANDLER_ARGS)
for (i = 0; i < n; i++) {
unp = unp_list[i];
UNP_PCB_LOCK(unp);
- unp->unp_refcount--;
- if (unp->unp_refcount != 0 && unp->unp_gencnt <= gencnt) {
+ freeunp = unp_pcb_rele(unp);
+
+ if (freeunp == 0 && unp->unp_gencnt <= gencnt) {
xu->xu_len = sizeof *xu;
xu->xu_unpp = unp;
/*
@@ -1835,14 +2051,8 @@ unp_pcblist(SYSCTL_HANDLER_ARGS)
sotoxsocket(unp->unp_socket, &xu->xu_socket);
UNP_PCB_UNLOCK(unp);
error = SYSCTL_OUT(req, xu, sizeof *xu);
- } else {
- freeunp = (unp->unp_refcount == 0);
+ } else if (freeunp == 0)
UNP_PCB_UNLOCK(unp);
- if (freeunp) {
- UNP_PCB_LOCK_DESTROY(unp);
- uma_zfree(unp_zone, unp);
- }
- }
}
free(xu, M_TEMP);
if (!error) {
@@ -1879,7 +2089,6 @@ unp_shutdown(struct unpcb *unp)
struct unpcb *unp2;
struct socket *so;
- UNP_LINK_WLOCK_ASSERT();
UNP_PCB_LOCK_ASSERT(unp);
unp2 = unp->unp_conn;
@@ -1896,22 +2105,30 @@ unp_drop(struct unpcb *unp)
{
struct socket *so = unp->unp_socket;
struct unpcb *unp2;
-
- UNP_LINK_WLOCK_ASSERT();
- UNP_PCB_LOCK_ASSERT(unp);
+ int freed;
/*
* Regardless of whether the socket's peer dropped the connection
* with this socket by aborting or disconnecting, POSIX requires
* that ECONNRESET is returned.
*/
- so->so_error = ECONNRESET;
+	/*
+	 * The caller holds a reference, so unp cannot be freed from
+	 * underneath us; that reference is consumed by the unp_pcb_rele()
+	 * below.
+	 */
+
+ UNP_PCB_LOCK(unp);
+ if (so)
+ so->so_error = ECONNRESET;
unp2 = unp->unp_conn;
- if (unp2 == NULL)
- return;
- UNP_PCB_LOCK(unp2);
- unp_disconnect(unp, unp2);
- UNP_PCB_UNLOCK(unp2);
+ if (unp2 == unp) {
+ unp_disconnect(unp, unp2);
+ } else if (unp2 != NULL) {
+ unp_pcb_hold(unp2);
+ unp_pcb_owned_lock2(unp, unp2, freed);
+ unp_disconnect(unp, unp2);
+ if (unp_pcb_rele(unp2) == 0)
+ UNP_PCB_UNLOCK(unp2);
+ }
+ if (unp_pcb_rele(unp) == 0)
+ UNP_PCB_UNLOCK(unp);
}
#ifndef __rtems__
@@ -2053,7 +2270,7 @@ unp_init(void)
return;
#endif
unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, NULL,
- NULL, NULL, UMA_ALIGN_PTR, 0);
+ NULL, NULL, UMA_ALIGN_CACHE, 0);
if (unp_zone == NULL)
panic("unp_init");
uma_zone_set_max(unp_zone, maxsockets);
@@ -2639,13 +2856,15 @@ vfs_unp_reclaim(struct vnode *vp)
{
struct unpcb *unp;
int active;
+ struct mtx *vplock;
ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
KASSERT(vp->v_type == VSOCK,
("vfs_unp_reclaim: vp->v_type != VSOCK"));
active = 0;
- UNP_LINK_WLOCK();
+ vplock = mtx_pool_find(mtxpool_sleep, vp);
+ mtx_lock(vplock);
VOP_UNP_CONNECT(vp, &unp);
if (unp == NULL)
goto done;
@@ -2656,8 +2875,8 @@ vfs_unp_reclaim(struct vnode *vp)
active = 1;
}
UNP_PCB_UNLOCK(unp);
-done:
- UNP_LINK_WUNLOCK();
+ done:
+ mtx_unlock(vplock);
if (active)
vunref(vp);
}