path: root/freebsd/sys/kern
author:     Sebastian Huber <sebastian.huber@embedded-brains.de>    2018-12-20 11:12:40 +0100
committer:  Sebastian Huber <sebastian.huber@embedded-brains.de>    2018-12-20 13:36:34 +0100
commit:     2b2563da953978f63e3e707f758fd600dcd19a32 (patch)
tree:       a207b096c10788192b56025e8187f14d1b5a978d /freebsd/sys/kern
parent:     freebsd/if_cpsw: Port. (diff)
download:   rtems-libbsd-2b2563da953978f63e3e707f758fd600dcd19a32.tar.bz2
Update to FreeBSD head 2018-12-20
Git mirror commit 19a6ceb89dbacf74697d493e48c388767126d418. It includes updates of wpa_supplicant to version 2.7 and of the OpenSSL baseline to version 1.1.1a. Update #3472.
Diffstat (limited to 'freebsd/sys/kern')
-rw-r--r--  freebsd/sys/kern/init_main.c      |  99
-rw-r--r--  freebsd/sys/kern/kern_event.c     | 110
-rw-r--r--  freebsd/sys/kern/kern_intr.c      | 104
-rw-r--r--  freebsd/sys/kern/kern_synch.c     |   2
-rw-r--r--  freebsd/sys/kern/kern_sysctl.c    |  24
-rw-r--r--  freebsd/sys/kern/subr_blist.c     |  55
-rw-r--r--  freebsd/sys/kern/subr_bus.c       |  68
-rw-r--r--  freebsd/sys/kern/subr_rman.c      |  15
-rw-r--r--  freebsd/sys/kern/subr_taskqueue.c |  28
-rw-r--r--  freebsd/sys/kern/subr_unit.c      |  13
-rwxr-xr-x  freebsd/sys/kern/sys_pipe.c       |  27
-rw-r--r--  freebsd/sys/kern/uipc_sockbuf.c   |  47
-rw-r--r--  freebsd/sys/kern/uipc_socket.c    |   3
-rw-r--r--  freebsd/sys/kern/uipc_syscalls.c  |   4
-rw-r--r--  freebsd/sys/kern/uipc_usrreq.c    |   7
15 files changed, 360 insertions(+), 246 deletions(-)
diff --git a/freebsd/sys/kern/init_main.c b/freebsd/sys/kern/init_main.c
index 8fcc314b..c6a9e310 100644
--- a/freebsd/sys/kern/init_main.c
+++ b/freebsd/sys/kern/init_main.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
+#include <sys/imgact.h>
#include <sys/jail.h>
#include <sys/ktr.h>
#include <sys/lock.h>
@@ -428,7 +429,6 @@ null_set_syscall_retval(struct thread *td __unused, int error __unused)
struct sysentvec null_sysvec = {
.sv_size = 0,
.sv_table = NULL,
- .sv_mask = 0,
.sv_errsize = 0,
.sv_errtbl = NULL,
.sv_transtrap = NULL,
@@ -744,15 +744,13 @@ SYSCTL_INT(_kern, OID_AUTO, init_shutdown_timeout,
static void
start_init(void *dummy)
{
- vm_offset_t addr;
- struct execve_args args;
- int options, error;
- size_t pathlen;
+ struct image_args args;
+ int error;
char *var, *path;
char *free_init_path, *tmp_init_path;
- char *ucp, **uap, *arg0, *arg1;
struct thread *td;
struct proc *p;
+ struct vmspace *oldvmspace;
TSENTER(); /* Here so we don't overlap with mi_startup. */
@@ -764,16 +762,6 @@ start_init(void *dummy)
/* Wipe GELI passphrase from the environment. */
kern_unsetenv("kern.geom.eli.passphrase");
- /*
- * Need just enough stack to hold the faked-up "execve()" arguments.
- */
- addr = p->p_sysent->sv_usrstack - PAGE_SIZE;
- if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE, 0,
- VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
- panic("init: couldn't allocate argument space");
- p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
- p->p_vmspace->vm_ssize = 1;
-
if ((var = kern_getenv("init_path")) != NULL) {
strlcpy(init_path, var, sizeof(init_path));
freeenv(var);
@@ -781,58 +769,25 @@ start_init(void *dummy)
free_init_path = tmp_init_path = strdup(init_path, M_TEMP);
while ((path = strsep(&tmp_init_path, ":")) != NULL) {
- pathlen = strlen(path) + 1;
if (bootverbose)
printf("start_init: trying %s\n", path);
- /*
- * Move out the boot flag argument.
- */
- options = 0;
- ucp = (char *)p->p_sysent->sv_usrstack;
- (void)subyte(--ucp, 0); /* trailing zero */
- if (boothowto & RB_SINGLE) {
- (void)subyte(--ucp, 's');
- options = 1;
- }
-#ifdef notyet
- if (boothowto & RB_FASTBOOT) {
- (void)subyte(--ucp, 'f');
- options = 1;
- }
-#endif
-
-#ifdef BOOTCDROM
- (void)subyte(--ucp, 'C');
- options = 1;
-#endif
-
- if (options == 0)
- (void)subyte(--ucp, '-');
- (void)subyte(--ucp, '-'); /* leading hyphen */
- arg1 = ucp;
-
- /*
- * Move out the file name (also arg 0).
- */
- ucp -= pathlen;
- copyout(path, ucp, pathlen);
- arg0 = ucp;
-
- /*
- * Move out the arg pointers.
- */
- uap = (char **)rounddown2((intptr_t)ucp, sizeof(intptr_t));
- (void)suword((caddr_t)--uap, (long)0); /* terminator */
- (void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
- (void)suword((caddr_t)--uap, (long)(intptr_t)arg0);
-
- /*
- * Point at the arguments.
- */
- args.fname = arg0;
- args.argv = uap;
- args.envv = NULL;
+ memset(&args, 0, sizeof(args));
+ error = exec_alloc_args(&args);
+ if (error != 0)
+ panic("%s: Can't allocate space for init arguments %d",
+ __func__, error);
+
+ error = exec_args_add_fname(&args, path, UIO_SYSSPACE);
+ if (error != 0)
+ panic("%s: Can't add fname %d", __func__, error);
+ error = exec_args_add_arg(&args, path, UIO_SYSSPACE);
+ if (error != 0)
+ panic("%s: Can't add argv[0] %d", __func__, error);
+ if (boothowto & RB_SINGLE)
+ error = exec_args_add_arg(&args, "-s", UIO_SYSSPACE);
+ if (error != 0)
+ panic("%s: Can't add argv[0] %d", __func__, error);
/*
* Now try to exec the program. If can't for any reason
@@ -841,7 +796,19 @@ start_init(void *dummy)
* Otherwise, return via fork_trampoline() all the way
* to user mode as init!
*/
- if ((error = sys_execve(td, &args)) == EJUSTRETURN) {
+ KASSERT((td->td_pflags & TDP_EXECVMSPC) == 0,
+ ("nested execve"));
+ oldvmspace = td->td_proc->p_vmspace;
+ error = kern_execve(td, &args, NULL);
+ KASSERT(error != 0,
+ ("kern_execve returned success, not EJUSTRETURN"));
+ if (error == EJUSTRETURN) {
+ if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
+ KASSERT(p->p_vmspace != oldvmspace,
+ ("oldvmspace still used"));
+ vmspace_free(oldvmspace);
+ td->td_pflags &= ~TDP_EXECVMSPC;
+ }
free(free_init_path, M_TEMP);
TSEXIT();
return;
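
The rewritten start_init() above no longer fakes up an argv block on the user stack with subyte()/suword(); it describes the arguments in kernel space and lets kern_execve() do the rest. A condensed sketch of the pattern, with "/sbin/init" standing in for the path taken from init_path:

    struct image_args args;
    int error;

    memset(&args, 0, sizeof(args));
    error = exec_alloc_args(&args);         /* allocate the argument buffers */
    if (error != 0)
        panic("exec_alloc_args: %d", error);
    error = exec_args_add_fname(&args, "/sbin/init", UIO_SYSSPACE);
    if (error == 0)
        error = exec_args_add_arg(&args, "/sbin/init", UIO_SYSSPACE);
    if (error == 0 && (boothowto & RB_SINGLE))
        error = exec_args_add_arg(&args, "-s", UIO_SYSSPACE);
    if (error != 0)
        panic("building init arguments: %d", error);
    error = kern_execve(td, &args, NULL);   /* consumes args; EJUSTRETURN on success */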
diff --git a/freebsd/sys/kern/kern_event.c b/freebsd/sys/kern/kern_event.c
index 2fae2d89..5c75c657 100644
--- a/freebsd/sys/kern/kern_event.c
+++ b/freebsd/sys/kern/kern_event.c
@@ -110,13 +110,13 @@ TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
static int kevent_copyout(void *arg, struct kevent *kevp, int count);
static int kevent_copyin(void *arg, struct kevent *kevp, int count);
static int kqueue_register(struct kqueue *kq, struct kevent *kev,
- struct thread *td, int waitok);
+ struct thread *td, int mflag);
static int kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void kqueue_release(struct kqueue *kq, int locked);
static void kqueue_destroy(struct kqueue *kq);
static void kqueue_drain(struct kqueue *kq, struct thread *td);
static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
- uintptr_t ident, int waitok);
+ uintptr_t ident, int mflag);
static void kqueue_task(void *arg, int pending);
static int kqueue_scan(struct kqueue *kq, int maxevents,
struct kevent_copyops *k_ops,
@@ -165,7 +165,7 @@ static void knote_drop_detached(struct knote *kn, struct thread *td);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static void knote_init(void);
-static struct knote *knote_alloc(int waitok);
+static struct knote *knote_alloc(int mflag);
static void knote_free(struct knote *kn);
static void filt_kqdetach(struct knote *kn);
@@ -571,10 +571,12 @@ knote_fork(struct knlist *list, int pid)
struct kevent kev;
int error;
- if (list == NULL)
+ MPASS(list != NULL);
+ KNL_ASSERT_LOCKED(list);
+ if (SLIST_EMPTY(&list->kl_list))
return;
- list->kl_lock(list->kl_lockarg);
+ memset(&kev, 0, sizeof(kev));
SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
kq = kn->kn_kq;
KQ_LOCK(kq);
@@ -587,10 +589,8 @@ knote_fork(struct knlist *list, int pid)
* The same as knote(), activate the event.
*/
if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
- kn->kn_status |= KN_HASKQLOCK;
if (kn->kn_fop->f_event(kn, NOTE_FORK))
KNOTE_ACTIVATE(kn, 1);
- kn->kn_status &= ~KN_HASKQLOCK;
KQ_UNLOCK(kq);
continue;
}
@@ -621,7 +621,7 @@ knote_fork(struct knlist *list, int pid)
kev.fflags = kn->kn_sfflags;
kev.data = kn->kn_id; /* parent */
kev.udata = kn->kn_kevent.udata;/* preserve udata */
- error = kqueue_register(kq, &kev, NULL, 0);
+ error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
if (error)
kn->kn_fflags |= NOTE_TRACKERR;
@@ -635,17 +635,16 @@ knote_fork(struct knlist *list, int pid)
kev.fflags = kn->kn_sfflags;
kev.data = kn->kn_id; /* parent */
kev.udata = kn->kn_kevent.udata;/* preserve udata */
- error = kqueue_register(kq, &kev, NULL, 0);
+ error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
if (error)
kn->kn_fflags |= NOTE_TRACKERR;
if (kn->kn_fop->f_event(kn, NOTE_FORK))
KNOTE_ACTIVATE(kn, 0);
+ list->kl_lock(list->kl_lockarg);
KQ_LOCK(kq);
kn_leave_flux(kn);
KQ_UNLOCK_FLUX(kq);
- list->kl_lock(list->kl_lockarg);
}
- list->kl_unlock(list->kl_lockarg);
}
#endif /* __rtems__ */
@@ -1352,7 +1351,7 @@ kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
if (!kevp->filter)
continue;
kevp->flags &= ~EV_SYSFLAGS;
- error = kqueue_register(kq, kevp, td, 1);
+ error = kqueue_register(kq, kevp, td, M_WAITOK);
if (error || (kevp->flags & EV_RECEIPT)) {
if (nevents == 0)
return (error);
@@ -1495,12 +1494,11 @@ kqueue_fo_release(int filt)
}
/*
- * A ref to kq (obtained via kqueue_acquire) must be held. waitok will
- * influence if memory allocation should wait. Make sure it is 0 if you
- * hold any mutexes.
+ * A ref to kq (obtained via kqueue_acquire) must be held.
*/
static int
-kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
+kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
+ int mflag)
{
struct filterops *fops;
struct file *fp;
@@ -1530,7 +1528,7 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int wa
* allocation failures are handled in the loop, only
* if the spare knote appears to be actually required.
*/
- tkn = knote_alloc(waitok);
+ tkn = knote_alloc(mflag);
} else {
tkn = NULL;
}
@@ -1546,11 +1544,11 @@ findkn:
goto done;
if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
- kev->ident, 0) != 0) {
+ kev->ident, M_NOWAIT) != 0) {
/* try again */
fdrop(fp, td);
fp = NULL;
- error = kqueue_expand(kq, fops, kev->ident, waitok);
+ error = kqueue_expand(kq, fops, kev->ident, mflag);
if (error)
goto done;
goto findkn;
@@ -1590,8 +1588,11 @@ findkn:
break;
}
} else {
- if ((kev->flags & EV_ADD) == EV_ADD)
- kqueue_expand(kq, fops, kev->ident, waitok);
+ if ((kev->flags & EV_ADD) == EV_ADD) {
+ error = kqueue_expand(kq, fops, kev->ident, mflag);
+ if (error != 0)
+ goto done;
+ }
KQ_LOCK(kq);
@@ -1663,6 +1664,8 @@ findkn:
kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
kn->kn_status = KN_DETACHED;
+ if ((kev->flags & EV_DISABLE) != 0)
+ kn->kn_status |= KN_DISABLED;
kn_enter_flux(kn);
error = knote_attach(kn, kq);
@@ -1698,6 +1701,11 @@ findkn:
KNOTE_ACTIVATE(kn, 1);
}
+ if ((kev->flags & EV_ENABLE) != 0)
+ kn->kn_status &= ~KN_DISABLED;
+ else if ((kev->flags & EV_DISABLE) != 0)
+ kn->kn_status |= KN_DISABLED;
+
/*
* The user may change some filter values after the initial EV_ADD,
* but doing so will not reset any filter which has already been
@@ -1715,19 +1723,17 @@ findkn:
kn->kn_sdata = kev->data;
}
+done_ev_add:
/*
* We can get here with kn->kn_knlist == NULL. This can happen when
* the initial attach event decides that the event is "completed"
- * already. i.e. filt_procattach is called on a zombie process. It
- * will call filt_proc which will remove it from the list, and NULL
+ * already, e.g., filt_procattach() is called on a zombie process. It
+ * will call filt_proc() which will remove it from the list, and NULL
* kn_knlist.
+ *
+ * KN_DISABLED will be stable while the knote is in flux, so the
+ * unlocked read will not race with an update.
*/
-done_ev_add:
- if ((kev->flags & EV_ENABLE) != 0)
- kn->kn_status &= ~KN_DISABLED;
- else if ((kev->flags & EV_DISABLE) != 0)
- kn->kn_status |= KN_DISABLED;
-
if ((kn->kn_status & KN_DISABLED) == 0)
event = kn->kn_fop->f_event(kn, 0);
else
@@ -1815,23 +1821,18 @@ kqueue_schedtask(struct kqueue *kq)
* Expand the kq to make sure we have storage for fops/ident pair.
*
* Return 0 on success (or no work necessary), return errno on failure.
- *
- * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
- * If kqueue_register is called from a non-fd context, there usually/should
- * be no locks held.
*/
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
- int waitok)
+ int mflag)
{
struct klist *list, *tmp_knhash, *to_free;
u_long tmp_knhashmask;
- int size;
- int fd;
- int mflag = waitok ? M_WAITOK : M_NOWAIT;
+ int error, fd, size;
KQ_NOTOWNED(kq);
+ error = 0;
to_free = NULL;
if (fops->f_isfd) {
fd = ident;
@@ -1843,9 +1844,11 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
if (list == NULL)
return ENOMEM;
KQ_LOCK(kq);
- if (kq->kq_knlistsize > fd) {
+ if ((kq->kq_state & KQ_CLOSING) != 0) {
+ to_free = list;
+ error = EBADF;
+ } else if (kq->kq_knlistsize > fd) {
to_free = list;
- list = NULL;
} else {
if (kq->kq_knlist != NULL) {
bcopy(kq->kq_knlist, list,
@@ -1863,12 +1866,16 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
}
} else {
if (kq->kq_knhashmask == 0) {
- tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
- &tmp_knhashmask);
+ tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
+ &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
+ HASH_WAITOK : HASH_NOWAIT);
if (tmp_knhash == NULL)
- return ENOMEM;
+ return (ENOMEM);
KQ_LOCK(kq);
- if (kq->kq_knhashmask == 0) {
+ if ((kq->kq_state & KQ_CLOSING) != 0) {
+ to_free = tmp_knhash;
+ error = EBADF;
+ } else if (kq->kq_knhashmask == 0) {
kq->kq_knhash = tmp_knhash;
kq->kq_knhashmask = tmp_knhashmask;
} else {
@@ -1880,7 +1887,7 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
free(to_free, M_KQUEUE);
KQ_NOTOWNED(kq);
- return 0;
+ return (error);
}
static void
@@ -1950,7 +1957,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
asbt = -1;
} else
asbt = 0;
- marker = knote_alloc(1);
+ marker = knote_alloc(M_WAITOK);
marker->kn_status = KN_MARKER;
KQ_LOCK(kq);
@@ -2463,10 +2470,8 @@ knote(struct knlist *list, long hint, int lockflags)
KNOTE_ACTIVATE(kn, 1);
KQ_UNLOCK_FLUX(kq);
} else {
- kn->kn_status |= KN_HASKQLOCK;
if (kn->kn_fop->f_event(kn, hint))
KNOTE_ACTIVATE(kn, 1);
- kn->kn_status &= ~KN_HASKQLOCK;
KQ_UNLOCK(kq);
}
}
@@ -2807,6 +2812,8 @@ knote_attach(struct knote *kn, struct kqueue *kq)
KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
KQ_OWNED(kq);
+ if ((kq->kq_state & KQ_CLOSING) != 0)
+ return (EBADF);
if (kn->kn_fop->f_isfd) {
if (kn->kn_id >= kq->kq_knlistsize)
return (ENOMEM);
@@ -2902,11 +2909,10 @@ knote_init(void)
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
static struct knote *
-knote_alloc(int waitok)
+knote_alloc(int mflag)
{
- return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
- M_ZERO));
+ return (uma_zalloc(knote_zone, mflag | M_ZERO));
}
static void
@@ -2920,7 +2926,7 @@ knote_free(struct knote *kn)
* Register the kev w/ the kq specified by fd.
*/
int
-kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
+kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
{
struct kqueue *kq;
struct file *fp;
@@ -2933,7 +2939,7 @@ kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
if ((error = kqueue_acquire(fp, &kq)) != 0)
goto noacquire;
- error = kqueue_register(kq, kev, td, waitok);
+ error = kqueue_register(kq, kev, td, mflag);
kqueue_release(kq, 0);
noacquire:
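
Throughout kern_event.c, the int waitok parameter threaded through kqueue_register(), kqueue_expand(), knote_alloc() and kqfd_register() is replaced by a malloc flag (M_WAITOK or M_NOWAIT) that is passed straight through to the allocators, removing the per-call translation. In sketch form:

    /* Before: a boolean converted at each allocation site. */
    int mflag = waitok ? M_WAITOK : M_NOWAIT;
    tkn = uma_zalloc(knote_zone, mflag | M_ZERO);

    /* After: callers state the malloc flag directly. */
    tkn = knote_alloc(M_NOWAIT);    /* e.g. knote_fork(), called with locks held */
    marker = knote_alloc(M_WAITOK); /* e.g. kqueue_scan(), safe to sleep */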
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index 04914e93..4e311cb8 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -756,6 +756,28 @@ intr_event_barrier(struct intr_event *ie)
atomic_thread_fence_acq();
}
+static void
+intr_handler_barrier(struct intr_handler *handler)
+{
+ struct intr_event *ie;
+
+ ie = handler->ih_event;
+ mtx_assert(&ie->ie_lock, MA_OWNED);
+ KASSERT((handler->ih_flags & IH_DEAD) == 0,
+ ("update for a removed handler"));
+
+ if (ie->ie_thread == NULL) {
+ intr_event_barrier(ie);
+ return;
+ }
+ if ((handler->ih_flags & IH_CHANGED) == 0) {
+ handler->ih_flags |= IH_CHANGED;
+ intr_event_schedule_thread(ie);
+ }
+ while ((handler->ih_flags & IH_CHANGED) != 0)
+ msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
+}
+
/*
* Sleep until an ithread finishes executing an interrupt handler.
*
@@ -879,7 +901,50 @@ intr_event_remove_handler(void *cookie)
return (0);
}
+int
+intr_event_suspend_handler(void *cookie)
+{
+ struct intr_handler *handler = (struct intr_handler *)cookie;
+ struct intr_event *ie;
+
+ if (handler == NULL)
+ return (EINVAL);
+ ie = handler->ih_event;
+ KASSERT(ie != NULL,
+ ("interrupt handler \"%s\" has a NULL interrupt event",
+ handler->ih_name));
+ mtx_lock(&ie->ie_lock);
+ handler->ih_flags |= IH_SUSP;
+ intr_handler_barrier(handler);
+ mtx_unlock(&ie->ie_lock);
+ return (0);
+}
+
+int
+intr_event_resume_handler(void *cookie)
+{
+ struct intr_handler *handler = (struct intr_handler *)cookie;
+ struct intr_event *ie;
+
+ if (handler == NULL)
+ return (EINVAL);
+ ie = handler->ih_event;
+ KASSERT(ie != NULL,
+ ("interrupt handler \"%s\" has a NULL interrupt event",
+ handler->ih_name));
+
+ /*
+ * intr_handler_barrier() acts not only as a barrier,
+ * it also allows checking for any pending interrupts.
+ */
+ mtx_lock(&ie->ie_lock);
+ handler->ih_flags &= ~IH_SUSP;
+ intr_handler_barrier(handler);
+ mtx_unlock(&ie->ie_lock);
+ return (0);
+}
#endif /* __rtems__ */
+
static int
intr_event_schedule_thread(struct intr_event *ie)
{
@@ -1068,10 +1133,21 @@ intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
*/
ihp = ih;
+ if ((ih->ih_flags & IH_CHANGED) != 0) {
+ mtx_lock(&ie->ie_lock);
+ ih->ih_flags &= ~IH_CHANGED;
+ wakeup(ih);
+ mtx_unlock(&ie->ie_lock);
+ }
+
/* Skip filter only handlers */
if (ih->ih_handler == NULL)
continue;
+ /* Skip suspended handlers */
+ if ((ih->ih_flags & IH_SUSP) != 0)
+ continue;
+
/*
* For software interrupt threads, we only execute
* handlers that have their need flag set. Hardware
@@ -1255,8 +1331,9 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
struct intr_handler *ih;
struct trapframe *oldframe;
struct thread *td;
- int ret, thread;
int phase;
+ int ret;
+ bool filter, thread;
td = curthread;
@@ -1275,7 +1352,8 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
* a trapframe as its argument.
*/
td->td_intr_nesting_level++;
- thread = 0;
+ filter = false;
+ thread = false;
ret = 0;
critical_enter();
oldframe = td->td_intr_frame;
@@ -1291,8 +1369,10 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
atomic_thread_fence_seq_cst();
CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
+ if ((ih->ih_flags & IH_SUSP) != 0)
+ continue;
if (ih->ih_filter == NULL) {
- thread = 1;
+ thread = true;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
@@ -1307,24 +1387,25 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
(ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
("%s: incorrect return value %#x from %s", __func__, ret,
ih->ih_name));
+ filter = filter || ret == FILTER_HANDLED;
- /*
+ /*
* Wrapper handler special handling:
*
- * in some particular cases (like pccard and pccbb),
+ * in some particular cases (like pccard and pccbb),
* the _real_ device handler is wrapped in a couple of
* functions - a filter wrapper and an ithread wrapper.
- * In this case (and just in this case), the filter wrapper
+ * In this case (and just in this case), the filter wrapper
* could ask the system to schedule the ithread and mask
* the interrupt source if the wrapped handler is composed
* of just an ithread handler.
*
- * TODO: write a generic wrapper to avoid people rolling
- * their own
+ * TODO: write a generic wrapper to avoid people rolling
+ * their own.
*/
if (!thread) {
if (ret == FILTER_SCHEDULE_THREAD)
- thread = 1;
+ thread = true;
}
}
atomic_add_rel_int(&ie->ie_active[phase], -1);
@@ -1348,6 +1429,11 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
}
critical_exit();
td->td_intr_nesting_level--;
+#ifdef notyet
+ /* The interrupt is not acknowledged by any filter and has no ithread. */
+ if (!thread && !filter)
+ return (EINVAL);
+#endif
return (0);
}
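
The new intr_handler_barrier() guarantees that a flag update (IH_SUSP here) is visible to the handler's executor before it returns: with no ithread it reduces to intr_event_barrier(); otherwise it marks the handler IH_CHANGED, schedules the ithread, and sleeps until intr_event_execute_handlers() clears the flag and calls wakeup(). A hedged sketch of driver-side use, where cookie comes from intr_event_add_handler() and reprogram_device() is a hypothetical helper:

    error = intr_event_suspend_handler(cookie);    /* sets IH_SUSP, then barrier */
    if (error == 0) {
        reprogram_device(sc);    /* hypothetical: the handler cannot run here */
        error = intr_event_resume_handler(cookie); /* clears IH_SUSP, then barrier */
    }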
diff --git a/freebsd/sys/kern/kern_synch.c b/freebsd/sys/kern/kern_synch.c
index 9c0d1206..2597f91d 100644
--- a/freebsd/sys/kern/kern_synch.c
+++ b/freebsd/sys/kern/kern_synch.c
@@ -466,7 +466,7 @@ mi_switch(int flags, struct thread *newtd)
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#ifdef KDTRACE_HOOKS
- if (__predict_false(sdt_probes_enabled) &&
+ if (SDT_PROBES_ENABLED() &&
((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
(flags & SW_TYPE_MASK) == SWT_NEEDRESCHED)))
SDT_PROBE0(sched, , , preempt);
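
SDT_PROBES_ENABLED() hides the open-coded predicate behind a macro; at the time of this merge it expands to roughly the expression it replaces:

    #define SDT_PROBES_ENABLED() __predict_false(sdt_probes_enabled)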
diff --git a/freebsd/sys/kern/kern_sysctl.c b/freebsd/sys/kern/kern_sysctl.c
index ea9c1821..dc7c4c72 100644
--- a/freebsd/sys/kern/kern_sysctl.c
+++ b/freebsd/sys/kern/kern_sysctl.c
@@ -552,10 +552,10 @@ sysctl_unregister_oid(struct sysctl_oid *oidp)
int error;
SYSCTL_ASSERT_WLOCKED();
- error = ENOENT;
if (oidp->oid_number == OID_AUTO) {
error = EINVAL;
} else {
+ error = ENOENT;
SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
if (p == oidp) {
SLIST_REMOVE(oidp->oid_parent, oidp,
@@ -571,8 +571,10 @@ sysctl_unregister_oid(struct sysctl_oid *oidp)
* being unloaded afterwards. It should not be a panic()
* for normal use.
*/
- if (error)
- printf("%s: failed to unregister sysctl\n", __func__);
+ if (error) {
+ printf("%s: failed(%d) to unregister sysctl(%s)\n",
+ __func__, error, oidp->oid_name);
+ }
}
/* Initialize a new context to keep track of dynamically added sysctls. */
@@ -1714,13 +1716,13 @@ sysctl_usec_to_sbintime(SYSCTL_HANDLER_ARGS)
sbintime_t sb;
tt = *(int64_t *)arg1;
- sb = ustosbt(tt);
+ sb = sbttous(tt);
error = sysctl_handle_64(oidp, &sb, 0, req);
if (error || !req->newptr)
return (error);
- tt = sbttous(sb);
+ tt = ustosbt(sb);
*(int64_t *)arg1 = tt;
return (0);
@@ -1737,13 +1739,13 @@ sysctl_msec_to_sbintime(SYSCTL_HANDLER_ARGS)
sbintime_t sb;
tt = *(int64_t *)arg1;
- sb = mstosbt(tt);
+ sb = sbttoms(tt);
error = sysctl_handle_64(oidp, &sb, 0, req);
if (error || !req->newptr)
return (error);
- tt = sbttoms(sb);
+ tt = mstosbt(sb);
*(int64_t *)arg1 = tt;
return (0);
@@ -1782,7 +1784,7 @@ sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
return (0);
if (req->newlen - req->newidx < l)
return (EINVAL);
- bcopy((char *)req->newptr + req->newidx, p, l);
+ bcopy((const char *)req->newptr + req->newidx, p, l);
req->newidx += l;
return (0);
}
@@ -1919,7 +1921,7 @@ sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
return (EINVAL);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"sysctl_new_user()");
- error = copyin((char *)req->newptr + req->newidx, p, l);
+ error = copyin((const char *)req->newptr + req->newidx, p, l);
req->newidx += l;
return (error);
}
@@ -2153,8 +2155,8 @@ sys___sysctl(struct thread *td, struct sysctl_args *uap)
*/
int
userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
- size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
- int flags)
+ size_t *oldlenp, int inkernel, const void *new, size_t newlen,
+ size_t *retval, int flags)
{
int error = 0, memlocked;
struct sysctl_req req;
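
The two sbintime handlers had their conversions reversed: the in-kernel value (*arg1) is an sbintime_t, so it must be converted to microseconds or milliseconds with sbttous()/sbttoms() when read, and a newly written value converted back with ustosbt()/mstosbt(). Since sbintime_t is a 32.32 fixed-point count of seconds, the conversions are approximately (simplified; the real inlines in sys/time.h are arranged to preserve precision):

    us = (1000000 * sb) >> 32;                  /* sbttous() */
    sb = us * (((int64_t)1 << 32) / 1000000);   /* ustosbt(), approximately */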
diff --git a/freebsd/sys/kern/subr_blist.c b/freebsd/sys/kern/subr_blist.c
index 79a5a7b4..807a7f3c 100644
--- a/freebsd/sys/kern/subr_blist.c
+++ b/freebsd/sys/kern/subr_blist.c
@@ -297,9 +297,9 @@ blist_alloc(blist_t bl, daddr_t count)
* This loop iterates at most twice. An allocation failure in the
* first iteration leads to a second iteration only if the cursor was
* non-zero. When the cursor is zero, an allocation failure will
- * reduce the hint, stopping further iterations.
+ * stop further iterations.
*/
- while (count <= bl->bl_root->bm_bighint) {
+ for (;;) {
blk = blst_meta_alloc(bl->bl_root, bl->bl_cursor, count,
bl->bl_radix);
if (blk != SWAPBLK_NONE) {
@@ -308,10 +308,10 @@ blist_alloc(blist_t bl, daddr_t count)
if (bl->bl_cursor == bl->bl_blocks)
bl->bl_cursor = 0;
return (blk);
- }
+ } else if (bl->bl_cursor == 0)
+ return (SWAPBLK_NONE);
bl->bl_cursor = 0;
}
- return (SWAPBLK_NONE);
}
/*
@@ -646,14 +646,14 @@ blst_next_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
/*
* BLST_LEAF_ALLOC() - allocate at a leaf in the radix tree (a bitmap).
*
- * This is the core of the allocator and is optimized for the
- * BLIST_BMAP_RADIX block allocation case. Otherwise, execution
- * time is proportional to log2(count) + bitpos time.
+ * This function is the core of the allocator. Its execution time is
+ * proportional to log(count), plus height of the tree if the allocation
+ * crosses a leaf boundary.
*/
static daddr_t
blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
{
- u_daddr_t mask;
+ u_daddr_t cursor_mask, mask;
int count1, hi, lo, num_shifts, range1, range_ext;
range1 = 0;
@@ -663,14 +663,14 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
while ((-mask & ~mask) != 0 && num_shifts > 0) {
/*
* If bit i is set in mask, then bits in [i, i+range1] are set
- * in scan->bm_bitmap. The value of range1 is equal to
- * count1 >> num_shifts. Grow range and reduce num_shifts to 0,
- * while preserving these invariants. The updates to mask leave
- * fewer bits set, but each bit that remains set represents a
- * longer string of consecutive bits set in scan->bm_bitmap.
- * If more updates to mask cannot clear more bits, because mask
- * is partitioned with all 0 bits preceding all 1 bits, the loop
- * terminates immediately.
+ * in scan->bm_bitmap. The value of range1 is equal to count1
+ * >> num_shifts. Grow range1 and reduce num_shifts to 0,
+ * while preserving these invariants. The updates to mask
+ * leave fewer bits set, but each bit that remains set
+ * represents a longer string of consecutive bits set in
+ * scan->bm_bitmap. If more updates to mask cannot clear more
+ * bits, because mask is partitioned with all 0 bits preceding
+ * all 1 bits, the loop terminates immediately.
*/
num_shifts--;
range_ext = range1 + ((count1 >> num_shifts) & 1);
@@ -693,9 +693,22 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
}
/* Discard any candidates that appear before blk. */
- mask &= (u_daddr_t)-1 << (blk & BLIST_BMAP_MASK);
- if (mask == 0)
- return (SWAPBLK_NONE);
+ if ((blk & BLIST_BMAP_MASK) != 0) {
+ cursor_mask = mask & bitrange(0, blk & BLIST_BMAP_MASK);
+ if (cursor_mask != 0) {
+ mask ^= cursor_mask;
+ if (mask == 0)
+ return (SWAPBLK_NONE);
+
+ /*
+ * Bighint change for last block allocation cannot
+ * assume that any other blocks are allocated, so the
+ * bighint cannot be reduced much.
+ */
+ range1 = BLIST_MAX_ALLOC - 1;
+ }
+ blk &= ~BLIST_BMAP_MASK;
+ }
/*
* The least significant set bit in mask marks the start of the first
@@ -736,7 +749,7 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
}
/* Clear the allocated bits from this leaf. */
scan->bm_bitmap &= ~mask;
- return ((blk & ~BLIST_BMAP_MASK) + lo);
+ return (blk + lo);
}
/*
@@ -766,6 +779,8 @@ blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix)
/* Discard any candidates that appear before cursor. */
digit = (cursor / radix) & BLIST_META_MASK;
mask &= (u_daddr_t)-1 << digit;
+ if (mask == 0)
+ return (SWAPBLK_NONE);
/*
* If the first try is for a block that includes the cursor, pre-undo
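
In the rewritten cursor handling of blst_leaf_alloc() above, cursor_mask selects the candidate bits below the cursor position (bits [0, blk & BLIST_BMAP_MASK)); if any are discarded, range1 is raised so the leaf's bighint is only conservatively reduced. bitrange(n, count), used to build that mask, is defined earlier in this file roughly as:

    static inline u_daddr_t
    bitrange(int n, int count)
    {
        /* count consecutive 1-bits starting at bit n */
        return (((u_daddr_t)-1 << n) &
            ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - (n + count))));
    }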
diff --git a/freebsd/sys/kern/subr_bus.c b/freebsd/sys/kern/subr_bus.c
index 391b2ed6..a87c02a5 100644
--- a/freebsd/sys/kern/subr_bus.c
+++ b/freebsd/sys/kern/subr_bus.c
@@ -2821,6 +2821,16 @@ device_set_devclass_fixed(device_t dev, const char *classname)
}
/**
+ * @brief Query the device to determine if it's of a fixed devclass
+ * @see device_set_devclass_fixed()
+ */
+bool
+device_is_devclass_fixed(device_t dev)
+{
+ return ((dev->flags & DF_FIXEDCLASS) != 0);
+}
+
+/**
* @brief Set the driver of a device
*
* @retval 0 success
@@ -4101,6 +4111,36 @@ bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq,
}
/**
+ * @brief Helper function for implementing BUS_SUSPEND_INTR().
+ *
+ * This simple implementation of BUS_SUSPEND_INTR() simply calls the
+ * BUS_SUSPEND_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_SUSPEND_INTR(dev->parent, child, irq));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_RESUME_INTR().
+ *
+ * This simple implementation of BUS_RESUME_INTR() simply calls the
+ * BUS_RESUME_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_RESUME_INTR(dev->parent, child, irq));
+ return (EINVAL);
+}
+
+/**
* @brief Helper function for implementing BUS_ADJUST_RESOURCE().
*
* This simple implementation of BUS_ADJUST_RESOURCE() simply calls the
@@ -4668,6 +4708,34 @@ bus_teardown_intr(device_t dev, struct resource *r, void *cookie)
}
/**
+ * @brief Wrapper function for BUS_SUSPEND_INTR().
+ *
+ * This function simply calls the BUS_SUSPEND_INTR() method of the
+ * parent of @p dev.
+ */
+int
+bus_suspend_intr(device_t dev, struct resource *r)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_SUSPEND_INTR(dev->parent, dev, r));
+}
+
+/**
+ * @brief Wrapper function for BUS_RESUME_INTR().
+ *
+ * This function simply calls the BUS_RESUME_INTR() method of the
+ * parent of @p dev.
+ */
+int
+bus_resume_intr(device_t dev, struct resource *r)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_RESUME_INTR(dev->parent, dev, r));
+}
+
+/**
* @brief Wrapper function for BUS_BIND_INTR().
*
* This function simply calls the BUS_BIND_INTR() method of the
diff --git a/freebsd/sys/kern/subr_rman.c b/freebsd/sys/kern/subr_rman.c
index 75f22dd5..e307df46 100644
--- a/freebsd/sys/kern/subr_rman.c
+++ b/freebsd/sys/kern/subr_rman.c
@@ -96,6 +96,7 @@ struct resource_i {
rman_res_t r_end; /* index of the last entry (inclusive) */
u_int r_flags;
void *r_virtual; /* virtual address of this resource */
+ void *r_irq_cookie; /* interrupt cookie for this (interrupt) resource */
device_t r_dev; /* device which has allocated this resource */
struct rman *r_rm; /* resource manager from whence this came */
int r_rid; /* optional rid for this resource. */
@@ -871,6 +872,20 @@ rman_get_virtual(struct resource *r)
}
void
+rman_set_irq_cookie(struct resource *r, void *c)
+{
+
+ r->__r_i->r_irq_cookie = c;
+}
+
+void *
+rman_get_irq_cookie(struct resource *r)
+{
+
+ return (r->__r_i->r_irq_cookie);
+}
+
+void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
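
The new r_irq_cookie field gives the suspend/resume path a place to find the interrupt cookie when all a caller has is the struct resource *. A hedged sketch of how a bus implementation might wire up the new accessors:

    rman_set_irq_cookie(r, cookie);    /* saved when the handler is set up */
    /* ... later, with only the resource at hand ... */
    error = intr_event_suspend_handler(rman_get_irq_cookie(r));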
diff --git a/freebsd/sys/kern/subr_taskqueue.c b/freebsd/sys/kern/subr_taskqueue.c
index 60057f78..39d9f939 100644
--- a/freebsd/sys/kern/subr_taskqueue.c
+++ b/freebsd/sys/kern/subr_taskqueue.c
@@ -375,13 +375,13 @@ taskqueue_task_nop_fn(void *context, int pending)
* have begun execution. Tasks queued during execution of
* this function are ignored.
*/
-static void
+static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
struct task t_barrier;
if (STAILQ_EMPTY(&queue->tq_queue))
- return;
+ return (0);
/*
* Enqueue our barrier after all current tasks, but with
@@ -401,6 +401,7 @@ taskqueue_drain_tq_queue(struct taskqueue *queue)
*/
while (t_barrier.ta_pending != 0)
TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
+ return (1);
}
/*
@@ -408,13 +409,13 @@ taskqueue_drain_tq_queue(struct taskqueue *queue)
* complete. Tasks that begin execution during the execution
* of this function are ignored.
*/
-static void
+static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
struct taskqueue_busy tb_marker, *tb_first;
if (TAILQ_EMPTY(&queue->tq_active))
- return;
+ return (0);
/* Block taskq_terminate().*/
queue->tq_callouts++;
@@ -441,6 +442,7 @@ taskqueue_drain_tq_active(struct taskqueue *queue)
queue->tq_callouts--;
if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
wakeup_one(queue->tq_threads);
+ return (1);
}
void
@@ -615,8 +617,8 @@ taskqueue_drain_all(struct taskqueue *queue)
#endif /* __rtems__ */
TQ_LOCK(queue);
- taskqueue_drain_tq_queue(queue);
- taskqueue_drain_tq_active(queue);
+ (void)taskqueue_drain_tq_queue(queue);
+ (void)taskqueue_drain_tq_active(queue);
TQ_UNLOCK(queue);
}
@@ -645,6 +647,20 @@ taskqueue_drain_timeout(struct taskqueue *queue,
TQ_UNLOCK(queue);
}
+void
+taskqueue_quiesce(struct taskqueue *queue)
+{
+ int ret;
+
+ TQ_LOCK(queue);
+ do {
+ ret = taskqueue_drain_tq_queue(queue);
+ if (ret == 0)
+ ret = taskqueue_drain_tq_active(queue);
+ } while (ret != 0);
+ TQ_UNLOCK(queue);
+}
+
static void
taskqueue_swi_enqueue(void *context)
{
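
Returning an int from the two drain helpers lets the new taskqueue_quiesce() iterate until one full pass finds the queue both empty and idle, so tasks enqueued while waiting are also covered; taskqueue_drain_all() remains a single pass that ignores them:

    taskqueue_quiesce(tq);      /* returns only once tq is empty and idle */
    taskqueue_drain_all(tq);    /* one pass; later enqueues are not waited for */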
diff --git a/freebsd/sys/kern/subr_unit.c b/freebsd/sys/kern/subr_unit.c
index 426253dc..c4bdea34 100644
--- a/freebsd/sys/kern/subr_unit.c
+++ b/freebsd/sys/kern/subr_unit.c
@@ -100,6 +100,19 @@ static struct mtx unitmtx;
MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
+#ifdef UNR64_LOCKED
+uint64_t
+alloc_unr64(struct unrhdr64 *unr64)
+{
+ uint64_t item;
+
+ mtx_lock(&unitmtx);
+ item = unr64->counter++;
+ mtx_unlock(&unitmtx);
+ return (item);
+}
+#endif
+
#else /* ...USERLAND */
#include <bitstring.h>
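
struct unrhdr64 is a deliberately simple unit-number source: a 64-bit counter that only moves forward, with no recycling and hence no free list; the UNR64_LOCKED variant added above is the fallback for configurations without 64-bit atomics. A hedged sketch of the API (sys_pipe.c below is a real user), with example_unr a hypothetical instance:

    static struct unrhdr64 example_unr;
    uint64_t ino;

    new_unrhdr64(&example_unr, 1);      /* first value handed out is 1 */
    ino = alloc_unr64(&example_unr);    /* monotonic; no 64-bit free counterpart */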
diff --git a/freebsd/sys/kern/sys_pipe.c b/freebsd/sys/kern/sys_pipe.c
index b6616271..050d63a4 100755
--- a/freebsd/sys/kern/sys_pipe.c
+++ b/freebsd/sys/kern/sys_pipe.c
@@ -294,7 +294,7 @@ static int pipe_zone_init(void *mem, int size, int flags);
static void pipe_zone_fini(void *mem, int size);
static uma_zone_t pipe_zone;
-static struct unrhdr *pipeino_unr;
+static struct unrhdr64 pipeino_unr;
static dev_t pipedev_ino;
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
@@ -307,8 +307,7 @@ pipeinit(void *dummy __unused)
pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
UMA_ALIGN_PTR, 0);
KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
- pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
- KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
+ new_unrhdr64(&pipeino_unr, 1);
pipedev_ino = devfs_alloc_cdp_inode();
KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}
@@ -444,8 +443,6 @@ pipe_dtor(struct pipe *dpipe)
funsetown(&peer->pipe_sigio);
pipeclose(peer);
}
- if (ino != 0 && ino != (ino_t)-1)
- free_unr(pipeino_unr, ino);
}
/*
@@ -730,7 +727,7 @@ pipe_create(struct pipe *pipe, int backing)
(void)pipespace_new(pipe, PIPE_SIZE);
}
- pipe->pipe_ino = -1;
+ pipe->pipe_ino = alloc_unr64(&pipeino_unr);
}
/* ARGSUSED */
@@ -1762,7 +1759,6 @@ static int
pipe_stat(struct pipe *pipe, struct stat *ub)
{
#endif /* __rtems__ */
- int new_unr;
#ifdef MAC
int error;
#endif
@@ -1789,23 +1785,6 @@ pipe_stat(struct pipe *pipe, struct stat *ub)
#endif /* __rtems__ */
}
- /*
- * Lazily allocate an inode number for the pipe. Most pipe
- * users do not call fstat(2) on the pipe, which means that
- * postponing the inode allocation until it is must be
- * returned to userland is useful. If alloc_unr failed,
- * assign st_ino zero instead of returning an error.
- * Special pipe_ino values:
- * -1 - not yet initialized;
- * 0 - alloc_unr failed, return 0 as st_ino forever.
- */
- if (pipe->pipe_ino == (ino_t)-1) {
- new_unr = alloc_unr(pipeino_unr);
- if (new_unr != -1)
- pipe->pipe_ino = new_unr;
- else
- pipe->pipe_ino = 0;
- }
PIPE_UNLOCK(pipe);
#ifndef __rtems__
diff --git a/freebsd/sys/kern/uipc_sockbuf.c b/freebsd/sys/kern/uipc_sockbuf.c
index cf99c615..0830206a 100644
--- a/freebsd/sys/kern/uipc_sockbuf.c
+++ b/freebsd/sys/kern/uipc_sockbuf.c
@@ -1230,53 +1230,6 @@ sbdrop(struct sockbuf *sb, int len)
m_freem(mfree);
}
-/*
- * Maintain a pointer and offset pair into the socket buffer mbuf chain to
- * avoid traversal of the entire socket buffer for larger offsets.
- */
-struct mbuf *
-sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
-{
- struct mbuf *m, *ret;
-
- KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
- KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
- KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));
-
- /*
- * Is off below stored offset? Happens on retransmits.
- * Just return, we can't help here.
- */
- if (sb->sb_sndptroff > off) {
- *moff = off;
- return (sb->sb_mb);
- }
-
- /* Return closest mbuf in chain for current offset. */
- *moff = off - sb->sb_sndptroff;
- m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
- if (*moff == m->m_len) {
- *moff = 0;
- sb->sb_sndptroff += m->m_len;
- m = ret = m->m_next;
- KASSERT(ret->m_len > 0,
- ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
- }
-
- /* Advance by len to be as close as possible for the next transmit. */
- for (off = off - sb->sb_sndptroff + len - 1;
- off > 0 && m != NULL && off >= m->m_len;
- m = m->m_next) {
- sb->sb_sndptroff += m->m_len;
- off -= m->m_len;
- }
- if (off > 0 && m == NULL)
- panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
- sb->sb_sndptr = m;
-
- return (ret);
-}
-
struct mbuf *
#ifndef __rtems__
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
diff --git a/freebsd/sys/kern/uipc_socket.c b/freebsd/sys/kern/uipc_socket.c
index aa045cd9..380c97dd 100644
--- a/freebsd/sys/kern/uipc_socket.c
+++ b/freebsd/sys/kern/uipc_socket.c
@@ -4063,6 +4063,7 @@ void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
+ bzero(xso, sizeof(*xso));
xso->xso_len = sizeof *xso;
xso->xso_so = (uintptr_t)so;
xso->so_type = so->so_type;
@@ -4085,8 +4086,6 @@ sotoxsocket(struct socket *so, struct xsocket *xso)
xso->so_incqlen = so->sol_incqlen;
xso->so_qlimit = so->sol_qlimit;
xso->so_oobmark = 0;
- bzero(&xso->so_snd, sizeof(xso->so_snd));
- bzero(&xso->so_rcv, sizeof(xso->so_rcv));
} else {
xso->so_state |= so->so_qstate;
xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0;
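
Zeroing the whole structure up front replaces the piecemeal bzero() of so_snd and so_rcv and closes the remaining holes (padding, fields written only on some paths) before the xsocket is copied out to userland; the M_ZERO added to the unp_pcblist() allocation below serves the same purpose. The pattern, in sketch form:

    bzero(xso, sizeof(*xso));       /* nothing uninitialized can leak via copyout() */
    xso->xso_len = sizeof(*xso);
    /* ... fill only the fields that apply to this socket ... */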
diff --git a/freebsd/sys/kern/uipc_syscalls.c b/freebsd/sys/kern/uipc_syscalls.c
index f338629e..529268a9 100644
--- a/freebsd/sys/kern/uipc_syscalls.c
+++ b/freebsd/sys/kern/uipc_syscalls.c
@@ -643,9 +643,7 @@ sys_accept4(td, uap)
#ifdef COMPAT_OLDSOCK
int
-oaccept(td, uap)
- struct thread *td;
- struct accept_args *uap;
+oaccept(struct thread *td, struct oaccept_args *uap)
{
return (accept1(td, uap->s, uap->name, uap->anamelen,
diff --git a/freebsd/sys/kern/uipc_usrreq.c b/freebsd/sys/kern/uipc_usrreq.c
index c1885ed6..6b34dcb8 100644
--- a/freebsd/sys/kern/uipc_usrreq.c
+++ b/freebsd/sys/kern/uipc_usrreq.c
@@ -551,6 +551,7 @@ uipc_attach(struct socket *so, int proto, struct thread *td)
UNP_LINK_WLOCK();
unp->unp_gencnt = ++unp_gencnt;
+ unp->unp_ino = ++unp_ino;
unp_count++;
switch (so->so_type) {
case SOCK_STREAM:
@@ -1434,12 +1435,8 @@ uipc_sense(struct socket *so, struct stat *sb)
KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
sb->st_blksize = so->so_snd.sb_hiwat;
- UNP_PCB_LOCK(unp);
sb->st_dev = NODEV;
- if (unp->unp_ino == 0)
- unp->unp_ino = (++unp_ino == 0) ? ++unp_ino : unp_ino;
sb->st_ino = unp->unp_ino;
- UNP_PCB_UNLOCK(unp);
return (0);
}
@@ -1993,7 +1990,7 @@ unp_pcblist(SYSCTL_HANDLER_ARGS)
/*
* OK, now we're committed to doing something.
*/
- xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK);
+ xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
UNP_LINK_RLOCK();
gencnt = unp_gencnt;
n = unp_count;