-rw-r--r--  freebsd/sys/fs/devfs/devfs_vnops.c  1931
-rw-r--r--  freebsd/sys/kern/tty.c              2349
-rw-r--r--  freebsd/sys/kern/tty_inq.c           497
-rw-r--r--  freebsd/sys/kern/tty_outq.c          347
-rw-r--r--  freebsd/sys/kern/tty_ttydisc.c      1267
-rw-r--r--  freebsd/sys/sys/cons.h               144
-rw-r--r--  freebsd/sys/sys/serial.h              92
7 files changed, 6627 insertions(+), 0 deletions(-)
diff --git a/freebsd/sys/fs/devfs/devfs_vnops.c b/freebsd/sys/fs/devfs/devfs_vnops.c
new file mode 100644
index 00000000..eb7df0f5
--- /dev/null
+++ b/freebsd/sys/fs/devfs/devfs_vnops.c
@@ -0,0 +1,1931 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2000-2004
+ * Poul-Henning Kamp. All rights reserved.
+ * Copyright (c) 1989, 1992-1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software donated to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kernfs_vnops.c 8.15 (Berkeley) 5/21/95
+ * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * TODO:
+ * mkdir: want it ?
+ */
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/dirent.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/filio.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <rtems/bsd/sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/ttycom.h>
+#include <rtems/bsd/sys/unistd.h>
+#include <sys/vnode.h>
+
+static struct vop_vector devfs_vnodeops;
+static struct vop_vector devfs_specops;
+static struct fileops devfs_ops_f;
+
+#include <fs/devfs/devfs.h>
+#include <fs/devfs/devfs_int.h>
+
+#include <security/mac/mac_framework.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+
+static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
+
+struct mtx devfs_de_interlock;
+MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
+struct sx clone_drain_lock;
+SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
+struct mtx cdevpriv_mtx;
+MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
+
+SYSCTL_DECL(_vfs_devfs);
+
+static int devfs_dotimes;
+SYSCTL_INT(_vfs_devfs, OID_AUTO, dotimes, CTLFLAG_RW,
+ &devfs_dotimes, 0, "Update timestamps on DEVFS with default precision");
+
+/*
+ * Update devfs node timestamp. Note that updates are unlocked and
+ * stat(2) could see partially updated times.
+ */
+static void
+devfs_timestamp(struct timespec *tsp)
+{
+ time_t ts;
+
+ if (devfs_dotimes) {
+ vfs_timestamp(tsp);
+ } else {
+ ts = time_second;
+ if (tsp->tv_sec != ts) {
+ tsp->tv_sec = ts;
+ tsp->tv_nsec = 0;
+ }
+ }
+}
+
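+/*
+ * Resolve the cdev and cdevsw backing a devfs file.  On success a
+ * per-thread reference is held on the device (the caller releases it
+ * with dev_relthread()) and curthread->td_fpop points at the file, so
+ * that cdevpriv lookups work from inside the driver method.
+ */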
+static int
+devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
+ int *ref)
+{
+
+ *dswp = devvn_refthread(fp->f_vnode, devp, ref);
+ if (*devp != fp->f_data) {
+ if (*dswp != NULL)
+ dev_relthread(*devp, *ref);
+ return (ENXIO);
+ }
+ KASSERT((*devp)->si_refcount > 0,
+ ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
+ if (*dswp == NULL)
+ return (ENXIO);
+ curthread->td_fpop = fp;
+ return (0);
+}
+
+int
+devfs_get_cdevpriv(void **datap)
+{
+ struct file *fp;
+ struct cdev_privdata *p;
+ int error;
+
+ fp = curthread->td_fpop;
+ if (fp == NULL)
+ return (EBADF);
+ p = fp->f_cdevpriv;
+ if (p != NULL) {
+ error = 0;
+ *datap = p->cdpd_data;
+ } else
+ error = ENOENT;
+ return (error);
+}
+
+int
+devfs_set_cdevpriv(void *priv, d_priv_dtor_t *priv_dtr)
+{
+ struct file *fp;
+ struct cdev_priv *cdp;
+ struct cdev_privdata *p;
+ int error;
+
+ fp = curthread->td_fpop;
+ if (fp == NULL)
+ return (ENOENT);
+ cdp = cdev2priv((struct cdev *)fp->f_data);
+ p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
+ p->cdpd_data = priv;
+ p->cdpd_dtr = priv_dtr;
+ p->cdpd_fp = fp;
+ mtx_lock(&cdevpriv_mtx);
+ if (fp->f_cdevpriv == NULL) {
+ LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
+ fp->f_cdevpriv = p;
+ mtx_unlock(&cdevpriv_mtx);
+ error = 0;
+ } else {
+ mtx_unlock(&cdevpriv_mtx);
+ free(p, M_CDEVPDATA);
+ error = EBUSY;
+ }
+ return (error);
+}
+
+void
+devfs_destroy_cdevpriv(struct cdev_privdata *p)
+{
+
+ mtx_assert(&cdevpriv_mtx, MA_OWNED);
+ KASSERT(p->cdpd_fp->f_cdevpriv == p,
+	    ("devfs_destroy_cdevpriv %p != %p", p->cdpd_fp->f_cdevpriv, p));
+ p->cdpd_fp->f_cdevpriv = NULL;
+ LIST_REMOVE(p, cdpd_list);
+ mtx_unlock(&cdevpriv_mtx);
+ (p->cdpd_dtr)(p->cdpd_data);
+ free(p, M_CDEVPDATA);
+}
+
+static void
+devfs_fpdrop(struct file *fp)
+{
+ struct cdev_privdata *p;
+
+ mtx_lock(&cdevpriv_mtx);
+ if ((p = fp->f_cdevpriv) == NULL) {
+ mtx_unlock(&cdevpriv_mtx);
+ return;
+ }
+ devfs_destroy_cdevpriv(p);
+}
+
+void
+devfs_clear_cdevpriv(void)
+{
+ struct file *fp;
+
+ fp = curthread->td_fpop;
+ if (fp == NULL)
+ return;
+ devfs_fpdrop(fp);
+}
+
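+/*
+ * Illustrative sketch (editorial note, not part of the original file):
+ * a driver normally pairs devfs_set_cdevpriv() in its d_open method with
+ * devfs_get_cdevpriv() in its other cdevsw methods, so that every open
+ * file descriptor carries its own state, torn down by the destructor on
+ * the descriptor's last close.  The names below are hypothetical:
+ *
+ *	struct foo_state { int unit; };
+ *
+ *	static void
+ *	foo_dtor(void *data)
+ *	{
+ *
+ *		free(data, M_TEMP);
+ *	}
+ *
+ *	static int
+ *	foo_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+ *	{
+ *		struct foo_state *st;
+ *
+ *		st = malloc(sizeof(*st), M_TEMP, M_WAITOK | M_ZERO);
+ *		return (devfs_set_cdevpriv(st, foo_dtor));
+ *	}
+ *
+ *	static int
+ *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
+ *	{
+ *		struct foo_state *st;
+ *		int error;
+ *
+ *		error = devfs_get_cdevpriv((void **)&st);
+ *		if (error != 0)
+ *			return (error);
+ *		return (uiomove(&st->unit, sizeof(st->unit), uio));
+ *	}
+ */
+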
+/*
+ * On success devfs_populate_vp() returns with dmp->dm_lock held.
+ */
+static int
+devfs_populate_vp(struct vnode *vp)
+{
+ struct devfs_dirent *de;
+ struct devfs_mount *dmp;
+ int locked;
+
+ ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
+
+ dmp = VFSTODEVFS(vp->v_mount);
+ locked = VOP_ISLOCKED(vp);
+
+ sx_xlock(&dmp->dm_lock);
+ DEVFS_DMP_HOLD(dmp);
+
+ /* Can't call devfs_populate() with the vnode lock held. */
+ VOP_UNLOCK(vp, 0);
+ devfs_populate(dmp);
+
+ sx_xunlock(&dmp->dm_lock);
+ vn_lock(vp, locked | LK_RETRY);
+ sx_xlock(&dmp->dm_lock);
+ if (DEVFS_DMP_DROP(dmp)) {
+ sx_xunlock(&dmp->dm_lock);
+ devfs_unmount_final(dmp);
+ return (ERESTART);
+ }
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ sx_xunlock(&dmp->dm_lock);
+ return (ERESTART);
+ }
+ de = vp->v_data;
+ KASSERT(de != NULL,
+ ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
+ if ((de->de_flags & DE_DOOMED) != 0) {
+ sx_xunlock(&dmp->dm_lock);
+ return (ERESTART);
+ }
+
+ return (0);
+}
+
+static int
+devfs_vptocnp(struct vop_vptocnp_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct vnode **dvp = ap->a_vpp;
+ struct devfs_mount *dmp;
+ char *buf = ap->a_buf;
+ int *buflen = ap->a_buflen;
+ struct devfs_dirent *dd, *de;
+ int i, error;
+
+ dmp = VFSTODEVFS(vp->v_mount);
+
+ error = devfs_populate_vp(vp);
+ if (error != 0)
+ return (error);
+
+ i = *buflen;
+ dd = vp->v_data;
+
+ if (vp->v_type == VCHR) {
+ i -= strlen(dd->de_cdp->cdp_c.si_name);
+ if (i < 0) {
+ error = ENOMEM;
+ goto finished;
+ }
+ bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
+ strlen(dd->de_cdp->cdp_c.si_name));
+ de = dd->de_dir;
+ } else if (vp->v_type == VDIR) {
+ if (dd == dmp->dm_rootdir) {
+ *dvp = vp;
+ vref(*dvp);
+ goto finished;
+ }
+ i -= dd->de_dirent->d_namlen;
+ if (i < 0) {
+ error = ENOMEM;
+ goto finished;
+ }
+ bcopy(dd->de_dirent->d_name, buf + i,
+ dd->de_dirent->d_namlen);
+ de = dd;
+ } else {
+ error = ENOENT;
+ goto finished;
+ }
+ *buflen = i;
+ de = devfs_parent_dirent(de);
+ if (de == NULL) {
+ error = ENOENT;
+ goto finished;
+ }
+ mtx_lock(&devfs_de_interlock);
+ *dvp = de->de_vnode;
+ if (*dvp != NULL) {
+ VI_LOCK(*dvp);
+ mtx_unlock(&devfs_de_interlock);
+ vholdl(*dvp);
+ VI_UNLOCK(*dvp);
+ vref(*dvp);
+ vdrop(*dvp);
+ } else {
+ mtx_unlock(&devfs_de_interlock);
+ error = ENOENT;
+ }
+finished:
+ sx_xunlock(&dmp->dm_lock);
+ return (error);
+}
+
+/*
+ * Construct the fully qualified path name relative to the mountpoint.
+ * If a NULL cnp is provided, no '/' is appended to the resulting path.
+ */
+char *
+devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
+ struct componentname *cnp)
+{
+ int i;
+ struct devfs_dirent *de;
+
+ sx_assert(&dmp->dm_lock, SA_LOCKED);
+
+ i = SPECNAMELEN;
+ buf[i] = '\0';
+ if (cnp != NULL)
+ i -= cnp->cn_namelen;
+ if (i < 0)
+ return (NULL);
+ if (cnp != NULL)
+ bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
+ de = dd;
+ while (de != dmp->dm_rootdir) {
+ if (cnp != NULL || i < SPECNAMELEN) {
+ i--;
+ if (i < 0)
+ return (NULL);
+ buf[i] = '/';
+ }
+ i -= de->de_dirent->d_namlen;
+ if (i < 0)
+ return (NULL);
+ bcopy(de->de_dirent->d_name, buf + i,
+ de->de_dirent->d_namlen);
+ de = devfs_parent_dirent(de);
+ if (de == NULL)
+ return (NULL);
+ }
+ return (buf + i);
+}
+
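+/*
+ * Drop the dirent and mount references taken in devfs_allocv().  Returns
+ * zero when the dirent is still valid (dm_lock is released only when
+ * drop_dm_lock is set) and non-zero when the dirent was doomed, in which
+ * case dm_lock has been released and the dirent, and possibly the mount,
+ * may already have been freed.
+ */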
+static int
+devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
+ struct devfs_dirent *de)
+{
+ int not_found;
+
+ not_found = 0;
+ if (de->de_flags & DE_DOOMED)
+ not_found = 1;
+ if (DEVFS_DE_DROP(de)) {
+ KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
+ devfs_dirent_free(de);
+ }
+ if (DEVFS_DMP_DROP(dmp)) {
+ KASSERT(not_found == 1,
+ ("DEVFS mount struct freed before dirent"));
+ not_found = 2;
+ sx_xunlock(&dmp->dm_lock);
+ devfs_unmount_final(dmp);
+ }
+ if (not_found == 1 || (drop_dm_lock && not_found != 2))
+ sx_unlock(&dmp->dm_lock);
+ return (not_found);
+}
+
+static void
+devfs_insmntque_dtr(struct vnode *vp, void *arg)
+{
+ struct devfs_dirent *de;
+
+ de = (struct devfs_dirent *)arg;
+ mtx_lock(&devfs_de_interlock);
+ vp->v_data = NULL;
+ de->de_vnode = NULL;
+ mtx_unlock(&devfs_de_interlock);
+ vgone(vp);
+ vput(vp);
+}
+
+/*
+ * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
+ * it on return.
+ */
+int
+devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
+ struct vnode **vpp)
+{
+ int error;
+ struct vnode *vp;
+ struct cdev *dev;
+ struct devfs_mount *dmp;
+ struct cdevsw *dsw;
+
+ dmp = VFSTODEVFS(mp);
+ if (de->de_flags & DE_DOOMED) {
+ sx_xunlock(&dmp->dm_lock);
+ return (ENOENT);
+ }
+loop:
+ DEVFS_DE_HOLD(de);
+ DEVFS_DMP_HOLD(dmp);
+ mtx_lock(&devfs_de_interlock);
+ vp = de->de_vnode;
+ if (vp != NULL) {
+ VI_LOCK(vp);
+ mtx_unlock(&devfs_de_interlock);
+ sx_xunlock(&dmp->dm_lock);
+ vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
+ sx_xlock(&dmp->dm_lock);
+ if (devfs_allocv_drop_refs(0, dmp, de)) {
+ vput(vp);
+ return (ENOENT);
+ }
+ else if ((vp->v_iflag & VI_DOOMED) != 0) {
+ mtx_lock(&devfs_de_interlock);
+ if (de->de_vnode == vp) {
+ de->de_vnode = NULL;
+ vp->v_data = NULL;
+ }
+ mtx_unlock(&devfs_de_interlock);
+ vput(vp);
+ goto loop;
+ }
+ sx_xunlock(&dmp->dm_lock);
+ *vpp = vp;
+ return (0);
+ }
+ mtx_unlock(&devfs_de_interlock);
+ if (de->de_dirent->d_type == DT_CHR) {
+ if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
+ devfs_allocv_drop_refs(1, dmp, de);
+ return (ENOENT);
+ }
+ dev = &de->de_cdp->cdp_c;
+ } else {
+ dev = NULL;
+ }
+ error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
+ if (error != 0) {
+ devfs_allocv_drop_refs(1, dmp, de);
+ printf("devfs_allocv: failed to allocate new vnode\n");
+ return (error);
+ }
+
+ if (de->de_dirent->d_type == DT_CHR) {
+ vp->v_type = VCHR;
+ VI_LOCK(vp);
+ dev_lock();
+ dev_refl(dev);
+		/* XXX: v_rdev should be protected by the vnode lock. */
+ vp->v_rdev = dev;
+ KASSERT(vp->v_usecount == 1,
+ ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
+ dev->si_usecount += vp->v_usecount;
+ /* Special casing of ttys for deadfs. Probably redundant. */
+ dsw = dev->si_devsw;
+ if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
+ vp->v_vflag |= VV_ISTTY;
+ dev_unlock();
+ VI_UNLOCK(vp);
+ if ((dev->si_flags & SI_ETERNAL) != 0)
+ vp->v_vflag |= VV_ETERNALDEV;
+ vp->v_op = &devfs_specops;
+ } else if (de->de_dirent->d_type == DT_DIR) {
+ vp->v_type = VDIR;
+ } else if (de->de_dirent->d_type == DT_LNK) {
+ vp->v_type = VLNK;
+ } else {
+ vp->v_type = VBAD;
+ }
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
+ VN_LOCK_ASHARE(vp);
+ mtx_lock(&devfs_de_interlock);
+ vp->v_data = de;
+ de->de_vnode = vp;
+ mtx_unlock(&devfs_de_interlock);
+ error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
+ if (error != 0) {
+ (void) devfs_allocv_drop_refs(1, dmp, de);
+ return (error);
+ }
+ if (devfs_allocv_drop_refs(0, dmp, de)) {
+ vput(vp);
+ return (ENOENT);
+ }
+#ifdef MAC
+ mac_devfs_vnode_associate(mp, de, vp);
+#endif
+ sx_xunlock(&dmp->dm_lock);
+ *vpp = vp;
+ return (0);
+}
+
+static int
+devfs_access(struct vop_access_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct devfs_dirent *de;
+ struct proc *p;
+ int error;
+
+ de = vp->v_data;
+ if (vp->v_type == VDIR)
+ de = de->de_dir;
+
+ error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
+ ap->a_accmode, ap->a_cred, NULL);
+ if (error == 0)
+ return (0);
+ if (error != EACCES)
+ return (error);
+ p = ap->a_td->td_proc;
+ /* We do, however, allow access to the controlling terminal */
+ PROC_LOCK(p);
+ if (!(p->p_flag & P_CONTROLT)) {
+ PROC_UNLOCK(p);
+ return (error);
+ }
+ if (p->p_session->s_ttydp == de->de_cdp)
+ error = 0;
+ PROC_UNLOCK(p);
+ return (error);
+}
+
+_Static_assert(((FMASK | FCNTLFLAGS) & (FLASTCLOSE | FREVOKE)) == 0,
+ "devfs-only flag reuse failed");
+
+static int
+devfs_close(struct vop_close_args *ap)
+{
+ struct vnode *vp = ap->a_vp, *oldvp;
+ struct thread *td = ap->a_td;
+ struct proc *p;
+ struct cdev *dev = vp->v_rdev;
+ struct cdevsw *dsw;
+ int dflags, error, ref, vp_locked;
+
+ /*
+ * XXX: Don't call d_close() if we were called because of
+ * XXX: insmntque1() failure.
+ */
+ if (vp->v_data == NULL)
+ return (0);
+
+ /*
+ * Hack: a tty device that is a controlling terminal
+ * has a reference from the session structure.
+ * We cannot easily tell that a character device is
+ * a controlling terminal, unless it is the closing
+ * process' controlling terminal. In that case,
+ * if the reference count is 2 (this last descriptor
+ * plus the session), release the reference from the session.
+ */
+ if (td != NULL) {
+ p = td->td_proc;
+ PROC_LOCK(p);
+ if (vp == p->p_session->s_ttyvp) {
+ PROC_UNLOCK(p);
+ oldvp = NULL;
+ sx_xlock(&proctree_lock);
+ if (vp == p->p_session->s_ttyvp) {
+ SESS_LOCK(p->p_session);
+ VI_LOCK(vp);
+ if (count_dev(dev) == 2 &&
+ (vp->v_iflag & VI_DOOMED) == 0) {
+ p->p_session->s_ttyvp = NULL;
+ p->p_session->s_ttydp = NULL;
+ oldvp = vp;
+ }
+ VI_UNLOCK(vp);
+ SESS_UNLOCK(p->p_session);
+ }
+ sx_xunlock(&proctree_lock);
+ if (oldvp != NULL)
+ vrele(oldvp);
+ } else
+ PROC_UNLOCK(p);
+ }
+ /*
+ * We do not want to really close the device if it
+ * is still in use unless we are trying to close it
+ * forcibly. Since every use (buffer, vnode, swap, cmap)
+ * holds a reference to the vnode, and because we mark
+ * any other vnodes that alias this device, when the
+ * sum of the reference counts on all the aliased
+ * vnodes descends to one, we are on last close.
+ */
+ dsw = dev_refthread(dev, &ref);
+ if (dsw == NULL)
+ return (ENXIO);
+ dflags = 0;
+ VI_LOCK(vp);
+ if (vp->v_iflag & VI_DOOMED) {
+ /* Forced close. */
+ dflags |= FREVOKE | FNONBLOCK;
+ } else if (dsw->d_flags & D_TRACKCLOSE) {
+ /* Keep device updated on status. */
+ } else if (count_dev(dev) > 1) {
+ VI_UNLOCK(vp);
+ dev_relthread(dev, ref);
+ return (0);
+ }
+ if (count_dev(dev) == 1)
+ dflags |= FLASTCLOSE;
+ vholdl(vp);
+ VI_UNLOCK(vp);
+ vp_locked = VOP_ISLOCKED(vp);
+ VOP_UNLOCK(vp, 0);
+ KASSERT(dev->si_refcount > 0,
+ ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
+ error = dsw->d_close(dev, ap->a_fflag | dflags, S_IFCHR, td);
+ dev_relthread(dev, ref);
+ vn_lock(vp, vp_locked | LK_RETRY);
+ vdrop(vp);
+ return (error);
+}
+
+static int
+devfs_close_f(struct file *fp, struct thread *td)
+{
+ int error;
+ struct file *fpop;
+
+ /*
+ * NB: td may be NULL if this descriptor is closed due to
+ * garbage collection from a closed UNIX domain socket.
+ */
+ fpop = curthread->td_fpop;
+ curthread->td_fpop = fp;
+ error = vnops.fo_close(fp, td);
+ curthread->td_fpop = fpop;
+
+ /*
+ * The f_cdevpriv cannot be assigned non-NULL value while we
+ * are destroying the file.
+ */
+ if (fp->f_cdevpriv != NULL)
+ devfs_fpdrop(fp);
+ return (error);
+}
+
+static int
+devfs_getattr(struct vop_getattr_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct vattr *vap = ap->a_vap;
+ struct devfs_dirent *de;
+ struct devfs_mount *dmp;
+ struct cdev *dev;
+ struct timeval boottime;
+ int error;
+
+ error = devfs_populate_vp(vp);
+ if (error != 0)
+ return (error);
+
+ dmp = VFSTODEVFS(vp->v_mount);
+ sx_xunlock(&dmp->dm_lock);
+
+ de = vp->v_data;
+ KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
+ if (vp->v_type == VDIR) {
+ de = de->de_dir;
+ KASSERT(de != NULL,
+ ("Null dir dirent in devfs_getattr vp=%p", vp));
+ }
+ vap->va_uid = de->de_uid;
+ vap->va_gid = de->de_gid;
+ vap->va_mode = de->de_mode;
+ if (vp->v_type == VLNK)
+ vap->va_size = strlen(de->de_symlink);
+ else if (vp->v_type == VDIR)
+ vap->va_size = vap->va_bytes = DEV_BSIZE;
+ else
+ vap->va_size = 0;
+ if (vp->v_type != VDIR)
+ vap->va_bytes = 0;
+ vap->va_blocksize = DEV_BSIZE;
+ vap->va_type = vp->v_type;
+
+ getboottime(&boottime);
+#define fix(aa) \
+ do { \
+ if ((aa).tv_sec <= 3600) { \
+ (aa).tv_sec = boottime.tv_sec; \
+ (aa).tv_nsec = boottime.tv_usec * 1000; \
+ } \
+ } while (0)
+
+ if (vp->v_type != VCHR) {
+ fix(de->de_atime);
+ vap->va_atime = de->de_atime;
+ fix(de->de_mtime);
+ vap->va_mtime = de->de_mtime;
+ fix(de->de_ctime);
+ vap->va_ctime = de->de_ctime;
+ } else {
+ dev = vp->v_rdev;
+ fix(dev->si_atime);
+ vap->va_atime = dev->si_atime;
+ fix(dev->si_mtime);
+ vap->va_mtime = dev->si_mtime;
+ fix(dev->si_ctime);
+ vap->va_ctime = dev->si_ctime;
+
+ vap->va_rdev = cdev2priv(dev)->cdp_inode;
+ }
+ vap->va_gen = 0;
+ vap->va_flags = 0;
+ vap->va_filerev = 0;
+ vap->va_nlink = de->de_links;
+ vap->va_fileid = de->de_inode;
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
+{
+ struct file *fpop;
+ int error;
+
+ fpop = td->td_fpop;
+ td->td_fpop = fp;
+ error = vnops.fo_ioctl(fp, com, data, cred, td);
+ td->td_fpop = fpop;
+ return (error);
+}
+
+static int
+devfs_ioctl(struct vop_ioctl_args *ap)
+{
+ struct fiodgname_arg *fgn;
+ struct vnode *vpold, *vp;
+ struct cdevsw *dsw;
+ struct thread *td;
+ struct cdev *dev;
+ int error, ref, i;
+ const char *p;
+ u_long com;
+
+ vp = ap->a_vp;
+ com = ap->a_command;
+ td = ap->a_td;
+
+ dsw = devvn_refthread(vp, &dev, &ref);
+ if (dsw == NULL)
+ return (ENXIO);
+ KASSERT(dev->si_refcount > 0,
+ ("devfs: un-referenced struct cdev *(%s)", devtoname(dev)));
+
+ if (com == FIODTYPE) {
+ *(int *)ap->a_data = dsw->d_flags & D_TYPEMASK;
+ error = 0;
+ goto out;
+ } else if (com == FIODGNAME) {
+ fgn = ap->a_data;
+ p = devtoname(dev);
+ i = strlen(p) + 1;
+ if (i > fgn->len)
+ error = EINVAL;
+ else
+ error = copyout(p, fgn->buf, i);
+ goto out;
+ }
+
+ error = dsw->d_ioctl(dev, com, ap->a_data, ap->a_fflag, td);
+
+out:
+ dev_relthread(dev, ref);
+ if (error == ENOIOCTL)
+ error = ENOTTY;
+
+ if (error == 0 && com == TIOCSCTTY) {
+ /* Do nothing if reassigning same control tty */
+ sx_slock(&proctree_lock);
+ if (td->td_proc->p_session->s_ttyvp == vp) {
+ sx_sunlock(&proctree_lock);
+ return (0);
+ }
+
+ vpold = td->td_proc->p_session->s_ttyvp;
+ VREF(vp);
+ SESS_LOCK(td->td_proc->p_session);
+ td->td_proc->p_session->s_ttyvp = vp;
+ td->td_proc->p_session->s_ttydp = cdev2priv(dev);
+ SESS_UNLOCK(td->td_proc->p_session);
+
+ sx_sunlock(&proctree_lock);
+
+ /* Get rid of reference to old control tty */
+ if (vpold)
+ vrele(vpold);
+ }
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+devfs_kqfilter_f(struct file *fp, struct knote *kn)
+{
+ struct cdev *dev;
+ struct cdevsw *dsw;
+ int error, ref;
+ struct file *fpop;
+ struct thread *td;
+
+ td = curthread;
+ fpop = td->td_fpop;
+ error = devfs_fp_check(fp, &dev, &dsw, &ref);
+ if (error)
+ return (error);
+ error = dsw->d_kqfilter(dev, kn);
+ td->td_fpop = fpop;
+ dev_relthread(dev, ref);
+ return (error);
+}
+
+static inline int
+devfs_prison_check(struct devfs_dirent *de, struct thread *td)
+{
+ struct cdev_priv *cdp;
+ struct ucred *dcr;
+ struct proc *p;
+ int error;
+
+ cdp = de->de_cdp;
+ if (cdp == NULL)
+ return (0);
+ dcr = cdp->cdp_c.si_cred;
+ if (dcr == NULL)
+ return (0);
+
+ error = prison_check(td->td_ucred, dcr);
+ if (error == 0)
+ return (0);
+ /* We do, however, allow access to the controlling terminal */
+ p = td->td_proc;
+ PROC_LOCK(p);
+ if (!(p->p_flag & P_CONTROLT)) {
+ PROC_UNLOCK(p);
+ return (error);
+ }
+ if (p->p_session->s_ttydp == cdp)
+ error = 0;
+ PROC_UNLOCK(p);
+ return (error);
+}
+
+static int
+devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
+{
+ struct componentname *cnp;
+ struct vnode *dvp, **vpp;
+ struct thread *td;
+ struct devfs_dirent *de, *dd;
+ struct devfs_dirent **dde;
+ struct devfs_mount *dmp;
+ struct cdev *cdev;
+ int error, flags, nameiop, dvplocked;
+ char specname[SPECNAMELEN + 1], *pname;
+
+ cnp = ap->a_cnp;
+ vpp = ap->a_vpp;
+ dvp = ap->a_dvp;
+ pname = cnp->cn_nameptr;
+ td = cnp->cn_thread;
+ flags = cnp->cn_flags;
+ nameiop = cnp->cn_nameiop;
+ dmp = VFSTODEVFS(dvp->v_mount);
+ dd = dvp->v_data;
+ *vpp = NULLVP;
+
+ if ((flags & ISLASTCN) && nameiop == RENAME)
+ return (EOPNOTSUPP);
+
+ if (dvp->v_type != VDIR)
+ return (ENOTDIR);
+
+ if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
+ return (EIO);
+
+ error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
+ if (error)
+ return (error);
+
+ if (cnp->cn_namelen == 1 && *pname == '.') {
+ if ((flags & ISLASTCN) && nameiop != LOOKUP)
+ return (EINVAL);
+ *vpp = dvp;
+ VREF(dvp);
+ return (0);
+ }
+
+ if (flags & ISDOTDOT) {
+ if ((flags & ISLASTCN) && nameiop != LOOKUP)
+ return (EINVAL);
+ de = devfs_parent_dirent(dd);
+ if (de == NULL)
+ return (ENOENT);
+ dvplocked = VOP_ISLOCKED(dvp);
+ VOP_UNLOCK(dvp, 0);
+ error = devfs_allocv(de, dvp->v_mount,
+ cnp->cn_lkflags & LK_TYPE_MASK, vpp);
+ *dm_unlock = 0;
+ vn_lock(dvp, dvplocked | LK_RETRY);
+ return (error);
+ }
+
+ dd = dvp->v_data;
+ de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
+ while (de == NULL) { /* While(...) so we can use break */
+
+ if (nameiop == DELETE)
+ return (ENOENT);
+
+ /*
+ * OK, we didn't have an entry for the name we were asked for
+ * so we try to see if anybody can create it on demand.
+ */
+ pname = devfs_fqpn(specname, dmp, dd, cnp);
+ if (pname == NULL)
+ break;
+
+ cdev = NULL;
+ DEVFS_DMP_HOLD(dmp);
+ sx_xunlock(&dmp->dm_lock);
+ sx_slock(&clone_drain_lock);
+ EVENTHANDLER_INVOKE(dev_clone,
+ td->td_ucred, pname, strlen(pname), &cdev);
+ sx_sunlock(&clone_drain_lock);
+
+ if (cdev == NULL)
+ sx_xlock(&dmp->dm_lock);
+ else if (devfs_populate_vp(dvp) != 0) {
+ *dm_unlock = 0;
+ sx_xlock(&dmp->dm_lock);
+ if (DEVFS_DMP_DROP(dmp)) {
+ sx_xunlock(&dmp->dm_lock);
+ devfs_unmount_final(dmp);
+ } else
+ sx_xunlock(&dmp->dm_lock);
+ dev_rel(cdev);
+ return (ENOENT);
+ }
+ if (DEVFS_DMP_DROP(dmp)) {
+ *dm_unlock = 0;
+ sx_xunlock(&dmp->dm_lock);
+ devfs_unmount_final(dmp);
+ if (cdev != NULL)
+ dev_rel(cdev);
+ return (ENOENT);
+ }
+
+ if (cdev == NULL)
+ break;
+
+ dev_lock();
+ dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
+ if (dde != NULL && *dde != NULL)
+ de = *dde;
+ dev_unlock();
+ dev_rel(cdev);
+ break;
+ }
+
+ if (de == NULL || de->de_flags & DE_WHITEOUT) {
+ if ((nameiop == CREATE || nameiop == RENAME) &&
+ (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
+ cnp->cn_flags |= SAVENAME;
+ return (EJUSTRETURN);
+ }
+ return (ENOENT);
+ }
+
+ if (devfs_prison_check(de, td))
+ return (ENOENT);
+
+ if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
+ error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
+ if (error)
+ return (error);
+ if (*vpp == dvp) {
+ VREF(dvp);
+ *vpp = dvp;
+ return (0);
+ }
+ }
+ error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
+ vpp);
+ *dm_unlock = 0;
+ return (error);
+}
+
+static int
+devfs_lookup(struct vop_lookup_args *ap)
+{
+ int j;
+ struct devfs_mount *dmp;
+ int dm_unlock;
+
+ if (devfs_populate_vp(ap->a_dvp) != 0)
+ return (ENOTDIR);
+
+ dmp = VFSTODEVFS(ap->a_dvp->v_mount);
+ dm_unlock = 1;
+ j = devfs_lookupx(ap, &dm_unlock);
+ if (dm_unlock == 1)
+ sx_xunlock(&dmp->dm_lock);
+ return (j);
+}
+
+static int
+devfs_mknod(struct vop_mknod_args *ap)
+{
+ struct componentname *cnp;
+ struct vnode *dvp, **vpp;
+ struct devfs_dirent *dd, *de;
+ struct devfs_mount *dmp;
+ int error;
+
+ /*
+ * The only type of node we should be creating here is a
+ * character device, for anything else return EOPNOTSUPP.
+ */
+ if (ap->a_vap->va_type != VCHR)
+ return (EOPNOTSUPP);
+ dvp = ap->a_dvp;
+ dmp = VFSTODEVFS(dvp->v_mount);
+
+ cnp = ap->a_cnp;
+ vpp = ap->a_vpp;
+ dd = dvp->v_data;
+
+ error = ENOENT;
+ sx_xlock(&dmp->dm_lock);
+ TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
+ if (cnp->cn_namelen != de->de_dirent->d_namlen)
+ continue;
+ if (de->de_dirent->d_type == DT_CHR &&
+ (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
+ continue;
+ if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
+ de->de_dirent->d_namlen) != 0)
+ continue;
+ if (de->de_flags & DE_WHITEOUT)
+ break;
+ goto notfound;
+ }
+ if (de == NULL)
+ goto notfound;
+ de->de_flags &= ~DE_WHITEOUT;
+ error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
+ return (error);
+notfound:
+ sx_xunlock(&dmp->dm_lock);
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+devfs_open(struct vop_open_args *ap)
+{
+ struct thread *td = ap->a_td;
+ struct vnode *vp = ap->a_vp;
+ struct cdev *dev = vp->v_rdev;
+ struct file *fp = ap->a_fp;
+ int error, ref, vlocked;
+ struct cdevsw *dsw;
+ struct file *fpop;
+ struct mtx *mtxp;
+
+ if (vp->v_type == VBLK)
+ return (ENXIO);
+
+ if (dev == NULL)
+ return (ENXIO);
+
+ /* Make this field valid before any I/O in d_open. */
+ if (dev->si_iosize_max == 0)
+ dev->si_iosize_max = DFLTPHYS;
+
+ dsw = dev_refthread(dev, &ref);
+ if (dsw == NULL)
+ return (ENXIO);
+ if (fp == NULL && dsw->d_fdopen != NULL) {
+ dev_relthread(dev, ref);
+ return (ENXIO);
+ }
+
+ vlocked = VOP_ISLOCKED(vp);
+ VOP_UNLOCK(vp, 0);
+
+ fpop = td->td_fpop;
+ td->td_fpop = fp;
+ if (fp != NULL) {
+ fp->f_data = dev;
+ fp->f_vnode = vp;
+ }
+ if (dsw->d_fdopen != NULL)
+ error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
+ else
+ error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
+ /* Clean up any cdevpriv upon error. */
+ if (error != 0)
+ devfs_clear_cdevpriv();
+ td->td_fpop = fpop;
+
+ vn_lock(vp, vlocked | LK_RETRY);
+ dev_relthread(dev, ref);
+ if (error != 0) {
+ if (error == ERESTART)
+ error = EINTR;
+ return (error);
+ }
+
+#if 0 /* /dev/console */
+ KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
+#else
+ if (fp == NULL)
+ return (error);
+#endif
+ if (fp->f_ops == &badfileops)
+ finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
+ mtxp = mtx_pool_find(mtxpool_sleep, fp);
+
+ /*
+ * Hint to the dofilewrite() to not force the buffer draining
+ * on the writer to the file. Most likely, the write would
+ * not need normal buffers.
+ */
+ mtx_lock(mtxp);
+ fp->f_vnread_flags |= FDEVFS_VNODE;
+ mtx_unlock(mtxp);
+ return (error);
+}
+
+static int
+devfs_pathconf(struct vop_pathconf_args *ap)
+{
+
+ switch (ap->a_name) {
+ case _PC_MAC_PRESENT:
+#ifdef MAC
+ /*
+ * If MAC is enabled, devfs automatically supports
+	 * trivial non-persistent label storage.
+ */
+ *ap->a_retval = 1;
+#else
+ *ap->a_retval = 0;
+#endif
+ return (0);
+ default:
+ return (vop_stdpathconf(ap));
+ }
+ /* NOTREACHED */
+}
+
+/* ARGSUSED */
+static int
+devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
+{
+ struct cdev *dev;
+ struct cdevsw *dsw;
+ int error, ref;
+ struct file *fpop;
+
+ fpop = td->td_fpop;
+ error = devfs_fp_check(fp, &dev, &dsw, &ref);
+ if (error != 0) {
+ error = vnops.fo_poll(fp, events, cred, td);
+ return (error);
+ }
+ error = dsw->d_poll(dev, events, td);
+ td->td_fpop = fpop;
+ dev_relthread(dev, ref);
+ return(error);
+}
+
+/*
+ * Print out the contents of a special device vnode.
+ */
+static int
+devfs_print(struct vop_print_args *ap)
+{
+
+ printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
+ return (0);
+}
+
+static int
+devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
+ int flags, struct thread *td)
+{
+ struct cdev *dev;
+ int ioflag, error, ref;
+ ssize_t resid;
+ struct cdevsw *dsw;
+ struct file *fpop;
+
+ if (uio->uio_resid > DEVFS_IOSIZE_MAX)
+ return (EINVAL);
+ fpop = td->td_fpop;
+ error = devfs_fp_check(fp, &dev, &dsw, &ref);
+ if (error != 0) {
+ error = vnops.fo_read(fp, uio, cred, flags, td);
+ return (error);
+ }
+ resid = uio->uio_resid;
+ ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
+ if (ioflag & O_DIRECT)
+ ioflag |= IO_DIRECT;
+
+ foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
+ error = dsw->d_read(dev, uio, ioflag);
+ if (uio->uio_resid != resid || (error == 0 && resid != 0))
+ devfs_timestamp(&dev->si_atime);
+ td->td_fpop = fpop;
+ dev_relthread(dev, ref);
+
+ foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
+ return (error);
+}
+
+static int
+devfs_readdir(struct vop_readdir_args *ap)
+{
+ int error;
+ struct uio *uio;
+ struct dirent *dp;
+ struct devfs_dirent *dd;
+ struct devfs_dirent *de;
+ struct devfs_mount *dmp;
+ off_t off;
+ int *tmp_ncookies = NULL;
+
+ if (ap->a_vp->v_type != VDIR)
+ return (ENOTDIR);
+
+ uio = ap->a_uio;
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+
+ /*
+ * XXX: This is a temporary hack to get around this filesystem not
+ * supporting cookies. We store the location of the ncookies pointer
+ * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
+ * and set the number of cookies to 0. We then set the pointer to
+ * NULL so that vfs_read_dirent doesn't try to call realloc() on
+ * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
+ * pointer to its original location before returning to the caller.
+ */
+ if (ap->a_ncookies != NULL) {
+ tmp_ncookies = ap->a_ncookies;
+ *ap->a_ncookies = 0;
+ ap->a_ncookies = NULL;
+ }
+
+ dmp = VFSTODEVFS(ap->a_vp->v_mount);
+ if (devfs_populate_vp(ap->a_vp) != 0) {
+ if (tmp_ncookies != NULL)
+ ap->a_ncookies = tmp_ncookies;
+ return (EIO);
+ }
+ error = 0;
+ de = ap->a_vp->v_data;
+ off = 0;
+ TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
+ KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
+ if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
+ continue;
+ if (devfs_prison_check(dd, uio->uio_td))
+ continue;
+ if (dd->de_dirent->d_type == DT_DIR)
+ de = dd->de_dir;
+ else
+ de = dd;
+ dp = dd->de_dirent;
+ if (dp->d_reclen > uio->uio_resid)
+ break;
+ dp->d_fileno = de->de_inode;
+ if (off >= uio->uio_offset) {
+ error = vfs_read_dirent(ap, dp, off);
+ if (error)
+ break;
+ }
+ off += dp->d_reclen;
+ }
+ sx_xunlock(&dmp->dm_lock);
+ uio->uio_offset = off;
+
+ /*
+ * Restore ap->a_ncookies if it wasn't originally NULL in the first
+ * place.
+ */
+ if (tmp_ncookies != NULL)
+ ap->a_ncookies = tmp_ncookies;
+
+ return (error);
+}
+
+static int
+devfs_readlink(struct vop_readlink_args *ap)
+{
+ struct devfs_dirent *de;
+
+ de = ap->a_vp->v_data;
+ return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
+}
+
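+/*
+ * Break the association between a vnode and its devfs dirent, so that a
+ * fresh vnode is allocated on the next lookup; the dirent itself stays
+ * around.
+ */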
+static int
+devfs_reclaim(struct vop_reclaim_args *ap)
+{
+ struct vnode *vp;
+ struct devfs_dirent *de;
+
+ vp = ap->a_vp;
+ mtx_lock(&devfs_de_interlock);
+ de = vp->v_data;
+ if (de != NULL) {
+ de->de_vnode = NULL;
+ vp->v_data = NULL;
+ }
+ mtx_unlock(&devfs_de_interlock);
+ vnode_destroy_vobject(vp);
+ return (0);
+}
+
+static int
+devfs_reclaim_vchr(struct vop_reclaim_args *ap)
+{
+ struct vnode *vp;
+ struct cdev *dev;
+
+ vp = ap->a_vp;
+ MPASS(vp->v_type == VCHR);
+
+ devfs_reclaim(ap);
+
+ VI_LOCK(vp);
+ dev_lock();
+ dev = vp->v_rdev;
+ vp->v_rdev = NULL;
+ if (dev != NULL)
+ dev->si_usecount -= vp->v_usecount;
+ dev_unlock();
+ VI_UNLOCK(vp);
+ if (dev != NULL)
+ dev_rel(dev);
+ return (0);
+}
+
+static int
+devfs_remove(struct vop_remove_args *ap)
+{
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode *vp = ap->a_vp;
+ struct devfs_dirent *dd;
+ struct devfs_dirent *de, *de_covered;
+ struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
+
+ ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
+ ASSERT_VOP_ELOCKED(vp, "devfs_remove");
+
+ sx_xlock(&dmp->dm_lock);
+ dd = ap->a_dvp->v_data;
+ de = vp->v_data;
+ if (de->de_cdp == NULL) {
+ TAILQ_REMOVE(&dd->de_dlist, de, de_list);
+ if (de->de_dirent->d_type == DT_LNK) {
+ de_covered = devfs_find(dd, de->de_dirent->d_name,
+ de->de_dirent->d_namlen, 0);
+ if (de_covered != NULL)
+ de_covered->de_flags &= ~DE_COVERED;
+ }
+ /* We need to unlock dvp because devfs_delete() may lock it. */
+ VOP_UNLOCK(vp, 0);
+ if (dvp != vp)
+ VOP_UNLOCK(dvp, 0);
+ devfs_delete(dmp, de, 0);
+ sx_xunlock(&dmp->dm_lock);
+ if (dvp != vp)
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ } else {
+ de->de_flags |= DE_WHITEOUT;
+ sx_xunlock(&dmp->dm_lock);
+ }
+ return (0);
+}
+
+/*
+ * Revoke is called on a tty when a terminal session ends.  The vnode
+ * is orphaned by setting v_op to deadfs, so we need to let go of it
+ * as well so that we create a new one next time around.
+ */
+static int
+devfs_revoke(struct vop_revoke_args *ap)
+{
+ struct vnode *vp = ap->a_vp, *vp2;
+ struct cdev *dev;
+ struct cdev_priv *cdp;
+ struct devfs_dirent *de;
+ u_int i;
+
+ KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
+
+ dev = vp->v_rdev;
+ cdp = cdev2priv(dev);
+
+ dev_lock();
+ cdp->cdp_inuse++;
+ dev_unlock();
+
+ vhold(vp);
+ vgone(vp);
+ vdrop(vp);
+
+	VOP_UNLOCK(vp, 0);
+ loop:
+ for (;;) {
+ mtx_lock(&devfs_de_interlock);
+ dev_lock();
+ vp2 = NULL;
+ for (i = 0; i <= cdp->cdp_maxdirent; i++) {
+ de = cdp->cdp_dirents[i];
+ if (de == NULL)
+ continue;
+
+ vp2 = de->de_vnode;
+ if (vp2 != NULL) {
+ dev_unlock();
+ VI_LOCK(vp2);
+ mtx_unlock(&devfs_de_interlock);
+ if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
+ curthread))
+ goto loop;
+ vhold(vp2);
+ vgone(vp2);
+ vdrop(vp2);
+ vput(vp2);
+ break;
+ }
+ }
+ if (vp2 != NULL) {
+ continue;
+ }
+ dev_unlock();
+ mtx_unlock(&devfs_de_interlock);
+ break;
+ }
+ dev_lock();
+ cdp->cdp_inuse--;
+ if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
+ TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
+ dev_unlock();
+ dev_rel(&cdp->cdp_c);
+ } else
+ dev_unlock();
+
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ return (0);
+}
+
+static int
+devfs_rioctl(struct vop_ioctl_args *ap)
+{
+ struct vnode *vp;
+ struct devfs_mount *dmp;
+ int error;
+
+ vp = ap->a_vp;
+ vn_lock(vp, LK_SHARED | LK_RETRY);
+ if (vp->v_iflag & VI_DOOMED) {
+ VOP_UNLOCK(vp, 0);
+ return (EBADF);
+ }
+ dmp = VFSTODEVFS(vp->v_mount);
+ sx_xlock(&dmp->dm_lock);
+ VOP_UNLOCK(vp, 0);
+ DEVFS_DMP_HOLD(dmp);
+ devfs_populate(dmp);
+ if (DEVFS_DMP_DROP(dmp)) {
+ sx_xunlock(&dmp->dm_lock);
+ devfs_unmount_final(dmp);
+ return (ENOENT);
+ }
+ error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
+ sx_xunlock(&dmp->dm_lock);
+ return (error);
+}
+
+static int
+devfs_rread(struct vop_read_args *ap)
+{
+
+ if (ap->a_vp->v_type != VDIR)
+ return (EINVAL);
+ return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
+}
+
+static int
+devfs_setattr(struct vop_setattr_args *ap)
+{
+ struct devfs_dirent *de;
+ struct vattr *vap;
+ struct vnode *vp;
+ struct thread *td;
+ int c, error;
+ uid_t uid;
+ gid_t gid;
+
+ vap = ap->a_vap;
+ vp = ap->a_vp;
+ td = curthread;
+ if ((vap->va_type != VNON) ||
+ (vap->va_nlink != VNOVAL) ||
+ (vap->va_fsid != VNOVAL) ||
+ (vap->va_fileid != VNOVAL) ||
+ (vap->va_blocksize != VNOVAL) ||
+ (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
+ (vap->va_rdev != VNOVAL) ||
+ ((int)vap->va_bytes != VNOVAL) ||
+ (vap->va_gen != VNOVAL)) {
+ return (EINVAL);
+ }
+
+ error = devfs_populate_vp(vp);
+ if (error != 0)
+ return (error);
+
+ de = vp->v_data;
+ if (vp->v_type == VDIR)
+ de = de->de_dir;
+
+ c = 0;
+ if (vap->va_uid == (uid_t)VNOVAL)
+ uid = de->de_uid;
+ else
+ uid = vap->va_uid;
+ if (vap->va_gid == (gid_t)VNOVAL)
+ gid = de->de_gid;
+ else
+ gid = vap->va_gid;
+ if (uid != de->de_uid || gid != de->de_gid) {
+ if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
+ (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
+ error = priv_check(td, PRIV_VFS_CHOWN);
+ if (error != 0)
+ goto ret;
+ }
+ de->de_uid = uid;
+ de->de_gid = gid;
+ c = 1;
+ }
+
+ if (vap->va_mode != (mode_t)VNOVAL) {
+ if (ap->a_cred->cr_uid != de->de_uid) {
+ error = priv_check(td, PRIV_VFS_ADMIN);
+ if (error != 0)
+ goto ret;
+ }
+ de->de_mode = vap->va_mode;
+ c = 1;
+ }
+
+ if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
+ error = vn_utimes_perm(vp, vap, ap->a_cred, td);
+ if (error != 0)
+ goto ret;
+ if (vap->va_atime.tv_sec != VNOVAL) {
+ if (vp->v_type == VCHR)
+ vp->v_rdev->si_atime = vap->va_atime;
+ else
+ de->de_atime = vap->va_atime;
+ }
+ if (vap->va_mtime.tv_sec != VNOVAL) {
+ if (vp->v_type == VCHR)
+ vp->v_rdev->si_mtime = vap->va_mtime;
+ else
+ de->de_mtime = vap->va_mtime;
+ }
+ c = 1;
+ }
+
+ if (c) {
+ if (vp->v_type == VCHR)
+ vfs_timestamp(&vp->v_rdev->si_ctime);
+ else
+ vfs_timestamp(&de->de_mtime);
+ }
+
+ret:
+ sx_xunlock(&VFSTODEVFS(vp->v_mount)->dm_lock);
+ return (error);
+}
+
+#ifdef MAC
+static int
+devfs_setlabel(struct vop_setlabel_args *ap)
+{
+ struct vnode *vp;
+ struct devfs_dirent *de;
+
+ vp = ap->a_vp;
+ de = vp->v_data;
+
+ mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
+ mac_devfs_update(vp->v_mount, de, vp);
+
+ return (0);
+}
+#endif
+
+static int
+devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
+{
+
+ return (vnops.fo_stat(fp, sb, cred, td));
+}
+
+static int
+devfs_symlink(struct vop_symlink_args *ap)
+{
+ int i, error;
+ struct devfs_dirent *dd;
+ struct devfs_dirent *de, *de_covered, *de_dotdot;
+ struct devfs_mount *dmp;
+
+ error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
+ if (error)
+ return(error);
+ dmp = VFSTODEVFS(ap->a_dvp->v_mount);
+ if (devfs_populate_vp(ap->a_dvp) != 0)
+ return (ENOENT);
+
+ dd = ap->a_dvp->v_data;
+ de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
+ de->de_flags = DE_USER;
+ de->de_uid = 0;
+ de->de_gid = 0;
+ de->de_mode = 0755;
+ de->de_inode = alloc_unr(devfs_inos);
+ de->de_dir = dd;
+ de->de_dirent->d_type = DT_LNK;
+ i = strlen(ap->a_target) + 1;
+ de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
+ bcopy(ap->a_target, de->de_symlink, i);
+#ifdef MAC
+ mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
+#endif
+ de_covered = devfs_find(dd, de->de_dirent->d_name,
+ de->de_dirent->d_namlen, 0);
+ if (de_covered != NULL) {
+ if ((de_covered->de_flags & DE_USER) != 0) {
+ devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
+ sx_xunlock(&dmp->dm_lock);
+ return (EEXIST);
+ }
+ KASSERT((de_covered->de_flags & DE_COVERED) == 0,
+ ("devfs_symlink: entry %p already covered", de_covered));
+ de_covered->de_flags |= DE_COVERED;
+ }
+
+ de_dotdot = TAILQ_FIRST(&dd->de_dlist); /* "." */
+ de_dotdot = TAILQ_NEXT(de_dotdot, de_list); /* ".." */
+ TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
+ devfs_dir_ref_de(dmp, dd);
+ devfs_rules_apply(dmp, de);
+
+ return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
+}
+
+static int
+devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
+{
+
+ return (vnops.fo_truncate(fp, length, cred, td));
+}
+
+static int
+devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
+ int flags, struct thread *td)
+{
+ struct cdev *dev;
+ int error, ioflag, ref;
+ ssize_t resid;
+ struct cdevsw *dsw;
+ struct file *fpop;
+
+ if (uio->uio_resid > DEVFS_IOSIZE_MAX)
+ return (EINVAL);
+ fpop = td->td_fpop;
+ error = devfs_fp_check(fp, &dev, &dsw, &ref);
+ if (error != 0) {
+ error = vnops.fo_write(fp, uio, cred, flags, td);
+ return (error);
+ }
+ KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
+ ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
+ if (ioflag & O_DIRECT)
+ ioflag |= IO_DIRECT;
+ foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
+
+ resid = uio->uio_resid;
+
+ error = dsw->d_write(dev, uio, ioflag);
+ if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
+ devfs_timestamp(&dev->si_ctime);
+ dev->si_mtime = dev->si_ctime;
+ }
+ td->td_fpop = fpop;
+ dev_relthread(dev, ref);
+
+ foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
+ return (error);
+}
+
+static int
+devfs_mmap_f(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
+ vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
+ struct thread *td)
+{
+ struct cdev *dev;
+ struct cdevsw *dsw;
+ struct mount *mp;
+ struct vnode *vp;
+ struct file *fpop;
+ vm_object_t object;
+ vm_prot_t maxprot;
+ int error, ref;
+
+ vp = fp->f_vnode;
+
+ /*
+ * Ensure that file and memory protections are
+ * compatible.
+ */
+ mp = vp->v_mount;
+ if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
+ maxprot = VM_PROT_NONE;
+ if ((prot & VM_PROT_EXECUTE) != 0)
+ return (EACCES);
+ } else
+ maxprot = VM_PROT_EXECUTE;
+ if ((fp->f_flag & FREAD) != 0)
+ maxprot |= VM_PROT_READ;
+ else if ((prot & VM_PROT_READ) != 0)
+ return (EACCES);
+
+ /*
+ * If we are sharing potential changes via MAP_SHARED and we
+ * are trying to get write permission although we opened it
+ * without asking for it, bail out.
+ *
+ * Note that most character devices always share mappings.
+ * The one exception is that D_MMAP_ANON devices
+ * (i.e. /dev/zero) permit private writable mappings.
+ *
+ * Rely on vm_mmap_cdev() to fail invalid MAP_PRIVATE requests
+ * as well as updating maxprot to permit writing for
+ * D_MMAP_ANON devices rather than doing that here.
+ */
+ if ((flags & MAP_SHARED) != 0) {
+ if ((fp->f_flag & FWRITE) != 0)
+ maxprot |= VM_PROT_WRITE;
+ else if ((prot & VM_PROT_WRITE) != 0)
+ return (EACCES);
+ }
+ maxprot &= cap_maxprot;
+
+ fpop = td->td_fpop;
+ error = devfs_fp_check(fp, &dev, &dsw, &ref);
+ if (error != 0)
+ return (error);
+
+ error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, dev, dsw, &foff,
+ &object);
+ td->td_fpop = fpop;
+ dev_relthread(dev, ref);
+ if (error != 0)
+ return (error);
+
+ error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
+ foff, FALSE, td);
+ if (error != 0)
+ vm_object_deallocate(object);
+ return (error);
+}
+
+dev_t
+dev2udev(struct cdev *x)
+{
+ if (x == NULL)
+ return (NODEV);
+ return (cdev2priv(x)->cdp_inode);
+}
+
+static struct fileops devfs_ops_f = {
+ .fo_read = devfs_read_f,
+ .fo_write = devfs_write_f,
+ .fo_truncate = devfs_truncate_f,
+ .fo_ioctl = devfs_ioctl_f,
+ .fo_poll = devfs_poll_f,
+ .fo_kqfilter = devfs_kqfilter_f,
+ .fo_stat = devfs_stat_f,
+ .fo_close = devfs_close_f,
+ .fo_chmod = vn_chmod,
+ .fo_chown = vn_chown,
+ .fo_sendfile = vn_sendfile,
+ .fo_seek = vn_seek,
+ .fo_fill_kinfo = vn_fill_kinfo,
+ .fo_mmap = devfs_mmap_f,
+ .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
+};
+
+/* Vops for non-CHR vnodes in /dev. */
+static struct vop_vector devfs_vnodeops = {
+ .vop_default = &default_vnodeops,
+
+ .vop_access = devfs_access,
+ .vop_getattr = devfs_getattr,
+ .vop_ioctl = devfs_rioctl,
+ .vop_lookup = devfs_lookup,
+ .vop_mknod = devfs_mknod,
+ .vop_pathconf = devfs_pathconf,
+ .vop_read = devfs_rread,
+ .vop_readdir = devfs_readdir,
+ .vop_readlink = devfs_readlink,
+ .vop_reclaim = devfs_reclaim,
+ .vop_remove = devfs_remove,
+ .vop_revoke = devfs_revoke,
+ .vop_setattr = devfs_setattr,
+#ifdef MAC
+ .vop_setlabel = devfs_setlabel,
+#endif
+ .vop_symlink = devfs_symlink,
+ .vop_vptocnp = devfs_vptocnp,
+};
+
+/* Vops for VCHR vnodes in /dev. */
+static struct vop_vector devfs_specops = {
+ .vop_default = &default_vnodeops,
+
+ .vop_access = devfs_access,
+ .vop_bmap = VOP_PANIC,
+ .vop_close = devfs_close,
+ .vop_create = VOP_PANIC,
+ .vop_fsync = vop_stdfsync,
+ .vop_getattr = devfs_getattr,
+ .vop_ioctl = devfs_ioctl,
+ .vop_link = VOP_PANIC,
+ .vop_mkdir = VOP_PANIC,
+ .vop_mknod = VOP_PANIC,
+ .vop_open = devfs_open,
+ .vop_pathconf = devfs_pathconf,
+ .vop_poll = dead_poll,
+ .vop_print = devfs_print,
+ .vop_read = dead_read,
+ .vop_readdir = VOP_PANIC,
+ .vop_readlink = VOP_PANIC,
+ .vop_reallocblks = VOP_PANIC,
+ .vop_reclaim = devfs_reclaim_vchr,
+ .vop_remove = devfs_remove,
+ .vop_rename = VOP_PANIC,
+ .vop_revoke = devfs_revoke,
+ .vop_rmdir = VOP_PANIC,
+ .vop_setattr = devfs_setattr,
+#ifdef MAC
+ .vop_setlabel = devfs_setlabel,
+#endif
+ .vop_strategy = VOP_PANIC,
+ .vop_symlink = VOP_PANIC,
+ .vop_vptocnp = devfs_vptocnp,
+ .vop_write = dead_write,
+};
+
+/*
+ * Our calling convention to the device drivers used to be that we passed
+ * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
+ * flags instead, since that's what open(), close() and ioctl() take and
+ * we don't really want vnode.h in device drivers.
+ * We preserved source compatibility by redefining some vnode flags to
+ * be the same as the fcntl ones and by sending down the bitwise OR of
+ * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
+ * pulls the rug out from under this.
+ */
+CTASSERT(O_NONBLOCK == IO_NDELAY);
+CTASSERT(O_FSYNC == IO_SYNC);
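+
+/*
+ * Illustrative sketch (editorial note, not part of the original file):
+ * because of the aliasing asserted above, the ioflag that devfs_read_f()
+ * builds from fp->f_flag may be tested under either name inside a driver;
+ * O_NONBLOCK and IO_NDELAY are the same bit.  A hypothetical d_read:
+ *
+ *	static int
+ *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
+ *	{
+ *
+ *		if (ioflag & O_NONBLOCK)
+ *			return (EWOULDBLOCK);
+ *		return (0);
+ *	}
+ */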
diff --git a/freebsd/sys/kern/tty.c b/freebsd/sys/kern/tty.c
new file mode 100644
index 00000000..95afaebc
--- /dev/null
+++ b/freebsd/sys/kern/tty.c
@@ -0,0 +1,2349 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Portions of this software were developed under sponsorship from Snow
+ * B.V., the Netherlands.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_capsicum.h>
+#include <rtems/bsd/local/opt_compat.h>
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/capsicum.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/filio.h>
+#ifdef COMPAT_43TTY
+#include <sys/ioctl_compat.h>
+#endif /* COMPAT_43TTY */
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/serial.h>
+#include <sys/signal.h>
+#include <sys/stat.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/tty.h>
+#include <sys/ttycom.h>
+#define TTYDEFCHARS
+#include <sys/ttydefaults.h>
+#undef TTYDEFCHARS
+#include <sys/ucred.h>
+#include <sys/vnode.h>
+
+#include <machine/stdarg.h>
+
+static MALLOC_DEFINE(M_TTY, "tty", "tty device");
+
+static void tty_rel_free(struct tty *tp);
+
+static TAILQ_HEAD(, tty) tty_list = TAILQ_HEAD_INITIALIZER(tty_list);
+static struct sx tty_list_sx;
+SX_SYSINIT(tty_list, &tty_list_sx, "tty list");
+static unsigned int tty_list_count = 0;
+
+/* Character device of /dev/console. */
+static struct cdev *dev_console;
+static const char *dev_console_filename;
+
+/*
+ * Flags that are supported and stored by this implementation.
+ */
+#define TTYSUP_IFLAG (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK|ISTRIP|\
+ INLCR|IGNCR|ICRNL|IXON|IXOFF|IXANY|IMAXBEL)
+#define TTYSUP_OFLAG (OPOST|ONLCR|TAB3|ONOEOT|OCRNL|ONOCR|ONLRET)
+#define TTYSUP_LFLAG (ECHOKE|ECHOE|ECHOK|ECHO|ECHONL|ECHOPRT|\
+ ECHOCTL|ISIG|ICANON|ALTWERASE|IEXTEN|TOSTOP|\
+ FLUSHO|NOKERNINFO|NOFLSH)
+#define TTYSUP_CFLAG (CIGNORE|CSIZE|CSTOPB|CREAD|PARENB|PARODD|\
+ HUPCL|CLOCAL|CCTS_OFLOW|CRTS_IFLOW|CDTR_IFLOW|\
+ CDSR_OFLOW|CCAR_OFLOW)
+
+#define TTY_CALLOUT(tp,d) (dev2unit(d) & TTYUNIT_CALLOUT)
+
+static int tty_drainwait = 5 * 60;
+SYSCTL_INT(_kern, OID_AUTO, tty_drainwait, CTLFLAG_RWTUN,
+ &tty_drainwait, 0, "Default output drain timeout in seconds");
+
+/*
+ * Set TTY buffer sizes.
+ */
+
+#define TTYBUF_MAX 65536
+
+/*
+ * Allocate buffer space if necessary, and set low watermarks, based on speed.
+ * Note that the ttyxxxq_setsize() functions may drop and then reacquire the tty
+ * lock during memory allocation. They will return ENXIO if the tty disappears
+ * while unlocked.
+ */
+static int
+tty_watermarks(struct tty *tp)
+{
+ size_t bs = 0;
+ int error;
+
+	/*
+	 * Provide an input buffer for 2 seconds of data: at roughly 10
+	 * bits per character, that is c_ispeed / 5 bytes.
+	 */
+ if (tp->t_termios.c_cflag & CREAD)
+ bs = MIN(tp->t_termios.c_ispeed / 5, TTYBUF_MAX);
+ error = ttyinq_setsize(&tp->t_inq, tp, bs);
+ if (error != 0)
+ return (error);
+
+ /* Set low watermark at 10% (when 90% is available). */
+ tp->t_inlow = (ttyinq_getallocatedsize(&tp->t_inq) * 9) / 10;
+
+ /* Provide an output buffer for 2 seconds of data. */
+ bs = MIN(tp->t_termios.c_ospeed / 5, TTYBUF_MAX);
+ error = ttyoutq_setsize(&tp->t_outq, tp, bs);
+ if (error != 0)
+ return (error);
+
+ /* Set low watermark at 10% (when 90% is available). */
+ tp->t_outlow = (ttyoutq_getallocatedsize(&tp->t_outq) * 9) / 10;
+
+ return (0);
+}
+
+static int
+tty_drain(struct tty *tp, int leaving)
+{
+ sbintime_t timeout_at;
+ size_t bytes;
+ int error;
+
+ if (ttyhook_hashook(tp, getc_inject))
+ /* buffer is inaccessible */
+ return (0);
+
+ /*
+ * For close(), use the recent historic timeout of "1 second without
+ * making progress". For tcdrain(), use t_drainwait as the timeout,
+ * with zero meaning "no timeout" which gives POSIX behavior.
+ */
+ if (leaving)
+ timeout_at = getsbinuptime() + SBT_1S;
+ else if (tp->t_drainwait != 0)
+ timeout_at = getsbinuptime() + SBT_1S * tp->t_drainwait;
+ else
+ timeout_at = 0;
+
+ /*
+ * Poll the output buffer and the hardware for completion, at 10 Hz.
+ * Polling is required for devices which are not able to signal an
+ * interrupt when the transmitter becomes idle (most USB serial devs).
+ * The unusual structure of this loop ensures we check for busy one more
+ * time after tty_timedwait() returns EWOULDBLOCK, so that success has
+	 * higher priority than timeout if the IO completed in the last 100 ms.
+ */
+ error = 0;
+ bytes = ttyoutq_bytesused(&tp->t_outq);
+ for (;;) {
+ if (ttyoutq_bytesused(&tp->t_outq) == 0 && !ttydevsw_busy(tp))
+ return (0);
+ if (error != 0)
+ return (error);
+ ttydevsw_outwakeup(tp);
+ error = tty_timedwait(tp, &tp->t_outwait, hz / 10);
+ if (error != 0 && error != EWOULDBLOCK)
+ return (error);
+ else if (timeout_at == 0 || getsbinuptime() < timeout_at)
+ error = 0;
+ else if (leaving && ttyoutq_bytesused(&tp->t_outq) < bytes) {
+ /* In close, making progress, grant an extra second. */
+ error = 0;
+ timeout_at += SBT_1S;
+ bytes = ttyoutq_bytesused(&tp->t_outq);
+ }
+ }
+}
+
+/*
+ * Though ttydev_enter() and ttydev_leave() seem to be related, they
+ * don't have to be used together. ttydev_enter() is used by the cdev
+ * operations to prevent an actual operation from being processed when
+ * the TTY has been abandoned. ttydev_leave() is used by ttydev_open()
+ * and ttydev_close() to determine whether per-TTY data should be
+ * deallocated.
+ */
+
+static __inline int
+ttydev_enter(struct tty *tp)
+{
+
+ tty_lock(tp);
+
+ if (tty_gone(tp) || !tty_opened(tp)) {
+ /* Device is already gone. */
+ tty_unlock(tp);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static void
+ttydev_leave(struct tty *tp)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tty_opened(tp) || tp->t_flags & TF_OPENCLOSE) {
+ /* Device is still opened somewhere. */
+ tty_unlock(tp);
+ return;
+ }
+
+ tp->t_flags |= TF_OPENCLOSE;
+
+ /* Stop asynchronous I/O. */
+ funsetown(&tp->t_sigio);
+
+ /* Remove console TTY. */
+ if (constty == tp)
+ constty_clear();
+
+ /* Drain any output. */
+ if (!tty_gone(tp))
+ tty_drain(tp, 1);
+
+ ttydisc_close(tp);
+
+ /* Free i/o queues now since they might be large. */
+ ttyinq_free(&tp->t_inq);
+ tp->t_inlow = 0;
+ ttyoutq_free(&tp->t_outq);
+ tp->t_outlow = 0;
+
+ knlist_clear(&tp->t_inpoll.si_note, 1);
+ knlist_clear(&tp->t_outpoll.si_note, 1);
+
+ if (!tty_gone(tp))
+ ttydevsw_close(tp);
+
+ tp->t_flags &= ~TF_OPENCLOSE;
+ cv_broadcast(&tp->t_dcdwait);
+ tty_rel_free(tp);
+}
+
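+/*
+ * Illustrative sketch (editorial note, not part of the original file):
+ * a serial-class driver typically hands its hardware to this layer by
+ * filling in a struct ttydevsw (declared in sys/tty.h), allocating a tty
+ * with tty_alloc() and publishing the /dev nodes with tty_makedev().
+ * The names below are hypothetical:
+ *
+ *	static struct ttydevsw foo_ttydevsw = {
+ *		.tsw_flags	= TF_INITLOCK | TF_CALLOUT,
+ *		.tsw_outwakeup	= foo_outwakeup,
+ *		.tsw_param	= foo_param,
+ *	};
+ *
+ *	tp = tty_alloc(&foo_ttydevsw, sc);
+ *	tty_makedev(tp, NULL, "foo%u", unit);
+ *
+ * The ttydev_*() cdevsw methods below then run the open/close state
+ * machine on behalf of such a driver.
+ */
+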
+/*
+ * Operations that are exposed through the character device in /dev.
+ */
+static int
+ttydev_open(struct cdev *dev, int oflags, int devtype __unused,
+ struct thread *td)
+{
+ struct tty *tp;
+ int error;
+
+ tp = dev->si_drv1;
+ error = 0;
+ tty_lock(tp);
+ if (tty_gone(tp)) {
+ /* Device is already gone. */
+ tty_unlock(tp);
+ return (ENXIO);
+ }
+
+ /*
+ * Block when other processes are currently opening or closing
+ * the TTY.
+ */
+ while (tp->t_flags & TF_OPENCLOSE) {
+ error = tty_wait(tp, &tp->t_dcdwait);
+ if (error != 0) {
+ tty_unlock(tp);
+ return (error);
+ }
+ }
+ tp->t_flags |= TF_OPENCLOSE;
+
+ /*
+ * Make sure the "tty" and "cua" device cannot be opened at the
+ * same time. The console is a "tty" device.
+ */
+ if (TTY_CALLOUT(tp, dev)) {
+ if (tp->t_flags & (TF_OPENED_CONS | TF_OPENED_IN)) {
+ error = EBUSY;
+ goto done;
+ }
+ } else {
+ if (tp->t_flags & TF_OPENED_OUT) {
+ error = EBUSY;
+ goto done;
+ }
+ }
+
+ if (tp->t_flags & TF_EXCLUDE && priv_check(td, PRIV_TTY_EXCLUSIVE)) {
+ error = EBUSY;
+ goto done;
+ }
+
+ if (!tty_opened(tp)) {
+ /* Set proper termios flags. */
+ if (TTY_CALLOUT(tp, dev))
+ tp->t_termios = tp->t_termios_init_out;
+ else
+ tp->t_termios = tp->t_termios_init_in;
+ ttydevsw_param(tp, &tp->t_termios);
+ /* Prevent modem control on callout devices and /dev/console. */
+ if (TTY_CALLOUT(tp, dev) || dev == dev_console)
+ tp->t_termios.c_cflag |= CLOCAL;
+
+ ttydevsw_modem(tp, SER_DTR|SER_RTS, 0);
+
+ error = ttydevsw_open(tp);
+ if (error != 0)
+ goto done;
+
+ ttydisc_open(tp);
+ error = tty_watermarks(tp);
+ if (error != 0)
+ goto done;
+ }
+
+ /* Wait for Carrier Detect. */
+ if ((oflags & O_NONBLOCK) == 0 &&
+ (tp->t_termios.c_cflag & CLOCAL) == 0) {
+ while ((ttydevsw_modem(tp, 0, 0) & SER_DCD) == 0) {
+ error = tty_wait(tp, &tp->t_dcdwait);
+ if (error != 0)
+ goto done;
+ }
+ }
+
+ if (dev == dev_console)
+ tp->t_flags |= TF_OPENED_CONS;
+ else if (TTY_CALLOUT(tp, dev))
+ tp->t_flags |= TF_OPENED_OUT;
+ else
+ tp->t_flags |= TF_OPENED_IN;
+ MPASS((tp->t_flags & (TF_OPENED_CONS | TF_OPENED_IN)) == 0 ||
+ (tp->t_flags & TF_OPENED_OUT) == 0);
+
+done: tp->t_flags &= ~TF_OPENCLOSE;
+ cv_broadcast(&tp->t_dcdwait);
+ ttydev_leave(tp);
+
+ return (error);
+}
+
+static int
+ttydev_close(struct cdev *dev, int fflag, int devtype __unused,
+ struct thread *td __unused)
+{
+ struct tty *tp = dev->si_drv1;
+
+ tty_lock(tp);
+
+ /*
+ * Don't actually close the device if it is being used as the
+ * console.
+ */
+ MPASS((tp->t_flags & (TF_OPENED_CONS | TF_OPENED_IN)) == 0 ||
+ (tp->t_flags & TF_OPENED_OUT) == 0);
+ if (dev == dev_console)
+ tp->t_flags &= ~TF_OPENED_CONS;
+ else
+ tp->t_flags &= ~(TF_OPENED_IN|TF_OPENED_OUT);
+
+ if (tp->t_flags & TF_OPENED) {
+ tty_unlock(tp);
+ return (0);
+ }
+
+ /* If revoking, flush output now to avoid draining it later. */
+ if (fflag & FREVOKE)
+ tty_flush(tp, FWRITE);
+
+ tp->t_flags &= ~TF_EXCLUDE;
+
+ /* Properly wake up threads that are stuck - revoke(). */
+ tp->t_revokecnt++;
+ tty_wakeup(tp, FREAD|FWRITE);
+ cv_broadcast(&tp->t_bgwait);
+ cv_broadcast(&tp->t_dcdwait);
+
+ ttydev_leave(tp);
+
+ return (0);
+}
+
+static __inline int
+tty_is_ctty(struct tty *tp, struct proc *p)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ return (p->p_session == tp->t_session && p->p_flag & P_CONTROLT);
+}
+
+int
+tty_wait_background(struct tty *tp, struct thread *td, int sig)
+{
+ struct proc *p = td->td_proc;
+ struct pgrp *pg;
+ ksiginfo_t ksi;
+ int error;
+
+ MPASS(sig == SIGTTIN || sig == SIGTTOU);
+ tty_lock_assert(tp, MA_OWNED);
+
+ for (;;) {
+ PROC_LOCK(p);
+ /*
+		 * The process should only sleep when:
+		 * - This terminal is the controlling terminal
+		 * - Its process group is not the foreground process
+		 *   group
+		 * - The parent process isn't waiting for the child to
+		 *   exit
+		 * - The signal to send to the process isn't masked
+ */
+ if (!tty_is_ctty(tp, p) || p->p_pgrp == tp->t_pgrp) {
+ /* Allow the action to happen. */
+ PROC_UNLOCK(p);
+ return (0);
+ }
+
+ if (SIGISMEMBER(p->p_sigacts->ps_sigignore, sig) ||
+ SIGISMEMBER(td->td_sigmask, sig)) {
+			/* Signal is ignored or masked: allow writes/ioctls, fail reads. */
+ PROC_UNLOCK(p);
+ return (sig == SIGTTOU ? 0 : EIO);
+ }
+
+ pg = p->p_pgrp;
+ if (p->p_flag & P_PPWAIT || pg->pg_jobc == 0) {
+ /* Don't allow the action to happen. */
+ PROC_UNLOCK(p);
+ return (EIO);
+ }
+ PROC_UNLOCK(p);
+
+ /*
+ * Send the signal and sleep until we're the new
+ * foreground process group.
+ */
+ if (sig != 0) {
+ ksiginfo_init(&ksi);
+ ksi.ksi_code = SI_KERNEL;
+ ksi.ksi_signo = sig;
+ sig = 0;
+ }
+ PGRP_LOCK(pg);
+ pgsignal(pg, ksi.ksi_signo, 1, &ksi);
+ PGRP_UNLOCK(pg);
+
+ error = tty_wait(tp, &tp->t_bgwait);
+ if (error)
+ return (error);
+ }
+}
+
+static int
+ttydev_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ error = ttydev_enter(tp);
+ if (error)
+ goto done;
+ error = ttydisc_read(tp, uio, ioflag);
+ tty_unlock(tp);
+
+ /*
+ * The read() call should not throw an error when the device is
+ * being destroyed. Silently convert it to an EOF.
+ */
+done: if (error == ENXIO)
+ error = 0;
+ return (error);
+}
+
+static int
+ttydev_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ error = ttydev_enter(tp);
+ if (error)
+ return (error);
+
+ if (tp->t_termios.c_lflag & TOSTOP) {
+ error = tty_wait_background(tp, curthread, SIGTTOU);
+ if (error)
+ goto done;
+ }
+
+ if (ioflag & IO_NDELAY && tp->t_flags & TF_BUSY_OUT) {
+ /* Allow non-blocking writes to bypass serialization. */
+ error = ttydisc_write(tp, uio, ioflag);
+ } else {
+ /* Serialize write() calls. */
+ while (tp->t_flags & TF_BUSY_OUT) {
+ error = tty_wait(tp, &tp->t_outserwait);
+ if (error)
+ goto done;
+ }
+
+ tp->t_flags |= TF_BUSY_OUT;
+ error = ttydisc_write(tp, uio, ioflag);
+ tp->t_flags &= ~TF_BUSY_OUT;
+ cv_signal(&tp->t_outserwait);
+ }
+
+done: tty_unlock(tp);
+ return (error);
+}
+
+static int
+ttydev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ error = ttydev_enter(tp);
+ if (error)
+ return (error);
+
+ switch (cmd) {
+ case TIOCCBRK:
+ case TIOCCONS:
+ case TIOCDRAIN:
+ case TIOCEXCL:
+ case TIOCFLUSH:
+ case TIOCNXCL:
+ case TIOCSBRK:
+ case TIOCSCTTY:
+ case TIOCSETA:
+ case TIOCSETAF:
+ case TIOCSETAW:
+ case TIOCSPGRP:
+ case TIOCSTART:
+ case TIOCSTAT:
+ case TIOCSTI:
+ case TIOCSTOP:
+ case TIOCSWINSZ:
+#if 0
+ case TIOCSDRAINWAIT:
+ case TIOCSETD:
+#endif
+#ifdef COMPAT_43TTY
+ case TIOCLBIC:
+ case TIOCLBIS:
+ case TIOCLSET:
+ case TIOCSETC:
+ case OTIOCSETD:
+ case TIOCSETN:
+ case TIOCSETP:
+ case TIOCSLTC:
+#endif /* COMPAT_43TTY */
+ /*
+ * If the ioctl() causes the TTY to be modified, let it
+ * wait in the background.
+ */
+ error = tty_wait_background(tp, curthread, SIGTTOU);
+ if (error)
+ goto done;
+ }
+
+ if (cmd == TIOCSETA || cmd == TIOCSETAW || cmd == TIOCSETAF) {
+ struct termios *old = &tp->t_termios;
+ struct termios *new = (struct termios *)data;
+ struct termios *lock = TTY_CALLOUT(tp, dev) ?
+ &tp->t_termios_lock_out : &tp->t_termios_lock_in;
+ int cc;
+
+ /*
+		 * Lock-state devices: for each flag and control character
+		 * that is locked, keep the value currently in use and
+		 * ignore the caller's.
+ */
+ new->c_iflag = (old->c_iflag & lock->c_iflag) |
+ (new->c_iflag & ~lock->c_iflag);
+ new->c_oflag = (old->c_oflag & lock->c_oflag) |
+ (new->c_oflag & ~lock->c_oflag);
+ new->c_cflag = (old->c_cflag & lock->c_cflag) |
+ (new->c_cflag & ~lock->c_cflag);
+ new->c_lflag = (old->c_lflag & lock->c_lflag) |
+ (new->c_lflag & ~lock->c_lflag);
+ for (cc = 0; cc < NCCS; ++cc)
+ if (lock->c_cc[cc])
+ new->c_cc[cc] = old->c_cc[cc];
+ if (lock->c_ispeed)
+ new->c_ispeed = old->c_ispeed;
+ if (lock->c_ospeed)
+ new->c_ospeed = old->c_ospeed;
+ }
+
+ error = tty_ioctl(tp, cmd, data, fflag, td);
+done: tty_unlock(tp);
+
+ return (error);
+}
+
+static int
+ttydev_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct tty *tp = dev->si_drv1;
+ int error, revents = 0;
+
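+	/* A TTY that is already gone polls as readable and hung up. */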
+ error = ttydev_enter(tp);
+ if (error)
+ return ((events & (POLLIN|POLLRDNORM)) | POLLHUP);
+
+ if (events & (POLLIN|POLLRDNORM)) {
+ /* See if we can read something. */
+ if (ttydisc_read_poll(tp) > 0)
+ revents |= events & (POLLIN|POLLRDNORM);
+ }
+
+ if (tp->t_flags & TF_ZOMBIE) {
+ /* Hangup flag on zombie state. */
+ revents |= POLLHUP;
+ } else if (events & (POLLOUT|POLLWRNORM)) {
+ /* See if we can write something. */
+ if (ttydisc_write_poll(tp) > 0)
+ revents |= events & (POLLOUT|POLLWRNORM);
+ }
+
+ if (revents == 0) {
+ if (events & (POLLIN|POLLRDNORM))
+ selrecord(td, &tp->t_inpoll);
+ if (events & (POLLOUT|POLLWRNORM))
+ selrecord(td, &tp->t_outpoll);
+ }
+
+ tty_unlock(tp);
+
+ return (revents);
+}
+
+static int
+ttydev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int nprot, vm_memattr_t *memattr)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ /* Handle mmap() through the driver. */
+
+ error = ttydev_enter(tp);
+ if (error)
+ return (-1);
+ error = ttydevsw_mmap(tp, offset, paddr, nprot, memattr);
+ tty_unlock(tp);
+
+ return (error);
+}
+
+/*
+ * kqueue support.
+ */
+
+static void
+tty_kqops_read_detach(struct knote *kn)
+{
+ struct tty *tp = kn->kn_hook;
+
+ knlist_remove(&tp->t_inpoll.si_note, kn, 0);
+}
+
+static int
+tty_kqops_read_event(struct knote *kn, long hint __unused)
+{
+ struct tty *tp = kn->kn_hook;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tty_gone(tp) || tp->t_flags & TF_ZOMBIE) {
+ kn->kn_flags |= EV_EOF;
+ return (1);
+ } else {
+ kn->kn_data = ttydisc_read_poll(tp);
+ return (kn->kn_data > 0);
+ }
+}
+
+static void
+tty_kqops_write_detach(struct knote *kn)
+{
+ struct tty *tp = kn->kn_hook;
+
+ knlist_remove(&tp->t_outpoll.si_note, kn, 0);
+}
+
+static int
+tty_kqops_write_event(struct knote *kn, long hint __unused)
+{
+ struct tty *tp = kn->kn_hook;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tty_gone(tp)) {
+ kn->kn_flags |= EV_EOF;
+ return (1);
+ } else {
+ kn->kn_data = ttydisc_write_poll(tp);
+ return (kn->kn_data > 0);
+ }
+}
+
+static struct filterops tty_kqops_read = {
+ .f_isfd = 1,
+ .f_detach = tty_kqops_read_detach,
+ .f_event = tty_kqops_read_event,
+};
+
+static struct filterops tty_kqops_write = {
+ .f_isfd = 1,
+ .f_detach = tty_kqops_write_detach,
+ .f_event = tty_kqops_write_event,
+};
+
+static int
+ttydev_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ error = ttydev_enter(tp);
+ if (error)
+ return (error);
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ kn->kn_hook = tp;
+ kn->kn_fop = &tty_kqops_read;
+ knlist_add(&tp->t_inpoll.si_note, kn, 1);
+ break;
+ case EVFILT_WRITE:
+ kn->kn_hook = tp;
+ kn->kn_fop = &tty_kqops_write;
+ knlist_add(&tp->t_outpoll.si_note, kn, 1);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ tty_unlock(tp);
+ return (error);
+}
+
+static struct cdevsw ttydev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = ttydev_open,
+ .d_close = ttydev_close,
+ .d_read = ttydev_read,
+ .d_write = ttydev_write,
+ .d_ioctl = ttydev_ioctl,
+ .d_kqfilter = ttydev_kqfilter,
+ .d_poll = ttydev_poll,
+ .d_mmap = ttydev_mmap,
+ .d_name = "ttydev",
+ .d_flags = D_TTY,
+};
+
+/*
+ * Init/lock-state devices. These nodes expose the initial and locked
+ * termios state (stored in si_drv2) of the corresponding terminal.
+ */
+
+static int
+ttyil_open(struct cdev *dev, int oflags __unused, int devtype __unused,
+ struct thread *td)
+{
+ struct tty *tp;
+ int error;
+
+ tp = dev->si_drv1;
+ error = 0;
+ tty_lock(tp);
+ if (tty_gone(tp))
+ error = ENODEV;
+ tty_unlock(tp);
+
+ return (error);
+}
+
+static int
+ttyil_close(struct cdev *dev __unused, int flag __unused, int mode __unused,
+ struct thread *td __unused)
+{
+
+ return (0);
+}
+
+static int
+ttyil_rdwr(struct cdev *dev __unused, struct uio *uio __unused,
+ int ioflag __unused)
+{
+
+ return (ENODEV);
+}
+
+static int
+ttyil_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct tty *tp = dev->si_drv1;
+ int error;
+
+ tty_lock(tp);
+ if (tty_gone(tp)) {
+ error = ENODEV;
+ goto done;
+ }
+
+ error = ttydevsw_cioctl(tp, dev2unit(dev), cmd, data, td);
+ if (error != ENOIOCTL)
+ goto done;
+ error = 0;
+
+ switch (cmd) {
+ case TIOCGETA:
+ /* Obtain terminal flags through tcgetattr(). */
+ *(struct termios*)data = *(struct termios*)dev->si_drv2;
+ break;
+ case TIOCSETA:
+ /* Set terminal flags through tcsetattr(). */
+ error = priv_check(td, PRIV_TTY_SETA);
+ if (error)
+ break;
+ *(struct termios*)dev->si_drv2 = *(struct termios*)data;
+ break;
+ case TIOCGETD:
+ *(int *)data = TTYDISC;
+ break;
+ case TIOCGWINSZ:
+ bzero(data, sizeof(struct winsize));
+ break;
+ default:
+ error = ENOTTY;
+ }
+
+done: tty_unlock(tp);
+ return (error);
+}
+
+static struct cdevsw ttyil_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = ttyil_open,
+ .d_close = ttyil_close,
+ .d_read = ttyil_rdwr,
+ .d_write = ttyil_rdwr,
+ .d_ioctl = ttyil_ioctl,
+ .d_name = "ttyil",
+ .d_flags = D_TTY,
+};
+
+static void
+tty_init_termios(struct tty *tp)
+{
+ struct termios *t = &tp->t_termios_init_in;
+
+ t->c_cflag = TTYDEF_CFLAG;
+ t->c_iflag = TTYDEF_IFLAG;
+ t->c_lflag = TTYDEF_LFLAG;
+ t->c_oflag = TTYDEF_OFLAG;
+ t->c_ispeed = TTYDEF_SPEED;
+ t->c_ospeed = TTYDEF_SPEED;
+ memcpy(&t->c_cc, ttydefchars, sizeof ttydefchars);
+
+ tp->t_termios_init_out = *t;
+}
+
+void
+tty_init_console(struct tty *tp, speed_t s)
+{
+ struct termios *ti = &tp->t_termios_init_in;
+ struct termios *to = &tp->t_termios_init_out;
+
+ if (s != 0) {
+ ti->c_ispeed = ti->c_ospeed = s;
+ to->c_ispeed = to->c_ospeed = s;
+ }
+
+ ti->c_cflag |= CLOCAL;
+ to->c_cflag |= CLOCAL;
+}
+
+/*
+ * Standard device routine implementations, mostly meant for
+ * pseudo-terminal device drivers. When a driver creates a new terminal
+ * device class, any missing routines are filled in with these defaults.
+ */
+
+static int
+ttydevsw_defopen(struct tty *tp __unused)
+{
+
+ return (0);
+}
+
+static void
+ttydevsw_defclose(struct tty *tp __unused)
+{
+
+}
+
+static void
+ttydevsw_defoutwakeup(struct tty *tp __unused)
+{
+
+ panic("Terminal device has output, while not implemented");
+}
+
+static void
+ttydevsw_definwakeup(struct tty *tp __unused)
+{
+
+}
+
+static int
+ttydevsw_defioctl(struct tty *tp __unused, u_long cmd __unused,
+ caddr_t data __unused, struct thread *td __unused)
+{
+
+ return (ENOIOCTL);
+}
+
+static int
+ttydevsw_defcioctl(struct tty *tp __unused, int unit __unused,
+ u_long cmd __unused, caddr_t data __unused, struct thread *td __unused)
+{
+
+ return (ENOIOCTL);
+}
+
+static int
+ttydevsw_defparam(struct tty *tp __unused, struct termios *t)
+{
+
+ /*
+	 * Allow the baud rate to be adjusted for pseudo-devices, but
+	 * clamp it to the B50-B115200 range to prevent excessive buffer
+ * usage. Also disallow 0, to prevent foot shooting.
+ */
+ if (t->c_ispeed < B50)
+ t->c_ispeed = B50;
+ else if (t->c_ispeed > B115200)
+ t->c_ispeed = B115200;
+ if (t->c_ospeed < B50)
+ t->c_ospeed = B50;
+ else if (t->c_ospeed > B115200)
+ t->c_ospeed = B115200;
+ t->c_cflag |= CREAD;
+
+ return (0);
+}
+
+static int
+ttydevsw_defmodem(struct tty *tp __unused, int sigon __unused,
+ int sigoff __unused)
+{
+
+ /* Simulate a carrier to make the TTY layer happy. */
+ return (SER_DCD);
+}
+
+static int
+ttydevsw_defmmap(struct tty *tp __unused, vm_ooffset_t offset __unused,
+ vm_paddr_t *paddr __unused, int nprot __unused,
+ vm_memattr_t *memattr __unused)
+{
+
+ return (-1);
+}
+
+static void
+ttydevsw_defpktnotify(struct tty *tp __unused, char event __unused)
+{
+
+}
+
+static void
+ttydevsw_deffree(void *softc __unused)
+{
+
+ panic("Terminal device freed without a free-handler");
+}
+
+static bool
+ttydevsw_defbusy(struct tty *tp __unused)
+{
+
+ return (FALSE);
+}
+
+/*
+ * TTY allocation and deallocation. A TTY device can only be deallocated
+ * when the driver no longer uses it, when it isn't a session's
+ * controlling TTY and when its device node isn't opened through devfs.
+ */
+
+struct tty *
+tty_alloc(struct ttydevsw *tsw, void *sc)
+{
+
+ return (tty_alloc_mutex(tsw, sc, NULL));
+}
+
+struct tty *
+tty_alloc_mutex(struct ttydevsw *tsw, void *sc, struct mtx *mutex)
+{
+ struct tty *tp;
+
+ /* Make sure the driver defines all routines. */
+#define PATCH_FUNC(x) do { \
+ if (tsw->tsw_ ## x == NULL) \
+ tsw->tsw_ ## x = ttydevsw_def ## x; \
+} while (0)
+ PATCH_FUNC(open);
+ PATCH_FUNC(close);
+ PATCH_FUNC(outwakeup);
+ PATCH_FUNC(inwakeup);
+ PATCH_FUNC(ioctl);
+ PATCH_FUNC(cioctl);
+ PATCH_FUNC(param);
+ PATCH_FUNC(modem);
+ PATCH_FUNC(mmap);
+ PATCH_FUNC(pktnotify);
+ PATCH_FUNC(free);
+ PATCH_FUNC(busy);
+#undef PATCH_FUNC
+
+ tp = malloc(sizeof(struct tty), M_TTY, M_WAITOK|M_ZERO);
+ tp->t_devsw = tsw;
+ tp->t_devswsoftc = sc;
+ tp->t_flags = tsw->tsw_flags;
+ tp->t_drainwait = tty_drainwait;
+
+ tty_init_termios(tp);
+
+ cv_init(&tp->t_inwait, "ttyin");
+ cv_init(&tp->t_outwait, "ttyout");
+ cv_init(&tp->t_outserwait, "ttyosr");
+ cv_init(&tp->t_bgwait, "ttybg");
+ cv_init(&tp->t_dcdwait, "ttydcd");
+
+ /* Allow drivers to use a custom mutex to lock the TTY. */
+ if (mutex != NULL) {
+ tp->t_mtx = mutex;
+ } else {
+ tp->t_mtx = &tp->t_mtxobj;
+ mtx_init(&tp->t_mtxobj, "ttymtx", NULL, MTX_DEF);
+ }
+
+ knlist_init_mtx(&tp->t_inpoll.si_note, tp->t_mtx);
+ knlist_init_mtx(&tp->t_outpoll.si_note, tp->t_mtx);
+
+ return (tp);
+}
+
+static void
+tty_dealloc(void *arg)
+{
+ struct tty *tp = arg;
+
+ /*
+	 * ttydev_leave() usually frees the i/o queues earlier, but it is
+ * not always called between queue allocation and here. The queues
+ * may be allocated by ioctls on a pty control device without the
+ * corresponding pty slave device ever being open, or after it is
+ * closed.
+ */
+ ttyinq_free(&tp->t_inq);
+ ttyoutq_free(&tp->t_outq);
+ seldrain(&tp->t_inpoll);
+ seldrain(&tp->t_outpoll);
+ knlist_destroy(&tp->t_inpoll.si_note);
+ knlist_destroy(&tp->t_outpoll.si_note);
+
+ cv_destroy(&tp->t_inwait);
+ cv_destroy(&tp->t_outwait);
+ cv_destroy(&tp->t_bgwait);
+ cv_destroy(&tp->t_dcdwait);
+ cv_destroy(&tp->t_outserwait);
+
+ if (tp->t_mtx == &tp->t_mtxobj)
+ mtx_destroy(&tp->t_mtxobj);
+ ttydevsw_free(tp);
+ free(tp, M_TTY);
+}
+
+static void
+tty_rel_free(struct tty *tp)
+{
+ struct cdev *dev;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+#define TF_ACTIVITY (TF_GONE|TF_OPENED|TF_HOOK|TF_OPENCLOSE)
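+	/* Deallocate only when gone and no longer opened, hooked or in open()/close(). */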
+ if (tp->t_sessioncnt != 0 || (tp->t_flags & TF_ACTIVITY) != TF_GONE) {
+ /* TTY is still in use. */
+ tty_unlock(tp);
+ return;
+ }
+
+ /* TTY can be deallocated. */
+ dev = tp->t_dev;
+ tp->t_dev = NULL;
+ tty_unlock(tp);
+
+ if (dev != NULL) {
+ sx_xlock(&tty_list_sx);
+ TAILQ_REMOVE(&tty_list, tp, t_list);
+ tty_list_count--;
+ sx_xunlock(&tty_list_sx);
+ destroy_dev_sched_cb(dev, tty_dealloc, tp);
+ }
+}
+
+void
+tty_rel_pgrp(struct tty *tp, struct pgrp *pg)
+{
+
+ MPASS(tp->t_sessioncnt > 0);
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tp->t_pgrp == pg)
+ tp->t_pgrp = NULL;
+
+ tty_unlock(tp);
+}
+
+void
+tty_rel_sess(struct tty *tp, struct session *sess)
+{
+
+ MPASS(tp->t_sessioncnt > 0);
+
+ /* Current session has left. */
+ if (tp->t_session == sess) {
+ tp->t_session = NULL;
+ MPASS(tp->t_pgrp == NULL);
+ }
+ tp->t_sessioncnt--;
+ tty_rel_free(tp);
+}
+
+void
+tty_rel_gone(struct tty *tp)
+{
+
+ MPASS(!tty_gone(tp));
+
+ /* Simulate carrier removal. */
+ ttydisc_modem(tp, 0);
+
+ /* Wake up all blocked threads. */
+ tty_wakeup(tp, FREAD|FWRITE);
+ cv_broadcast(&tp->t_bgwait);
+ cv_broadcast(&tp->t_dcdwait);
+
+ tp->t_flags |= TF_GONE;
+ tty_rel_free(tp);
+}
+
+/*
+ * Exposing information about current TTYs through sysctl
+ */
+
+static void
+tty_to_xtty(struct tty *tp, struct xtty *xt)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ xt->xt_size = sizeof(struct xtty);
+ xt->xt_insize = ttyinq_getsize(&tp->t_inq);
+ xt->xt_incc = ttyinq_bytescanonicalized(&tp->t_inq);
+ xt->xt_inlc = ttyinq_bytesline(&tp->t_inq);
+ xt->xt_inlow = tp->t_inlow;
+ xt->xt_outsize = ttyoutq_getsize(&tp->t_outq);
+ xt->xt_outcc = ttyoutq_bytesused(&tp->t_outq);
+ xt->xt_outlow = tp->t_outlow;
+ xt->xt_column = tp->t_column;
+ xt->xt_pgid = tp->t_pgrp ? tp->t_pgrp->pg_id : 0;
+ xt->xt_sid = tp->t_session ? tp->t_session->s_sid : 0;
+ xt->xt_flags = tp->t_flags;
+ xt->xt_dev = tp->t_dev ? dev2udev(tp->t_dev) : NODEV;
+}
+
+static int
+sysctl_kern_ttys(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long lsize;
+ struct xtty *xtlist, *xt;
+ struct tty *tp;
+ int error;
+
+ sx_slock(&tty_list_sx);
+ lsize = tty_list_count * sizeof(struct xtty);
+ if (lsize == 0) {
+ sx_sunlock(&tty_list_sx);
+ return (0);
+ }
+
+ xtlist = xt = malloc(lsize, M_TTY, M_WAITOK);
+
+ TAILQ_FOREACH(tp, &tty_list, t_list) {
+ tty_lock(tp);
+ tty_to_xtty(tp, xt);
+ tty_unlock(tp);
+ xt++;
+ }
+ sx_sunlock(&tty_list_sx);
+
+ error = SYSCTL_OUT(req, xtlist, lsize);
+ free(xtlist, M_TTY);
+ return (error);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, ttys, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
+ 0, 0, sysctl_kern_ttys, "S,xtty", "List of TTYs");
+
+/*
+ * Device node creation. Device has been set up, now we can expose it to
+ * the user.
+ */
+
+int
+tty_makedevf(struct tty *tp, struct ucred *cred, int flags,
+ const char *fmt, ...)
+{
+ va_list ap;
+ struct make_dev_args args;
+ struct cdev *dev, *init, *lock, *cua, *cinit, *clock;
+ const char *prefix = "tty";
+ char name[SPECNAMELEN - 3]; /* for "tty" and "cua". */
+ uid_t uid;
+ gid_t gid;
+ mode_t mode;
+ int error;
+
+ /* Remove "tty" prefix from devices like PTY's. */
+ if (tp->t_flags & TF_NOPREFIX)
+ prefix = "";
+
+ va_start(ap, fmt);
+ vsnrprintf(name, sizeof name, 32, fmt, ap);
+ va_end(ap);
+
+ if (cred == NULL) {
+ /* System device. */
+ uid = UID_ROOT;
+ gid = GID_WHEEL;
+ mode = S_IRUSR|S_IWUSR;
+ } else {
+ /* User device. */
+ uid = cred->cr_ruid;
+ gid = GID_TTY;
+ mode = S_IRUSR|S_IWUSR|S_IWGRP;
+ }
+
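+	/* Cloned devices get an extra reference; always check the name for validity. */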
+ flags = flags & TTYMK_CLONING ? MAKEDEV_REF : 0;
+ flags |= MAKEDEV_CHECKNAME;
+
+ /* Master call-in device. */
+ make_dev_args_init(&args);
+ args.mda_flags = flags;
+ args.mda_devsw = &ttydev_cdevsw;
+ args.mda_cr = cred;
+ args.mda_uid = uid;
+ args.mda_gid = gid;
+ args.mda_mode = mode;
+ args.mda_si_drv1 = tp;
+ error = make_dev_s(&args, &dev, "%s%s", prefix, name);
+ if (error != 0)
+ return (error);
+ tp->t_dev = dev;
+
+ init = lock = cua = cinit = clock = NULL;
+
+ /* Slave call-in devices. */
+ if (tp->t_flags & TF_INITLOCK) {
+ args.mda_devsw = &ttyil_cdevsw;
+ args.mda_unit = TTYUNIT_INIT;
+ args.mda_si_drv1 = tp;
+ args.mda_si_drv2 = &tp->t_termios_init_in;
+ error = make_dev_s(&args, &init, "%s%s.init", prefix, name);
+ if (error != 0)
+ goto fail;
+ dev_depends(dev, init);
+
+ args.mda_unit = TTYUNIT_LOCK;
+ args.mda_si_drv2 = &tp->t_termios_lock_in;
+ error = make_dev_s(&args, &lock, "%s%s.lock", prefix, name);
+ if (error != 0)
+ goto fail;
+ dev_depends(dev, lock);
+ }
+
+ /* Call-out devices. */
+ if (tp->t_flags & TF_CALLOUT) {
+ make_dev_args_init(&args);
+ args.mda_flags = flags;
+ args.mda_devsw = &ttydev_cdevsw;
+ args.mda_cr = cred;
+ args.mda_uid = UID_UUCP;
+ args.mda_gid = GID_DIALER;
+ args.mda_mode = 0660;
+ args.mda_unit = TTYUNIT_CALLOUT;
+ args.mda_si_drv1 = tp;
+ error = make_dev_s(&args, &cua, "cua%s", name);
+ if (error != 0)
+ goto fail;
+ dev_depends(dev, cua);
+
+ /* Slave call-out devices. */
+ if (tp->t_flags & TF_INITLOCK) {
+ args.mda_devsw = &ttyil_cdevsw;
+ args.mda_unit = TTYUNIT_CALLOUT | TTYUNIT_INIT;
+ args.mda_si_drv2 = &tp->t_termios_init_out;
+ error = make_dev_s(&args, &cinit, "cua%s.init", name);
+ if (error != 0)
+ goto fail;
+ dev_depends(dev, cinit);
+
+ args.mda_unit = TTYUNIT_CALLOUT | TTYUNIT_LOCK;
+ args.mda_si_drv2 = &tp->t_termios_lock_out;
+ error = make_dev_s(&args, &clock, "cua%s.lock", name);
+ if (error != 0)
+ goto fail;
+ dev_depends(dev, clock);
+ }
+ }
+
+ sx_xlock(&tty_list_sx);
+ TAILQ_INSERT_TAIL(&tty_list, tp, t_list);
+ tty_list_count++;
+ sx_xunlock(&tty_list_sx);
+
+ return (0);
+
+fail:
+ destroy_dev(dev);
+ if (init)
+ destroy_dev(init);
+ if (lock)
+ destroy_dev(lock);
+ if (cinit)
+ destroy_dev(cinit);
+ if (clock)
+ destroy_dev(clock);
+
+ return (error);
+}
+
+/*
+ * Signalling processes.
+ */
+
+void
+tty_signal_sessleader(struct tty *tp, int sig)
+{
+ struct proc *p;
+
+ tty_lock_assert(tp, MA_OWNED);
+ MPASS(sig >= 1 && sig < NSIG);
+
+ /* Make signals start output again. */
+ tp->t_flags &= ~TF_STOPPED;
+
+ if (tp->t_session != NULL && tp->t_session->s_leader != NULL) {
+ p = tp->t_session->s_leader;
+ PROC_LOCK(p);
+ kern_psignal(p, sig);
+ PROC_UNLOCK(p);
+ }
+}
+
+void
+tty_signal_pgrp(struct tty *tp, int sig)
+{
+ ksiginfo_t ksi;
+
+ tty_lock_assert(tp, MA_OWNED);
+ MPASS(sig >= 1 && sig < NSIG);
+
+ /* Make signals start output again. */
+ tp->t_flags &= ~TF_STOPPED;
+
+ if (sig == SIGINFO && !(tp->t_termios.c_lflag & NOKERNINFO))
+ tty_info(tp);
+ if (tp->t_pgrp != NULL) {
+ ksiginfo_init(&ksi);
+ ksi.ksi_signo = sig;
+ ksi.ksi_code = SI_KERNEL;
+ PGRP_LOCK(tp->t_pgrp);
+ pgsignal(tp->t_pgrp, sig, 1, &ksi);
+ PGRP_UNLOCK(tp->t_pgrp);
+ }
+}
+
+void
+tty_wakeup(struct tty *tp, int flags)
+{
+
+ if (tp->t_flags & TF_ASYNC && tp->t_sigio != NULL)
+ pgsigio(&tp->t_sigio, SIGIO, (tp->t_session != NULL));
+
+ if (flags & FWRITE) {
+ cv_broadcast(&tp->t_outwait);
+ selwakeup(&tp->t_outpoll);
+ KNOTE_LOCKED(&tp->t_outpoll.si_note, 0);
+ }
+ if (flags & FREAD) {
+ cv_broadcast(&tp->t_inwait);
+ selwakeup(&tp->t_inpoll);
+ KNOTE_LOCKED(&tp->t_inpoll.si_note, 0);
+ }
+}
+
+int
+tty_wait(struct tty *tp, struct cv *cv)
+{
+ int error;
+ int revokecnt = tp->t_revokecnt;
+
+ tty_lock_assert(tp, MA_OWNED|MA_NOTRECURSED);
+ MPASS(!tty_gone(tp));
+
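+	/* Sleep interruptibly; a revoke() bumps t_revokecnt and wakes us. */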
+ error = cv_wait_sig(cv, tp->t_mtx);
+
+ /* Bail out when the device slipped away. */
+ if (tty_gone(tp))
+ return (ENXIO);
+
+ /* Restart the system call when we may have been revoked. */
+ if (tp->t_revokecnt != revokecnt)
+ return (ERESTART);
+
+ return (error);
+}
+
+int
+tty_timedwait(struct tty *tp, struct cv *cv, int hz)
+{
+ int error;
+ int revokecnt = tp->t_revokecnt;
+
+ tty_lock_assert(tp, MA_OWNED|MA_NOTRECURSED);
+ MPASS(!tty_gone(tp));
+
+ error = cv_timedwait_sig(cv, tp->t_mtx, hz);
+
+ /* Bail out when the device slipped away. */
+ if (tty_gone(tp))
+ return (ENXIO);
+
+ /* Restart the system call when we may have been revoked. */
+ if (tp->t_revokecnt != revokecnt)
+ return (ERESTART);
+
+ return (error);
+}
+
+void
+tty_flush(struct tty *tp, int flags)
+{
+
+ if (flags & FWRITE) {
+ tp->t_flags &= ~TF_HIWAT_OUT;
+ ttyoutq_flush(&tp->t_outq);
+ tty_wakeup(tp, FWRITE);
+ if (!tty_gone(tp)) {
+ ttydevsw_outwakeup(tp);
+ ttydevsw_pktnotify(tp, TIOCPKT_FLUSHWRITE);
+ }
+ }
+ if (flags & FREAD) {
+ tty_hiwat_in_unblock(tp);
+ ttyinq_flush(&tp->t_inq);
+ tty_wakeup(tp, FREAD);
+ if (!tty_gone(tp)) {
+ ttydevsw_inwakeup(tp);
+ ttydevsw_pktnotify(tp, TIOCPKT_FLUSHREAD);
+ }
+ }
+}
+
+void
+tty_set_winsize(struct tty *tp, const struct winsize *wsz)
+{
+
+ if (memcmp(&tp->t_winsize, wsz, sizeof(*wsz)) == 0)
+ return;
+ tp->t_winsize = *wsz;
+ tty_signal_pgrp(tp, SIGWINCH);
+}
+
+static int
+tty_generic_ioctl(struct tty *tp, u_long cmd, void *data, int fflag,
+ struct thread *td)
+{
+ int error;
+
+ switch (cmd) {
+ /*
+ * Modem commands.
+	 * The SER_* and TIOCM_* flags describe the same signals, but the
+	 * TIOCM_* values are shifted left by one bit (bit 0 is TIOCM_LE),
+	 * hence the shifts below.
+ */
+ case TIOCSDTR:
+ ttydevsw_modem(tp, SER_DTR, 0);
+ return (0);
+ case TIOCCDTR:
+ ttydevsw_modem(tp, 0, SER_DTR);
+ return (0);
+ case TIOCMSET: {
+ int bits = *(int *)data;
+ ttydevsw_modem(tp,
+ (bits & (TIOCM_DTR | TIOCM_RTS)) >> 1,
+ ((~bits) & (TIOCM_DTR | TIOCM_RTS)) >> 1);
+ return (0);
+ }
+ case TIOCMBIS: {
+ int bits = *(int *)data;
+ ttydevsw_modem(tp, (bits & (TIOCM_DTR | TIOCM_RTS)) >> 1, 0);
+ return (0);
+ }
+ case TIOCMBIC: {
+ int bits = *(int *)data;
+ ttydevsw_modem(tp, 0, (bits & (TIOCM_DTR | TIOCM_RTS)) >> 1);
+ return (0);
+ }
+ case TIOCMGET:
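+		/* TIOCM_LE is always reported; the rest are the driver's SER_* bits. */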
+ *(int *)data = TIOCM_LE + (ttydevsw_modem(tp, 0, 0) << 1);
+ return (0);
+
+ case FIOASYNC:
+ if (*(int *)data)
+ tp->t_flags |= TF_ASYNC;
+ else
+ tp->t_flags &= ~TF_ASYNC;
+ return (0);
+ case FIONBIO:
+ /* This device supports non-blocking operation. */
+ return (0);
+ case FIONREAD:
+ *(int *)data = ttyinq_bytescanonicalized(&tp->t_inq);
+ return (0);
+ case FIONWRITE:
+ case TIOCOUTQ:
+ *(int *)data = ttyoutq_bytesused(&tp->t_outq);
+ return (0);
+ case FIOSETOWN:
+ if (tp->t_session != NULL && !tty_is_ctty(tp, td->td_proc))
+ /* Not allowed to set ownership. */
+ return (ENOTTY);
+
+ /* Temporarily unlock the TTY to set ownership. */
+ tty_unlock(tp);
+ error = fsetown(*(int *)data, &tp->t_sigio);
+ tty_lock(tp);
+ return (error);
+ case FIOGETOWN:
+ if (tp->t_session != NULL && !tty_is_ctty(tp, td->td_proc))
+ /* Not allowed to set ownership. */
+ return (ENOTTY);
+
+ /* Get ownership. */
+ *(int *)data = fgetown(&tp->t_sigio);
+ return (0);
+ case TIOCGETA:
+ /* Obtain terminal flags through tcgetattr(). */
+ *(struct termios*)data = tp->t_termios;
+ return (0);
+ case TIOCSETA:
+ case TIOCSETAW:
+ case TIOCSETAF: {
+ struct termios *t = data;
+
+ /*
+ * Who makes up these funny rules? According to POSIX,
+ * input baud rate is set equal to the output baud rate
+ * when zero.
+ */
+ if (t->c_ispeed == 0)
+ t->c_ispeed = t->c_ospeed;
+
+ /* Discard any unsupported bits. */
+ t->c_iflag &= TTYSUP_IFLAG;
+ t->c_oflag &= TTYSUP_OFLAG;
+ t->c_lflag &= TTYSUP_LFLAG;
+ t->c_cflag &= TTYSUP_CFLAG;
+
+ /* Set terminal flags through tcsetattr(). */
+ if (cmd == TIOCSETAW || cmd == TIOCSETAF) {
+ error = tty_drain(tp, 0);
+ if (error)
+ return (error);
+ if (cmd == TIOCSETAF)
+ tty_flush(tp, FREAD);
+ }
+
+ /*
+ * Only call param() when the flags really change.
+ */
+ if ((t->c_cflag & CIGNORE) == 0 &&
+ (tp->t_termios.c_cflag != t->c_cflag ||
+ ((tp->t_termios.c_iflag ^ t->c_iflag) &
+ (IXON|IXOFF|IXANY)) ||
+ tp->t_termios.c_ispeed != t->c_ispeed ||
+ tp->t_termios.c_ospeed != t->c_ospeed)) {
+ error = ttydevsw_param(tp, t);
+ if (error)
+ return (error);
+
+ /* XXX: CLOCAL? */
+
+ tp->t_termios.c_cflag = t->c_cflag & ~CIGNORE;
+ tp->t_termios.c_ispeed = t->c_ispeed;
+ tp->t_termios.c_ospeed = t->c_ospeed;
+
+ /* Baud rate has changed - update watermarks. */
+ error = tty_watermarks(tp);
+ if (error)
+ return (error);
+ }
+
+ /* Copy new non-device driver parameters. */
+ tp->t_termios.c_iflag = t->c_iflag;
+ tp->t_termios.c_oflag = t->c_oflag;
+ tp->t_termios.c_lflag = t->c_lflag;
+ memcpy(&tp->t_termios.c_cc, t->c_cc, sizeof t->c_cc);
+
+ ttydisc_optimize(tp);
+
+ if ((t->c_lflag & ICANON) == 0) {
+ /*
+ * When in non-canonical mode, wake up all
+ * readers. Canonicalize any partial input. VMIN
+ * and VTIME could also be adjusted.
+ */
+ ttyinq_canonicalize(&tp->t_inq);
+ tty_wakeup(tp, FREAD);
+ }
+
+ /*
+ * For packet mode: notify the PTY consumer that VSTOP
+ * and VSTART may have been changed.
+ */
+ if (tp->t_termios.c_iflag & IXON &&
+ tp->t_termios.c_cc[VSTOP] == CTRL('S') &&
+ tp->t_termios.c_cc[VSTART] == CTRL('Q'))
+ ttydevsw_pktnotify(tp, TIOCPKT_DOSTOP);
+ else
+ ttydevsw_pktnotify(tp, TIOCPKT_NOSTOP);
+ return (0);
+ }
+ case TIOCGETD:
+ /* For compatibility - we only support TTYDISC. */
+ *(int *)data = TTYDISC;
+ return (0);
+ case TIOCGPGRP:
+ if (!tty_is_ctty(tp, td->td_proc))
+ return (ENOTTY);
+
+ if (tp->t_pgrp != NULL)
+ *(int *)data = tp->t_pgrp->pg_id;
+ else
+ *(int *)data = NO_PID;
+ return (0);
+ case TIOCGSID:
+ if (!tty_is_ctty(tp, td->td_proc))
+ return (ENOTTY);
+
+ MPASS(tp->t_session);
+ *(int *)data = tp->t_session->s_sid;
+ return (0);
+ case TIOCSCTTY: {
+ struct proc *p = td->td_proc;
+
+ /* XXX: This looks awful. */
+ tty_unlock(tp);
+ sx_xlock(&proctree_lock);
+ tty_lock(tp);
+
+ if (!SESS_LEADER(p)) {
+ /* Only the session leader may do this. */
+ sx_xunlock(&proctree_lock);
+ return (EPERM);
+ }
+
+ if (tp->t_session != NULL && tp->t_session == p->p_session) {
+ /* This is already our controlling TTY. */
+ sx_xunlock(&proctree_lock);
+ return (0);
+ }
+
+ if (p->p_session->s_ttyp != NULL ||
+ (tp->t_session != NULL && tp->t_session->s_ttyvp != NULL &&
+ tp->t_session->s_ttyvp->v_type != VBAD)) {
+ /*
+			 * The calling session already has a controlling
+			 * TTY, or this TTY is already associated with
+			 * another session whose vnode is still valid.
+ *
+ * Allow the TTY to be stolen when the vnode is
+ * invalid, but the reference to the TTY is
+ * still active. This allows immediate reuse of
+ * TTYs of which the session leader has been
+ * killed or the TTY revoked.
+ */
+ sx_xunlock(&proctree_lock);
+ return (EPERM);
+ }
+
+ /* Connect the session to the TTY. */
+ tp->t_session = p->p_session;
+ tp->t_session->s_ttyp = tp;
+ tp->t_sessioncnt++;
+ sx_xunlock(&proctree_lock);
+
+ /* Assign foreground process group. */
+ tp->t_pgrp = p->p_pgrp;
+ PROC_LOCK(p);
+ p->p_flag |= P_CONTROLT;
+ PROC_UNLOCK(p);
+
+ return (0);
+ }
+ case TIOCSPGRP: {
+ struct pgrp *pg;
+
+ /*
+ * XXX: Temporarily unlock the TTY to locate the process
+		 * group. This code would be a lot nicer if we ever
+ * decompose proctree_lock.
+ */
+ tty_unlock(tp);
+ sx_slock(&proctree_lock);
+ pg = pgfind(*(int *)data);
+ if (pg != NULL)
+ PGRP_UNLOCK(pg);
+ if (pg == NULL || pg->pg_session != td->td_proc->p_session) {
+ sx_sunlock(&proctree_lock);
+ tty_lock(tp);
+ return (EPERM);
+ }
+ tty_lock(tp);
+
+ /*
+ * Determine if this TTY is the controlling TTY after
+ * relocking the TTY.
+ */
+ if (!tty_is_ctty(tp, td->td_proc)) {
+ sx_sunlock(&proctree_lock);
+ return (ENOTTY);
+ }
+ tp->t_pgrp = pg;
+ sx_sunlock(&proctree_lock);
+
+ /* Wake up the background process groups. */
+ cv_broadcast(&tp->t_bgwait);
+ return (0);
+ }
+ case TIOCFLUSH: {
+ int flags = *(int *)data;
+
+ if (flags == 0)
+ flags = (FREAD|FWRITE);
+ else
+ flags &= (FREAD|FWRITE);
+ tty_flush(tp, flags);
+ return (0);
+ }
+ case TIOCDRAIN:
+ /* Drain TTY output. */
+ return tty_drain(tp, 0);
+ case TIOCGDRAINWAIT:
+ *(int *)data = tp->t_drainwait;
+ return (0);
+ case TIOCSDRAINWAIT:
+ error = priv_check(td, PRIV_TTY_DRAINWAIT);
+ if (error == 0)
+ tp->t_drainwait = *(int *)data;
+ return (error);
+ case TIOCCONS:
+ /* Set terminal as console TTY. */
+ if (*(int *)data) {
+ error = priv_check(td, PRIV_TTY_CONSOLE);
+ if (error)
+ return (error);
+
+ /*
+ * XXX: constty should really need to be locked!
+ * XXX: allow disconnected constty's to be stolen!
+ */
+
+ if (constty == tp)
+ return (0);
+ if (constty != NULL)
+ return (EBUSY);
+
+ tty_unlock(tp);
+ constty_set(tp);
+ tty_lock(tp);
+ } else if (constty == tp) {
+ constty_clear();
+ }
+ return (0);
+ case TIOCGWINSZ:
+ /* Obtain window size. */
+ *(struct winsize*)data = tp->t_winsize;
+ return (0);
+ case TIOCSWINSZ:
+ /* Set window size. */
+ tty_set_winsize(tp, data);
+ return (0);
+ case TIOCEXCL:
+ tp->t_flags |= TF_EXCLUDE;
+ return (0);
+ case TIOCNXCL:
+ tp->t_flags &= ~TF_EXCLUDE;
+ return (0);
+ case TIOCSTOP:
+ tp->t_flags |= TF_STOPPED;
+ ttydevsw_pktnotify(tp, TIOCPKT_STOP);
+ return (0);
+ case TIOCSTART:
+ tp->t_flags &= ~TF_STOPPED;
+ ttydevsw_outwakeup(tp);
+ ttydevsw_pktnotify(tp, TIOCPKT_START);
+ return (0);
+ case TIOCSTAT:
+ tty_info(tp);
+ return (0);
+ case TIOCSTI:
+ if ((fflag & FREAD) == 0 && priv_check(td, PRIV_TTY_STI))
+ return (EPERM);
+ if (!tty_is_ctty(tp, td->td_proc) &&
+ priv_check(td, PRIV_TTY_STI))
+ return (EACCES);
+ ttydisc_rint(tp, *(char *)data, 0);
+ ttydisc_rint_done(tp);
+ return (0);
+ }
+
+#ifdef COMPAT_43TTY
+ return tty_ioctl_compat(tp, cmd, data, fflag, td);
+#else /* !COMPAT_43TTY */
+ return (ENOIOCTL);
+#endif /* COMPAT_43TTY */
+}
+
+int
+tty_ioctl(struct tty *tp, u_long cmd, void *data, int fflag, struct thread *td)
+{
+ int error;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tty_gone(tp))
+ return (ENXIO);
+
+ error = ttydevsw_ioctl(tp, cmd, data, td);
+ if (error == ENOIOCTL)
+ error = tty_generic_ioctl(tp, cmd, data, fflag, td);
+
+ return (error);
+}
+
+dev_t
+tty_udev(struct tty *tp)
+{
+
+ if (tp->t_dev)
+ return (dev2udev(tp->t_dev));
+ else
+ return (NODEV);
+}
+
+int
+tty_checkoutq(struct tty *tp)
+{
+
+ /* 256 bytes should be enough to print a log message. */
+ return (ttyoutq_bytesleft(&tp->t_outq) >= 256);
+}
+
+void
+tty_hiwat_in_block(struct tty *tp)
+{
+
+ if ((tp->t_flags & TF_HIWAT_IN) == 0 &&
+ tp->t_termios.c_iflag & IXOFF &&
+ tp->t_termios.c_cc[VSTOP] != _POSIX_VDISABLE) {
+ /*
+ * Input flow control. Only enter the high watermark when we
+ * can successfully store the VSTOP character.
+ */
+ if (ttyoutq_write_nofrag(&tp->t_outq,
+ &tp->t_termios.c_cc[VSTOP], 1) == 0)
+ tp->t_flags |= TF_HIWAT_IN;
+ } else {
+ /* No input flow control. */
+ tp->t_flags |= TF_HIWAT_IN;
+ }
+}
+
+void
+tty_hiwat_in_unblock(struct tty *tp)
+{
+
+ if (tp->t_flags & TF_HIWAT_IN &&
+ tp->t_termios.c_iflag & IXOFF &&
+ tp->t_termios.c_cc[VSTART] != _POSIX_VDISABLE) {
+ /*
+ * Input flow control. Only leave the high watermark when we
+ * can successfully store the VSTART character.
+ */
+ if (ttyoutq_write_nofrag(&tp->t_outq,
+ &tp->t_termios.c_cc[VSTART], 1) == 0)
+ tp->t_flags &= ~TF_HIWAT_IN;
+ } else {
+ /* No input flow control. */
+ tp->t_flags &= ~TF_HIWAT_IN;
+ }
+
+ if (!tty_gone(tp))
+ ttydevsw_inwakeup(tp);
+}
+
+/*
+ * TTY hooks interface.
+ */
+
+static int
+ttyhook_defrint(struct tty *tp, char c, int flags)
+{
+
+ if (ttyhook_rint_bypass(tp, &c, 1) != 1)
+ return (-1);
+
+ return (0);
+}
+
+int
+ttyhook_register(struct tty **rtp, struct proc *p, int fd, struct ttyhook *th,
+ void *softc)
+{
+ struct tty *tp;
+ struct file *fp;
+ struct cdev *dev;
+ struct cdevsw *cdp;
+ struct filedesc *fdp;
+ cap_rights_t rights;
+ int error, ref;
+
+ /* Validate the file descriptor. */
+ fdp = p->p_fd;
+ error = fget_unlocked(fdp, fd, cap_rights_init(&rights, CAP_TTYHOOK),
+ &fp, NULL);
+ if (error != 0)
+ return (error);
+ if (fp->f_ops == &badfileops) {
+ error = EBADF;
+ goto done1;
+ }
+
+ /*
+ * Make sure the vnode is bound to a character device.
+	 * An unlocked check of the vnode type is fine here, because we
+	 * only need to avoid calling devvn_refthread() on a file that
+	 * has never been opened through a character device.
+ */
+ if (fp->f_type != DTYPE_VNODE || fp->f_vnode->v_type != VCHR) {
+ error = EINVAL;
+ goto done1;
+ }
+
+ /* Make sure it is a TTY. */
+ cdp = devvn_refthread(fp->f_vnode, &dev, &ref);
+ if (cdp == NULL) {
+ error = ENXIO;
+ goto done1;
+ }
+ if (dev != fp->f_data) {
+ error = ENXIO;
+ goto done2;
+ }
+ if (cdp != &ttydev_cdevsw) {
+ error = ENOTTY;
+ goto done2;
+ }
+ tp = dev->si_drv1;
+
+ /* Try to attach the hook to the TTY. */
+ error = EBUSY;
+ tty_lock(tp);
+ MPASS((tp->t_hook == NULL) == ((tp->t_flags & TF_HOOK) == 0));
+ if (tp->t_flags & TF_HOOK)
+ goto done3;
+
+ tp->t_flags |= TF_HOOK;
+ tp->t_hook = th;
+ tp->t_hooksoftc = softc;
+ *rtp = tp;
+ error = 0;
+
+ /* Maybe we can switch into bypass mode now. */
+ ttydisc_optimize(tp);
+
+ /* Silently convert rint() calls to rint_bypass() when possible. */
+ if (!ttyhook_hashook(tp, rint) && ttyhook_hashook(tp, rint_bypass))
+ th->th_rint = ttyhook_defrint;
+
+done3: tty_unlock(tp);
+done2: dev_relthread(dev, ref);
+done1: fdrop(fp, curthread);
+ return (error);
+}
+
+void
+ttyhook_unregister(struct tty *tp)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+ MPASS(tp->t_flags & TF_HOOK);
+
+ /* Disconnect the hook. */
+ tp->t_flags &= ~TF_HOOK;
+ tp->t_hook = NULL;
+
+ /* Maybe we need to leave bypass mode. */
+ ttydisc_optimize(tp);
+
+ /* Maybe deallocate the TTY as well. */
+ tty_rel_free(tp);
+}
+
+/*
+ * /dev/console handling.
+ */
+
+static int
+ttyconsdev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ struct tty *tp;
+
+ /* System has no console device. */
+ if (dev_console_filename == NULL)
+ return (ENXIO);
+
+ /* Look up corresponding TTY by device name. */
+ sx_slock(&tty_list_sx);
+ TAILQ_FOREACH(tp, &tty_list, t_list) {
+ if (strcmp(dev_console_filename, tty_devname(tp)) == 0) {
+ dev_console->si_drv1 = tp;
+ break;
+ }
+ }
+ sx_sunlock(&tty_list_sx);
+
+ /* System console has no TTY associated. */
+ if (dev_console->si_drv1 == NULL)
+ return (ENXIO);
+
+ return (ttydev_open(dev, oflags, devtype, td));
+}
+
+static int
+ttyconsdev_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+
+ log_console(uio);
+
+ return (ttydev_write(dev, uio, ioflag));
+}
+
+/*
+ * /dev/console is a little different from normal TTYs. When opened,
+ * it determines which TTY to use. When data gets written to it, it
+ * will be logged in the kernel message buffer.
+ */
+static struct cdevsw ttyconsdev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = ttyconsdev_open,
+ .d_close = ttydev_close,
+ .d_read = ttydev_read,
+ .d_write = ttyconsdev_write,
+ .d_ioctl = ttydev_ioctl,
+ .d_kqfilter = ttydev_kqfilter,
+ .d_poll = ttydev_poll,
+ .d_mmap = ttydev_mmap,
+ .d_name = "ttyconsdev",
+ .d_flags = D_TTY,
+};
+
+static void
+ttyconsdev_init(void *unused __unused)
+{
+
+ dev_console = make_dev_credf(MAKEDEV_ETERNAL, &ttyconsdev_cdevsw, 0,
+ NULL, UID_ROOT, GID_WHEEL, 0600, "console");
+}
+
+SYSINIT(tty, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyconsdev_init, NULL);
+
+void
+ttyconsdev_select(const char *name)
+{
+
+ dev_console_filename = name;
+}
+
+/*
+ * Debugging routines.
+ */
+
+#include <rtems/bsd/local/opt_ddb.h>
+#ifdef DDB
+#include <ddb/ddb.h>
+#include <ddb/db_sym.h>
+
+static const struct {
+ int flag;
+ char val;
+} ttystates[] = {
+#if 0
+ { TF_NOPREFIX, 'N' },
+#endif
+ { TF_INITLOCK, 'I' },
+ { TF_CALLOUT, 'C' },
+
+ /* Keep these together -> 'Oi' and 'Oo'. */
+ { TF_OPENED, 'O' },
+ { TF_OPENED_IN, 'i' },
+ { TF_OPENED_OUT, 'o' },
+ { TF_OPENED_CONS, 'c' },
+
+ { TF_GONE, 'G' },
+ { TF_OPENCLOSE, 'B' },
+ { TF_ASYNC, 'Y' },
+ { TF_LITERAL, 'L' },
+
+ /* Keep these together -> 'Hi' and 'Ho'. */
+ { TF_HIWAT, 'H' },
+ { TF_HIWAT_IN, 'i' },
+ { TF_HIWAT_OUT, 'o' },
+
+ { TF_STOPPED, 'S' },
+ { TF_EXCLUDE, 'X' },
+ { TF_BYPASS, 'l' },
+ { TF_ZOMBIE, 'Z' },
+ { TF_HOOK, 's' },
+
+ /* Keep these together -> 'bi' and 'bo'. */
+ { TF_BUSY, 'b' },
+ { TF_BUSY_IN, 'i' },
+ { TF_BUSY_OUT, 'o' },
+
+ { 0, '\0'},
+};
+
+#define TTY_FLAG_BITS \
+ "\20\1NOPREFIX\2INITLOCK\3CALLOUT\4OPENED_IN" \
+ "\5OPENED_OUT\6OPENED_CONS\7GONE\10OPENCLOSE" \
+ "\11ASYNC\12LITERAL\13HIWAT_IN\14HIWAT_OUT" \
+ "\15STOPPED\16EXCLUDE\17BYPASS\20ZOMBIE" \
+ "\21HOOK\22BUSY_IN\23BUSY_OUT"
+
+#define DB_PRINTSYM(name, addr) \
+ db_printf("%s " #name ": ", sep); \
+ db_printsym((db_addr_t) addr, DB_STGY_ANY); \
+ db_printf("\n");
+
+static void
+_db_show_devsw(const char *sep, const struct ttydevsw *tsw)
+{
+
+ db_printf("%sdevsw: ", sep);
+ db_printsym((db_addr_t)tsw, DB_STGY_ANY);
+ db_printf(" (%p)\n", tsw);
+ DB_PRINTSYM(open, tsw->tsw_open);
+ DB_PRINTSYM(close, tsw->tsw_close);
+ DB_PRINTSYM(outwakeup, tsw->tsw_outwakeup);
+ DB_PRINTSYM(inwakeup, tsw->tsw_inwakeup);
+ DB_PRINTSYM(ioctl, tsw->tsw_ioctl);
+ DB_PRINTSYM(param, tsw->tsw_param);
+ DB_PRINTSYM(modem, tsw->tsw_modem);
+ DB_PRINTSYM(mmap, tsw->tsw_mmap);
+ DB_PRINTSYM(pktnotify, tsw->tsw_pktnotify);
+ DB_PRINTSYM(free, tsw->tsw_free);
+}
+
+static void
+_db_show_hooks(const char *sep, const struct ttyhook *th)
+{
+
+ db_printf("%shook: ", sep);
+ db_printsym((db_addr_t)th, DB_STGY_ANY);
+ db_printf(" (%p)\n", th);
+ if (th == NULL)
+ return;
+ DB_PRINTSYM(rint, th->th_rint);
+ DB_PRINTSYM(rint_bypass, th->th_rint_bypass);
+ DB_PRINTSYM(rint_done, th->th_rint_done);
+ DB_PRINTSYM(rint_poll, th->th_rint_poll);
+ DB_PRINTSYM(getc_inject, th->th_getc_inject);
+ DB_PRINTSYM(getc_capture, th->th_getc_capture);
+ DB_PRINTSYM(getc_poll, th->th_getc_poll);
+ DB_PRINTSYM(close, th->th_close);
+}
+
+static void
+_db_show_termios(const char *name, const struct termios *t)
+{
+
+ db_printf("%s: iflag 0x%x oflag 0x%x cflag 0x%x "
+ "lflag 0x%x ispeed %u ospeed %u\n", name,
+ t->c_iflag, t->c_oflag, t->c_cflag, t->c_lflag,
+ t->c_ispeed, t->c_ospeed);
+}
+
+/* DDB command to show TTY statistics. */
+DB_SHOW_COMMAND(tty, db_show_tty)
+{
+ struct tty *tp;
+
+ if (!have_addr) {
+ db_printf("usage: show tty <addr>\n");
+ return;
+ }
+ tp = (struct tty *)addr;
+
+ db_printf("%p: %s\n", tp, tty_devname(tp));
+ db_printf("\tmtx: %p\n", tp->t_mtx);
+ db_printf("\tflags: 0x%b\n", tp->t_flags, TTY_FLAG_BITS);
+ db_printf("\trevokecnt: %u\n", tp->t_revokecnt);
+
+ /* Buffering mechanisms. */
+ db_printf("\tinq: %p begin %u linestart %u reprint %u end %u "
+ "nblocks %u quota %u\n", &tp->t_inq, tp->t_inq.ti_begin,
+ tp->t_inq.ti_linestart, tp->t_inq.ti_reprint, tp->t_inq.ti_end,
+ tp->t_inq.ti_nblocks, tp->t_inq.ti_quota);
+ db_printf("\toutq: %p begin %u end %u nblocks %u quota %u\n",
+ &tp->t_outq, tp->t_outq.to_begin, tp->t_outq.to_end,
+ tp->t_outq.to_nblocks, tp->t_outq.to_quota);
+ db_printf("\tinlow: %zu\n", tp->t_inlow);
+ db_printf("\toutlow: %zu\n", tp->t_outlow);
+ _db_show_termios("\ttermios", &tp->t_termios);
+ db_printf("\twinsize: row %u col %u xpixel %u ypixel %u\n",
+ tp->t_winsize.ws_row, tp->t_winsize.ws_col,
+ tp->t_winsize.ws_xpixel, tp->t_winsize.ws_ypixel);
+ db_printf("\tcolumn: %u\n", tp->t_column);
+ db_printf("\twritepos: %u\n", tp->t_writepos);
+ db_printf("\tcompatflags: 0x%x\n", tp->t_compatflags);
+
+ /* Init/lock-state devices. */
+ _db_show_termios("\ttermios_init_in", &tp->t_termios_init_in);
+ _db_show_termios("\ttermios_init_out", &tp->t_termios_init_out);
+ _db_show_termios("\ttermios_lock_in", &tp->t_termios_lock_in);
+ _db_show_termios("\ttermios_lock_out", &tp->t_termios_lock_out);
+
+ /* Hooks */
+ _db_show_devsw("\t", tp->t_devsw);
+ _db_show_hooks("\t", tp->t_hook);
+
+ /* Process info. */
+ db_printf("\tpgrp: %p gid %d jobc %d\n", tp->t_pgrp,
+ tp->t_pgrp ? tp->t_pgrp->pg_id : 0,
+ tp->t_pgrp ? tp->t_pgrp->pg_jobc : 0);
+ db_printf("\tsession: %p", tp->t_session);
+ if (tp->t_session != NULL)
+ db_printf(" count %u leader %p tty %p sid %d login %s",
+ tp->t_session->s_count, tp->t_session->s_leader,
+ tp->t_session->s_ttyp, tp->t_session->s_sid,
+ tp->t_session->s_login);
+ db_printf("\n");
+ db_printf("\tsessioncnt: %u\n", tp->t_sessioncnt);
+ db_printf("\tdevswsoftc: %p\n", tp->t_devswsoftc);
+ db_printf("\thooksoftc: %p\n", tp->t_hooksoftc);
+ db_printf("\tdev: %p\n", tp->t_dev);
+}
+
+/* DDB command to list TTYs. */
+DB_SHOW_ALL_COMMAND(ttys, db_show_all_ttys)
+{
+ struct tty *tp;
+ size_t isiz, osiz;
+ int i, j;
+
+ /* Make the output look like `pstat -t'. */
+ db_printf("PTR ");
+#if defined(__LP64__)
+ db_printf(" ");
+#endif
+ db_printf(" LINE INQ CAN LIN LOW OUTQ USE LOW "
+ "COL SESS PGID STATE\n");
+
+ TAILQ_FOREACH(tp, &tty_list, t_list) {
+ isiz = tp->t_inq.ti_nblocks * TTYINQ_DATASIZE;
+ osiz = tp->t_outq.to_nblocks * TTYOUTQ_DATASIZE;
+
+ db_printf("%p %10s %5zu %4u %4u %4zu %5zu %4u %4zu %5u %5d "
+ "%5d ", tp, tty_devname(tp), isiz,
+ tp->t_inq.ti_linestart - tp->t_inq.ti_begin,
+ tp->t_inq.ti_end - tp->t_inq.ti_linestart,
+ isiz - tp->t_inlow, osiz,
+ tp->t_outq.to_end - tp->t_outq.to_begin,
+ osiz - tp->t_outlow, MIN(tp->t_column, 99999),
+ tp->t_session ? tp->t_session->s_sid : 0,
+ tp->t_pgrp ? tp->t_pgrp->pg_id : 0);
+
+ /* Flag bits. */
+ for (i = j = 0; ttystates[i].flag; i++)
+ if (tp->t_flags & ttystates[i].flag) {
+ db_printf("%c", ttystates[i].val);
+ j++;
+ }
+ if (j == 0)
+ db_printf("-");
+ db_printf("\n");
+ }
+}
+#endif /* DDB */
diff --git a/freebsd/sys/kern/tty_inq.c b/freebsd/sys/kern/tty_inq.c
new file mode 100644
index 00000000..0093cb31
--- /dev/null
+++ b/freebsd/sys/kern/tty_inq.c
@@ -0,0 +1,497 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Portions of this software were developed under sponsorship from Snow
+ * B.V., the Netherlands.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/kernel.h>
+#include <rtems/bsd/sys/lock.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/tty.h>
+#include <sys/uio.h>
+
+#include <vm/uma.h>
+
+/*
+ * TTY input queue buffering.
+ *
+ * Unlike the output queue, the input queue has more features that are
+ * needed to properly implement various features offered by the TTY
+ * interface:
+ *
+ * - Data can be removed from the tail of the queue, which is used to
+ * implement backspace.
+ * - Once in a while, input has to be `canonicalized'. When ICANON is
+ * turned on, this will be done after a CR has been inserted.
+ * Otherwise, it should be done after any character has been inserted.
+ * - The input queue can store one bit per byte, called the quoting bit.
+ * This bit is used by TTYDISC to make backspace work on quoted
+ * characters.
+ *
+ * In most cases, there is probably less input than output, so unlike
+ * the outq, we'll stick to 128 byte blocks here.
+ */
+
+static int ttyinq_flush_secure = 1;
+SYSCTL_INT(_kern, OID_AUTO, tty_inq_flush_secure, CTLFLAG_RW,
+ &ttyinq_flush_secure, 0, "Zero buffers while flushing");
+
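+/* One quoting bit is kept per data byte, packed into 32-bit words. */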
+#define TTYINQ_QUOTESIZE (TTYINQ_DATASIZE / BMSIZE)
+#define BMSIZE 32
+#define GETBIT(tib,boff) \
+ ((tib)->tib_quotes[(boff) / BMSIZE] & (1 << ((boff) % BMSIZE)))
+#define SETBIT(tib,boff) \
+ ((tib)->tib_quotes[(boff) / BMSIZE] |= (1 << ((boff) % BMSIZE)))
+#define CLRBIT(tib,boff) \
+ ((tib)->tib_quotes[(boff) / BMSIZE] &= ~(1 << ((boff) % BMSIZE)))
+
+struct ttyinq_block {
+ struct ttyinq_block *tib_prev;
+ struct ttyinq_block *tib_next;
+ uint32_t tib_quotes[TTYINQ_QUOTESIZE];
+ char tib_data[TTYINQ_DATASIZE];
+};
+
+static uma_zone_t ttyinq_zone;
+
+#define TTYINQ_INSERT_TAIL(ti, tib) do { \
+ if (ti->ti_end == 0) { \
+ tib->tib_prev = NULL; \
+ tib->tib_next = ti->ti_firstblock; \
+ ti->ti_firstblock = tib; \
+ } else { \
+ tib->tib_prev = ti->ti_lastblock; \
+ tib->tib_next = ti->ti_lastblock->tib_next; \
+ ti->ti_lastblock->tib_next = tib; \
+ } \
+ if (tib->tib_next != NULL) \
+ tib->tib_next->tib_prev = tib; \
+ ti->ti_nblocks++; \
+} while (0)
+
+#define TTYINQ_REMOVE_HEAD(ti) do { \
+ ti->ti_firstblock = ti->ti_firstblock->tib_next; \
+ if (ti->ti_firstblock != NULL) \
+ ti->ti_firstblock->tib_prev = NULL; \
+ ti->ti_nblocks--; \
+} while (0)
+
+#define TTYINQ_RECYCLE(ti, tib) do { \
+ if (ti->ti_quota <= ti->ti_nblocks) \
+ uma_zfree(ttyinq_zone, tib); \
+ else \
+ TTYINQ_INSERT_TAIL(ti, tib); \
+} while (0)
+
+int
+ttyinq_setsize(struct ttyinq *ti, struct tty *tp, size_t size)
+{
+ struct ttyinq_block *tib;
+
+ ti->ti_quota = howmany(size, TTYINQ_DATASIZE);
+
+ while (ti->ti_quota > ti->ti_nblocks) {
+ /*
+ * List is getting bigger.
+ * Add new blocks to the tail of the list.
+ *
+ * We must unlock the TTY temporarily, because we need
+ * to allocate memory. This won't be a problem, because
+ * in the worst case, another thread ends up here, which
+ * may cause us to allocate too many blocks, but this
+ * will be caught by the loop below.
+ */
+ tty_unlock(tp);
+ tib = uma_zalloc(ttyinq_zone, M_WAITOK);
+ tty_lock(tp);
+
+ if (tty_gone(tp)) {
+ uma_zfree(ttyinq_zone, tib);
+ return (ENXIO);
+ }
+
+ TTYINQ_INSERT_TAIL(ti, tib);
+ }
+ return (0);
+}
+
+void
+ttyinq_free(struct ttyinq *ti)
+{
+ struct ttyinq_block *tib;
+
+ ttyinq_flush(ti);
+ ti->ti_quota = 0;
+
+ while ((tib = ti->ti_firstblock) != NULL) {
+ TTYINQ_REMOVE_HEAD(ti);
+ uma_zfree(ttyinq_zone, tib);
+ }
+
+ MPASS(ti->ti_nblocks == 0);
+}
+
+int
+ttyinq_read_uio(struct ttyinq *ti, struct tty *tp, struct uio *uio,
+ size_t rlen, size_t flen)
+{
+
+ MPASS(rlen <= uio->uio_resid);
+
+ while (rlen > 0) {
+ int error;
+ struct ttyinq_block *tib;
+ size_t cbegin, cend, clen;
+
+ /* See if there still is data. */
+ if (ti->ti_begin == ti->ti_linestart)
+ return (0);
+ tib = ti->ti_firstblock;
+ if (tib == NULL)
+ return (0);
+
+ /*
+ * The end address should be the lowest of these three:
+ * - The write pointer
+ * - The blocksize - we can't read beyond the block
+ * - The end address if we could perform the full read
+ */
+ cbegin = ti->ti_begin;
+ cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen),
+ TTYINQ_DATASIZE);
+ clen = cend - cbegin;
+ MPASS(clen >= flen);
+ rlen -= clen;
+
+ /*
+ * We can prevent buffering in some cases:
+ * - We need to read the block until the end.
+ * - We don't need to read the block until the end, but
+ * there is no data beyond it, which allows us to move
+ * the write pointer to a new block.
+ */
+ if (cend == TTYINQ_DATASIZE || cend == ti->ti_end) {
+ /*
+ * Fast path: zero copy. Remove the first block,
+ * so we can unlock the TTY temporarily.
+ */
+ TTYINQ_REMOVE_HEAD(ti);
+ ti->ti_begin = 0;
+
+ /*
+ * Because we remove the first block, we must
+ * fix up the block offsets.
+ */
+#define CORRECT_BLOCK(t) do { \
+ if (t <= TTYINQ_DATASIZE) \
+ t = 0; \
+ else \
+ t -= TTYINQ_DATASIZE; \
+} while (0)
+ CORRECT_BLOCK(ti->ti_linestart);
+ CORRECT_BLOCK(ti->ti_reprint);
+ CORRECT_BLOCK(ti->ti_end);
+#undef CORRECT_BLOCK
+
+ /*
+ * Temporary unlock and copy the data to
+ * userspace. We may need to flush trailing
+ * bytes, like EOF characters.
+ */
+ tty_unlock(tp);
+ error = uiomove(tib->tib_data + cbegin,
+ clen - flen, uio);
+ tty_lock(tp);
+
+ /* Block can now be readded to the list. */
+ TTYINQ_RECYCLE(ti, tib);
+ } else {
+ char ob[TTYINQ_DATASIZE - 1];
+
+ /*
+ * Slow path: store data in a temporary buffer.
+ */
+ memcpy(ob, tib->tib_data + cbegin, clen - flen);
+ ti->ti_begin += clen;
+ MPASS(ti->ti_begin < TTYINQ_DATASIZE);
+
+ /* Temporary unlock and copy the data to userspace. */
+ tty_unlock(tp);
+ error = uiomove(ob, clen - flen, uio);
+ tty_lock(tp);
+ }
+
+ if (error != 0)
+ return (error);
+ if (tty_gone(tp))
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static __inline void
+ttyinq_set_quotes(struct ttyinq_block *tib, size_t offset,
+ size_t length, int value)
+{
+
+ if (value) {
+ /* Set the bits. */
+ for (; length > 0; length--, offset++)
+ SETBIT(tib, offset);
+ } else {
+ /* Unset the bits. */
+ for (; length > 0; length--, offset++)
+ CLRBIT(tib, offset);
+ }
+}
+
+size_t
+ttyinq_write(struct ttyinq *ti, const void *buf, size_t nbytes, int quote)
+{
+ const char *cbuf = buf;
+ struct ttyinq_block *tib;
+ unsigned int boff;
+ size_t l;
+
+ while (nbytes > 0) {
+ boff = ti->ti_end % TTYINQ_DATASIZE;
+
+ if (ti->ti_end == 0) {
+ /* First time we're being used or drained. */
+ MPASS(ti->ti_begin == 0);
+ tib = ti->ti_firstblock;
+ if (tib == NULL) {
+ /* Queue has no blocks. */
+ break;
+ }
+ ti->ti_lastblock = tib;
+ } else if (boff == 0) {
+ /* We reached the end of this block on last write. */
+ tib = ti->ti_lastblock->tib_next;
+ if (tib == NULL) {
+ /* We've reached the watermark. */
+ break;
+ }
+ ti->ti_lastblock = tib;
+ } else {
+ tib = ti->ti_lastblock;
+ }
+
+ /* Don't copy more than was requested. */
+ l = MIN(nbytes, TTYINQ_DATASIZE - boff);
+ MPASS(l > 0);
+ memcpy(tib->tib_data + boff, cbuf, l);
+
+ /* Set the quoting bits for the proper region. */
+ ttyinq_set_quotes(tib, boff, l, quote);
+
+ cbuf += l;
+ nbytes -= l;
+ ti->ti_end += l;
+ }
+
+ return (cbuf - (const char *)buf);
+}
+
+int
+ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes, int quote)
+{
+ size_t ret;
+
+ if (ttyinq_bytesleft(ti) < nbytes)
+ return (-1);
+
+ /* We should always be able to write it back. */
+ ret = ttyinq_write(ti, buf, nbytes, quote);
+ MPASS(ret == nbytes);
+
+ return (0);
+}
+
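+/*
+ * Mark all data currently in the queue as a completed line, making it
+ * available for reading and no longer erasable by the line editor.
+ */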
+void
+ttyinq_canonicalize(struct ttyinq *ti)
+{
+
+ ti->ti_linestart = ti->ti_reprint = ti->ti_end;
+ ti->ti_startblock = ti->ti_reprintblock = ti->ti_lastblock;
+}
+
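+/*
+ * Scan the current (first) block for an unquoted break character and
+ * return the number of bytes up to and including it; if none is found,
+ * return the number of bytes that can be processed at once.
+ */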
+size_t
+ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
+ char *lastc)
+{
+ struct ttyinq_block *tib = ti->ti_firstblock;
+ unsigned int boff = ti->ti_begin;
+ unsigned int bend = MIN(MIN(TTYINQ_DATASIZE, ti->ti_linestart),
+ ti->ti_begin + maxlen);
+
+ MPASS(maxlen > 0);
+
+ if (tib == NULL)
+ return (0);
+
+ while (boff < bend) {
+ if (strchr(breakc, tib->tib_data[boff]) && !GETBIT(tib, boff)) {
+ *lastc = tib->tib_data[boff];
+ return (boff - ti->ti_begin + 1);
+ }
+ boff++;
+ }
+
+ /* Not found - just process the entire block. */
+ return (bend - ti->ti_begin);
+}
+
+void
+ttyinq_flush(struct ttyinq *ti)
+{
+ struct ttyinq_block *tib;
+
+ ti->ti_begin = 0;
+ ti->ti_linestart = 0;
+ ti->ti_reprint = 0;
+ ti->ti_end = 0;
+
+ /* Zero all data in the input queue to get rid of passwords. */
+ if (ttyinq_flush_secure) {
+ for (tib = ti->ti_firstblock; tib != NULL; tib = tib->tib_next)
+ bzero(&tib->tib_data, sizeof tib->tib_data);
+ }
+}
+
+int
+ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote)
+{
+ unsigned int boff;
+ struct ttyinq_block *tib = ti->ti_lastblock;
+
+ if (ti->ti_linestart == ti->ti_end)
+ return (-1);
+
+ MPASS(ti->ti_end > 0);
+ boff = (ti->ti_end - 1) % TTYINQ_DATASIZE;
+
+ *c = tib->tib_data[boff];
+ *quote = GETBIT(tib, boff);
+
+ return (0);
+}
+
+void
+ttyinq_unputchar(struct ttyinq *ti)
+{
+
+ MPASS(ti->ti_linestart < ti->ti_end);
+
+ if (--ti->ti_end % TTYINQ_DATASIZE == 0) {
+ /* Roll back to the previous block. */
+ ti->ti_lastblock = ti->ti_lastblock->tib_prev;
+ /*
+ * This can only fail if we are unputchar()'ing the
+ * first character in the queue.
+ */
+ MPASS((ti->ti_lastblock == NULL) == (ti->ti_end == 0));
+ }
+}
+
+void
+ttyinq_reprintpos_set(struct ttyinq *ti)
+{
+
+ ti->ti_reprint = ti->ti_end;
+ ti->ti_reprintblock = ti->ti_lastblock;
+}
+
+void
+ttyinq_reprintpos_reset(struct ttyinq *ti)
+{
+
+ ti->ti_reprint = ti->ti_linestart;
+ ti->ti_reprintblock = ti->ti_startblock;
+}
+
+static void
+ttyinq_line_iterate(struct ttyinq *ti,
+ ttyinq_line_iterator_t *iterator, void *data,
+ unsigned int offset, struct ttyinq_block *tib)
+{
+ unsigned int boff;
+
+ /* Use the proper block when we're at the queue head. */
+ if (offset == 0)
+ tib = ti->ti_firstblock;
+
+ /* Iterate all characters and call the iterator function. */
+ for (; offset < ti->ti_end; offset++) {
+ boff = offset % TTYINQ_DATASIZE;
+ MPASS(tib != NULL);
+
+ /* Call back the iterator function. */
+ iterator(data, tib->tib_data[boff], GETBIT(tib, boff));
+
+ /* Last byte iterated - go to the next block. */
+ if (boff == TTYINQ_DATASIZE - 1)
+ tib = tib->tib_next;
+ MPASS(tib != NULL);
+ }
+}
+
+void
+ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
+ ttyinq_line_iterator_t *iterator, void *data)
+{
+
+ ttyinq_line_iterate(ti, iterator, data,
+ ti->ti_linestart, ti->ti_startblock);
+}
+
+void
+ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
+ ttyinq_line_iterator_t *iterator, void *data)
+{
+
+ ttyinq_line_iterate(ti, iterator, data,
+ ti->ti_reprint, ti->ti_reprintblock);
+}
+
+static void
+ttyinq_startup(void *dummy)
+{
+
+ ttyinq_zone = uma_zcreate("ttyinq", sizeof(struct ttyinq_block),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+}
+
+SYSINIT(ttyinq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyinq_startup, NULL);
diff --git a/freebsd/sys/kern/tty_outq.c b/freebsd/sys/kern/tty_outq.c
new file mode 100644
index 00000000..b6bc96d5
--- /dev/null
+++ b/freebsd/sys/kern/tty_outq.c
@@ -0,0 +1,347 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Portions of this software were developed under sponsorship from Snow
+ * B.V., the Netherlands.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/kernel.h>
+#include <rtems/bsd/sys/lock.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/tty.h>
+#include <sys/uio.h>
+
+#include <vm/uma.h>
+
+/*
+ * TTY output queue buffering.
+ *
+ * The previous design of the TTY layer offered the so-called clists.
+ * These clists were used for both the input queues and the output
+ * queue. We don't use certain features on the output side, like quoting
+ * bits for parity marking and such. This mechanism is similar to the
+ * old clists, but only contains the features we need to buffer the
+ * output.
+ */
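+
+/*
+ * Consumer sketch (illustrative; "foo_softc" and "foo_tx" are
+ * placeholder names): a hardware driver typically drains this queue
+ * from its tsw_outwakeup callback through ttydisc_getc():
+ *
+ *	static void
+ *	foo_outwakeup(struct tty *tp)
+ *	{
+ *		struct foo_softc *sc = tty_softc(tp);
+ *		char buf[16];
+ *		size_t len;
+ *
+ *		while ((len = ttydisc_getc(tp, buf, sizeof buf)) > 0)
+ *			foo_tx(sc, buf, len);
+ *	}
+ */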
+
+struct ttyoutq_block {
+ struct ttyoutq_block *tob_next;
+ char tob_data[TTYOUTQ_DATASIZE];
+};
+
+static uma_zone_t ttyoutq_zone;
+
+#define TTYOUTQ_INSERT_TAIL(to, tob) do { \
+ if (to->to_end == 0) { \
+ tob->tob_next = to->to_firstblock; \
+ to->to_firstblock = tob; \
+ } else { \
+ tob->tob_next = to->to_lastblock->tob_next; \
+ to->to_lastblock->tob_next = tob; \
+ } \
+ to->to_nblocks++; \
+} while (0)
+
+#define TTYOUTQ_REMOVE_HEAD(to) do { \
+ to->to_firstblock = to->to_firstblock->tob_next; \
+ to->to_nblocks--; \
+} while (0)
+
+#define TTYOUTQ_RECYCLE(to, tob) do { \
+ if (to->to_quota <= to->to_nblocks) \
+ uma_zfree(ttyoutq_zone, tob); \
+ else \
+ TTYOUTQ_INSERT_TAIL(to, tob); \
+} while(0)
+
+void
+ttyoutq_flush(struct ttyoutq *to)
+{
+
+ to->to_begin = 0;
+ to->to_end = 0;
+}
+
+int
+ttyoutq_setsize(struct ttyoutq *to, struct tty *tp, size_t size)
+{
+ struct ttyoutq_block *tob;
+
+ to->to_quota = howmany(size, TTYOUTQ_DATASIZE);
+
+ while (to->to_quota > to->to_nblocks) {
+ /*
+ * List is getting bigger.
+ * Add new blocks to the tail of the list.
+ *
+ * We must unlock the TTY temporarily, because we need
+ * to allocate memory. This won't be a problem, because
+ * in the worst case, another thread ends up here, which
+ * may cause us to allocate too many blocks, but this
+ * will be caught by the loop below.
+ */
+ tty_unlock(tp);
+ tob = uma_zalloc(ttyoutq_zone, M_WAITOK);
+ tty_lock(tp);
+
+ if (tty_gone(tp)) {
+ uma_zfree(ttyoutq_zone, tob);
+ return (ENXIO);
+ }
+
+ TTYOUTQ_INSERT_TAIL(to, tob);
+ }
+ return (0);
+}
+
+void
+ttyoutq_free(struct ttyoutq *to)
+{
+ struct ttyoutq_block *tob;
+
+ ttyoutq_flush(to);
+ to->to_quota = 0;
+
+ while ((tob = to->to_firstblock) != NULL) {
+ TTYOUTQ_REMOVE_HEAD(to);
+ uma_zfree(ttyoutq_zone, tob);
+ }
+
+ MPASS(to->to_nblocks == 0);
+}
+
+size_t
+ttyoutq_read(struct ttyoutq *to, void *buf, size_t len)
+{
+ char *cbuf = buf;
+
+ while (len > 0) {
+ struct ttyoutq_block *tob;
+ size_t cbegin, cend, clen;
+
+ /* See if there still is data. */
+ if (to->to_begin == to->to_end)
+ break;
+ tob = to->to_firstblock;
+ if (tob == NULL)
+ break;
+
+ /*
+ * The end address should be the lowest of these three:
+ * - The write pointer
+ * - The blocksize - we can't read beyond the block
+ * - The end address if we could perform the full read
+ */
+ cbegin = to->to_begin;
+ cend = MIN(MIN(to->to_end, to->to_begin + len),
+ TTYOUTQ_DATASIZE);
+ clen = cend - cbegin;
+
+ /* Copy the data out of the buffers. */
+ memcpy(cbuf, tob->tob_data + cbegin, clen);
+ cbuf += clen;
+ len -= clen;
+
+ if (cend == to->to_end) {
+ /* Read the complete queue. */
+ to->to_begin = 0;
+ to->to_end = 0;
+ } else if (cend == TTYOUTQ_DATASIZE) {
+ /* Read the block until the end. */
+ TTYOUTQ_REMOVE_HEAD(to);
+ to->to_begin = 0;
+ to->to_end -= TTYOUTQ_DATASIZE;
+ TTYOUTQ_RECYCLE(to, tob);
+ } else {
+ /* Read the block partially. */
+ to->to_begin += clen;
+ }
+ }
+
+ return (cbuf - (char *)buf);
+}
+
+/*
+ * An optimized version of ttyoutq_read() which can be used in pseudo
+ * TTY drivers to directly copy data from the outq to userspace, instead
+ * of buffering it.
+ *
+ * We can only copy data directly if we need to read the entire block
+ * back to the user, because we temporarily remove the block from the
+ * queue. Otherwise we need to copy it to a temporary buffer first, to
+ * make sure data remains in the correct order.
+ */
+int
+ttyoutq_read_uio(struct ttyoutq *to, struct tty *tp, struct uio *uio)
+{
+
+ while (uio->uio_resid > 0) {
+ int error;
+ struct ttyoutq_block *tob;
+ size_t cbegin, cend, clen;
+
+ /* See if there still is data. */
+ if (to->to_begin == to->to_end)
+ return (0);
+ tob = to->to_firstblock;
+ if (tob == NULL)
+ return (0);
+
+ /*
+ * The end address should be the lowest of these three:
+ * - The write pointer
+ * - The blocksize - we can't read beyond the block
+ * - The end address if we could perform the full read
+ */
+ cbegin = to->to_begin;
+ cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
+ TTYOUTQ_DATASIZE);
+ clen = cend - cbegin;
+
+ /*
+ * We can prevent buffering in some cases:
+ * - We need to read the block until the end.
+ * - We don't need to read the block until the end, but
+ * there is no data beyond it, which allows us to move
+ * the write pointer to a new block.
+ */
+ if (cend == TTYOUTQ_DATASIZE || cend == to->to_end) {
+ /*
+ * Fast path: zero copy. Remove the first block,
+ * so we can unlock the TTY temporarily.
+ */
+ TTYOUTQ_REMOVE_HEAD(to);
+ to->to_begin = 0;
+ if (to->to_end <= TTYOUTQ_DATASIZE)
+ to->to_end = 0;
+ else
+ to->to_end -= TTYOUTQ_DATASIZE;
+
+ /* Temporarily unlock and copy the data to userspace. */
+ tty_unlock(tp);
+ error = uiomove(tob->tob_data + cbegin, clen, uio);
+ tty_lock(tp);
+
+ /* Block can now be re-added to the list. */
+ TTYOUTQ_RECYCLE(to, tob);
+ } else {
+ char ob[TTYOUTQ_DATASIZE - 1];
+
+ /*
+ * Slow path: store data in a temporary buffer.
+ */
+ memcpy(ob, tob->tob_data + cbegin, clen);
+ to->to_begin += clen;
+ MPASS(to->to_begin < TTYOUTQ_DATASIZE);
+
+ /* Temporarily unlock and copy the data to userspace. */
+ tty_unlock(tp);
+ error = uiomove(ob, clen, uio);
+ tty_lock(tp);
+ }
+
+ if (error != 0)
+ return (error);
+ }
+
+ return (0);
+}
+
+size_t
+ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
+{
+ const char *cbuf = buf;
+ struct ttyoutq_block *tob;
+ unsigned int boff;
+ size_t l;
+
+ while (nbytes > 0) {
+ boff = to->to_end % TTYOUTQ_DATASIZE;
+
+ if (to->to_end == 0) {
+ /* First time we're being used or drained. */
+ MPASS(to->to_begin == 0);
+ tob = to->to_firstblock;
+ if (tob == NULL) {
+ /* Queue has no blocks. */
+ break;
+ }
+ to->to_lastblock = tob;
+ } else if (boff == 0) {
+ /* We reached the end of this block on last write. */
+ tob = to->to_lastblock->tob_next;
+ if (tob == NULL) {
+ /* We've reached the watermark. */
+ break;
+ }
+ to->to_lastblock = tob;
+ } else {
+ tob = to->to_lastblock;
+ }
+
+ /* Don't copy more than was requested. */
+ l = MIN(nbytes, TTYOUTQ_DATASIZE - boff);
+ MPASS(l > 0);
+ memcpy(tob->tob_data + boff, cbuf, l);
+
+ cbuf += l;
+ nbytes -= l;
+ to->to_end += l;
+ }
+
+ return (cbuf - (const char *)buf);
+}
+
+int
+ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
+{
+ size_t ret;
+
+ if (ttyoutq_bytesleft(to) < nbytes)
+ return (-1);
+
+ /* We should always be able to write it back. */
+ ret = ttyoutq_write(to, buf, nbytes);
+ MPASS(ret == nbytes);
+
+ return (0);
+}
+
+static void
+ttyoutq_startup(void *dummy)
+{
+
+ ttyoutq_zone = uma_zcreate("ttyoutq", sizeof(struct ttyoutq_block),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+}
+
+SYSINIT(ttyoutq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyoutq_startup, NULL);
diff --git a/freebsd/sys/kern/tty_ttydisc.c b/freebsd/sys/kern/tty_ttydisc.c
new file mode 100644
index 00000000..edb1d155
--- /dev/null
+++ b/freebsd/sys/kern/tty_ttydisc.c
@@ -0,0 +1,1267 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Portions of this software were developed under sponsorship from Snow
+ * B.V., the Netherlands.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/kernel.h>
+#include <sys/signal.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/tty.h>
+#include <sys/ttycom.h>
+#include <sys/ttydefaults.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+
+/*
+ * Standard TTYDISC `termios' line discipline.
+ */
+
+/* Statistics. */
+static unsigned long tty_nin = 0;
+SYSCTL_ULONG(_kern, OID_AUTO, tty_nin, CTLFLAG_RD,
+ &tty_nin, 0, "Total amount of bytes received");
+static unsigned long tty_nout = 0;
+SYSCTL_ULONG(_kern, OID_AUTO, tty_nout, CTLFLAG_RD,
+ &tty_nout, 0, "Total amount of bytes transmitted");
+
+/* termios comparison macros. */
+#define CMP_CC(v,c) (tp->t_termios.c_cc[v] != _POSIX_VDISABLE && \
+ tp->t_termios.c_cc[v] == (c))
+#define CMP_FLAG(field,opt) (tp->t_termios.c_ ## field ## flag & (opt))
+
+/* Characters that cannot be modified through c_cc. */
+#define CTAB '\t'
+#define CNL '\n'
+#define CCR '\r'
+
+/* Character is a control character. */
+#define CTL_VALID(c) ((c) == 0x7f || (unsigned char)(c) < 0x20)
+/* Control character should be processed on echo. */
+#define CTL_ECHO(c,q) (!(q) && ((c) == CERASE2 || (c) == CTAB || \
+ (c) == CNL || (c) == CCR))
+/* Control character should be printed using ^X notation. */
+#define CTL_PRINT(c,q) ((c) == 0x7f || ((unsigned char)(c) < 0x20 && \
+ ((q) || ((c) != CTAB && (c) != CNL))))
+/* Character is whitespace. */
+#define CTL_WHITE(c) ((c) == ' ' || (c) == CTAB)
+/* Character is alphanumeric. */
+#define CTL_ALNUM(c) (((c) >= '0' && (c) <= '9') || \
+ ((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
+
+#define TTY_STACKBUF 256
+
+void
+ttydisc_open(struct tty *tp)
+{
+ ttydisc_optimize(tp);
+}
+
+void
+ttydisc_close(struct tty *tp)
+{
+
+ /* Clean up our flags when leaving the discipline. */
+ tp->t_flags &= ~(TF_STOPPED|TF_HIWAT|TF_ZOMBIE);
+
+ /*
+ * POSIX states that we must drain output and flush input on
+ * last close. Draining has already been done if possible.
+ */
+ tty_flush(tp, FREAD | FWRITE);
+
+ if (ttyhook_hashook(tp, close))
+ ttyhook_close(tp);
+}
+
+static int
+ttydisc_read_canonical(struct tty *tp, struct uio *uio, int ioflag)
+{
+ char breakc[4] = { CNL }; /* enough to hold \n, VEOF and VEOL. */
+ int error;
+ size_t clen, flen = 0, n = 1;
+ unsigned char lastc = _POSIX_VDISABLE;
+
+#define BREAK_ADD(c) do { \
+ if (tp->t_termios.c_cc[c] != _POSIX_VDISABLE) \
+ breakc[n++] = tp->t_termios.c_cc[c]; \
+} while (0)
+ /* Determine which characters we should trigger on. */
+ BREAK_ADD(VEOF);
+ BREAK_ADD(VEOL);
+#undef BREAK_ADD
+ breakc[n] = '\0';
+
+ do {
+ error = tty_wait_background(tp, curthread, SIGTTIN);
+ if (error)
+ return (error);
+
+ /*
+ * Quite a tricky case: unlike the old TTY
+ * implementation, this implementation copies data back
+ * to userspace in large chunks. Unfortunately, we can't
+ * calculate the line length beforehand if it crosses
+ * ttyinq_block boundaries, because multiple reads could
+ * then make this code read beyond the newline.
+ *
+ * This is why we limit the read to:
+ * - The size the user has requested
+ * - The blocksize (done in tty_inq.c)
+ * - The amount of bytes until the newline
+ *
+ * This causes the line length to be recalculated after
+ * each block has been copied to userspace. This will
+ * cause the TTY layer to return data in chunks using
+ * the blocksize (except the first and last blocks).
+ */
+ clen = ttyinq_findchar(&tp->t_inq, breakc, uio->uio_resid,
+ &lastc);
+
+ /* No more data. */
+ if (clen == 0) {
+ if (tp->t_flags & TF_ZOMBIE)
+ return (0);
+ else if (ioflag & IO_NDELAY)
+ return (EWOULDBLOCK);
+
+ error = tty_wait(tp, &tp->t_inwait);
+ if (error)
+ return (error);
+ continue;
+ }
+
+ /* Don't send the EOF char back to userspace. */
+ if (CMP_CC(VEOF, lastc))
+ flen = 1;
+
+ MPASS(flen <= clen);
+
+ /* Read and throw away the EOF character. */
+ error = ttyinq_read_uio(&tp->t_inq, tp, uio, clen, flen);
+ if (error)
+ return (error);
+
+ } while (uio->uio_resid > 0 && lastc == _POSIX_VDISABLE);
+
+ return (0);
+}
+
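+/*
+ * For reference, the four non-canonical VMIN/VTIME combinations defined
+ * by POSIX map onto the helpers below as follows:
+ *
+ *	VMIN > 0, VTIME = 0:	block until at least VMIN bytes arrived
+ *				(ttydisc_read_raw_no_timer)
+ *	VMIN = 0, VTIME = 0:	return whatever is available, possibly 0
+ *				(ttydisc_read_raw_no_timer)
+ *	VMIN = 0, VTIME > 0:	overall read timer; first byte or timeout
+ *				(ttydisc_read_raw_read_timer)
+ *	VMIN > 0, VTIME > 0:	interbyte timer, armed after the first byte
+ *				(ttydisc_read_raw_interbyte_timer)
+ */
+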
+static int
+ttydisc_read_raw_no_timer(struct tty *tp, struct uio *uio, int ioflag)
+{
+ size_t vmin = tp->t_termios.c_cc[VMIN];
+ ssize_t oresid = uio->uio_resid;
+ int error;
+
+ MPASS(tp->t_termios.c_cc[VTIME] == 0);
+
+ /*
+ * This routine implements the easy cases of read()s while in
+ * non-canonical mode, namely case B and D, where we don't have
+ * any timers at all.
+ */
+
+ for (;;) {
+ error = tty_wait_background(tp, curthread, SIGTTIN);
+ if (error)
+ return (error);
+
+ error = ttyinq_read_uio(&tp->t_inq, tp, uio,
+ uio->uio_resid, 0);
+ if (error)
+ return (error);
+ if (uio->uio_resid == 0 || (oresid - uio->uio_resid) >= vmin)
+ return (0);
+
+ /* We have to wait for more. */
+ if (tp->t_flags & TF_ZOMBIE)
+ return (0);
+ else if (ioflag & IO_NDELAY)
+ return (EWOULDBLOCK);
+
+ error = tty_wait(tp, &tp->t_inwait);
+ if (error)
+ return (error);
+ }
+}
+
+static int
+ttydisc_read_raw_read_timer(struct tty *tp, struct uio *uio, int ioflag,
+ int oresid)
+{
+ size_t vmin = MAX(tp->t_termios.c_cc[VMIN], 1);
+ unsigned int vtime = tp->t_termios.c_cc[VTIME];
+ struct timeval end, now, left;
+ int error, hz;
+
+ MPASS(tp->t_termios.c_cc[VTIME] != 0);
+
+ /* Determine when the read should be expired. */
+ end.tv_sec = vtime / 10;
+ end.tv_usec = (vtime % 10) * 100000;
+ getmicrotime(&now);
+ timevaladd(&end, &now);
+
+ for (;;) {
+ error = tty_wait_background(tp, curthread, SIGTTIN);
+ if (error)
+ return (error);
+
+ error = ttyinq_read_uio(&tp->t_inq, tp, uio,
+ uio->uio_resid, 0);
+ if (error)
+ return (error);
+ if (uio->uio_resid == 0 || (oresid - uio->uio_resid) >= vmin)
+ return (0);
+
+ /* Calculate how long we should wait. */
+ getmicrotime(&now);
+ if (timevalcmp(&now, &end, >))
+ return (0);
+ left = end;
+ timevalsub(&left, &now);
+ hz = tvtohz(&left);
+
+ /*
+ * We have to wait for more. If the timer expires, we
+ * should return a 0-byte read.
+ */
+ if (tp->t_flags & TF_ZOMBIE)
+ return (0);
+ else if (ioflag & IO_NDELAY)
+ return (EWOULDBLOCK);
+
+ error = tty_timedwait(tp, &tp->t_inwait, hz);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ }
+
+ return (0);
+}
+
+static int
+ttydisc_read_raw_interbyte_timer(struct tty *tp, struct uio *uio, int ioflag)
+{
+ size_t vmin = tp->t_termios.c_cc[VMIN];
+ ssize_t oresid = uio->uio_resid;
+ int error;
+
+ MPASS(tp->t_termios.c_cc[VMIN] != 0);
+ MPASS(tp->t_termios.c_cc[VTIME] != 0);
+
+ /*
+ * When using the interbyte timer, the timer should be started
+ * after the first byte has been received. We just call into the
+ * generic read timer code after we've received the first byte.
+ */
+
+ for (;;) {
+ error = tty_wait_background(tp, curthread, SIGTTIN);
+ if (error)
+ return (error);
+
+ error = ttyinq_read_uio(&tp->t_inq, tp, uio,
+ uio->uio_resid, 0);
+ if (error)
+ return (error);
+ if (uio->uio_resid == 0 || (oresid - uio->uio_resid) >= vmin)
+ return (0);
+
+ /*
+ * Not enough data, but we did receive some, which means
+ * we'll now start using the interbyte timer.
+ */
+ if (oresid != uio->uio_resid)
+ break;
+
+ /* We have to wait for more. */
+ if (tp->t_flags & TF_ZOMBIE)
+ return (0);
+ else if (ioflag & IO_NDELAY)
+ return (EWOULDBLOCK);
+
+ error = tty_wait(tp, &tp->t_inwait);
+ if (error)
+ return (error);
+ }
+
+ return ttydisc_read_raw_read_timer(tp, uio, ioflag, oresid);
+}
+
+int
+ttydisc_read(struct tty *tp, struct uio *uio, int ioflag)
+{
+ int error;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (uio->uio_resid == 0)
+ return (0);
+
+ if (CMP_FLAG(l, ICANON))
+ error = ttydisc_read_canonical(tp, uio, ioflag);
+ else if (tp->t_termios.c_cc[VTIME] == 0)
+ error = ttydisc_read_raw_no_timer(tp, uio, ioflag);
+ else if (tp->t_termios.c_cc[VMIN] == 0)
+ error = ttydisc_read_raw_read_timer(tp, uio, ioflag,
+ uio->uio_resid);
+ else
+ error = ttydisc_read_raw_interbyte_timer(tp, uio, ioflag);
+
+ if (ttyinq_bytesleft(&tp->t_inq) >= tp->t_inlow ||
+ ttyinq_bytescanonicalized(&tp->t_inq) == 0) {
+ /* Unset the input watermark when we've got enough space. */
+ tty_hiwat_in_unblock(tp);
+ }
+
+ return (error);
+}
+
+static __inline unsigned int
+ttydisc_findchar(const char *obstart, unsigned int oblen)
+{
+ const char *c = obstart;
+
+ while (oblen--) {
+ if (CTL_VALID(*c))
+ break;
+ c++;
+ }
+
+ return (c - obstart);
+}
+
+static int
+ttydisc_write_oproc(struct tty *tp, char c)
+{
+ unsigned int scnt, error;
+
+ MPASS(CMP_FLAG(o, OPOST));
+ MPASS(CTL_VALID(c));
+
+#define PRINT_NORMAL() ttyoutq_write_nofrag(&tp->t_outq, &c, 1)
+ switch (c) {
+ case CEOF:
+ /* End-of-text dropping. */
+ if (CMP_FLAG(o, ONOEOT))
+ return (0);
+ return PRINT_NORMAL();
+
+ case CERASE2:
+ /* Handle backspace to fix tab expansion. */
+ if (PRINT_NORMAL() != 0)
+ return (-1);
+ if (tp->t_column > 0)
+ tp->t_column--;
+ return (0);
+
+ case CTAB:
+ /* Tab expansion. */
+ scnt = 8 - (tp->t_column & 7);
+ if (CMP_FLAG(o, TAB3)) {
+ error = ttyoutq_write_nofrag(&tp->t_outq,
+ " ", scnt);
+ } else {
+ error = PRINT_NORMAL();
+ }
+ if (error)
+ return (-1);
+
+ tp->t_column += scnt;
+ MPASS((tp->t_column % 8) == 0);
+ return (0);
+
+ case CNL:
+ /* Newline conversion. */
+ if (CMP_FLAG(o, ONLCR)) {
+ /* Convert \n to \r\n. */
+ error = ttyoutq_write_nofrag(&tp->t_outq, "\r\n", 2);
+ } else {
+ error = PRINT_NORMAL();
+ }
+ if (error)
+ return (-1);
+
+ if (CMP_FLAG(o, ONLCR|ONLRET)) {
+ tp->t_column = tp->t_writepos = 0;
+ ttyinq_reprintpos_set(&tp->t_inq);
+ }
+ return (0);
+
+ case CCR:
+ /* Carriage return to newline conversion. */
+ if (CMP_FLAG(o, OCRNL))
+ c = CNL;
+ /* Omit carriage returns on column 0. */
+ if (CMP_FLAG(o, ONOCR) && tp->t_column == 0)
+ return (0);
+ if (PRINT_NORMAL() != 0)
+ return (-1);
+
+ tp->t_column = tp->t_writepos = 0;
+ ttyinq_reprintpos_set(&tp->t_inq);
+ return (0);
+ }
+
+ /*
+ * Invisible control character. Print it, but don't
+ * increase the column count.
+ */
+ return PRINT_NORMAL();
+#undef PRINT_NORMAL
+}
+
+/*
+ * Just like the old TTY implementation, we need to copy data in chunks
+ * into a temporary buffer. One of the reasons why we need to do this,
+ * is because output processing (only TAB3 though) may allow the buffer
+ * to grow eight times.
+ */
+int
+ttydisc_write(struct tty *tp, struct uio *uio, int ioflag)
+{
+ char ob[TTY_STACKBUF];
+ char *obstart;
+ int error = 0;
+ unsigned int oblen = 0;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tp->t_flags & TF_ZOMBIE)
+ return (EIO);
+
+ /*
+ * We don't need to check whether the process is the foreground
+ * process group or if we have a carrier. This is already done
+ * in ttydev_write().
+ */
+
+ while (uio->uio_resid > 0) {
+ unsigned int nlen;
+
+ MPASS(oblen == 0);
+
+ /* Step 1: read data. */
+ obstart = ob;
+ nlen = MIN(uio->uio_resid, sizeof ob);
+ tty_unlock(tp);
+ error = uiomove(ob, nlen, uio);
+ tty_lock(tp);
+ if (error != 0)
+ break;
+ oblen = nlen;
+
+ if (tty_gone(tp)) {
+ error = ENXIO;
+ break;
+ }
+
+ MPASS(oblen > 0);
+
+ /* Step 2: process data. */
+ do {
+ unsigned int plen, wlen;
+
+ /* Search for special characters for post processing. */
+ if (CMP_FLAG(o, OPOST)) {
+ plen = ttydisc_findchar(obstart, oblen);
+ } else {
+ plen = oblen;
+ }
+
+ if (plen == 0) {
+ /*
+ * We're going to process a character
+ * that needs output post-processing.
+ */
+ if (ttydisc_write_oproc(tp, *obstart) == 0) {
+ obstart++;
+ oblen--;
+
+ tp->t_writepos = tp->t_column;
+ ttyinq_reprintpos_set(&tp->t_inq);
+ continue;
+ }
+ } else {
+ /* We're going to write regular data. */
+ wlen = ttyoutq_write(&tp->t_outq, obstart, plen);
+ obstart += wlen;
+ oblen -= wlen;
+ tp->t_column += wlen;
+
+ tp->t_writepos = tp->t_column;
+ ttyinq_reprintpos_set(&tp->t_inq);
+
+ if (wlen == plen)
+ continue;
+ }
+
+ /* Watermark reached. Try to sleep. */
+ tp->t_flags |= TF_HIWAT_OUT;
+
+ if (ioflag & IO_NDELAY) {
+ error = EWOULDBLOCK;
+ goto done;
+ }
+
+ /*
+ * The driver may write back the data
+ * synchronously. Be sure to check the high
+ * water mark before going to sleep.
+ */
+ ttydevsw_outwakeup(tp);
+ if ((tp->t_flags & TF_HIWAT_OUT) == 0)
+ continue;
+
+ error = tty_wait(tp, &tp->t_outwait);
+ if (error)
+ goto done;
+
+ if (tp->t_flags & TF_ZOMBIE) {
+ error = EIO;
+ goto done;
+ }
+ } while (oblen > 0);
+ }
+
+done:
+ if (!tty_gone(tp))
+ ttydevsw_outwakeup(tp);
+
+ /*
+ * Add the amount of bytes that we didn't process back to the
+ * uio counters. We need to do this to make sure write() doesn't
+ * count the bytes we didn't store in the queue.
+ */
+ uio->uio_resid += oblen;
+ return (error);
+}
+
+void
+ttydisc_optimize(struct tty *tp)
+{
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (ttyhook_hashook(tp, rint_bypass)) {
+ tp->t_flags |= TF_BYPASS;
+ } else if (ttyhook_hashook(tp, rint)) {
+ tp->t_flags &= ~TF_BYPASS;
+ } else if (!CMP_FLAG(i, ICRNL|IGNCR|IMAXBEL|INLCR|ISTRIP|IXON) &&
+ (!CMP_FLAG(i, BRKINT) || CMP_FLAG(i, IGNBRK)) &&
+ (!CMP_FLAG(i, PARMRK) ||
+ CMP_FLAG(i, IGNPAR|IGNBRK) == (IGNPAR|IGNBRK)) &&
+ !CMP_FLAG(l, ECHO|ICANON|IEXTEN|ISIG|PENDIN)) {
+ tp->t_flags |= TF_BYPASS;
+ } else {
+ tp->t_flags &= ~TF_BYPASS;
+ }
+}
+
+void
+ttydisc_modem(struct tty *tp, int open)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (open)
+ cv_broadcast(&tp->t_dcdwait);
+
+ /*
+ * Ignore modem status lines when CLOCAL is turned on, but don't
+ * enter the zombie state when the TTY isn't opened, because
+ * that would cause the TTY to be in zombie state after being
+ * opened.
+ */
+ if (!tty_opened(tp) || CMP_FLAG(c, CLOCAL))
+ return;
+
+ if (open == 0) {
+ /*
+ * Lost carrier.
+ */
+ tp->t_flags |= TF_ZOMBIE;
+
+ tty_signal_sessleader(tp, SIGHUP);
+ tty_flush(tp, FREAD|FWRITE);
+ } else {
+ /*
+ * Carrier is back again.
+ */
+
+ /* XXX: what should we do here? */
+ }
+}
+
+static int
+ttydisc_echo_force(struct tty *tp, char c, int quote)
+{
+
+ if (CMP_FLAG(o, OPOST) && CTL_ECHO(c, quote)) {
+ /*
+ * Only perform postprocessing when OPOST is turned on
+ * and the character is an unquoted BS/TB/NL/CR.
+ */
+ return ttydisc_write_oproc(tp, c);
+ } else if (CMP_FLAG(l, ECHOCTL) && CTL_PRINT(c, quote)) {
+ /*
+ * Only use ^X notation when ECHOCTL is turned on and
+ * we've got a quoted control character.
+ *
+ * Print backspaces when echoing an end-of-file.
+ */
+ char ob[4] = "^?\b\b";
+
+ /* Print ^X notation. */
+ if (c != 0x7f)
+ ob[1] = c + 'A' - 1;
+
+ if (!quote && CMP_CC(VEOF, c)) {
+ return ttyoutq_write_nofrag(&tp->t_outq, ob, 4);
+ } else {
+ tp->t_column += 2;
+ return ttyoutq_write_nofrag(&tp->t_outq, ob, 2);
+ }
+ } else {
+ /* Can just be printed. */
+ tp->t_column++;
+ return ttyoutq_write_nofrag(&tp->t_outq, &c, 1);
+ }
+}
+
+static int
+ttydisc_echo(struct tty *tp, char c, int quote)
+{
+
+ /*
+ * Only echo characters when ECHO is turned on, or ECHONL when
+ * the character is an unquoted newline.
+ */
+ if (!CMP_FLAG(l, ECHO) &&
+ (!CMP_FLAG(l, ECHONL) || c != CNL || quote))
+ return (0);
+
+ return ttydisc_echo_force(tp, c, quote);
+}
+
+static void
+ttydisc_reprint_char(void *d, char c, int quote)
+{
+ struct tty *tp = d;
+
+ ttydisc_echo(tp, c, quote);
+}
+
+static void
+ttydisc_reprint(struct tty *tp)
+{
+ cc_t c;
+
+ /* Print ^R\n, followed by the line. */
+ c = tp->t_termios.c_cc[VREPRINT];
+ if (c != _POSIX_VDISABLE)
+ ttydisc_echo(tp, c, 0);
+ ttydisc_echo(tp, CNL, 0);
+ ttyinq_reprintpos_reset(&tp->t_inq);
+
+ ttyinq_line_iterate_from_linestart(&tp->t_inq, ttydisc_reprint_char, tp);
+}
+
+struct ttydisc_recalc_length {
+ struct tty *tp;
+ unsigned int curlen;
+};
+
+static void
+ttydisc_recalc_charlength(void *d, char c, int quote)
+{
+ struct ttydisc_recalc_length *data = d;
+ struct tty *tp = data->tp;
+
+ if (CTL_PRINT(c, quote)) {
+ if (CMP_FLAG(l, ECHOCTL))
+ data->curlen += 2;
+ } else if (c == CTAB) {
+ data->curlen += 8 - (data->curlen & 7);
+ } else {
+ data->curlen++;
+ }
+}
+
+static unsigned int
+ttydisc_recalc_linelength(struct tty *tp)
+{
+ struct ttydisc_recalc_length data = { tp, tp->t_writepos };
+
+ ttyinq_line_iterate_from_reprintpos(&tp->t_inq,
+ ttydisc_recalc_charlength, &data);
+ return (data.curlen);
+}
+
+static int
+ttydisc_rubchar(struct tty *tp)
+{
+ char c;
+ int quote;
+ unsigned int prevpos, tablen;
+
+ if (ttyinq_peekchar(&tp->t_inq, &c, &quote) != 0)
+ return (-1);
+ ttyinq_unputchar(&tp->t_inq);
+
+ if (CMP_FLAG(l, ECHO)) {
+ /*
+ * Remove the character from the screen. This is even
+ * safe for characters that span multiple characters
+ * (tabs, quoted, etc).
+ */
+ if (tp->t_writepos >= tp->t_column) {
+ /* Retype the sentence. */
+ ttydisc_reprint(tp);
+ } else if (CMP_FLAG(l, ECHOE)) {
+ if (CTL_PRINT(c, quote)) {
+ /* Remove ^X formatted chars. */
+ if (CMP_FLAG(l, ECHOCTL)) {
+ tp->t_column -= 2;
+ ttyoutq_write_nofrag(&tp->t_outq,
+ "\b\b \b\b", 6);
+ }
+ } else if (c == ' ') {
+ /* Space character needs no rubbing. */
+ tp->t_column -= 1;
+ ttyoutq_write_nofrag(&tp->t_outq, "\b", 1);
+ } else if (c == CTAB) {
+ /*
+ * Making backspace work with tabs is
+ * quite hard. Recalculate the length of
+ * this character and remove it.
+ *
+ * Because terminal settings could be
+ * changed while the line is being
+ * inserted, the calculation may not be
+ * accurate. Make sure we keep the
+ * tab length within proper bounds.
+ */
+ prevpos = ttydisc_recalc_linelength(tp);
+ if (prevpos >= tp->t_column)
+ tablen = 1;
+ else
+ tablen = tp->t_column - prevpos;
+ if (tablen > 8)
+ tablen = 8;
+
+ tp->t_column = prevpos;
+ ttyoutq_write_nofrag(&tp->t_outq,
+ "\b\b\b\b\b\b\b\b", tablen);
+ return (0);
+ } else {
+ /*
+ * Remove a regular character by
+ * punching a space over it.
+ */
+ tp->t_column -= 1;
+ ttyoutq_write_nofrag(&tp->t_outq, "\b \b", 3);
+ }
+ } else {
+ /* Don't print spaces. */
+ ttydisc_echo(tp, tp->t_termios.c_cc[VERASE], 0);
+ }
+ }
+
+ return (0);
+}
+
+static void
+ttydisc_rubword(struct tty *tp)
+{
+ char c;
+ int quote, alnum;
+
+ /* Strip whitespace first. */
+ for (;;) {
+ if (ttyinq_peekchar(&tp->t_inq, &c, &quote) != 0)
+ return;
+ if (!CTL_WHITE(c))
+ break;
+ ttydisc_rubchar(tp);
+ }
+
+ /*
+ * Record whether the last character from the previous iteration
+ * was alphanumeric or not. We need this to implement ALTWERASE.
+ */
+ alnum = CTL_ALNUM(c);
+ for (;;) {
+ ttydisc_rubchar(tp);
+
+ if (ttyinq_peekchar(&tp->t_inq, &c, &quote) != 0)
+ return;
+ if (CTL_WHITE(c))
+ return;
+ if (CMP_FLAG(l, ALTWERASE) && CTL_ALNUM(c) != alnum)
+ return;
+ }
+}
+
+int
+ttydisc_rint(struct tty *tp, char c, int flags)
+{
+ int signal, quote = 0;
+ char ob[3] = { 0xff, 0x00 };
+ size_t ol;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ atomic_add_long(&tty_nin, 1);
+
+ if (ttyhook_hashook(tp, rint))
+ return ttyhook_rint(tp, c, flags);
+
+ if (tp->t_flags & TF_BYPASS)
+ goto processed;
+
+ if (flags) {
+ if (flags & TRE_BREAK) {
+ if (CMP_FLAG(i, IGNBRK)) {
+ /* Ignore break characters. */
+ return (0);
+ } else if (CMP_FLAG(i, BRKINT)) {
+ /* Generate SIGINT on break. */
+ tty_flush(tp, FREAD|FWRITE);
+ tty_signal_pgrp(tp, SIGINT);
+ return (0);
+ } else {
+ /* Just print it. */
+ goto parmrk;
+ }
+ } else if (flags & TRE_FRAMING ||
+ (flags & TRE_PARITY && CMP_FLAG(i, INPCK))) {
+ if (CMP_FLAG(i, IGNPAR)) {
+ /* Ignore bad characters. */
+ return (0);
+ } else {
+ /* Just print it. */
+ goto parmrk;
+ }
+ }
+ }
+
+ /* Allow any character to perform a wakeup. */
+ if (CMP_FLAG(i, IXANY))
+ tp->t_flags &= ~TF_STOPPED;
+
+ /* Remove the top bit. */
+ if (CMP_FLAG(i, ISTRIP))
+ c &= ~0x80;
+
+ /* Skip input processing when we want to print it literally. */
+ if (tp->t_flags & TF_LITERAL) {
+ tp->t_flags &= ~TF_LITERAL;
+ quote = 1;
+ goto processed;
+ }
+
+ /* Special control characters that are implementation dependent. */
+ if (CMP_FLAG(l, IEXTEN)) {
+ /* Accept the next character as literal. */
+ if (CMP_CC(VLNEXT, c)) {
+ if (CMP_FLAG(l, ECHO)) {
+ if (CMP_FLAG(l, ECHOE))
+ ttyoutq_write_nofrag(&tp->t_outq, "^\b", 2);
+ else
+ ttydisc_echo(tp, c, 0);
+ }
+ tp->t_flags |= TF_LITERAL;
+ return (0);
+ }
+ }
+
+ /*
+ * Handle signal processing.
+ */
+ if (CMP_FLAG(l, ISIG)) {
+ if (CMP_FLAG(l, ICANON|IEXTEN) == (ICANON|IEXTEN)) {
+ if (CMP_CC(VSTATUS, c)) {
+ tty_signal_pgrp(tp, SIGINFO);
+ return (0);
+ }
+ }
+
+ /*
+ * When compared to the old implementation, this
+ * implementation also flushes the output queue. POSIX
+ * is really brief about this, but does make us assume
+ * we have to do so.
+ */
+ signal = 0;
+ if (CMP_CC(VINTR, c)) {
+ signal = SIGINT;
+ } else if (CMP_CC(VQUIT, c)) {
+ signal = SIGQUIT;
+ } else if (CMP_CC(VSUSP, c)) {
+ signal = SIGTSTP;
+ }
+
+ if (signal != 0) {
+ /*
+ * Echo the character before signalling the
+ * processes.
+ */
+ if (!CMP_FLAG(l, NOFLSH))
+ tty_flush(tp, FREAD|FWRITE);
+ ttydisc_echo(tp, c, 0);
+ tty_signal_pgrp(tp, signal);
+ return (0);
+ }
+ }
+
+ /*
+ * Handle start/stop characters.
+ */
+ if (CMP_FLAG(i, IXON)) {
+ if (CMP_CC(VSTOP, c)) {
+ /* Stop it if we aren't stopped yet. */
+ if ((tp->t_flags & TF_STOPPED) == 0) {
+ tp->t_flags |= TF_STOPPED;
+ return (0);
+ }
+ /*
+ * Fallthrough:
+ * When VSTART == VSTOP, we should make this key
+ * toggle it.
+ */
+ if (!CMP_CC(VSTART, c))
+ return (0);
+ }
+ if (CMP_CC(VSTART, c)) {
+ tp->t_flags &= ~TF_STOPPED;
+ return (0);
+ }
+ }
+
+ /* Conversion of CR and NL. */
+ switch (c) {
+ case CCR:
+ if (CMP_FLAG(i, IGNCR))
+ return (0);
+ if (CMP_FLAG(i, ICRNL))
+ c = CNL;
+ break;
+ case CNL:
+ if (CMP_FLAG(i, INLCR))
+ c = CCR;
+ break;
+ }
+
+ /* Canonical line editing. */
+ if (CMP_FLAG(l, ICANON)) {
+ if (CMP_CC(VERASE, c) || CMP_CC(VERASE2, c)) {
+ ttydisc_rubchar(tp);
+ return (0);
+ } else if (CMP_CC(VKILL, c)) {
+ while (ttydisc_rubchar(tp) == 0);
+ return (0);
+ } else if (CMP_FLAG(l, IEXTEN)) {
+ if (CMP_CC(VWERASE, c)) {
+ ttydisc_rubword(tp);
+ return (0);
+ } else if (CMP_CC(VREPRINT, c)) {
+ ttydisc_reprint(tp);
+ return (0);
+ }
+ }
+ }
+
+processed:
+ if (CMP_FLAG(i, PARMRK) && (unsigned char)c == 0xff) {
+ /* Print 0xff 0xff. */
+ ob[1] = 0xff;
+ ol = 2;
+ quote = 1;
+ } else {
+ ob[0] = c;
+ ol = 1;
+ }
+
+ goto print;
+
+parmrk:
+ if (CMP_FLAG(i, PARMRK)) {
+ /* Prepend 0xff 0x00 0x.. */
+ ob[2] = c;
+ ol = 3;
+ quote = 1;
+ } else {
+ ob[0] = c;
+ ol = 1;
+ }
+
+print:
+ /* See if we can store this on the input queue. */
+ if (ttyinq_write_nofrag(&tp->t_inq, ob, ol, quote) != 0) {
+ if (CMP_FLAG(i, IMAXBEL))
+ ttyoutq_write_nofrag(&tp->t_outq, "\a", 1);
+
+ /*
+ * Prevent a deadlock here. It may be possible that a
+ * user has entered so much data that there is no data
+ * available to read(), but the buffers are full anyway.
+ *
+ * Only enter the high watermark if the device driver
+ * can actually transmit something.
+ */
+ if (ttyinq_bytescanonicalized(&tp->t_inq) == 0)
+ return (0);
+
+ tty_hiwat_in_block(tp);
+ return (-1);
+ }
+
+ /*
+ * In raw mode, we canonicalize after receiving a single
+ * character. Otherwise, we canonicalize when we receive a
+ * newline, VEOL or VEOF, but only when it isn't quoted.
+ */
+ if (!CMP_FLAG(l, ICANON) ||
+ (!quote && (c == CNL || CMP_CC(VEOL, c) || CMP_CC(VEOF, c)))) {
+ ttyinq_canonicalize(&tp->t_inq);
+ }
+
+ ttydisc_echo(tp, c, quote);
+
+ return (0);
+}
+
+size_t
+ttydisc_rint_simple(struct tty *tp, const void *buf, size_t len)
+{
+ const char *cbuf;
+
+ if (ttydisc_can_bypass(tp))
+ return (ttydisc_rint_bypass(tp, buf, len));
+
+ for (cbuf = buf; len-- > 0; cbuf++) {
+ if (ttydisc_rint(tp, *cbuf, 0) != 0)
+ break;
+ }
+
+ return (cbuf - (const char *)buf);
+}
+
+size_t
+ttydisc_rint_bypass(struct tty *tp, const void *buf, size_t len)
+{
+ size_t ret;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ MPASS(tp->t_flags & TF_BYPASS);
+
+ atomic_add_long(&tty_nin, len);
+
+ if (ttyhook_hashook(tp, rint_bypass))
+ return ttyhook_rint_bypass(tp, buf, len);
+
+ ret = ttyinq_write(&tp->t_inq, buf, len, 0);
+ ttyinq_canonicalize(&tp->t_inq);
+ if (ret < len)
+ tty_hiwat_in_block(tp);
+
+ return (ret);
+}
+
+void
+ttydisc_rint_done(struct tty *tp)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (ttyhook_hashook(tp, rint_done))
+ ttyhook_rint_done(tp);
+
+ /* Wake up readers. */
+ tty_wakeup(tp, FREAD);
+ /* Wake up driver for echo. */
+ ttydevsw_outwakeup(tp);
+}
+
+size_t
+ttydisc_rint_poll(struct tty *tp)
+{
+ size_t l;
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (ttyhook_hashook(tp, rint_poll))
+ return ttyhook_rint_poll(tp);
+
+ /*
+ * XXX: Still allow character input when there's no space in the
+ * buffers, but we haven't entered the high watermark. This is
+ * to allow backspace characters to be inserted when in
+ * canonical mode.
+ */
+ l = ttyinq_bytesleft(&tp->t_inq);
+ if (l == 0 && (tp->t_flags & TF_HIWAT_IN) == 0)
+ return (1);
+
+ return (l);
+}
+
+static void
+ttydisc_wakeup_watermark(struct tty *tp)
+{
+ size_t c;
+
+ c = ttyoutq_bytesleft(&tp->t_outq);
+ if (tp->t_flags & TF_HIWAT_OUT) {
+ /* Only allow us to run when we're below the watermark. */
+ if (c < tp->t_outlow)
+ return;
+
+ /* Reset the watermark. */
+ tp->t_flags &= ~TF_HIWAT_OUT;
+ } else {
+ /* Only run when we have data at all. */
+ if (c == 0)
+ return;
+ }
+ tty_wakeup(tp, FWRITE);
+}
+
+size_t
+ttydisc_getc(struct tty *tp, void *buf, size_t len)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tp->t_flags & TF_STOPPED)
+ return (0);
+
+ if (ttyhook_hashook(tp, getc_inject))
+ return ttyhook_getc_inject(tp, buf, len);
+
+ len = ttyoutq_read(&tp->t_outq, buf, len);
+
+ if (ttyhook_hashook(tp, getc_capture))
+ ttyhook_getc_capture(tp, buf, len);
+
+ ttydisc_wakeup_watermark(tp);
+ atomic_add_long(&tty_nout, len);
+
+ return (len);
+}
+
+int
+ttydisc_getc_uio(struct tty *tp, struct uio *uio)
+{
+ int error = 0;
+ ssize_t obytes = uio->uio_resid;
+ size_t len;
+ char buf[TTY_STACKBUF];
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tp->t_flags & TF_STOPPED)
+ return (0);
+
+ /*
+ * When a TTY hook is attached, we cannot perform unbuffered
+ * copying to userspace. Just call ttydisc_getc() and
+ * temporarily store data in a shadow buffer.
+ */
+ if (ttyhook_hashook(tp, getc_capture) ||
+ ttyhook_hashook(tp, getc_inject)) {
+ while (uio->uio_resid > 0) {
+ /* Read to shadow buffer. */
+ len = ttydisc_getc(tp, buf,
+ MIN(uio->uio_resid, sizeof buf));
+ if (len == 0)
+ break;
+
+ /* Copy to userspace. */
+ tty_unlock(tp);
+ error = uiomove(buf, len, uio);
+ tty_lock(tp);
+
+ if (error != 0)
+ break;
+ }
+ } else {
+ error = ttyoutq_read_uio(&tp->t_outq, tp, uio);
+
+ ttydisc_wakeup_watermark(tp);
+ atomic_add_long(&tty_nout, obytes - uio->uio_resid);
+ }
+
+ return (error);
+}
+
+size_t
+ttydisc_getc_poll(struct tty *tp)
+{
+
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tp->t_flags & TF_STOPPED)
+ return (0);
+
+ if (ttyhook_hashook(tp, getc_poll))
+ return ttyhook_getc_poll(tp);
+
+ return ttyoutq_bytesused(&tp->t_outq);
+}
+
+/*
+ * XXX: not really related to the TTYDISC, but we'd better put
+ * tty_putchar() here, because we need to perform proper output
+ * processing.
+ */
+
+int
+tty_putchar(struct tty *tp, char c)
+{
+ tty_lock_assert(tp, MA_OWNED);
+
+ if (tty_gone(tp))
+ return (-1);
+
+ ttydisc_echo_force(tp, c, 0);
+ tp->t_writepos = tp->t_column;
+ ttyinq_reprintpos_set(&tp->t_inq);
+
+ ttydevsw_outwakeup(tp);
+ return (0);
+}
diff --git a/freebsd/sys/sys/cons.h b/freebsd/sys/sys/cons.h
new file mode 100644
index 00000000..78cba61e
--- /dev/null
+++ b/freebsd/sys/sys/cons.h
@@ -0,0 +1,144 @@
+/*-
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)cons.h 7.2 (Berkeley) 5/9/91
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CONS_H_
+#define _MACHINE_CONS_H_
+
+struct consdev;
+struct tty;
+
+typedef void cn_probe_t(struct consdev *);
+typedef void cn_init_t(struct consdev *);
+typedef void cn_term_t(struct consdev *);
+typedef void cn_grab_t(struct consdev *);
+typedef void cn_ungrab_t(struct consdev *);
+typedef int cn_getc_t(struct consdev *);
+typedef void cn_putc_t(struct consdev *, int);
+
+struct consdev_ops {
+ cn_probe_t *cn_probe;
+ /* probe hardware and fill in consdev info */
+ cn_init_t *cn_init;
+ /* turn on as console */
+ cn_term_t *cn_term;
+ /* turn off as console */
+ cn_getc_t *cn_getc;
+ /* kernel getchar interface */
+ cn_putc_t *cn_putc;
+ /* kernel putchar interface */
+ cn_grab_t *cn_grab;
+ /* grab console for exclusive kernel use */
+ cn_ungrab_t *cn_ungrab;
+ /* ungrab console */
+};
+
+struct consdev {
+ const struct consdev_ops *cn_ops;
+ /* console device operations. */
+ short cn_pri; /* pecking order; the higher the better */
+ void *cn_arg; /* driver's method argument */
+ int cn_flags; /* capabilities of this console */
+ char cn_name[SPECNAMELEN + 1]; /* console (device) name */
+};
+
+/* values for cn_pri - reflect our policy for console selection */
+#define CN_DEAD 0 /* device doesn't exist */
+#define CN_LOW 1 /* device is a last resort only */
+#define CN_NORMAL 2 /* device exists but is nothing special */
+#define CN_INTERNAL 3 /* "internal" bit-mapped display */
+#define CN_REMOTE 4 /* serial interface with remote bit set */
+
+/* Values for cn_flags. */
+#define CN_FLAG_NODEBUG 0x00000001 /* Not supported with debugger. */
+#define CN_FLAG_NOAVAIL 0x00000002 /* Temporarily not available. */
+
+/* Visibility of characters in cngets() */
+#define GETS_NOECHO 0 /* Disable echoing of characters. */
+#define GETS_ECHO 1 /* Enable echoing of characters. */
+#define GETS_ECHOPASS 2 /* Print a * for every character. */
+
+#ifdef _KERNEL
+
+extern struct msgbuf consmsgbuf; /* Message buffer for constty. */
+extern struct tty *constty; /* Temporary virtual console. */
+
+#define CONSOLE_DEVICE(name, ops, arg) \
+ static struct consdev name = { \
+ .cn_ops = &ops, \
+ .cn_arg = (arg), \
+ }; \
+ DATA_SET(cons_set, name)
+
+#define CONSOLE_DRIVER(name) \
+ static const struct consdev_ops name##_consdev_ops = { \
+ .cn_probe = name##_cnprobe, \
+ .cn_init = name##_cninit, \
+ .cn_term = name##_cnterm, \
+ .cn_getc = name##_cngetc, \
+ .cn_putc = name##_cnputc, \
+ .cn_grab = name##_cngrab, \
+ .cn_ungrab = name##_cnungrab, \
+ }; \
+ CONSOLE_DEVICE(name##_consdev, name##_consdev_ops, NULL)
+
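+/*
+ * Usage sketch (illustrative; "foo" is a placeholder driver name): a
+ * low-level console supplies the cn_* methods and registers them with
+ * CONSOLE_DRIVER(), which builds the consdev_ops table and links a
+ * consdev into the cons_set linker set:
+ *
+ *	static cn_probe_t	foo_cnprobe;
+ *	static cn_init_t	foo_cninit;
+ *	static cn_term_t	foo_cnterm;
+ *	static cn_getc_t	foo_cngetc;
+ *	static cn_putc_t	foo_cnputc;
+ *	static cn_grab_t	foo_cngrab;
+ *	static cn_ungrab_t	foo_cnungrab;
+ *
+ *	CONSOLE_DRIVER(foo);
+ */
+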
+/* Other kernel entry points. */
+void cninit(void);
+void cninit_finish(void);
+int cnadd(struct consdev *);
+void cnavailable(struct consdev *, int);
+void cnremove(struct consdev *);
+void cnselect(struct consdev *);
+void cngrab(void);
+void cnungrab(void);
+int cncheckc(void);
+int cngetc(void);
+void cngets(char *, size_t, int);
+void cnputc(int);
+void cnputs(char *);
+int cnunavailable(void);
+void constty_set(struct tty *tp);
+void constty_clear(void);
+
+/* sc(4) / vt(4) coexistence shim */
+#define VTY_SC 0x01
+#define VTY_VT 0x02
+int vty_enabled(unsigned int);
+void vty_set_preferred(unsigned int);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_CONS_H_ */
diff --git a/freebsd/sys/sys/serial.h b/freebsd/sys/sys/serial.h
new file mode 100644
index 00000000..1a149a96
--- /dev/null
+++ b/freebsd/sys/sys/serial.h
@@ -0,0 +1,92 @@
+/*-
+ * Copyright (c) 2004 Poul-Henning Kamp
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file contains definitions which pertain to serial ports as such
+ * (both async and sync), but which do not necessarily have anything to
+ * do with tty processing.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SERIAL_H_
+#define _SYS_SERIAL_H_
+
+
+/*
+ * Identification of modem control signals. These definitions match
+ * the TIOCMGET definitions in <sys/ttycom.h> shifted a bit down, and
+ * that identity is enforced with CTASSERT at the bottom of kern/tty.c.
+ * Both the modem bits and delta bits must fit in 16 bits.
+ */
+#define SER_DTR 0x0001 /* data terminal ready */
+#define SER_RTS 0x0002 /* request to send */
+#define SER_STX 0x0004 /* secondary transmit */
+#define SER_SRX 0x0008 /* secondary receive */
+#define SER_CTS 0x0010 /* clear to send */
+#define SER_DCD 0x0020 /* data carrier detect */
+#define SER_RI 0x0040 /* ring indicate */
+#define SER_DSR 0x0080 /* data set ready */
+
+#define SER_MASK_STATE 0x00ff
+
+/* Delta bits, used to indicate which signals should be/were affected */
+#define SER_DELTA(x) ((x) << 8)
+
+#define SER_DDTR SER_DELTA(SER_DTR)
+#define SER_DRTS SER_DELTA(SER_RTS)
+#define SER_DSTX SER_DELTA(SER_STX)
+#define SER_DSRX SER_DELTA(SER_SRX)
+#define SER_DCTS SER_DELTA(SER_CTS)
+#define SER_DDCD SER_DELTA(SER_DCD)
+#define SER_DRI SER_DELTA(SER_RI)
+#define SER_DDSR SER_DELTA(SER_DSR)
+
+#define SER_MASK_DELTA SER_DELTA(SER_MASK_STATE)
+
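+/*
+ * Usage sketch (illustrative): a UART driver reporting that carrier
+ * detect was just asserted combines the state bit with its delta bit,
+ *
+ *	sig = SER_DCD | SER_DDCD;
+ *
+ * so a consumer can tell both the current level (SER_MASK_STATE) and
+ * which lines changed (SER_MASK_DELTA).
+ */
+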
+#ifdef _KERNEL
+/*
+ * Specification of interrupt sources typical for serial ports. These are
+ * useful when some umbrella driver like scc(4) has enough knowledge of
+ * the hardware to obtain the set of pending interrupts but does not itself
+ * handle the interrupt. Each interrupt source can be given an interrupt
+ * resource for which inferior drivers can install handlers. The lower 16
+ * bits are kept free for the signals above.
+ */
+#define SER_INT_OVERRUN 0x010000
+#define SER_INT_BREAK 0x020000
+#define SER_INT_RXREADY 0x040000
+#define SER_INT_SIGCHG 0x080000
+#define SER_INT_TXIDLE 0x100000
+
+#define SER_INT_MASK 0xff0000
+#define SER_INT_SIGMASK (SER_MASK_DELTA | SER_MASK_STATE)
+
+#ifndef LOCORE
+typedef int serdev_intr_t(void*);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_SERIAL_H_ */