Diffstat (limited to 'freebsd/sys/net/bpf.c')
-rw-r--r--  freebsd/sys/net/bpf.c | 390
1 file changed, 251 insertions(+), 139 deletions(-)
diff --git a/freebsd/sys/net/bpf.c b/freebsd/sys/net/bpf.c
index f74ac9a1..e7822586 100644
--- a/freebsd/sys/net/bpf.c
+++ b/freebsd/sys/net/bpf.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_bpf.h>
#include <rtems/bsd/local/opt_compat.h>
+#include <rtems/bsd/local/opt_ddb.h>
#include <rtems/bsd/local/opt_netgraph.h>
#include <sys/types.h>
@@ -69,8 +70,13 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
#include <net/if.h>
-#define BPF_INTERNAL
+#include <net/if_var.h>
+#include <net/if_dl.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
@@ -78,6 +84,7 @@ __FBSDID("$FreeBSD$");
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
+#include <net/route.h>
#include <net/vnet.h>
#include <netinet/in.h>
@@ -96,6 +103,20 @@ __FBSDID("$FreeBSD$");
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
+struct bpf_if {
+#define bif_next bif_ext.bif_next
+#define bif_dlist bif_ext.bif_dlist
+ struct bpf_if_ext bif_ext; /* public members */
+ u_int bif_dlt; /* link layer type */
+ u_int bif_hdrlen; /* length of link header */
+ struct ifnet *bif_ifp; /* corresponding interface */
+ struct rwlock bif_lock; /* interface lock */
+ LIST_HEAD(, bpf_d) bif_wlist; /* writer-only list */
+ int bif_flags; /* Interface flags */
+};
+
+CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);
+
#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
#define PRINET 26 /* interruptible */
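Since bif_ext is the first member and the CTASSERT above pins it at offset zero, the private struct bpf_if doubles as the public struct bpf_if_ext that other subsystems see. A minimal sketch of why that layout matters (the helper name below is hypothetical, not part of this change):

    /*
     * Sketch: with offsetof(struct bpf_if, bif_ext) == 0, converting
     * between the private and the public view is a plain member access;
     * no container_of()-style pointer arithmetic is needed.
     */
    static inline struct bpf_if_ext *
    bpf_if_to_ext(struct bpf_if *bp)
    {
            return (&bp->bif_ext);
    }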
@@ -107,7 +128,7 @@ MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define BPF_ALIGNMENT32 sizeof(int32_t)
-#define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
+#define BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)
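roundup2() from <sys/param.h> computes the same power-of-two round-up that the old mask expression spelled out by hand; a quick compile-time check of the equivalence, as a sketch:

    /* Sketch: both forms agree whenever the alignment is a power of two. */
    CTASSERT(roundup2(5, BPF_ALIGNMENT32) ==
        ((5 + (BPF_ALIGNMENT32 - 1)) & ~(BPF_ALIGNMENT32 - 1)));
    CTASSERT(roundup2(8, BPF_ALIGNMENT32) == 8);   /* already aligned */
    CTASSERT(roundup2(9, BPF_ALIGNMENT32) == 12);  /* rounds up to 12 */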
#ifndef BURN_BRIDGES
/*
@@ -148,7 +169,7 @@ struct bpf_dltlist32 {
* structures registered by different layers in the stack (i.e., 802.11
* frames, ethernet frames, etc).
*/
-static LIST_HEAD(, bpf_if) bpf_iflist;
+static LIST_HEAD(, bpf_if) bpf_iflist, bpf_freelist;
static struct mtx bpf_mtx; /* bpf global lock */
static int bpf_bpfd_cnt;
@@ -157,7 +178,7 @@ static void bpf_detachd(struct bpf_d *);
static void bpf_detachd_locked(struct bpf_d *);
static void bpf_freed(struct bpf_d *);
static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
- struct sockaddr *, int *, struct bpf_insn *);
+ struct sockaddr *, int *, struct bpf_d *);
static int bpf_setif(struct bpf_d *, struct ifreq *);
static void bpf_timed_out(void *);
static __inline void
@@ -188,8 +209,8 @@ static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
static VNET_DEFINE(int, bpf_optimize_writers) = 0;
#define V_bpf_optimize_writers VNET(bpf_optimize_writers)
-SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
- CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
+SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RW,
+ &VNET_NAME(bpf_optimize_writers), 0,
"Do not send packets until BPF program is set");
#ifndef __rtems__
@@ -479,7 +500,7 @@ bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
*/
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
- struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
+ struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
{
const struct ieee80211_bpf_params *p;
struct ether_header *eh;
@@ -561,37 +582,20 @@ bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
}
len = uio->uio_resid;
-
- if (len - hlen > ifp->if_mtu)
+ if (len < hlen || len - hlen > ifp->if_mtu)
return (EMSGSIZE);
- if ((unsigned)len > MJUM16BYTES)
+ m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
+ if (m == NULL)
return (EIO);
-
- if (len <= MHLEN)
- MGETHDR(m, M_WAIT, MT_DATA);
- else if (len <= MCLBYTES)
- m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
- else
- m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
-#if (MJUMPAGESIZE > MCLBYTES)
- len <= MJUMPAGESIZE ? MJUMPAGESIZE :
-#endif
- (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
m->m_pkthdr.len = m->m_len = len;
- m->m_pkthdr.rcvif = NULL;
*mp = m;
- if (m->m_len < hlen) {
- error = EPERM;
- goto bad;
- }
-
error = uiomove(mtod(m, u_char *), len, uio);
if (error)
goto bad;
- slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
+ slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
if (slen == 0) {
error = EPERM;
goto bad;
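m_get2() replaces the removed size ladder: it picks a plain mbuf, a standard cluster, or a page-sized jumbo cluster from the requested length, and returns NULL for lengths it cannot satisfy, which is what the new EIO check catches even under M_WAITOK. A rough sketch of the selection (simplified; the authoritative logic lives in sys/kern/uipc_mbuf.c):

    /*
     * Sketch of m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR) sizing:
     *   len <= MHLEN        -> plain mbuf, data stored inline
     *   len <= MCLBYTES     -> mbuf + 2 KB cluster
     *   len <= MJUMPAGESIZE -> mbuf + page-sized jumbo cluster
     *   otherwise           -> NULL (bpf_movein() then returns EIO)
     */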
@@ -608,6 +612,10 @@ bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
else
m->m_flags |= M_MCAST;
}
+ if (d->bd_hdrcmplt == 0) {
+ memcpy(eh->ether_shost, IF_LLADDR(ifp),
+ sizeof(eh->ether_shost));
+ }
break;
}
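The new block above rewrites the Ethernet source address whenever the descriptor is not in "header complete" mode, which is the default. A userland writer that needs its own ether_shost preserved must flip that mode first; a minimal sketch (descriptor setup and error handling elided):

    #include <sys/ioctl.h>
    #include <net/bpf.h>

    /* Sketch: keep bpf_movein() from overwriting ether_shost. */
    static int
    enable_hdrcmplt(int bpf_fd)
    {
            u_int on = 1;

            /* With bd_hdrcmplt set, the memcpy() above is skipped. */
            return (ioctl(bpf_fd, BIOCSHDRCMPLT, &on));
    }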
@@ -632,7 +640,7 @@ bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
goto bad;
}
}
- bcopy(m->m_data, sockp->sa_data, hlen);
+ bcopy(mtod(m, const void *), sockp->sa_data, hlen);
}
*hdrlen = hlen;
@@ -656,13 +664,13 @@ bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
* Save sysctl value to protect from sysctl change
* between reads
*/
- op_w = V_bpf_optimize_writers;
+ op_w = V_bpf_optimize_writers || d->bd_writer;
if (d->bd_bif != NULL)
bpf_detachd_locked(d);
/*
* Point d at bp, and add d to the interface's list.
- * Since there are many applicaiotns using BPF for
+ * Since there are many applications using BPF for
* sending raw packets only (dhcpd, cdpd are good examples)
* we can delay adding d to the list of active listeners until
* some filter is configured.
@@ -760,7 +768,7 @@ bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode, int flen)
/*
* Add d to the list of active bp filters.
- * Reuqires bpf_attachd() to be called before
+ * Requires bpf_attachd() to be called before.
*/
static void
bpf_upgraded(struct bpf_d *d)
@@ -909,7 +917,7 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
struct bpf_d *d;
#ifndef __rtems__
- int error, size;
+ int error;
d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
error = devfs_set_cdevpriv(d, bpf_dtor);
@@ -932,6 +940,8 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
* particular buffer method.
*/
bpf_buffer_init(d);
+ if ((flags & FREAD) == 0)
+ d->bd_writer = 2;
d->bd_hbuf_in_use = 0;
d->bd_bufmode = BPF_BUFMODE_BUFFER;
d->bd_sig = SIGIO;
@@ -945,10 +955,6 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
- /* Allocate default buffers */
- size = d->bd_bufsize;
- bpf_buffer_ioctl_sblen(d, &size);
-
#ifndef __rtems__
return (0);
#else /* __rtems__ */
@@ -1163,6 +1169,7 @@ bpfwrite(struct bpf_d *d, struct uio *uio, int ioflag)
struct ifnet *ifp;
struct mbuf *m, *mc;
struct sockaddr dst;
+ struct route ro;
int error, hlen;
error = devfs_get_cdevpriv((void **)&d);
@@ -1194,7 +1201,7 @@ bpfwrite(struct bpf_d *d, struct uio *uio, int ioflag)
hlen = 0;
/* XXX: bpf_movein() can sleep */
error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
- &m, &dst, &hlen, d->bd_wfilter);
+ &m, &dst, &hlen, d);
if (error) {
d->bd_wdcount++;
return (error);
@@ -1204,7 +1211,7 @@ bpfwrite(struct bpf_d *d, struct uio *uio, int ioflag)
dst.sa_family = pseudo_AF_HDRCMPLT;
if (d->bd_feedback) {
- mc = m_dup(m, M_DONTWAIT);
+ mc = m_dup(m, M_NOWAIT);
if (mc != NULL)
mc->m_pkthdr.rcvif = ifp;
/* Set M_PROMISC for outgoing packets to be discarded. */
@@ -1226,7 +1233,14 @@ bpfwrite(struct bpf_d *d, struct uio *uio, int ioflag)
BPFD_UNLOCK(d);
#endif
- error = (*ifp->if_output)(ifp, m, &dst, NULL);
+ bzero(&ro, sizeof(ro));
+ if (hlen != 0) {
+ ro.ro_prepend = (u_char *)&dst.sa_data;
+ ro.ro_plen = hlen;
+ ro.ro_flags = RT_HAS_HEADER;
+ }
+
+ error = (*ifp->if_output)(ifp, m, &dst, &ro);
if (error)
d->bd_wdcount++;
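Rather than relying on the pseudo sockaddr alone, bpfwrite() now also hands the caller-built link-layer header to if_output() through the route's prepend fields. A condensed sketch of the contract the three fields establish (reusing the same dst and hlen as above):

    /*
     * Sketch: RT_HAS_HEADER tells the output path the L2 header is
     * already built, so it can prepend ro_prepend/ro_plen verbatim
     * instead of resolving one via the routing and ARP/ND tables.
     */
    struct route ro = {
            .ro_prepend = (u_char *)&dst.sa_data,  /* raw L2 header */
            .ro_plen    = hlen,                    /* header length */
            .ro_flags   = RT_HAS_HEADER,
    };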
@@ -1278,7 +1292,6 @@ reset_d(struct bpf_d *d)
/*
* FIONREAD Check for read packet available.
- * SIOCGIFADDR Get interface address - convenient hook to driver.
* BIOCGBLEN Get buffer len [for read()].
* BIOCSETF Set read filter.
* BIOCSETFNR Set read filter without resetting descriptor.
@@ -1347,7 +1360,7 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
#endif
case BIOCGETIF:
case BIOCGRTIMEOUT:
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
case BIOCGRTIMEOUT32:
#endif
case BIOCGSTATS:
@@ -1359,7 +1372,7 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
case FIONREAD:
case BIOCLOCK:
case BIOCSRTIMEOUT:
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
case BIOCSRTIMEOUT32:
#endif
case BIOCIMMEDIATE:
@@ -1415,19 +1428,6 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
break;
}
- case SIOCGIFADDR:
- {
- struct ifnet *ifp;
-
- if (d->bd_bif == NULL)
- error = EINVAL;
- else {
- ifp = d->bd_bif->bif_ifp;
- error = (*ifp->if_ioctl)(ifp, cmd, addr);
- }
- break;
- }
-
/*
* Get buffer len [for read()].
*/
@@ -1564,21 +1564,44 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
* Set interface.
*/
case BIOCSETIF:
- BPF_LOCK();
- error = bpf_setif(d, (struct ifreq *)addr);
- BPF_UNLOCK();
- break;
+ {
+ int alloc_buf, size;
+
+ /*
+ * Behavior here depends on the buffering model. If
+ * we're using kernel memory buffers, then we can
+ * allocate them here. If we're using zero-copy,
+ * then the user process must have registered buffers
+ * by the time we get here.
+ */
+ alloc_buf = 0;
+ BPFD_LOCK(d);
+ if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
+ d->bd_sbuf == NULL)
+ alloc_buf = 1;
+ BPFD_UNLOCK(d);
+ if (alloc_buf) {
+ size = d->bd_bufsize;
+ error = bpf_buffer_ioctl_sblen(d, &size);
+ if (error != 0)
+ break;
+ }
+ BPF_LOCK();
+ error = bpf_setif(d, (struct ifreq *)addr);
+ BPF_UNLOCK();
+ break;
+ }
/*
* Set read timeout.
*/
case BIOCSRTIMEOUT:
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
case BIOCSRTIMEOUT32:
#endif
{
struct timeval *tv = (struct timeval *)addr;
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
struct timeval32 *tv32;
struct timeval tv64;
@@ -1604,12 +1627,12 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
* Get read timeout.
*/
case BIOCGRTIMEOUT:
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
case BIOCGRTIMEOUT32:
#endif
{
struct timeval *tv;
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
struct timeval32 *tv32;
struct timeval tv64;
@@ -1621,7 +1644,7 @@ bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
tv->tv_sec = d->bd_rtout / hz;
tv->tv_usec = (d->bd_rtout % hz) * tick;
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
if (cmd == BIOCGRTIMEOUT32) {
tv32 = (struct timeval32 *)addr;
tv32->tv_sec = tv->tv_sec;
@@ -2001,17 +2024,15 @@ bpf_setif(struct bpf_d *d, struct ifreq *ifr)
/* Check if interface is not being detached from BPF */
BPFIF_RLOCK(bp);
- if (bp->flags & BPFIF_FLAG_DYING) {
+ if (bp->bif_flags & BPFIF_FLAG_DYING) {
BPFIF_RUNLOCK(bp);
return (ENXIO);
}
BPFIF_RUNLOCK(bp);
/*
- * Behavior here depends on the buffering model. If we're using
- * kernel memory buffers, then we can allocate them here. If we're
- * using zero-copy, then the user process must have registered
- * buffers by the time we get here. If not, return an error.
+ * At this point, we expect the buffer is already allocated. If not,
+ * return an error.
*/
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
@@ -2131,10 +2152,10 @@ filt_bpfread(struct knote *kn, long hint)
ready = bpf_ready(d);
if (ready) {
kn->kn_data = d->bd_slen;
- while (d->bd_hbuf_in_use)
- mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
- PRINET, "bd_hbuf", 0);
- if (d->bd_hbuf)
+ /*
+ * Ignore the hold buffer if it is being copied to user space.
+ */
+ if (!d->bd_hbuf_in_use && d->bd_hbuf)
kn->kn_data += d->bd_hlen;
} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
callout_reset(&d->bd_callout, d->bd_rtout,
@@ -2405,12 +2426,19 @@ bpf_hdrlen(struct bpf_d *d)
static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
+#ifndef __rtems__
+ struct bintime bt2, boottimebin;
+#else /* __rtems__ */
struct bintime bt2;
+#endif /* __rtems__ */
struct timeval tsm;
struct timespec tsn;
if ((tstype & BPF_T_MONOTONIC) == 0) {
bt2 = *bt;
+#ifndef __rtems__
+ getboottimebin(&boottimebin);
+#endif /* __rtems__ */
bintime_add(&bt2, &boottimebin);
bt = &bt2;
}
@@ -2466,9 +2494,6 @@ catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
* spot to do it.
*/
if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
- while (d->bd_hbuf_in_use)
- mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
- PRINET, "bd_hbuf", 0);
d->bd_fbuf = d->bd_hbuf;
d->bd_hbuf = NULL;
d->bd_hlen = 0;
@@ -2511,9 +2536,7 @@ catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
++d->bd_dcount;
return;
}
- while (d->bd_hbuf_in_use)
- mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
- PRINET, "bd_hbuf", 0);
+ KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
ROTATE_BUFFERS(d);
do_wakeup = 1;
curlen = 0;
@@ -2652,10 +2675,36 @@ bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
bp->bif_hdrlen = hdrlen;
- if (bootverbose)
+ if (bootverbose && IS_DEFAULT_VNET(curvnet))
if_printf(ifp, "bpf attached\n");
}
+#ifdef VIMAGE
+/*
+ * When moving interfaces between vnet instances we need a way to
+ * query the dlt and hdrlen before detach so we can re-attach the if_bpf
+ * after the vmove. We unfortunately have no device driver infrastructure
+ * to query the interface for these values after creation/attach, thus
+ * add this as a workaround.
+ */
+int
+bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen)
+{
+
+ if (bp == NULL)
+ return (ENXIO);
+ if (bif_dlt == NULL && bif_hdrlen == NULL)
+ return (0);
+
+ if (bif_dlt != NULL)
+ *bif_dlt = bp->bif_dlt;
+ if (bif_hdrlen != NULL)
+ *bif_hdrlen = bp->bif_hdrlen;
+
+ return (0);
+}
+#endif
+
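A hedged sketch of how a vnet move might use this helper; the caller below is illustrative only, not the actual if_vmove() code:

    #ifdef VIMAGE
    /* Illustrative only: preserve bpf parameters across a vnet move. */
    static void
    example_vmove_bpf(struct ifnet *ifp)
    {
            u_int dlt = 0, hdrlen = 0;

            if (bpf_get_bp_params(ifp->if_bpf, &dlt, &hdrlen) != 0)
                    return;         /* no bpf state to carry over */
            bpfdetach(ifp);
            /* ... move ifp into the destination vnet ... */
            bpfattach(ifp, dlt, hdrlen);
    }
    #endif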
/*
* Detach bpf from an interface. This involves detaching each descriptor
* associated with the interface. Notify each descriptor as it's detached
@@ -2664,52 +2713,51 @@ bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
void
bpfdetach(struct ifnet *ifp)
{
- struct bpf_if *bp;
+ struct bpf_if *bp, *bp_temp;
struct bpf_d *d;
-#ifdef INVARIANTS
int ndetached;
ndetached = 0;
-#endif
BPF_LOCK();
/* Find all bpf_if struct's which reference ifp and detach them. */
- do {
- LIST_FOREACH(bp, &bpf_iflist, bif_next) {
- if (ifp == bp->bif_ifp)
- break;
- }
- if (bp != NULL)
- LIST_REMOVE(bp, bif_next);
+ LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
+ if (ifp != bp->bif_ifp)
+ continue;
- if (bp != NULL) {
-#ifdef INVARIANTS
- ndetached++;
-#endif
- while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
- bpf_detachd_locked(d);
- BPFD_LOCK(d);
- bpf_wakeup(d);
- BPFD_UNLOCK(d);
- }
- /* Free writer-only descriptors */
- while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
- bpf_detachd_locked(d);
- BPFD_LOCK(d);
- bpf_wakeup(d);
- BPFD_UNLOCK(d);
- }
+ LIST_REMOVE(bp, bif_next);
+ /* Add to to-be-freed list */
+ LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
- /*
- * Delay freing bp till interface is detached
- * and all routes through this interface are removed.
- * Mark bp as detached to restrict new consumers.
- */
- BPFIF_WLOCK(bp);
- bp->flags |= BPFIF_FLAG_DYING;
- BPFIF_WUNLOCK(bp);
+ ndetached++;
+ /*
+ * Delay freeing bp till interface is detached
+ * and all routes through this interface are removed.
+ * Mark bp as detached to restrict new consumers.
+ */
+ BPFIF_WLOCK(bp);
+ bp->bif_flags |= BPFIF_FLAG_DYING;
+ BPFIF_WUNLOCK(bp);
+
+ CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
+ __func__, bp->bif_dlt, bp, ifp);
+
+ /* Free common descriptors */
+ while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
+ bpf_detachd_locked(d);
+ BPFD_LOCK(d);
+ bpf_wakeup(d);
+ BPFD_UNLOCK(d);
}
- } while (bp != NULL);
+
+ /* Free writer-only descriptors */
+ while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
+ bpf_detachd_locked(d);
+ BPFD_LOCK(d);
+ bpf_wakeup(d);
+ BPFD_UNLOCK(d);
+ }
+ }
BPF_UNLOCK();
#ifdef INVARIANTS
@@ -2721,32 +2769,46 @@ bpfdetach(struct ifnet *ifp)
/*
* Interface departure handler.
* Note departure event does not guarantee interface is going down.
+ * Interface renaming is currently done via departure/arrival event set.
+ *
+ * Departure handler is called after all routes pointing to
+ * given interface are removed and interface is in down state
+ * restricting any packets to be sent/received. We assume it is now safe
+ * to free data allocated by BPF.
*/
static void
bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
{
- struct bpf_if *bp;
+ struct bpf_if *bp, *bp_temp;
+ int nmatched = 0;
BPF_LOCK();
- if ((bp = ifp->if_bpf) == NULL) {
- BPF_UNLOCK();
- return;
- }
+ /*
+ * Find matching entries in free list.
+ * Nothing should be found if bpfdetach() was not called.
+ */
+ LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
+ if (ifp != bp->bif_ifp)
+ continue;
- /* Check if bpfdetach() was called previously */
- if ((bp->flags & BPFIF_FLAG_DYING) == 0) {
- BPF_UNLOCK();
- return;
- }
+ CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
+ __func__, bp, ifp);
- CTR3(KTR_NET, "%s: freing BPF instance %p for interface %p",
- __func__, bp, ifp);
+ LIST_REMOVE(bp, bif_next);
- ifp->if_bpf = NULL;
+ rw_destroy(&bp->bif_lock);
+ free(bp, M_BPF);
+
+ nmatched++;
+ }
BPF_UNLOCK();
- rw_destroy(&bp->bif_lock);
- free(bp, M_BPF);
+ /*
+ * Note that we cannot zero other pointers to
+ * custom DLTs possibly used by given interface.
+ */
+ if (nmatched != 0)
+ ifp->if_bpf = NULL;
}
/*
@@ -2755,26 +2817,44 @@ bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
- int n, error;
struct ifnet *ifp;
struct bpf_if *bp;
+ u_int *lst;
+ int error, n, n1;
BPF_LOCK_ASSERT();
ifp = d->bd_bif->bif_ifp;
+again:
+ n1 = 0;
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ if (bp->bif_ifp == ifp)
+ n1++;
+ }
+ if (bfl->bfl_list == NULL) {
+ bfl->bfl_len = n1;
+ return (0);
+ }
+ if (n1 > bfl->bfl_len)
+ return (ENOMEM);
+ BPF_UNLOCK();
+ lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
n = 0;
- error = 0;
+ BPF_LOCK();
LIST_FOREACH(bp, &bpf_iflist, bif_next) {
if (bp->bif_ifp != ifp)
continue;
- if (bfl->bfl_list != NULL) {
- if (n >= bfl->bfl_len)
- return (ENOMEM);
- error = copyout(&bp->bif_dlt,
- bfl->bfl_list + n, sizeof(u_int));
+ if (n >= n1) {
+ free(lst, M_TEMP);
+ goto again;
}
+ lst[n] = bp->bif_dlt;
n++;
}
+ BPF_UNLOCK();
+ error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
+ free(lst, M_TEMP);
+ BPF_LOCK();
bfl->bfl_len = n;
return (error);
}
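The rewrite keeps the usual two-call contract of BIOCGDLTLIST: a first call with bfl_list == NULL just reports the count, a second call copies the DLTs out. A minimal userland sketch (error paths trimmed):

    #include <sys/ioctl.h>
    #include <net/bpf.h>
    #include <stdlib.h>

    /* Sketch: enumerate the DLTs offered by the attached interface. */
    static int
    get_dlts(int bpf_fd, struct bpf_dltlist *bfl)
    {
            bfl->bfl_list = NULL;           /* first call: count only */
            if (ioctl(bpf_fd, BIOCGDLTLIST, bfl) == -1)
                    return (-1);
            bfl->bfl_list = malloc(bfl->bfl_len * sizeof(u_int));
            if (bfl->bfl_list == NULL)
                    return (-1);
            return (ioctl(bpf_fd, BIOCGDLTLIST, bfl));  /* fill list */
    }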
@@ -2999,6 +3079,7 @@ bpf_drvinit(void *unused)
mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
LIST_INIT(&bpf_iflist);
+ LIST_INIT(&bpf_freelist);
#ifndef __rtems__
dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
@@ -3214,3 +3295,34 @@ bpf_validate(const struct bpf_insn *f, int len)
}
#endif /* !DEV_BPF && !NETGRAPH_BPF */
+
+#ifdef DDB
+static void
+bpf_show_bpf_if(struct bpf_if *bpf_if)
+{
+
+ if (bpf_if == NULL)
+ return;
+ db_printf("%p:\n", bpf_if);
+#define BPF_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, bpf_if->e);
+ /* bif_ext.bif_next */
+ /* bif_ext.bif_dlist */
+ BPF_DB_PRINTF("%#x", bif_dlt);
+ BPF_DB_PRINTF("%u", bif_hdrlen);
+ BPF_DB_PRINTF("%p", bif_ifp);
+ /* bif_lock */
+ /* bif_wlist */
+ BPF_DB_PRINTF("%#x", bif_flags);
+}
+
+DB_SHOW_COMMAND(bpf_if, db_show_bpf_if)
+{
+
+ if (!have_addr) {
+ db_printf("usage: show bpf_if <struct bpf_if *>\n");
+ return;
+ }
+
+ bpf_show_bpf_if((struct bpf_if *)addr);
+}
+#endif