author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-10-23 08:22:44 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-10-25 08:38:45 +0200
commit     b3169c2a6a01cc0555181f61b5254dd2c1f1c310 (patch)
tree       f84d67c9d17b2625481513fa6dc85929fdb08442 /freebsd/sys/net
parent     Update rtems_waf (diff)
download   rtems-libbsd-b3169c2a6a01cc0555181f61b5254dd2c1f1c310.tar.bz2
Update to FreeBSD head 2018-10-23
Git mirror commit 59f44d20be3f99d181ca742e636d45fc39ec982b. This commit updates OpenSSL to version 1.1.1. This required an update of racoon, which uses some internal OpenSSL interfaces and seems to be mostly unmaintained, e.g. there is no update in the FreeBSD ports to cope with OpenSSL 1.1.1. Update #3472.
Diffstat (limited to 'freebsd/sys/net')
-rw-r--r--  freebsd/sys/net/if.c        20
-rw-r--r--  freebsd/sys/net/if_gif.c     5
-rw-r--r--  freebsd/sys/net/if_gif.h     1
-rw-r--r--  freebsd/sys/net/if_gre.c     8
-rw-r--r--  freebsd/sys/net/if_gre.h     4
-rw-r--r--  freebsd/sys/net/if_ipsec.c 377
-rw-r--r--  freebsd/sys/net/if_lagg.c   11
-rw-r--r--  freebsd/sys/net/if_tap.c    15
-rw-r--r--  freebsd/sys/net/if_tun.c    23
-rw-r--r--  freebsd/sys/net/if_var.h     8
-rw-r--r--  freebsd/sys/net/if_vlan.c  234
-rw-r--r--  freebsd/sys/net/iflib.h      6
12 files changed, 369 insertions, 343 deletions
diff --git a/freebsd/sys/net/if.c b/freebsd/sys/net/if.c
index 4d3c303c..7721af11 100644
--- a/freebsd/sys/net/if.c
+++ b/freebsd/sys/net/if.c
@@ -271,7 +271,6 @@ static int if_setflag(struct ifnet *, int, int, int *, int);
static int if_transmit(struct ifnet *ifp, struct mbuf *m);
static void if_unroute(struct ifnet *, int flag, int fam);
static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
-static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
static void do_link_state_change(void *, int);
static int if_getgroup(struct ifgroupreq *, struct ifnet *);
@@ -974,12 +973,18 @@ if_attachdomain1(struct ifnet *ifp)
void
if_purgeaddrs(struct ifnet *ifp)
{
- struct ifaddr *ifa, *next;
+ struct ifaddr *ifa;
- NET_EPOCH_ENTER();
- CK_STAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
- if (ifa->ifa_addr->sa_family == AF_LINK)
- continue;
+ while (1) {
+ NET_EPOCH_ENTER();
+ CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ break;
+ }
+ NET_EPOCH_EXIT();
+
+ if (ifa == NULL)
+ break;
#ifdef INET
/* XXX: Ugly!! ad hoc just for INET */
if (ifa->ifa_addr->sa_family == AF_INET) {
@@ -1006,7 +1011,6 @@ if_purgeaddrs(struct ifnet *ifp)
IF_ADDR_WUNLOCK(ifp);
ifa_free(ifa);
}
- NET_EPOCH_EXIT();
}
/*
@@ -2521,7 +2525,7 @@ ifr_data_get_ptr(void *ifrp)
/*
* Hardware specific interface ioctls.
*/
-static int
+int
ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
{
struct ifreq *ifr;
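
As a rough sketch (not part of the diff; all names below are hypothetical), the if.c change replaces a single FOREACH_SAFE pass with a "find under the net epoch, work outside it, restart" loop, because code inside an epoch section must not sleep while the per-address teardown may:

/*
 * Hypothetical illustration of the restart loop used by the new
 * if_purgeaddrs(): locate one entry under the epoch, leave the epoch,
 * tear the entry down (which may sleep), then rescan from the head.
 */
struct example_entry {
	int				purge_me;
	CK_STAILQ_ENTRY(example_entry)	link;
};
CK_STAILQ_HEAD(example_head, example_entry);

static void
example_purge_all(struct example_head *head)
{
	struct example_entry *e;

	for (;;) {
		NET_EPOCH_ENTER();
		CK_STAILQ_FOREACH(e, head, link) {
			if (e->purge_me)
				break;
		}
		NET_EPOCH_EXIT();
		if (e == NULL)
			break;
		/* Teardown that may sleep runs outside the epoch section. */
		example_remove_and_free(head, e);	/* hypothetical helper */
	}
}
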
diff --git a/freebsd/sys/net/if_gif.c b/freebsd/sys/net/if_gif.c
index 5a67e7ff..3a50316f 100644
--- a/freebsd/sys/net/if_gif.c
+++ b/freebsd/sys/net/if_gif.c
@@ -278,6 +278,7 @@ gif_transmit(struct ifnet *ifp, struct mbuf *m)
uint8_t proto, ecn;
int error;
+ GIF_RLOCK();
#ifdef MAC
error = mac_ifnet_check_transmit(ifp, m);
if (error) {
@@ -286,10 +287,10 @@ gif_transmit(struct ifnet *ifp, struct mbuf *m)
}
#endif
error = ENETDOWN;
- GIF_RLOCK();
sc = ifp->if_softc;
if ((ifp->if_flags & IFF_MONITOR) != 0 ||
(ifp->if_flags & IFF_UP) == 0 ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->gif_family == 0 ||
(error = if_tunnel_check_nesting(ifp, m, MTAG_GIF,
V_max_gif_nesting)) != 0) {
@@ -680,7 +681,6 @@ gif_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
cmd == SIOCSIFPHYADDR_IN6 ||
#endif
0) {
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
if_link_state_change(ifp, LINK_STATE_UP);
}
}
@@ -695,6 +695,7 @@ gif_delete_tunnel(struct gif_softc *sc)
sx_assert(&gif_ioctl_sx, SA_XLOCKED);
if (sc->gif_family != 0) {
+ CK_LIST_REMOVE(sc, srchash);
CK_LIST_REMOVE(sc, chain);
/* Wait until it become safe to free gif_hdr */
GIF_WAIT();
diff --git a/freebsd/sys/net/if_gif.h b/freebsd/sys/net/if_gif.h
index 501a4e5d..264fe7b3 100644
--- a/freebsd/sys/net/if_gif.h
+++ b/freebsd/sys/net/if_gif.h
@@ -63,6 +63,7 @@ struct gif_softc {
} gif_uhdr;
CK_LIST_ENTRY(gif_softc) chain;
+ CK_LIST_ENTRY(gif_softc) srchash;
};
CK_LIST_HEAD(gif_list, gif_softc);
MALLOC_DECLARE(M_GIF);
diff --git a/freebsd/sys/net/if_gre.c b/freebsd/sys/net/if_gre.c
index 5ff41259..4fbc105e 100644
--- a/freebsd/sys/net/if_gre.c
+++ b/freebsd/sys/net/if_gre.c
@@ -332,7 +332,6 @@ gre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
cmd == SIOCSIFPHYADDR_IN6 ||
#endif
0) {
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
if_link_state_change(ifp, LINK_STATE_UP);
}
}
@@ -348,6 +347,7 @@ gre_delete_tunnel(struct gre_softc *sc)
sx_assert(&gre_ioctl_sx, SA_XLOCKED);
if (sc->gre_family != 0) {
CK_LIST_REMOVE(sc, chain);
+ CK_LIST_REMOVE(sc, srchash);
GRE_WAIT();
free(sc->gre_hdr, M_GRE);
sc->gre_family = 0;
@@ -549,6 +549,7 @@ gre_setseqn(struct grehdr *gh, uint32_t seq)
static int
gre_transmit(struct ifnet *ifp, struct mbuf *m)
{
+ GRE_RLOCK_TRACKER;
struct gre_softc *sc;
struct grehdr *gh;
uint32_t af;
@@ -556,6 +557,7 @@ gre_transmit(struct ifnet *ifp, struct mbuf *m)
uint16_t proto;
len = 0;
+ GRE_RLOCK();
#ifdef MAC
error = mac_ifnet_check_transmit(ifp, m);
if (error) {
@@ -564,10 +566,10 @@ gre_transmit(struct ifnet *ifp, struct mbuf *m)
}
#endif
error = ENETDOWN;
- GRE_RLOCK();
sc = ifp->if_softc;
if ((ifp->if_flags & IFF_MONITOR) != 0 ||
(ifp->if_flags & IFF_UP) == 0 ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
sc->gre_family == 0 ||
(error = if_tunnel_check_nesting(ifp, m, MTAG_GRE,
V_max_gre_nesting)) != 0) {
@@ -575,6 +577,8 @@ gre_transmit(struct ifnet *ifp, struct mbuf *m)
goto drop;
}
af = m->m_pkthdr.csum_data;
+ BPF_MTAP2(ifp, &af, sizeof(af), m);
+ m->m_flags &= ~(M_BCAST|M_MCAST);
M_SETFIB(m, sc->gre_fibnum);
M_PREPEND(m, sc->gre_hlen, M_NOWAIT);
if (m == NULL) {
diff --git a/freebsd/sys/net/if_gre.h b/freebsd/sys/net/if_gre.h
index cc8b08f9..4b93321a 100644
--- a/freebsd/sys/net/if_gre.h
+++ b/freebsd/sys/net/if_gre.h
@@ -82,6 +82,7 @@ struct gre_softc {
} gre_uhdr;
CK_LIST_ENTRY(gre_softc) chain;
+ CK_LIST_ENTRY(gre_softc) srchash;
};
CK_LIST_HEAD(gre_list, gre_softc);
MALLOC_DECLARE(M_GRE);
@@ -91,7 +92,8 @@ MALLOC_DECLARE(M_GRE);
#endif
#define GRE2IFP(sc) ((sc)->gre_ifp)
-#define GRE_RLOCK() struct epoch_tracker gre_et; epoch_enter_preempt(net_epoch_preempt, &gre_et)
+#define GRE_RLOCK_TRACKER struct epoch_tracker gre_et
+#define GRE_RLOCK() epoch_enter_preempt(net_epoch_preempt, &gre_et)
#define GRE_RUNLOCK() epoch_exit_preempt(net_epoch_preempt, &gre_et)
#define GRE_WAIT() epoch_wait_preempt(net_epoch_preempt)
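
As an illustrative aside (not part of the diff), the reason GRE_RLOCK is split into GRE_RLOCK_TRACKER plus GRE_RLOCK is that the epoch tracker must be declared together with the other local variables, while the epoch section itself can be entered later in the function body. The function below is a hypothetical user of the pair:

/* Hypothetical example of the GRE_RLOCK_TRACKER / GRE_RLOCK split. */
static int
example_gre_check(struct ifnet *ifp)
{
	GRE_RLOCK_TRACKER;	/* expands to: struct epoch_tracker gre_et */
	struct gre_softc *sc;
	int error;

	error = 0;
	GRE_RLOCK();		/* epoch_enter_preempt(net_epoch_preempt, &gre_et) */
	sc = ifp->if_softc;
	if (sc->gre_family == 0)
		error = ENETDOWN;
	GRE_RUNLOCK();		/* epoch_exit_preempt(net_epoch_preempt, &gre_et) */
	return (error);
}
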
diff --git a/freebsd/sys/net/if_ipsec.c b/freebsd/sys/net/if_ipsec.c
index 5b1d5e82..08465911 100644
--- a/freebsd/sys/net/if_ipsec.c
+++ b/freebsd/sys/net/if_ipsec.c
@@ -1,8 +1,8 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
- * Copyright (c) 2016 Yandex LLC
- * Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
+ * Copyright (c) 2016-2018 Yandex LLC
+ * Copyright (c) 2016-2018 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
-#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
@@ -63,6 +62,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
+#include <netinet/ip_encap.h>
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
@@ -89,57 +89,71 @@ static const char ipsecname[] = "ipsec";
struct ipsec_softc {
struct ifnet *ifp;
-
- struct rmlock lock;
struct secpolicy *sp[IPSEC_SPCOUNT];
-
uint32_t reqid;
u_int family;
u_int fibnum;
- LIST_ENTRY(ipsec_softc) chain;
- LIST_ENTRY(ipsec_softc) hash;
+
+ CK_LIST_ENTRY(ipsec_softc) idhash;
+ CK_LIST_ENTRY(ipsec_softc) srchash;
};
-#define IPSEC_LOCK_INIT(sc) rm_init(&(sc)->lock, "if_ipsec softc")
-#define IPSEC_LOCK_DESTROY(sc) rm_destroy(&(sc)->lock)
-#define IPSEC_RLOCK_TRACKER struct rm_priotracker ipsec_tracker
-#define IPSEC_RLOCK(sc) rm_rlock(&(sc)->lock, &ipsec_tracker)
-#define IPSEC_RUNLOCK(sc) rm_runlock(&(sc)->lock, &ipsec_tracker)
-#define IPSEC_RLOCK_ASSERT(sc) rm_assert(&(sc)->lock, RA_RLOCKED)
-#define IPSEC_WLOCK(sc) rm_wlock(&(sc)->lock)
-#define IPSEC_WUNLOCK(sc) rm_wunlock(&(sc)->lock)
-#define IPSEC_WLOCK_ASSERT(sc) rm_assert(&(sc)->lock, RA_WLOCKED)
-
-static struct rmlock ipsec_sc_lock;
-RM_SYSINIT(ipsec_sc_lock, &ipsec_sc_lock, "if_ipsec softc list");
-
-#define IPSEC_SC_RLOCK_TRACKER struct rm_priotracker ipsec_sc_tracker
-#define IPSEC_SC_RLOCK() rm_rlock(&ipsec_sc_lock, &ipsec_sc_tracker)
-#define IPSEC_SC_RUNLOCK() rm_runlock(&ipsec_sc_lock, &ipsec_sc_tracker)
-#define IPSEC_SC_RLOCK_ASSERT() rm_assert(&ipsec_sc_lock, RA_RLOCKED)
-#define IPSEC_SC_WLOCK() rm_wlock(&ipsec_sc_lock)
-#define IPSEC_SC_WUNLOCK() rm_wunlock(&ipsec_sc_lock)
-#define IPSEC_SC_WLOCK_ASSERT() rm_assert(&ipsec_sc_lock, RA_WLOCKED)
-
-LIST_HEAD(ipsec_iflist, ipsec_softc);
-VNET_DEFINE_STATIC(struct ipsec_iflist, ipsec_sc_list);
-VNET_DEFINE_STATIC(struct ipsec_iflist *, ipsec_sc_htbl);
-VNET_DEFINE_STATIC(u_long, ipsec_sc_hmask);
-#define V_ipsec_sc_list VNET(ipsec_sc_list)
-#define V_ipsec_sc_htbl VNET(ipsec_sc_htbl)
-#define V_ipsec_sc_hmask VNET(ipsec_sc_hmask)
-
-static uint32_t
-ipsec_hash(uint32_t id)
+#define IPSEC_RLOCK_TRACKER struct epoch_tracker ipsec_et
+#define IPSEC_RLOCK() epoch_enter_preempt(net_epoch_preempt, &ipsec_et)
+#define IPSEC_RUNLOCK() epoch_exit_preempt(net_epoch_preempt, &ipsec_et)
+#define IPSEC_WAIT() epoch_wait_preempt(net_epoch_preempt)
+
+#ifndef IPSEC_HASH_SIZE
+#define IPSEC_HASH_SIZE (1 << 5)
+#endif
+
+CK_LIST_HEAD(ipsec_iflist, ipsec_softc);
+VNET_DEFINE_STATIC(struct ipsec_iflist *, ipsec_idhtbl) = NULL;
+#define V_ipsec_idhtbl VNET(ipsec_idhtbl)
+
+#ifdef INET
+VNET_DEFINE_STATIC(struct ipsec_iflist *, ipsec4_srchtbl) = NULL;
+#define V_ipsec4_srchtbl VNET(ipsec4_srchtbl)
+static const struct srcaddrtab *ipsec4_srctab = NULL;
+#endif
+
+#ifdef INET6
+VNET_DEFINE_STATIC(struct ipsec_iflist *, ipsec6_srchtbl) = NULL;
+#define V_ipsec6_srchtbl VNET(ipsec6_srchtbl)
+static const struct srcaddrtab *ipsec6_srctab = NULL;
+#endif
+
+static struct ipsec_iflist *
+ipsec_idhash(uint32_t id)
{
- return (fnv_32_buf(&id, sizeof(id), FNV1_32_INIT));
+ return (&V_ipsec_idhtbl[fnv_32_buf(&id, sizeof(id),
+ FNV1_32_INIT) & (IPSEC_HASH_SIZE - 1)]);
}
-#define SCHASH_NHASH_LOG2 5
-#define SCHASH_NHASH (1 << SCHASH_NHASH_LOG2)
-#define SCHASH_HASHVAL(id) (ipsec_hash((id)) & V_ipsec_sc_hmask)
-#define SCHASH_HASH(id) &V_ipsec_sc_htbl[SCHASH_HASHVAL(id)]
+static struct ipsec_iflist *
+ipsec_srchash(const struct sockaddr *sa)
+{
+ uint32_t hval;
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ hval = fnv_32_buf(
+ &((const struct sockaddr_in *)sa)->sin_addr.s_addr,
+ sizeof(in_addr_t), FNV1_32_INIT);
+ return (&V_ipsec4_srchtbl[hval & (IPSEC_HASH_SIZE - 1)]);
+#endif
+#ifdef INET6
+ case AF_INET6:
+ hval = fnv_32_buf(
+ &((const struct sockaddr_in6 *)sa)->sin6_addr,
+ sizeof(struct in6_addr), FNV1_32_INIT);
+ return (&V_ipsec6_srchtbl[hval & (IPSEC_HASH_SIZE - 1)]);
+#endif
+ }
+ return (NULL);
+}
/*
* ipsec_ioctl_sx protects from concurrent ioctls.
@@ -150,12 +164,14 @@ SX_SYSINIT(ipsec_ioctl_sx, &ipsec_ioctl_sx, "ipsec_ioctl");
static int ipsec_init_reqid(struct ipsec_softc *);
static int ipsec_set_tunnel(struct ipsec_softc *, struct sockaddr *,
struct sockaddr *, uint32_t);
-static void ipsec_delete_tunnel(struct ifnet *, int);
+static void ipsec_delete_tunnel(struct ipsec_softc *);
static int ipsec_set_addresses(struct ifnet *, struct sockaddr *,
struct sockaddr *);
-static int ipsec_set_reqid(struct ifnet *, uint32_t);
+static int ipsec_set_reqid(struct ipsec_softc *, uint32_t);
+static void ipsec_set_running(struct ipsec_softc *);
+static void ipsec_srcaddr(void *, const struct sockaddr *, int);
static int ipsec_ioctl(struct ifnet *, u_long, caddr_t);
static int ipsec_transmit(struct ifnet *, struct mbuf *);
static int ipsec_output(struct ifnet *, struct mbuf *,
@@ -180,7 +196,6 @@ ipsec_clone_create(struct if_clone *ifc, int unit, caddr_t params)
sc->fibnum = BSD_DEFAULT_FIB;
#endif /* __rtems__ */
sc->ifp = ifp = if_alloc(IFT_TUNNEL);
- IPSEC_LOCK_INIT(sc);
ifp->if_softc = sc;
if_initname(ifp, ipsecname, unit);
@@ -194,9 +209,6 @@ ipsec_clone_create(struct if_clone *ifc, int unit, caddr_t params)
if_attach(ifp);
bpfattach(ifp, DLT_NULL, sizeof(uint32_t));
- IPSEC_SC_WLOCK();
- LIST_INSERT_HEAD(&V_ipsec_sc_list, sc, chain);
- IPSEC_SC_WUNLOCK();
return (0);
}
@@ -207,28 +219,48 @@ ipsec_clone_destroy(struct ifnet *ifp)
sx_xlock(&ipsec_ioctl_sx);
sc = ifp->if_softc;
-
- IPSEC_SC_WLOCK();
- ipsec_delete_tunnel(ifp, 1);
- LIST_REMOVE(sc, chain);
- IPSEC_SC_WUNLOCK();
-
+ ipsec_delete_tunnel(sc);
bpfdetach(ifp);
if_detach(ifp);
ifp->if_softc = NULL;
sx_xunlock(&ipsec_ioctl_sx);
+ IPSEC_WAIT();
if_free(ifp);
- IPSEC_LOCK_DESTROY(sc);
free(sc, M_IPSEC);
}
+static struct ipsec_iflist *
+ipsec_hashinit(void)
+{
+ struct ipsec_iflist *hash;
+ int i;
+
+ hash = malloc(sizeof(struct ipsec_iflist) * IPSEC_HASH_SIZE,
+ M_IPSEC, M_WAITOK);
+ for (i = 0; i < IPSEC_HASH_SIZE; i++)
+ CK_LIST_INIT(&hash[i]);
+
+ return (hash);
+}
+
static void
vnet_ipsec_init(const void *unused __unused)
{
- LIST_INIT(&V_ipsec_sc_list);
- V_ipsec_sc_htbl = hashinit(SCHASH_NHASH, M_IPSEC, &V_ipsec_sc_hmask);
+ V_ipsec_idhtbl = ipsec_hashinit();
+#ifdef INET
+ V_ipsec4_srchtbl = ipsec_hashinit();
+ if (IS_DEFAULT_VNET(curvnet))
+ ipsec4_srctab = ip_encap_register_srcaddr(ipsec_srcaddr,
+ NULL, M_WAITOK);
+#endif
+#ifdef INET6
+ V_ipsec6_srchtbl = ipsec_hashinit();
+ if (IS_DEFAULT_VNET(curvnet))
+ ipsec6_srctab = ip6_encap_register_srcaddr(ipsec_srcaddr,
+ NULL, M_WAITOK);
+#endif
V_ipsec_cloner = if_clone_simple(ipsecname, ipsec_clone_create,
ipsec_clone_destroy, 0);
}
@@ -240,7 +272,17 @@ vnet_ipsec_uninit(const void *unused __unused)
{
if_clone_detach(V_ipsec_cloner);
- hashdestroy(V_ipsec_sc_htbl, M_IPSEC, V_ipsec_sc_hmask);
+ free(V_ipsec_idhtbl, M_IPSEC);
+#ifdef INET
+ if (IS_DEFAULT_VNET(curvnet))
+ ip_encap_unregister_srcaddr(ipsec4_srctab);
+ free(V_ipsec4_srchtbl, M_IPSEC);
+#endif
+#ifdef INET6
+ if (IS_DEFAULT_VNET(curvnet))
+ ip6_encap_unregister_srcaddr(ipsec6_srctab);
+ free(V_ipsec6_srchtbl, M_IPSEC);
+#endif
}
VNET_SYSUNINIT(vnet_ipsec_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
vnet_ipsec_uninit, NULL);
@@ -287,6 +329,7 @@ ipsec_transmit(struct ifnet *ifp, struct mbuf *m)
uint32_t af;
int error;
+ IPSEC_RLOCK();
#ifdef MAC
error = mac_ifnet_check_transmit(ifp, m);
if (error) {
@@ -298,7 +341,7 @@ ipsec_transmit(struct ifnet *ifp, struct mbuf *m)
sc = ifp->if_softc;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
(ifp->if_flags & IFF_MONITOR) != 0 ||
- (ifp->if_flags & IFF_UP) == 0) {
+ (ifp->if_flags & IFF_UP) == 0 || sc->family == 0) {
m_freem(m);
goto err;
}
@@ -333,16 +376,9 @@ ipsec_transmit(struct ifnet *ifp, struct mbuf *m)
goto err;
}
- IPSEC_RLOCK(sc);
- if (sc->family == 0) {
- IPSEC_RUNLOCK(sc);
- m_freem(m);
- goto err;
- }
sp = ipsec_getpolicy(sc, IPSEC_DIR_OUTBOUND, af);
key_addref(sp);
M_SETFIB(m, sc->fibnum);
- IPSEC_RUNLOCK(sc);
BPF_MTAP2(ifp, &af, sizeof(af), m);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
@@ -365,6 +401,7 @@ ipsec_transmit(struct ifnet *ifp, struct mbuf *m)
err:
if (error != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ IPSEC_RUNLOCK();
return (error);
}
@@ -385,7 +422,7 @@ ipsec_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
int
ipsec_if_input(struct mbuf *m, struct secasvar *sav, uint32_t af)
{
- IPSEC_SC_RLOCK_TRACKER;
+ IPSEC_RLOCK_TRACKER;
struct secasindex *saidx;
struct ipsec_softc *sc;
struct ifnet *ifp;
@@ -400,13 +437,10 @@ ipsec_if_input(struct mbuf *m, struct secasvar *sav, uint32_t af)
sav->sah->saidx.proto != IPPROTO_ESP)
return (0);
- IPSEC_SC_RLOCK();
- /*
- * We only acquire SC_RLOCK() while we are doing search in
- * ipsec_sc_htbl. It is safe, because removing softc or changing
- * of reqid/addresses requires removing from hash table.
- */
- LIST_FOREACH(sc, SCHASH_HASH(sav->sah->saidx.reqid), hash) {
+ IPSEC_RLOCK();
+ CK_LIST_FOREACH(sc, ipsec_idhash(sav->sah->saidx.reqid), idhash) {
+ if (sc->family == 0)
+ continue;
saidx = ipsec_getsaidx(sc, IPSEC_DIR_INBOUND,
sav->sah->saidx.src.sa.sa_family);
/* SA's reqid should match reqid in SP */
@@ -422,14 +456,14 @@ ipsec_if_input(struct mbuf *m, struct secasvar *sav, uint32_t af)
break;
}
if (sc == NULL) {
- IPSEC_SC_RUNLOCK();
+ IPSEC_RUNLOCK();
/* Tunnel was not found. Nothing to do. */
return (0);
}
ifp = sc->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
(ifp->if_flags & IFF_UP) == 0) {
- IPSEC_SC_RUNLOCK();
+ IPSEC_RUNLOCK();
m_freem(m);
return (ENETDOWN);
}
@@ -438,7 +472,6 @@ ipsec_if_input(struct mbuf *m, struct secasvar *sav, uint32_t af)
* Set its ifnet as receiving interface.
*/
m->m_pkthdr.rcvif = ifp;
- IPSEC_SC_RUNLOCK();
m_clrprotoflags(m);
M_SETFIB(m, ifp->if_fib);
@@ -446,17 +479,17 @@ ipsec_if_input(struct mbuf *m, struct secasvar *sav, uint32_t af)
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
if ((ifp->if_flags & IFF_MONITOR) != 0) {
+ IPSEC_RUNLOCK();
m_freem(m);
return (ENETDOWN);
}
+ IPSEC_RUNLOCK();
return (0);
}
-/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
-int
+static int
ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
- IPSEC_RLOCK_TRACKER;
struct ifreq *ifr = (struct ifreq*)data;
struct sockaddr *dst, *src;
struct ipsec_softc *sc;
@@ -570,9 +603,10 @@ ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#endif
#ifdef INET6
case AF_INET6:
- if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(src)->sin6_addr)
- ||
- IN6_IS_ADDR_UNSPECIFIED(&satosin6(dst)->sin6_addr))
+ if (IN6_IS_ADDR_UNSPECIFIED(
+ &satosin6(src)->sin6_addr) ||
+ IN6_IS_ADDR_UNSPECIFIED(
+ &satosin6(dst)->sin6_addr))
goto bad;
/*
* Check validity of the scope zone ID of the
@@ -590,7 +624,7 @@ ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
error = ipsec_set_addresses(ifp, src, dst);
break;
case SIOCDIFPHYADDR:
- ipsec_delete_tunnel(ifp, 0);
+ ipsec_delete_tunnel(sc);
break;
case SIOCGIFPSRCADDR:
case SIOCGIFPDSTADDR:
@@ -598,9 +632,7 @@ ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCGIFPSRCADDR_IN6:
case SIOCGIFPDSTADDR_IN6:
#endif
- IPSEC_RLOCK(sc);
if (sc->family == 0) {
- IPSEC_RUNLOCK(sc);
error = EADDRNOTAVAIL;
break;
}
@@ -656,7 +688,6 @@ ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#endif
}
}
- IPSEC_RUNLOCK(sc);
if (error != 0)
break;
switch (cmd) {
@@ -702,7 +733,7 @@ ipsec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
error = copyin(ifr_data_get_ptr(ifr), &reqid, sizeof(reqid));
if (error != 0)
break;
- error = ipsec_set_reqid(ifp, reqid);
+ error = ipsec_set_reqid(sc, reqid);
break;
default:
error = EINVAL;
@@ -714,6 +745,59 @@ bad:
}
/*
+ * Check that ingress address belongs to local host.
+ */
+static void
+ipsec_set_running(struct ipsec_softc *sc)
+{
+ struct secasindex *saidx;
+ int localip;
+
+ saidx = ipsec_getsaidx(sc, IPSEC_DIR_OUTBOUND, sc->family);
+ localip = 0;
+ switch (sc->family) {
+#ifdef INET
+ case AF_INET:
+ localip = in_localip(saidx->src.sin.sin_addr);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ localip = in6_localip(&saidx->src.sin6.sin6_addr);
+ break;
+#endif
+ }
+ if (localip != 0)
+ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+}
+
+/*
+ * ifaddr_event handler.
+ * Clear IFF_DRV_RUNNING flag when ingress address disappears to prevent
+ * source address spoofing.
+ */
+static void
+ipsec_srcaddr(void *arg __unused, const struct sockaddr *sa,
+ int event __unused)
+{
+ struct ipsec_softc *sc;
+ struct secasindex *saidx;
+
+ MPASS(in_epoch(net_epoch_preempt));
+ CK_LIST_FOREACH(sc, ipsec_srchash(sa), srchash) {
+ if (sc->family == 0)
+ continue;
+ saidx = ipsec_getsaidx(sc, IPSEC_DIR_OUTBOUND, sa->sa_family);
+ if (saidx == NULL ||
+ key_sockaddrcmp(&saidx->src.sa, sa, 0) != 0)
+ continue;
+ ipsec_set_running(sc);
+ }
+}
+
+/*
* Allocate new private security policies for tunneling interface.
* Each tunneling interface has following security policies for
* both AF:
@@ -785,8 +869,8 @@ ipsec_check_reqid(uint32_t reqid)
{
struct ipsec_softc *sc;
- IPSEC_SC_RLOCK_ASSERT();
- LIST_FOREACH(sc, &V_ipsec_sc_list, chain) {
+ sx_assert(&ipsec_ioctl_sx, SA_XLOCKED);
+ CK_LIST_FOREACH(sc, ipsec_idhash(reqid), idhash) {
if (sc->reqid == reqid)
return (EEXIST);
}
@@ -806,8 +890,7 @@ ipsec_init_reqid(struct ipsec_softc *sc)
uint32_t reqid;
int trycount;
- IPSEC_SC_RLOCK_ASSERT();
-
+ sx_assert(&ipsec_ioctl_sx, SA_XLOCKED);
if (sc->reqid != 0) /* already initialized */
return (0);
@@ -820,6 +903,7 @@ ipsec_init_reqid(struct ipsec_softc *sc)
if (trycount == 0)
return (EEXIST);
sc->reqid = reqid;
+ CK_LIST_INSERT_HEAD(ipsec_idhash(reqid), sc, idhash);
return (0);
}
@@ -830,34 +914,30 @@ ipsec_init_reqid(struct ipsec_softc *sc)
* Also softc would not disappear while we hold ioctl_sx lock.
*/
static int
-ipsec_set_reqid(struct ifnet *ifp, uint32_t reqid)
+ipsec_set_reqid(struct ipsec_softc *sc, uint32_t reqid)
{
- IPSEC_SC_RLOCK_TRACKER;
- struct ipsec_softc *sc;
struct secasindex *saidx;
sx_assert(&ipsec_ioctl_sx, SA_XLOCKED);
- sc = ifp->if_softc;
if (sc->reqid == reqid && reqid != 0)
return (0);
- IPSEC_SC_RLOCK();
if (reqid != 0) {
/* Check that specified reqid doesn't exist */
- if (ipsec_check_reqid(reqid) != 0) {
- IPSEC_SC_RUNLOCK();
+ if (ipsec_check_reqid(reqid) != 0)
return (EEXIST);
+ if (sc->reqid != 0) {
+ CK_LIST_REMOVE(sc, idhash);
+ IPSEC_WAIT();
}
sc->reqid = reqid;
+ CK_LIST_INSERT_HEAD(ipsec_idhash(reqid), sc, idhash);
} else {
/* Generate new reqid */
- if (ipsec_init_reqid(sc) != 0) {
- IPSEC_SC_RUNLOCK();
+ if (ipsec_init_reqid(sc) != 0)
return (EEXIST);
- }
}
- IPSEC_SC_RUNLOCK();
/* Tunnel isn't fully configured, just return. */
if (sc->family == 0)
@@ -877,7 +957,6 @@ static int
ipsec_set_addresses(struct ifnet *ifp, struct sockaddr *src,
struct sockaddr *dst)
{
- IPSEC_SC_RLOCK_TRACKER;
struct ipsec_softc *sc, *tsc;
struct secasindex *saidx;
@@ -893,43 +972,21 @@ ipsec_set_addresses(struct ifnet *ifp, struct sockaddr *src,
return (0); /* Nothing has been changed. */
}
- /*
- * We cannot service IPsec tunnel when source address is
- * not our own.
- */
-#ifdef INET
- if (src->sa_family == AF_INET &&
- in_localip(satosin(src)->sin_addr) == 0)
- return (EADDRNOTAVAIL);
-#endif
-#ifdef INET6
- /*
- * NOTE: IPv6 addresses are in kernel internal form with
- * embedded scope zone id.
- */
- if (src->sa_family == AF_INET6 &&
- in6_localip(&satosin6(src)->sin6_addr) == 0)
- return (EADDRNOTAVAIL);
-#endif
/* Check that given addresses aren't already configured */
- IPSEC_SC_RLOCK();
- LIST_FOREACH(tsc, &V_ipsec_sc_list, chain) {
- if (tsc == sc || tsc->family != src->sa_family)
+ CK_LIST_FOREACH(tsc, ipsec_srchash(src), srchash) {
+ if (tsc == sc)
continue;
+ MPASS(tsc->family == src->sa_family);
saidx = ipsec_getsaidx(tsc, IPSEC_DIR_OUTBOUND, tsc->family);
if (key_sockaddrcmp(&saidx->src.sa, src, 0) == 0 &&
key_sockaddrcmp(&saidx->dst.sa, dst, 0) == 0) {
/* We already have tunnel with such addresses */
- IPSEC_SC_RUNLOCK();
return (EADDRNOTAVAIL);
}
}
/* If reqid is not set, generate new one. */
- if (ipsec_init_reqid(sc) != 0) {
- IPSEC_SC_RUNLOCK();
+ if (ipsec_init_reqid(sc) != 0)
return (EEXIST);
- }
- IPSEC_SC_RUNLOCK();
return (ipsec_set_tunnel(sc, src, dst, sc->reqid));
}
@@ -938,8 +995,7 @@ ipsec_set_tunnel(struct ipsec_softc *sc, struct sockaddr *src,
struct sockaddr *dst, uint32_t reqid)
{
struct secpolicy *sp[IPSEC_SPCOUNT];
- struct secpolicy *oldsp[IPSEC_SPCOUNT];
- int i, f;
+ int i;
sx_assert(&ipsec_ioctl_sx, SA_XLOCKED);
@@ -951,58 +1007,41 @@ ipsec_set_tunnel(struct ipsec_softc *sc, struct sockaddr *src,
key_freesp(&sp[i]);
return (EAGAIN);
}
- IPSEC_SC_WLOCK();
- if ((f = sc->family) != 0)
- LIST_REMOVE(sc, hash);
- IPSEC_WLOCK(sc);
- for (i = 0; i < IPSEC_SPCOUNT; i++) {
- oldsp[i] = sc->sp[i];
+ if (sc->family != 0)
+ ipsec_delete_tunnel(sc);
+ for (i = 0; i < IPSEC_SPCOUNT; i++)
sc->sp[i] = sp[i];
- }
sc->family = src->sa_family;
- IPSEC_WUNLOCK(sc);
- LIST_INSERT_HEAD(SCHASH_HASH(sc->reqid), sc, hash);
- IPSEC_SC_WUNLOCK();
+ CK_LIST_INSERT_HEAD(ipsec_srchash(src), sc, srchash);
} else {
sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
return (ENOMEM);
}
-
- sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
- if (f != 0) {
- key_unregister_ifnet(oldsp, IPSEC_SPCOUNT);
- for (i = 0; i < IPSEC_SPCOUNT; i++)
- key_freesp(&oldsp[i]);
- }
+ ipsec_set_running(sc);
return (0);
}
static void
-ipsec_delete_tunnel(struct ifnet *ifp, int locked)
+ipsec_delete_tunnel(struct ipsec_softc *sc)
{
- struct ipsec_softc *sc = ifp->if_softc;
- struct secpolicy *oldsp[IPSEC_SPCOUNT];
int i;
sx_assert(&ipsec_ioctl_sx, SA_XLOCKED);
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if (sc->family != 0) {
- if (!locked)
- IPSEC_SC_WLOCK();
- /* Remove from hash table */
- LIST_REMOVE(sc, hash);
- IPSEC_WLOCK(sc);
- for (i = 0; i < IPSEC_SPCOUNT; i++) {
- oldsp[i] = sc->sp[i];
- sc->sp[i] = NULL;
- }
+ CK_LIST_REMOVE(sc, srchash);
+ IPSEC_WAIT();
+
+ /*
+ * Make sure that ipsec_if_input() will not do access
+ * to softc's policies.
+ */
sc->family = 0;
- IPSEC_WUNLOCK(sc);
- if (!locked)
- IPSEC_SC_WUNLOCK();
- key_unregister_ifnet(oldsp, IPSEC_SPCOUNT);
+ IPSEC_WAIT();
+
+ key_unregister_ifnet(sc->sp, IPSEC_SPCOUNT);
for (i = 0; i < IPSEC_SPCOUNT; i++)
- key_freesp(&oldsp[i]);
+ key_freesp(&sc->sp[i]);
}
}
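
Sketched below (not part of the diff; the lookup helper is hypothetical) is the pattern the new idhash table relies on: FNV-1a over the reqid selects one of the IPSEC_HASH_SIZE buckets (a power of two, so a mask suffices), and the CK_LIST walk happens under the net epoch instead of the old rmlock:

/* Hypothetical sketch of a reqid lookup in the new idhash table. */
static struct ipsec_softc *
example_ipsec_lookup_reqid(uint32_t reqid)
{
	struct ipsec_softc *sc;

	MPASS(in_epoch(net_epoch_preempt));	/* caller entered the net epoch */
	CK_LIST_FOREACH(sc, ipsec_idhash(reqid), idhash) {
		if (sc->reqid == reqid && sc->family != 0)
			return (sc);
	}
	return (NULL);
}
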
diff --git a/freebsd/sys/net/if_lagg.c b/freebsd/sys/net/if_lagg.c
index 4d5aaa29..632ea744 100644
--- a/freebsd/sys/net/if_lagg.c
+++ b/freebsd/sys/net/if_lagg.c
@@ -2035,15 +2035,18 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
struct lagg_port *lp_next;
- int i = 0;
+ int i = 0, rv;
+ rv = 0;
bzero(&lb->lb_ports, sizeof(lb->lb_ports));
LAGG_RLOCK();
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
if (lp_next == lp)
continue;
- if (i >= LAGG_MAX_PORTS)
- return (EINVAL);
+ if (i >= LAGG_MAX_PORTS) {
+ rv = EINVAL;
+ break;
+ }
if (sc->sc_ifflags & IFF_DEBUG)
printf("%s: port %s at index %d\n",
sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
@@ -2051,7 +2054,7 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
}
LAGG_RUNLOCK();
- return (0);
+ return (rv);
}
static int
diff --git a/freebsd/sys/net/if_tap.c b/freebsd/sys/net/if_tap.c
index c918a14e..a540a59a 100644
--- a/freebsd/sys/net/if_tap.c
+++ b/freebsd/sys/net/if_tap.c
@@ -729,10 +729,12 @@ tapifstart(struct ifnet *ifp)
static int
tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
+ struct ifreq ifr;
struct tap_softc *tp = dev->si_drv1;
struct ifnet *ifp = tp->tap_ifp;
struct tapinfo *tapp = NULL;
int f;
+ int error;
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
defined(COMPAT_FREEBSD4)
int ival;
@@ -744,7 +746,18 @@ tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td
if (ifp->if_type != tapp->type)
return (EPROTOTYPE);
mtx_lock(&tp->tap_mtx);
- ifp->if_mtu = tapp->mtu;
+ if (ifp->if_mtu != tapp->mtu) {
+ strlcpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ);
+ ifr.ifr_mtu = tapp->mtu;
+ CURVNET_SET(ifp->if_vnet);
+ error = ifhwioctl(SIOCSIFMTU, ifp,
+ (caddr_t)&ifr, td);
+ CURVNET_RESTORE();
+ if (error) {
+ mtx_unlock(&tp->tap_mtx);
+ return (error);
+ }
+ }
ifp->if_baudrate = tapp->baudrate;
mtx_unlock(&tp->tap_mtx);
break;
diff --git a/freebsd/sys/net/if_tun.c b/freebsd/sys/net/if_tun.c
index 14a75645..328b1963 100644
--- a/freebsd/sys/net/if_tun.c
+++ b/freebsd/sys/net/if_tun.c
@@ -672,24 +672,29 @@ static int
tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
struct thread *td)
{
- int error;
+ struct ifreq ifr;
struct tun_softc *tp = dev->si_drv1;
struct tuninfo *tunp;
+ int error;
switch (cmd) {
case TUNSIFINFO:
tunp = (struct tuninfo *)data;
- if (tunp->mtu < IF_MINMTU)
- return (EINVAL);
- if (TUN2IFP(tp)->if_mtu != tunp->mtu) {
- error = priv_check(td, PRIV_NET_SETIFMTU);
- if (error)
- return (error);
- }
if (TUN2IFP(tp)->if_type != tunp->type)
return (EPROTOTYPE);
mtx_lock(&tp->tun_mtx);
- TUN2IFP(tp)->if_mtu = tunp->mtu;
+ if (TUN2IFP(tp)->if_mtu != tunp->mtu) {
+ strlcpy(ifr.ifr_name, if_name(TUN2IFP(tp)), IFNAMSIZ);
+ ifr.ifr_mtu = tunp->mtu;
+ CURVNET_SET(TUN2IFP(tp)->if_vnet);
+ error = ifhwioctl(SIOCSIFMTU, TUN2IFP(tp),
+ (caddr_t)&ifr, td);
+ CURVNET_RESTORE();
+ if (error) {
+ mtx_unlock(&tp->tun_mtx);
+ return (error);
+ }
+ }
TUN2IFP(tp)->if_baudrate = tunp->baudrate;
mtx_unlock(&tp->tun_mtx);
break;
diff --git a/freebsd/sys/net/if_var.h b/freebsd/sys/net/if_var.h
index 00fcbebd..6504837b 100644
--- a/freebsd/sys/net/if_var.h
+++ b/freebsd/sys/net/if_var.h
@@ -434,6 +434,7 @@ struct rtems_ifinputreq {
#define NET_EPOCH_ENTER_ET(et) epoch_enter_preempt(net_epoch_preempt, &(et))
#define NET_EPOCH_EXIT() epoch_exit_preempt(net_epoch_preempt, &nep_et)
#define NET_EPOCH_EXIT_ET(et) epoch_exit_preempt(net_epoch_preempt, &(et))
+#define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt)
/*
@@ -454,6 +455,11 @@ EVENTHANDLER_DECLARE(iflladdr_event, iflladdr_event_handler_t);
/* interface address change event */
typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *);
EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t);
+typedef void (*ifaddr_event_ext_handler_t)(void *, struct ifnet *,
+ struct ifaddr *, int);
+EVENTHANDLER_DECLARE(ifaddr_event_ext, ifaddr_event_ext_handler_t);
+#define IFADDR_EVENT_ADD 0
+#define IFADDR_EVENT_DEL 1
/* new interface arrival event */
typedef void (*ifnet_arrival_event_handler_t)(void *, struct ifnet *);
EVENTHANDLER_DECLARE(ifnet_arrival_event, ifnet_arrival_event_handler_t);
@@ -782,6 +788,8 @@ int if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *);
/* accessors for struct ifreq */
void *ifr_data_get_ptr(void *ifrp);
+int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
+
#ifdef DEVICE_POLLING
enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS };
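
For illustration only (consumer code, not part of this diff): the new ifaddr_event_ext event carries the interface, the affected ifaddr and an IFADDR_EVENT_ADD or IFADDR_EVENT_DEL code, and would be consumed through the usual EVENTHANDLER mechanism. Everything except the names declared above is hypothetical:

/* Hypothetical consumer of the new ifaddr_event_ext event. */
static eventhandler_tag example_tag;

static void
example_ifaddr_ext(void *arg __unused, struct ifnet *ifp,
    struct ifaddr *ifa __unused, int event)
{
	if (event == IFADDR_EVENT_ADD)
		printf("%s: address added\n", if_name(ifp));
	else if (event == IFADDR_EVENT_DEL)
		printf("%s: address deleted\n", if_name(ifp));
}

static void
example_register(void)
{
	example_tag = EVENTHANDLER_REGISTER(ifaddr_event_ext,
	    example_ifaddr_ext, NULL, EVENTHANDLER_PRI_ANY);
}
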
diff --git a/freebsd/sys/net/if_vlan.c b/freebsd/sys/net/if_vlan.c
index 22061dc4..6f07b4b4 100644
--- a/freebsd/sys/net/if_vlan.c
+++ b/freebsd/sys/net/if_vlan.c
@@ -89,11 +89,11 @@ __FBSDID("$FreeBSD$");
#define UP_AND_RUNNING(ifp) \
((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)
-LIST_HEAD(ifvlanhead, ifvlan);
+CK_SLIST_HEAD(ifvlanhead, ifvlan);
struct ifvlantrunk {
struct ifnet *parent; /* parent interface of this trunk */
- struct rmlock lock;
+ struct mtx lock;
#ifdef VLAN_ARRAY
#define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1)
struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
@@ -119,7 +119,7 @@ struct ifvlantrunk {
struct ifvlan *_next; \
size_t _i; \
for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
- LIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
+ CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */
/*
@@ -148,13 +148,14 @@ struct ifvlantrunk {
for (_i = 0; \
!(_cond) && _i < (1 << (_trunk)->hwidth); \
_i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
- if (((_ifv) = LIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
+ if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
(_touch = true))
#endif /* VLAN_ARRAY */
struct vlan_mc_entry {
struct sockaddr_dl mc_addr;
- SLIST_ENTRY(vlan_mc_entry) mc_entries;
+ CK_SLIST_ENTRY(vlan_mc_entry) mc_entries;
+ struct epoch_context mc_epoch_ctx;
};
struct ifvlan {
@@ -175,9 +176,9 @@ struct ifvlan {
uint8_t ifvm_pcp; /* Priority Code Point (PCP). */
} ifv_mib;
struct task lladdr_task;
- SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
+ CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
- LIST_ENTRY(ifvlan) ifv_list;
+ CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define ifv_proto ifv_mib.ifvm_proto
@@ -207,55 +208,36 @@ static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;
/*
- * if_vlan uses two module-level locks to allow concurrent modification of vlan
- * interfaces and (mostly) allow for vlans to be destroyed while they are being
- * used for tx/rx. To accomplish this in a way that has acceptable performance
- * and cooperation with other parts of the network stack there is a
- * non-sleepable rmlock(9) and an sx(9). Both locks are exclusively acquired
- * when destroying a vlan interface, i.e. when the if_vlantrunk field of struct
- * ifnet is de-allocated and NULL'd. Thus a reader holding either lock has a
- * guarantee that the struct ifvlantrunk references a valid vlan trunk.
+ * if_vlan uses two module-level synchronizations primitives to allow concurrent
+ * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
+ * while they are being used for tx/rx. To accomplish this in a way that has
+ * acceptable performance and cooperation with other parts of the network stack
+ * there is a non-sleepable epoch(9) and an sx(9).
*
- * The performance-sensitive paths that warrant using the rmlock(9) are
+ * The performance-sensitive paths that warrant using the epoch(9) are
* vlan_transmit and vlan_input. Both have to check for the vlan interface's
* existence using if_vlantrunk, and being in the network tx/rx paths the use
- * of an rmlock(9) gives a measureable improvement in performance.
+ * of an epoch(9) gives a measureable improvement in performance.
*
* The reason for having an sx(9) is mostly because there are still areas that
* must be sleepable and also have safe concurrent access to a vlan interface.
* Since the sx(9) exists, it is used by default in most paths unless sleeping
* is not permitted, or if it is not clear whether sleeping is permitted.
*
- * Note that despite these protections, there is still an inherent race in the
- * destruction of vlans since there's no guarantee that the ifnet hasn't been
- * freed/reused when the tx/rx functions are called by the stack. This can only
- * be fixed by addressing ifnet's lifetime issues.
*/
-#define _VLAN_RM_ID ifv_rm_lock
#define _VLAN_SX_ID ifv_sx
-static struct rmlock _VLAN_RM_ID;
static struct sx _VLAN_SX_ID;
#define VLAN_LOCKING_INIT() \
- rm_init(&_VLAN_RM_ID, "vlan_rm"); \
sx_init(&_VLAN_SX_ID, "vlan_sx")
#define VLAN_LOCKING_DESTROY() \
- rm_destroy(&_VLAN_RM_ID); \
sx_destroy(&_VLAN_SX_ID)
-#define _VLAN_RM_TRACKER _vlan_rm_tracker
-#define VLAN_RLOCK() rm_rlock(&_VLAN_RM_ID, \
- &_VLAN_RM_TRACKER)
-#define VLAN_RUNLOCK() rm_runlock(&_VLAN_RM_ID, \
- &_VLAN_RM_TRACKER)
-#define VLAN_WLOCK() rm_wlock(&_VLAN_RM_ID)
-#define VLAN_WUNLOCK() rm_wunlock(&_VLAN_RM_ID)
-#define VLAN_RLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_RLOCKED)
-#define VLAN_WLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_WLOCKED)
-#define VLAN_RWLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_LOCKED)
-#define VLAN_LOCK_READER struct rm_priotracker _VLAN_RM_TRACKER
+#define VLAN_RLOCK() NET_EPOCH_ENTER();
+#define VLAN_RUNLOCK() NET_EPOCH_EXIT();
+#define VLAN_RLOCK_ASSERT() MPASS(in_epoch(net_epoch_preempt))
#define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID)
#define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID)
@@ -267,25 +249,18 @@ static struct sx _VLAN_SX_ID;
/*
- * We also have a per-trunk rmlock(9), that is locked shared on packet
- * processing and exclusive when configuration is changed. Note: This should
- * only be acquired while there is a shared lock on either of the global locks
- * via VLAN_SLOCK or VLAN_RLOCK. Thus, an exclusive lock on the global locks
- * makes a call to TRUNK_RLOCK/TRUNK_WLOCK technically superfluous.
+ * We also have a per-trunk mutex that should be acquired when changing
+ * its state.
*/
-#define _TRUNK_RM_TRACKER _trunk_rm_tracker
-#define TRUNK_LOCK_INIT(trunk) rm_init(&(trunk)->lock, vlanname)
-#define TRUNK_LOCK_DESTROY(trunk) rm_destroy(&(trunk)->lock)
-#define TRUNK_RLOCK(trunk) rm_rlock(&(trunk)->lock, \
- &_TRUNK_RM_TRACKER)
-#define TRUNK_WLOCK(trunk) rm_wlock(&(trunk)->lock)
-#define TRUNK_RUNLOCK(trunk) rm_runlock(&(trunk)->lock, \
- &_TRUNK_RM_TRACKER)
-#define TRUNK_WUNLOCK(trunk) rm_wunlock(&(trunk)->lock)
-#define TRUNK_RLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_RLOCKED)
-#define TRUNK_LOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_LOCKED)
-#define TRUNK_WLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_WLOCKED)
-#define TRUNK_LOCK_READER struct rm_priotracker _TRUNK_RM_TRACKER
+#define TRUNK_LOCK_INIT(trunk) mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
+#define TRUNK_LOCK_DESTROY(trunk) mtx_destroy(&(trunk)->lock)
+#define TRUNK_RLOCK(trunk) NET_EPOCH_ENTER()
+#define TRUNK_WLOCK(trunk) mtx_lock(&(trunk)->lock)
+#define TRUNK_RUNLOCK(trunk) NET_EPOCH_EXIT();
+#define TRUNK_WUNLOCK(trunk) mtx_unlock(&(trunk)->lock)
+#define TRUNK_RLOCK_ASSERT(trunk) MPASS(in_epoch(net_epoch_preempt))
+#define TRUNK_LOCK_ASSERT(trunk) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(trunk)->lock))
+#define TRUNK_WLOCK_ASSERT(trunk) mtx_assert(&(trunk)->lock, MA_OWNED);
/*
* The VLAN_ARRAY substitutes the dynamic hash with a static array
@@ -345,6 +320,13 @@ VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner);
#define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))
static void
+vlan_mc_free(struct epoch_context *ctx)
+{
+ struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx);
+ free(mc, M_VLAN);
+}
+
+static void
vlan_inithash(struct ifvlantrunk *trunk)
{
int i, n;
@@ -363,7 +345,7 @@ vlan_inithash(struct ifvlantrunk *trunk)
trunk->hmask = n - 1;
trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
for (i = 0; i < n; i++)
- LIST_INIT(&trunk->hash[i]);
+ CK_SLIST_INIT(&trunk->hash[i]);
}
static void
@@ -374,7 +356,7 @@ vlan_freehash(struct ifvlantrunk *trunk)
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
for (i = 0; i < (1 << trunk->hwidth); i++)
- KASSERT(LIST_EMPTY(&trunk->hash[i]),
+ KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]),
("%s: hash table not empty", __func__));
#endif
free(trunk->hash, M_VLAN);
@@ -388,12 +370,12 @@ vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
int i, b;
struct ifvlan *ifv2;
- TRUNK_WLOCK_ASSERT(trunk);
+ VLAN_XLOCK_ASSERT();
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << trunk->hwidth;
i = HASH(ifv->ifv_vid, trunk->hmask);
- LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
+ CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv->ifv_vid == ifv2->ifv_vid)
return (EEXIST);
@@ -406,7 +388,7 @@ vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
vlan_growhash(trunk, 1);
i = HASH(ifv->ifv_vid, trunk->hmask);
}
- LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
+ CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
trunk->refcnt++;
return (0);
@@ -418,15 +400,15 @@ vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
int i, b;
struct ifvlan *ifv2;
- TRUNK_WLOCK_ASSERT(trunk);
+ VLAN_XLOCK_ASSERT();
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << trunk->hwidth;
i = HASH(ifv->ifv_vid, trunk->hmask);
- LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
+ CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv2 == ifv) {
trunk->refcnt--;
- LIST_REMOVE(ifv2, ifv_list);
+ CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list);
if (trunk->refcnt < (b * b) / 2)
vlan_growhash(trunk, -1);
return (0);
@@ -446,7 +428,7 @@ vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
struct ifvlanhead *hash2;
int hwidth2, i, j, n, n2;
- TRUNK_WLOCK_ASSERT(trunk);
+ VLAN_XLOCK_ASSERT();
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
if (howmuch == 0) {
@@ -462,21 +444,21 @@ vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
if (hwidth2 < VLAN_DEF_HWIDTH)
return;
- /* M_NOWAIT because we're called with trunk mutex held */
- hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
+ hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
if (hash2 == NULL) {
printf("%s: out of memory -- hash size not changed\n",
__func__);
return; /* We can live with the old hash table */
}
for (j = 0; j < n2; j++)
- LIST_INIT(&hash2[j]);
+ CK_SLIST_INIT(&hash2[j]);
for (i = 0; i < n; i++)
- while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
- LIST_REMOVE(ifv, ifv_list);
+ while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) {
+ CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list);
j = HASH(ifv->ifv_vid, n2 - 1);
- LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
+ CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
}
+ NET_EPOCH_WAIT();
free(trunk->hash, M_VLAN);
trunk->hash = hash2;
trunk->hwidth = hwidth2;
@@ -494,7 +476,7 @@ vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
TRUNK_RLOCK_ASSERT(trunk);
- LIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
+ CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
if (ifv->ifv_vid == vid)
return (ifv);
return (NULL);
@@ -510,7 +492,7 @@ vlan_dumphash(struct ifvlantrunk *trunk)
for (i = 0; i < (1 << trunk->hwidth); i++) {
printf("%d: ", i);
- LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
+ CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
printf("%s ", ifv->ifv_ifp->if_xname);
printf("\n");
}
@@ -563,7 +545,6 @@ static void
trunk_destroy(struct ifvlantrunk *trunk)
{
VLAN_XLOCK_ASSERT();
- VLAN_WLOCK_ASSERT();
vlan_freehash(trunk);
trunk->parent->if_vlantrunk = NULL;
@@ -589,24 +570,19 @@ vlan_setmulti(struct ifnet *ifp)
struct vlan_mc_entry *mc;
int error;
- /*
- * XXX This stupidly needs the rmlock to avoid sleeping while holding
- * the in6_multi_mtx (see in6_mc_join_locked).
- */
- VLAN_RWLOCK_ASSERT();
+ VLAN_XLOCK_ASSERT();
/* Find the parent. */
sc = ifp->if_softc;
- TRUNK_WLOCK_ASSERT(TRUNK(sc));
ifp_p = PARENT(sc);
CURVNET_SET_QUIET(ifp_p->if_vnet);
/* First, remove any existing filter entries. */
- while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
- SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
+ while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
+ CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
- free(mc, M_VLAN);
+ epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
}
/* Now program new ones. */
@@ -621,10 +597,10 @@ vlan_setmulti(struct ifnet *ifp)
}
bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
mc->mc_addr.sdl_index = ifp_p->if_index;
- SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
+ CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
}
IF_ADDR_WUNLOCK(ifp);
- SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
+ CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
NULL);
if (error)
@@ -647,7 +623,6 @@ vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
struct ifnet *ifv_ifp;
struct ifvlantrunk *trunk;
struct sockaddr_dl *sdl;
- VLAN_LOCK_READER;
/* Need the rmlock since this is run on taskqueue_swi. */
VLAN_RLOCK();
@@ -726,12 +701,10 @@ static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
struct ifvlan *ifv;
- VLAN_LOCK_READER;
if (ifp->if_type != IFT_L2VLAN)
return (NULL);
- /* Not clear if callers are sleepable, so acquire the rmlock. */
VLAN_RLOCK();
ifv = ifp->if_softc;
ifp = NULL;
@@ -811,10 +784,7 @@ vlan_devat(struct ifnet *ifp, uint16_t vid)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
- VLAN_LOCK_READER;
- TRUNK_LOCK_READER;
- /* Not clear if callers are sleepable, so acquire the rmlock. */
VLAN_RLOCK();
trunk = ifp->if_vlantrunk;
if (trunk == NULL) {
@@ -822,11 +792,9 @@ vlan_devat(struct ifnet *ifp, uint16_t vid)
return (NULL);
}
ifp = NULL;
- TRUNK_RLOCK(trunk);
ifv = vlan_gethash(trunk, vid);
if (ifv)
ifp = ifv->ifv_ifp;
- TRUNK_RUNLOCK(trunk);
VLAN_RUNLOCK();
return (ifp);
}
@@ -1078,7 +1046,7 @@ vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
if_rele(p);
return (ENOSPC);
}
- SLIST_INIT(&ifv->vlan_mc_listhead);
+ CK_SLIST_INIT(&ifv->vlan_mc_listhead);
ifp->if_softc = ifv;
/*
* Set the name manually rather than using if_initname because
@@ -1145,6 +1113,7 @@ vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
* ifvlan.
*/
taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
+ NET_EPOCH_WAIT();
if_free(ifp);
free(ifv, M_VLAN);
ifc_free_unit(ifc, unit);
@@ -1169,7 +1138,6 @@ vlan_transmit(struct ifnet *ifp, struct mbuf *m)
struct ifvlan *ifv;
struct ifnet *p;
int error, len, mcast;
- VLAN_LOCK_READER;
VLAN_RLOCK();
ifv = ifp->if_softc;
@@ -1229,8 +1197,6 @@ vlan_input(struct ifnet *ifp, struct mbuf *m)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
- VLAN_LOCK_READER;
- TRUNK_LOCK_READER;
struct m_tag *mtag;
uint16_t vid, tag;
@@ -1291,16 +1257,13 @@ vlan_input(struct ifnet *ifp, struct mbuf *m)
vid = EVL_VLANOFTAG(tag);
- TRUNK_RLOCK(trunk);
ifv = vlan_gethash(trunk, vid);
if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
- TRUNK_RUNLOCK(trunk);
- if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
VLAN_RUNLOCK();
+ if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
m_freem(m);
return;
}
- TRUNK_RUNLOCK(trunk);
if (vlan_mtag_pcp) {
/*
@@ -1341,8 +1304,13 @@ vlan_lladdr_fn(void *arg, int pending __unused)
ifv = (struct ifvlan *)arg;
ifp = ifv->ifv_ifp;
+
+ CURVNET_SET(ifp->if_vnet);
+
/* The ifv_ifp already has the lladdr copied in. */
if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);
+
+ CURVNET_RESTORE();
}
static int
@@ -1371,22 +1339,19 @@ vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
if (ifv->ifv_trunk)
return (EBUSY);
- /* Acquire rmlock after the branch so we can M_WAITOK. */
VLAN_XLOCK();
if (p->if_vlantrunk == NULL) {
trunk = malloc(sizeof(struct ifvlantrunk),
M_VLAN, M_WAITOK | M_ZERO);
vlan_inithash(trunk);
TRUNK_LOCK_INIT(trunk);
- VLAN_WLOCK();
TRUNK_WLOCK(trunk);
p->if_vlantrunk = trunk;
trunk->parent = p;
if_ref(trunk->parent);
+ TRUNK_WUNLOCK(trunk);
} else {
- VLAN_WLOCK();
trunk = p->if_vlantrunk;
- TRUNK_WLOCK(trunk);
}
ifv->ifv_vid = vid; /* must set this before vlan_inshash() */
@@ -1450,7 +1415,9 @@ vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
ifp->if_link_state = p->if_link_state;
+ TRUNK_RLOCK(TRUNK(ifv));
vlan_capabilities(ifv);
+ TRUNK_RUNLOCK(TRUNK(ifv));
/*
* Set up our interface address to reflect the underlying
@@ -1460,12 +1427,6 @@ vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
p->if_addrlen;
- /*
- * Configure multicast addresses that may already be
- * joined on the vlan device.
- */
- (void)vlan_setmulti(ifp);
-
TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);
/* We are ready for operation now. */
@@ -1473,13 +1434,14 @@ vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
/* Update flags on the parent, if necessary. */
vlan_setflags(ifp, 1);
-done:
+
/*
- * We need to drop the non-sleepable rmlock so that the underlying
- * devices can sleep in their vlan_config hooks.
+ * Configure multicast addresses that may already be
+ * joined on the vlan device.
*/
- TRUNK_WUNLOCK(trunk);
- VLAN_WUNLOCK();
+ (void)vlan_setmulti(ifp);
+
+done:
if (error == 0)
EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
VLAN_XUNLOCK();
@@ -1512,13 +1474,6 @@ vlan_unconfig_locked(struct ifnet *ifp, int departing)
parent = NULL;
if (trunk != NULL) {
- /*
- * Both vlan_transmit and vlan_input rely on the trunk fields
- * being NULL to determine whether to bail, so we need to get
- * an exclusive lock here to prevent them from using bad
- * ifvlans.
- */
- VLAN_WLOCK();
parent = trunk->parent;
/*
@@ -1526,7 +1481,7 @@ vlan_unconfig_locked(struct ifnet *ifp, int departing)
* empty the list of multicast groups that we may have joined
* while we were alive from the parent's list.
*/
- while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
+ while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
/*
* If the parent interface is being detached,
* all its multicast addresses have already
@@ -1543,19 +1498,13 @@ vlan_unconfig_locked(struct ifnet *ifp, int departing)
"Failed to delete multicast address from parent: %d\n",
error);
}
- SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
- free(mc, M_VLAN);
+ CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
+ epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
}
vlan_setflags(ifp, 0); /* clear special flags on parent */
- /*
- * The trunk lock isn't actually required here, but
- * vlan_remhash expects it.
- */
- TRUNK_WLOCK(trunk);
vlan_remhash(trunk, ifv);
- TRUNK_WUNLOCK(trunk);
ifv->ifv_trunk = NULL;
/*
@@ -1563,9 +1512,9 @@ vlan_unconfig_locked(struct ifnet *ifp, int departing)
*/
if (trunk->refcnt == 0) {
parent->if_vlantrunk = NULL;
+ NET_EPOCH_WAIT();
trunk_destroy(trunk);
}
- VLAN_WUNLOCK();
}
/* Disconnect from parent. */
@@ -1642,7 +1591,6 @@ vlan_link_state(struct ifnet *ifp)
{
struct ifvlantrunk *trunk;
struct ifvlan *ifv;
- VLAN_LOCK_READER;
/* Called from a taskqueue_swi task, so we cannot sleep. */
VLAN_RLOCK();
@@ -1672,7 +1620,7 @@ vlan_capabilities(struct ifvlan *ifv)
u_long hwa = 0;
VLAN_SXLOCK_ASSERT();
- TRUNK_WLOCK_ASSERT(TRUNK(ifv));
+ TRUNK_RLOCK_ASSERT(TRUNK(ifv));
p = PARENT(ifv);
ifp = ifv->ifv_ifp;
@@ -1773,11 +1721,11 @@ vlan_trunk_capabilities(struct ifnet *ifp)
VLAN_SUNLOCK();
return;
}
- TRUNK_WLOCK(trunk);
+ TRUNK_RLOCK(trunk);
VLAN_FOREACH(ifv, trunk) {
vlan_capabilities(ifv);
}
- TRUNK_WUNLOCK(trunk);
+ TRUNK_RUNLOCK(trunk);
VLAN_SUNLOCK();
}
@@ -1791,7 +1739,6 @@ vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
struct ifvlantrunk *trunk;
struct vlanreq vlr;
int error = 0;
- VLAN_LOCK_READER;
ifr = (struct ifreq *)data;
ifa = (struct ifaddr *) data;
@@ -1927,16 +1874,13 @@ vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
* XXX We need the rmlock here to avoid sleeping while
* holding in6_multi_mtx.
*/
- VLAN_RLOCK();
+ VLAN_XLOCK();
trunk = TRUNK(ifv);
- if (trunk != NULL) {
- TRUNK_WLOCK(trunk);
+ if (trunk != NULL)
error = vlan_setmulti(ifp);
- TRUNK_WUNLOCK(trunk);
- }
- VLAN_RUNLOCK();
- break;
+ VLAN_XUNLOCK();
+ break;
case SIOCGVLANPCP:
#ifdef VIMAGE
if (ifp->if_vnet != ifp->if_home_vnet) {
@@ -1973,9 +1917,9 @@ vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
ifv->ifv_capenable = ifr->ifr_reqcap;
trunk = TRUNK(ifv);
if (trunk != NULL) {
- TRUNK_WLOCK(trunk);
+ TRUNK_RLOCK(trunk);
vlan_capabilities(ifv);
- TRUNK_WUNLOCK(trunk);
+ TRUNK_RUNLOCK(trunk);
}
VLAN_SUNLOCK();
break;
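
As a hedged sketch (not part of the diff; the structures below are hypothetical), the vlan_mc_free()/epoch_call() combination above implements a deferred free: an entry is unlinked immediately, but its memory is only released after every thread currently inside the net epoch has left it, so lockless CK_SLIST readers never touch freed memory:

/* Hypothetical sketch of the epoch_call() deferred-free pattern. */
struct example_mc {
	struct sockaddr_dl		addr;
	CK_SLIST_ENTRY(example_mc)	link;
	struct epoch_context		epoch_ctx;
};
CK_SLIST_HEAD(example_mc_head, example_mc);

static void
example_mc_free(struct epoch_context *ctx)
{
	struct example_mc *mc;

	mc = __containerof(ctx, struct example_mc, epoch_ctx);
	free(mc, M_TEMP);
}

static void
example_mc_remove(struct example_mc_head *head, struct example_mc *mc)
{
	/* Unlink now; free only after current epoch readers are done. */
	CK_SLIST_REMOVE(head, mc, example_mc, link);
	epoch_call(net_epoch_preempt, &mc->epoch_ctx, example_mc_free);
}
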
diff --git a/freebsd/sys/net/iflib.h b/freebsd/sys/net/iflib.h
index 6e1eee63..8c2be41b 100644
--- a/freebsd/sys/net/iflib.h
+++ b/freebsd/sys/net/iflib.h
@@ -173,7 +173,7 @@ typedef struct pci_vendor_info {
#define IFLIB_PNP_DESCR "U32:vendor;U32:device;U32:subvendor;U32:subdevice;" \
"U32:revision;U32:class;D:#"
#define IFLIB_PNP_INFO(b, u, t) \
- MODULE_PNP_INFO(IFLIB_PNP_DESCR, b, u, t, sizeof(t[0]), nitems(t) - 1)
+ MODULE_PNP_INFO(IFLIB_PNP_DESCR, b, u, t, nitems(t) - 1)
typedef struct if_txrx {
int (*ift_txd_encap) (void *, if_pkt_info_t);
@@ -246,7 +246,7 @@ struct if_shared_ctx {
/* fields necessary for probe */
pci_vendor_info_t *isc_vendor_info;
char *isc_driver_version;
-/* optional function to transform the read values to match the table*/
+ /* optional function to transform the read values to match the table*/
void (*isc_parse_devinfo) (uint16_t *device_id, uint16_t *subvendor_id,
uint16_t *subdevice_id, uint16_t *rev_id);
int isc_nrxd_min[8];
@@ -375,6 +375,8 @@ if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx);
if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx);
void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]);
+void iflib_request_reset(if_ctx_t ctx);
+uint8_t iflib_in_detach(if_ctx_t ctx);
/*
* If the driver can plug cleanly in to newbus use these