author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2018-11-06 15:42:44 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2018-11-15 10:56:14 +0100
commit     e0b4edbdcc3558d3f38af8398f995c2e9f019f07 (patch)
tree       ea91a5fcfb9b6a66a8c0b74cf68ff8d450ce17e0 /freebsd/sys/net
parent     Disable or make static kern_* functions (diff)
Update to FreeBSD head 2018-11-15
Git mirror commit a18b0830c4be01b39489a891b63d6023ada6358a. Update #3472.
Diffstat (limited to 'freebsd/sys/net')
-rw-r--r--  freebsd/sys/net/if.c            | 21
-rw-r--r--  freebsd/sys/net/if_ethersubr.c  | 20
-rw-r--r--  freebsd/sys/net/if_ipsec.c      | 21
-rw-r--r--  freebsd/sys/net/if_lagg.c       | 67
-rw-r--r--  freebsd/sys/net/if_stf.c        |  3
-rw-r--r--  freebsd/sys/net/if_var.h        |  2
-rw-r--r--  freebsd/sys/net/pfvar.h         | 35
7 files changed, 137 insertions(+), 32 deletions(-)
diff --git a/freebsd/sys/net/if.c b/freebsd/sys/net/if.c
index 7721af11..d846482e 100644
--- a/freebsd/sys/net/if.c
+++ b/freebsd/sys/net/if.c
@@ -1131,6 +1131,9 @@ if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp)
* the work top-down for us.
*/
if (shutdown) {
+ /* Give interface users the chance to clean up. */
+ EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
+
/*
* In case of a vmove we are done here without error.
* If we would signal an error it would lead to the same
@@ -1773,35 +1776,29 @@ if_data_copy(struct ifnet *ifp, struct if_data *ifd)
void
if_addr_rlock(struct ifnet *ifp)
{
- MPASS(*(uint64_t *)&ifp->if_addr_et == 0);
- epoch_enter_preempt(net_epoch_preempt, &ifp->if_addr_et);
+
+ epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
}
void
if_addr_runlock(struct ifnet *ifp)
{
- epoch_exit_preempt(net_epoch_preempt, &ifp->if_addr_et);
-#ifdef INVARIANTS
- bzero(&ifp->if_addr_et, sizeof(struct epoch_tracker));
-#endif
+
+ epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
}
void
if_maddr_rlock(if_t ifp)
{
- MPASS(*(uint64_t *)&ifp->if_maddr_et == 0);
- epoch_enter_preempt(net_epoch_preempt, &ifp->if_maddr_et);
+ epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
}
void
if_maddr_runlock(if_t ifp)
{
- epoch_exit_preempt(net_epoch_preempt, &ifp->if_maddr_et);
-#ifdef INVARIANTS
- bzero(&ifp->if_maddr_et, sizeof(struct epoch_tracker));
-#endif
+ epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
}
/*
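
The if.c hunk above replaces the per-ifnet epoch trackers (if_addr_et, if_maddr_et) with the calling thread's tracker, curthread->td_et. A minimal caller sketch, assuming the epoch(9)-based ifnet address list of this FreeBSD snapshot; the function name, the loop over if_addrhead via CK_STAILQ_FOREACH, and the loop body are illustrative and not part of this commit:

	/*
	 * Illustrative only: count an interface's non-link-level addresses
	 * while the calling thread runs inside the network epoch.  Both
	 * lock calls now use curthread->td_et instead of storage embedded
	 * in struct ifnet.
	 */
	static int
	example_count_addrs(struct ifnet *ifp)
	{
		struct ifaddr *ifa;
		int n = 0;

		if_addr_rlock(ifp);
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_LINK)
				n++;
		}
		if_addr_runlock(ifp);
		return (n);
	}
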
diff --git a/freebsd/sys/net/if_ethersubr.c b/freebsd/sys/net/if_ethersubr.c
index 01e757e5..96ed309a 100644
--- a/freebsd/sys/net/if_ethersubr.c
+++ b/freebsd/sys/net/if_ethersubr.c
@@ -477,6 +477,26 @@ ether_output_frame(struct ifnet *ifp, struct mbuf *m)
return (0);
}
+#ifdef EXPERIMENTAL
+#if defined(INET6) && defined(INET)
+ /* draft-ietf-6man-ipv6only-flag */
+ /* Catch ETHERTYPE_IP, and ETHERTYPE_ARP if we are v6-only. */
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY) != 0) {
+ struct ether_header *eh;
+
+ eh = mtod(m, struct ether_header *);
+ switch (ntohs(eh->ether_type)) {
+ case ETHERTYPE_IP:
+ case ETHERTYPE_ARP:
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ /* NOTREACHED */
+ break;
+ };
+ }
+#endif
+#endif
+
/*
* Queue message on interface, update output statistics if
* successful, and start output if interface not yet active.
diff --git a/freebsd/sys/net/if_ipsec.c b/freebsd/sys/net/if_ipsec.c
index 08465911..7cc2c961 100644
--- a/freebsd/sys/net/if_ipsec.c
+++ b/freebsd/sys/net/if_ipsec.c
@@ -220,6 +220,12 @@ ipsec_clone_destroy(struct ifnet *ifp)
sx_xlock(&ipsec_ioctl_sx);
sc = ifp->if_softc;
ipsec_delete_tunnel(sc);
+ /*
+ * Delete softc from idhash on interface destroy, since
+ * ipsec_delete_tunnel() keeps reqid unchanged.
+ */
+ if (sc->reqid != 0)
+ CK_LIST_REMOVE(sc, idhash);
bpfdetach(ifp);
if_detach(ifp);
ifp->if_softc = NULL;
@@ -273,6 +279,13 @@ vnet_ipsec_uninit(const void *unused __unused)
if_clone_detach(V_ipsec_cloner);
free(V_ipsec_idhtbl, M_IPSEC);
+ /*
+ * Use V_ipsec_idhtbl pointer as indicator that VNET is going to be
+ * destroyed, it is used by ipsec_srcaddr() callback.
+ */
+ V_ipsec_idhtbl = NULL;
+ IPSEC_WAIT();
+
#ifdef INET
if (IS_DEFAULT_VNET(curvnet))
ip_encap_unregister_srcaddr(ipsec4_srctab);
@@ -785,6 +798,10 @@ ipsec_srcaddr(void *arg __unused, const struct sockaddr *sa,
struct ipsec_softc *sc;
struct secasindex *saidx;
+ /* Check that VNET is ready */
+ if (V_ipsec_idhtbl == NULL)
+ return;
+
MPASS(in_epoch(net_epoch_preempt));
CK_LIST_FOREACH(sc, ipsec_srchash(sa), srchash) {
if (sc->family == 0)
@@ -1031,13 +1048,11 @@ ipsec_delete_tunnel(struct ipsec_softc *sc)
sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if (sc->family != 0) {
CK_LIST_REMOVE(sc, srchash);
- IPSEC_WAIT();
-
+ sc->family = 0;
/*
* Make sure that ipsec_if_input() will not do access
* to softc's policies.
*/
- sc->family = 0;
IPSEC_WAIT();
key_unregister_ifnet(sc->sp, IPSEC_SPCOUNT);
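
The if_ipsec.c changes follow a publish-then-wait pattern: the state that readers test (V_ipsec_idhtbl, sc->family) is cleared first, and only then does IPSEC_WAIT() drain the network epoch so readers already in flight finish before the backing resources go away. A generic sketch of that pattern, with hypothetical names standing in for V_ipsec_idhtbl and its readers (not code from this commit):

	struct example_entry;				/* hypothetical */
	static struct example_entry *example_tbl;	/* stands in for V_ipsec_idhtbl */

	static void
	example_reader(void)	/* runs inside the network epoch */
	{
		if (example_tbl == NULL)	/* VNET is being destroyed */
			return;
		/* ... safe to use the table for the rest of this epoch section ... */
	}

	static void
	example_teardown(void)
	{
		example_tbl = NULL;			/* publish "going away" first */
		epoch_wait_preempt(net_epoch_preempt);	/* then wait for readers */
		/* from here on no reader can still be referencing the table */
	}
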
diff --git a/freebsd/sys/net/if_lagg.c b/freebsd/sys/net/if_lagg.c
index 632ea744..85099115 100644
--- a/freebsd/sys/net/if_lagg.c
+++ b/freebsd/sys/net/if_lagg.c
@@ -635,11 +635,18 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
struct lagg_softc *sc_ptr;
struct lagg_port *lp, *tlp;
- int error, i;
+ struct ifreq ifr;
+ int error, i, oldmtu;
uint64_t *pval;
LAGG_XLOCK_ASSERT(sc);
+ if (sc->sc_ifp == ifp) {
+ if_printf(sc->sc_ifp,
+ "cannot add a lagg to itself as a port\n");
+ return (EINVAL);
+ }
+
/* Limit the maximal number of lagg ports */
if (sc->sc_count >= LAGG_MAX_PORTS)
return (ENOSPC);
@@ -658,12 +665,25 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
return (EPROTONOSUPPORT);
/* Allow the first Ethernet member to define the MTU */
- if (CK_SLIST_EMPTY(&sc->sc_ports))
+ oldmtu = -1;
+ if (CK_SLIST_EMPTY(&sc->sc_ports)) {
sc->sc_ifp->if_mtu = ifp->if_mtu;
- else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
- if_printf(sc->sc_ifp, "invalid MTU for %s\n",
- ifp->if_xname);
- return (EINVAL);
+ } else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
+ if (ifp->if_ioctl == NULL) {
+ if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
+ ifp->if_xname);
+ return (EINVAL);
+ }
+ oldmtu = ifp->if_mtu;
+ strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
+ ifr.ifr_mtu = sc->sc_ifp->if_mtu;
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
+ if (error != 0) {
+ if_printf(sc->sc_ifp, "invalid MTU for %s\n",
+ ifp->if_xname);
+ return (error);
+ }
+ ifr.ifr_mtu = oldmtu;
}
lp = malloc(sizeof(struct lagg_port), M_DEVBUF, M_WAITOK|M_ZERO);
@@ -675,6 +695,9 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
if (ifp == sc_ptr->sc_ifp) {
LAGG_LIST_UNLOCK();
free(lp, M_DEVBUF);
+ if (oldmtu != -1)
+ (*ifp->if_ioctl)(ifp, SIOCSIFMTU,
+ (caddr_t)&ifr);
return (EINVAL);
/* XXX disable stacking for the moment, its untested */
#ifdef LAGG_PORT_STACKING
@@ -683,6 +706,9 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
LAGG_MAX_STACKING) {
LAGG_LIST_UNLOCK();
free(lp, M_DEVBUF);
+ if (oldmtu != -1)
+ (*ifp->if_ioctl)(ifp, SIOCSIFMTU,
+ (caddr_t)&ifr);
return (E2BIG);
}
#endif
@@ -748,6 +774,8 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
if ((error = lagg_proto_addport(sc, lp)) != 0) {
/* Remove the port, without calling pr_delport. */
lagg_port_destroy(lp, 0);
+ if (oldmtu != -1)
+ (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
return (error);
}
@@ -1466,8 +1494,31 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCSIFMTU:
- /* Do not allow the MTU to be directly changed */
- error = EINVAL;
+ LAGG_XLOCK(sc);
+ CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (lp->lp_ioctl != NULL)
+ error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
+ else
+ error = EINVAL;
+ if (error != 0) {
+ if_printf(ifp,
+ "failed to change MTU to %d on port %s, "
+ "reverting all ports to original MTU (%d)\n",
+ ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
+ break;
+ }
+ }
+ if (error == 0) {
+ ifp->if_mtu = ifr->ifr_mtu;
+ } else {
+ /* set every port back to the original MTU */
+ ifr->ifr_mtu = ifp->if_mtu;
+ CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (lp->lp_ioctl != NULL)
+ (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
+ }
+ }
+ LAGG_XUNLOCK(sc);
break;
default:
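
With the SIOCSIFMTU hunk above, an MTU change on a lagg interface is now forwarded to every member port and rolled back if any port rejects it. A minimal userland sketch, assuming an existing interface named lagg0 (the interface name and error handling are illustrative); this is the same ioctl that ifconfig's mtu keyword issues:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <sys/sockio.h>
	#include <net/if.h>

	#include <string.h>
	#include <unistd.h>

	/* Set the MTU of lagg0; the kernel propagates it to all member ports. */
	static int
	set_lagg_mtu(int mtu)
	{
		struct ifreq ifr;
		int error, s;

		s = socket(AF_INET, SOCK_DGRAM, 0);
		if (s == -1)
			return (-1);
		memset(&ifr, 0, sizeof(ifr));
		strlcpy(ifr.ifr_name, "lagg0", sizeof(ifr.ifr_name));
		ifr.ifr_mtu = mtu;
		error = ioctl(s, SIOCSIFMTU, &ifr);
		close(s);
		return (error);
	}
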
diff --git a/freebsd/sys/net/if_stf.c b/freebsd/sys/net/if_stf.c
index 1102a62d..3ba9f8c0 100644
--- a/freebsd/sys/net/if_stf.c
+++ b/freebsd/sys/net/if_stf.c
@@ -373,6 +373,7 @@ stf_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
static int
stf_getsrcifa6(struct ifnet *ifp, struct in6_addr *addr, struct in6_addr *mask)
{
+ struct rm_priotracker in_ifa_tracker;
struct ifaddr *ia;
struct in_ifaddr *ia4;
struct in6_ifaddr *ia6;
@@ -388,9 +389,11 @@ stf_getsrcifa6(struct ifnet *ifp, struct in6_addr *addr, struct in6_addr *mask)
continue;
bcopy(GET_V4(&sin6->sin6_addr), &in, sizeof(in));
+ IN_IFADDR_RLOCK(&in_ifa_tracker);
LIST_FOREACH(ia4, INADDR_HASH(in.s_addr), ia_hash)
if (ia4->ia_addr.sin_addr.s_addr == in.s_addr)
break;
+ IN_IFADDR_RUNLOCK(&in_ifa_tracker);
if (ia4 == NULL)
continue;
diff --git a/freebsd/sys/net/if_var.h b/freebsd/sys/net/if_var.h
index 6504837b..d23928e5 100644
--- a/freebsd/sys/net/if_var.h
+++ b/freebsd/sys/net/if_var.h
@@ -390,8 +390,6 @@ struct ifnet {
struct netdump_methods *if_netdump_methods;
#endif /* __rtems__ */
struct epoch_context if_epoch_ctx;
- struct epoch_tracker if_addr_et;
- struct epoch_tracker if_maddr_et;
#ifndef __rtems__
/*
diff --git a/freebsd/sys/net/pfvar.h b/freebsd/sys/net/pfvar.h
index 5e80b665..2924c06d 100644
--- a/freebsd/sys/net/pfvar.h
+++ b/freebsd/sys/net/pfvar.h
@@ -824,13 +824,21 @@ typedef void pfsync_update_state_t(struct pf_state *);
typedef void pfsync_delete_state_t(struct pf_state *);
typedef void pfsync_clear_states_t(u_int32_t, const char *);
typedef int pfsync_defer_t(struct pf_state *, struct mbuf *);
-
-extern pfsync_state_import_t *pfsync_state_import_ptr;
-extern pfsync_insert_state_t *pfsync_insert_state_ptr;
-extern pfsync_update_state_t *pfsync_update_state_ptr;
-extern pfsync_delete_state_t *pfsync_delete_state_ptr;
-extern pfsync_clear_states_t *pfsync_clear_states_ptr;
-extern pfsync_defer_t *pfsync_defer_ptr;
+typedef void pfsync_detach_ifnet_t(struct ifnet *);
+
+VNET_DECLARE(pfsync_state_import_t *, pfsync_state_import_ptr);
+#define V_pfsync_state_import_ptr VNET(pfsync_state_import_ptr)
+VNET_DECLARE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
+#define V_pfsync_insert_state_ptr VNET(pfsync_insert_state_ptr)
+VNET_DECLARE(pfsync_update_state_t *, pfsync_update_state_ptr);
+#define V_pfsync_update_state_ptr VNET(pfsync_update_state_ptr)
+VNET_DECLARE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
+#define V_pfsync_delete_state_ptr VNET(pfsync_delete_state_ptr)
+VNET_DECLARE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
+#define V_pfsync_clear_states_ptr VNET(pfsync_clear_states_ptr)
+VNET_DECLARE(pfsync_defer_t *, pfsync_defer_ptr);
+#define V_pfsync_defer_ptr VNET(pfsync_defer_ptr)
+extern pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
void pfsync_state_export(struct pfsync_state *,
struct pf_state *);
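
The pfsync hook pointers above change from plain globals to per-VNET variables, resolved through the V_ macros declared here. A hedged sketch of the usual define/invoke split between provider and consumer (the consumer function is illustrative and the VNET_DEFINE placement is an assumption about the provider side, not code from this commit):

	/* Provider side (pfsync), one instance per VNET: */
	VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);

	/* Consumer side (pf), resolved for the current VNET via the V_ macro: */
	static int
	example_try_defer(struct pf_state *st, struct mbuf *m)
	{

		if (V_pfsync_defer_ptr == NULL)
			return (0);		/* pfsync not active in this VNET */
		return ((*V_pfsync_defer_ptr)(st, m));
	}
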
@@ -1205,6 +1213,19 @@ struct pf_divert {
#define PFR_KENTRY_HIWAT 200000 /* Number of table entries */
/*
+ * Limit the length of the fragment queue traversal. Remember
+ * search entry points based on the fragment offset.
+ */
+#define PF_FRAG_ENTRY_POINTS 16
+
+/*
+ * The number of entries in the fragment queue must be limited
+ * to avoid DoS by linear seaching. Instead of a global limit,
+ * use a limit per entry point. For large packets these sum up.
+ */
+#define PF_FRAG_ENTRY_LIMIT 64
+
+/*
* ioctl parameter structures
*/
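
Taken together, the two new macros bound the per-reassembly state: with 16 remembered entry points and at most 64 entries per entry point, a single fragment queue can hold at most

	PF_FRAG_ENTRY_POINTS * PF_FRAG_ENTRY_LIMIT = 16 * 64 = 1024

fragment entries (an interpretation of the comments above, not text from the commit).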