author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-10-07 15:10:20 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-01-10 09:53:31 +0100
commit     c40e45b75eb76d79a05c7fa85c1fa9b5c728a12f (patch)
tree       ad4f2519067709f00ab98b3c591186c26dc3a21f /freebsd/sys/net/netisr.c
parent     userspace-header-gen.py: Simplify program ports (diff)
download   rtems-libbsd-c40e45b75eb76d79a05c7fa85c1fa9b5c728a12f.tar.bz2
Update to FreeBSD head 2016-08-23
Git mirror commit 9fe7c416e6abb28b1398fd3e5687099846800cfd.
Diffstat (limited to 'freebsd/sys/net/netisr.c')
-rw-r--r--  freebsd/sys/net/netisr.c  |  276
1 file changed, 187 insertions(+), 89 deletions(-)
diff --git a/freebsd/sys/net/netisr.c b/freebsd/sys/net/netisr.c
index f43cffa1..f14b2e95 100644
--- a/freebsd/sys/net/netisr.c
+++ b/freebsd/sys/net/netisr.c
@@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
+#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/mbuf.h>
@@ -131,7 +132,7 @@ static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
/*-
* Three global direct dispatch policies are supported:
*
- * NETISR_DISPATCH_QUEUED: All work is deferred for a netisr, regardless of
+ * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
* context (may be overridden by protocols).
*
* NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
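For orientation: a protocol can override this global policy per-protocol through the nh_dispatch field of its struct netisr_handler; NETISR_DISPATCH_DEFAULT defers to the global setting. A minimal sketch with hypothetical names (example_nh, example_input) and an illustrative protocol slot:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/netisr.h>

/* Hypothetical input routine; a real one would consume the packet. */
static void
example_input(struct mbuf *m)
{
	m_freem(m);
}

static struct netisr_handler example_nh = {
	.nh_name = "example",
	.nh_handler = example_input,
	.nh_proto = NETISR_IP,			/* illustrative slot only */
	.nh_policy = NETISR_POLICY_SOURCE,
	.nh_dispatch = NETISR_DISPATCH_DEFERRED, /* always queue; ignores the global policy */
};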
@@ -151,37 +152,25 @@ static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
#define NETISR_DISPATCH_POLICY_MAXSTR 20 /* Used for temporary buffers. */
static u_int netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RW |
- CTLFLAG_TUN, 0, 0, sysctl_netisr_dispatch_policy, "A",
+SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RWTUN,
+ 0, 0, sysctl_netisr_dispatch_policy, "A",
"netisr dispatch policy");
/*
- * These sysctls were used in previous versions to control and export
- * dispatch policy state. Now, we provide read-only export via them so that
- * older netstat binaries work. At some point they can be garbage collected.
- */
-static int netisr_direct_force;
-SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RD,
- &netisr_direct_force, 0, "compat: force direct dispatch");
-
-static int netisr_direct;
-SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RD, &netisr_direct, 0,
- "compat: enable direct dispatch");
-
-/*
* Allow the administrator to limit the number of threads (CPUs) to use for
* netisr. We don't check netisr_maxthreads before creating the thread for
- * CPU 0, so in practice we ignore values <= 1. This must be set at boot.
- * We will create at most one thread per CPU.
+ * CPU 0. This must be set at boot. We will create at most one thread per CPU.
+ * By default we initialize this to 1, which assigns just one CPU (cpu0) and
+ * therefore only one workstream. If set to -1, netisr uses all CPUs
+ * (mp_ncpus) and therefore has that many workstreams, one workstream per
+ * thread (CPU).
*/
-static int netisr_maxthreads = -1; /* Max number of threads. */
-TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
+static int netisr_maxthreads = 1; /* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
&netisr_maxthreads, 0,
"Use at most this many CPUs for netisr processing");
static int netisr_bindthreads = 0; /* Bind threads to CPUs. */
-TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
&netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
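The deleted TUNABLE_INT() calls are redundant in this FreeBSD version: a sysctl declared with CTLFLAG_RDTUN (or CTLFLAG_RWTUN, shorthand for CTLFLAG_RW | CTLFLAG_TUN) fetches the like-named loader tunable automatically. Per the new comment above, one workstream per CPU with pinned threads would be requested at boot, e.g. in /boot/loader.conf:

net.isr.maxthreads=-1	# one netisr thread/workstream per CPU (mp_ncpus)
net.isr.bindthreads=1	# pin each netisr thread to its CPU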
@@ -192,7 +181,6 @@ SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
*/
#define NETISR_DEFAULT_MAXQLIMIT 10240
static u_int netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
-TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
&netisr_maxqlimit, 0,
"Maximum netisr per-protocol, per-CPU queue depth.");
@@ -204,7 +192,6 @@ SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
*/
#define NETISR_DEFAULT_DEFAULTQLIMIT 256
static u_int netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
-TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
&netisr_defaultqlimit, 0,
"Default netisr per-protocol, per-CPU queue limit if not set by protocol");
@@ -225,6 +212,23 @@ SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
*/
static struct netisr_proto netisr_proto[NETISR_MAXPROT];
+#ifdef VIMAGE
+/*
+ * The netisr_enable array describes a per-VNET flag for registered
+ * protocols on whether this netisr is active in this VNET or not.
+ * netisr_register() will automatically enable the netisr for the
+ * default VNET and all currently active instances.
+ * netisr_unregister() will disable all active VNETs, including vnet0.
+ * Individual network stack instances can be enabled/disabled by the
+ * netisr_(un)register_vnet() functions.
+ * With this we keep the one netisr_proto per protocol but add a
+ * mechanism to stop netisr processing for vnet teardown.
+ * Apart from that we expect a VNET to always be enabled.
+ */
+static VNET_DEFINE(u_int, netisr_enable[NETISR_MAXPROT]);
+#define V_netisr_enable VNET(netisr_enable)
+#endif
+
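The VNET_DEFINE()/VNET() pair above is the standard VIMAGE idiom for virtualized globals: every vnet carries its own instance of the variable, and the V_-prefixed accessor resolves it against curvnet. A minimal sketch of the same pattern for a hypothetical per-vnet counter:

#include <net/vnet.h>

static VNET_DEFINE(u_int, example_pkts);	/* hypothetical counter */
#define	V_example_pkts	VNET(example_pkts)

static void
example_count_packet(void)
{
	/* Valid only while a vnet context is set (e.g. via CURVNET_SET()). */
	V_example_pkts++;
}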
#ifndef __rtems__
/*
* Per-CPU workstream data. See netisr_internal.h for more details.
@@ -275,10 +279,7 @@ u_int
netisr_get_cpuid(u_int cpunumber)
{
- KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber,
- nws_count));
-
- return (nws_array[cpunumber]);
+ return (nws_array[cpunumber % nws_count]);
}
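The KASSERT is replaced by a modulo fold: netisr_get_cpuid() now maps an arbitrary caller-supplied value into the set of active workstreams instead of trusting it as an index. Assuming, for illustration, nws_count == 2 and nws_array[] = { 0, 2 } (workstreams on CPUs 0 and 2):

netisr_get_cpuid(0) == nws_array[0 % 2] == 0
netisr_get_cpuid(1) == nws_array[1 % 2] == 2
netisr_get_cpuid(5) == nws_array[5 % 2] == 2

This is what lets netisr_select_cpuid() (further down in this diff) pass raw values such as curcpu through it safely.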
/*
@@ -308,8 +309,6 @@ static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
{ NETISR_DISPATCH_HYBRID, "hybrid" },
{ NETISR_DISPATCH_DIRECT, "direct" },
};
-static const u_int netisr_dispatch_table_len =
- (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0]));
static void
netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
@@ -320,7 +319,7 @@ netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
u_int i;
str = "unknown";
- for (i = 0; i < netisr_dispatch_table_len; i++) {
+ for (i = 0; i < nitems(netisr_dispatch_table); i++) {
ndtep = &netisr_dispatch_table[i];
if (ndtep->ndte_policy == dispatch_policy) {
str = ndtep->ndte_policy_str;
@@ -336,7 +335,7 @@ netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
const struct netisr_dispatch_table_entry *ndtep;
u_int i;
- for (i = 0; i < netisr_dispatch_table_len; i++) {
+ for (i = 0; i < nitems(netisr_dispatch_table); i++) {
ndtep = &netisr_dispatch_table[i];
if (strcmp(ndtep->ndte_policy_str, str) == 0) {
*dispatch_policyp = ndtep->ndte_policy;
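Both loops now use nitems() instead of the hand-maintained netisr_dispatch_table_len. nitems() comes from <sys/param.h> and is essentially:

#define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))

so the bound is derived from the array itself at compile time and cannot drift when entries are added to the table.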
@@ -346,32 +345,6 @@ netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
return (EINVAL);
}
-static void
-netisr_dispatch_policy_compat(void)
-{
-
- switch (netisr_dispatch_policy) {
- case NETISR_DISPATCH_DEFERRED:
- netisr_direct_force = 0;
- netisr_direct = 0;
- break;
-
- case NETISR_DISPATCH_HYBRID:
- netisr_direct_force = 0;
- netisr_direct = 1;
- break;
-
- case NETISR_DISPATCH_DIRECT:
- netisr_direct_force = 1;
- netisr_direct = 1;
- break;
-
- default:
- panic("%s: unknown policy %u", __func__,
- netisr_dispatch_policy);
- }
-}
-
static int
sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
{
@@ -387,10 +360,8 @@ sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
&dispatch_policy);
if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
error = EINVAL;
- if (error == 0) {
+ if (error == 0)
netisr_dispatch_policy = dispatch_policy;
- netisr_dispatch_policy_compat();
- }
}
return (error);
}
@@ -403,6 +374,7 @@ sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
void
netisr_register(const struct netisr_handler *nhp)
{
+ VNET_ITERATOR_DECL(vnet_iter);
struct netisr_work *npwp;
const char *name;
u_int i, proto;
@@ -475,6 +447,22 @@ netisr_register(const struct netisr_handler *nhp)
bzero(npwp, sizeof(*npwp));
npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
}
+
+#ifdef VIMAGE
+ /*
+ * Test that we are in vnet0 and have a curvnet set.
+ */
+ KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
+ KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p",
+ __func__, curvnet, vnet0));
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ V_netisr_enable[proto] = 1;
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+#endif
NETISR_WUNLOCK();
}
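For context on the registration path this hunk extends: a protocol registers by handing netisr a filled-in struct netisr_handler from its init routine. A representative example, modeled on the IP handler in netinet/ip_input.c (the values shown are the customary ones, not taken from this diff):

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

netisr_register(&ip_nh);

With the VIMAGE block above, that single call now also sets V_netisr_enable[NETISR_IP] = 1 in every currently active vnet.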
@@ -651,6 +639,7 @@ netisr_drain_proto(struct netisr_work *npwp)
void
netisr_unregister(const struct netisr_handler *nhp)
{
+ VNET_ITERATOR_DECL(vnet_iter);
struct netisr_work *npwp;
#ifdef INVARIANTS
const char *name;
@@ -669,6 +658,16 @@ netisr_unregister(const struct netisr_handler *nhp)
("%s(%u): protocol not registered for %s", __func__, proto,
name));
+#ifdef VIMAGE
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ V_netisr_enable[proto] = 0;
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+#endif
+
netisr_proto[proto].np_name = NULL;
netisr_proto[proto].np_handler = NULL;
netisr_proto[proto].np_m2flow = NULL;
@@ -687,6 +686,97 @@ netisr_unregister(const struct netisr_handler *nhp)
NETISR_WUNLOCK();
}
+#ifdef VIMAGE
+void
+netisr_register_vnet(const struct netisr_handler *nhp)
+{
+ u_int proto;
+
+ proto = nhp->nh_proto;
+
+ KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
+ NETISR_WLOCK();
+ KASSERT(netisr_proto[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ nhp->nh_name));
+
+ V_netisr_enable[proto] = 1;
+ NETISR_WUNLOCK();
+}
+
+static void
+netisr_drain_proto_vnet(struct vnet *vnet, u_int proto)
+{
+ struct netisr_workstream *nwsp;
+ struct netisr_work *npwp;
+ struct mbuf *m, *mp, *n, *ne;
+ u_int i;
+
+ KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__));
+ NETISR_LOCK_ASSERT();
+
+ CPU_FOREACH(i) {
+ nwsp = DPCPU_ID_PTR(i, nws);
+ if (nwsp->nws_intr_event == NULL)
+ continue;
+ npwp = &nwsp->nws_work[proto];
+ NWS_LOCK(nwsp);
+
+ /*
+ * Rather than dissecting and removing mbufs from the middle
+ * of the chain, we build a new chain if the packet stays and
+ * update the head and tail pointers at the end. All packets
+ * matching the given vnet are freed.
+ */
+ m = npwp->nw_head;
+ n = ne = NULL;
+ while (m != NULL) {
+ mp = m;
+ m = m->m_nextpkt;
+ mp->m_nextpkt = NULL;
+ if (mp->m_pkthdr.rcvif->if_vnet != vnet) {
+ if (n == NULL) {
+ n = ne = mp;
+ } else {
+ ne->m_nextpkt = mp;
+ ne = mp;
+ }
+ continue;
+ }
+ /* This is a packet in the selected vnet. Free it. */
+ npwp->nw_len--;
+ m_freem(mp);
+ }
+ npwp->nw_head = n;
+ npwp->nw_tail = ne;
+ NWS_UNLOCK(nwsp);
+ }
+}
+
+void
+netisr_unregister_vnet(const struct netisr_handler *nhp)
+{
+ u_int proto;
+
+ proto = nhp->nh_proto;
+
+ KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
+ NETISR_WLOCK();
+ KASSERT(netisr_proto[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ nhp->nh_name));
+
+ V_netisr_enable[proto] = 0;
+
+ netisr_drain_proto_vnet(curvnet, proto);
+ NETISR_WUNLOCK();
+}
+#endif
+
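The intended use of the new pair is per-vnet setup and teardown: a VIMAGE-aware protocol disables its netisr before the vnet goes away so that queued mbufs belonging to that vnet are freed. A minimal sketch of the teardown side, assuming an ip_destroy()-style per-vnet uninit handler (names and ordering are illustrative):

#ifdef VIMAGE
static void
example_vnet_uninit(const void *unused __unused)
{
	/* Stop netisr processing for curvnet and drain its queued packets. */
	netisr_unregister_vnet(&ip_nh);
}
VNET_SYSUNINIT(example_netisr, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
    example_vnet_uninit, NULL);
#endif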
/*
* Compose the global and per-protocol policies on dispatch, and return the
* dispatch policy to use.
@@ -746,22 +836,25 @@ netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
* dispatch. In the queued case, fall back on the SOURCE
* policy.
*/
- if (*cpuidp != NETISR_CPUID_NONE)
+ if (*cpuidp != NETISR_CPUID_NONE) {
+ *cpuidp = netisr_get_cpuid(*cpuidp);
return (m);
+ }
if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
- *cpuidp = curcpu;
+ *cpuidp = netisr_get_cpuid(curcpu);
return (m);
}
policy = NETISR_POLICY_SOURCE;
}
if (policy == NETISR_POLICY_FLOW) {
- if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
+ if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
+ npp->np_m2flow != NULL) {
m = npp->np_m2flow(m, source);
if (m == NULL)
return (NULL);
}
- if (m->m_flags & M_FLOWID) {
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
*cpuidp =
netisr_default_flow2cpu(m->m_pkthdr.flowid);
return (m);
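The M_FLOWID mbuf flag no longer exists in this FreeBSD version; flow affinity is signalled through the packet-header hash type instead. A driver that wants NETISR_POLICY_FLOW to honor its hardware RSS hash would mark the mbuf roughly as follows (rss_hash is a hypothetical value from the NIC; the macros are from <sys/mbuf.h>):

m->m_pkthdr.flowid = rss_hash;		/* record the flow hash */
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);	/* valid hash, unspecified type */

Afterwards M_HASHTYPE_GET(m) != M_HASHTYPE_NONE holds, so netisr_select_cpuid() maps m_pkthdr.flowid to a CPU without invoking the protocol's np_m2flow method.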
@@ -984,6 +1077,13 @@ netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
KASSERT(netisr_proto[proto].np_handler != NULL,
("%s: invalid proto %u", __func__, proto));
+#ifdef VIMAGE
+ if (V_netisr_enable[proto] == 0) {
+ m_freem(m);
+ return (ENOPROTOOPT);
+ }
+#endif
+
m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
source, m, &cpuid);
if (m != NULL) {
@@ -1030,6 +1130,13 @@ netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
proto));
+#ifdef VIMAGE
+ if (V_netisr_enable[proto] == 0) {
+ m_freem(m);
+ return (ENOPROTOOPT);
+ }
+#endif
+
dispatch_policy = netisr_get_dispatch(npp);
if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
return (netisr_queue_src(proto, source, m));
@@ -1215,15 +1322,15 @@ netisr_start_swi(u_int cpuid, struct pcpu *pc)
static void
netisr_init(void *arg)
{
- char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
- u_int dispatch_policy;
- int error;
-
- KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));
+#ifdef EARLY_AP_STARTUP
+ struct pcpu *pc;
+#endif
NETISR_LOCK_INIT();
- if (netisr_maxthreads < 1)
- netisr_maxthreads = 1;
+ if (netisr_maxthreads == 0 || netisr_maxthreads < -1 )
+ netisr_maxthreads = 1; /* default behavior */
+ else if (netisr_maxthreads == -1)
+ netisr_maxthreads = mp_ncpus; /* use max cpus */
if (netisr_maxthreads > mp_ncpus) {
printf("netisr_init: forcing maxthreads from %d to %d\n",
netisr_maxthreads, mp_ncpus);
@@ -1248,31 +1355,24 @@ netisr_init(void *arg)
}
#endif
-#ifndef __rtems__
- if (TUNABLE_STR_FETCH("net.isr.dispatch", tmp, sizeof(tmp))) {
- error = netisr_dispatch_policy_from_str(tmp,
- &dispatch_policy);
- if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
- error = EINVAL;
- if (error == 0) {
- netisr_dispatch_policy = dispatch_policy;
- netisr_dispatch_policy_compat();
- } else
- printf(
- "%s: invalid dispatch policy %s, using default\n",
- __func__, tmp);
+#ifdef EARLY_AP_STARTUP
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
+ if (nws_count >= netisr_maxthreads)
+ break;
+ netisr_start_swi(pc->pc_cpuid, pc);
}
-#endif /* __rtems__ */
-
+#else
#ifndef __rtems__
netisr_start_swi(curcpu, pcpu_find(curcpu));
#else /* __rtems__ */
netisr_start_swi(0, NULL);
#endif /* __rtems__ */
+#endif
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
#ifndef __rtems__
+#ifndef EARLY_AP_STARTUP
/*
* Start worker threads for additional CPUs. No attempt to gracefully handle
* work reassignment, we don't yet support dynamic reconfiguration.
@@ -1285,9 +1385,6 @@ netisr_start(void *arg)
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (nws_count >= netisr_maxthreads)
break;
- /* XXXRW: Is skipping absent CPUs still required here? */
- if (CPU_ABSENT(pc->pc_cpuid))
- continue;
/* Worker will already be present for boot CPU. */
if (pc->pc_netisr != NULL)
continue;
@@ -1295,6 +1392,7 @@ netisr_start(void *arg)
}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
+#endif
#endif /* __rtems__ */
/*