author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-08-20 15:53:03 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-09-21 10:29:39 +0200
commit    18fa92c2dcc6c52e0bf27d214d80f0c25a89b47d (patch)
tree      a3020ac5b1f366f2f0920941b589808e435dbcee /freebsd/sys/netinet/tcp_output.c
parent    Update to FreeBSD head 2017-12-01 (diff)
download  rtems-libbsd-18fa92c2dcc6c52e0bf27d214d80f0c25a89b47d.tar.bz2
Update to FreeBSD head 2018-02-01
Git mirror commit d079ae0442af8fa3cfd6d7ede190d04e64a2c0d4. Update #3472.
Diffstat (limited to 'freebsd/sys/netinet/tcp_output.c')
-rw-r--r--  freebsd/sys/netinet/tcp_output.c  122
1 file changed, 67 insertions, 55 deletions
diff --git a/freebsd/sys/netinet/tcp_output.c b/freebsd/sys/netinet/tcp_output.c
index 1cb622ac..d0f08e3a 100644
--- a/freebsd/sys/netinet/tcp_output.c
+++ b/freebsd/sys/netinet/tcp_output.c
@@ -200,7 +200,9 @@ tcp_output(struct tcpcb *tp)
int off, flags, error = 0; /* Keep compiler happy */
struct mbuf *m;
struct ip *ip = NULL;
+#ifdef TCPDEBUG
struct ipovly *ipov = NULL;
+#endif
struct tcphdr *th;
u_char opt[TCP_MAXOLEN];
unsigned ipoptlen, optlen, hdrlen;
@@ -489,59 +491,7 @@ after_sack_rexmit:
/* len will be >= 0 after this point. */
KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
- /*
- * Automatic sizing of send socket buffer. Often the send buffer
- * size is not optimally adjusted to the actual network conditions
- * at hand (delay bandwidth product). Setting the buffer size too
- * small limits throughput on links with high bandwidth and high
- * delay (eg. trans-continental/oceanic links). Setting the
- * buffer size too big consumes too much real kernel memory,
- * especially with many connections on busy servers.
- *
- * The criteria to step up the send buffer one notch are:
- * 1. receive window of remote host is larger than send buffer
- * (with a fudge factor of 5/4th);
- * 2. send buffer is filled to 7/8th with data (so we actually
- * have data to make use of it);
- * 3. send buffer fill has not hit maximal automatic size;
- * 4. our send window (slow start and congestion controlled) is
- * larger than sent but unacknowledged data in send buffer.
- *
- * The remote host receive window scaling factor may limit the
- * growing of the send buffer before it reaches its allowed
- * maximum.
- *
- * It scales directly with slow start or congestion window
- * and does at most one step per received ACK. This fast
- * scaling has the drawback of growing the send buffer beyond
- * what is strictly necessary to make full use of a given
- * delay*bandwidth product. However testing has shown this not
- * to be much of a problem. At worst we are trading wasting
- * of available bandwidth (the non-use of it) for wasting some
- * socket buffer memory.
- *
- * TODO: Shrink send buffer during idle periods together
- * with congestion window. Requires another timer. Has to
- * wait for upcoming tcp timer rewrite.
- *
- * XXXGL: should sbused() or sbavail() be used here?
- */
- if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
- int lowat;
-
- lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
- if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
- sbused(&so->so_snd) >=
- (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
- sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
- sendwin >= (sbused(&so->so_snd) -
- (tp->snd_nxt - tp->snd_una))) {
- if (!sbreserve_locked(&so->so_snd,
- min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
- V_tcp_autosndbuf_max), so, curthread))
- so->so_snd.sb_flags &= ~SB_AUTOSIZE;
- }
- }
+ tcp_sndbuf_autoscale(tp, so, sendwin);
/*
* Decide if we can use TCP Segmentation Offloading (if supported by
@@ -1145,7 +1095,9 @@ send:
#endif /* INET6 */
{
ip = mtod(m, struct ip *);
+#ifdef TCPDEBUG
ipov = (struct ipovly *)ip;
+#endif
th = (struct tcphdr *)(ip + 1);
tcpip_fillheaders(tp->t_inpcb, ip, th);
}
@@ -1293,12 +1245,13 @@ send:
* NOTE: since TCP options buffer doesn't point into
* mbuf's data, calculate offset and use it.
*/
- if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
- (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
+ if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
+ (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
/*
* Do not send segment if the calculation of MD5
* digest has failed.
*/
+ m_freem(m);
goto out;
}
}
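
The hunk above tightens the failure path for TCP-MD5 signing in two ways: the return value of TCPMD5_OUTPUT() is now stored in error, so the caller sees why the segment was dropped, and the mbuf is explicitly freed before the jump to out, presumably so the buffer is not lost on this early exit. The following is a minimal, self-contained sketch of that pattern only; compute_signature() and the plain allocated buffer are hypothetical stand-ins for the kernel's TCPMD5_OUTPUT() and mbuf handling, not code from the commit.

/*
 * Illustrative sketch (not from the commit): record the error code and
 * release the buffer before taking the error exit.  compute_signature()
 * and the allocated buffer are hypothetical stand-ins for TCPMD5_OUTPUT()
 * and the mbuf.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int
compute_signature(unsigned char *buf, size_t len)
{
	(void)buf;
	(void)len;
	return (EINVAL);	/* pretend the digest calculation failed */
}

static int
send_segment(size_t len)
{
	unsigned char *seg;
	int error = 0;

	if ((seg = calloc(1, len)) == NULL)
		return (ENOMEM);

	/* Capture the error instead of discarding it ... */
	if ((error = compute_signature(seg, len)) != 0) {
		/* ... and free the buffer before bailing out. */
		free(seg);
		goto out;
	}
	/* (the segment would be transmitted here) */
	free(seg);
out:
	return (error);
}

int
main(void)
{
	printf("send_segment() returned %d\n", send_segment(512));
	return (0);
}
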
@@ -1860,3 +1813,62 @@ tcp_addoptions(struct tcpopt *to, u_char *optp)
KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
return (optlen);
}
+
+void
+tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
+{
+
+ /*
+ * Automatic sizing of send socket buffer. Often the send buffer
+ * size is not optimally adjusted to the actual network conditions
+ * at hand (delay bandwidth product). Setting the buffer size too
+ * small limits throughput on links with high bandwidth and high
+ * delay (eg. trans-continental/oceanic links). Setting the
+ * buffer size too big consumes too much real kernel memory,
+ * especially with many connections on busy servers.
+ *
+ * The criteria to step up the send buffer one notch are:
+ * 1. receive window of remote host is larger than send buffer
+ * (with a fudge factor of 5/4th);
+ * 2. send buffer is filled to 7/8th with data (so we actually
+ * have data to make use of it);
+ * 3. send buffer fill has not hit maximal automatic size;
+ * 4. our send window (slow start and congestion controlled) is
+ * larger than sent but unacknowledged data in send buffer.
+ *
+ * The remote host receive window scaling factor may limit the
+ * growing of the send buffer before it reaches its allowed
+ * maximum.
+ *
+ * It scales directly with slow start or congestion window
+ * and does at most one step per received ACK. This fast
+ * scaling has the drawback of growing the send buffer beyond
+ * what is strictly necessary to make full use of a given
+ * delay*bandwidth product. However testing has shown this not
+ * to be much of a problem. At worst we are trading wasting
+ * of available bandwidth (the non-use of it) for wasting some
+ * socket buffer memory.
+ *
+ * TODO: Shrink send buffer during idle periods together
+ * with congestion window. Requires another timer. Has to
+ * wait for upcoming tcp timer rewrite.
+ *
+ * XXXGL: should sbused() or sbavail() be used here?
+ */
+ if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
+ int lowat;
+
+ lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
+ if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
+ sbused(&so->so_snd) >=
+ (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
+ sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
+ sendwin >= (sbused(&so->so_snd) -
+ (tp->snd_nxt - tp->snd_una))) {
+ if (!sbreserve_locked(&so->so_snd,
+ min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
+ V_tcp_autosndbuf_max), so, curthread))
+ so->so_snd.sb_flags &= ~SB_AUTOSIZE;
+ }
+ }
+}
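
For readers following the step-up criteria in the comment above, here is a small standalone sketch of the same arithmetic. It is illustrative only: plain integers replace the socket and tcpcb fields, the tunables are hypothetical constants, and should_grow_sndbuf() is not a function from the commit; it merely restates conditions 1-4 and the min(hiwat + inc, max) growth step in isolation.

/*
 * Illustrative sketch (not from the commit): the four send-buffer
 * step-up criteria from tcp_sndbuf_autoscale(), modelled with plain
 * integers.  All names here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t autosndbuf_max = 2U * 1024 * 1024;  /* growth cap */
static const uint32_t autosndbuf_inc = 8U * 1024;         /* growth step */

static bool
should_grow_sndbuf(uint32_t snd_wnd,  /* peer's advertised receive window */
    uint32_t sb_hiwat,                /* current send buffer size */
    uint32_t sb_used,                 /* bytes queued in the send buffer */
    uint32_t unacked,                 /* snd_nxt - snd_una */
    uint32_t sendwin,                 /* effective (congestion) send window */
    uint32_t lowat)                   /* sb_lowat, or 0 if disabled */
{
	return ((snd_wnd / 4 * 5) >= sb_hiwat - lowat &&    /* criterion 1 */
	    sb_used >= (sb_hiwat / 8 * 7) - lowat &&        /* criterion 2 */
	    sb_used < autosndbuf_max &&                     /* criterion 3 */
	    sendwin >= sb_used - unacked);                  /* criterion 4 */
}

int
main(void)
{
	uint32_t hiwat = 64 * 1024;

	/* Worked example: peer advertises 128 KiB, buffer is 60 KiB full. */
	if (should_grow_sndbuf(128 * 1024, hiwat, 60 * 1024, 8 * 1024,
	    96 * 1024, 0)) {
		uint32_t next = hiwat + autosndbuf_inc;

		if (next > autosndbuf_max)
			next = autosndbuf_max;
		printf("grow send buffer: %u -> %u bytes\n",
		    (unsigned)hiwat, (unsigned)next);
	}
	return (0);
}

The real function additionally requires the SB_AUTOSIZE socket-buffer flag and the tcp_do_autosndbuf tunable to be set, and it clears SB_AUTOSIZE when sbreserve_locked() declines to grow the buffer, as shown in the hunk above.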