path: root/freebsd/sys/netinet
author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-12-09 14:19:03 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-01-10 09:53:34 +0100
commit     75b706fde4cbf82bcd41a1cec319778aa0f8eb2d (patch)
tree       ea39a351a1f6337b5a5dd6036314693adef5ffe6 /freebsd/sys/netinet
parent     VMSTAT(8): Port to RTEMS (diff)
download   rtems-libbsd-75b706fde4cbf82bcd41a1cec319778aa0f8eb2d.tar.bz2
Update to FreeBSD head 2016-12-10
Git mirror commit 80c55f08a05ab3b26a73b226ccb56adc3122a55c.
Diffstat (limited to 'freebsd/sys/netinet')
-rw-r--r--  freebsd/sys/netinet/cc/cc.h  |    1
-rw-r--r--  freebsd/sys/netinet/cc/cc_newreno.c  |   29
-rw-r--r--  freebsd/sys/netinet/if_ether.c  |  128
-rw-r--r--  freebsd/sys/netinet/igmp.c  |    8
-rw-r--r--  freebsd/sys/netinet/in.c  |    8
-rw-r--r--  freebsd/sys/netinet/in_pcb.c  |   10
-rw-r--r--  freebsd/sys/netinet/in_var.h  |    2
-rw-r--r--  freebsd/sys/netinet/ip_fastfwd.c  |  135
-rw-r--r--  freebsd/sys/netinet/ip_icmp.c  |    2
-rw-r--r--  freebsd/sys/netinet/ip_input.c  |    2
-rw-r--r--  freebsd/sys/netinet/ip_mroute.c  |    8
-rw-r--r--  freebsd/sys/netinet/ip_output.c  |   13
-rw-r--r--  freebsd/sys/netinet/raw_ip.c  |    4
-rw-r--r--  freebsd/sys/netinet/sctp_asconf.c  |   54
-rw-r--r--  freebsd/sys/netinet/sctp_asconf.h  |    1
-rw-r--r--  freebsd/sys/netinet/sctp_bsd_addr.c  |   11
-rw-r--r--  freebsd/sys/netinet/sctp_bsd_addr.h  |    1
-rw-r--r--  freebsd/sys/netinet/sctp_cc_functions.c  |   77
-rw-r--r--  freebsd/sys/netinet/sctp_constants.h  |    5
-rw-r--r--  freebsd/sys/netinet/sctp_crc32.c  |    1
-rw-r--r--  freebsd/sys/netinet/sctp_crc32.h  |    2
-rw-r--r--  freebsd/sys/netinet/sctp_header.h  |   21
-rw-r--r--  freebsd/sys/netinet/sctp_indata.c  |  776
-rw-r--r--  freebsd/sys/netinet/sctp_indata.h  |   11
-rw-r--r--  freebsd/sys/netinet/sctp_input.c  |  136
-rw-r--r--  freebsd/sys/netinet/sctp_os_bsd.h  |    1
-rw-r--r--  freebsd/sys/netinet/sctp_output.c  |  386
-rw-r--r--  freebsd/sys/netinet/sctp_pcb.c  |  147
-rw-r--r--  freebsd/sys/netinet/sctp_pcb.h  |    9
-rw-r--r--  freebsd/sys/netinet/sctp_peeloff.h  |    1
-rw-r--r--  freebsd/sys/netinet/sctp_structs.h  |   28
-rw-r--r--  freebsd/sys/netinet/sctp_sysctl.c  |    3
-rw-r--r--  freebsd/sys/netinet/sctp_timer.c  |   48
-rw-r--r--  freebsd/sys/netinet/sctp_uio.h  |    3
-rw-r--r--  freebsd/sys/netinet/sctp_usrreq.c  |  142
-rw-r--r--  freebsd/sys/netinet/sctp_var.h  |    6
-rw-r--r--  freebsd/sys/netinet/sctputil.c  |  185
-rw-r--r--  freebsd/sys/netinet/sctputil.h  |    5
-rw-r--r--  freebsd/sys/netinet/tcp_debug.c  |    4
-rw-r--r--  freebsd/sys/netinet/tcp_fsm.h  |    3
-rw-r--r--  freebsd/sys/netinet/tcp_hostcache.c  |   56
-rw-r--r--  freebsd/sys/netinet/tcp_hostcache.h  |   28
-rw-r--r--  freebsd/sys/netinet/tcp_input.c  |  181
-rw-r--r--  freebsd/sys/netinet/tcp_lro.c  |    1
-rw-r--r--  freebsd/sys/netinet/tcp_output.c  |   92
-rw-r--r--  freebsd/sys/netinet/tcp_seq.h  |    7
-rw-r--r--  freebsd/sys/netinet/tcp_subr.c  |   50
-rw-r--r--  freebsd/sys/netinet/tcp_syncache.c  |   51
-rw-r--r--  freebsd/sys/netinet/tcp_timer.c  |   20
-rw-r--r--  freebsd/sys/netinet/tcp_timewait.c  |    5
-rw-r--r--  freebsd/sys/netinet/tcp_usrreq.c  |   53
-rw-r--r--  freebsd/sys/netinet/tcp_var.h  |   51
-rw-r--r--  freebsd/sys/netinet/udp_usrreq.c  |   11
53 files changed, 1436 insertions, 1587 deletions
diff --git a/freebsd/sys/netinet/cc/cc.h b/freebsd/sys/netinet/cc/cc.h
index 1da6f620..5e61b04b 100644
--- a/freebsd/sys/netinet/cc/cc.h
+++ b/freebsd/sys/netinet/cc/cc.h
@@ -86,6 +86,7 @@ struct cc_var {
struct tcpcb *tcp;
struct sctp_nets *sctp;
} ccvc;
+ uint16_t nsegs; /* # segments coalesced into current chain. */
};
/* cc_var flags. */
diff --git a/freebsd/sys/netinet/cc/cc_newreno.c b/freebsd/sys/netinet/cc/cc_newreno.c
index 8077bb22..4c21036a 100644
--- a/freebsd/sys/netinet/cc/cc_newreno.c
+++ b/freebsd/sys/netinet/cc/cc_newreno.c
@@ -139,7 +139,8 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
*/
if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
incr = min(ccv->bytes_this_ack,
- V_tcp_abc_l_var * CCV(ccv, t_maxseg));
+ ccv->nsegs * V_tcp_abc_l_var *
+ CCV(ccv, t_maxseg));
else
incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));
}
@@ -183,30 +184,42 @@ newreno_after_idle(struct cc_var *ccv)
static void
newreno_cong_signal(struct cc_var *ccv, uint32_t type)
{
- u_int win;
+ uint32_t cwin, ssthresh_on_loss;
+ u_int mss;
+
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
+ ssthresh_on_loss =
+ max((CCV(ccv, snd_max) - CCV(ccv, snd_una)) / 2 / mss, 2)
+ * mss;
/* Catch algos which mistakenly leak private signal types. */
KASSERT((type & CC_SIGPRIVMASK) == 0,
("%s: congestion signal type 0x%08x is private\n", __func__, type));
- win = max(CCV(ccv, snd_cwnd) / 2 / CCV(ccv, t_maxseg), 2) *
- CCV(ccv, t_maxseg);
+ cwin = max(cwin / 2 / mss, 2) * mss;
switch (type) {
case CC_NDUPACK:
if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
- if (!IN_CONGRECOVERY(CCV(ccv, t_flags)))
- CCV(ccv, snd_ssthresh) = win;
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ CCV(ccv, snd_ssthresh) = ssthresh_on_loss;
+ CCV(ccv, snd_cwnd) = cwin;
+ }
ENTER_RECOVERY(CCV(ccv, t_flags));
}
break;
case CC_ECN:
if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
- CCV(ccv, snd_ssthresh) = win;
- CCV(ccv, snd_cwnd) = win;
+ CCV(ccv, snd_ssthresh) = ssthresh_on_loss;
+ CCV(ccv, snd_cwnd) = cwin;
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
}
break;
+ case CC_RTO:
+ CCV(ccv, snd_ssthresh) = ssthresh_on_loss;
+ CCV(ccv, snd_cwnd) = mss;
+ break;
}
}
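The cc_newreno.c hunk above changes how NewReno reacts to congestion signals: the slow-start threshold is now derived from the amount of data in flight (snd_max - snd_una) instead of from snd_cwnd, and a new CC_RTO case collapses the congestion window to a single segment. The standalone sketch below (illustrative only, not part of the patch; the local variable names are mine) reproduces that arithmetic with example values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Recompute the values assigned in newreno_cong_signal() above,
 * independent of the kernel's CCV() accessors. */
static uint32_t umax(uint32_t a, uint32_t b) { return (a > b) ? a : b; }

int main(void)
{
	uint32_t mss = 1460;           /* t_maxseg */
	uint32_t cwnd = 10 * mss;      /* snd_cwnd = 14600 bytes */
	uint32_t flight = 8 * mss;     /* snd_max - snd_una = 11680 bytes */

	/* ssthresh on loss: half the outstanding data, but at least 2 MSS. */
	uint32_t ssthresh_on_loss = umax(flight / 2 / mss, 2) * mss;
	/* cwnd after CC_NDUPACK/CC_ECN: half the old cwnd, at least 2 MSS. */
	uint32_t cwin = umax(cwnd / 2 / mss, 2) * mss;

	assert(ssthresh_on_loss == 4 * mss);   /* 5840 bytes */
	assert(cwin == 5 * mss);               /* 7300 bytes */

	/* CC_RTO additionally resets cwnd to a single segment. */
	printf("ssthresh=%u cwin=%u cwnd_after_rto=%u\n",
	    ssthresh_on_loss, cwin, mss);
	return 0;
}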
diff --git a/freebsd/sys/netinet/if_ether.c b/freebsd/sys/netinet/if_ether.c
index 0a8b101e..9fb25c21 100644
--- a/freebsd/sys/netinet/if_ether.c
+++ b/freebsd/sys/netinet/if_ether.c
@@ -139,6 +139,28 @@ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_log_per_second,
"Maximum number of remotely triggered ARP messages that can be "
"logged per second");
+/*
+ * Due to the exponential backoff algorithm used for the interval between GARP
+ * retransmissions, the maximum number of retransmissions is limited for
+ * sanity. This limit corresponds to a maximum interval between retransmissions
+ * of 2^16 seconds ~= 18 hours.
+ *
+ * Making this limit more dynamic is more complicated than worthwhile,
+ * especially since sending out GARPs spaced days apart would be of little
+ * use. A maximum dynamic limit would look something like:
+ *
+ * const int max = fls(INT_MAX / hz) - 1;
+ */
+#define MAX_GARP_RETRANSMITS 16
+static int sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS);
+static int garp_rexmit_count = 0; /* GARP retransmission setting. */
+
+SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, garp_rexmit_count,
+ CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &garp_rexmit_count, 0, sysctl_garp_rexmit, "I",
+ "Number of times to retransmit GARP packets;"
+ " 0 to disable, maximum of 16");
+
#define ARP_LOG(pri, ...) do { \
if (ppsratecheck(&arp_lastlog, &arp_curpps, arp_maxpps)) \
log((pri), "arp: " __VA_ARGS__); \
@@ -1289,6 +1311,109 @@ arp_add_ifa_lle(struct ifnet *ifp, const struct sockaddr *dst)
lltable_free_entry(LLTABLE(ifp), lle_tmp);
}
+/*
+ * Handle the garp_rexmit_count. Like sysctl_handle_int(), but limits the range
+ * of valid values.
+ */
+static int
+sysctl_garp_rexmit(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int rexmit_count = *(int *)arg1;
+
+ error = sysctl_handle_int(oidp, &rexmit_count, 0, req);
+
+ /* Enforce limits on any new value that may have been set. */
+ if (!error && req->newptr) {
+ /* A new value was set. */
+ if (rexmit_count < 0) {
+ rexmit_count = 0;
+ } else if (rexmit_count > MAX_GARP_RETRANSMITS) {
+ rexmit_count = MAX_GARP_RETRANSMITS;
+ }
+ *(int *)arg1 = rexmit_count;
+ }
+
+ return (error);
+}
+
+/*
+ * Retransmit a Gratuitous ARP (GARP) and, if necessary, schedule a callout to
+ * retransmit it again. A pending callout owns a reference to the ifa.
+ */
+static void
+garp_rexmit(void *arg)
+{
+ struct in_ifaddr *ia = arg;
+
+ if (callout_pending(&ia->ia_garp_timer) ||
+ !callout_active(&ia->ia_garp_timer)) {
+ IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
+ ifa_free(&ia->ia_ifa);
+ return;
+ }
+
+ /*
+ * Drop lock while the ARP request is generated.
+ */
+ IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
+
+ arprequest(ia->ia_ifa.ifa_ifp, &IA_SIN(ia)->sin_addr,
+ &IA_SIN(ia)->sin_addr, IF_LLADDR(ia->ia_ifa.ifa_ifp));
+
+ /*
+ * Increment the count of retransmissions. If the count has reached the
+ * maximum value, stop sending the GARP packets. Otherwise, schedule
+ * the callout to retransmit another GARP packet.
+ */
+ ++ia->ia_garp_count;
+ if (ia->ia_garp_count >= garp_rexmit_count) {
+ ifa_free(&ia->ia_ifa);
+ } else {
+ int rescheduled;
+ IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
+ rescheduled = callout_reset(&ia->ia_garp_timer,
+ (1 << ia->ia_garp_count) * hz,
+ garp_rexmit, ia);
+ IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
+ if (rescheduled) {
+ ifa_free(&ia->ia_ifa);
+ }
+ }
+}
+
+/*
+ * Start the GARP retransmit timer.
+ *
+ * A single GARP is always transmitted when an IPv4 address is added
+ * to an interface and that is usually sufficient. However, in some
+ * circumstances, such as when a shared address is passed between
+ * cluster nodes, this single GARP may occasionally be dropped or
+ * lost. This can lead to neighbors on the network link working with a
+ * stale ARP cache and sending packets destined for that address to
+ * the node that previously owned the address, which may not respond.
+ *
+ * To avoid this situation, GARP retransmits can be enabled by setting
+ * the net.link.ether.inet.garp_rexmit_count sysctl to a value greater
+ * than zero. The setting represents the maximum number of
+ * retransmissions. The interval between retransmissions is calculated
+ * using an exponential backoff algorithm, doubling each time, so the
+ * retransmission intervals are: {1, 2, 4, 8, 16, ...} (seconds).
+ */
+static void
+garp_timer_start(struct ifaddr *ifa)
+{
+ struct in_ifaddr *ia = (struct in_ifaddr *) ifa;
+
+ IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
+ ia->ia_garp_count = 0;
+ if (callout_reset(&ia->ia_garp_timer, (1 << ia->ia_garp_count) * hz,
+ garp_rexmit, ia) == 0) {
+ ifa_ref(ifa);
+ }
+ IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
+}
+
void
arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
{
@@ -1304,6 +1429,9 @@ arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
if (ntohl(dst_in->sin_addr.s_addr) == INADDR_ANY)
return;
arp_announce_ifaddr(ifp, dst_in->sin_addr, IF_LLADDR(ifp));
+ if (garp_rexmit_count > 0) {
+ garp_timer_start(ifa);
+ }
arp_add_ifa_lle(ifp, dst);
}
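The if_ether.c additions above implement optional retransmission of gratuitous ARP: garp_timer_start() arms a callout with a delay of (1 << 0) * hz, and garp_rexmit() keeps doubling the delay, (1 << count) * hz, until garp_rexmit_count retransmissions (capped at MAX_GARP_RETRANSMITS = 16) have gone out. A minimal user-space sketch of the resulting schedule, assuming the sysctl net.link.ether.inet.garp_rexmit_count has been set to 4 (illustrative only, not part of the patch):

#include <stdio.h>

/* Print the retransmission schedule implied by garp_rexmit() above:
 * each retransmission waits twice as long as the previous one. */
int main(void)
{
	int garp_rexmit_count = 4;   /* example sysctl value, range 0..16 */
	unsigned int elapsed = 0;    /* seconds since the initial GARP */
	int count;

	for (count = 0; count < garp_rexmit_count; count++) {
		elapsed += 1u << count;   /* callout delay: (1 << count) * hz */
		printf("retransmit %d at t = %u s\n", count + 1, elapsed);
	}
	/* Prints t = 1, 3, 7, 15 s after the initial announcement. */
	return 0;
}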
diff --git a/freebsd/sys/netinet/igmp.c b/freebsd/sys/netinet/igmp.c
index cd57e426..12bb2f07 100644
--- a/freebsd/sys/netinet/igmp.c
+++ b/freebsd/sys/netinet/igmp.c
@@ -545,10 +545,10 @@ igmp_ra_alloc(void)
m = m_get(M_WAITOK, MT_DATA);
p = mtod(m, struct ipoption *);
p->ipopt_dst.s_addr = INADDR_ANY;
- p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
- p->ipopt_list[1] = 0x04; /* 4 bytes long */
- p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
- p->ipopt_list[3] = 0x00; /* pad byte */
+ p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */
+ p->ipopt_list[1] = 0x04; /* 4 bytes long */
+ p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
+ p->ipopt_list[3] = 0x00; /* pad byte */
m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
return (m);
diff --git a/freebsd/sys/netinet/in.c b/freebsd/sys/netinet/in.c
index 06b23973..f08e550b 100644
--- a/freebsd/sys/netinet/in.c
+++ b/freebsd/sys/netinet/in.c
@@ -401,6 +401,8 @@ in_aifaddr_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp, struct thread *td)
ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr;
ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr;
ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask;
+ callout_init_rw(&ia->ia_garp_timer, &ifp->if_addr_lock,
+ CALLOUT_RETURNUNLOCKED);
ia->ia_ifp = ifp;
ia->ia_addr = *addr;
@@ -639,6 +641,12 @@ in_difaddr_ioctl(caddr_t data, struct ifnet *ifp, struct thread *td)
IN_MULTI_UNLOCK();
}
+ IF_ADDR_WLOCK(ifp);
+ if (callout_stop(&ia->ia_garp_timer) == 1) {
+ ifa_free(&ia->ia_ifa);
+ }
+ IF_ADDR_WUNLOCK(ifp);
+
EVENTHANDLER_INVOKE(ifaddr_event, ifp);
ifa_free(&ia->ia_ifa); /* in_ifaddrhead */
diff --git a/freebsd/sys/netinet/in_pcb.c b/freebsd/sys/netinet/in_pcb.c
index f8790938..809a7de0 100644
--- a/freebsd/sys/netinet/in_pcb.c
+++ b/freebsd/sys/netinet/in_pcb.c
@@ -1311,10 +1311,7 @@ in_pcbfree(struct inpcb *inp)
if (inp->inp_moptions != NULL)
inp_freemoptions(inp->inp_moptions);
#endif
- if (inp->inp_route.ro_rt) {
- RTFREE(inp->inp_route.ro_rt);
- inp->inp_route.ro_rt = (struct rtentry *)NULL;
- }
+ RO_RTFREE(&inp->inp_route);
if (inp->inp_route.ro_lle)
LLE_FREE(inp->inp_route.ro_lle); /* zeros ro_lle */
@@ -2254,10 +2251,7 @@ void
in_losing(struct inpcb *inp)
{
- if (inp->inp_route.ro_rt) {
- RTFREE(inp->inp_route.ro_rt);
- inp->inp_route.ro_rt = (struct rtentry *)NULL;
- }
+ RO_RTFREE(&inp->inp_route);
if (inp->inp_route.ro_lle)
LLE_FREE(inp->inp_route.ro_lle); /* zeros ro_lle */
return;
diff --git a/freebsd/sys/netinet/in_var.h b/freebsd/sys/netinet/in_var.h
index af83e9a1..08055c4f 100644
--- a/freebsd/sys/netinet/in_var.h
+++ b/freebsd/sys/netinet/in_var.h
@@ -82,6 +82,8 @@ struct in_ifaddr {
struct sockaddr_in ia_dstaddr; /* reserve space for broadcast addr */
#define ia_broadaddr ia_dstaddr
struct sockaddr_in ia_sockmask; /* reserve space for general netmask */
+ struct callout ia_garp_timer; /* timer for retransmitting GARPs */
+ int ia_garp_count; /* count of retransmitted GARPs */
};
/*
diff --git a/freebsd/sys/netinet/ip_fastfwd.c b/freebsd/sys/netinet/ip_fastfwd.c
index 19dfb1ab..bc4d70b4 100644
--- a/freebsd/sys/netinet/ip_fastfwd.c
+++ b/freebsd/sys/netinet/ip_fastfwd.c
@@ -99,6 +99,7 @@ __FBSDID("$FreeBSD$");
#include <net/vnet.h>
#include <netinet/in.h>
+#include <netinet/in_fib.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
@@ -109,40 +110,33 @@ __FBSDID("$FreeBSD$");
#include <machine/in_cksum.h>
-static struct sockaddr_in *
-ip_findroute(struct route *ro, struct in_addr dest, struct mbuf *m)
+static int
+ip_findroute(struct nhop4_basic *pnh, struct in_addr dest, struct mbuf *m)
{
- struct sockaddr_in *dst;
- struct rtentry *rt;
+ bzero(pnh, sizeof(*pnh));
+ if (fib4_lookup_nh_basic(M_GETFIB(m), dest, 0, 0, pnh) != 0) {
+ IPSTAT_INC(ips_noroute);
+ IPSTAT_INC(ips_cantforward);
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
+ return (EHOSTUNREACH);
+ }
/*
- * Find route to destination.
+ * Drop blackholed traffic and directed broadcasts.
*/
- bzero(ro, sizeof(*ro));
- dst = (struct sockaddr_in *)&ro->ro_dst;
- dst->sin_family = AF_INET;
- dst->sin_len = sizeof(*dst);
- dst->sin_addr.s_addr = dest.s_addr;
- in_rtalloc_ign(ro, 0, M_GETFIB(m));
+ if ((pnh->nh_flags & (NHF_BLACKHOLE | NHF_BROADCAST)) != 0) {
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ return (EHOSTUNREACH);
+ }
- /*
- * Route there and interface still up?
- */
- rt = ro->ro_rt;
- if (rt && (rt->rt_flags & RTF_UP) &&
- (rt->rt_ifp->if_flags & IFF_UP) &&
- (rt->rt_ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if (rt->rt_flags & RTF_GATEWAY)
- dst = (struct sockaddr_in *)rt->rt_gateway;
- } else {
- IPSTAT_INC(ips_noroute);
+ if (pnh->nh_flags & NHF_REJECT) {
IPSTAT_INC(ips_cantforward);
- if (rt)
- RTFREE(rt);
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
- return NULL;
+ return (EHOSTUNREACH);
}
- return dst;
+
+ return (0);
}
/*
@@ -157,13 +151,11 @@ ip_tryforward(struct mbuf *m)
{
struct ip *ip;
struct mbuf *m0 = NULL;
- struct route ro;
- struct sockaddr_in *dst = NULL;
- struct ifnet *ifp;
+ struct nhop4_basic nh;
+ struct sockaddr_in dst;
struct in_addr odest, dest;
uint16_t ip_len, ip_off;
int error = 0;
- int mtu;
struct m_tag *fwd_tag = NULL;
/*
@@ -173,9 +165,6 @@ ip_tryforward(struct mbuf *m)
M_ASSERTVALID(m);
M_ASSERTPKTHDR(m);
- bzero(&ro, sizeof(ro));
-
-
#ifdef ALTQ
/*
* Is packet dropped by traffic conditioner?
@@ -307,29 +296,17 @@ passin:
/*
* Find route to destination.
*/
- if ((dst = ip_findroute(&ro, dest, m)) == NULL)
- return NULL; /* icmp unreach already sent */
- ifp = ro.ro_rt->rt_ifp;
-
- /*
- * Immediately drop blackholed traffic, and directed broadcasts
- * for either the all-ones or all-zero subnet addresses on
- * locally attached networks.
- */
- if ((ro.ro_rt->rt_flags & (RTF_BLACKHOLE|RTF_BROADCAST)) != 0)
- goto drop;
+ if (ip_findroute(&nh, dest, m) != 0)
+ return (NULL); /* icmp unreach already sent */
/*
* Step 5: outgoing firewall packet processing
*/
-
- /*
- * Run through list of hooks for output packets.
- */
if (!PFIL_HOOKED(&V_inet_pfil_hook))
goto passout;
- if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_OUT, NULL) || m == NULL) {
+ if (pfil_run_hooks(&V_inet_pfil_hook, &m, nh.nh_ifp, PFIL_OUT, NULL) ||
+ m == NULL) {
goto drop;
}
@@ -354,9 +331,7 @@ forwardlocal:
* Return packet for processing by ip_input().
*/
m->m_flags |= M_FASTFWD_OURS;
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
- return m;
+ return (m);
}
/*
* Redo route lookup with new destination address
@@ -367,10 +342,8 @@ forwardlocal:
m_tag_delete(m, fwd_tag);
m->m_flags &= ~M_IP_NEXTHOP;
}
- RTFREE(ro.ro_rt);
- if ((dst = ip_findroute(&ro, dest, m)) == NULL)
- return NULL; /* icmp unreach already sent */
- ifp = ro.ro_rt->rt_ifp;
+ if (ip_findroute(&nh, dest, m) != 0)
+ return (NULL); /* icmp unreach already sent */
}
passout:
@@ -380,32 +353,15 @@ passout:
ip_len = ntohs(ip->ip_len);
ip_off = ntohs(ip->ip_off);
- /*
- * Check if route is dampned (when ARP is unable to resolve)
- */
- if ((ro.ro_rt->rt_flags & RTF_REJECT) &&
- (ro.ro_rt->rt_expire == 0 || time_uptime < ro.ro_rt->rt_expire)) {
- icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
- goto consumed;
- }
-
- /*
- * Check if media link state of interface is not down
- */
- if (ifp->if_link_state == LINK_STATE_DOWN) {
- icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
- goto consumed;
- }
+ bzero(&dst, sizeof(dst));
+ dst.sin_family = AF_INET;
+ dst.sin_len = sizeof(dst);
+ dst.sin_addr = nh.nh_addr;
/*
* Check if packet fits MTU or if hardware will fragment for us
*/
- if (ro.ro_rt->rt_mtu)
- mtu = min(ro.ro_rt->rt_mtu, ifp->if_mtu);
- else
- mtu = ifp->if_mtu;
-
- if (ip_len <= mtu) {
+ if (ip_len <= nh.nh_mtu) {
/*
* Avoid confusing lower layers.
*/
@@ -413,9 +369,9 @@ passout:
/*
* Send off the packet via outgoing interface
*/
- IP_PROBE(send, NULL, NULL, ip, ifp, ip, NULL);
- error = (*ifp->if_output)(ifp, m,
- (struct sockaddr *)dst, &ro);
+ IP_PROBE(send, NULL, NULL, ip, nh.nh_ifp, ip, NULL);
+ error = (*nh.nh_ifp->if_output)(nh.nh_ifp, m,
+ (struct sockaddr *)&dst, NULL);
} else {
/*
* Handle EMSGSIZE with icmp reply needfrag for TCP MTU discovery
@@ -423,14 +379,15 @@ passout:
if (ip_off & IP_DF) {
IPSTAT_INC(ips_cantfrag);
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
- 0, mtu);
+ 0, nh.nh_mtu);
goto consumed;
} else {
/*
* We have to fragment the packet
*/
m->m_pkthdr.csum_flags |= CSUM_IP;
- if (ip_fragment(ip, &m, mtu, ifp->if_hwassist))
+ if (ip_fragment(ip, &m, nh.nh_mtu,
+ nh.nh_ifp->if_hwassist) != 0)
goto drop;
KASSERT(m != NULL, ("null mbuf and no error"));
/*
@@ -445,9 +402,11 @@ passout:
*/
m_clrprotoflags(m);
- IP_PROBE(send, NULL, NULL, ip, ifp, ip, NULL);
- error = (*ifp->if_output)(ifp, m,
- (struct sockaddr *)dst, &ro);
+ IP_PROBE(send, NULL, NULL, ip, nh.nh_ifp,
+ ip, NULL);
+ /* XXX: we can use cached route here */
+ error = (*nh.nh_ifp->if_output)(nh.nh_ifp, m,
+ (struct sockaddr *)&dst, NULL);
if (error)
break;
} while ((m = m0) != NULL);
@@ -465,17 +424,13 @@ passout:
if (error != 0)
IPSTAT_INC(ips_odropped);
else {
- counter_u64_add(ro.ro_rt->rt_pksent, 1);
IPSTAT_INC(ips_forward);
IPSTAT_INC(ips_fastforward);
}
consumed:
- RTFREE(ro.ro_rt);
return NULL;
drop:
if (m)
m_freem(m);
- if (ro.ro_rt)
- RTFREE(ro.ro_rt);
return NULL;
}
diff --git a/freebsd/sys/netinet/ip_icmp.c b/freebsd/sys/netinet/ip_icmp.c
index f34cc4bd..a1331cac 100644
--- a/freebsd/sys/netinet/ip_icmp.c
+++ b/freebsd/sys/netinet/ip_icmp.c
@@ -459,6 +459,8 @@ icmp_input(struct mbuf **mp, int *offp, int proto)
* Treat subcodes 2,3 as immediate RST
*/
case ICMP_UNREACH_PROTOCOL:
+ code = PRC_UNREACH_PROTOCOL;
+ break;
case ICMP_UNREACH_PORT:
code = PRC_UNREACH_PORT;
break;
diff --git a/freebsd/sys/netinet/ip_input.c b/freebsd/sys/netinet/ip_input.c
index 425dbc1f..a2278616 100644
--- a/freebsd/sys/netinet/ip_input.c
+++ b/freebsd/sys/netinet/ip_input.c
@@ -1002,7 +1002,7 @@ ip_forward(struct mbuf *m, int srcrt)
* because unnecessary, or because rate limited), so we are
* really we are wasting a lot of work here.
*
- * We don't use m_copy() because it might return a reference
+ * We don't use m_copym() because it might return a reference
* to a shared cluster. Both this function and ip_output()
* assume exclusive access to the IP header in `m', so any
* data in a cluster may change before we reach icmp_error().
diff --git a/freebsd/sys/netinet/ip_mroute.c b/freebsd/sys/netinet/ip_mroute.c
index f8b14735..f5aa0a38 100644
--- a/freebsd/sys/netinet/ip_mroute.c
+++ b/freebsd/sys/netinet/ip_mroute.c
@@ -1346,7 +1346,7 @@ X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
goto fail;
/* Make a copy of the header to send to the user level process */
- mm = m_copy(mb0, 0, hlen);
+ mm = m_copym(mb0, 0, hlen, M_NOWAIT);
if (mm == NULL)
goto fail1;
@@ -1544,7 +1544,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
struct igmpmsg *im;
int hlen = ip->ip_hl << 2;
- struct mbuf *mm = m_copy(m, 0, hlen);
+ struct mbuf *mm = m_copym(m, 0, hlen, M_NOWAIT);
if (mm && (!M_WRITABLE(mm) || mm->m_len < hlen))
mm = m_pullup(mm, hlen);
@@ -2736,9 +2736,9 @@ pim_input(struct mbuf **mp, int *offp, int proto)
* actions (e.g., send back PIM_REGISTER_STOP).
* XXX: here m->m_data points to the outer IP header.
*/
- mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
+ mcp = m_copym(m, 0, iphlen + PIM_REG_MINLEN, M_NOWAIT);
if (mcp == NULL) {
- CTR1(KTR_IPMF, "%s: m_copy() failed", __func__);
+ CTR1(KTR_IPMF, "%s: m_copym() failed", __func__);
m_freem(m);
return (IPPROTO_DONE);
}
diff --git a/freebsd/sys/netinet/ip_output.c b/freebsd/sys/netinet/ip_output.c
index 81e7b123..5436ea2d 100644
--- a/freebsd/sys/netinet/ip_output.c
+++ b/freebsd/sys/netinet/ip_output.c
@@ -352,7 +352,8 @@ again:
have_ia_ref = 1;
ifp = ia->ia_ifp;
ip->ip_ttl = 1;
- isbroadcast = in_ifaddr_broadcast(dst->sin_addr, ia);
+ isbroadcast = ifp->if_flags & IFF_BROADCAST ?
+ in_ifaddr_broadcast(dst->sin_addr, ia) : 0;
} else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
imo != NULL && imo->imo_multicast_ifp != NULL) {
/*
@@ -405,8 +406,10 @@ again:
gw = (struct sockaddr_in *)rte->rt_gateway;
if (rte->rt_flags & RTF_HOST)
isbroadcast = (rte->rt_flags & RTF_BROADCAST);
- else
+ else if (ifp->if_flags & IFF_BROADCAST)
isbroadcast = in_ifaddr_broadcast(gw->sin_addr, ia);
+ else
+ isbroadcast = 0;
}
/*
@@ -706,11 +709,7 @@ sendit:
IPSTAT_INC(ips_fragmented);
done:
- /*
- * Release the route if using our private route, or if
- * (with flowtable) we don't have our own reference.
- */
- if (ro == &iproute || ro->ro_flags & RT_NORTREF)
+ if (ro == &iproute)
RO_RTFREE(ro);
else if (rte == NULL)
/*
diff --git a/freebsd/sys/netinet/raw_ip.c b/freebsd/sys/netinet/raw_ip.c
index a4679586..c379d681 100644
--- a/freebsd/sys/netinet/raw_ip.c
+++ b/freebsd/sys/netinet/raw_ip.c
@@ -324,7 +324,7 @@ rip_input(struct mbuf **mp, int *offp, int proto)
if (last != NULL) {
struct mbuf *n;
- n = m_copy(m, 0, (int)M_COPYALL);
+ n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
if (n != NULL)
(void) rip_append(last, ip, n, &ripsrc);
/* XXX count dropped packet */
@@ -402,7 +402,7 @@ rip_input(struct mbuf **mp, int *offp, int proto)
if (last != NULL) {
struct mbuf *n;
- n = m_copy(m, 0, (int)M_COPYALL);
+ n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
if (n != NULL)
(void) rip_append(last, ip, n, &ripsrc);
/* XXX count dropped packet */
diff --git a/freebsd/sys/netinet/sctp_asconf.c b/freebsd/sys/netinet/sctp_asconf.c
index 4256ab51..5d86e520 100644
--- a/freebsd/sys/netinet/sctp_asconf.c
+++ b/freebsd/sys/netinet/sctp_asconf.c
@@ -153,24 +153,19 @@ sctp_process_asconf_add_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *ap
union sctp_sockstore store;
struct sctp_paramhdr *ph;
uint16_t param_type, aparam_length;
-
#if defined(INET) || defined(INET6)
uint16_t param_length;
-
#endif
struct sockaddr *sa;
int zero_address = 0;
int bad_address = 0;
-
#ifdef INET
struct sockaddr_in *sin;
struct sctp_ipv4addr_param *v4addr;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
struct sctp_ipv6addr_param *v6addr;
-
#endif
aparam_length = ntohs(aph->ph.param_length);
@@ -308,24 +303,19 @@ sctp_process_asconf_delete_ip(struct sockaddr *src,
union sctp_sockstore store;
struct sctp_paramhdr *ph;
uint16_t param_type, aparam_length;
-
#if defined(INET) || defined(INET6)
uint16_t param_length;
-
#endif
struct sockaddr *sa;
int zero_address = 0;
int result;
-
#ifdef INET
struct sockaddr_in *sin;
struct sctp_ipv4addr_param *v4addr;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
struct sctp_ipv6addr_param *v6addr;
-
#endif
aparam_length = ntohs(aph->ph.param_length);
@@ -443,23 +433,18 @@ sctp_process_asconf_set_primary(struct sockaddr *src,
union sctp_sockstore store;
struct sctp_paramhdr *ph;
uint16_t param_type, aparam_length;
-
#if defined(INET) || defined(INET6)
uint16_t param_length;
-
#endif
struct sockaddr *sa;
int zero_address = 0;
-
#ifdef INET
struct sockaddr_in *sin;
struct sctp_ipv4addr_param *v4addr;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
struct sctp_ipv6addr_param *v6addr;
-
#endif
aparam_length = ntohs(aph->ph.param_length);
@@ -1787,7 +1772,7 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset,
* if there are any "sent" params still on the queue, these are
* implicitly "success", or "failed" (if we got an error back) ...
* so process these appropriately
- *
+ *
* we assume that the correlation_id's are monotonically increasing
* beginning from 1 and that we don't have *that* many outstanding
* at any given time
@@ -1863,7 +1848,6 @@ sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
/* didn't find one */
return (0);
}
-
#endif
/*
@@ -2437,10 +2421,8 @@ sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
}
}
- /*
- * we want to find the sequences which consist of ADD -> DEL -> ADD
- * or DEL -> ADD
- */
+ /* we want to find the sequences which consist of ADD -> DEL -> ADD
+ * or DEL -> ADD */
if (add_cnt > del_cnt ||
(add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
return (1);
@@ -2492,10 +2474,8 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
(!sctp_is_addr_pending(stcb, sctp_ifa)))
continue;
- /*
- * found a valid local v4 address to
- * use
- */
+ /* found a valid local v4 address to
+ * use */
if (addr_locked == SCTP_ADDR_NOT_LOCKED)
SCTP_IPI_ADDR_RUNLOCK();
return (&sctp_ifa->address.sa);
@@ -2512,10 +2492,8 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
}
sin6 = &sctp_ifa->address.sin6;
if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
- /*
- * we skip unspecifed
- * addresses
- */
+ /* we skip unspecifed
+ * addresses */
continue;
}
if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
@@ -2532,10 +2510,8 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
(!sctp_is_addr_pending(stcb, sctp_ifa)))
continue;
- /*
- * found a valid local v6 address to
- * use
- */
+ /* found a valid local v6 address to
+ * use */
if (addr_locked == SCTP_ADDR_NOT_LOCKED)
SCTP_IPI_ADDR_RUNLOCK();
return (&sctp_ifa->address.sa);
@@ -2783,14 +2759,11 @@ sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
uint16_t plen, ptype;
struct sctp_ifa *sctp_ifa;
union sctp_sockstore store;
-
#ifdef INET6
struct sctp_ipv6addr_param addr6_store;
-
#endif
#ifdef INET
struct sctp_ipv4addr_param addr4_store;
-
#endif
SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n");
@@ -2917,19 +2890,16 @@ sctp_addr_in_initack(struct mbuf *m, uint32_t offset, uint32_t length, struct so
{
struct sctp_paramhdr tmp_param, *ph;
uint16_t plen, ptype;
-
#ifdef INET
struct sockaddr_in *sin;
struct sctp_ipv4addr_param *a4p;
struct sctp_ipv6addr_param addr4_store;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
struct sctp_ipv6addr_param *a6p;
struct sctp_ipv6addr_param addr6_store;
struct sockaddr_in6 sin6_tmp;
-
#endif
switch (sa->sa_family) {
@@ -3081,14 +3051,11 @@ sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
struct sctp_ifn *sctp_ifn;
struct sctp_ifa *sctp_ifa;
uint32_t vrf_id;
-
#ifdef INET
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
-
#endif
if (stcb) {
@@ -3303,14 +3270,11 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
struct sctp_asconf_addr *aa;
struct sctp_ifa *sctp_ifap;
struct sctp_asconf_tag_param *vtag;
-
#ifdef INET
struct sockaddr_in *to;
-
#endif
#ifdef INET6
struct sockaddr_in6 *to6;
-
#endif
if (net == NULL) {
SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
diff --git a/freebsd/sys/netinet/sctp_asconf.h b/freebsd/sys/netinet/sctp_asconf.h
index 183c99b4..c8d670e5 100644
--- a/freebsd/sys/netinet/sctp_asconf.h
+++ b/freebsd/sys/netinet/sctp_asconf.h
@@ -90,7 +90,6 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
extern int
sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
-
#endif /* _KERNEL */
#endif /* !_NETINET_SCTP_ASCONF_H_ */
diff --git a/freebsd/sys/netinet/sctp_bsd_addr.c b/freebsd/sys/netinet/sctp_bsd_addr.c
index bfd7f816..72c63d76 100644
--- a/freebsd/sys/netinet/sctp_bsd_addr.c
+++ b/freebsd/sys/netinet/sctp_bsd_addr.c
@@ -142,7 +142,6 @@ sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
}
}
-
#endif /* INET6 */
@@ -203,20 +202,18 @@ sctp_init_ifns_for_vrf(int vrfid)
struct ifaddr *ifa;
struct sctp_ifa *sctp_ifa;
uint32_t ifa_flags;
-
#ifdef INET6
struct in6_ifaddr *ifa6;
-
#endif
IFNET_RLOCK();
- TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+ TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
if (sctp_is_desired_interface_type(ifn) == 0) {
/* non desired type */
continue;
}
IF_ADDR_RLOCK(ifn);
- TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
if (ifa->ifa_addr == NULL) {
continue;
}
@@ -363,11 +360,11 @@ void
struct ifaddr *ifa;
IFNET_RLOCK();
- TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+ TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
if (!(*pred) (ifn)) {
continue;
}
- TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
}
}
diff --git a/freebsd/sys/netinet/sctp_bsd_addr.h b/freebsd/sys/netinet/sctp_bsd_addr.h
index 24660ca5..5fb1efb4 100644
--- a/freebsd/sys/netinet/sctp_bsd_addr.h
+++ b/freebsd/sys/netinet/sctp_bsd_addr.h
@@ -48,7 +48,6 @@ void sctp_startup_iterator(void);
#ifdef INET6
void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa);
-
#endif
#ifdef SCTP_PACKET_LOGGING
diff --git a/freebsd/sys/netinet/sctp_cc_functions.c b/freebsd/sys/netinet/sctp_cc_functions.c
index 68dc460a..49670e9b 100644
--- a/freebsd/sys/netinet/sctp_cc_functions.c
+++ b/freebsd/sys/netinet/sctp_cc_functions.c
@@ -162,17 +162,13 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
uint32_t srtt;
srtt = net->lastsa;
- /*
- * lastsa>>3; we don't need
- * to devide ...
- */
+ /* lastsa>>3; we don't need
+ * to devide ... */
if (srtt == 0) {
srtt = 1;
}
- /*
- * Short Version => Equal to
- * Contel Version MBe
- */
+ /* Short Version => Equal to
+ * Contel Version MBe */
net->ssthresh = (uint32_t) (((uint64_t) 4 *
(uint64_t) net->mtu *
(uint64_t) net->cwnd) /
@@ -211,7 +207,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
asoc->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
/*
@@ -224,7 +220,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
net->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
@@ -407,10 +403,8 @@ cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint6
net->flight_size,
probepoint);
if (net->cc_mod.rtcc.ret_from_eq) {
- /*
- * Switch over to CA if we are less
- * aggressive
- */
+ /* Switch over to CA if we are less
+ * aggressive */
net->ssthresh = net->cwnd - 1;
net->partial_bytes_acked = 0;
}
@@ -786,9 +780,9 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
/*
* At this point our bw_bytes has been updated by
* incoming sack information.
- *
+ *
* But our bw may not yet be set.
- *
+ *
*/
if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
@@ -853,10 +847,8 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
}
break;
case SCTP_CMT_RPV2:
- /*
- * lastsa>>3; we don't need
- * to divide ...
- */
+ /* lastsa>>3; we don't need
+ * to divide ... */
srtt = net->lastsa;
if (srtt == 0) {
srtt = 1;
@@ -940,10 +932,8 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
}
break;
case SCTP_CMT_RPV2:
- /*
- * lastsa>>3; we don't need
- * to divide ...
- */
+ /* lastsa>>3; we don't need
+ * to divide ... */
srtt = net->lastsa;
if (srtt == 0) {
srtt = 1;
@@ -1110,10 +1100,8 @@ sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *
sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
}
} else {
- /*
- * Further tuning down required over the drastic
- * original cut
- */
+ /* Further tuning down required over the drastic
+ * original cut */
net->ssthresh -= (net->mtu * num_pkt_lost);
net->cwnd -= (net->mtu * num_pkt_lost);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
@@ -1127,10 +1115,8 @@ sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *
net->ssthresh = net->cwnd / 2;
if (net->ssthresh < net->mtu) {
net->ssthresh = net->mtu;
- /*
- * here back off the timer as well, to slow
- * us down
- */
+ /* here back off the timer as well, to slow
+ * us down */
net->RTO <<= 1;
}
net->cwnd = net->ssthresh;
@@ -1377,10 +1363,8 @@ sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
if (cwnd_in_mtu == 0) {
- /*
- * Using 0 means that the value of RFC 4960
- * is used.
- */
+ /* Using 0 means that the value of RFC 4960
+ * is used. */
cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
} else {
/*
@@ -1392,10 +1376,8 @@ sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
}
if (net->cwnd > cwnd) {
- /*
- * Only set if we are not a timeout (i.e.
- * down to 1 mtu)
- */
+ /* Only set if we are not a timeout (i.e.
+ * down to 1 mtu) */
net->cwnd = cwnd;
}
}
@@ -1718,7 +1700,7 @@ sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
asoc->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
/*
@@ -1731,7 +1713,7 @@ sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
net->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
@@ -2025,10 +2007,8 @@ htcp_param_update(struct sctp_nets *net)
htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
htcp_alpha_update(&net->cc_mod.htcp_ca);
- /*
- * add slowly fading memory for maxRTT to accommodate routing
- * changes etc
- */
+ /* add slowly fading memory for maxRTT to accommodate routing
+ * changes etc */
if (minRTT > 0 && maxRTT > minRTT)
net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}
@@ -2111,7 +2091,6 @@ htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
return (net->ssthresh);
}
-
#endif
static void
@@ -2251,7 +2230,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
asoc->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
/*
@@ -2264,7 +2243,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
/* Mark end of the window */
net->fast_recovery_tsn = asoc->sending_seq - 1;
} else {
- net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
}
sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
diff --git a/freebsd/sys/netinet/sctp_constants.h b/freebsd/sys/netinet/sctp_constants.h
index ecde4fee..e779051d 100644
--- a/freebsd/sys/netinet/sctp_constants.h
+++ b/freebsd/sys/netinet/sctp_constants.h
@@ -898,8 +898,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_SSN_GE(a, b) SCTP_UINT16_GE(a, b)
#define SCTP_TSN_GT(a, b) SCTP_UINT32_GT(a, b)
#define SCTP_TSN_GE(a, b) SCTP_UINT32_GE(a, b)
-#define SCTP_MSGID_GT(o, a, b) ((o == 1) ? SCTP_UINT16_GT((uint16_t)a, (uint16_t)b) : SCTP_UINT32_GT(a, b))
-#define SCTP_MSGID_GE(o, a, b) ((o == 1) ? SCTP_UINT16_GE((uint16_t)a, (uint16_t)b) : SCTP_UINT32_GE(a, b))
+#define SCTP_MID_GT(i, a, b) (((i) == 1) ? SCTP_UINT32_GT(a, b) : SCTP_UINT16_GT((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_GE(i, a, b) (((i) == 1) ? SCTP_UINT32_GE(a, b) : SCTP_UINT16_GE((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_EQ(i, a, b) (((i) == 1) ? a == b : (uint16_t)a == (uint16_t)b)
/* Mapping array manipulation routines */
#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
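The sctp_constants.h change above replaces the SCTP_MSGID_* comparison macros with SCTP_MID_GT/GE/EQ, which operate on full 32-bit message IDs when I-DATA is in use (i == 1) and fall back to 16-bit SSN comparison otherwise. A small self-contained illustration of the EQ semantics, using a local copy of the macro rather than the kernel header:

#include <assert.h>
#include <stdint.h>

/* Local copy of the macro introduced above, for illustration only. */
#define SCTP_MID_EQ(i, a, b) (((i) == 1) ? a == b : (uint16_t)a == (uint16_t)b)

int main(void)
{
	/* Without I-DATA only the low 16 bits (the legacy SSN) are compared. */
	assert(SCTP_MID_EQ(0, 0x00010005, 0x0005));
	/* With I-DATA the full 32-bit MID must match. */
	assert(!SCTP_MID_EQ(1, 0x00010005, 0x0005));
	assert(SCTP_MID_EQ(1, 0x00010005, 0x00010005));
	return 0;
}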
diff --git a/freebsd/sys/netinet/sctp_crc32.c b/freebsd/sys/netinet/sctp_crc32.c
index bb081e8f..9130feb2 100644
--- a/freebsd/sys/netinet/sctp_crc32.c
+++ b/freebsd/sys/netinet/sctp_crc32.c
@@ -115,7 +115,6 @@ sctp_calculate_cksum(struct mbuf *m, uint32_t offset)
base = sctp_finalize_crc32c(base);
return (base);
}
-
#endif /* !defined(SCTP_WITH_NO_CSUM) */
diff --git a/freebsd/sys/netinet/sctp_crc32.h b/freebsd/sys/netinet/sctp_crc32.h
index 3f98be41..7fe02f5c 100644
--- a/freebsd/sys/netinet/sctp_crc32.h
+++ b/freebsd/sys/netinet/sctp_crc32.h
@@ -39,9 +39,7 @@ __FBSDID("$FreeBSD$");
#if defined(_KERNEL)
#if !defined(SCTP_WITH_NO_CSUM)
uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
-
#endif
void sctp_delayed_cksum(struct mbuf *, uint32_t offset);
-
#endif /* _KERNEL */
#endif /* __crc32c_h__ */
diff --git a/freebsd/sys/netinet/sctp_header.h b/freebsd/sys/netinet/sctp_header.h
index 3f4948dd..b650d930 100644
--- a/freebsd/sys/netinet/sctp_header.h
+++ b/freebsd/sys/netinet/sctp_header.h
@@ -141,9 +141,9 @@ struct sctp_supported_chunk_types_param {
*/
struct sctp_data {
uint32_t tsn;
- uint16_t stream_id;
- uint16_t stream_sequence;
- uint32_t protocol_id;
+ uint16_t sid;
+ uint16_t ssn;
+ uint32_t ppid;
/* user data follows */
} SCTP_PACKED;
@@ -154,11 +154,11 @@ struct sctp_data_chunk {
struct sctp_idata {
uint32_t tsn;
- uint16_t stream_id;
+ uint16_t sid;
uint16_t reserved; /* Where does the SSN go? */
- uint32_t msg_id;
+ uint32_t mid;
union {
- uint32_t protocol_id;
+ uint32_t ppid;
uint32_t fsn; /* Fragment Sequence Number */
} ppid_fsn;
/* user data follows */
@@ -182,7 +182,6 @@ struct sctp_init {
uint32_t initial_tsn; /* I-TSN */
/* optional param's follow */
} SCTP_PACKED;
-
#define SCTP_IDENTIFICATION_SIZE 16
#define SCTP_ADDRESS_SIZE 4
#define SCTP_RESERVE_SPACE 6
@@ -391,14 +390,14 @@ struct sctp_forward_tsn_chunk {
} SCTP_PACKED;
struct sctp_strseq {
- uint16_t stream;
- uint16_t sequence;
+ uint16_t sid;
+ uint16_t ssn;
} SCTP_PACKED;
struct sctp_strseq_mid {
- uint16_t stream;
+ uint16_t sid;
uint16_t flags;
- uint32_t msg_id;
+ uint32_t mid;
};
struct sctp_forward_tsn_msg {
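The sctp_header.h hunk above renames the DATA chunk fields (sid, ssn, ppid) and gives the I-DATA chunk a 32-bit mid plus a union in which the first fragment carries the PPID and every later fragment carries the FSN. A rough user-space model of that layout (illustrative only; the kernel struct is SCTP_PACKED and part of the patch above, this copy is not):

#include <assert.h>
#include <stdint.h>

/* Approximate layout of struct sctp_idata from the hunk above. */
struct idata_hdr {
	uint32_t tsn;
	uint16_t sid;
	uint16_t reserved;
	uint32_t mid;
	union {
		uint32_t ppid;   /* valid in the first fragment */
		uint32_t fsn;    /* valid in all later fragments */
	} ppid_fsn;
};

int main(void)
{
	/* 16 bytes of fixed fields, versus 12 for the legacy struct sctp_data. */
	assert(sizeof(struct idata_hdr) == 16);
	return 0;
}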
diff --git a/freebsd/sys/netinet/sctp_indata.c b/freebsd/sys/netinet/sctp_indata.c
index 12c2c80f..d9449a66 100644
--- a/freebsd/sys/netinet/sctp_indata.c
+++ b/freebsd/sys/netinet/sctp_indata.c
@@ -132,8 +132,8 @@ struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
struct sctp_nets *net,
uint32_t tsn, uint32_t ppid,
- uint32_t context, uint16_t stream_no,
- uint32_t stream_seq, uint8_t flags,
+ uint32_t context, uint16_t sid,
+ uint32_t mid, uint8_t flags,
struct mbuf *dm)
{
struct sctp_queued_to_read *read_queue_e = NULL;
@@ -143,14 +143,14 @@ sctp_build_readq_entry(struct sctp_tcb *stcb,
goto failed_build;
}
memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
- read_queue_e->sinfo_stream = stream_no;
- read_queue_e->sinfo_ssn = stream_seq;
+ read_queue_e->sinfo_stream = sid;
read_queue_e->sinfo_flags = (flags << 8);
read_queue_e->sinfo_ppid = ppid;
read_queue_e->sinfo_context = context;
read_queue_e->sinfo_tsn = tsn;
read_queue_e->sinfo_cumtsn = tsn;
read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->mid = mid;
read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
TAILQ_INIT(&read_queue_e->reasm);
read_queue_e->whoFrom = net;
@@ -343,10 +343,8 @@ sctp_place_control_in_stream(struct sctp_stream_in *strm,
q = &strm->uno_inqueue;
if (asoc->idata_supported == 0) {
if (!TAILQ_EMPTY(q)) {
- /*
- * Only one stream can be here in old style
- * -- abort
- */
+ /* Only one stream can be here in old style
+ * -- abort */
return (-1);
}
TAILQ_INSERT_TAIL(q, control, next_instrm);
@@ -370,7 +368,7 @@ sctp_place_control_in_stream(struct sctp_stream_in *strm,
return (0);
} else {
TAILQ_FOREACH(at, q, next_instrm) {
- if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
+ if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
/*
* one in queue is bigger than the new one,
* insert before this one
@@ -382,7 +380,7 @@ sctp_place_control_in_stream(struct sctp_stream_in *strm,
control->on_strm_q = SCTP_ON_ORDERED;
}
break;
- } else if (at->msg_id == control->msg_id) {
+ } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
/*
* Gak, He sent me a duplicate msg id
* number?? return -1 to abort.
@@ -427,18 +425,18 @@ sctp_abort_in_reasm(struct sctp_tcb *stcb,
"Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
opspot,
control->fsn_included,
- chk->rec.data.TSN_seq,
- chk->rec.data.stream_number,
- chk->rec.data.fsn_num, chk->rec.data.stream_seq);
+ chk->rec.data.tsn,
+ chk->rec.data.sid,
+ chk->rec.data.fsn, chk->rec.data.mid);
} else {
snprintf(msg, sizeof(msg),
"Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
opspot,
control->fsn_included,
- chk->rec.data.TSN_seq,
- chk->rec.data.stream_number,
- chk->rec.data.fsn_num,
- (uint16_t) chk->rec.data.stream_seq);
+ chk->rec.data.tsn,
+ chk->rec.data.sid,
+ chk->rec.data.fsn,
+ (uint16_t) chk->rec.data.mid);
}
oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
sctp_m_freem(chk->data);
@@ -489,13 +487,13 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb,
* has wrapped but not in the stream. Is this worth worrying about
* or should we just change our queue sort at the bottom to be by
* TSN.
- *
- * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
- * with TSN 1? If the peer is doing some sort of funky TSN/SSN
+ *
+ * Could it also be legal for a peer to send ssn 1 with TSN 2 and
+ * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
* assignment this could happen... and I don't see how this would be
* a violation. So for now I am undecided an will leave the sort by
* SSN alone. Maybe a hybred approach is the answer
- *
+ *
*/
struct sctp_queued_to_read *at;
int queue_needed;
@@ -506,19 +504,27 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
}
- if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
+ if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
/* The incoming sseq is behind where we last delivered? */
SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
- control->sinfo_ssn, strm->last_sequence_delivered);
+ control->mid, strm->last_mid_delivered);
protocol_error:
/*
* throw it in the stream so it gets cleaned up in
* association destruction
*/
TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
- snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
- strm->last_sequence_delivered, control->sinfo_tsn,
- control->sinfo_stream, control->sinfo_ssn);
+ if (asoc->idata_supported) {
+ snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+ strm->last_mid_delivered, control->sinfo_tsn,
+ control->sinfo_stream, control->mid);
+ } else {
+ snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+ (uint16_t) strm->last_mid_delivered,
+ control->sinfo_tsn,
+ control->sinfo_stream,
+ (uint16_t) control->mid);
+ }
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
@@ -532,8 +538,8 @@ protocol_error:
queue_needed = 1;
asoc->size_on_all_streams += control->length;
sctp_ucount_incr(asoc->cnt_on_all_streams);
- nxt_todel = strm->last_sequence_delivered + 1;
- if (nxt_todel == control->sinfo_ssn) {
+ nxt_todel = strm->last_mid_delivered + 1;
+ if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
@@ -556,7 +562,7 @@ protocol_error:
queue_needed = 0;
asoc->size_on_all_streams -= control->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
- strm->last_sequence_delivered++;
+ strm->last_mid_delivered++;
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
@@ -564,8 +570,8 @@ protocol_error:
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
/* all delivered */
- nxt_todel = strm->last_sequence_delivered + 1;
- if ((nxt_todel == control->sinfo_ssn) &&
+ nxt_todel = strm->last_mid_delivered + 1;
+ if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
(((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
asoc->size_on_all_streams -= control->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
@@ -578,7 +584,7 @@ protocol_error:
#endif
}
control->on_strm_q = 0;
- strm->last_sequence_delivered++;
+ strm->last_mid_delivered++;
/*
* We ignore the return of deliver_data here
* since we always can hold the chunk on the
@@ -596,7 +602,7 @@ protocol_error:
SCTP_READ_LOCK_NOT_HELD,
SCTP_SO_LOCKED);
continue;
- } else if (nxt_todel == control->sinfo_ssn) {
+ } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
*need_reasm = 1;
}
break;
@@ -612,8 +618,8 @@ protocol_error:
*/
if (sctp_place_control_in_stream(strm, asoc, control)) {
snprintf(msg, sizeof(msg),
- "Queue to str msg_id: %u duplicate",
- control->msg_id);
+ "Queue to str MID: %u duplicate",
+ control->mid);
sctp_clean_up_control(stcb, control);
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
@@ -724,10 +730,10 @@ sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queu
{
memset(nc, 0, sizeof(struct sctp_queued_to_read));
nc->sinfo_stream = control->sinfo_stream;
- nc->sinfo_ssn = control->sinfo_ssn;
+ nc->mid = control->mid;
TAILQ_INIT(&nc->reasm);
nc->top_fsn = control->top_fsn;
- nc->msg_id = control->msg_id;
+ nc->mid = control->mid;
nc->sinfo_flags = control->sinfo_flags;
nc->sinfo_ppid = control->sinfo_ppid;
nc->sinfo_context = control->sinfo_context;
@@ -766,11 +772,11 @@ sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
{
/*
* Special handling for the old un-ordered data chunk. All the
- * chunks/TSN's go to msg_id 0. So we have to do the old style
- * watching to see if we have it all. If you return one, no other
- * control entries on the un-ordered queue will be looked at. In
- * theory there should be no others entries in reality, unless the
- * guy is sending both unordered NDATA and unordered DATA...
+ * chunks/TSN's go to mid 0. So we have to do the old style watching
+ * to see if we have it all. If you return one, no other control
+ * entries on the un-ordered queue will be looked at. In theory
+ * there should be no others entries in reality, unless the guy is
+ * sending both unordered NDATA and unordered DATA...
*/
struct sctp_tmit_chunk *chk, *lchk, *tchk;
uint32_t fsn;
@@ -787,7 +793,7 @@ restart:
fsn = control->fsn_included + 1;
/* Now what can we add? */
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
- if (chk->rec.data.fsn_num == fsn) {
+ if (chk->rec.data.fsn == fsn) {
/* Ok lets add it */
sctp_alloc_a_readq(stcb, nc);
if (nc == NULL) {
@@ -811,12 +817,14 @@ restart:
tchk = TAILQ_FIRST(&control->reasm);
if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+ asoc->size_on_reasm_queue -= tchk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
nc->first_frag_seen = 1;
- nc->fsn_included = tchk->rec.data.fsn_num;
+ nc->fsn_included = tchk->rec.data.fsn;
nc->data = tchk->data;
- nc->sinfo_ppid = tchk->rec.data.payloadtype;
- nc->sinfo_tsn = tchk->rec.data.TSN_seq;
- sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
+ nc->sinfo_ppid = tchk->rec.data.ppid;
+ nc->sinfo_tsn = tchk->rec.data.tsn;
+ sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
tchk->data = NULL;
sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(nc);
@@ -828,10 +836,8 @@ restart:
TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
tchk = TAILQ_FIRST(&control->reasm);
}
- /*
- * Now lets add it to the queue
- * after removing control
- */
+ /* Now lets add it to the queue
+ * after removing control */
TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
nc->on_strm_q = SCTP_ON_UNORDERED;
if (control->on_strm_q) {
@@ -855,10 +861,8 @@ restart:
}
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
- /*
- * Switch to the new guy and
- * continue
- */
+ /* Switch to the new guy and
+ * continue */
control = nc;
goto restart;
} else {
@@ -906,7 +910,7 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
/* Its the very first one. */
SCTPDBG(SCTP_DEBUG_XXX,
"chunk is a first fsn: %u becomes fsn_included\n",
- chk->rec.data.fsn_num);
+ chk->rec.data.fsn);
if (control->first_frag_seen) {
/*
* In old un-ordered we can reassembly on one
@@ -917,24 +921,22 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
struct mbuf *tdata;
uint32_t tmp;
- if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
- /*
- * Easy way the start of a new guy beyond
- * the lowest
- */
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
+ /* Easy way the start of a new guy beyond
+ * the lowest */
goto place_chunk;
}
- if ((chk->rec.data.fsn_num == control->fsn_included) ||
+ if ((chk->rec.data.fsn == control->fsn_included) ||
(control->pdapi_started)) {
/*
* Ok this should not happen, if it does we
* started the pd-api on the higher TSN
* (since the equals part is a TSN failure
* it must be that).
- *
- * We are completly hosed in that case since I
- * have no way to recover. This really will
- * only happen if we can get more TSN's
+ *
+ * We are completly hosed in that case since
+ * I have no way to recover. This really
+ * will only happen if we can get more TSN's
* higher before the pd-api-point.
*/
sctp_abort_in_reasm(stcb, control, chk,
@@ -958,25 +960,25 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
sctp_setup_tail_pointer(control);
/* Fix the FSN included */
tmp = control->fsn_included;
- control->fsn_included = chk->rec.data.fsn_num;
- chk->rec.data.fsn_num = tmp;
+ control->fsn_included = chk->rec.data.fsn;
+ chk->rec.data.fsn = tmp;
/* Fix the TSN included */
tmp = control->sinfo_tsn;
- control->sinfo_tsn = chk->rec.data.TSN_seq;
- chk->rec.data.TSN_seq = tmp;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ chk->rec.data.tsn = tmp;
/* Fix the PPID included */
tmp = control->sinfo_ppid;
- control->sinfo_ppid = chk->rec.data.payloadtype;
- chk->rec.data.payloadtype = tmp;
+ control->sinfo_ppid = chk->rec.data.ppid;
+ chk->rec.data.ppid = tmp;
/* Fix tail pointer */
goto place_chunk;
}
control->first_frag_seen = 1;
- control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
- control->sinfo_tsn = chk->rec.data.TSN_seq;
- control->sinfo_ppid = chk->rec.data.payloadtype;
+ control->top_fsn = control->fsn_included = chk->rec.data.fsn;
+ control->sinfo_tsn = chk->rec.data.tsn;
+ control->sinfo_ppid = chk->rec.data.ppid;
control->data = chk->data;
- sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(control);
@@ -985,7 +987,7 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
place_chunk:
inserted = 0;
TAILQ_FOREACH(at, &control->reasm, sctp_next) {
- if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
+ if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
/*
* This one in queue is bigger than the new one,
* insert the new one before at.
@@ -995,7 +997,7 @@ place_chunk:
inserted = 1;
TAILQ_INSERT_BEFORE(at, chk, sctp_next);
break;
- } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
+ } else if (at->rec.data.fsn == chk->rec.data.fsn) {
/*
* They sent a duplicate fsn number. This really
* should not happen since the FSN is a TSN and it
@@ -1011,7 +1013,7 @@ place_chunk:
/* Its at the end */
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
- control->top_fsn = chk->rec.data.fsn_num;
+ control->top_fsn = chk->rec.data.fsn;
TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
}
}
@@ -1052,7 +1054,7 @@ sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
while (control) {
SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
- control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
+ control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
nctl = TAILQ_NEXT(control, next_instrm);
if (control->end_added) {
/* We just put the last bit on */
@@ -1097,7 +1099,7 @@ done_un:
if (control == NULL) {
return (ret);
}
- if (strm->last_sequence_delivered == control->sinfo_ssn) {
+ if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
/*
* Ok the guy at the top was being partially delivered
* completed, so we remove it. Note the pd_api flag was
@@ -1107,9 +1109,9 @@ done_un:
nctl = TAILQ_NEXT(control, next_instrm);
SCTPDBG(SCTP_DEBUG_XXX,
"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
- control, control->end_added, control->sinfo_ssn,
+ control, control->end_added, control->mid,
control->top_fsn, control->fsn_included,
- strm->last_sequence_delivered);
+ strm->last_mid_delivered);
if (control->end_added) {
if (control->on_strm_q) {
#ifdef INVARIANTS
@@ -1136,21 +1138,19 @@ done_un:
}
}
if (strm->pd_api_started) {
- /*
- * Can't add more must have gotten an un-ordered above being
- * partially delivered.
- */
+ /* Can't add more must have gotten an un-ordered above being
+ * partially delivered. */
return (0);
}
deliver_more:
- next_to_del = strm->last_sequence_delivered + 1;
+ next_to_del = strm->last_mid_delivered + 1;
if (control) {
SCTPDBG(SCTP_DEBUG_XXX,
"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
- control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
+ control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
next_to_del);
nctl = TAILQ_NEXT(control, next_instrm);
- if ((control->sinfo_ssn == next_to_del) &&
+ if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
(control->first_frag_seen)) {
int done;
@@ -1171,21 +1171,15 @@ deliver_more:
ret++;
}
if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
- /*
- * A singleton now slipping through - mark
- * it non-revokable too
- */
+ /* A singleton now slipping through - mark
+ * it non-revokable too */
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
} else if (control->end_added == 0) {
- /*
- * Check if we can defer adding until its
- * all there
- */
+ /* Check if we can defer adding until its
+ * all there */
if ((control->length < pd_point) || (strm->pd_api_started)) {
- /*
- * Don't need it or cannot add more
- * (one being delivered that way)
- */
+ /* Don't need it or cannot add more
+ * (one being delivered that way) */
goto out;
}
}
@@ -1196,7 +1190,7 @@ deliver_more:
&stcb->sctp_socket->so_rcv, control->end_added,
inp_read_lock_held, SCTP_SO_NOT_LOCKED);
}
- strm->last_sequence_delivered = next_to_del;
+ strm->last_mid_delivered = next_to_del;
if (done) {
control = nctl;
goto deliver_more;
@@ -1237,10 +1231,10 @@ sctp_add_chk_to_control(struct sctp_queued_to_read *control,
} else {
sctp_add_to_tail_pointer(control, chk->data);
}
- control->fsn_included = chk->rec.data.fsn_num;
+ control->fsn_included = chk->rec.data.fsn;
asoc->size_on_reasm_queue -= chk->send_size;
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
- sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
control->first_frag_seen = 1;
@@ -1341,7 +1335,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* Its the very first one. */
SCTPDBG(SCTP_DEBUG_XXX,
"chunk is a first fsn: %u becomes fsn_included\n",
- chk->rec.data.fsn_num);
+ chk->rec.data.fsn);
if (control->first_frag_seen) {
/*
* Error on senders part, they either sent us two
@@ -1355,9 +1349,9 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
return;
}
control->first_frag_seen = 1;
- control->fsn_included = chk->rec.data.fsn_num;
+ control->fsn_included = chk->rec.data.fsn;
control->data = chk->data;
- sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+ sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(control);
@@ -1367,16 +1361,16 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (control->last_frag_seen == 0) {
/* Still willing to raise highest FSN seen */
- if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
SCTPDBG(SCTP_DEBUG_XXX,
"We have a new top_fsn: %u\n",
- chk->rec.data.fsn_num);
- control->top_fsn = chk->rec.data.fsn_num;
+ chk->rec.data.fsn);
+ control->top_fsn = chk->rec.data.fsn;
}
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
SCTPDBG(SCTP_DEBUG_XXX,
"The last fsn is now in place fsn: %u\n",
- chk->rec.data.fsn_num);
+ chk->rec.data.fsn);
control->last_frag_seen = 1;
}
if (asoc->idata_supported || control->first_frag_seen) {
@@ -1386,11 +1380,9 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
* DATA we have to receive the first before
* we know the first FSN (which is the TSN).
*/
- if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
- /*
- * We have already delivered up to
- * this so its a dup
- */
+ if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+ /* We have already delivered up to
+ * this so its a dup */
sctp_abort_in_reasm(stcb, control, chk,
abort_flag,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
@@ -1402,7 +1394,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* Second last? huh? */
SCTPDBG(SCTP_DEBUG_XXX,
"Duplicate last fsn: %u (top: %u) -- abort\n",
- chk->rec.data.fsn_num, control->top_fsn);
+ chk->rec.data.fsn, control->top_fsn);
sctp_abort_in_reasm(stcb, control,
chk, abort_flag,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
@@ -1416,28 +1408,24 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
* we know the first FSN (which is the TSN).
*/
- if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
- /*
- * We have already delivered up to
- * this so its a dup
- */
+ if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+ /* We have already delivered up to
+ * this so its a dup */
SCTPDBG(SCTP_DEBUG_XXX,
"New fsn: %u is already seen in included_fsn: %u -- abort\n",
- chk->rec.data.fsn_num, control->fsn_included);
+ chk->rec.data.fsn, control->fsn_included);
sctp_abort_in_reasm(stcb, control, chk,
abort_flag,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
return;
}
}
- /*
- * validate not beyond top FSN if we have seen last
- * one
- */
- if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
+ /* validate not beyond top FSN if we have seen last
+ * one */
+ if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
SCTPDBG(SCTP_DEBUG_XXX,
"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
- chk->rec.data.fsn_num,
+ chk->rec.data.fsn,
control->top_fsn);
sctp_abort_in_reasm(stcb, control, chk,
abort_flag,
@@ -1451,26 +1439,24 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
SCTPDBG(SCTP_DEBUG_XXX,
"chunk is a not first fsn: %u needs to be inserted\n",
- chk->rec.data.fsn_num);
+ chk->rec.data.fsn);
TAILQ_FOREACH(at, &control->reasm, sctp_next) {
- if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
+ if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
/*
* This one in queue is bigger than the new
* one, insert the new one before at.
*/
SCTPDBG(SCTP_DEBUG_XXX,
"Insert it before fsn: %u\n",
- at->rec.data.fsn_num);
+ at->rec.data.fsn);
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
TAILQ_INSERT_BEFORE(at, chk, sctp_next);
inserted = 1;
break;
- } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
- /*
- * Gak, He sent me a duplicate str seq
- * number
- */
+ } else if (at->rec.data.fsn == chk->rec.data.fsn) {
+ /* Gak, He sent me a duplicate str seq
+ * number */
/*
* foo bar, I guess I will just free this
* new guy, should we abort too? FIX ME
@@ -1481,7 +1467,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
SCTPDBG(SCTP_DEBUG_XXX,
"Duplicate to fsn: %u -- abort\n",
- at->rec.data.fsn_num);
+ at->rec.data.fsn);
sctp_abort_in_reasm(stcb, control,
chk, abort_flag,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
@@ -1491,7 +1477,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (inserted == 0) {
/* Goes on the end */
SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
- chk->rec.data.fsn_num);
+ chk->rec.data.fsn);
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
@@ -1509,12 +1495,12 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (control->first_frag_seen) {
next_fsn = control->fsn_included + 1;
TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
- if (at->rec.data.fsn_num == next_fsn) {
+ if (at->rec.data.fsn == next_fsn) {
/* We can add this one now to the control */
SCTPDBG(SCTP_DEBUG_XXX,
"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
control, at,
- at->rec.data.fsn_num,
+ at->rec.data.fsn,
next_fsn, control->fsn_included);
TAILQ_REMOVE(&control->reasm, at, sctp_next);
sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
@@ -1548,25 +1534,25 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
static struct sctp_queued_to_read *
-sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
+sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
struct sctp_queued_to_read *control;
if (ordered) {
TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
- if (control->msg_id == msg_id) {
+ if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
break;
}
}
} else {
- if (old) {
- control = TAILQ_FIRST(&strm->uno_inqueue);
- return (control);
- }
- TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
- if (control->msg_id == msg_id) {
- break;
+ if (idata_supported) {
+ TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
+ if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+ break;
+ }
}
+ } else {
+ control = TAILQ_FIRST(&strm->uno_inqueue);
}
}
return (control);
@@ -1583,22 +1569,21 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
struct sctp_data_chunk *ch;
struct sctp_idata_chunk *nch, chunk_buf;
struct sctp_tmit_chunk *chk;
- uint32_t tsn, fsn, gap, msg_id;
+ uint32_t tsn, fsn, gap, mid;
struct mbuf *dmbuf;
int the_len;
int need_reasm_check = 0;
- uint16_t strmno;
+ uint16_t sid;
struct mbuf *op_err;
char msg[SCTP_DIAG_INFO_LEN];
struct sctp_queued_to_read *control = NULL;
- uint32_t protocol_id;
+ uint32_t ppid;
uint8_t chunk_flags;
struct sctp_stream_reset_list *liste;
struct sctp_stream_in *strm;
int ordered;
size_t clen;
int created_control = 0;
- uint8_t old_data;
chk = NULL;
if (chtype == SCTP_IDATA) {
@@ -1607,23 +1592,21 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
ch = (struct sctp_data_chunk *)nch;
clen = sizeof(struct sctp_idata_chunk);
tsn = ntohl(ch->dp.tsn);
- msg_id = ntohl(nch->dp.msg_id);
- protocol_id = nch->dp.ppid_fsn.protocol_id;
+ mid = ntohl(nch->dp.mid);
+ ppid = nch->dp.ppid_fsn.ppid;
if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
fsn = 0;
else
fsn = ntohl(nch->dp.ppid_fsn.fsn);
- old_data = 0;
} else {
ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
tsn = ntohl(ch->dp.tsn);
- protocol_id = ch->dp.protocol_id;
+ ppid = ch->dp.ppid;
clen = sizeof(struct sctp_data_chunk);
fsn = tsn;
- msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
+ mid = (uint32_t) (ntohs(ch->dp.ssn));
nch = NULL;
- old_data = 1;
}
chunk_flags = ch->ch.chunk_flags;
if ((size_t)chk_length == clen) {
@@ -1709,9 +1692,9 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
/* Is the stream valid? */
- strmno = ntohs(ch->dp.stream_id);
+ sid = ntohs(ch->dp.sid);
- if (strmno >= asoc->streamincnt) {
+ if (sid >= asoc->streamincnt) {
struct sctp_error_invalid_stream *cause;
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
@@ -1728,7 +1711,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
- cause->stream_id = ch->dp.stream_id;
+ cause->stream_id = ch->dp.sid;
cause->reserved = htons(0);
sctp_queue_op_err(stcb, op_err);
}
@@ -1744,7 +1727,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
return (0);
}
- strm = &asoc->strmin[strmno];
+ strm = &asoc->strmin[sid];
/*
* If its a fragmented message, lets see if we can find the control
* on the reassembly queues.
@@ -1758,18 +1741,18 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
* wrap around. Ignore is for now.
*/
snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
- msg_id, chunk_flags);
+ mid, chunk_flags);
goto err_out;
}
- control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
+ control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
chunk_flags, control);
if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
/* See if we can find the re-assembly entity */
if (control != NULL) {
/* We found something, does it belong? */
- if (ordered && (msg_id != control->sinfo_ssn)) {
- snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", msg_id);
+ if (ordered && (mid != control->mid)) {
+ snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
err_out:
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
@@ -1778,19 +1761,15 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
return (0);
}
if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
- /*
- * We can't have a switched order with an
- * unordered chunk
- */
+ /* We can't have a switched order with an
+ * unordered chunk */
snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
tsn);
goto err_out;
}
if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
- /*
- * We can't have a switched unordered with a
- * ordered chunk
- */
+ /* We can't have a switched unordered with a
+ * ordered chunk */
snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
tsn);
goto err_out;
@@ -1803,10 +1782,10 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
* ordered) or in the same Stream for unordered.
*/
if (control != NULL) {
- if (ordered || (old_data == 0)) {
- SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
- chunk_flags, msg_id);
- snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", msg_id);
+ if (ordered || asoc->idata_supported) {
+ SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
+ chunk_flags, mid);
+ snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
goto err_out;
} else {
if ((tsn == control->fsn_included + 1) &&
@@ -1883,8 +1862,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
asoc->tsn_in_wrapped = 1;
}
asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
- asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
- asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
+ asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
+ asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
@@ -1902,14 +1881,24 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
(TAILQ_EMPTY(&asoc->resetHead)) &&
(chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
- SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
+ SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
/* The incoming sseq is behind where we last delivered? */
SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
- msg_id, asoc->strmin[strmno].last_sequence_delivered);
+ mid, asoc->strmin[sid].last_mid_delivered);
- snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
- asoc->strmin[strmno].last_sequence_delivered,
- tsn, strmno, msg_id);
+ if (asoc->idata_supported) {
+ snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+ asoc->strmin[sid].last_mid_delivered,
+ tsn,
+ sid,
+ mid);
+ } else {
+ snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+ (uint16_t) asoc->strmin[sid].last_mid_delivered,
+ tsn,
+ sid,
+ (uint16_t) mid);
+ }
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
@@ -1982,10 +1971,10 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (control == NULL) {
sctp_alloc_a_readq(stcb, control);
sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
- protocol_id,
- strmno, msg_id,
+ ppid,
+ sid,
chunk_flags,
- NULL, fsn, msg_id);
+ NULL, fsn, mid);
if (control == NULL) {
SCTP_STAT_INCR(sctps_nomem);
return (0);
@@ -1998,13 +1987,13 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
created_control = 1;
}
- SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
- chunk_flags, ordered, msg_id, control);
+ SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
+ chunk_flags, ordered, mid, control);
if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
TAILQ_EMPTY(&asoc->resetHead) &&
((ordered == 0) ||
- ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
- TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
+ (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
+ TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
/* Candidate for express delivery */
/*
* Its not fragmented, No PD-API is up, Nothing in the
@@ -2017,8 +2006,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
asoc->highest_tsn_inside_nr_map = tsn;
}
- SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
- control, msg_id);
+ SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
+ control, mid);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control, &stcb->sctp_socket->so_rcv,
@@ -2026,11 +2015,11 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
/* for ordered, bump what we delivered */
- strm->last_sequence_delivered++;
+ strm->last_mid_delivered++;
}
SCTP_STAT_INCR(sctps_recvexpress);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
- sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
+ sctp_log_strm_del_alt(stcb, tsn, mid, sid,
SCTP_STR_LOG_FROM_EXPRS_DEL);
}
control = NULL;
@@ -2048,21 +2037,21 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
return (0);
}
- chk->rec.data.TSN_seq = tsn;
+ chk->rec.data.tsn = tsn;
chk->no_fr_allowed = 0;
- chk->rec.data.fsn_num = fsn;
- chk->rec.data.stream_seq = msg_id;
- chk->rec.data.stream_number = strmno;
- chk->rec.data.payloadtype = protocol_id;
+ chk->rec.data.fsn = fsn;
+ chk->rec.data.mid = mid;
+ chk->rec.data.sid = sid;
+ chk->rec.data.ppid = ppid;
chk->rec.data.context = stcb->asoc.context;
chk->rec.data.doing_fast_retransmit = 0;
chk->rec.data.rcv_flags = chunk_flags;
chk->asoc = asoc;
chk->send_size = the_len;
chk->whoTo = net;
- SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
+ SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
chk,
- control, msg_id);
+ control, mid);
atomic_add_int(&net->ref_count, 1);
chk->data = dmbuf;
}
@@ -2086,7 +2075,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
* if it is not being reset.. that way we would not create a
* HOLB when amongst streams being reset and those not being
* reset.
- *
+ *
*/
if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
SCTP_TSN_GT(tsn, liste->tsn)) {
@@ -2125,8 +2114,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
if (chunk_flags & SCTP_DATA_UNORDERED) {
/* queue directly into socket buffer */
- SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
- control, msg_id);
+ SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
+ control, mid);
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
@@ -2134,8 +2123,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
} else {
- SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
- msg_id);
+ SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
+ mid);
sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
if (*abort_flag) {
if (last_chunk) {
@@ -2149,8 +2138,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* If we reach here its a reassembly */
need_reasm_check = 1;
SCTPDBG(SCTP_DEBUG_XXX,
- "Queue data to stream for reasm control: %p msg_id: %u\n",
- control, msg_id);
+ "Queue data to stream for reasm control: %p MID: %u\n",
+ control, mid);
sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
if (*abort_flag) {
/*
@@ -2179,7 +2168,7 @@ finish_express_del:
SCTP_STAT_INCR(sctps_recvdata);
/* Set it present please */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
- sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
+ sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
@@ -2284,10 +2273,10 @@ sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
/*
* Now we also need to check the mapping array in a couple of ways.
* 1) Did we move the cum-ack point?
- *
- * When you first glance at this you might think that all entries that
- * make up the position of the cum-ack would be in the nr-mapping
- * array only.. i.e. things up to the cum-ack are always
+ *
+ * When you first glance at this you might think that all entries
+ * that make up the position of the cum-ack would be in the
+ * nr-mapping array only.. i.e. things up to the cum-ack are always
* deliverable. Thats true with one exception, when its a fragmented
* message we may not deliver the data until some threshold (or all
* of it) is in place. So we must OR the nr_mapping_array and
@@ -2346,10 +2335,8 @@ sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
/* The complete array was completed by a single FR */
/* highest becomes the cum-ack */
int clr;
-
#ifdef INVARIANTS
unsigned int i;
-
#endif
/* clear the array */
@@ -2497,8 +2484,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
(stcb->asoc.numduptsns) || /* we have dup's */
(is_a_gap) || /* is still a gap */
(stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
- (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
- ) {
+ (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
if ((stcb->asoc.sctp_cmt_on_off > 0) &&
(SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
@@ -2510,9 +2496,9 @@ sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
/*
* CMT DAC algorithm: With CMT, delay acks
* even in the face of
- *
- * reordering. Therefore, if acks that do not
- * have to be sent because of the above
+ *
+ * reordering. Therefore, if acks that do
+ * not have to be sent because of the above
* reasons, will be delayed. That is, acks
* that would have been sent due to gap
* reports will be delayed with DAC. Start
@@ -2719,7 +2705,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
* Now, what do we do with KNOWN
* chunks that are NOT in the right
* place?
- *
+ *
* For now, I do nothing but ignore
* them. We may later want to add
* sysctl stuff to switch out and do
@@ -2761,8 +2747,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
/* discard the rest of this packet */
stop_proc = 1;
} /* else skip this bad chunk and
- * continue... */
- break;
+ * continue... */ break;
} /* switch of chunk type */
}
*offset += SCTP_SIZE32(chk_length);
@@ -2845,16 +2830,16 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
(tp1->whoTo->find_pseudo_cumack == 1) &&
(tp1->snd_count == 1)) {
- tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
tp1->whoTo->find_pseudo_cumack = 0;
}
if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
(tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
(tp1->snd_count > 1)) {
- tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
tp1->whoTo->find_rtx_pseudo_cumack = 0;
}
- if (tp1->rec.data.TSN_seq == theTSN) {
+ if (tp1->rec.data.tsn == theTSN) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
/*-
* must be held until
@@ -2868,9 +2853,9 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
* via previous Gap Ack Blocks...
* i.e. ACKED or RESEND.
*/
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
*biggest_newly_acked_tsn)) {
- *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+ *biggest_newly_acked_tsn = tp1->rec.data.tsn;
}
/*-
* CMT: SFR algo (and HTNA) - set
@@ -2882,10 +2867,10 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
if (tp1->rec.data.chunk_was_revoked == 0)
tp1->whoTo->saw_newack = 1;
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
tp1->whoTo->this_sack_highest_newack)) {
tp1->whoTo->this_sack_highest_newack =
- tp1->rec.data.TSN_seq;
+ tp1->rec.data.tsn;
}
/*-
* CMT DAC algo: also update
@@ -2895,12 +2880,12 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(*this_sack_lowest_newack,
last_tsn,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
0,
0,
SCTP_LOG_TSN_ACKED);
}
- *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ *this_sack_lowest_newack = tp1->rec.data.tsn;
}
/*-
* CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
@@ -2910,16 +2895,16 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
* Separate pseudo_cumack trackers for first transmissions and
* retransmissions.
*/
- if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
if (tp1->rec.data.chunk_was_revoked == 0) {
tp1->whoTo->new_pseudo_cumack = 1;
}
tp1->whoTo->find_pseudo_cumack = 1;
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
- sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
}
- if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
if (tp1->rec.data.chunk_was_revoked == 0) {
tp1->whoTo->new_pseudo_cumack = 1;
}
@@ -2928,7 +2913,7 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(*biggest_newly_acked_tsn,
last_tsn,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
frag_strt,
frag_end,
SCTP_LOG_TSN_ACKED);
@@ -2938,7 +2923,7 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
sctp_flight_size_decrease(tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
@@ -2976,10 +2961,10 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
}
}
if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+ if (SCTP_TSN_GT(tp1->rec.data.tsn,
stcb->asoc.this_sack_highest_gap)) {
stcb->asoc.this_sack_highest_gap =
- tp1->rec.data.TSN_seq;
+ tp1->rec.data.tsn;
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
@@ -3005,24 +2990,22 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
/* NR Sack code here */
if (nr_sacking &&
(tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
- if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
- stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+ if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
}
- if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
- (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
- TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
+ if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
stcb->asoc.trigger_reset = 1;
}
tp1->sent = SCTP_DATAGRAM_NR_ACKED;
if (tp1->data) {
- /*
- * sa_ignore
- * NO_NULL_CHK
- */
+ /* sa_ignore
+ * NO_NULL_CHK */
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
sctp_m_freem(tp1->data);
tp1->data = NULL;
@@ -3031,8 +3014,7 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
}
}
break;
- } /* if (tp1->TSN_seq == theTSN) */
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
+ } /* if (tp1->tsn == theTSN) */ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
break;
}
tp1 = TAILQ_NEXT(tp1, sctp_next);
@@ -3124,14 +3106,14 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
struct sctp_tmit_chunk *tp1;
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
/*
* ok this guy is either ACK or MARKED. If it is
* ACKED it has been previously acked but not this
* time i.e. revoked. If it is MARKED it was ACK'ed
* again.
*/
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
break;
}
if (tp1->sent == SCTP_DATAGRAM_ACKED) {
@@ -3147,7 +3129,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
sctp_flight_size_increase(tp1);
sctp_total_flight_increase(stcb, tp1);
@@ -3159,7 +3141,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
0,
0,
SCTP_LOG_TSN_REVOKED);
@@ -3195,7 +3177,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (tp1 == NULL) {
sending_seq = asoc->sending_seq;
} else {
- sending_seq = tp1->rec.data.TSN_seq;
+ sending_seq = tp1->rec.data.tsn;
}
/* CMT DAC algo: finding out if SACK is a mixed SACK */
@@ -3218,11 +3200,11 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
if (tp1->sent < SCTP_DATAGRAM_RESEND)
sctp_log_fr(biggest_tsn_newly_acked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_CHECK_STRIKE);
}
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
tp1->sent == SCTP_DATAGRAM_UNSENT) {
/* done */
break;
@@ -3240,7 +3222,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
}
}
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
/* we are beyond the tsn in the sack */
break;
}
@@ -3264,7 +3246,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* FR using this SACK.
*/
continue;
- } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+ } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
tp1->whoTo->this_sack_highest_newack)) {
/*
* CMT: New acks were receieved for data sent to
@@ -3294,7 +3276,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3316,10 +3298,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* received after this missing TSN.
*/
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
- SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
+ SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(16 + num_dests_sacked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3357,7 +3339,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3389,10 +3371,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
(num_dests_sacked == 1) &&
SCTP_TSN_GT(this_sack_lowest_newack,
- tp1->rec.data.TSN_seq)) {
+ tp1->rec.data.tsn)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(32 + num_dests_sacked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3407,7 +3389,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* JRI: TODO: remove code for HTNA algo. CMT's SFR
* algo covers HTNA.
*/
- } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+ } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
biggest_tsn_newly_acked)) {
/*
* We don't strike these: This is the HTNA
@@ -3419,7 +3401,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* Strike the TSN */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(biggest_tsn_newly_acked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3441,10 +3423,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* received after this missing TSN.
*/
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
- SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
+ SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(48 + num_dests_sacked,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
}
@@ -3461,7 +3443,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
(tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
if (tp1->whoTo) {
tp1->whoTo->net_ack++;
@@ -3483,10 +3465,8 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if ((stcb->asoc.prsctp_supported) &&
(PR_SCTP_RTX_ENABLED(tp1->flags))) {
- /*
- * Has it been retransmitted tv_sec times? -
- * we store the retran count there.
- */
+ /* Has it been retransmitted tv_sec times? -
+ * we store the retran count there. */
if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
/* Yes, so drop it */
if (tp1->data != NULL) {
@@ -3498,12 +3478,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
continue;
}
}
- /*
- * SCTP_PRINTF("OK, we are now ready to FR this
- * guy\n");
- */
+ /* SCTP_PRINTF("OK, we are now ready to FR this
+ * guy\n"); */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
- sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
+ sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
0, SCTP_FR_MARKED);
}
if (strike_flag) {
@@ -3521,18 +3499,14 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
alt = tp1->whoTo;
/* sa_ignore NO_NULL_CHK */
if (asoc->sctp_cmt_pf > 0) {
- /*
- * JRS 5/18/07 - If CMT PF is on,
+ /* JRS 5/18/07 - If CMT PF is on,
* use the PF version of
- * find_alt_net()
- */
+ * find_alt_net() */
alt = sctp_find_alternate_net(stcb, alt, 2);
} else {
- /*
- * JRS 5/18/07 - If only CMT is on,
+ /* JRS 5/18/07 - If only CMT is on,
* use the CMT version of
- * find_alt_net()
- */
+ * find_alt_net() */
/* sa_ignore NO_NULL_CHK */
alt = sctp_find_alternate_net(stcb, alt, 1);
}
@@ -3570,7 +3544,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* mark the sending seq for possible subsequent FR's */
/*
* SCTP_PRINTF("Marking TSN for FR new value %x\n",
- * (uint32_t)tpi->rec.data.TSN_seq);
+ * (uint32_t)tpi->rec.data.tsn);
*/
if (TAILQ_EMPTY(&asoc->send_queue)) {
/*
@@ -3593,7 +3567,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
ttt = TAILQ_FIRST(&asoc->send_queue);
tp1->rec.data.fast_retran_tsn =
- ttt->rec.data.TSN_seq;
+ ttt->rec.data.tsn;
}
if (tp1->do_rtt) {
@@ -3641,7 +3615,7 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
(tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
sctp_misc_ints(SCTP_FWD_TSN_CHECK,
asoc->advanced_peer_ack_point,
- tp1->rec.data.TSN_seq, 0, 0);
+ tp1->rec.data.tsn, 0, 0);
}
}
if (!PR_SCTP_ENABLED(tp1->flags)) {
@@ -3689,10 +3663,10 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
(tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
/* advance PeerAckPoint goes forward */
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
- asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
+ asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
a_adv = tp1;
- } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
+ } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
/* No update but we do save the chk */
a_adv = tp1;
}
@@ -3713,10 +3687,8 @@ sctp_fs_audit(struct sctp_association *asoc)
struct sctp_tmit_chunk *chk;
int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
int ret;
-
#ifndef INVARIANTS
int entry_flight, entry_cnt;
-
#endif
ret = 0;
@@ -3730,7 +3702,7 @@ sctp_fs_audit(struct sctp_association *asoc)
TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
if (chk->sent < SCTP_DATAGRAM_RESEND) {
SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
- chk->rec.data.TSN_seq,
+ chk->rec.data.tsn,
chk->send_size,
chk->snd_count);
inflight++;
@@ -3773,7 +3745,7 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
tp1->whoTo ? tp1->whoTo->flight_size : 0,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
return;
}
/* First setup this by shrinking flight */
@@ -3792,7 +3764,7 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
}
@@ -3863,7 +3835,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (!TAILQ_EMPTY(&asoc->sent_queue)) {
tp1 = TAILQ_LAST(&asoc->sent_queue,
sctpchunk_listhead);
- send_s = tp1->rec.data.TSN_seq + 1;
+ send_s = tp1->rec.data.tsn + 1;
} else {
send_s = asoc->sending_seq;
}
@@ -3892,7 +3864,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
/* process the new consecutive TSN first */
TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
- if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
+ if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
SCTP_PRINTF("Warning, an unsent is now acked?\n");
}
@@ -3908,7 +3880,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
sctp_flight_size_decrease(tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
@@ -3933,8 +3905,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->whoTo->RTO =
/*
* sa_ignore
- * NO_NULL_CH
- * K
+ * NO_NULL_CHK
*/
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
@@ -3967,7 +3938,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
/* sa_ignore NO_NULL_CHK */
- sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
}
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
@@ -3979,17 +3950,17 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->rec.data.chunk_was_revoked = 0;
}
if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
- if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
}
}
- if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
- (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
- TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
+ if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
asoc->trigger_reset = 1;
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
@@ -4002,7 +3973,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
0,
0,
SCTP_LOG_FREE_SENT);
@@ -4066,10 +4037,8 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
}
if (net == stcb->asoc.primary_destination) {
if (stcb->asoc.alternate) {
- /*
- * release the alternate,
- * primary is good
- */
+ /* release the alternate,
+ * primary is good */
sctp_free_remote_addr(stcb->asoc.alternate);
stcb->asoc.alternate = NULL;
}
@@ -4152,10 +4121,8 @@ again:
}
} else {
if (net->window_probe) {
- /*
- * In window probes we must assure a timer
- * is still running there
- */
+ /* In window probes we must assure a timer
+ * is still running there */
net->window_probe = 0;
if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
@@ -4406,7 +4373,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
if (!TAILQ_EMPTY(&asoc->sent_queue)) {
tp1 = TAILQ_LAST(&asoc->sent_queue,
sctpchunk_listhead);
- send_s = tp1->rec.data.TSN_seq + 1;
+ send_s = tp1->rec.data.tsn + 1;
} else {
tp1 = NULL;
send_s = asoc->sending_seq;
@@ -4423,7 +4390,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
cum_ack, send_s);
if (tp1) {
SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
- tp1->rec.data.TSN_seq, (void *)tp1);
+ tp1->rec.data.tsn, (void *)tp1);
}
hopeless_peer:
*abort_now = 1;
@@ -4498,7 +4465,7 @@ hopeless_peer:
}
/* process the new consecutive TSN first */
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
+ if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
accum_moved = 1;
if (tp1->sent < SCTP_DATAGRAM_ACKED) {
@@ -4527,7 +4494,7 @@ hopeless_peer:
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
@@ -4539,7 +4506,7 @@ hopeless_peer:
tp1->whoTo->net_ack += tp1->send_size;
/* CMT SFR and DAC algos */
- this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ this_sack_lowest_newack = tp1->rec.data.tsn;
tp1->whoTo->saw_newack = 1;
if (tp1->snd_count < 2) {
@@ -4587,13 +4554,13 @@ hopeless_peer:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cum_ack,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
0,
0,
SCTP_LOG_TSN_ACKED);
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
- sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
}
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
@@ -4682,21 +4649,21 @@ hopeless_peer:
asoc->last_acked_seq = cum_ack;
TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
break;
}
if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
- if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
}
}
- if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
- (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
- TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
+ if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
asoc->trigger_reset = 1;
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
@@ -4717,7 +4684,7 @@ hopeless_peer:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cum_ack,
- tp1->rec.data.TSN_seq,
+ tp1->rec.data.tsn,
0,
0,
SCTP_LOG_FREE_SENT);
@@ -4775,7 +4742,7 @@ hopeless_peer:
}
/*
* Check for revoked fragments:
- *
+ *
* if Previous sack - Had no frags then we can't have any revoked if
* Previous sack - Had frag's then - If we now have frags aka
* num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
@@ -4798,7 +4765,7 @@ hopeless_peer:
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
sctp_flight_size_increase(tp1);
sctp_total_flight_increase(stcb, tp1);
@@ -4840,10 +4807,8 @@ hopeless_peer:
}
if (net == stcb->asoc.primary_destination) {
if (stcb->asoc.alternate) {
- /*
- * release the alternate,
- * primary is good
- */
+ /* release the alternate,
+ * primary is good */
sctp_free_remote_addr(stcb->asoc.alternate);
stcb->asoc.alternate = NULL;
}
@@ -5054,10 +5019,8 @@ again:
}
} else {
if (net->window_probe) {
- /*
- * In window probes we must assure a timer
- * is still running there
- */
+ /* In window probes we must assure a timer
+ * is still running there */
if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_SEND,
stcb->sctp_ep, stcb, net);
@@ -5167,22 +5130,17 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
{
struct sctp_queued_to_read *ctl, *nctl;
struct sctp_association *asoc;
- uint32_t tt;
- int need_reasm_check = 0, old;
+ uint32_t mid;
+ int need_reasm_check = 0;
asoc = &stcb->asoc;
- tt = strmin->last_sequence_delivered;
- if (asoc->idata_supported) {
- old = 0;
- } else {
- old = 1;
- }
+ mid = strmin->last_mid_delivered;
/*
* First deliver anything prior to and including the stream no that
* came in.
*/
TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
- if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
+ if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
/* this is deliverable now */
if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
if (ctl->on_strm_q) {
@@ -5213,11 +5171,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
} else {
/* Its a fragmented message */
if (ctl->first_frag_seen) {
- /*
- * Make it so this is next to
- * deliver, we restore later
- */
- strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
+ /* Make it so this is next to
+ * deliver, we restore later */
+ strmin->last_mid_delivered = ctl->mid - 1;
need_reasm_check = 1;
break;
}
@@ -5231,9 +5187,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
int ret;
ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
- if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
+ if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
/* Restore the next to deliver unless we are ahead */
- strmin->last_sequence_delivered = tt;
+ strmin->last_mid_delivered = mid;
}
if (ret == 0) {
/* Left the front Partial one on */
@@ -5245,9 +5201,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
* now we must deliver things in queue the normal way if any are
* now ready.
*/
- tt = strmin->last_sequence_delivered + 1;
+ mid = strmin->last_mid_delivered + 1;
TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
- if (tt == ctl->sinfo_ssn) {
+ if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* this is deliverable now */
if (ctl->on_strm_q) {
@@ -5267,7 +5223,7 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
asoc->size_on_all_streams -= ctl->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
/* deliver it to at least the delivery-q */
- strmin->last_sequence_delivered = ctl->sinfo_ssn;
+ strmin->last_mid_delivered = ctl->mid;
if (stcb->sctp_socket) {
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
@@ -5276,15 +5232,13 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
}
- tt = strmin->last_sequence_delivered + 1;
+ mid = strmin->last_mid_delivered + 1;
} else {
/* Its a fragmented message */
if (ctl->first_frag_seen) {
- /*
- * Make it so this is next to
- * deliver
- */
- strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
+ /* Make it so this is next to
+ * deliver */
+ strmin->last_mid_delivered = ctl->mid - 1;
need_reasm_check = 1;
break;
}
@@ -5303,7 +5257,7 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
struct sctp_association *asoc,
- uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn)
+ uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
struct sctp_queued_to_read *control;
struct sctp_stream_in *strm;
@@ -5319,15 +5273,18 @@ sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
* queue.
*/
strm = &asoc->strmin[stream];
- control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
+ control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
if (control == NULL) {
/* Not found */
return;
}
+ if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
+ return;
+ }
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
/* Purge hanging chunks */
- if (old && (ordered == 0)) {
- if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
+ if (!asoc->idata_supported && (ordered == 0)) {
+ if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
break;
}
}
@@ -5389,11 +5346,11 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/*
* here we will perform all the data receiver side steps for
* processing FwdTSN, as required in by pr-sctp draft:
- *
+ *
* Assume we get FwdTSN(x):
- *
- * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
- * others we have 3) examine and update re-ordering queue on
+ *
+ * 1) update local cumTSN to x 2) try to further advance cumTSN to x
+ * + others we have 3) examine and update re-ordering queue on
* pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
* report where we are.
*/
@@ -5479,7 +5436,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/* Flush all the un-ordered data based on cum-tsn */
SCTP_INP_READ_LOCK(stcb->sctp_ep);
for (sid = 0; sid < asoc->streamincnt; sid++) {
- sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
+ sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
}
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
}
@@ -5491,10 +5448,9 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (m && fwd_sz) {
/* New method. */
unsigned int num_str;
- uint32_t sequence;
- uint16_t stream;
+ uint32_t mid, cur_mid;
+ uint16_t sid;
uint16_t ordered, flags;
- int old;
struct sctp_strseq *stseq, strseqbuf;
struct sctp_strseq_mid *stseq_m, strseqbuf_m;
@@ -5503,10 +5459,8 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
SCTP_INP_READ_LOCK(stcb->sctp_ep);
if (asoc->idata_supported) {
num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
- old = 0;
} else {
num_str = fwd_sz / sizeof(struct sctp_strseq);
- old = 1;
}
for (i = 0; i < num_str; i++) {
if (asoc->idata_supported) {
@@ -5517,8 +5471,8 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (stseq_m == NULL) {
break;
}
- stream = ntohs(stseq_m->stream);
- sequence = ntohl(stseq_m->msg_id);
+ sid = ntohs(stseq_m->sid);
+ mid = ntohl(stseq_m->mid);
flags = ntohs(stseq_m->flags);
if (flags & PR_SCTP_UNORDERED_FLAG) {
ordered = 0;
@@ -5533,8 +5487,8 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (stseq == NULL) {
break;
}
- stream = ntohs(stseq->stream);
- sequence = (uint32_t) ntohs(stseq->sequence);
+ sid = ntohs(stseq->sid);
+ mid = (uint32_t) ntohs(stseq->ssn);
ordered = 1;
}
/* Convert */
@@ -5546,12 +5500,12 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
* queue where its not all delivered. If we find it
* we transmute the read entry into a PDI_ABORTED.
*/
- if (stream >= asoc->streamincnt) {
+ if (sid >= asoc->streamincnt) {
/* screwed up streams, stop! */
break;
}
- if ((asoc->str_of_pdapi == stream) &&
- (asoc->ssn_of_pdapi == sequence)) {
+ if ((asoc->str_of_pdapi == sid) &&
+ (asoc->ssn_of_pdapi == mid)) {
/*
* If this is the one we were partially
* delivering now then we no longer are.
@@ -5560,24 +5514,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
*/
asoc->fragmented_delivery_inprogress = 0;
}
- strm = &asoc->strmin[stream];
- if (asoc->idata_supported == 0) {
- uint16_t strm_at;
-
- for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
- sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
- }
- } else {
- uint32_t strm_at;
-
- for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
- sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
- }
+ strm = &asoc->strmin[sid];
+ for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
+ sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
}
TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
- if ((ctl->sinfo_stream == stream) &&
- (ctl->sinfo_ssn == sequence)) {
- str_seq = (stream << 16) | (0x0000ffff & sequence);
+ if ((ctl->sinfo_stream == sid) &&
+ (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
+ str_seq = (sid << 16) | (0x0000ffff & mid);
ctl->pdapi_aborted = 1;
sv = stcb->asoc.control_pdapi;
ctl->end_added = 1;
@@ -5600,15 +5544,15 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
SCTP_SO_NOT_LOCKED);
stcb->asoc.control_pdapi = sv;
break;
- } else if ((ctl->sinfo_stream == stream) &&
- SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
+ } else if ((ctl->sinfo_stream == sid) &&
+ SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
/* We are past our victim SSN */
break;
}
}
- if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
+ if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
/* Update the sequence number */
- strm->last_sequence_delivered = sequence;
+ strm->last_mid_delivered = mid;
}
/* now kick the stream the new way */
/* sa_ignore NO_NULL_CHK */
diff --git a/freebsd/sys/netinet/sctp_indata.h b/freebsd/sys/netinet/sctp_indata.h
index 162ca905..e277ae88 100644
--- a/freebsd/sys/netinet/sctp_indata.h
+++ b/freebsd/sys/netinet/sctp_indata.h
@@ -42,20 +42,19 @@ struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
struct sctp_nets *net,
uint32_t tsn, uint32_t ppid,
- uint32_t context, uint16_t stream_no,
- uint32_t stream_seq, uint8_t flags,
+ uint32_t context, uint16_t sid,
+ uint32_t mid, uint8_t flags,
struct mbuf *dm);
-#define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, stream_no, stream_seq, flags, dm, tfsn, msgid) do { \
+#define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, sid, flags, dm, tfsn, mid) do { \
if (_ctl) { \
atomic_add_int(&((net)->ref_count), 1); \
memset(_ctl, 0, sizeof(struct sctp_queued_to_read)); \
- (_ctl)->sinfo_stream = stream_no; \
- (_ctl)->sinfo_ssn = stream_seq; \
+ (_ctl)->sinfo_stream = sid; \
TAILQ_INIT(&_ctl->reasm); \
(_ctl)->top_fsn = tfsn; \
- (_ctl)->msg_id = msgid; \
+ (_ctl)->mid = mid; \
(_ctl)->sinfo_flags = (flags << 8); \
(_ctl)->sinfo_ppid = ppid; \
(_ctl)->sinfo_context = context; \
diff --git a/freebsd/sys/netinet/sctp_input.c b/freebsd/sys/netinet/sctp_input.c
index 621784ea..3c596c48 100644
--- a/freebsd/sys/netinet/sctp_input.c
+++ b/freebsd/sys/netinet/sctp_input.c
@@ -163,10 +163,8 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset,
*abort_no_unlock = 1;
goto outnow;
}
- /*
- * We are only accepting if we have a socket with positive
- * so_qlimit.
- */
+ /* We are only accepting if we have a socket with positive
+ * so_qlimit. */
if ((stcb == NULL) &&
((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
@@ -322,14 +320,14 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
/* abandon the upper streams */
newcnt = ntohs(init->num_inbound_streams);
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
- if (chk->rec.data.stream_number >= newcnt) {
+ if (chk->rec.data.sid >= newcnt) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
}
if (chk->data != NULL) {
@@ -414,8 +412,8 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
return (-1);
}
for (i = 0; i < asoc->streamincnt; i++) {
- asoc->strmin[i].stream_no = i;
- asoc->strmin[i].last_sequence_delivered = 0xffffffff;
+ asoc->strmin[i].sid = i;
+ asoc->strmin[i].last_mid_delivered = 0xffffffff;
TAILQ_INIT(&asoc->strmin[i].inqueue);
TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
asoc->strmin[i].pd_api_started = 0;
@@ -711,10 +709,8 @@ sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
LIST_REMOVE(stcb, sctp_asocs);
stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
- /*
- * put it in the bucket in the vtag hash of assoc's for the
- * system
- */
+ /* put it in the bucket in the vtag hash of assoc's for the
+ * system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
return (1);
@@ -732,10 +728,8 @@ sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
sctp_toss_old_cookies(stcb, &stcb->asoc);
stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
- /*
- * put it in the bucket in the vtag hash of assoc's for the
- * system
- */
+ /* put it in the bucket in the vtag hash of assoc's for the
+ * system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
return (1);
@@ -766,7 +760,6 @@ sctp_handle_abort(struct sctp_abort_chunk *abort,
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
uint16_t len;
uint16_t error;
@@ -868,10 +861,8 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
struct sctp_association *asoc;
int some_on_streamwheel;
int old_state;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
SCTPDBG(SCTP_DEBUG_INPUT2,
@@ -948,10 +939,8 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
- /*
- * notify upper layer that peer has initiated a
- * shutdown
- */
+ /* notify upper layer that peer has initiated a
+ * shutdown */
sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
/* reset time */
@@ -1001,7 +990,6 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
struct sctp_nets *net)
{
struct sctp_association *asoc;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
@@ -1171,10 +1159,8 @@ sctp_handle_error(struct sctp_chunkhdr *ch,
uint16_t error_len;
struct sctp_association *asoc;
int adjust;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
/* parse through all of the errors and process */
@@ -1494,10 +1480,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
int retval;
int spec_flag = 0;
uint32_t how_indx;
-
#if defined(SCTP_DETAILED_STR_STATS)
int j;
-
#endif
net = *netp;
@@ -1621,7 +1605,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
/*
* Here is where collision would go if we
@@ -1822,7 +1805,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
(inp->sctp_socket->so_qlimit == 0)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
stcb->sctp_ep->sctp_flags |=
SCTP_PCB_FLAGS_CONNECTED;
@@ -1882,10 +1864,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
cookie->tie_tag_peer_vtag != 0) {
struct sctpasochead *head;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
if (asoc->peer_supports_nat) {
@@ -1977,7 +1957,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
asoc->strmout[i].abandoned_sent[0] = 0;
asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
- stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].sid = i;
stcb->asoc.strmout[i].next_mid_ordered = 0;
stcb->asoc.strmout[i].next_mid_unordered = 0;
stcb->asoc.strmout[i].last_msg_incomplete = 0;
@@ -2069,7 +2049,6 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
int retval;
int error = 0;
uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
@@ -2444,14 +2423,11 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
struct sctp_nets *netl;
int had_a_existing_tcb = 0;
int send_int_conf = 0;
-
#ifdef INET
struct sockaddr_in sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
SCTPDBG(SCTP_DEBUG_INPUT2,
@@ -2773,10 +2749,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
if (so == NULL) {
struct mbuf *op_err;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *pcb_so;
-
#endif
/* Too many sockets */
SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
@@ -2889,10 +2863,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
(*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
}
- /*
- * Pull it from the incomplete queue and wake the
- * guy
- */
+ /* Pull it from the incomplete queue and wake the
+ * guy */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
atomic_add_int(&(*stcb)->asoc.refcnt, 1);
SCTP_TCB_UNLOCK((*stcb));
@@ -3057,18 +3029,18 @@ sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
if (lchk == NULL) {
window_data_tsn = stcb->asoc.sending_seq - 1;
} else {
- window_data_tsn = lchk->rec.data.TSN_seq;
+ window_data_tsn = lchk->rec.data.tsn;
}
/* Find where it was sent to if possible. */
net = NULL;
TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
- if (lchk->rec.data.TSN_seq == tsn) {
+ if (lchk->rec.data.tsn == tsn) {
net = lchk->whoTo;
net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
break;
}
- if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
+ if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
break;
}
}
@@ -3104,10 +3076,8 @@ sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
}
if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
- /*
- * JRS - Use the congestion control given in the pluggable
- * CC module
- */
+ /* JRS - Use the congestion control given in the pluggable
+ * CC module */
stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
/*
* We reduce once every RTT. So we will only lower cwnd at
@@ -3195,10 +3165,8 @@ sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSE
struct sctp_tcb *stcb, struct sctp_nets *net)
{
struct sctp_association *asoc;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
SCTPDBG(SCTP_DEBUG_INPUT2,
@@ -3262,11 +3230,11 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
tsn = ntohl(desc->tsn_ifany);
TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
- if (tp1->rec.data.TSN_seq == tsn) {
+ if (tp1->rec.data.tsn == tsn) {
/* found it */
break;
}
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
/* not found */
tp1 = NULL;
break;
@@ -3279,7 +3247,7 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
*/
SCTP_STAT_INCR(sctps_pdrpdnfnd);
TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
- if (tp1->rec.data.TSN_seq == tsn) {
+ if (tp1->rec.data.tsn == tsn) {
/* found it */
break;
}
@@ -3345,7 +3313,7 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
} else {
- tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
}
/* restart the timer */
@@ -3361,7 +3329,7 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
tp1->whoTo->flight_size,
tp1->book_size,
(uint32_t) (uintptr_t) stcb,
- tp1->rec.data.TSN_seq);
+ tp1->rec.data.tsn);
}
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
sctp_flight_size_decrease(tp1);
@@ -3432,10 +3400,8 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
case SCTP_HEARTBEAT_REQUEST:
/* resend a demand HB */
if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
- /*
- * Only retransmit if we KNOW we wont destroy the
- * tcb
- */
+ /* Only retransmit if we KNOW we wont destroy the
+ * tcb */
sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
}
break;
@@ -3507,12 +3473,12 @@ sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *
if (temp >= stcb->asoc.streamincnt) {
continue;
}
- stcb->asoc.strmin[temp].last_sequence_delivered = 0xffffffff;
+ stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
}
} else {
list = NULL;
for (i = 0; i < stcb->asoc.streamincnt; i++) {
- stcb->asoc.strmin[i].last_sequence_delivered = 0xffffffff;
+ stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
}
}
sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
@@ -3676,10 +3642,8 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
- /*
- * Set it up so we don't stop
- * retransmitting
- */
+ /* Set it up so we don't stop
+ * retransmitting */
asoc->stream_reset_outstanding++;
stcb->asoc.str_reset_seq_out--;
asoc->stream_reset_out_is_outstanding = 1;
@@ -4056,8 +4020,8 @@ sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *ch
for (i = 0; i < stcb->asoc.streamincnt; i++) {
TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
- stcb->asoc.strmin[i].stream_no = i;
- stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
+ stcb->asoc.strmin[i].sid = i;
+ stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
/* now anything on those queues? */
@@ -4074,8 +4038,8 @@ sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *ch
for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
- stcb->asoc.strmin[i].stream_no = i;
- stcb->asoc.strmin[i].last_sequence_delivered = 0xffffffff;
+ stcb->asoc.strmin[i].sid = i;
+ stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
stcb->asoc.strmin[i].pd_api_started = 0;
stcb->asoc.strmin[i].delivery_started = 0;
}
@@ -4532,10 +4496,8 @@ __attribute__((noinline))
uint32_t auth_offset = 0, auth_len = 0;
int auth_skipped = 0;
int asconf_cnt = 0;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
@@ -4735,10 +4697,8 @@ __attribute__((noinline))
return (NULL);
}
}
- } /* end if !SCTP_COOKIE_ECHO */
- /*
- * process all control chunks...
- */
+ } /* end if !SCTP_COOKIE_ECHO *//* process all
+ * control chunks... */
if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
(ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
(ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
@@ -5032,10 +4992,8 @@ process_control_chunks:
}
}
break;
- /*
- * EY - nr_sack: If the received chunk is an
- * nr_sack chunk
- */
+ /* EY - nr_sack: If the received chunk is an
+ * nr_sack chunk */
case SCTP_NR_SELECTIVE_ACK:
{
struct sctp_nr_sack_chunk *nr_sack;
@@ -5517,6 +5475,11 @@ process_control_chunks:
*offset = length;
return (NULL);
}
+ /*
+ * For sending a SACK this looks like DATA
+ * chunks.
+ */
+ stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
sctp_handle_forward_tsn(stcb,
(struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
if (abort_flag) {
@@ -5648,8 +5611,7 @@ process_control_chunks:
/* discard this packet */
*offset = length;
return (stcb);
- } /* else skip this bad chunk and continue... */
- break;
+ } /* else skip this bad chunk and continue... */ break;
} /* switch (ch->chunk_type) */
@@ -6106,10 +6068,8 @@ sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
struct sctphdr *sh;
struct sctp_chunkhdr *ch;
int length, offset;
-
#if !defined(SCTP_WITH_NO_CSUM)
uint8_t compute_crc;
-
#endif
uint32_t mflowid;
uint8_t mflowtype;
@@ -6210,7 +6170,6 @@ out:
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
extern int *sctp_cpuarry;
-
#endif
int
@@ -6258,5 +6217,4 @@ sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
sctp_input_with_port(m, off, 0);
return (IPPROTO_DONE);
}
-
#endif
diff --git a/freebsd/sys/netinet/sctp_os_bsd.h b/freebsd/sys/netinet/sctp_os_bsd.h
index e87914e5..438973cb 100644
--- a/freebsd/sys/netinet/sctp_os_bsd.h
+++ b/freebsd/sys/netinet/sctp_os_bsd.h
@@ -247,7 +247,6 @@ MALLOC_DECLARE(SCTP_M_MCORE);
/* SCTP_ZONE_INIT: initialize the zone */
typedef struct uma_zone *sctp_zone_t;
-
#define SCTP_ZONE_INIT(zone, name, size, number) { \
zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,\
0); \
diff --git a/freebsd/sys/netinet/sctp_output.c b/freebsd/sys/netinet/sctp_output.c
index 9e12e775..94f9d866 100644
--- a/freebsd/sys/netinet/sctp_output.c
+++ b/freebsd/sys/netinet/sctp_output.c
@@ -1945,7 +1945,6 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
struct sctp_paramhdr *parmh;
struct mbuf *mret;
uint16_t plen;
-
#endif
switch (ifa->address.sa.sa_family) {
@@ -2141,10 +2140,8 @@ skip_count:
cnt++;
total_count++;
if (cnt >= 2) {
- /*
- * two from each
- * address
- */
+ /* two from each
+ * address */
break;
}
if (total_count > SCTP_ADDRESS_LIMIT) {
@@ -2786,7 +2783,6 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
{
struct sctp_ifa *ifa, *sifa;
int num_eligible_addr = 0;
-
#ifdef INET6
struct sockaddr_in6 sin6, lsa6;
@@ -2831,10 +2827,8 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
if (fam == AF_INET6 &&
IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
- /*
- * link-local <-> link-local must belong to the same
- * scope.
- */
+ /* link-local <-> link-local must belong to the same
+ * scope. */
memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
(void)sa6_recoverscope(&lsa6);
if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
@@ -2968,10 +2962,8 @@ sctp_choose_boundall(struct sctp_inpcb *inp,
struct sctp_ifa *sctp_ifa, *sifa;
uint32_t ifn_index;
struct sctp_vrf *vrf;
-
#ifdef INET
int retried = 0;
-
#endif
/*-
@@ -3317,14 +3309,11 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
struct sctp_ifa *answer;
uint8_t dest_is_priv, dest_is_loop;
sa_family_t fam;
-
#ifdef INET
struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
-
#endif
#ifdef INET6
struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
-
#endif
/**
@@ -3572,14 +3561,11 @@ sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *er
struct cmsghdr cmh;
int tlen, at;
struct sctp_initmsg initmsg;
-
#ifdef INET
struct sockaddr_in sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
tlen = SCTP_BUF_LEN(control);
@@ -3620,10 +3606,8 @@ sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *er
if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
struct sctp_stream_out *tmp_str;
unsigned int i;
-
#if defined(SCTP_DETAILED_STR_STATS)
int j;
-
#endif
/* Default is NOT correct */
@@ -3656,7 +3640,7 @@ sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *er
stcb->asoc.strmout[i].abandoned_sent[0] = 0;
stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
- stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].sid = i;
stcb->asoc.strmout[i].last_msg_incomplete = 0;
stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
@@ -3746,14 +3730,11 @@ sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
int tlen, at;
struct sctp_tcb *stcb;
struct sockaddr *addr;
-
#ifdef INET
struct sockaddr_in sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
tlen = SCTP_BUF_LEN(control);
@@ -3972,7 +3953,6 @@ sctp_handle_no_route(struct sctp_tcb *stcb,
}
}
}
-
#endif
static int
@@ -4018,22 +3998,17 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
struct sctphdr *sctphdr;
int packet_length;
int ret;
-
#if defined(INET) || defined(INET6)
uint32_t vrf_id;
-
#endif
#if defined(INET) || defined(INET6)
struct mbuf *o_pak;
sctp_route_t *ro = NULL;
struct udphdr *udp = NULL;
-
#endif
uint8_t tos_value;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so = NULL;
-
#endif
if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
@@ -4291,10 +4266,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
/* free tempy routes */
RO_RTFREE(ro);
} else {
- /*
- * PMTU check versus smallest asoc MTU goes
- * here
- */
+ /* PMTU check versus smallest asoc MTU goes
+ * here */
if ((ro->ro_rt != NULL) &&
(net->ro._s_addr)) {
uint32_t mtu;
@@ -4569,10 +4542,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
if (net) {
sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
- /*
- * preserve the port and scope for link
- * local send
- */
+ /* preserve the port and scope for link
+ * local send */
prev_scope = sin6->sin6_scope_id;
prev_port = sin6->sin6_port;
}
@@ -4638,10 +4609,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
/* Now if we had a temp route free it */
RO_RTFREE(ro);
} else {
- /*
- * PMTU check versus smallest asoc MTU goes
- * here
- */
+ /* PMTU check versus smallest asoc MTU goes
+ * here */
if (ro->ro_rt == NULL) {
/* Route was freed */
if (net->ro._s_addr &&
@@ -4971,11 +4940,11 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
* being equal to the beginning of the params i.e. (iphlen +
* sizeof(struct sctp_init_msg) parse through the parameters to the
* end of the mbuf verifying that all parameters are known.
- *
+ *
* For unknown parameters build and return a mbuf with
* UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
* processing this chunk stop, and set *abort_processing to 1.
- *
+ *
* By having param_offset be pre-set to where parameters begin it is
* hoped that this routine may be reused in the future by new
* features.
@@ -5262,7 +5231,6 @@ invalid_size:
*abort_processing = 1;
if ((op_err == NULL) && phdr) {
int l_len;
-
#ifdef INET6
l_len = SCTP_MIN_OVERHEAD;
#else
@@ -5320,14 +5288,11 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
uint8_t fnd;
struct sctp_nets *net;
int check_src;
-
#ifdef INET
struct sockaddr_in sin4, *sa4;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6, *sa6;
-
#endif
#ifdef INET
@@ -5511,18 +5476,15 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_paramhdr *ph;
union sctp_sockstore *over_addr;
struct sctp_scoping scp;
-
#ifdef INET
struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
struct sockaddr_in *src4 = (struct sockaddr_in *)src;
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
struct sockaddr_in6 *sin6;
-
#endif
struct sockaddr *to;
struct sctp_state_cookie stc;
@@ -5546,10 +5508,10 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
/*
* new addresses, out of here in non-cookie-wait
* states
- *
- * Send an ABORT, without the new address error cause.
- * This looks no different than if no listener was
- * present.
+ *
+ * Send an ABORT, without the new address error
+ * cause. This looks no different than if no
+ * listener was present.
*/
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
"Address added");
@@ -5562,9 +5524,9 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
/*
* change of remote encapsulation port, out of here
* in non-cookie-wait states
- *
- * Send an ABORT, without an specific error cause. This
- * looks no different than if no listener was
+ *
+ * Send an ABORT, without an specific error cause.
+ * This looks no different than if no listener was
* present.
*/
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
@@ -5718,10 +5680,8 @@ do_a_abort:
* show up in our scoped count.
*/
cnt_inits_to = 1;
- /*
- * pull out the scope_id from
- * incoming pkt
- */
+ /* pull out the scope_id from
+ * incoming pkt */
} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
/*
@@ -5746,7 +5706,6 @@ do_a_abort:
#ifdef INET6
struct sctp_nets *lnet;
-
#endif
stc.loopback_scope = asoc->scope.loopback_scope;
@@ -6208,9 +6167,9 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
if (freed_spc >= dataout) {
return;
}
- } /* if chunk was present */
- } /* if of sufficient priority */
- } /* if chunk has enabled */
+ } /* if chunk was present */
+ } /* if of sufficient priority */
+ } /* if chunk has enabled */
} /* tailqforeach */
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
@@ -6231,11 +6190,11 @@ sctp_prune_prsctp(struct sctp_tcb *stcb,
if (freed_spc >= dataout) {
return;
}
- } /* end if chk->data */
- } /* end if right class */
- } /* end if chk pr-sctp */
+ } /* end if chk->data */
+ } /* end if right class */
+ } /* end if chk pr-sctp */
} /* tailqforeachsafe (chk) */
- } /* if enabled in asoc */
+ } /* if enabled in asoc */
}
int
@@ -6391,7 +6350,7 @@ sctp_msg_append(struct sctp_tcb *stcb,
sp->net = NULL;
}
(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
- sp->stream = srcv->sinfo_stream;
+ sp->sid = srcv->sinfo_stream;
sp->msg_is_complete = 1;
sp->sender_all_done = 1;
sp->some_taken = 0;
@@ -6478,10 +6437,8 @@ error_out:
/* get the prepend space */
SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
} else {
- /*
- * We really should not get a NULL
- * in endofchain
- */
+ /* We really should not get a NULL
+ * in endofchain */
/* find end */
m = outchain;
while (m) {
@@ -6493,10 +6450,8 @@ error_out:
}
/* sanity */
if (*endofchain == NULL) {
- /*
- * huh, TSNH XXX maybe we
- * should panic
- */
+ /* huh, TSNH XXX maybe we
+ * should panic */
sctp_m_freem(outchain);
goto new_mbuf;
}
@@ -6695,17 +6650,13 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
goto abort_anyway;
}
- /*
- * there is nothing queued to send, so I'm
- * done...
- */
+ /* there is nothing queued to send, so I'm
+ * done... */
if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
- /*
- * only send SHUTDOWN the first time
- * through
- */
+ /* only send SHUTDOWN the first time
+ * through */
if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
@@ -6982,14 +6933,14 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
/* record time */
data_list[i]->sent_rcv_time = net->last_sent_time;
data_list[i]->rec.data.cwnd_at_send = net->cwnd;
- data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
+ data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
if (data_list[i]->whoTo == NULL) {
data_list[i]->whoTo = net;
atomic_add_int(&net->ref_count, 1);
}
/* on to the sent queue */
tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
- if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
+ if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
struct sctp_tmit_chunk *tpp;
/* need to move back */
@@ -7000,7 +6951,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
goto all_done;
}
tp1 = tpp;
- if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
+ if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
goto back_up_more;
}
TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
@@ -7029,7 +6980,7 @@ all_done:
data_list[i]->whoTo->flight_size,
data_list[i]->book_size,
(uint32_t) (uintptr_t) data_list[i]->whoTo,
- data_list[i]->rec.data.TSN_seq);
+ data_list[i]->rec.data.tsn);
}
sctp_flight_size_increase(data_list[i]);
sctp_total_flight_increase(stcb, data_list[i]);
@@ -7197,7 +7148,7 @@ one_more_time:
(stcb->asoc.idata_supported == 0) &&
(strq->last_msg_incomplete)) {
SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
- strq->stream_no,
+ strq->sid,
strq->last_msg_incomplete);
strq->last_msg_incomplete = 0;
}
@@ -7336,10 +7287,8 @@ re_look:
SCTP_TCB_SEND_LOCK(stcb);
send_lock_up = 1;
if (sp->msg_is_complete) {
- /*
- * the sender finished the
- * msg
- */
+ /* the sender finished the
+ * msg */
goto re_look;
}
}
@@ -7546,28 +7495,28 @@ dont_do_it:
if (stcb->asoc.idata_supported == 0) {
if (rcv_flags & SCTP_DATA_UNORDERED) {
/* Just use 0. The receiver ignores the values. */
- chk->rec.data.stream_seq = 0;
+ chk->rec.data.mid = 0;
} else {
- chk->rec.data.stream_seq = strq->next_mid_ordered;
+ chk->rec.data.mid = strq->next_mid_ordered;
if (rcv_flags & SCTP_DATA_LAST_FRAG) {
strq->next_mid_ordered++;
}
}
} else {
if (rcv_flags & SCTP_DATA_UNORDERED) {
- chk->rec.data.stream_seq = strq->next_mid_unordered;
+ chk->rec.data.mid = strq->next_mid_unordered;
if (rcv_flags & SCTP_DATA_LAST_FRAG) {
strq->next_mid_unordered++;
}
} else {
- chk->rec.data.stream_seq = strq->next_mid_ordered;
+ chk->rec.data.mid = strq->next_mid_ordered;
if (rcv_flags & SCTP_DATA_LAST_FRAG) {
strq->next_mid_ordered++;
}
}
}
- chk->rec.data.stream_number = sp->stream;
- chk->rec.data.payloadtype = sp->ppid;
+ chk->rec.data.sid = sp->sid;
+ chk->rec.data.ppid = sp->ppid;
chk->rec.data.context = sp->context;
chk->rec.data.doing_fast_retransmit = 0;
@@ -7585,12 +7534,12 @@ dont_do_it:
sctp_auth_key_acquire(stcb, chk->auth_keyid);
chk->holds_key_ref = 1;
}
- chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
+ chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
(uint32_t) (uintptr_t) stcb, sp->length,
- (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
- chk->rec.data.TSN_seq);
+ (uint32_t) ((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
+ chk->rec.data.tsn);
}
if (stcb->asoc.idata_supported == 0) {
dchkh = mtod(chk->data, struct sctp_data_chunk *);
@@ -7608,9 +7557,9 @@ dont_do_it:
asoc->tsn_out_at = 0;
asoc->tsn_out_wrapped = 1;
}
- asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
- asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
- asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
+ asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
+ asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
+ asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
@@ -7621,20 +7570,20 @@ dont_do_it:
if (stcb->asoc.idata_supported == 0) {
dchkh->ch.chunk_type = SCTP_DATA;
dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
- dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
- dchkh->dp.stream_id = htons((strq->stream_no & 0x0000ffff));
- dchkh->dp.stream_sequence = htons((uint16_t) chk->rec.data.stream_seq);
- dchkh->dp.protocol_id = chk->rec.data.payloadtype;
+ dchkh->dp.tsn = htonl(chk->rec.data.tsn);
+ dchkh->dp.sid = htons(strq->sid);
+ dchkh->dp.ssn = htons((uint16_t) chk->rec.data.mid);
+ dchkh->dp.ppid = chk->rec.data.ppid;
dchkh->ch.chunk_length = htons(chk->send_size);
} else {
ndchkh->ch.chunk_type = SCTP_IDATA;
ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
- ndchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
- ndchkh->dp.stream_id = htons(strq->stream_no);
+ ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
+ ndchkh->dp.sid = htons(strq->sid);
ndchkh->dp.reserved = htons(0);
- ndchkh->dp.msg_id = htonl(chk->rec.data.stream_seq);
+ ndchkh->dp.mid = htonl(chk->rec.data.mid);
if (sp->fsn == 0)
- ndchkh->dp.ppid_fsn.protocol_id = chk->rec.data.payloadtype;
+ ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
else
ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
sp->fsn++;
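The branch above fills one of two wire headers behind the common chunk header (type/flags/length), depending on whether I-DATA was negotiated. Sketched layouts with illustrative ex_* names; the real structures live in the SCTP headers:

/* Illustrative layouts only, matching the fields assigned above. */
struct ex_data_chunk_hdr {          /* DATA (RFC 4960)                  */
	uint32_t tsn;               /* transmission sequence number     */
	uint16_t sid;               /* stream identifier                */
	uint16_t ssn;               /* 16-bit stream sequence number    */
	uint32_t ppid;              /* payload protocol identifier      */
};

struct ex_idata_chunk_hdr {         /* I-DATA (RFC 8260)                */
	uint32_t tsn;
	uint16_t sid;
	uint16_t reserved;          /* replaces the 16-bit SSN          */
	uint32_t mid;               /* 32-bit message identifier        */
	union {
		uint32_t ppid;      /* first fragment carries the PPID  */
		uint32_t fsn;       /* later fragments carry the FSN    */
	} ppid_fsn;
};

The trailing union mirrors the code: only the first fragment of an I-DATA message carries the PPID on the wire, later fragments reuse those four bytes for the fragment sequence number, which is why sp->fsn selects between ppid and fsn above.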
@@ -7957,9 +7906,9 @@ nothing_to_send:
* (when CMT is off) then it calls
* sctp_fill_outqueue for the net. This gets data on
* the send queue for that network.
- *
- * In sctp_fill_outqueue TSN's are assigned and data is
- * copied out of the stream buffers. Note mostly
+ *
+ * In sctp_fill_outqueue TSN's are assigned and data
+ * is copied out of the stream buffers. Note mostly
* copy by reference (we hope).
*/
net->window_probe = 0;
@@ -8228,10 +8177,8 @@ again_one_more_time:
net->port, NULL,
0, 0,
so_locked))) {
- /*
- * error, we could not
- * output
- */
+ /* error, we could not
+ * output */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
if (from_where == 0) {
SCTP_STAT_INCR(sctps_lowlevelerrusr);
@@ -8304,16 +8251,12 @@ again_one_more_time:
* to where the sack is going..
*/
if (chk->whoTo == net) {
- /*
- * Don't transmit it to where its
- * going (current net)
- */
+ /* Don't transmit it to where its
+ * going (current net) */
continue;
} else if (sack_goes_to == net) {
- /*
- * But do transmit it to this
- * address
- */
+ /* But do transmit it to this
+ * address */
goto skip_net_check;
}
}
@@ -8506,10 +8449,8 @@ again_one_more_time:
net->port, NULL,
0, 0,
so_locked))) {
- /*
- * error, we could not
- * output
- */
+ /* error, we could not
+ * output */
SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
if (from_where == 0) {
SCTP_STAT_INCR(sctps_lowlevelerrusr);
@@ -8706,17 +8647,13 @@ again_one_more_time:
override_ok = 0;
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
} else if (override_ok) {
- /*
- * use this data's
- * keyid
- */
+ /* use this data's
+ * keyid */
auth_keyid = chk->auth_keyid;
override_ok = 0;
} else if (auth_keyid != chk->auth_keyid) {
- /*
- * different keyid,
- * so done bundling
- */
+ /* different keyid,
+ * so done bundling */
break;
}
}
@@ -8795,8 +8732,7 @@ again_one_more_time:
break;
}
} /* for (chunk gather loop for this net) */
- } /* if asoc.state OPEN */
-no_data_fill:
+} /* if asoc.state OPEN */ no_data_fill:
/* Is there something to send for this destination? */
if (outchain) {
/* We may need to start a control timer or two */
@@ -8883,7 +8819,7 @@ no_data_fill:
}
if (bundle_at) {
/* setup for a RTO measurement */
- tsns_sent = data_list[0]->rec.data.TSN_seq;
+ tsns_sent = data_list[0]->rec.data.tsn;
/* fill time if not already filled */
if (*now_filled == 0) {
(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
@@ -9564,7 +9500,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
}
if (chk->data == NULL) {
SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
- chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
+ chk->rec.data.tsn, chk->snd_count, chk->sent);
continue;
}
if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
@@ -9573,7 +9509,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
char msg[SCTP_DIAG_INFO_LEN];
snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
- chk->rec.data.TSN_seq, chk->snd_count);
+ chk->rec.data.tsn, chk->snd_count);
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
msg);
atomic_add_int(&stcb->asoc.refcnt, 1);
@@ -9607,7 +9543,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
uint32_t tsn;
tsn = asoc->last_acked_seq + 1;
- if (tsn == chk->rec.data.TSN_seq) {
+ if (tsn == chk->rec.data.tsn) {
/*
* we make a special exception for this
* case. The peer has no rwnd but is missing
@@ -9736,10 +9672,8 @@ one_chunk_around:
auth_keyid = fwd->auth_keyid;
override_ok = 0;
} else if (fwd->auth_keyid != auth_keyid) {
- /*
- * different keyid,
- * so done bundling
- */
+ /* different keyid,
+ * so done bundling */
break;
}
}
@@ -9819,7 +9753,7 @@ one_chunk_around:
sctp_audit_log(0xC4, bundle_at);
#endif
if (bundle_at) {
- tsns_sent = data_list[0]->rec.data.TSN_seq;
+ tsns_sent = data_list[0]->rec.data.tsn;
}
for (i = 0; i < bundle_at; i++) {
SCTP_STAT_INCR(sctps_sendretransdata);
@@ -9869,7 +9803,7 @@ one_chunk_around:
data_list[i]->whoTo->flight_size,
data_list[i]->book_size,
(uint32_t) (uintptr_t) data_list[i]->whoTo,
- data_list[i]->rec.data.TSN_seq);
+ data_list[i]->rec.data.tsn);
}
sctp_flight_size_increase(data_list[i]);
sctp_total_flight_increase(stcb, data_list[i]);
@@ -10115,11 +10049,9 @@ do_it_again:
if (asoc->max_burst > 0) {
if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
- /*
- * JRS - Use the congestion
+ /* JRS - Use the congestion
* control given in the
- * congestion control module
- */
+ * congestion control module */
asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
@@ -10129,10 +10061,8 @@ do_it_again:
net->fast_retran_ip = 0;
} else {
if (net->flight_size == 0) {
- /*
- * Should be decaying the
- * cwnd here
- */
+ /* Should be decaying the
+ * cwnd here */
;
}
}
@@ -10268,13 +10198,7 @@ send_forward_tsn(struct sctp_tcb *stcb,
unsigned int cnt_of_space, i, ovh;
unsigned int space_needed;
unsigned int cnt_of_skipped = 0;
- int old;
- if (asoc->idata_supported) {
- old = 0;
- } else {
- old = 1;
- }
SCTP_TCB_LOCK_ASSERT(stcb);
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
@@ -10328,18 +10252,18 @@ sctp_fill_in_rest:
/* no more to look at */
break;
}
- if (old && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+ if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
/* We don't report these */
continue;
}
cnt_of_skipped++;
}
- if (old) {
+ if (asoc->idata_supported) {
space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
- (cnt_of_skipped * sizeof(struct sctp_strseq)));
+ (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
} else {
space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
- (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
}
cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
@@ -10368,12 +10292,11 @@ sctp_fill_in_rest:
0xff, 0xff, cnt_of_space,
space_needed);
}
- if (old) {
- cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
- cnt_of_skipped /= sizeof(struct sctp_strseq);
- } else {
- cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ if (asoc->idata_supported) {
cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
+ } else {
+ cnt_of_skipped /= sizeof(struct sctp_strseq);
}
/*-
* Go through and find the TSN that will be the one
@@ -10391,7 +10314,7 @@ sctp_fill_in_rest:
}
if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
sctp_misc_ints(SCTP_FWD_TSN_CHECK,
- 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
+ 0xff, cnt_of_skipped, at->rec.data.tsn,
asoc->advanced_peer_ack_point);
}
last = at;
@@ -10400,14 +10323,14 @@ sctp_fill_in_rest:
* peer ack point
*/
if (last) {
- advance_peer_ack_point = last->rec.data.TSN_seq;
+ advance_peer_ack_point = last->rec.data.tsn;
}
- if (old) {
+ if (asoc->idata_supported) {
space_needed = sizeof(struct sctp_forward_tsn_chunk) +
- cnt_of_skipped * sizeof(struct sctp_strseq);
+ cnt_of_skipped * sizeof(struct sctp_strseq_mid);
} else {
space_needed = sizeof(struct sctp_forward_tsn_chunk) +
- cnt_of_skipped * sizeof(struct sctp_strseq_mid);
+ cnt_of_skipped * sizeof(struct sctp_strseq);
}
}
chk->send_size = space_needed;
@@ -10415,10 +10338,10 @@ sctp_fill_in_rest:
fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
fwdtsn->ch.chunk_length = htons(chk->send_size);
fwdtsn->ch.chunk_flags = 0;
- if (old) {
- fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
- } else {
+ if (asoc->idata_supported) {
fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
+ } else {
+ fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
}
fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
SCTP_BUF_LEN(chk->data) = chk->send_size;
@@ -10427,10 +10350,12 @@ sctp_fill_in_rest:
* Move pointer to after the fwdtsn and transfer to the
* strseq pointer.
*/
- if (old) {
- strseq = (struct sctp_strseq *)fwdtsn;
- } else {
+ if (asoc->idata_supported) {
strseq_m = (struct sctp_strseq_mid *)fwdtsn;
+ strseq = NULL;
+ } else {
+ strseq = (struct sctp_strseq *)fwdtsn;
+ strseq_m = NULL;
}
/*-
* Now populate the strseq list. This is done blindly
@@ -10449,26 +10374,26 @@ sctp_fill_in_rest:
if (i >= cnt_of_skipped) {
break;
}
- if (old && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+ if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
/* We don't report these */
continue;
}
- if (at->rec.data.TSN_seq == advance_peer_ack_point) {
+ if (at->rec.data.tsn == advance_peer_ack_point) {
at->rec.data.fwd_tsn_cnt = 0;
}
- if (old) {
- strseq->stream = htons(at->rec.data.stream_number);
- strseq->sequence = htons((uint16_t) at->rec.data.stream_seq);
- strseq++;
- } else {
- strseq_m->stream = htons(at->rec.data.stream_number);
+ if (asoc->idata_supported) {
+ strseq_m->sid = htons(at->rec.data.sid);
if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
} else {
strseq_m->flags = 0;
}
- strseq_m->msg_id = htonl(at->rec.data.stream_seq);
+ strseq_m->mid = htonl(at->rec.data.mid);
strseq_m++;
+ } else {
+ strseq->sid = htons(at->rec.data.sid);
+ strseq->ssn = htons((uint16_t) at->rec.data.mid);
+ strseq++;
}
i++;
}
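The sizing and population code above differs by entry width: a FORWARD-TSN reports each skipped message as a 4-byte stream/SSN pair, while an I-FORWARD-TSN needs 8 bytes to carry the 32-bit MID plus a flags word. Illustrative entry layouts, assuming the field order used by the assignments above (the real declarations are struct sctp_strseq and struct sctp_strseq_mid):

/* Illustrative per-message entries appended after the (I-)FORWARD-TSN header. */
struct ex_strseq {          /* FORWARD-TSN entry, 4 bytes   */
	uint16_t sid;
	uint16_t ssn;
};

struct ex_strseq_mid {      /* I-FORWARD-TSN entry, 8 bytes */
	uint16_t sid;
	uint16_t flags;     /* carries the unordered (U) bit */
	uint32_t mid;
};

This also explains the !idata_supported skip above: the legacy entry has no place for an unordered flag, so unordered chunks are simply not reported in an old-style FORWARD-TSN, while the I-FORWARD-TSN format can represent them via flags.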
@@ -11019,23 +10944,18 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
struct mbuf *mout;
struct sctphdr *shout;
struct sctp_chunkhdr *ch;
-
#if defined(INET) || defined(INET6)
struct udphdr *udp;
int ret;
-
#endif
int len, cause_len, padding_len;
-
#ifdef INET
struct sockaddr_in *src_sin, *dst_sin;
struct ip *ip;
-
#endif
#ifdef INET6
struct sockaddr_in6 *src_sin6, *dst_sin6;
struct ip6_hdr *ip6;
-
#endif
/* Compute the length of the cause and add final padding. */
@@ -11624,10 +11544,8 @@ sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, u
asoc = &stcb->asoc;
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
- /*
- * found a previous CWR queued to same destination
- * update it if needed
- */
+ /* found a previous CWR queued to same destination
+ * update it if needed */
uint32_t ctsn;
cwr = mtod(chk->data, struct sctp_cwr_chunk *);
@@ -12171,10 +12089,8 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
struct sctp_stream_out *oldstream;
struct sctp_stream_queue_pending *sp, *nsp;
int i;
-
#if defined(SCTP_DETAILED_STR_STATS)
int j;
-
#endif
oldstream = stcb->asoc.strmout;
@@ -12203,13 +12119,11 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
- stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].sid = i;
stcb->asoc.strmout[i].state = oldstream[i].state;
/* FIX ME FIX ME */
- /*
- * This should be a SS_COPY operation FIX ME STREAM
- * SCHEDULER EXPERT
- */
+ /* This should be a SS_COPY operation FIX ME STREAM
+ * SCHEDULER EXPERT */
stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
/* now anything on those queues? */
TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
@@ -12234,7 +12148,7 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
#endif
stcb->asoc.strmout[i].next_mid_ordered = 0;
stcb->asoc.strmout[i].next_mid_unordered = 0;
- stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].sid = i;
stcb->asoc.strmout[i].last_msg_incomplete = 0;
stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
@@ -12393,7 +12307,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
sp->fsn = 0;
(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
- sp->stream = srcv->sinfo_stream;
+ sp->sid = srcv->sinfo_stream;
sp->length = (uint32_t) min(uio->uio_resid, max_send_len);
if ((sp->length == (uint32_t) uio->uio_resid) &&
((user_marks_eor == 0) ||
@@ -12453,10 +12367,8 @@ sctp_sosend(struct socket *so,
int error, use_sndinfo = 0;
struct sctp_sndrcvinfo sndrcvninfo;
struct sockaddr *addr_to_use;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin;
-
#endif
if (control) {
@@ -12741,10 +12653,8 @@ sctp_lower_sosend(struct socket *so,
}
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
- /*
- * Set the connected flag so we can queue
- * data
- */
+ /* Set the connected flag so we can queue
+ * data */
soisconnecting(so);
}
hold_tcblock = 1;
@@ -12754,10 +12664,8 @@ sctp_lower_sosend(struct socket *so,
} else {
SCTP_PRINTF("Huh-3? create lock should have been on??\n");
}
- /*
- * Turn on queue only flag to prevent data from
- * being sent
- */
+ /* Turn on queue only flag to prevent data from
+ * being sent */
queue_only = 1;
asoc = &stcb->asoc;
SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
@@ -13252,10 +13160,8 @@ skip_preblock:
}
/* PR-SCTP? */
if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
- /*
- * This is ugly but we must assure locking
- * order
- */
+ /* This is ugly but we must assure locking
+ * order */
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
hold_tcblock = 1;
@@ -13532,10 +13438,8 @@ dataless_eof:
msg);
sctp_abort_an_association(stcb->sctp_ep, stcb,
op_err, SCTP_SO_LOCKED);
- /*
- * now relock the stcb so everything
- * is sane
- */
+ /* now relock the stcb so everything
+ * is sane */
hold_tcblock = 0;
stcb = NULL;
goto out;
@@ -13609,10 +13513,8 @@ skip_out_eof:
if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
/* we can attempt to send too. */
if (hold_tcblock == 0) {
- /*
- * If there is activity recv'ing sacks no need to
- * send
- */
+ /* If there is activity recv'ing sacks no need to
+ * send */
if (SCTP_TCB_TRYLOCK(stcb)) {
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
hold_tcblock = 1;
@@ -13751,6 +13653,7 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
return (0);
/* get prefix entry of address */
+ ND6_RLOCK();
LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
if (pfx->ndpr_stateflags & NDPRF_DETACHED)
continue;
@@ -13760,6 +13663,7 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
}
/* no prefix entry in the prefix list */
if (pfx == NULL) {
+ ND6_RUNLOCK();
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
return (0);
@@ -13778,16 +13682,16 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
- if (sctp_cmpaddr((struct sockaddr *)&gw6,
- ro->ro_rt->rt_gateway)) {
+ if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
+ ND6_RUNLOCK();
SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
return (1);
}
}
+ ND6_RUNLOCK();
SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
return (0);
}
-
#endif
int
diff --git a/freebsd/sys/netinet/sctp_pcb.c b/freebsd/sys/netinet/sctp_pcb.c
index 62ef1e3d..ca86a139 100644
--- a/freebsd/sys/netinet/sctp_pcb.c
+++ b/freebsd/sys/netinet/sctp_pcb.c
@@ -79,7 +79,6 @@ SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
}
return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
}
-
#endif
void
@@ -839,14 +838,11 @@ static int
sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
{
int loopback_scope;
-
#if defined(INET)
int ipv4_local_scope, ipv4_addr_legal;
-
#endif
#if defined(INET6)
int local_scope, site_scope, ipv6_addr_legal;
-
#endif
struct sctp_vrf *vrf;
struct sctp_ifn *sctp_ifn;
@@ -1218,10 +1214,8 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
if (netp != NULL) {
*netp = net;
}
- /*
- * Update the endpoint
- * pointer
- */
+ /* Update the endpoint
+ * pointer */
*inp_p = inp;
SCTP_INP_RUNLOCK(inp);
return (stcb);
@@ -1242,10 +1236,8 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
if (netp != NULL) {
*netp = net;
}
- /*
- * Update the endpoint
- * pointer
- */
+ /* Update the endpoint
+ * pointer */
*inp_p = inp;
SCTP_INP_RUNLOCK(inp);
return (stcb);
@@ -1624,15 +1616,12 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
{
struct sctp_inpcb *inp;
struct sctp_laddr *laddr;
-
#ifdef INET
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
struct sockaddr_in6 *intf_addr6;
-
#endif
int fnd;
@@ -1675,10 +1664,8 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
case AF_INET:
if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
SCTP_IPV6_V6ONLY(inp)) {
- /*
- * IPv4 on a IPv6 socket with ONLY
- * IPv6 set
- */
+ /* IPv4 on a IPv6 socket with ONLY
+ * IPv6 set */
SCTP_INP_RUNLOCK(inp);
continue;
}
@@ -1691,10 +1678,8 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
#endif
#ifdef INET6
case AF_INET6:
- /*
- * A V6 address and the endpoint is NOT
- * bound V6
- */
+ /* A V6 address and the endpoint is NOT
+ * bound V6 */
if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
SCTP_INP_RUNLOCK(inp);
continue;
@@ -1928,14 +1913,11 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
struct sctppcbhead *head;
int lport;
unsigned int i;
-
#ifdef INET
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
-
#endif
switch (nam->sa_family) {
@@ -2059,21 +2041,16 @@ sctp_findassociation_special_addr(struct mbuf *m, int offset,
struct sockaddr *dst)
{
struct sctp_paramhdr *phdr, parm_buf;
-
#if defined(INET) || defined(INET6)
struct sctp_tcb *stcb;
uint16_t ptype;
-
#endif
uint16_t plen;
-
#ifdef INET
struct sockaddr_in sin4;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
#ifdef INET
@@ -2200,10 +2177,8 @@ sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag
continue;
}
if (remote_tag) {
- /*
- * If we have both vtags that's all we match
- * on
- */
+ /* If we have both vtags that's all we match
+ * on */
if (stcb->asoc.peer_vtag == remote_tag) {
/*
* If both tags match we consider it
@@ -2321,14 +2296,11 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int offset,
struct sctp_paramhdr parm_buf, *phdr;
int ptype;
int zero_address = 0;
-
#ifdef INET
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
-
#endif
memset(&remote_store, 0, sizeof(remote_store));
@@ -2899,10 +2871,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
#ifdef INET6
case AF_INET6:
{
- /*
- * Only for pure IPv6 Address. (No IPv4
- * Mapped!)
- */
+ /* Only for pure IPv6 Address. (No IPv4
+ * Mapped!) */
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addr;
@@ -2979,10 +2949,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
/* unlock info */
if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
(sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
- /*
- * Ok, must be one-2-one and
- * allowing port re-use
- */
+ /* Ok, must be one-2-one and
+ * allowing port re-use */
port_reuse_active = 1;
goto continue_anyway;
}
@@ -3005,10 +2973,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
/* unlock info */
if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
(sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
- /*
- * Ok, must be one-2-one and
- * allowing port re-use
- */
+ /* Ok, must be one-2-one and
+ * allowing port re-use */
port_reuse_active = 1;
goto continue_anyway;
}
@@ -3148,11 +3114,11 @@ continue_anyway:
* bind specific, make sure flags is off and add a new
* address structure to the sctp_addr_list inside the ep
* structure.
- *
- * We will need to allocate one and insert it at the head. The
- * socketopt call can just insert new addresses in there as
- * well. It will also have to do the embed scope kame hack
- * too (before adding).
+ *
+ * We will need to allocate one and insert it at the head.
+ * The socketopt call can just insert new addresses in there
+ * as well. It will also have to do the embed scope kame
+ * hack too (before adding).
*/
struct sctp_ifa *ifa;
union sctp_sockstore store;
@@ -3270,11 +3236,11 @@ sctp_iterator_inp_being_freed(struct sctp_inpcb *inp)
* from happening. But of course the iterator has a
* reference on the stcb and inp. We can mark it and it will
* stop.
- *
- * If its a single iterator situation, we set the end iterator
- * flag. Otherwise we set the iterator to go to the next
- * inp.
- *
+ *
+ * If its a single iterator situation, we set the end
+ * iterator flag. Otherwise we set the iterator to go to the
+ * next inp.
+ *
*/
if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
@@ -3307,10 +3273,8 @@ sctp_iterator_inp_being_freed(struct sctp_inpcb *inp)
SCTP_INP_INCR_REF(it->inp);
}
}
- /*
- * When its put in the refcnt is incremented so decr
- * it
- */
+ /* When its put in the refcnt is incremented so decr
+ * it */
SCTP_INP_DECR_REF(inp);
}
}
@@ -3995,10 +3959,8 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
if (rmtu == 0) {
- /*
- * Start things off to match mtu of
- * interface please.
- */
+ /* Start things off to match mtu of
+ * interface please. */
SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa,
net->ro.ro_rt, net->mtu);
} else {
@@ -4820,7 +4782,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
uint32_t strseq;
stcb->asoc.control_pdapi = sq;
- strseq = (sq->sinfo_stream << 16) | sq->sinfo_ssn;
+ strseq = (sq->sinfo_stream << 16) | (sq->mid & 0x0000ffff);
sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
stcb,
SCTP_PARTIAL_DELIVERY_ABORTED,
@@ -4912,6 +4874,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
SS_ISCONNECTED);
}
socantrcvmore_locked(so);
+ socantsendmore(so);
sctp_sowwakeup(inp, so);
sctp_sorwakeup(inp, so);
SCTP_SOWAKEUP(so);
@@ -5040,11 +5003,11 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
/* pending send queue SHOULD be empty */
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
}
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
@@ -5072,11 +5035,11 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
/* sent queue SHOULD be empty */
TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
}
}
@@ -5488,10 +5451,8 @@ sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
if (stcb->asoc.last_used_address == laddr)
/* delete this address */
stcb->asoc.last_used_address = NULL;
- /*
- * Now spin through all the nets and purge any ref
- * to laddr
- */
+ /* Now spin through all the nets and purge any ref
+ * to laddr */
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
if (net->ro._s_addr == laddr->ifa) {
/* Yep, purge src address selected */
@@ -5759,7 +5720,6 @@ sctp_startup_mcore_threads(void)
}
}
-
#endif
void
@@ -6101,14 +6061,11 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
uint8_t peer_supports_nrsack;
uint8_t peer_supports_pktdrop;
uint8_t peer_supports_idata;
-
#ifdef INET
struct sockaddr_in sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
/* First get the destination address setup too. */
@@ -6271,10 +6228,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
struct mbuf *op_err;
char msg[SCTP_DIAG_INFO_LEN];
- /*
- * in setup state we
- * abort this guy
- */
+ /* in setup state we
+ * abort this guy */
snprintf(msg, sizeof(msg),
"%s:%d at %s", __FILE__, __LINE__, __func__);
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
@@ -6316,10 +6271,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
goto next_param;
}
if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
- /*
- * Link local make no sense without
- * scope
- */
+ /* Link local make no sense without
+ * scope */
goto next_param;
}
sa = (struct sockaddr *)&sin6;
@@ -6370,10 +6323,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
struct mbuf *op_err;
char msg[SCTP_DIAG_INFO_LEN];
- /*
- * in setup state we
- * abort this guy
- */
+ /* in setup state we
+ * abort this guy */
snprintf(msg, sizeof(msg),
"%s:%d at %s", __FILE__, __LINE__, __func__);
op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
@@ -6413,10 +6364,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
struct sctp_asconf_addr_param lstore, *fee;
int lptype;
struct sockaddr *lsa = NULL;
-
#ifdef INET
struct sctp_asconf_addrv4_param *fii;
-
#endif
if (stcb->asoc.asconf_supported == 0) {
@@ -6896,7 +6845,7 @@ sctp_drain_mbufs(struct sctp_tcb *stcb)
/* Now its reasm? */
TAILQ_FOREACH_SAFE(chk, &ctl->reasm, sctp_next, nchk) {
cnt++;
- SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.TSN_seq, asoc->mapping_array_base_tsn);
+ SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
@@ -6938,7 +6887,7 @@ sctp_drain_mbufs(struct sctp_tcb *stcb)
/* Now its reasm? */
TAILQ_FOREACH_SAFE(chk, &ctl->reasm, sctp_next, nchk) {
cnt++;
- SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.TSN_seq, asoc->mapping_array_base_tsn);
+ SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
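The partial-delivery abort notification in the sctp_pcb.c hunk above packs a 32-bit strseq value from the stream id and the message id; with I-DATA the MID is 32 bits wide, so only its low 16 bits fit next to the SID. A minimal sketch of that packing, assuming the same layout the read-queue code uses (SID in the upper half):

/* Illustrative helper: build the PD-API abort notification's strseq value,
 * stream id in the upper 16 bits, MID truncated to the lower 16 bits. */
static inline uint32_t
ex_pdapi_strseq(uint16_t sid, uint32_t mid)
{
	return (((uint32_t)sid << 16) | (mid & 0x0000ffff));
}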
diff --git a/freebsd/sys/netinet/sctp_pcb.h b/freebsd/sys/netinet/sctp_pcb.h
index 98204096..a2a1e7d7 100644
--- a/freebsd/sys/netinet/sctp_pcb.h
+++ b/freebsd/sys/netinet/sctp_pcb.h
@@ -353,7 +353,6 @@ struct sctp_pcbtsn_rlog {
uint16_t sz;
uint16_t flgs;
};
-
#define SCTP_READ_LOG_SIZE 135 /* we choose the number to make a pcb a page */
@@ -380,10 +379,8 @@ struct sctp_inpcb {
/* list of addrs in use by the EP, NULL if bound-all */
struct sctpladdr sctp_addr_list;
- /*
- * used for source address selection rotation when we are subset
- * bound
- */
+ /* used for source address selection rotation when we are subset
+ * bound */
struct sctp_laddr *next_addr_touse;
/* back pointer to our socket */
@@ -490,7 +487,6 @@ VNET_DECLARE(struct sctp_base_info, system_base_info);
#ifdef INET6
int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
-
#endif
void sctp_fill_pcbinfo(struct sctp_pcbinfo *);
@@ -646,7 +642,6 @@ sctp_initiate_iterator(inp_func inpf,
end_func ef,
struct sctp_inpcb *,
uint8_t co_off);
-
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
void
sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use);
diff --git a/freebsd/sys/netinet/sctp_peeloff.h b/freebsd/sys/netinet/sctp_peeloff.h
index dd905676..00b87031 100644
--- a/freebsd/sys/netinet/sctp_peeloff.h
+++ b/freebsd/sys/netinet/sctp_peeloff.h
@@ -38,6 +38,5 @@ __FBSDID("$FreeBSD$");
#if defined(_KERNEL)
int sctp_can_peel_off(struct socket *, sctp_assoc_t);
int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
-
#endif /* _KERNEL */
#endif /* _NETINET_SCTP_PEELOFF_H_ */
diff --git a/freebsd/sys/netinet/sctp_structs.h b/freebsd/sys/netinet/sctp_structs.h
index 280100bb..22a3c736 100644
--- a/freebsd/sys/netinet/sctp_structs.h
+++ b/freebsd/sys/netinet/sctp_structs.h
@@ -183,7 +183,6 @@ struct iterator_control {
uint32_t iterator_running;
uint32_t iterator_flags;
};
-
#define SCTP_ITERATOR_STOP_CUR_IT 0x00000004
#define SCTP_ITERATOR_STOP_CUR_INP 0x00000008
@@ -389,10 +388,10 @@ struct sctp_nets {
struct sctp_data_chunkrec {
- uint32_t TSN_seq; /* the TSN of this transmit */
- uint32_t stream_seq; /* the stream sequence number of this transmit */
- uint16_t stream_number; /* the stream number of this guy */
- uint32_t payloadtype;
+ uint32_t tsn; /* the TSN of this transmit */
+ uint32_t mid; /* the message identifier of this transmit */
+ uint16_t sid; /* the stream number of this guy */
+ uint32_t ppid;
uint32_t context; /* from send */
uint32_t cwnd_at_send;
/*
@@ -401,7 +400,7 @@ struct sctp_data_chunkrec {
*/
uint32_t fast_retran_tsn; /* sending_seq at the time of FR */
struct timeval timetodrop; /* time we drop it from queue */
- uint32_t fsn_num; /* Fragment Sequence Number */
+ uint32_t fsn; /* Fragment Sequence Number */
uint8_t doing_fast_retransmit;
uint8_t rcv_flags; /* flags pulled from data chunk on inbound for
* outbound holds sending flags for PR-SCTP. */
@@ -455,7 +454,6 @@ struct sctp_tmit_chunk {
struct sctp_queued_to_read { /* sinfo structure Pluse more */
uint16_t sinfo_stream; /* off the wire */
- uint32_t sinfo_ssn; /* off the wire */
uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for
* EOR */
uint32_t sinfo_ppid; /* off the wire */
@@ -465,7 +463,7 @@ struct sctp_queued_to_read { /* sinfo structure Pluse more */
uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */
sctp_assoc_t sinfo_assoc_id; /* our assoc id */
/* Non sinfo stuff */
- uint32_t msg_id; /* Fragment Index */
+ uint32_t mid; /* Fragment Index */
uint32_t length; /* length of data */
uint32_t held_length; /* length held in sb */
uint32_t top_fsn; /* Highest FSN in queue */
@@ -527,7 +525,7 @@ struct sctp_stream_queue_pending {
uint32_t ppid;
uint32_t context;
uint16_t sinfo_flags;
- uint16_t stream;
+ uint16_t sid;
uint16_t act_flags;
uint16_t auth_keyid;
uint8_t holds_key_ref;
@@ -546,8 +544,8 @@ TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
struct sctp_stream_in {
struct sctp_readhead inqueue;
struct sctp_readhead uno_inqueue;
- uint32_t last_sequence_delivered; /* used for re-order */
- uint16_t stream_no;
+ uint32_t last_mid_delivered; /* used for re-order */
+ uint16_t sid;
uint8_t delivery_started;
uint8_t pd_api_started;
};
@@ -630,7 +628,7 @@ struct sctp_stream_out {
*/
uint32_t next_mid_ordered;
uint32_t next_mid_unordered;
- uint16_t stream_no;
+ uint16_t sid;
uint8_t last_msg_incomplete;
uint8_t state;
};
@@ -883,10 +881,8 @@ struct sctp_association {
/* JRS - the congestion control functions are in this struct */
struct sctp_cc_functions cc_functions;
- /*
- * JRS - value to store the currently loaded congestion control
- * module
- */
+ /* JRS - value to store the currently loaded congestion control
+ * module */
uint32_t congestion_control_module;
/* RS - the stream scheduling functions are in this struct */
struct sctp_ss_functions ss_functions;
diff --git a/freebsd/sys/netinet/sctp_sysctl.c b/freebsd/sys/netinet/sctp_sysctl.c
index 8715c69b..152a2996 100644
--- a/freebsd/sys/netinet/sctp_sysctl.c
+++ b/freebsd/sys/netinet/sctp_sysctl.c
@@ -645,12 +645,10 @@ static int
sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS)
{
int error;
-
#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
struct sctpstat *sarry;
struct sctpstat sb;
int cpu;
-
#endif
struct sctpstat sb_temp;
@@ -830,7 +828,6 @@ sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS)
memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
return (error);
}
-
#endif
#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \
diff --git a/freebsd/sys/netinet/sctp_timer.c b/freebsd/sys/netinet/sctp_timer.c
index c851317b..2d427e48 100644
--- a/freebsd/sys/netinet/sctp_timer.c
+++ b/freebsd/sys/netinet/sctp_timer.c
@@ -195,10 +195,8 @@ sctp_find_alternate_net(struct sctp_tcb *stcb,
*/
if (mode == 2) {
TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
- /*
- * JRS 5/14/07 - If the destination is unreachable
- * or unconfirmed, skip it.
- */
+ /* JRS 5/14/07 - If the destination is unreachable
+ * or unconfirmed, skip it. */
if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
(mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
continue;
@@ -288,11 +286,8 @@ sctp_find_alternate_net(struct sctp_tcb *stcb,
} else {
return (max_cwnd_net);
}
- }
- /*
- * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
- * choosing an alternate net.
- */
+ } /* JRS 5/14/07 - If mode is set to 1, use the
+ * CMT policy for choosing an alternate net. */
else if (mode == 1) {
TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
@@ -436,17 +431,17 @@ sctp_recover_sent_list(struct sctp_tcb *stcb)
asoc = &stcb->asoc;
TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
- if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
- (void *)chk, chk->rec.data.TSN_seq, asoc->last_acked_seq);
+ (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
}
}
- if ((asoc->strmout[chk->rec.data.stream_number].chunks_on_queues == 0) &&
- (asoc->strmout[chk->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
- TAILQ_EMPTY(&asoc->strmout[chk->rec.data.stream_number].outqueue)) {
+ if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
+ (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+ TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
asoc->trigger_reset = 1;
}
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
@@ -469,10 +464,9 @@ sctp_recover_sent_list(struct sctp_tcb *stcb)
}
SCTP_PRINTF("after recover order is as follows\n");
TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
- SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.TSN_seq);
+ SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
}
}
-
#endif
static int
@@ -554,10 +548,10 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
start_again:
#endif
TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
- if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) {
+ if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
/* Strange case our list got out of order? */
SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
- (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
+ (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
recovery_cnt++;
#ifdef INVARIANTS
panic("last acked >= chk on sent-Q");
@@ -582,7 +576,7 @@ start_again:
/* validate its been outstanding long enough */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
- sctp_log_fr(chk->rec.data.TSN_seq,
+ sctp_log_fr(chk->rec.data.tsn,
chk->sent_rcv_time.tv_sec,
chk->sent_rcv_time.tv_usec,
SCTP_FR_T3_MARK_TIME);
@@ -646,11 +640,11 @@ start_again:
num_mk++;
if (fir == 0) {
fir = 1;
- tsnfirst = chk->rec.data.TSN_seq;
+ tsnfirst = chk->rec.data.tsn;
}
- tsnlast = chk->rec.data.TSN_seq;
+ tsnlast = chk->rec.data.tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
- sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
+ sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
0, SCTP_FR_T3_MARKED);
}
if (chk->rec.data.chunk_was_revoked) {
@@ -665,7 +659,7 @@ start_again:
chk->whoTo->flight_size,
chk->book_size,
(uint32_t) (uintptr_t) chk->whoTo,
- chk->rec.data.TSN_seq);
+ chk->rec.data.tsn);
}
sctp_flight_size_decrease(chk);
sctp_total_flight_decrease(stcb, chk);
@@ -695,7 +689,7 @@ start_again:
if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
} else {
- chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
}
}
/*
@@ -793,7 +787,7 @@ start_again:
chk->whoTo->flight_size,
chk->book_size,
(uint32_t) (uintptr_t) chk->whoTo,
- chk->rec.data.TSN_seq);
+ chk->rec.data.tsn);
}
sctp_flight_size_increase(chk);
sctp_total_flight_increase(stcb, chk);
diff --git a/freebsd/sys/netinet/sctp_uio.h b/freebsd/sys/netinet/sctp_uio.h
index e65b7b5e..d6055163 100644
--- a/freebsd/sys/netinet/sctp_uio.h
+++ b/freebsd/sys/netinet/sctp_uio.h
@@ -147,7 +147,6 @@ struct sctp_extrcvinfo {
uint16_t sinfo_keynumber_valid;
uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD_SHORT];
};
-
#define sinfo_pr_value sinfo_timetolive
#define sreinfo_next_flags serinfo_next_flags
#define sreinfo_next_stream serinfo_next_stream
@@ -573,7 +572,6 @@ struct sctp_paddrparams {
uint16_t spp_pathmaxrxt;
uint8_t spp_dscp;
};
-
#define spp_ipv4_tos spp_dscp
#define SPP_HB_ENABLE 0x00000001
@@ -1284,7 +1282,6 @@ sctp_sorecvmsg(struct socket *so,
int *msg_flags,
struct sctp_sndrcvinfo *sinfo,
int filling_sinfo);
-
#endif
/*
diff --git a/freebsd/sys/netinet/sctp_usrreq.c b/freebsd/sys/netinet/sctp_usrreq.c
index 1cbb7076..e3e398d4 100644
--- a/freebsd/sys/netinet/sctp_usrreq.c
+++ b/freebsd/sys/netinet/sctp_usrreq.c
@@ -137,7 +137,7 @@ sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint16_t nxtsz)
chk->whoTo->flight_size,
chk->book_size,
(uint32_t) (uintptr_t) chk->whoTo,
- chk->rec.data.TSN_seq);
+ chk->rec.data.tsn);
}
/* Clear any time so NO RTT is being done */
chk->do_rtt = 0;
@@ -158,7 +158,6 @@ sctp_notify(struct sctp_inpcb *inp,
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
int timer_stopped;
@@ -346,7 +345,6 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
}
return;
}
-
#endif
static int
@@ -979,10 +977,8 @@ sctp_shutdown(struct socket *so)
}
}
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp);
- /*
- * XXX: Why do this in the case where we have still data
- * queued?
- */
+ /* XXX: Why do this in the case where we have still data
+ * queued? */
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp);
@@ -1023,14 +1019,11 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
struct sctp_ifa *sctp_ifa;
size_t actual;
int loopback_scope;
-
#if defined(INET)
int ipv4_local_scope, ipv4_addr_legal;
-
#endif
#if defined(INET6)
int local_scope, site_scope, ipv6_addr_legal;
-
#endif
struct sctp_vrf *vrf;
@@ -1176,19 +1169,14 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
if (sin6->sin6_scope_id == 0) {
if (sa6_recoverscope(sin6) != 0)
/*
- *
+ *
* bad
- *
- * li
- * nk
- *
- * loc
- * al
- *
- * add
- * re
- * ss
- * */
+ * link
+ *
+ * local
+ *
+ * address
+ */
continue;
}
}
@@ -1523,19 +1511,19 @@ out_now:
stcb = LIST_FIRST(&inp->sctp_asoc_list); \
if (stcb) { \
SCTP_TCB_LOCK(stcb); \
- } \
+ } \
SCTP_INP_RUNLOCK(inp); \
} else if (assoc_id > SCTP_ALL_ASSOC) { \
stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
if (stcb == NULL) { \
- SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
error = ENOENT; \
break; \
} \
} else { \
stcb = NULL; \
- } \
- }
+ } \
+}
#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
@@ -1546,7 +1534,7 @@ out_now:
} else { \
destp = (type *)srcp; \
} \
- }
+}
static int
sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
@@ -1825,10 +1813,8 @@ flags_out:
}
SCTP_TCB_UNLOCK(stcb);
} else {
- /*
- * Can't get stream value without
- * association
- */
+ /* Can't get stream value without
+ * association */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
@@ -2328,10 +2314,8 @@ flags_out:
struct sctp_paddrparams *paddrp;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
@@ -2531,10 +2515,8 @@ flags_out:
struct sctp_paddrinfo *paddri;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
@@ -3200,10 +3182,8 @@ flags_out:
struct sctp_paddrthlds *thlds;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, *optsize);
@@ -3314,10 +3294,8 @@ flags_out:
struct sctp_udpencaps *encaps;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, *optsize);
@@ -3911,12 +3889,10 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS))) {
inp->idata_supported = 1;
} else {
- /*
- * Must have Frag
+ /* Must have Frag
* interleave and
* stream interleave
- * on
- */
+ * on */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
@@ -4127,10 +4103,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
SCTP_INP_RUNLOCK(inp);
} else {
- /*
- * Can't set stream value without
- * association
- */
+ /* Can't set stream value without
+ * association */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
}
@@ -4360,10 +4334,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
SCTP_TCB_LOCK(stcb);
shared_keys = &stcb->asoc.shared_keys;
- /*
- * clear the cached keys for
- * this key id
- */
+ /* clear the cached keys for
+ * this key id */
sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
/*
* create the new shared key
@@ -4762,10 +4734,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
int cnt;
addstream |= 2;
- /*
- * We allocate inside
- * sctp_send_str_reset_req()
- */
+ /* We allocate inside
+ * sctp_send_str_reset_req() */
add_i_strmcnt = stradd->sas_instrms;
cnt = add_i_strmcnt;
cnt += stcb->asoc.streamincnt;
@@ -4813,10 +4783,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
SCTP_TCB_UNLOCK(stcb);
break;
}
- /*
- * Is there any data pending in the send or sent
- * queues?
- */
+ /* Is there any data pending in the send or sent
+ * queues? */
if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
busy_out:
@@ -4962,10 +4930,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
(av->assoc_id == SCTP_FUTURE_ASSOC)) {
SCTP_INP_WLOCK(inp);
- /*
- * FIXME MT: I think this is not in
- * tune with the API ID
- */
+ /* FIXME MT: I think this is not in
+ * tune with the API ID */
if (av->assoc_value) {
inp->sctp_frag_point = (av->assoc_value + ovh);
} else {
@@ -5108,10 +5074,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
SCTP_TCB_UNLOCK(stcb);
}
- /*
- * Send up the sender dry event only for 1-to-1
- * style sockets.
- */
+ /* Send up the sender dry event only for 1-to-1
+ * style sockets. */
if (events->sctp_sender_dry_event) {
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
@@ -5197,10 +5161,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
struct sctp_paddrparams *paddrp;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
@@ -5701,10 +5663,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
struct sctp_setprim *spa;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
@@ -5793,10 +5753,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_setpeerprim *sspp;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
@@ -5828,10 +5786,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
goto out_of_it;
}
if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
- /*
- * Must validate the ifa found is in
- * our ep
- */
+ /* Must validate the ifa found is in
+ * our ep */
struct sctp_laddr *laddr;
int found = 0;
@@ -6246,10 +6202,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
struct sctp_paddrthlds *thlds;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, optsize);
@@ -6417,10 +6371,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
struct sctp_udpencaps *encaps;
struct sctp_nets *net;
struct sockaddr *addr;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin_store;
-
#endif
SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, optsize);
@@ -6591,10 +6543,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
(av->assoc_id == SCTP_FUTURE_ASSOC)) {
if ((av->assoc_value == 0) &&
(inp->asconf_supported == 1)) {
- /*
- * AUTH is required for
- * ASCONF
- */
+ /* AUTH is required for
+ * ASCONF */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
} else {
@@ -6630,10 +6580,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
(av->assoc_id == SCTP_FUTURE_ASSOC)) {
if ((av->assoc_value != 0) &&
(inp->auth_supported == 0)) {
- /*
- * AUTH is required for
- * ASCONF
- */
+ /* AUTH is required for
+ * ASCONF */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
} else {
@@ -7012,7 +6960,6 @@ out_now:
SCTP_INP_DECR_REF(inp);
return (error);
}
-
#endif
int
@@ -7066,10 +7013,8 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
(tinp->sctp_socket->so_qlimit)) {
- /*
- * we have a listener already and
- * its not this inp.
- */
+ /* we have a listener already and
+ * its not this inp. */
SCTP_INP_DECR_REF(tinp);
return (EADDRINUSE);
} else if (tinp) {
@@ -7110,10 +7055,8 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
(tinp->sctp_socket->so_qlimit)) {
- /*
- * we have a listener already and its not
- * this inp.
- */
+ /* we have a listener already and its not
+ * this inp. */
SCTP_INP_DECR_REF(tinp);
return (EADDRINUSE);
} else if (tinp) {
@@ -7186,10 +7129,8 @@ sctp_accept(struct socket *so, struct sockaddr **addr)
struct sctp_tcb *stcb;
struct sctp_inpcb *inp;
union sctp_sockstore store;
-
#ifdef INET6
int error;
-
#endif
inp = (struct sctp_inpcb *)so->so_pcb;
@@ -7472,5 +7413,4 @@ struct pr_usrreqs sctp_usrreqs = {
.pru_sosend = sctp_sosend,
.pru_soreceive = sctp_soreceive
};
-
#endif
diff --git a/freebsd/sys/netinet/sctp_var.h b/freebsd/sys/netinet/sctp_var.h
index a4d2b998..6365dfec 100644
--- a/freebsd/sys/netinet/sctp_var.h
+++ b/freebsd/sys/netinet/sctp_var.h
@@ -267,7 +267,7 @@ extern struct pr_usrreqs sctp_usrreqs;
if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
stcb->asoc.fs_index = 0;\
stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
- stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
@@ -288,7 +288,7 @@ extern struct pr_usrreqs sctp_usrreqs;
if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
stcb->asoc.fs_index = 0;\
stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
- stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
@@ -332,11 +332,9 @@ void sctp_close(struct socket *so);
int sctp_disconnect(struct socket *so);
void sctp_ctlinput(int, struct sockaddr *, void *);
int sctp_ctloutput(struct socket *, struct sockopt *);
-
#ifdef INET
void sctp_input_with_port(struct mbuf *, int, uint16_t);
int sctp_input(struct mbuf **, int *, int);
-
#endif
void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
void sctp_drain(void);
diff --git a/freebsd/sys/netinet/sctputil.c b/freebsd/sys/netinet/sctputil.c
index 36a9c2ce..136b4bb6 100644
--- a/freebsd/sys/netinet/sctputil.c
+++ b/freebsd/sys/netinet/sctputil.c
@@ -257,7 +257,6 @@ sctp_log_mbc(struct mbuf *m, int from)
sctp_log_mb(mat, from);
}
}
-
#endif
void
@@ -271,11 +270,11 @@ sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_rea
}
sctp_clog.x.strlog.stcb = control->stcb;
sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
- sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
+ sctp_clog.x.strlog.n_sseq = (uint16_t) control->mid;
sctp_clog.x.strlog.strm = control->sinfo_stream;
if (poschk != NULL) {
sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
- sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
+ sctp_clog.x.strlog.e_sseq = (uint16_t) poschk->mid;
} else {
sctp_clog.x.strlog.e_tsn = 0;
sctp_clog.x.strlog.e_sseq = 0;
@@ -449,7 +448,6 @@ sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mb
sctp_clog.x.misc.log3,
sctp_clog.x.misc.log4);
}
-
#endif
void
@@ -959,10 +957,8 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* caller in the sctp_aloc_assoc() function.
*/
int i;
-
#if defined(SCTP_DETAILED_STR_STATS)
int j;
-
#endif
asoc = &stcb->asoc;
@@ -1137,7 +1133,7 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
asoc->strmout[i].abandoned_sent[0] = 0;
asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
- asoc->strmout[i].stream_no = i;
+ asoc->strmout[i].sid = i;
asoc->strmout[i].last_msg_incomplete = 0;
asoc->strmout[i].state = SCTP_STREAM_OPENING;
asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
@@ -1488,10 +1484,8 @@ sctp_handle_addr_wq(void)
sctp_asconf_iterator_end, NULL, 0);
if (ret) {
SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
- /*
- * Freeing if we are stopping or put back on the
- * addr_wq.
- */
+ /* Freeing if we are stopping or put back on the
+ * addr_wq. */
if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
sctp_asconf_iterator_end(asc, 0);
} else {
@@ -1514,10 +1508,8 @@ sctp_timeout_handler(void *t)
struct sctp_nets *net;
struct sctp_timer *tmr;
struct mbuf *op_err;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
int did_output;
int type;
@@ -2493,10 +2485,8 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
/* compute rtt in ms */
rtt = (int32_t) (net->rtt / 1000);
if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
- /*
- * Tell the CC module that a new update has just occurred
- * from a sack
- */
+ /* Tell the CC module that a new update has just occurred
+ * from a sack */
(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
}
/*
@@ -2688,10 +2678,8 @@ sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
unsigned int notif_len;
uint16_t abort_len;
unsigned int i;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
if (stcb == NULL) {
@@ -2989,9 +2977,9 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len);
ssfe->ssfe_error = error;
/* not exactly what the user sent in, but should be close :) */
- ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
+ ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
- ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
+ ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
ssfe->ssfe_info.snd_context = chk->rec.data.context;
ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
@@ -3007,10 +2995,10 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len);
ssf->ssf_error = error;
/* not exactly what the user sent in, but should be close :) */
- ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
- ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.stream_seq;
+ ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
+ ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.mid;
ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
- ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
+ ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
ssf->ssf_info.sinfo_context = chk->rec.data.context;
ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
ssf->ssf_assoc_id = sctp_get_associd(stcb);
@@ -3093,7 +3081,7 @@ sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length);
ssfe->ssfe_error = error;
/* not exactly what the user sent in, but should be close :) */
- ssfe->ssfe_info.snd_sid = sp->stream;
+ ssfe->ssfe_info.snd_sid = sp->sid;
if (sp->some_taken) {
ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
} else {
@@ -3111,7 +3099,7 @@ sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length);
ssf->ssf_error = error;
/* not exactly what the user sent in, but should be close :) */
- ssf->ssf_info.sinfo_stream = sp->stream;
+ ssf->ssf_info.sinfo_stream = sp->sid;
ssf->ssf_info.sinfo_ssn = 0;
if (sp->some_taken) {
ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
@@ -3861,11 +3849,11 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock,
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
asoc->sent_queue_cnt--;
if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
}
}
@@ -3885,11 +3873,11 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock,
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
- if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
- asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
} else {
- panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+ panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
}
if (chk->data != NULL) {
@@ -3977,25 +3965,22 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
uint32_t vrf_id, uint16_t port)
{
uint32_t vtag;
-
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
vtag = 0;
if (stcb != NULL) {
- /* We have a TCB to abort, send notification too */
vtag = stcb->asoc.peer_vtag;
- sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
- /* get the assoc vrf id and table id */
vrf_id = stcb->asoc.vrf_id;
- stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
}
sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
mflowtype, mflowid, inp->fibnum,
vrf_id, port);
if (stcb != NULL) {
+ /* We have a TCB to abort, send notification too */
+ sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
+ stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
so = SCTP_INP_SO(inp);
@@ -4017,7 +4002,6 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
#endif
}
}
-
#ifdef SCTP_ASOCLOG_OF_TSNS
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
@@ -4079,7 +4063,6 @@ none_in:
}
#endif
}
-
#endif
void
@@ -4093,7 +4076,6 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
-
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
@@ -4111,10 +4093,6 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
} else {
stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
}
- /* notify the ulp */
- if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
- sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
- }
/* notify the peer */
sctp_send_abort_tcb(stcb, op_err, so_locked);
SCTP_STAT_INCR_COUNTER32(sctps_aborted);
@@ -4122,6 +4100,10 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
(SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
}
+ /* notify the ulp */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
+ }
/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
sctp_print_out_track_log(stcb);
@@ -4305,7 +4287,6 @@ sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
}
return (addr);
}
-
#endif
/*
@@ -4359,7 +4340,6 @@ sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
char ip6buf[INET6_ADDRSTRLEN];
-
#endif
switch (sa->sa_family) {
@@ -4712,25 +4692,26 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
struct sctp_stream_out *strq;
struct sctp_tmit_chunk *chk = NULL, *tp2;
struct sctp_stream_queue_pending *sp;
- uint16_t stream = 0, seq = 0;
+ uint32_t mid;
+ uint16_t sid;
uint8_t foundeom = 0;
int ret_sz = 0;
int notdone;
int do_wakeup_routine = 0;
- stream = tp1->rec.data.stream_number;
- seq = tp1->rec.data.stream_seq;
+ sid = tp1->rec.data.sid;
+ mid = tp1->rec.data.mid;
if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
stcb->asoc.abandoned_sent[0]++;
stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
- stcb->asoc.strmout[stream].abandoned_sent[0]++;
+ stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
} else {
stcb->asoc.abandoned_unsent[0]++;
stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
- stcb->asoc.strmout[stream].abandoned_unsent[0]++;
+ stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
@@ -4784,8 +4765,8 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
* sent queue.
*/
TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
- if ((tp1->rec.data.stream_number != stream) ||
- (tp1->rec.data.stream_seq != seq)) {
+ if ((tp1->rec.data.sid != sid) ||
+ (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
break;
}
/*
@@ -4813,10 +4794,8 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
do_wakeup_routine = 1;
tp1->sent = SCTP_FORWARD_TSN_SKIP;
TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
- /*
- * on to the sent queue so we can wait for it to be
- * passed by.
- */
+ /* on to the sent queue so we can wait for it to be
+ * passed by. */
TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
sctp_next);
stcb->asoc.send_queue_cnt--;
@@ -4829,7 +4808,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
* stream out queue.. yuck.
*/
SCTP_TCB_SEND_LOCK(stcb);
- strq = &stcb->asoc.strmout[stream];
+ strq = &stcb->asoc.strmout[sid];
sp = TAILQ_FIRST(&strq->outqueue);
if (sp != NULL) {
sp->discard_rest = 1;
@@ -4856,23 +4835,23 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
chk->asoc = &stcb->asoc;
if (stcb->asoc.idata_supported == 0) {
if (sp->sinfo_flags & SCTP_UNORDERED) {
- chk->rec.data.stream_seq = 0;
+ chk->rec.data.mid = 0;
} else {
- chk->rec.data.stream_seq = strq->next_mid_ordered;
+ chk->rec.data.mid = strq->next_mid_ordered;
}
} else {
if (sp->sinfo_flags & SCTP_UNORDERED) {
- chk->rec.data.stream_seq = strq->next_mid_unordered;
+ chk->rec.data.mid = strq->next_mid_unordered;
} else {
- chk->rec.data.stream_seq = strq->next_mid_ordered;
+ chk->rec.data.mid = strq->next_mid_ordered;
}
}
- chk->rec.data.stream_number = sp->stream;
- chk->rec.data.payloadtype = sp->ppid;
+ chk->rec.data.sid = sp->sid;
+ chk->rec.data.ppid = sp->ppid;
chk->rec.data.context = sp->context;
chk->flags = sp->act_flags;
chk->whoTo = NULL;
- chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
+ chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
strq->chunks_on_queues++;
TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
stcb->asoc.sent_queue_cnt++;
@@ -5190,7 +5169,7 @@ sctp_sorecvmsg(struct socket *so,
* mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
* On the way out we may send out any combination of:
* MSG_NOTIFICATION MSG_EOR
- *
+ *
*/
struct sctp_inpcb *inp = NULL;
int my_len = 0;
@@ -5309,10 +5288,8 @@ restart_nosblocks:
* connect.
*/
if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
- /*
- * You were aborted, passive side
- * always hits here
- */
+ /* You were aborted, passive side
+ * always hits here */
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
error = ECONNRESET;
}
@@ -5411,10 +5388,8 @@ restart_nosblocks:
}
if ((control->length == 0) &&
(control->end_added == 1)) {
- /*
- * Do we also need to check for (control->pdapi_aborted ==
- * 1)?
- */
+ /* Do we also need to check for (control->pdapi_aborted ==
+ * 1)? */
if (hold_rlock == 0) {
hold_rlock = 1;
SCTP_INP_READ_LOCK(inp);
@@ -5555,7 +5530,7 @@ found_one:
/* First lets get off the sinfo and sockaddr info */
if ((sinfo != NULL) && (filling_sinfo != 0)) {
sinfo->sinfo_stream = control->sinfo_stream;
- sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
+ sinfo->sinfo_ssn = (uint16_t) control->mid;
sinfo->sinfo_flags = control->sinfo_flags;
sinfo->sinfo_ppid = control->sinfo_ppid;
sinfo->sinfo_context = control->sinfo_context;
@@ -5631,7 +5606,7 @@ found_one:
entry = &inp->readlog[index];
entry->vtag = control->sinfo_assoc_id;
entry->strm = control->sinfo_stream;
- entry->seq = control->sinfo_ssn;
+ entry->seq = (uint16_t) control->mid;
entry->sz = control->length;
entry->flgs = control->sinfo_flags;
}
@@ -5757,10 +5732,8 @@ get_more_data:
atomic_subtract_int(&control->length, cp_len);
control->data = sctp_m_free(m);
m = control->data;
- /*
- * been through it all, must hold sb
- * lock ok to null tail
- */
+ /* been through it all, must hold sb
+ * lock ok to null tail */
if (control->data == NULL) {
#ifdef INVARIANTS
if ((control->end_added == 0) ||
@@ -5979,10 +5952,8 @@ wait_some_more:
*/
SCTP_INP_READ_LOCK(inp);
if ((control->length > 0) && (control->data == NULL)) {
- /*
- * big trouble.. we have the lock and its
- * corrupt?
- */
+ /* big trouble.. we have the lock and its
+ * corrupt? */
#ifdef INVARIANTS
panic("Impossible data==NULL length !=0");
#endif
@@ -6273,14 +6244,11 @@ sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
struct sctp_inpcb *inp;
struct sockaddr *sa;
size_t incr = 0;
-
#ifdef INET
struct sockaddr_in *sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 *sin6;
-
#endif
sa = addr;
@@ -6402,6 +6370,7 @@ sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
#endif
default:
*totaddr = i;
+ incr = 0;
/* we are done */
break;
}
@@ -6435,10 +6404,8 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
uint32_t vrf_id, int *error, void *p)
{
struct sockaddr *addr_touse;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin;
-
#endif
/* see if we're bound all already! */
@@ -6567,10 +6534,8 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp,
uint32_t vrf_id, int *error)
{
struct sockaddr *addr_touse;
-
#if defined(INET) && defined(INET6)
struct sockaddr_in sin;
-
#endif
/* see if we're bound all already! */
@@ -6584,7 +6549,6 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp,
if (sa->sa_family == AF_INET6) {
#ifdef INET
struct sockaddr_in6 *sin6;
-
#endif
if (sa->sa_len != sizeof(struct sockaddr_in6)) {
@@ -6655,14 +6619,11 @@ int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
int loopback_scope;
-
#if defined(INET)
int ipv4_local_scope, ipv4_addr_legal;
-
#endif
#if defined (INET6)
int local_scope, site_scope, ipv6_addr_legal;
-
#endif
struct sctp_vrf *vrf;
struct sctp_ifn *sctp_ifn;
@@ -6707,10 +6668,8 @@ sctp_local_addr_count(struct sctp_tcb *stcb)
sin = &sctp_ifa->address.sin;
if (sin->sin_addr.s_addr == 0) {
- /*
- * skip unspecified
- * addrs
- */
+ /* skip unspecified
+ * addrs */
continue;
}
if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
@@ -6747,19 +6706,14 @@ sctp_local_addr_count(struct sctp_tcb *stcb)
if (sin6->sin6_scope_id == 0) {
if (sa6_recoverscope(sin6) != 0)
/*
- *
+ *
* bad
- *
- * li
- * nk
- *
- * loc
- * al
- *
- * add
- * re
- * ss
- * */
+ * link
+ *
+ * local
+ *
+ * address
+ */
continue;
}
}
@@ -6831,10 +6785,8 @@ sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
struct ip *iph;
-
#ifdef INET6
struct ip6_hdr *ip6;
-
#endif
struct mbuf *sp, *last;
struct udphdr *uhdr;
@@ -7019,7 +6971,6 @@ sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ct
}
return;
}
-
#endif
#ifdef INET6
@@ -7161,7 +7112,6 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx
}
}
}
-
#endif
void
@@ -7190,14 +7140,11 @@ sctp_over_udp_start(void)
{
uint16_t port;
int ret;
-
#ifdef INET
struct sockaddr_in sin;
-
#endif
#ifdef INET6
struct sockaddr_in6 sin6;
-
#endif
/*
* This function assumes sysctl caller holds sctp_sysctl_info_lock()
diff --git a/freebsd/sys/netinet/sctputil.h b/freebsd/sys/netinet/sctputil.h
index 292068af..a3a4f3c6 100644
--- a/freebsd/sys/netinet/sctputil.h
+++ b/freebsd/sys/netinet/sctputil.h
@@ -43,13 +43,11 @@ __FBSDID("$FreeBSD$");
#ifdef SCTP_ASOCLOG_OF_TSNS
void sctp_print_out_track_log(struct sctp_tcb *stcb);
-
#endif
#ifdef SCTP_MBUF_LOGGING
struct mbuf *sctp_m_free(struct mbuf *m);
void sctp_m_freem(struct mbuf *m);
-
#else
#define sctp_m_free m_free
#define sctp_m_freem m_freem
@@ -58,7 +56,6 @@ void sctp_m_freem(struct mbuf *m);
#if defined(SCTP_LOCAL_TRACE_BUF) || defined(__APPLE__)
void
sctp_log_trace(uint32_t fr, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f);
-
#endif
#define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id)
@@ -222,7 +219,6 @@ sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
int *error, unsigned int limit, int *bad_addr);
int sctp_is_there_an_abort_here(struct mbuf *, int, uint32_t *);
-
#ifdef INET6
uint32_t sctp_is_same_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
@@ -357,7 +353,6 @@ void
void
sctp_log_mbc(struct mbuf *m, int from);
-
#endif
void
diff --git a/freebsd/sys/netinet/tcp_debug.c b/freebsd/sys/netinet/tcp_debug.c
index c5f74182..707e7c5d 100644
--- a/freebsd/sys/netinet/tcp_debug.c
+++ b/freebsd/sys/netinet/tcp_debug.c
@@ -217,9 +217,9 @@ tcp_trace(short act, short ostate, struct tcpcb *tp, void *ipgen,
return;
printf(
"\trcv_(nxt,wnd,up) (%lx,%lx,%lx) snd_(una,nxt,max) (%lx,%lx,%lx)\n",
- (u_long)tp->rcv_nxt, tp->rcv_wnd, (u_long)tp->rcv_up,
+ (u_long)tp->rcv_nxt, (u_long)tp->rcv_wnd, (u_long)tp->rcv_up,
(u_long)tp->snd_una, (u_long)tp->snd_nxt, (u_long)tp->snd_max);
printf("\tsnd_(wl1,wl2,wnd) (%lx,%lx,%lx)\n",
- (u_long)tp->snd_wl1, (u_long)tp->snd_wl2, tp->snd_wnd);
+ (u_long)tp->snd_wl1, (u_long)tp->snd_wl2, (u_long)tp->snd_wnd);
#endif /* TCPDEBUG */
}
diff --git a/freebsd/sys/netinet/tcp_fsm.h b/freebsd/sys/netinet/tcp_fsm.h
index 61fd0c1f..5423e1f1 100644
--- a/freebsd/sys/netinet/tcp_fsm.h
+++ b/freebsd/sys/netinet/tcp_fsm.h
@@ -73,7 +73,8 @@
#define TCPS_HAVERCVDSYN(s) ((s) >= TCPS_SYN_RECEIVED)
#define TCPS_HAVEESTABLISHED(s) ((s) >= TCPS_ESTABLISHED)
-#define TCPS_HAVERCVDFIN(s) ((s) >= TCPS_TIME_WAIT)
+#define TCPS_HAVERCVDFIN(s) \
+ ((s) == TCPS_CLOSE_WAIT || ((s) >= TCPS_CLOSING && (s) != TCPS_FIN_WAIT_2))
#ifdef TCPOUTFLAGS
/*
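The reworked TCPS_HAVERCVDFIN() above no longer treats every state numerically at or beyond TIME_WAIT as post-FIN; it names exactly the states entered after the peer's FIN has been received (CLOSE_WAIT, CLOSING, LAST_ACK, TIME_WAIT) and excludes FIN_WAIT_2. A minimal standalone sketch of the predicate, using the conventional 4.4BSD state numbering; the main() harness is illustrative only and not part of the header:

#include <assert.h>

/* Conventional 4.4BSD TCP state numbering, as in tcp_fsm.h. */
enum {
	TCPS_CLOSED, TCPS_LISTEN, TCPS_SYN_SENT, TCPS_SYN_RECEIVED,
	TCPS_ESTABLISHED, TCPS_CLOSE_WAIT, TCPS_FIN_WAIT_1, TCPS_CLOSING,
	TCPS_LAST_ACK, TCPS_FIN_WAIT_2, TCPS_TIME_WAIT
};

/* True only in states reached after the peer's FIN has been received. */
#define TCPS_HAVERCVDFIN(s) \
	((s) == TCPS_CLOSE_WAIT || ((s) >= TCPS_CLOSING && (s) != TCPS_FIN_WAIT_2))

int
main(void)
{
	assert(TCPS_HAVERCVDFIN(TCPS_CLOSE_WAIT));
	assert(TCPS_HAVERCVDFIN(TCPS_CLOSING));
	assert(TCPS_HAVERCVDFIN(TCPS_LAST_ACK));
	assert(TCPS_HAVERCVDFIN(TCPS_TIME_WAIT));
	assert(!TCPS_HAVERCVDFIN(TCPS_FIN_WAIT_1));	/* our FIN sent, peer's not seen */
	assert(!TCPS_HAVERCVDFIN(TCPS_FIN_WAIT_2));	/* only our FIN has been ACKed */
	assert(!TCPS_HAVERCVDFIN(TCPS_ESTABLISHED));
	return (0);
}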
diff --git a/freebsd/sys/netinet/tcp_hostcache.c b/freebsd/sys/netinet/tcp_hostcache.c
index 4e78b8b2..e0c4b493 100644
--- a/freebsd/sys/netinet/tcp_hostcache.c
+++ b/freebsd/sys/netinet/tcp_hostcache.c
@@ -126,6 +126,12 @@ static void tcp_hc_purge(void *);
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
"TCP Host cache");
+VNET_DEFINE(int, tcp_use_hostcache) = 1;
+#define V_tcp_use_hostcache VNET(tcp_use_hostcache)
+SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
+ &VNET_NAME(tcp_use_hostcache), 0,
+ "Enable the TCP hostcache");
+
SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
&VNET_NAME(tcp_hostcache.cache_limit), 0,
"Overall entry limit for hostcache");
@@ -278,6 +284,9 @@ tcp_hc_lookup(struct in_conninfo *inc)
struct hc_head *hc_head;
struct hc_metrics *hc_entry;
+ if (!V_tcp_use_hostcache)
+ return NULL;
+
KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));
/*
@@ -334,6 +343,9 @@ tcp_hc_insert(struct in_conninfo *inc)
struct hc_head *hc_head;
struct hc_metrics *hc_entry;
+ if (!V_tcp_use_hostcache)
+ return NULL;
+
KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));
/*
@@ -423,6 +435,9 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
struct hc_metrics *hc_entry;
+ if (!V_tcp_use_hostcache)
+ return;
+
/*
* Find the right bucket.
*/
@@ -454,14 +469,17 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
/*
* External function: look up an entry in the hostcache and return the
- * discovered path MTU. Returns NULL if no entry is found or value is not
+ * discovered path MTU. Returns 0 if no entry is found or value is not
* set.
*/
-u_long
+uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
struct hc_metrics *hc_entry;
- u_long mtu;
+ uint32_t mtu;
+
+ if (!V_tcp_use_hostcache)
+ return 0;
hc_entry = tcp_hc_lookup(inc);
if (hc_entry == NULL) {
@@ -480,10 +498,13 @@ tcp_hc_getmtu(struct in_conninfo *inc)
* Creates a new entry if none was found.
*/
void
-tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
+tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
struct hc_metrics *hc_entry;
+ if (!V_tcp_use_hostcache)
+ return;
+
/*
* Find the right bucket.
*/
@@ -523,6 +544,9 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
struct hc_metrics *hc_entry;
+ if (!V_tcp_use_hostcache)
+ return;
+
hc_entry = tcp_hc_lookup(inc);
if (hc_entry == NULL) {
hc_entry = tcp_hc_insert(inc);
@@ -536,16 +560,16 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
if (hc_entry->rmx_rtt == 0)
hc_entry->rmx_rtt = hcml->rmx_rtt;
else
- hc_entry->rmx_rtt =
- (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
+ hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
+ (uint64_t)hcml->rmx_rtt) / 2;
TCPSTAT_INC(tcps_cachedrtt);
}
if (hcml->rmx_rttvar != 0) {
if (hc_entry->rmx_rttvar == 0)
hc_entry->rmx_rttvar = hcml->rmx_rttvar;
else
- hc_entry->rmx_rttvar =
- (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
+ hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
+ (uint64_t)hcml->rmx_rttvar) / 2;
TCPSTAT_INC(tcps_cachedrttvar);
}
if (hcml->rmx_ssthresh != 0) {
@@ -560,8 +584,8 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
if (hc_entry->rmx_cwnd == 0)
hc_entry->rmx_cwnd = hcml->rmx_cwnd;
else
- hc_entry->rmx_cwnd =
- (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
+ hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
+ (uint64_t)hcml->rmx_cwnd) / 2;
/* TCPSTAT_INC(tcps_cachedcwnd); */
}
if (hcml->rmx_sendpipe != 0) {
@@ -569,7 +593,8 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
else
hc_entry->rmx_sendpipe =
- (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
+ ((uint64_t)hc_entry->rmx_sendpipe +
+ (uint64_t)hcml->rmx_sendpipe) /2;
/* TCPSTAT_INC(tcps_cachedsendpipe); */
}
if (hcml->rmx_recvpipe != 0) {
@@ -577,7 +602,8 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
else
hc_entry->rmx_recvpipe =
- (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) /2;
+ ((uint64_t)hc_entry->rmx_recvpipe +
+ (uint64_t)hcml->rmx_recvpipe) /2;
/* TCPSTAT_INC(tcps_cachedrecvpipe); */
}
@@ -614,7 +640,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
rmx_q) {
sbuf_printf(&sb,
- "%-15s %5lu %8lu %6lums %6lums %8lu %8lu %8lu %4lu "
+ "%-15s %5u %8u %6lums %6lums %8u %8u %8u %4lu "
"%4lu %4i\n",
hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
@@ -624,9 +650,9 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
#endif
hc_entry->rmx_mtu,
hc_entry->rmx_ssthresh,
- msec(hc_entry->rmx_rtt *
+ msec((u_long)hc_entry->rmx_rtt *
(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
- msec(hc_entry->rmx_rttvar *
+ msec((u_long)hc_entry->rmx_rttvar *
(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
hc_entry->rmx_cwnd,
hc_entry->rmx_sendpipe,
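With the hostcache metrics narrowed from u_long to uint32_t, the running averages above (rtt, rttvar, cwnd, sendpipe, recvpipe) are summed in 64-bit arithmetic before halving, so two large 32-bit samples cannot wrap the intermediate sum. A condensed sketch of that pattern; hc_avg32 is a hypothetical helper, not part of the patch:

#include <stdint.h>

/*
 * Fold a new sample into a cached 32-bit metric.  The sum is formed in
 * 64 bits so that two values near UINT32_MAX cannot overflow before the
 * division by two.
 */
static uint32_t
hc_avg32(uint32_t cached, uint32_t sample)
{
	if (cached == 0)
		return (sample);	/* first observation seeds the metric */
	return ((uint32_t)(((uint64_t)cached + (uint64_t)sample) / 2));
}

The new net.inet.tcp.hostcache.enable sysctl introduced earlier in this file works the same way on every path: lookup, insert, get, getmtu, updatemtu and update all return early when the cache is administratively disabled.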
diff --git a/freebsd/sys/netinet/tcp_hostcache.h b/freebsd/sys/netinet/tcp_hostcache.h
index 44875ff6..23a0c673 100644
--- a/freebsd/sys/netinet/tcp_hostcache.h
+++ b/freebsd/sys/netinet/tcp_hostcache.h
@@ -48,22 +48,22 @@ struct hc_head {
struct hc_metrics {
/* housekeeping */
TAILQ_ENTRY(hc_metrics) rmx_q;
- struct hc_head *rmx_head; /* head of bucket tail queue */
- struct in_addr ip4; /* IP address */
- struct in6_addr ip6; /* IP6 address */
- uint32_t ip6_zoneid; /* IPv6 scope zone id */
+ struct hc_head *rmx_head; /* head of bucket tail queue */
+ struct in_addr ip4; /* IP address */
+ struct in6_addr ip6; /* IP6 address */
+ uint32_t ip6_zoneid; /* IPv6 scope zone id */
/* endpoint specific values for tcp */
- u_long rmx_mtu; /* MTU for this path */
- u_long rmx_ssthresh; /* outbound gateway buffer limit */
- u_long rmx_rtt; /* estimated round trip time */
- u_long rmx_rttvar; /* estimated rtt variance */
- u_long rmx_cwnd; /* congestion window */
- u_long rmx_sendpipe; /* outbound delay-bandwidth product */
- u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+ uint32_t rmx_mtu; /* MTU for this path */
+ uint32_t rmx_ssthresh; /* outbound gateway buffer limit */
+ uint32_t rmx_rtt; /* estimated round trip time */
+ uint32_t rmx_rttvar; /* estimated rtt variance */
+ uint32_t rmx_cwnd; /* congestion window */
+ uint32_t rmx_sendpipe; /* outbound delay-bandwidth product */
+ uint32_t rmx_recvpipe; /* inbound delay-bandwidth product */
/* TCP hostcache internal data */
- int rmx_expire; /* lifetime for object */
- u_long rmx_hits; /* number of hits */
- u_long rmx_updates; /* number of updates */
+ int rmx_expire; /* lifetime for object */
+ u_long rmx_hits; /* number of hits */
+ u_long rmx_updates; /* number of updates */
};
struct tcp_hostcache {
diff --git a/freebsd/sys/netinet/tcp_input.c b/freebsd/sys/netinet/tcp_input.c
index eaa3eb3d..7e07fc0a 100644
--- a/freebsd/sys/netinet/tcp_input.c
+++ b/freebsd/sys/netinet/tcp_input.c
@@ -59,7 +59,9 @@ __FBSDID("$FreeBSD$");
#include <rtems/bsd/sys/param.h>
#include <sys/kernel.h>
+#ifdef TCP_HHOOK
#include <sys/hhook.h>
+#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h> /* for proc0 declaration */
@@ -280,6 +282,7 @@ kmod_tcpstat_inc(int statnum)
counter_u64_add(VNET(tcpstat)[statnum], 1);
}
+#ifdef TCP_HHOOK
/*
* Wrapper for the TCP established input helper hook.
*/
@@ -297,15 +300,18 @@ hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
tp->osd);
}
}
+#endif
/*
* CC wrapper hook functions
*/
void
-cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
+cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
+ uint16_t type)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
+ tp->ccv->nsegs = nsegs;
tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
if (tp->snd_cwnd <= tp->snd_wnd)
tp->ccv->flags |= CCF_CWND_LIMITED;
@@ -315,7 +321,7 @@ cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
if (type == CC_ACK) {
if (tp->snd_cwnd > tp->snd_ssthresh) {
tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
- V_tcp_abc_l_var * tcp_maxseg(tp));
+ nsegs * V_tcp_abc_l_var * tcp_maxseg(tp));
if (tp->t_bytes_acked >= tp->snd_cwnd) {
tp->t_bytes_acked -= tp->snd_cwnd;
tp->ccv->flags |= CCF_ABC_SENTAWND;
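Because an LRO-coalesced ACK can stand for several original segments, cc_ack_received() above now takes an nsegs count and scales the Appropriate Byte Counting cap by it, instead of crediting at most one segment's worth of the ABC limit per call. A rough sketch of that accounting; the names mirror the diff, with abc_l_var and maxseg treated as plain parameters here:

#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Appropriate Byte Counting credit for one (possibly LRO-aggregated) ACK:
 * at most nsegs * abc_l_var * maxseg bytes count toward growing cwnd.
 */
static uint64_t
abc_credit(uint64_t bytes_this_ack, uint16_t nsegs,
    uint32_t abc_l_var, uint32_t maxseg)
{
	return (MIN(bytes_this_ack, (uint64_t)nsegs * abc_l_var * maxseg));
}

In the diff itself nsegs is derived as max(1, m->m_pkthdr.lro_nsegs) in tcp_do_segment() and also drives TCPSTAT_ADD(tcps_rcvackpack, nsegs), so the per-packet counters stay accurate for aggregated ACKs.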
@@ -434,9 +440,16 @@ cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
tp->t_dupacks = 0;
tp->t_bytes_acked = 0;
EXIT_RECOVERY(tp->t_flags);
- tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
- maxseg) * maxseg;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL) {
+ /*
+ * RFC5681 Section 3.1
+ * ssthresh = max (FlightSize / 2, 2*SMSS) eq (4)
+ */
+ tp->snd_ssthresh =
+ max((tp->snd_max - tp->snd_una) / 2 / maxseg, 2)
+ * maxseg;
+ tp->snd_cwnd = maxseg;
+ }
break;
case CC_RTO_ERR:
TCPSTAT_INC(tcps_sndrexmitbad);
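The fallback above applies RFC 5681 equation (4), ssthresh = max(FlightSize / 2, 2 * SMSS), only when the congestion-control module supplies no cong_signal hook; FlightSize is taken as snd_max - snd_una and the result is rounded down to a whole number of segments. A worked sketch of that arithmetic, with sequence-number wraparound ignored for brevity:

#include <stdint.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* RFC 5681 eq. (4): ssthresh = max(FlightSize / 2, 2 * SMSS). */
static uint32_t
rto_ssthresh(uint32_t snd_max, uint32_t snd_una, uint32_t maxseg)
{
	uint32_t flightsize = snd_max - snd_una;	/* bytes in flight */

	return (MAX(flightsize / 2 / maxseg, 2) * maxseg);
}

/* Example: 20000 bytes outstanding, maxseg 1460 -> max(6, 2) * 1460 = 8760. */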
@@ -603,7 +616,7 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
#ifdef TCP_SIGNATURE
uint8_t sig_checked = 0;
#endif
- uint8_t iptos = 0;
+ uint8_t iptos;
struct m_tag *fwd_tag = NULL;
#ifdef INET6
struct ip6_hdr *ip6 = NULL;
@@ -675,6 +688,7 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
/* XXX stat */
goto drop;
}
+ iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
}
#endif
#if defined(INET) && defined(INET6)
@@ -701,6 +715,7 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
th = (struct tcphdr *)((caddr_t)ip + off0);
tlen = ntohs(ip->ip_len) - off0;
+ iptos = ip->ip_tos;
if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
th->th_sum = m->m_pkthdr.csum_data;
@@ -721,29 +736,20 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
ipov->ih_len = htons(tlen);
th->th_sum = in_cksum(m, len);
/* Reset length for SDT probes. */
- ip->ip_len = htons(tlen + off0);
+ ip->ip_len = htons(len);
+ /* Reset TOS bits */
+ ip->ip_tos = iptos;
+ /* Re-initialization for later version check */
+ ip->ip_v = IPVERSION;
}
if (th->th_sum) {
TCPSTAT_INC(tcps_rcvbadsum);
goto drop;
}
- /* Re-initialization for later version check */
- ip->ip_v = IPVERSION;
}
#endif /* INET */
-#ifdef INET6
- if (isipv6)
- iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
-#endif
-#if defined(INET) && defined(INET6)
- else
-#endif
-#ifdef INET
- iptos = ip->ip_tos;
-#endif
-
/*
* Check that TCP offset makes sense,
* pull out TCP options and adjust length. XXX
@@ -923,6 +929,16 @@ findpcb:
goto dropwithreset;
}
INP_WLOCK_ASSERT(inp);
+ /*
+ * While waiting for inp lock during the lookup, another thread
+ * can have dropped the inpcb, in which case we need to loop back
+ * and try to find a new inpcb to deliver to.
+ */
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_WUNLOCK(inp);
+ inp = NULL;
+ goto findpcb;
+ }
if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
((inp->inp_socket == NULL) ||
@@ -983,6 +999,10 @@ relocked:
if (in_pcbrele_wlocked(inp)) {
inp = NULL;
goto findpcb;
+ } else if (inp->inp_flags & INP_DROPPED) {
+ INP_WUNLOCK(inp);
+ inp = NULL;
+ goto findpcb;
}
} else
ti_locked = TI_RLOCKED;
@@ -1031,7 +1051,7 @@ relocked:
#endif
if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
(tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
- !(tp->t_flags & TF_FASTOPEN)))) {
+ !IS_FASTOPEN(tp->t_flags)))) {
if (ti_locked == TI_UNLOCKED) {
if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
in_pcbref(inp);
@@ -1042,6 +1062,10 @@ relocked:
if (in_pcbrele_wlocked(inp)) {
inp = NULL;
goto findpcb;
+ } else if (inp->inp_flags & INP_DROPPED) {
+ INP_WUNLOCK(inp);
+ inp = NULL;
+ goto findpcb;
}
goto relocked;
} else
@@ -1074,11 +1098,11 @@ relocked:
* state) we look into the SYN cache if this is a new connection
* attempt or the completion of a previous one.
*/
- if (so->so_options & SO_ACCEPTCONN) {
+ KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
+ ("%s: so accepting but tp %p not listening", __func__, tp));
+ if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
struct in_conninfo inc;
- KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
- "tp not listening", __func__));
bzero(&inc, sizeof(inc));
#ifdef INET6
if (isipv6) {
@@ -1124,7 +1148,7 @@ relocked:
goto dropwithreset;
}
#ifdef TCP_RFC7413
-new_tfo_socket:
+tfo_socket_result:
#endif
if (so == NULL) {
/*
@@ -1390,7 +1414,7 @@ new_tfo_socket:
tcp_dooptions(&to, optp, optlen, TO_SYN);
#ifdef TCP_RFC7413
if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
- goto new_tfo_socket;
+ goto tfo_socket_result;
#else
syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
#endif
@@ -1503,12 +1527,15 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
{
int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
int rstreason, todrop, win;
- u_long tiwin;
+ uint32_t tiwin;
+ uint16_t nsegs;
char *s;
struct in_conninfo *inc;
struct mbuf *mfree;
struct tcpopt to;
+#ifdef TCP_RFC7413
int tfo_syn;
+#endif
#ifdef TCPDEBUG
/*
@@ -1523,6 +1550,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
inc = &tp->t_inpcb->inp_inc;
tp->sackhint.last_sack_ack = 0;
sack_changed = 0;
+ nsegs = max(1, m->m_pkthdr.lro_nsegs);
/*
* If this is either a state-changing packet or current state isn't
@@ -1564,8 +1592,6 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* validation to ignore broken/spoofed segs.
*/
tp->t_rcvtime = ticks;
- if (TCPS_HAVEESTABLISHED(tp->t_state))
- tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
/*
* Scale up the window into a 32-bit value.
@@ -1741,7 +1767,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((to.to_flags & TOF_TS) != 0 &&
to.to_tsecr) {
- u_int t;
+ uint32_t t;
t = tcp_ts_getticks() - to.to_tsecr;
if (!tp->t_rttlow || tp->t_rttlow > t)
@@ -1758,10 +1784,12 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
acked = BYTES_THIS_ACK(tp, th);
+#ifdef TCP_HHOOK
/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
hhook_run_tcp_est_in(tp, th, &to);
+#endif
- TCPSTAT_INC(tcps_rcvackpack);
+ TCPSTAT_ADD(tcps_rcvackpack, nsegs);
TCPSTAT_ADD(tcps_rcvackbyte, acked);
sbdrop(&so->so_snd, acked);
if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
@@ -1774,7 +1802,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* typically means increasing the congestion
* window.
*/
- cc_ack_received(tp, th, CC_ACK);
+ cc_ack_received(tp, th, nsegs, CC_ACK);
tp->snd_una = th->th_ack;
/*
@@ -1840,7 +1868,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* rcv_nxt.
*/
tp->rcv_up = tp->rcv_nxt;
- TCPSTAT_INC(tcps_rcvpack);
+ TCPSTAT_ADD(tcps_rcvpack, nsegs);
TCPSTAT_ADD(tcps_rcvbyte, tlen);
#ifdef TCPDEBUG
if (so->so_options & SO_DEBUG)
@@ -1963,7 +1991,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
goto dropwithreset;
}
#ifdef TCP_RFC7413
- if (tp->t_flags & TF_FASTOPEN) {
+ if (IS_FASTOPEN(tp->t_flags)) {
/*
* When a TFO connection is in SYN_RECEIVED, the
* only valid packets are the initial SYN, a
@@ -2030,7 +2058,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
(TF_RCVD_SCALE|TF_REQ_SCALE)) {
tp->rcv_scale = tp->request_r_scale;
}
- tp->rcv_adv += imin(tp->rcv_wnd,
+ tp->rcv_adv += min(tp->rcv_wnd,
TCP_MAXWIN << tp->rcv_scale);
tp->snd_una++; /* SYN is acked */
/*
@@ -2176,9 +2204,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
case TCPS_FIN_WAIT_1:
case TCPS_FIN_WAIT_2:
case TCPS_CLOSE_WAIT:
+ case TCPS_CLOSING:
+ case TCPS_LAST_ACK:
so->so_error = ECONNRESET;
close:
- tcp_state_change(tp, TCPS_CLOSED);
/* FALLTHROUGH */
default:
tp = tcp_close(tp);
@@ -2397,7 +2426,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
(tp->t_flags & TF_NEEDSYN)) {
#ifdef TCP_RFC7413
if (tp->t_state == TCPS_SYN_RECEIVED &&
- tp->t_flags & TF_FASTOPEN) {
+ IS_FASTOPEN(tp->t_flags)) {
tp->snd_wnd = tiwin;
cc_conn_init(tp);
}
@@ -2460,7 +2489,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* snd_cwnd reduction that occurs when a TFO SYN|ACK
* is retransmitted.
*/
- if (!(tp->t_flags & TF_FASTOPEN))
+ if (!IS_FASTOPEN(tp->t_flags))
#endif
cc_conn_init(tp);
tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
@@ -2504,8 +2533,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
tp->sackhint.sacked_bytes = 0;
+#ifdef TCP_HHOOK
/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
hhook_run_tcp_est_in(tp, th, &to);
+#endif
if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
u_int maxseg;
@@ -2572,7 +2603,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_dupacks = 0;
else if (++tp->t_dupacks > tcprexmtthresh ||
IN_FASTRECOVERY(tp->t_flags)) {
- cc_ack_received(tp, th, CC_DUPACK);
+ cc_ack_received(tp, th, nsegs,
+ CC_DUPACK);
if ((tp->t_flags & TF_SACK_PERMIT) &&
IN_FASTRECOVERY(tp->t_flags)) {
int awnd;
@@ -2591,6 +2623,15 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (awnd < tp->snd_ssthresh) {
tp->snd_cwnd += maxseg;
+ /*
+ * RFC5681 Section 3.2 talks about cwnd
+ * inflation on additional dupacks and
+ * deflation on recovering from loss.
+ *
+				 * We keep cwnd in check so that
+ * we don't have to 'deflate' it when we
+ * get out of recovery.
+ */
if (tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
}
@@ -2622,26 +2663,30 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
/* Congestion signal before ack. */
cc_cong_signal(tp, th, CC_NDUPACK);
- cc_ack_received(tp, th, CC_DUPACK);
+ cc_ack_received(tp, th, nsegs,
+ CC_DUPACK);
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
if (tp->t_flags & TF_SACK_PERMIT) {
TCPSTAT_INC(
tcps_sack_recovery_episode);
tp->sack_newdata = tp->snd_nxt;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
goto drop;
}
tp->snd_nxt = th->th_ack;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
KASSERT(tp->snd_limited <= 2,
("%s: tp->snd_limited too big",
__func__));
- tp->snd_cwnd = tp->snd_ssthresh +
- maxseg *
- (tp->t_dupacks - tp->snd_limited);
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = tp->snd_ssthresh +
+ maxseg *
+ (tp->t_dupacks - tp->snd_limited);
if (SEQ_GT(onxt, tp->snd_nxt))
tp->snd_nxt = onxt;
goto drop;
@@ -2656,8 +2701,9 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* segment. Restore the original
* snd_cwnd after packet transmission.
*/
- cc_ack_received(tp, th, CC_DUPACK);
- u_long oldcwnd = tp->snd_cwnd;
+ cc_ack_received(tp, th, nsegs,
+ CC_DUPACK);
+ uint32_t oldcwnd = tp->snd_cwnd;
tcp_seq oldsndmax = tp->snd_max;
u_int sent;
int avail;
@@ -2758,7 +2804,7 @@ process_ACK:
	KASSERT(acked >= 0, ("%s: acked unexpectedly negative "
"(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__,
tp->snd_una, th->th_ack, tp, m));
- TCPSTAT_INC(tcps_rcvackpack);
+ TCPSTAT_ADD(tcps_rcvackpack, nsegs);
TCPSTAT_ADD(tcps_rcvackbyte, acked);
/*
@@ -2787,7 +2833,7 @@ process_ACK:
* huge RTT and blow up the retransmit timer.
*/
if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
- u_int t;
+ uint32_t t;
t = tcp_ts_getticks() - to.to_tsecr;
if (!tp->t_rttlow || tp->t_rttlow > t)
@@ -2823,7 +2869,7 @@ process_ACK:
* control related information. This typically means increasing
* the congestion window.
*/
- cc_ack_received(tp, th, CC_ACK);
+ cc_ack_received(tp, th, nsegs, CC_ACK);
SOCKBUF_LOCK(&so->so_snd);
if (acked > sbavail(&so->so_snd)) {
@@ -2836,7 +2882,7 @@ process_ACK:
ourfinisacked = 1;
} else {
mfree = sbcut_locked(&so->so_snd, acked);
- if (tp->snd_wnd >= (u_long) acked)
+ if (tp->snd_wnd >= (uint32_t) acked)
tp->snd_wnd -= acked;
else
tp->snd_wnd = 0;
@@ -2997,7 +3043,7 @@ step6:
* but if two URG's are pending at once, some out-of-band
* data may creep in... ick.
*/
- if (th->th_urp <= (u_long)tlen &&
+ if (th->th_urp <= (uint32_t)tlen &&
!(so->so_options & SO_OOBINLINE)) {
/* hdr drop is delayed */
tcp_pulloutofband(so, th, m, drop_hdrlen);
@@ -3022,8 +3068,12 @@ dodata: /* XXX */
* case PRU_RCVD). If a FIN has already been received on this
* connection then we just ignore the text.
*/
+#ifdef TCP_RFC7413
tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
- (tp->t_flags & TF_FASTOPEN));
+ IS_FASTOPEN(tp->t_flags));
+#else
+#define tfo_syn (false)
+#endif
if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
TCPS_HAVERCVDFIN(tp->t_state) == 0) {
tcp_seq save_start = th->th_seq;
@@ -3247,6 +3297,9 @@ drop:
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
m_freem(m);
+#ifndef TCP_RFC7413
+#undef tfo_syn
+#endif
}
/*
@@ -3306,6 +3359,8 @@ tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
} else {
if (th->th_flags & TH_SYN)
tlen++;
+ if (th->th_flags & TH_FIN)
+ tlen++;
tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
(tcp_seq)0, TH_RST|TH_ACK);
}
@@ -3562,7 +3617,7 @@ tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
{
int mss = 0;
- u_long maxmtu = 0;
+ uint32_t maxmtu = 0;
struct inpcb *inp = tp->t_inpcb;
struct hc_metrics_lite metrics;
#ifdef INET6
@@ -3708,7 +3763,7 @@ void
tcp_mss(struct tcpcb *tp, int offer)
{
int mss;
- u_long bufsize;
+ uint32_t bufsize;
struct inpcb *inp;
struct socket *so;
struct hc_metrics_lite metrics;
@@ -3745,7 +3800,15 @@ tcp_mss(struct tcpcb *tp, int offer)
(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
}
SOCKBUF_UNLOCK(&so->so_snd);
- tp->t_maxseg = mss;
+ /*
+ * Sanity check: make sure that maxseg will be large
+	 * enough to allow some data on segments even if all
+	 * the option space is used (40 bytes). Otherwise
+ * funny things may happen in tcp_output.
+ *
+ * XXXGL: shouldn't we reserve space for IP/IPv6 options?
+ */
+ tp->t_maxseg = max(mss, 64);
SOCKBUF_LOCK(&so->so_rcv);
if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
@@ -3777,8 +3840,8 @@ int
tcp_mssopt(struct in_conninfo *inc)
{
int mss = 0;
- u_long maxmtu = 0;
- u_long thcmtu = 0;
+ uint32_t thcmtu = 0;
+ uint32_t maxmtu = 0;
size_t min_protoh;
KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
@@ -3823,7 +3886,7 @@ void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
tcp_seq onxt = tp->snd_nxt;
- u_long ocwnd = tp->snd_cwnd;
+ uint32_t ocwnd = tp->snd_cwnd;
u_int maxseg = tcp_maxseg(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
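The dup-ACK handling above inflates snd_cwnd by one maxseg per additional duplicate ACK but clamps it to snd_ssthresh, so nothing has to be "deflated" when recovery ends. A minimal stand-alone sketch of that arithmetic, with made-up byte counts and no kernel types, might look like this:

/*
 * Illustrative sketch only, not the kernel code: inflate the window on
 * extra dup ACKs while keeping it capped at ssthresh, mirroring the
 * awnd/snd_cwnd logic in tcp_do_segment() above.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t maxseg = 1460;
	uint32_t snd_ssthresh = 10 * maxseg;
	uint32_t snd_cwnd = 4 * maxseg;		/* just after entering recovery */
	uint32_t awnd = 6 * maxseg;		/* estimated data still in flight */
	int dupack;

	for (dupack = 4; dupack <= 12; dupack++) {
		if (awnd < snd_ssthresh) {
			snd_cwnd += maxseg;		/* inflate per extra dup ACK */
			if (snd_cwnd > snd_ssthresh)	/* ...but keep cwnd in check */
				snd_cwnd = snd_ssthresh;
		}
		printf("dupack %2d: cwnd %u\n", dupack, (unsigned)snd_cwnd);
	}
	return (0);
}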
diff --git a/freebsd/sys/netinet/tcp_lro.c b/freebsd/sys/netinet/tcp_lro.c
index 3550ab84..f318e78b 100644
--- a/freebsd/sys/netinet/tcp_lro.c
+++ b/freebsd/sys/netinet/tcp_lro.c
@@ -394,6 +394,7 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
#endif
}
+ le->m_head->m_pkthdr.lro_nsegs = le->append_cnt + 1;
(*lc->ifp->if_input)(lc->ifp, le->m_head);
lc->lro_queued += le->append_cnt + 1;
lc->lro_flushed++;
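The lro_nsegs value set above lets TCP charge statistics and congestion-control accounting per original segment rather than per coalesced chain (see the TCPSTAT_ADD(..., nsegs) calls in tcp_input.c). The following stand-alone C sketch models that accounting; struct pkt, nsegs_of() and the counters are hypothetical stand-ins, not the kernel's mbuf packet header.

/*
 * Sketch: scale a per-packet counter by the number of segments an LRO
 * chain coalesced, defaulting to one when LRO never touched the packet.
 */
#include <stdio.h>

struct pkt {
	unsigned int lro_nsegs;	/* 0 if LRO did not touch the packet */
	unsigned int acked;	/* bytes newly acknowledged */
};

static unsigned int rcvackpack, rcvackbyte;

static unsigned int
nsegs_of(const struct pkt *p)
{
	/* At least one segment, even when lro_nsegs was never set. */
	return (p->lro_nsegs > 1 ? p->lro_nsegs : 1);
}

int
main(void)
{
	struct pkt pkts[] = { { 0, 1448 }, { 3, 4344 }, { 5, 7240 } };
	size_t i;

	for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
		rcvackpack += nsegs_of(&pkts[i]);	/* one count per original ACK */
		rcvackbyte += pkts[i].acked;
	}
	printf("ack packets: %u, ack bytes: %u\n", rcvackpack, rcvackbyte);
	return (0);
}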
diff --git a/freebsd/sys/netinet/tcp_output.c b/freebsd/sys/netinet/tcp_output.c
index af11d805..a310512e 100644
--- a/freebsd/sys/netinet/tcp_output.c
+++ b/freebsd/sys/netinet/tcp_output.c
@@ -42,7 +42,9 @@ __FBSDID("$FreeBSD$");
#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
+#ifdef TCP_HHOOK
#include <sys/hhook.h>
+#endif
#include <sys/kernel.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/mbuf.h>
@@ -142,17 +144,20 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
tcp_timer_active((tp), TT_PERSIST), \
("neither rexmt nor persist timer is set"))
+#ifdef TCP_HHOOK
static void inline hhook_run_tcp_est_out(struct tcpcb *tp,
struct tcphdr *th, struct tcpopt *to,
- long len, int tso);
+ uint32_t len, int tso);
+#endif
static void inline cc_after_idle(struct tcpcb *tp);
+#ifdef TCP_HHOOK
/*
* Wrapper for the TCP established output helper hook.
*/
static void inline
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
- struct tcpopt *to, long len, int tso)
+ struct tcpopt *to, uint32_t len, int tso)
{
struct tcp_hhook_data hhook_data;
@@ -167,6 +172,7 @@ hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
tp->osd);
}
}
+#endif
/*
* CC wrapper hook functions
@@ -187,7 +193,8 @@ int
tcp_output(struct tcpcb *tp)
{
struct socket *so = tp->t_inpcb->inp_socket;
- long len, recwin, sendwin;
+ int32_t len;
+ uint32_t recwin, sendwin;
int off, flags, error = 0; /* Keep compiler happy */
struct mbuf *m;
struct ip *ip = NULL;
@@ -225,7 +232,7 @@ tcp_output(struct tcpcb *tp)
* For TFO connections in SYN_RECEIVED, only allow the initial
* SYN|ACK and those sent by the retransmit timer.
*/
- if ((tp->t_flags & TF_FASTOPEN) &&
+ if (IS_FASTOPEN(tp->t_flags) &&
(tp->t_state == TCPS_SYN_RECEIVED) &&
SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
(tp->snd_nxt != tp->snd_una)) /* not a retransmit */
@@ -279,11 +286,10 @@ again:
p = NULL;
if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
(p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
- long cwin;
+ uint32_t cwin;
- cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
- if (cwin < 0)
- cwin = 0;
+ cwin =
+ imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
/* Do not retransmit SACK segments beyond snd_recover */
if (SEQ_GT(p->end, tp->snd_recover)) {
/*
@@ -302,10 +308,10 @@ again:
goto after_sack_rexmit;
} else
/* Can rexmit part of the current hole */
- len = ((long)ulmin(cwin,
+ len = ((int32_t)ulmin(cwin,
tp->snd_recover - p->rxmit));
} else
- len = ((long)ulmin(cwin, p->end - p->rxmit));
+ len = ((int32_t)ulmin(cwin, p->end - p->rxmit));
off = p->rxmit - tp->snd_una;
KASSERT(off >= 0,("%s: sack block to the left of una : %d",
__func__, off));
@@ -378,17 +384,17 @@ after_sack_rexmit:
*/
if (sack_rxmit == 0) {
if (sack_bytes_rxmt == 0)
- len = ((long)ulmin(sbavail(&so->so_snd), sendwin) -
+ len = ((int32_t)ulmin(sbavail(&so->so_snd), sendwin) -
off);
else {
- long cwin;
+ int32_t cwin;
/*
* We are inside of a SACK recovery episode and are
* sending new data, having retransmitted all the
* data possible in the scoreboard.
*/
- len = ((long)ulmin(sbavail(&so->so_snd), tp->snd_wnd) -
+ len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) -
off);
/*
* Don't remove this (len > 0) check !
@@ -404,7 +410,7 @@ after_sack_rexmit:
sack_bytes_rxmt;
if (cwin < 0)
cwin = 0;
- len = lmin(len, cwin);
+ len = imin(len, cwin);
}
}
}
@@ -422,7 +428,7 @@ after_sack_rexmit:
* When sending additional segments following a TFO SYN|ACK,
* do not include the SYN bit.
*/
- if ((tp->t_flags & TF_FASTOPEN) &&
+ if (IS_FASTOPEN(tp->t_flags) &&
(tp->t_state == TCPS_SYN_RECEIVED))
flags &= ~TH_SYN;
#endif
@@ -445,7 +451,7 @@ after_sack_rexmit:
* don't include data, as the presence of data may have caused the
* original SYN|ACK to have been dropped by a middlebox.
*/
- if ((tp->t_flags & TF_FASTOPEN) &&
+ if (IS_FASTOPEN(tp->t_flags) &&
(((tp->t_state == TCPS_SYN_RECEIVED) && (tp->t_rxtshift > 0)) ||
(flags & TH_RST)))
len = 0;
@@ -568,7 +574,8 @@ after_sack_rexmit:
flags &= ~TH_FIN;
}
- recwin = sbspace(&so->so_rcv);
+ recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
+ (long)TCP_MAXWIN << tp->rcv_scale);
/*
* Sender silly window avoidance. We transmit under the following
@@ -594,7 +601,7 @@ after_sack_rexmit:
*/
if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
(idle || (tp->t_flags & TF_NODELAY)) &&
- len + off >= sbavail(&so->so_snd) &&
+ (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
(tp->t_flags & TF_NOPUSH) == 0) {
goto send;
}
@@ -645,10 +652,10 @@ after_sack_rexmit:
* taking into account that we are limited by
* TCP_MAXWIN << tp->rcv_scale.
*/
- long adv;
+ int32_t adv;
int oldwin;
- adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
+ adv = recwin;
if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
oldwin = (tp->rcv_adv - tp->rcv_nxt);
adv -= oldwin;
@@ -656,15 +663,16 @@ after_sack_rexmit:
oldwin = 0;
/*
- * If the new window size ends up being the same as the old
- * size when it is scaled, then don't force a window update.
+ * If the new window size ends up being the same as or less
+ * than the old size when it is scaled, then don't force
+ * a window update.
*/
- if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
+ if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
goto dontupdate;
- if (adv >= (long)(2 * tp->t_maxseg) &&
- (adv >= (long)(so->so_rcv.sb_hiwat / 4) ||
- recwin <= (long)(so->so_rcv.sb_hiwat / 8) ||
+ if (adv >= (int32_t)(2 * tp->t_maxseg) &&
+ (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
+ recwin <= (so->so_rcv.sb_hiwat / 8) ||
so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg))
goto send;
}
@@ -780,7 +788,7 @@ send:
* the TFO option may have caused the original
* SYN|ACK to have been dropped by a middlebox.
*/
- if ((tp->t_flags & TF_FASTOPEN) &&
+ if (IS_FASTOPEN(tp->t_flags) &&
(tp->t_state == TCPS_SYN_RECEIVED) &&
(tp->t_rxtshift == 0)) {
to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
@@ -951,7 +959,8 @@ send:
* emptied:
*/
max_len = (tp->t_maxseg - optlen);
- if ((off + len) < sbavail(&so->so_snd)) {
+ if (((uint32_t)off + (uint32_t)len) <
+ sbavail(&so->so_snd)) {
moff = len % max_len;
if (moff != 0) {
len -= moff;
@@ -1047,11 +1056,11 @@ send:
mb = sbsndptr(&so->so_snd, off, len, &moff);
if (len <= MHLEN - hdrlen - max_linkhdr) {
- m_copydata(mb, moff, (int)len,
+ m_copydata(mb, moff, len,
mtod(m, caddr_t) + hdrlen);
m->m_len += len;
} else {
- m->m_next = m_copy(mb, moff, (int)len);
+ m->m_next = m_copym(mb, moff, len, M_NOWAIT);
if (m->m_next == NULL) {
SOCKBUF_UNLOCK(&so->so_snd);
(void) m_free(m);
@@ -1067,7 +1076,8 @@ send:
* give data to the user when a buffer fills or
* a PUSH comes in.)
*/
- if ((off + len == sbused(&so->so_snd)) && !(flags & TH_SYN))
+ if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
+ !(flags & TH_SYN))
flags |= TH_PUSH;
SOCKBUF_UNLOCK(&so->so_snd);
} else {
@@ -1200,14 +1210,12 @@ send:
* Calculate receive window. Don't shrink window,
* but avoid silly window syndrome.
*/
- if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
- recwin < (long)tp->t_maxseg)
+ if (recwin < (so->so_rcv.sb_hiwat / 4) &&
+ recwin < tp->t_maxseg)
recwin = 0;
if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
- recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
- recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
- if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
- recwin = (long)TCP_MAXWIN << tp->rcv_scale;
+ recwin < (tp->rcv_adv - tp->rcv_nxt))
+ recwin = (tp->rcv_adv - tp->rcv_nxt);
/*
* According to RFC1323 the window field in a SYN (i.e., a <SYN>
@@ -1298,16 +1306,18 @@ send:
#ifdef IPSEC
KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL),
- ("%s: mbuf chain shorter than expected: %ld + %u + %u - %u != %u",
+ ("%s: mbuf chain shorter than expected: %d + %u + %u - %u != %u",
__func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL)));
#else
KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL),
- ("%s: mbuf chain shorter than expected: %ld + %u + %u != %u",
+ ("%s: mbuf chain shorter than expected: %d + %u + %u != %u",
__func__, len, hdrlen, ipoptlen, m_length(m, NULL)));
#endif
+#ifdef TCP_HHOOK
/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
hhook_run_tcp_est_out(tp, th, &to, len, tso);
+#endif
#ifdef TCPDEBUG
/*
@@ -1521,7 +1531,7 @@ timer:
tp->t_flags |= TF_SENTFIN;
}
if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
- tp->snd_max = tp->snd_nxt + len;
+ tp->snd_max = tp->snd_nxt + xlen;
}
if (error) {
@@ -1598,7 +1608,7 @@ timer:
* then remember the size of the advertised window.
* Any pending ACK has now been sent.
*/
- if (recwin >= 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
+ if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
tp->rcv_adv = tp->rcv_nxt + recwin;
tp->last_ack_sent = tp->rcv_nxt;
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
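The receive-window logic above clamps recwin to at most TCP_MAXWIN << rcv_scale and skips a pure window update when the advertised value would not grow once scaled. A stand-alone sketch of that decision follows; want_window_update() and its parameters are illustrative names that simplify the tcp_output() variables, not the kernel code.

/*
 * Sketch: with window scaling an advertisement only changes in units of
 * (1 << rcv_scale) bytes, so small openings are not worth a pure update.
 */
#include <stdio.h>
#include <stdint.h>

static int
want_window_update(uint32_t recwin, uint32_t rcv_adv, uint32_t rcv_nxt,
    int rcv_scale, uint32_t maxseg, uint32_t sb_hiwat)
{
	int32_t adv, oldwin;

	adv = recwin;		/* recwin already clamped to TCP_MAXWIN << rcv_scale */
	oldwin = (rcv_adv > rcv_nxt) ? (int32_t)(rcv_adv - rcv_nxt) : 0;
	adv -= oldwin;

	/* Same (or smaller) window once scaled: no update. */
	if (oldwin >> rcv_scale >= (adv + oldwin) >> rcv_scale)
		return (0);

	return (adv >= (int32_t)(2 * maxseg) &&
	    (adv >= (int32_t)(sb_hiwat / 4) ||
	     recwin <= sb_hiwat / 8 ||
	     sb_hiwat <= 8 * maxseg));
}

int
main(void)
{
	uint32_t hiwat = 65536, maxseg = 1460;

	printf("tiny opening:  %d\n",
	    want_window_update(2000, 101000, 100000, 2, maxseg, hiwat));
	printf("large opening: %d\n",
	    want_window_update(60000, 101000, 100000, 2, maxseg, hiwat));
	return (0);
}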
diff --git a/freebsd/sys/netinet/tcp_seq.h b/freebsd/sys/netinet/tcp_seq.h
index 51d971f2..666cf603 100644
--- a/freebsd/sys/netinet/tcp_seq.h
+++ b/freebsd/sys/netinet/tcp_seq.h
@@ -75,20 +75,17 @@
* tcp_ts_getticks() in ms, should be 1ms < x < 1000ms according to RFC 1323.
* We always use 1ms granularity independent of hz.
*/
-static __inline u_int
+static __inline uint32_t
tcp_ts_getticks(void)
{
struct timeval tv;
- u_long ms;
/*
* getmicrouptime() should be good enough for any 1-1000ms granularity.
* Do not use getmicrotime() here as it might break nfsroot/tcp.
*/
getmicrouptime(&tv);
- ms = tv.tv_sec * 1000 + tv.tv_usec / 1000;
-
- return (ms);
+ return (tv.tv_sec * 1000 + tv.tv_usec / 1000);
}
#endif /* _KERNEL */
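tcp_ts_getticks() above returns a 32-bit millisecond counter derived from getmicrouptime(); wrap-around after roughly 49.7 days is harmless because only differences are used. A userspace analogue, assuming clock_gettime(CLOCK_MONOTONIC) as the time source and a hypothetical ts_getticks_ms() helper, could be sketched as:

/* Sketch of a 1 ms granularity, 32-bit monotonic timestamp. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint32_t
ts_getticks_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint32_t)ts.tv_sec * 1000u + (uint32_t)(ts.tv_nsec / 1000000));
}

int
main(void)
{
	uint32_t t0 = ts_getticks_ms();

	/* An RTT sample would simply be "now - echoed timestamp". */
	printf("ticks now: %u\n", (unsigned)t0);
	printf("elapsed since t0: %u ms\n", (unsigned)(ts_getticks_ms() - t0));
	return (0);
}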
diff --git a/freebsd/sys/netinet/tcp_subr.c b/freebsd/sys/netinet/tcp_subr.c
index cff9bd7b..4f196a15 100644
--- a/freebsd/sys/netinet/tcp_subr.c
+++ b/freebsd/sys/netinet/tcp_subr.c
@@ -48,9 +48,13 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
+#ifdef TCP_HHOOK
#include <sys/hhook.h>
+#endif
#include <sys/kernel.h>
+#ifdef TCP_HHOOK
#include <sys/khelp.h>
+#endif
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
@@ -244,7 +248,9 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, signature_verify_input, CTLFLAG_RW,
VNET_DEFINE(uma_zone_t, sack_hole_zone);
#define V_sack_hole_zone VNET(sack_hole_zone)
+#ifdef TCP_HHOOK
VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
+#endif
static struct inpcb *tcp_notify(struct inpcb *, int);
static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
@@ -455,7 +461,9 @@ struct tcpcb_mem {
struct tcpcb tcb;
struct tcp_timer tt;
struct cc_var ccv;
+#ifdef TCP_HHOOK
struct osd osd;
+#endif
};
static VNET_DEFINE(uma_zone_t, tcpcb_zone);
@@ -611,12 +619,14 @@ tcp_init(void)
tcbhash_tuneable = "net.inet.tcp.tcbhashsize";
+#ifdef TCP_HHOOK
if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
&V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
printf("%s: WARNING: unable to register helper hook\n", __func__);
if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
&V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
printf("%s: WARNING: unable to register helper hook\n", __func__);
+#endif
hashsize = TCBHASHSIZE;
TUNABLE_INT_FETCH(tcbhash_tuneable, &hashsize);
if (hashsize == 0) {
@@ -738,7 +748,10 @@ tcp_init(void)
static void
tcp_destroy(void *unused __unused)
{
- int error, n;
+ int n;
+#ifdef TCP_HHOOK
+ int error;
+#endif
/*
* All our processes are gone, all our sockets should be cleaned
@@ -769,6 +782,7 @@ tcp_destroy(void *unused __unused)
tcp_fastopen_destroy();
#endif
+#ifdef TCP_HHOOK
error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
if (error != 0) {
printf("%s: WARNING: unable to deregister helper hook "
@@ -781,6 +795,7 @@ tcp_destroy(void *unused __unused)
"type=%d, id=%d: error %d returned\n", __func__,
HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
}
+#endif
}
VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
#endif
@@ -919,8 +934,8 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
if (tp != NULL) {
if (!(flags & TH_RST)) {
win = sbspace(&inp->inp_socket->so_rcv);
- if (win > (long)TCP_MAXWIN << tp->rcv_scale)
- win = (long)TCP_MAXWIN << tp->rcv_scale;
+ if (win > TCP_MAXWIN << tp->rcv_scale)
+ win = TCP_MAXWIN << tp->rcv_scale;
}
if ((tp->t_flags & TF_NOOPT) == 0)
incl_opts = true;
@@ -1210,6 +1225,7 @@ tcp_newtcpcb(struct inpcb *inp)
return (NULL);
}
+#ifdef TCP_HHOOK
tp->osd = &tm->osd;
if (khelp_init_osd(HELPER_CLASS_TCP, tp->osd)) {
if (tp->t_fb->tfb_tcp_fb_fini)
@@ -1218,6 +1234,7 @@ tcp_newtcpcb(struct inpcb *inp)
uma_zfree(V_tcpcb_zone, tm);
return (NULL);
}
+#endif
#ifdef VIMAGE
tp->t_vnet = inp->inp_vnet;
@@ -1418,7 +1435,7 @@ tcp_discardcb(struct tcpcb *tp)
*/
if (tp->t_rttupdated >= 4) {
struct hc_metrics_lite metrics;
- u_long ssthresh;
+ uint32_t ssthresh;
bzero(&metrics, sizeof(metrics));
/*
@@ -1439,7 +1456,7 @@ tcp_discardcb(struct tcpcb *tp)
ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
if (ssthresh < 2)
ssthresh = 2;
- ssthresh *= (u_long)(tp->t_maxseg +
+ ssthresh *= (tp->t_maxseg +
#ifdef INET6
(isipv6 ? sizeof (struct ip6_hdr) +
sizeof (struct tcphdr) :
@@ -1483,12 +1500,15 @@ tcp_discardcb(struct tcpcb *tp)
if (CC_ALGO(tp)->cb_destroy != NULL)
CC_ALGO(tp)->cb_destroy(tp->ccv);
+#ifdef TCP_HHOOK
khelp_destroy_osd(tp->osd);
+#endif
CC_ALGO(tp) = NULL;
inp->inp_ppcb = NULL;
if (tp->t_timers->tt_draincnt == 0) {
/* We own the last reference on tcpcb, let's free it. */
+ TCPSTATES_DEC(tp->t_state);
if (tp->t_fb->tfb_tcp_fb_fini)
(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
refcount_release(&tp->t_fb->tfb_refcnt);
@@ -1518,6 +1538,7 @@ tcp_timer_discard(void *ptp)
tp->t_timers->tt_draincnt--;
if (tp->t_timers->tt_draincnt == 0) {
/* We own the last reference on this tcpcb, let's free it. */
+ TCPSTATES_DEC(tp->t_state);
if (tp->t_fb->tfb_tcp_fb_fini)
(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
refcount_release(&tp->t_fb->tfb_refcnt);
@@ -1564,7 +1585,8 @@ tcp_close(struct tcpcb *tp)
#endif
in_pcbdrop(inp);
TCPSTAT_INC(tcps_closed);
- TCPSTATES_DEC(tp->t_state);
+ if (tp->t_state != TCPS_CLOSED)
+ tcp_state_change(tp, TCPS_CLOSED);
KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
so = inp->inp_socket;
soisdisconnected(so);
@@ -1955,7 +1977,8 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
if (cmd == PRC_MSGSIZE)
notify = tcp_mtudisc_notify;
else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
- cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
+ cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
+ cmd == PRC_TIMXCEED_INTRANS) && ip)
notify = tcp_drop_syn_sent;
/*
@@ -2087,8 +2110,8 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
if (cmd == PRC_MSGSIZE)
notify = tcp_mtudisc_notify;
else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
- cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
- ip6 != NULL)
+ cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
+ cmd == PRC_TIMXCEED_INTRANS) && ip6 != NULL)
notify = tcp_drop_syn_sent;
/*
@@ -2388,12 +2411,12 @@ tcp_mtudisc(struct inpcb *inp, int mtuoffer)
* is called by TCP routines that access the rmx structure and by
* tcp_mss_update to get the peer/interface MTU.
*/
-u_long
+uint32_t
tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
struct nhop4_extended nh4;
struct ifnet *ifp;
- u_long maxmtu = 0;
+ uint32_t maxmtu = 0;
KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
@@ -2423,14 +2446,14 @@ tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
#endif /* INET */
#ifdef INET6
-u_long
+uint32_t
tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
{
struct nhop6_extended nh6;
struct in6_addr dst6;
uint32_t scopeid;
struct ifnet *ifp;
- u_long maxmtu = 0;
+ uint32_t maxmtu = 0;
KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
@@ -2717,6 +2740,7 @@ tcp_signature_do_compute(struct mbuf *m, int len, int optlen,
* Note: Upper-Layer Packet Length comes before Next Header.
*/
case (IPV6_VERSION >> 4):
+ ip6 = mtod(m, struct ip6_hdr *);
in6 = ip6->ip6_src;
in6_clearscope(&in6);
MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
diff --git a/freebsd/sys/netinet/tcp_syncache.c b/freebsd/sys/netinet/tcp_syncache.c
index d7da3a01..453d5ba4 100644
--- a/freebsd/sys/netinet/tcp_syncache.c
+++ b/freebsd/sys/netinet/tcp_syncache.c
@@ -928,8 +928,6 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
- soisconnected(so);
-
TCPSTAT_INC(tcps_accepts);
return (so);
@@ -1081,10 +1079,17 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
}
/*
- * If timestamps were negotiated the reflected timestamp
- * must be equal to what we actually sent in the SYN|ACK.
+ * If timestamps were negotiated, the reflected timestamp
+ * must be equal to what we actually sent in the SYN|ACK
+	 * except in the case of 0. Some boxes are known to send
+	 * broken timestamp replies during the 3whs (and potentially
+	 * during the connection as well).
+ *
+ * Accept the final ACK of 3whs with reflected timestamp of 0
+ * instead of sending a RST and deleting the syncache entry.
*/
- if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) {
+ if ((to->to_flags & TOF_TS) && to->to_tsecr &&
+ to->to_tsecr != sc->sc_ts) {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
"segment rejected\n",
@@ -1159,11 +1164,10 @@ syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
* the data, we avoid this DoS scenario.
*
* The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
- * cookie is processed, V_tcp_fastopen_enabled set to true, and the
- * TCP_FASTOPEN socket option is set. In this case, a new socket is created
- * and returned via lsop, the mbuf is not freed so that tcp_input() can
- * queue its data to the socket, and 1 is returned to indicate the
- * TFO-socket-creation path was taken.
+ * cookie is processed and a new socket is created. In this case, any data
+ * accompanying the SYN will be queued to the socket by tcp_input() and will
+ * be ACKed either when the application sends response data or the delayed
+ * ACK timer expires, whichever comes first.
*/
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
@@ -1189,6 +1193,7 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
struct ucred *cred;
#ifdef TCP_RFC7413
uint64_t tfo_response_cookie;
+ unsigned int *tfo_pending = NULL;
int tfo_cookie_valid = 0;
int tfo_response_cookie_valid = 0;
#endif
@@ -1217,7 +1222,7 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));
#ifdef TCP_RFC7413
- if (V_tcp_fastopen_enabled && (tp->t_flags & TF_FASTOPEN) &&
+ if (V_tcp_fastopen_enabled && IS_FASTOPEN(tp->t_flags) &&
(tp->t_tfo_pending != NULL) && (to->to_flags & TOF_FASTOPEN)) {
/*
* Limit the number of pending TFO connections to
@@ -1234,8 +1239,13 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
&tfo_response_cookie);
tfo_cookie_valid = (result > 0);
tfo_response_cookie_valid = (result >= 0);
- } else
- atomic_subtract_int(tp->t_tfo_pending, 1);
+ }
+
+ /*
+ * Remember the TFO pending counter as it will have to be
+ * decremented below if we don't make it to syncache_tfo_expand().
+ */
+ tfo_pending = tp->t_tfo_pending;
}
#endif
@@ -1476,9 +1486,9 @@ skip_alloc:
#ifdef TCP_RFC7413
if (tfo_cookie_valid) {
syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
- /* INP_WUNLOCK(inp) will be performed by the called */
+ /* INP_WUNLOCK(inp) will be performed by the caller */
rv = 1;
- goto tfo_done;
+ goto tfo_expanded;
}
#endif
@@ -1504,7 +1514,16 @@ done:
m_freem(m);
}
#ifdef TCP_RFC7413
-tfo_done:
+ /*
+ * If tfo_pending is not NULL here, then a TFO SYN that did not
+ * result in a new socket was processed and the associated pending
+ * counter has not yet been decremented. All such TFO processing paths
+ * transit this point.
+ */
+ if (tfo_pending != NULL)
+ tcp_fastopen_decrement_counter(tfo_pending);
+
+tfo_expanded:
#endif
if (cred != NULL)
crfree(cred);
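The tfo_pending changes above remember the pending-connection counter early and decrement it on every path that does not hand off to syncache_tfo_expand(), so no exit path leaks a TFO slot. A stand-alone sketch of that cleanup pattern follows; add_entry(), decrement_counter() and the global counter are invented for illustration and stand in for tcp_fastopen_decrement_counter().

/* Sketch: one late cleanup point that every non-expand path transits. */
#include <stdio.h>

static unsigned int tfo_pending_connections = 1;

static void
decrement_counter(unsigned int *counter)
{
	if (*counter > 0)
		(*counter)--;
}

static int
add_entry(int cookie_valid, int expand_ok)
{
	unsigned int *tfo_pending = &tfo_pending_connections; /* remembered early */
	int rv = 0;

	if (cookie_valid && expand_ok) {
		rv = 1;			/* socket created; counter now owned elsewhere */
		goto expanded;
	}
	/* ... ordinary syncache processing would happen here ... */

	/* Every path that did not create a socket transits this point. */
	if (tfo_pending != NULL)
		decrement_counter(tfo_pending);
expanded:
	return (rv);
}

int
main(void)
{
	printf("expand path:   rv=%d pending=%u\n", add_entry(1, 1),
	    tfo_pending_connections);
	tfo_pending_connections = 1;
	printf("fallback path: rv=%d pending=%u\n", add_entry(1, 0),
	    tfo_pending_connections);
	return (0);
}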
diff --git a/freebsd/sys/netinet/tcp_timer.c b/freebsd/sys/netinet/tcp_timer.c
index edfc3829..89b61ad8 100644
--- a/freebsd/sys/netinet/tcp_timer.c
+++ b/freebsd/sys/netinet/tcp_timer.c
@@ -470,6 +470,26 @@ tcp_timer_keep(void *xtp)
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
+
+ /*
+ * Because we don't regularly reset the keepalive callout in
+ * the ESTABLISHED state, it may be that we don't actually need
+ * to send a keepalive yet. If that occurs, schedule another
+ * call for the next time the keepalive timer might expire.
+ */
+ if (TCPS_HAVEESTABLISHED(tp->t_state)) {
+ u_int idletime;
+
+ idletime = ticks - tp->t_rcvtime;
+ if (idletime < TP_KEEPIDLE(tp)) {
+ callout_reset(&tp->t_timers->tt_keep,
+ TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
+ INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+
/*
* Keep-alive timer went off; send something
* or drop connection if idle for too long.
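The keepalive change above compensates for no longer resetting TT_KEEP on every segment in ESTABLISHED: when the timer fires, the handler checks how long the connection has actually been idle and re-arms for the remainder instead of probing. A tiny model of that check, using arbitrary tick values rather than the kernel callout API:

/* Sketch: lazy keepalive rescheduling on timer expiry. */
#include <stdio.h>

#define TP_KEEPIDLE	7200	/* ticks, e.g. two hours at 1 Hz */

static void
keep_timer_fired(unsigned int ticks_now, unsigned int t_rcvtime)
{
	unsigned int idletime = ticks_now - t_rcvtime;

	if (idletime < TP_KEEPIDLE) {
		printf("re-arm keepalive in %u ticks\n", TP_KEEPIDLE - idletime);
		return;
	}
	printf("idle for %u ticks: send keepalive probe\n", idletime);
}

int
main(void)
{
	keep_timer_fired(10000, 9000);	/* data seen recently: just re-arm */
	keep_timer_fired(20000, 1000);	/* genuinely idle: probe */
	return (0);
}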
diff --git a/freebsd/sys/netinet/tcp_timewait.c b/freebsd/sys/netinet/tcp_timewait.c
index 330e842e..7eb05462 100644
--- a/freebsd/sys/netinet/tcp_timewait.c
+++ b/freebsd/sys/netinet/tcp_timewait.c
@@ -233,6 +233,10 @@ tcp_twstart(struct tcpcb *tp)
INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
+ /* A dropped inp should never transition to TIME_WAIT state. */
+ KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
+ "(inp->inp_flags & INP_DROPPED) != 0"));
+
if (V_nolocaltimewait) {
int error = 0;
#ifdef INET6
@@ -338,6 +342,7 @@ tcp_twstart(struct tcpcb *tp)
tcp_twrespond(tw, TH_ACK);
inp->inp_ppcb = tw;
inp->inp_flags |= INP_TIMEWAIT;
+ TCPSTATES_INC(TCPS_TIME_WAIT);
tcp_tw_2msl_reset(tw, 0);
/*
diff --git a/freebsd/sys/netinet/tcp_usrreq.c b/freebsd/sys/netinet/tcp_usrreq.c
index d5fa680f..436f30f8 100644
--- a/freebsd/sys/netinet/tcp_usrreq.c
+++ b/freebsd/sys/netinet/tcp_usrreq.c
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/jail.h>
+#include <sys/syslog.h>
#ifdef DDB
#include <ddb/ddb.h>
@@ -212,10 +213,26 @@ tcp_detach(struct socket *so, struct inpcb *inp)
* In all three cases the tcptw should not be freed here.
*/
if (inp->inp_flags & INP_DROPPED) {
- KASSERT(tp == NULL, ("tcp_detach: INP_TIMEWAIT && "
- "INP_DROPPED && tp != NULL"));
in_pcbdetach(inp);
- in_pcbfree(inp);
+ if (__predict_true(tp == NULL)) {
+ in_pcbfree(inp);
+ } else {
+ /*
+ * This case should not happen as in TIMEWAIT
+ * state the inp should not be destroyed before
+ * its tcptw. If INVARIANTS is defined, panic.
+ */
+#ifdef INVARIANTS
+ panic("%s: Panic before an inp double-free: "
+ "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
+ , __func__);
+#else
+ log(LOG_ERR, "%s: Avoid an inp double-free: "
+ "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
+ , __func__);
+#endif
+ INP_WUNLOCK(inp);
+ }
} else {
in_pcbdetach(inp);
INP_WUNLOCK(inp);
@@ -412,7 +429,7 @@ tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
SOCK_UNLOCK(so);
#ifdef TCP_RFC7413
- if (tp->t_flags & TF_FASTOPEN)
+ if (IS_FASTOPEN(tp->t_flags))
tp->t_tfo_pending = tcp_fastopen_alloc_counter();
#endif
out:
@@ -462,7 +479,7 @@ tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
SOCK_UNLOCK(so);
#ifdef TCP_RFC7413
- if (tp->t_flags & TF_FASTOPEN)
+ if (IS_FASTOPEN(tp->t_flags))
tp->t_tfo_pending = tcp_fastopen_alloc_counter();
#endif
out:
@@ -828,7 +845,7 @@ tcp_usr_rcvd(struct socket *so, int flags)
* application response data, or failing that, when the DELACK timer
* expires.
*/
- if ((tp->t_flags & TF_FASTOPEN) &&
+ if (IS_FASTOPEN(tp->t_flags) &&
(tp->t_state == TCPS_SYN_RECEIVED))
goto out;
#endif
@@ -1331,9 +1348,11 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
ti->tcpi_snd_wscale = tp->snd_scale;
ti->tcpi_rcv_wscale = tp->rcv_scale;
}
+ if (tp->t_flags & TF_ECN_PERMIT)
+ ti->tcpi_options |= TCPI_OPT_ECN;
ti->tcpi_rto = tp->t_rxtcur * tick;
- ti->tcpi_last_data_recv = (long)(ticks - (int)tp->t_rcvtime) * tick;
+ ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
@@ -1819,16 +1838,16 @@ unlock_and_done:
case TCP_KEEPCNT:
switch (sopt->sopt_name) {
case TCP_KEEPIDLE:
- ui = tp->t_keepidle / hz;
+ ui = TP_KEEPIDLE(tp) / hz;
break;
case TCP_KEEPINTVL:
- ui = tp->t_keepintvl / hz;
+ ui = TP_KEEPINTVL(tp) / hz;
break;
case TCP_KEEPINIT:
- ui = tp->t_keepinit / hz;
+ ui = TP_KEEPINIT(tp) / hz;
break;
case TCP_KEEPCNT:
- ui = tp->t_keepcnt;
+ ui = TP_KEEPCNT(tp);
break;
}
INP_WUNLOCK(inp);
@@ -2242,15 +2261,15 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
tp->iss, tp->irs, tp->rcv_nxt);
db_print_indent(indent);
- db_printf("rcv_adv: 0x%08x rcv_wnd: %lu rcv_up: 0x%08x\n",
+ db_printf("rcv_adv: 0x%08x rcv_wnd: %u rcv_up: 0x%08x\n",
tp->rcv_adv, tp->rcv_wnd, tp->rcv_up);
db_print_indent(indent);
- db_printf("snd_wnd: %lu snd_cwnd: %lu\n",
+ db_printf("snd_wnd: %u snd_cwnd: %u\n",
tp->snd_wnd, tp->snd_cwnd);
db_print_indent(indent);
- db_printf("snd_ssthresh: %lu snd_recover: "
+ db_printf("snd_ssthresh: %u snd_recover: "
"0x%08x\n", tp->snd_ssthresh, tp->snd_recover);
db_print_indent(indent);
@@ -2271,7 +2290,7 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
tp->t_rttbest);
db_print_indent(indent);
- db_printf("t_rttupdated: %lu max_sndwnd: %lu t_softerror: %d\n",
+ db_printf("t_rttupdated: %lu max_sndwnd: %u t_softerror: %d\n",
tp->t_rttupdated, tp->max_sndwnd, tp->t_softerror);
db_print_indent(indent);
@@ -2289,10 +2308,10 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
db_print_indent(indent);
db_printf("ts_offset: %u last_ack_sent: 0x%08x snd_cwnd_prev: "
- "%lu\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
+ "%u\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
db_print_indent(indent);
- db_printf("snd_ssthresh_prev: %lu snd_recover_prev: 0x%08x "
+ db_printf("snd_ssthresh_prev: %u snd_recover_prev: 0x%08x "
"t_badrxtwin: %u\n", tp->snd_ssthresh_prev,
tp->snd_recover_prev, tp->t_badrxtwin);
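The tcp_detach() change above trades a bare KASSERT for an explicit recovery path: with INVARIANTS the impossible INP_TIMEWAIT && INP_DROPPED && tp != NULL state still panics, otherwise it is logged and the inp is left alone rather than freed twice. A generic sketch of that debug-versus-release pattern (not the kernel code; names and state are invented):

/* Sketch: abort in debug builds, log and take the safe path otherwise. */
#include <stdio.h>
#include <stdlib.h>

static void
detach(void *unexpected_state)
{
	if (unexpected_state != NULL) {
#ifdef INVARIANTS
		fprintf(stderr, "impossible state: dropped pcb still has tp\n");
		abort();
#else
		fprintf(stderr, "avoiding double free: dropped pcb still has tp\n");
		return;		/* leave the object alone; do not free it twice */
#endif
	}
	printf("normal teardown\n");
}

int
main(void)
{
	int leftover = 1;

	detach(NULL);		/* normal case */
	detach(&leftover);	/* unexpected case: handled per build flavor */
	return (0);
}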
diff --git a/freebsd/sys/netinet/tcp_var.h b/freebsd/sys/netinet/tcp_var.h
index 5dcd35b8..f4ea246b 100644
--- a/freebsd/sys/netinet/tcp_var.h
+++ b/freebsd/sys/netinet/tcp_var.h
@@ -192,13 +192,13 @@ struct tcpcb {
tcp_seq rcv_nxt; /* receive next */
tcp_seq rcv_adv; /* advertised window */
- u_long rcv_wnd; /* receive window */
+ uint32_t rcv_wnd; /* receive window */
tcp_seq rcv_up; /* receive urgent pointer */
- u_long snd_wnd; /* send window */
- u_long snd_cwnd; /* congestion-controlled window */
+ uint32_t snd_wnd; /* send window */
+ uint32_t snd_cwnd; /* congestion-controlled window */
u_long snd_spare1; /* unused */
- u_long snd_ssthresh; /* snd_cwnd size threshold for
+ uint32_t snd_ssthresh; /* snd_cwnd size threshold for
* for slow start exponential to
* linear switch
*/
@@ -223,7 +223,7 @@ struct tcpcb {
u_int t_rttmin; /* minimum rtt allowed */
u_int t_rttbest; /* best rtt we've seen */
u_long t_rttupdated; /* number of times rtt sampled */
- u_long max_sndwnd; /* largest window peer has offered */
+ uint32_t max_sndwnd; /* largest window peer has offered */
int t_softerror; /* possible error not yet reported */
/* out-of-band data */
@@ -239,8 +239,8 @@ struct tcpcb {
tcp_seq last_ack_sent;
/* experimental */
- u_long snd_cwnd_prev; /* cwnd prior to retransmit */
- u_long snd_ssthresh_prev; /* ssthresh prior to retransmit */
+ uint32_t snd_cwnd_prev; /* cwnd prior to retransmit */
+ uint32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */
tcp_seq snd_recover_prev; /* snd_recover prior to retransmit */
int t_sndzerowin; /* zero-window updates sent */
u_int t_badrxtwin; /* window for retransmit recovery */
@@ -349,6 +349,12 @@ struct tcpcb {
#define ENTER_RECOVERY(t_flags) t_flags |= (TF_CONGRECOVERY | TF_FASTRECOVERY)
#define EXIT_RECOVERY(t_flags) t_flags &= ~(TF_CONGRECOVERY | TF_FASTRECOVERY)
+#if defined(_KERNEL) && !defined(TCP_RFC7413)
+#define IS_FASTOPEN(t_flags) (false)
+#else
+#define IS_FASTOPEN(t_flags) (t_flags & TF_FASTOPEN)
+#endif
+
#define BYTES_THIS_ACK(tp, th) (th->th_ack - tp->snd_una)
/*
@@ -415,13 +421,13 @@ struct tcpopt {
#define TO_SYN 0x01 /* parse SYN-only options */
struct hc_metrics_lite { /* must stay in sync with hc_metrics */
- u_long rmx_mtu; /* MTU for this path */
- u_long rmx_ssthresh; /* outbound gateway buffer limit */
- u_long rmx_rtt; /* estimated round trip time */
- u_long rmx_rttvar; /* estimated rtt variance */
- u_long rmx_cwnd; /* congestion window */
- u_long rmx_sendpipe; /* outbound delay-bandwidth product */
- u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+ uint32_t rmx_mtu; /* MTU for this path */
+ uint32_t rmx_ssthresh; /* outbound gateway buffer limit */
+ uint32_t rmx_rtt; /* estimated round trip time */
+ uint32_t rmx_rttvar; /* estimated rtt variance */
+ uint32_t rmx_cwnd; /* congestion window */
+ uint32_t rmx_sendpipe; /* outbound delay-bandwidth product */
+ uint32_t rmx_recvpipe; /* inbound delay-bandwidth product */
};
/*
@@ -657,7 +663,7 @@ struct tcp_hhook_data {
struct tcpcb *tp;
struct tcphdr *th;
struct tcpopt *to;
- long len;
+ uint32_t len;
int tso;
tcp_seq curack;
};
@@ -749,8 +755,10 @@ VNET_DECLARE(int, tcp_ecn_maxretries);
#define V_tcp_do_ecn VNET(tcp_do_ecn)
#define V_tcp_ecn_maxretries VNET(tcp_ecn_maxretries)
+#ifdef TCP_HHOOK
VNET_DECLARE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST + 1]);
#define V_tcp_hhh VNET(tcp_hhh)
+#endif
VNET_DECLARE(int, tcp_do_rfc6675_pipe);
#define V_tcp_do_rfc6675_pipe VNET(tcp_do_rfc6675_pipe)
@@ -784,12 +792,14 @@ void tcp_pulloutofband(struct socket *,
void tcp_xmit_timer(struct tcpcb *, int);
void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
void cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
- uint16_t type);
+ uint16_t nsegs, uint16_t type);
void cc_conn_init(struct tcpcb *tp);
void cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
void cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type);
+#ifdef TCP_HHOOK
void hhook_run_tcp_est_in(struct tcpcb *tp,
struct tcphdr *th, struct tcpopt *to);
+#endif
int tcp_input(struct mbuf **, int *, int);
void tcp_do_segment(struct mbuf *, struct tcphdr *,
@@ -802,8 +812,8 @@ struct tcp_function_block *find_and_ref_tcp_functions(struct tcp_function_set *f
struct tcp_function_block *find_and_ref_tcp_fb(struct tcp_function_block *blk);
int tcp_default_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp);
-u_long tcp_maxmtu(struct in_conninfo *, struct tcp_ifcap *);
-u_long tcp_maxmtu6(struct in_conninfo *, struct tcp_ifcap *);
+uint32_t tcp_maxmtu(struct in_conninfo *, struct tcp_ifcap *);
+uint32_t tcp_maxmtu6(struct in_conninfo *, struct tcp_ifcap *);
u_int tcp_maxseg(const struct tcpcb *);
void tcp_mss_update(struct tcpcb *, int, int, struct hc_metrics_lite *,
struct tcp_ifcap *);
@@ -852,8 +862,8 @@ void tcp_hc_init(void);
void tcp_hc_destroy(void);
#endif
void tcp_hc_get(struct in_conninfo *, struct hc_metrics_lite *);
-u_long tcp_hc_getmtu(struct in_conninfo *);
-void tcp_hc_updatemtu(struct in_conninfo *, u_long);
+uint32_t tcp_hc_getmtu(struct in_conninfo *);
+void tcp_hc_updatemtu(struct in_conninfo *, uint32_t);
void tcp_hc_update(struct in_conninfo *, struct hc_metrics_lite *);
extern struct pr_usrreqs tcp_usrreqs;
@@ -867,7 +877,6 @@ struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt);
void tcp_sack_partialack(struct tcpcb *, struct tcphdr *);
void tcp_free_sackholes(struct tcpcb *tp);
int tcp_newreno(struct tcpcb *, struct tcphdr *);
-u_long tcp_seq_subtract(u_long, u_long );
int tcp_compute_pipe(struct tcpcb *);
static inline void
diff --git a/freebsd/sys/netinet/udp_usrreq.c b/freebsd/sys/netinet/udp_usrreq.c
index 7eb11648..42461ce9 100644
--- a/freebsd/sys/netinet/udp_usrreq.c
+++ b/freebsd/sys/netinet/udp_usrreq.c
@@ -603,7 +603,8 @@ udp_input(struct mbuf **mp, int *offp, int proto)
if (last != NULL) {
struct mbuf *n;
- if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
+ if ((n = m_copym(m, 0, M_COPYALL, M_NOWAIT)) !=
+ NULL) {
UDP_PROBE(receive, NULL, last, ip,
last, uh);
if (udp_append(last, ip, n, iphlen,
@@ -1572,12 +1573,18 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
release:
if (unlock_udbinfo == UH_WLOCKED) {
+ KASSERT(unlock_inp == UH_WLOCKED,
+ ("%s: excl udbinfo lock, shared inp lock", __func__));
INP_HASH_WUNLOCK(pcbinfo);
INP_WUNLOCK(inp);
} else if (unlock_udbinfo == UH_RLOCKED) {
+ KASSERT(unlock_inp == UH_RLOCKED,
+ ("%s: shared udbinfo lock, excl inp lock", __func__));
INP_HASH_RUNLOCK(pcbinfo);
INP_RUNLOCK(inp);
- } else
+ } else if (unlock_inp == UH_WLOCKED)
+ INP_WUNLOCK(inp);
+ else
INP_RUNLOCK(inp);
m_freem(m);
return (error);
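The udp_output() release path above now records how the inpcb was locked and drops it accordingly, asserting that the udbinfo and inp lock modes agree. A stand-alone model of that bookkeeping, with an invented enum and release() helper rather than the kernel lock primitives, might be:

/* Sketch: release locks according to the mode recorded at lock time. */
#include <assert.h>
#include <stdio.h>

enum lockmode { UH_UNLOCKED, UH_RLOCKED, UH_WLOCKED };

static void
release(enum lockmode unlock_udbinfo, enum lockmode unlock_inp)
{
	if (unlock_udbinfo == UH_WLOCKED) {
		assert(unlock_inp == UH_WLOCKED);	/* excl udbinfo implies excl inp */
		printf("wunlock udbinfo hash, wunlock inp\n");
	} else if (unlock_udbinfo == UH_RLOCKED) {
		assert(unlock_inp == UH_RLOCKED);	/* shared udbinfo implies shared inp */
		printf("runlock udbinfo hash, runlock inp\n");
	} else if (unlock_inp == UH_WLOCKED) {
		printf("wunlock inp only\n");		/* hash never taken, excl inp */
	} else {
		printf("runlock inp only\n");		/* hash never taken, shared inp */
	}
}

int
main(void)
{
	release(UH_WLOCKED, UH_WLOCKED);
	release(UH_UNLOCKED, UH_RLOCKED);
	return (0);
}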